code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd # %load_ext autoreload # %autoreload 2 from sklearn.svm import SVC,SVR import os import sys from MFTreeSearchCV.MFTreeSearchCV import * from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer import xgboost as xgb # %ls MFTreeSearchCV/ from sklearn.datasets import load_digits,load_boston,fetch_20newsgroups data = load_boston() newsgroups_train = fetch_20newsgroups(subset='all') vectorizer = TfidfVectorizer() features = vectorizer.fit_transform(newsgroups_train.data) labels = newsgroups_train.target #features =features.todense() X = features y = labels X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) X_train.shape estimator = xgb.XGBClassifier(max_depth=3,n_estimators=100,colsample_bytree=0.8,nthread=16) param_dict = {'max_depth':{'range':[2,6],'scale':'linear','type':'int'}, 'n_estimators':{'range':[50,300],'scale':'linear','type':'int'},\ 'colsample_bytree':{'range':[0.3,0.9],'scale':'linear','type':'real'}, 'learning_rate':{'range':[0.03,0.1],'scale':'log','type':'real'}} fidelity_range = [500,15076] n_jobs = 3 cv = 5 fixed_params = {'nthread':16} scoring = 'accuracy' t1 = time.time() #estimator = estimator.fit(X_train,y_train) t2 = time.time() unit_cost = cv*(t2 - t1) total_budget = 1000 model = MFTreeSearchCV(estimator=estimator,param_dict=param_dict,scoring=scoring,\ fidelity_range=fidelity_range,unit_cost=None,\ cv=5, n_jobs = n_jobs,total_budget=total_budget,debug = True,fixed_params=fixed_params) model.fixed_params m = model.fit(X_train,y_train) m.best_params_ # + y_pred = m.predict(X_test) # newm = xgb.XGBRegressor() # newm = newm.set_params(**m.best_params_) # newm = newm.fit(X_train,y_train) # y_pred = 
newm.predict(X_test) # - r2_score(y_pred,y_test) m.MP.unit_cost m.best_params_ m.points #m.evals t2 -t1
.ipynb_checkpoints/Illustrate-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Polynomial Chaos Expansion Example 7 # Authors: <NAME>, <NAME> \ # Date: January 20, 2021 # In this example, PCE is used to generate a surrogate model for a given set of 2D data for a numerical model with multi-dimensional outputs. # Import necessary libraries. import numpy as np import matplotlib.pyplot as plt import math from UQpy.Distributions import Normal, JointInd from UQpy.Surrogates import * # The analytical function below describes the eigenvalues of the 2D Helmholtz equation on a square. def analytical_eigenvalues_2d(Ne, lx, ly): """ Computes the first Ne eigenvalues of a rectangular waveguide with dimensions lx, ly Parameters ---------- Ne : integer number of eigenvalues. lx : float length in x direction. ly : float length in y direction. Returns ------- ev : numpy 1d array the Ne eigenvalues """ ev = [(m*np.pi/lx)**2 + (n*np.pi/ly)**2 for m in range(1, Ne+1) for n in range(1, Ne+1)] ev = np.array(ev) ### Uncertainty changes the sorting order of the eigenvalues. ### The resulting value "jumps" cannot be captured by a PCE. # sort eigenvalues and take the first Ne ones #idx = np.argsort(ev)[:Ne] #ev = ev[idx] return ev[:Ne] # Create a distribution object. pdf_lx = Normal(loc=2, scale=0.02) pdf_ly = Normal(loc=1, scale=0.01) margs = [pdf_lx, pdf_ly] joint = JointInd(marginals=margs) # Define the number of input dimensions and choose the number of output dimensions (number of eigenvalues). dim_in = 2 dim_out = 10 # Construct PCE models by varying the maximum degree of polynomials (and therefore the number of polynomial basis) and compute the validation error for all resulting models. 
# + errors = [] basis = [] pce_models = [] for max_degree in range(1,6): print('Total degree: ', max_degree) # Polynomial basis polys = Polynomials(dist_object=joint, degree=max_degree) n_basis = math.factorial(max_degree+dim_in) / \ (math.factorial(max_degree)*math.factorial(dim_in)) basis.append(int(n_basis)) print('Basis terms: ', int(n_basis)) # Regression method #regression_method = PolyChaosLstsq(poly_object=polys) regression_method = PolyChaosLasso(poly_object=polys, learning_rate=0.01, iterations=50000, penalty=0) #regression_method = PolyChaosRidge(poly_object=polys, learning_rate=0.001, iterations=10000, penalty=0) pce = PCE(method=regression_method) pce_models.append(pce) # Training data sampling_coeff = 4 print('Sampling coefficient: ', sampling_coeff) np.random.seed(42) n_samples = math.ceil(sampling_coeff*n_basis) print('Training data: ', n_samples) xx = joint.rvs(n_samples) yy = np.array([analytical_eigenvalues_2d(dim_out, x[0], x[1]) for x in xx]) # Design matrix / conditioning D = polys.evaluate(xx) cond_D = np.linalg.cond(D) print('Condition number: ', cond_D) # Fit model pce.fit(xx,yy) # Coefficients #print('PCE coefficients: ', pce.C) # Validation errors np.random.seed(999) n_samples = 1000 x_val = joint.rvs(n_samples) y_val = np.array([analytical_eigenvalues_2d(dim_out, x[0], x[1]) for x in x_val]) y_val_pce = pce.predict(x_val) error_val = ErrorEstimation(surr_object=pce).validation(x_val, y_val) errors.append(error_val) print('Validation error: ', error_val) print('') # - # Plot errors. errors = np.array(errors) plt.figure(1, figsize=(9,6)) for i in range(np.shape(errors)[0]): plt.semilogy(np.linspace(1, dim_out, dim_out), errors[i], '--o', label='basis: {}'.format(basis[i])) plt.legend() plt.show() # Moment estimation (directly estimated from the PCE model of max_degree = 2). 
print('First moments estimation from PCE :', MomentEstimation(surr_object=pce_models[1]).get()[0]) print('') print('Second moments estimation from PCE :', MomentEstimation(surr_object=pce_models[1]).get()[1]) # Moment estimation via Monte Carlo integration. n_mc = 100000 x_mc = joint.rvs(n_mc) y_mc = np.array([analytical_eigenvalues_2d(dim_out, x[0], x[1]) for x in x_mc]) mu = np.mean(y_mc,axis=0) moments = (np.round((1/n_mc)*np.sum(y_mc,axis=0),4), np.round((1/n_mc)*np.sum((y_mc-mu)**2,axis=0),4)) print('First moments from Monte Carlo integration: ', moments[0]) print('') print('Second moments from Monte Carlo integration: ', moments[1])
example/Surrogates/PCE/PCE_Example7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/elvinaqa/Scraper-Chatbot-/blob/master/ChatbotAI_Lib_Wiki.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="r5mInY8Q7KLM" # !wget https://github.com/shubham0204/Dataset_Archives/blob/master/chatbot_nlp.zip?raw=true -O chatbot_nlp.zip # !unzip chatbot_nlp.zip # + id="nUq0BI2KYEu-" pip install chatbotAI # + id="oLk8k-k2YG2-" from chatbot import demo # + id="g5K9I4H_ZKaA" outputId="ba8d132b-f2b2-42e1-df1a-b1de43af665e" colab={"base_uri": "https://localhost:8080/"} from google.colab import drive drive.mount('/content/gdrive') # + id="JCVPdRcjZ1No" outputId="8a3e6adb-9c37-4beb-b498-22f4a5df1629" colab={"base_uri": "https://localhost:8080/"} # ls # + id="lbhkTTJeZ12j" outputId="218ce2e9-4821-4886-e4fd-3ffb4323692e" colab={"base_uri": "https://localhost:8080/"} # ! git clone https://github.com/ahmadfaizalbh/Chatbot.git # + id="UuE02PyDaJhI" outputId="05b2ce01-d10d-4fc1-9767-37f6c512c5c8" colab={"base_uri": "https://localhost:8080/"} # cd Chatbot # + id="vM2-B7KdYRFT" demo() # + id="ggCwS1ADgGOg" # !pip install wikipedia # + id="8qEdrFZyYSx5" outputId="18e44ecc-464c-447d-e939-7f6a39e7b190" colab={"base_uri": "https://localhost:8080/"} from chatbot import Chat, register_call import wikipedia @register_call("whoIs") def who_is(query,session_id="general"): try: return wikipedia.summary(query) except Exception: for new_query in wikipedia.search(query): try: return wikipedia.summary(new_query) except Exception: pass return "I don't know about "+query first_question="Hi, how are you?" Chat("examples/Example.template").converse(first_question)
ChatbotAI_Lib_Wiki.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.5 64-bit # name: python3 # --- # + [markdown] id="OAr8BL9dqIG9" # <h1>Grafo</h1> # + id="tSU_BLR1plpl" class Vertice: def __init__(self, rotulo, distancia_objetivo): self.rotulo = rotulo self.visitado = False self.distancia_objetivo = distancia_objetivo self.adjacentes = [] def adiciona_adjacente(self, adjacente): self.adjacentes.append(adjacente) def mostra_adjacentes(self): for i in self.adjacentes: print(i.vertice.rotulo, i.custo) # + id="HemsH_jguuxU" class Adjacente: def __init__(self, vertice, custo): self.vertice = vertice self.custo = custo # + id="wdLzCu_Eu7QZ" class Grafo: porto_uniao = Vertice('Porto União', 203) paulo_frontin = Vertice('Paulo Frontin', 172) canoinhas = Vertice('Canoinhas', 171) tres_barras = Vertice('Três Barras', 131) sao_mateus_do_sul = Vertice('São Mateus do Sul', 123) irati = Vertice('Irati', 139) curitiba = Vertice('Curitiba', 0) palmeira = Vertice('Palmeira', 59) mafra = Vertice('Mafra', 94) campo_largo = Vertice('Campo Largo', 27) balsa_nova = Vertice('Balsa Nova', 41) lapa = Vertice('Lapa', 74) tijucas_do_sol = Vertice('Tijucas do Sul', 56) araucaria = Vertice('Araucária', 23) sao_jose_dos_pinhais = Vertice('São José dos Pinhais', 13) contenda = Vertice('Contenda', 39) porto_uniao.adiciona_adjacente(Adjacente(paulo_frontin, 46)) porto_uniao.adiciona_adjacente(Adjacente(sao_mateus_do_sul, 87)) porto_uniao.adiciona_adjacente(Adjacente(canoinhas, 78)) paulo_frontin.adiciona_adjacente(Adjacente(irati, 75)) paulo_frontin.adiciona_adjacente(Adjacente(porto_uniao, 46)) sao_mateus_do_sul.adiciona_adjacente(Adjacente(tres_barras, 43)) sao_mateus_do_sul.adiciona_adjacente(Adjacente(palmeira, 77)) sao_mateus_do_sul.adiciona_adjacente(Adjacente(lapa, 60)) sao_mateus_do_sul.adiciona_adjacente(Adjacente(irati, 57)) 
sao_mateus_do_sul.adiciona_adjacente(Adjacente(porto_uniao,87)) canoinhas.adiciona_adjacente(Adjacente(tres_barras, 12)) canoinhas.adiciona_adjacente(Adjacente(mafra, 66)) canoinhas.adiciona_adjacente(Adjacente(porto_uniao, 78)) irati.adiciona_adjacente(Adjacente(palmeira, 75)) irati.adiciona_adjacente(Adjacente(sao_mateus_do_sul, 57)) irati.adiciona_adjacente(Adjacente(paulo_frontin, 75)) tres_barras.adiciona_adjacente(Adjacente(canoinhas, 12)) tres_barras.adiciona_adjacente(Adjacente(sao_mateus_do_sul, 43)) palmeira.adiciona_adjacente(Adjacente(campo_largo, 55)) palmeira.adiciona_adjacente(Adjacente(irati, 75)) palmeira.adiciona_adjacente(Adjacente(sao_mateus_do_sul, 77)) lapa.adiciona_adjacente(Adjacente(mafra, 57)) lapa.adiciona_adjacente(Adjacente(contenda, 26)) lapa.adiciona_adjacente(Adjacente(sao_mateus_do_sul, 60)) mafra.adiciona_adjacente(Adjacente(tijucas_do_sol, 99)) mafra.adiciona_adjacente(Adjacente(lapa, 56)) mafra.adiciona_adjacente(Adjacente(canoinhas, 66)) tijucas_do_sol.adiciona_adjacente(Adjacente(sao_jose_dos_pinhais, 49)) tijucas_do_sol.adiciona_adjacente(Adjacente(mafra, 99)) contenda.adiciona_adjacente(Adjacente(balsa_nova, 19)) contenda.adiciona_adjacente(Adjacente(araucaria, 18)) contenda.adiciona_adjacente(Adjacente(lapa, 26)) araucaria.adiciona_adjacente(Adjacente(curitiba, 37)) araucaria.adiciona_adjacente(Adjacente(contenda, 18)) campo_largo.adiciona_adjacente(Adjacente(balsa_nova, 22)) campo_largo.adiciona_adjacente(Adjacente(curitiba, 29)) campo_largo.adiciona_adjacente(Adjacente(palmeira, 55)) balsa_nova.adiciona_adjacente(Adjacente(curitiba, 29)) balsa_nova.adiciona_adjacente(Adjacente(contenda, 19)) balsa_nova.adiciona_adjacente(Adjacente(campo_largo, 22)) sao_jose_dos_pinhais.adiciona_adjacente(Adjacente(curitiba, 15)) sao_jose_dos_pinhais.adiciona_adjacente(Adjacente(tijucas_do_sol, 45)) curitiba.adiciona_adjacente(Adjacente(sao_jose_dos_pinhais, 15)) curitiba.adiciona_adjacente(Adjacente(araucaria, 37)) 
curitiba.adiciona_adjacente(Adjacente(balsa_nova, 51)) curitiba.adiciona_adjacente(Adjacente(campo_largo, 29)) # + id="DyWHoznyv0aA" grafo = Grafo() # + colab={"base_uri": "https://localhost:8080/"} id="sQrPuHedv4a_" outputId="0dd7ceb1-48d3-420f-e2ae-9109ead7395c" grafo.porto_uniao.mostra_adjacentes() # + [markdown] id="0c1OpmOj6pMd" # <h1>Vetor Ordenado</h1> # + id="ATABGqxI6o2c" import numpy as np class VetorOrdenado: def __init__(self, capacidade): self.capacidade = capacidade self.ultima_posicao = -1 # Mudança no tipo de dados self.valores = np.empty(self.capacidade, dtype=object) # Referência para o vértice e comparação com a distância para o objetivo def insere(self, vertice): if self.ultima_posicao == self.capacidade - 1: print('Capacidade máxima atingida') return posicao = 0 for i in range(self.ultima_posicao + 1): posicao = i if self.valores[i].distancia_objetivo > vertice.distancia_objetivo: break if i == self.ultima_posicao: posicao = i + 1 x = self.ultima_posicao while x >= posicao: self.valores[x + 1] = self.valores[x] x -= 1 self.valores[posicao] = vertice self.ultima_posicao += 1 def imprime(self): if self.ultima_posicao == -1: print('O vetor está vazio') else: for i in range(self.ultima_posicao + 1): print(i, ' - ', self.valores[i].rotulo, ' - ', self.valores[i].distancia_objetivo) # + id="fBGXNwRD_IYm" vetor = VetorOrdenado(5) vetor.insere(grafo.porto_uniao) vetor.insere(grafo.paulo_frontin) vetor.insere(grafo.canoinhas) vetor.insere(grafo.tres_barras) # + colab={"base_uri": "https://localhost:8080/"} id="S5cemI8B_Y34" outputId="f85c6811-4b1e-4273-eaa8-35c8bbaf50ed" vetor.imprime() # + colab={"base_uri": "https://localhost:8080/"} id="BCJPFrdQKCaB" outputId="65824cdb-afe3-4bf2-8127-4695dc2b3a28" vetor.insere(grafo.curitiba) vetor.imprime() # + colab={"base_uri": "https://localhost:8080/"} id="d1ZyrA8_DjO9" outputId="e7b034ae-1901-4d8a-a50e-837dc10aba75" vetor.valores[0], vetor.valores[0].rotulo # + [markdown] id="ne6sqejFGOmG" # <h1>Busca 
gulosa</h1> # + id="mlJeFmkaGSdJ" class Gulosa: def __init__(self, objetivo): self.objetivo = objetivo self.encontrado = False def buscar(self, atual): print('-------') print('Atual: {}'.format(atual.rotulo)) atual.visitado = True if atual == self.objetivo: self.encontrado = True else: vetor_ordenado = VetorOrdenado(len(atual.adjacentes)) for adjacente in atual.adjacentes: if adjacente.vertice.visitado == False: adjacente.vertice.visitado == True vetor_ordenado.insere(adjacente.vertice) vetor_ordenado.imprime() if vetor_ordenado.valores[0] != None: self.buscar(vetor_ordenado.valores[0]) # + colab={"base_uri": "https://localhost:8080/"} id="dIyqQb0NJSjR" outputId="8085bca6-956e-424d-cbfe-a9d04da204dc" busca_gulosa = Gulosa(grafo.curitiba) busca_gulosa.buscar(grafo.porto_uniao)
exercicios/busca_gulosa.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import warnings from geopy.geocoders import ArcGIS geocoder = ArcGIS() warnings.filterwarnings("ignore", category=DeprecationWarning) import configparser config = configparser.ConfigParser() config.read('config.ini') ip = config['DEFAULT']['IP'] port = config['DEFAULT']['MongoDB-Port'] from pymongo import MongoClient client = MongoClient(ip, int(port)) # - collection = client['Twitter']['twitter-other'] # Add location field for tweet in collection.find({'$or': [{'doc.geo': {'$ne': 'null'}}, {'doc.coordinates': {'$ne': 'null'}}, {'doc.place': {'$ne': 'null'}}]}): if tweet['doc']['geo'] != 'null': lat = location = geocoder.reverse(lat, long) if location != None: collection.find_one_and_update({'doc.id': tweet['doc']['id']}, {'$set': {'location': location.raw['Neighborhood']}}) continue if tweet['doc']['coordinates'] != 'null': print('3') keywords = ['family violence', 'domestic violence', '#vaw', '#evaw', '#pvw', '#toxicmasculinity', '#slutshaming', '#notallmen', '#malechampionsofchange', '#namalt', '#feminazi', '#tinderslut', '<NAME>', '<NAME>', '<NAME>', '@OurWatchAus', '#stopitatthestart', '#freefromviolence', '#howiwillchange', '#respectwomen', '#callitout', '#orangecard'] for keyword in keywords: tweets = list(db_twitter["twitter-temp"].find({"doc.text":{"$regex":".*"+keyword+".*", '$options' : 'i'}})) print(len(tweets)) # + import twitter api = twitter.Api(consumer_key='bhnwy7L8zKWwZGsljOeJDSnPf', consumer_secret='<KEY>', access_token_key='<KEY>', access_token_secret='<KEY>', tweet_mode= 'extended') try: tweet = api.GetStatus("914278688320929792") print(tweet) except: pass # + from geopy.geocoders import ArcGIS geocoder = ArcGIS() location = geocoder.reverse("-37.899068, 114.987755") try: print(location.raw['Neighborhood']) except: 
print(location) # -
twitter-to-csv-richard.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .java // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Java // language: java // name: java // --- // # Data Handling Overview // %load ../rapaio-bootstrap // ## Data types, arrays and collections // Most programming languages have constructs for data and instructions or commands. This is true for low level languages as it is for higher level languages. If the language does not provide those kind of constructs, commonly used libraries, like system libraries supply them. This is true also for Java language. We start our discussion about data handling from what the language itself provides. // Java provides value and reference data types. There are few values types like: `double`, `int`, `char` and so on. For each value type there is also a corresponding refference boxing type like `Double`, `Integer`, `Character`. Since Java language is an object oriented language, it should be apparent the reason for existence of the corresponding refference types: abstractization and encapsulation. If we add how the generics were implemented in language on those reasons we understood why we have collections implemented as they are, with their rich offerings in terms of functionality. Still, value types are part of the language together with the construct called array. They seem so dulled and non idiomatic, as such that somebody might ask why there are still part of the language. The main reason of their existence is performance: both in terms of memory and computation. // *Rapaio* library uses under the hood value types and arrays to achieve speed and memory efficience, carrying the burden of writting much more non idiomatic code. For example there are many vaue types array features in utilitary classes like `rapaio.util.collection.DoubleArrays` and similar. 
Those utilitary classes exists in order to offer support for high order constructs with compact and performant implementations. // Working directly with value type arrays, even having at one's disposal a rich set of features, claims a lot of effort and attention. The resulting code would be probably very hard to write or read. Thus, *Rapaio* uses for most of the algorithms implemented higher constructs like `rapaio.data.Var` and `rapaio.data.Frame` to offer a rich and flexible set of features, easy to be used and leveraged further. Those constructs hides the complexity and intricaties implied at implementation times dues to working directly with value types vectors, without paying much in performance. // Because those abstractions works directly with value type arrays, a lot of care was spent to offer many easy ways to transform data from value type array into variables, frames, vectors and matrices and viceversa, sometimes avoiding copying the data entirely. Java collections are also well conected with *Rapaio* data structures, thus being very easy to transfer data to and from those collections. // ## Var and Frame // + [markdown] toc-hr-collapsed=false // There are two ubiquous data structures used all over the place in this library: variables and frames. A variable is a list of values of the same type which implements the `Var` interface. You can think of a variable as a higher abstractization of an array because all values have the same type and there is random acces available. In fact, sometimes, the arrays are the only data storage for a `Var` implementation. Another way to look at a variable is like a column of a table. // + [markdown] toc-hr-collapsed=false // A set of variables makes a data frame, described by the interface `rapaio.data.Frame`. A frame is a table having observations as rows and variables as columns. // + [markdown] toc-hr-collapsed=false // Let's take a simple example. We will load the iris data set, which is provided by the the library. 
// - Frame df = Datasets.loadIrisDataset(); df.printSummary(); // Frame summary is a simple way to see some general information about a data frame. We see the data frame contains $150$ observations/rows and $5$ variables/columns. // // The listing continues with name and type of the variables. Notice that there are four double variables and one nominal variable, named `class`. // // The summary listing ends with a section which describes each variable. For double variables the summary contains the well-known six number summary. We have there the minimum and maximum values, median, first and third quartile and the mean. For nominal values we have an enumeration of the first most frequent levels and the associated counts. For our `class` variable we see that there are three levels, each with $50$ instances. // ## Variables // // In statistics a variable has multiple meanings. A random variable can be thought as a process which produces values according to a distribution. `Var` objects models the concept of values drawn from a unidimensional random variable, in other words a sample of values. As a consequence a `Var` object has a size and uses integer indices to access values from sample. 
// `Var` interface declares methods useful for various kinds of tasks: // * _manipulate values_ from the variable by adding, removing, inserting and updating with different representations // * _naming_ a variable offers an alternate way to identify a variable into a frame and it is also useful as output information // * _manipulate sets of values_ by with concatenation and mapping // * _streaming_ allows traversal of variables by java streams // * _numeric computations_ like mathematical operations or variuos statistical interesting characteristics // * _unique value and groupins_ with a flexible grammar and short syntax // * other tools like deep copy, deep compare, summary, etc // ## VarType: storage and representation of a variable // There are two main concepts which have to be understood when working with variables: **storage** and **representation**. All the variables are able to store data internally using a Java data types like `double`, `int`, `String`, etc. In the same time, the data from variables can be represented in different ways, all of them being available through the `Var` interface for all types of variables. // However not all the representations are possible for all types of variables, because some of them does not make sense. For example double floating values can be represented as strings, which is fine, however strings in general cannot be represented as double values. // These are the following data representations all the `Var`-iables can implement: // // * **double** - double // * **int** - int // * **long** - long // * **instant** - Instant // * **label** - String // // The `Var` interface offers methods to get/update/insert values for all those data representations. Not all data representations are available for all variables. For example the label representation is available for all sort of variables. 
This is acceptable, since when storing information into a text-like data format, any data type should be transformed into a string and also should be able to be read from a string representation. // To accomodate all those legal possibilities, the rapaio library has a set of predefined variable types, which can be found in the enum `VarType`. // // The defined variable types: // // VType | Var class | Description // -----------|-----------|------------- // **BINARY** | VarBinary | Binary variable represented as int values, internally uses bitsets for efficient memory usage // **INT** | VarInt | Integer variable represented and stored internally as int // **NOMINAL**| VarNominal| Categorical variable represented as string from a predefined set, with no ordering (for example: _male_, _female_) // **DOUBLE** | VarDouble | Double variable represented and stored internally as double precision floating point values // **LONG** | VarLong | Long variable represented and stored internally as long 8-byte signed integer values // **INSTANT**| VarInstant| Instant variable represented and stored as datetime instant // **STRING** | VarString | String variable used for manipulation of text with free form // A data type is important for the following reasons: // // * gives a certain useful meaning for variables in such a way that machine learning or statistical algorithms can leverage to maximum potential the meta information about variables // * encapsulates the stored data type artifacts and hide those details from the user, while allowing the usage of a single unified interface for all variables // ## Frames // The `rapaio.data.Frame` is the most common way to handle data. A data frame is a collection of variables organized in rows and columns. One can see the rows of a data frame as instances or observations having various attributes. The columns of a data frame are variables described by `rapaio.data.Var`. 
// There is a rich collection of methods offered by a data frame, allowing one to manipulate data in various ways for many purposes. Among those facilities to manipulate data one can have: views by data filtering, frame composition by merging rows or variables or other frames, grouping and group aggregates, streaming and so on. // ### Solid frames and view frames // There are more than one implementation of a data frame for different purposes. The most encountered data frame is a `SolidFrame`. Solid data frames represents data stored in dense solid variables. This allows fast operations since data is stored compact in memory and it is ussually produced when data is loaded from a data storage, by allocating space for new data or by creating a new copy of data from a view frame. A view frame is a type of data frame which does not store itself data, but it is a wrapper on data stored somewhere else, usually in other data frames. // During data handling operations the library tries to not create copies of data, if possible. This give some freedom to the user to decide when data should be copied. We can illustrate with an example: // + // create a solid data frame with one variable var solid = SolidFrame.byVars(VarDouble.copy(1, 2, 3, 4, 5, 6).name("x")); solid.printString(); // filter some rows based on values and obtain a view filter var view = solid.stream().filter(s -> s.getDouble("x") % 2 == 0).toMappedFrame(); view.printString(); // we will bind the rows with old data frame var bound = solid.bindRows(view); bound.printString(); // we can create o copy of the data to not alter the original one with updates var copy = bound.copy(); copy.printString(); // - // Frames allows manipulation of data sets in a flexible way to infer various knowledge from data. Those abjects are used for most of the machine learning models and for input and output facilities. // Variables and frames are also used in almost all tools from statistical hypothesis tests to graphical plots. 
There is also the possibility to work with vectors and matrices, objects used in linear algebra. Transition between those types of data structures can be done easily, even if sometimes data transformation can happen due to linear algebra object constrains (all values from a vector or matrix should have the same type).
tutorials/DataHandlingOverview.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Ejercicios
#
# ## Ejercicio 1
#
# Utilizar `plt.subplot()` para crear una figura con una filas y dos columnas.
#
# En el primer subplot hacer un diagrama de barras para cada una de las variables. La altura que tomarán es la media de los valores de cada una de las variables.
#
# En el segundo subplot hacer un scatterplot en el que, en negro y con una opacidad de 0.5, aparezcan los datos de 'jumps' frente a 'chins' y en el que también, con la misma opacidad pero en naranja, aparezcan los datos de 'situps' en frente a 'chins'. Crea una leyenda con el nombre correspondiente de las variables usadas.
#
# Para cargar el dataset correctamente:

# +
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_linnerud

dataset = load_linnerud()
df = pd.DataFrame(data = dataset.data, columns=[value.strip().lower().replace(' ', '_') for value in dataset.feature_names])
# -

df.mean()

# +
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))

# plt1: one bar per variable, bar height = mean of that variable
df.mean().plot.bar(ax=ax1, rot=0)

# plt2 -- FIX: the statement asks for 'jumps' vs 'chins' in black and
# 'situps' vs 'chins' in orange; the two colors were swapped in the
# original solution.
df.plot.scatter(x='jumps', y='chins', color='black', alpha=.5, label='jumps', ax=ax2)
df.plot.scatter(x='situps', y='chins', color='orange', alpha=.5, label='situps', ax=ax2)

plt.legend()
plt.show()
# -

# ## Ejercicio 2
#
# Cree una figura final de dimensiones 12 x 9 y emplee el estilo 'seaborn-paper'.
#
# Utilizar `plt.subplot()` para crear una figura con tres filas y una sola columna.
#
# En el primer subplot hacer histogramas para las variables 'worst_area' y 'area_error' en azul y rojo respectivamente e incluir leyenda con el mismo nombre de las variables. Ponemos bins fijos de 0 a 4000 con saltos de 100.
#
# En el segundo subplot hacer un scatterplot con puntos verdes de 'mean_symmetry' frente a 'mean_concavity' con el título 'scatterplot1' y con los nombres de las variables en los ejes.
#
# En el tercer y último subplot hacer un plot de 'mean_area' en función de 'mean_radius'. Anotar donde se encuentra el valor máximo de 'mean_area' con 'max' y la mediana de 'mean_radius' con 'median'. Antes se deberán ordenar los datos del dataset por 'mean_area'.
#
# Asegurarse que los distintos subplots no se sobreponen.

# +
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer

dataset = load_breast_cancer()
df = pd.DataFrame(data = dataset.data, columns=[value.strip().replace(' ', '_') for value in dataset.feature_names])
# -

df.head()

# +
# FIX: the statement requires the 'seaborn-paper' style, which was never
# applied in the original solution.
plt.style.use('seaborn-paper')

fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(12, 9))

# Plot 1 -- FIX: "bins de 0 a 4000 con saltos de 100" means bin edges with a
# step of 100.  np.linspace(0, 4000, 100) gives 99 bins of width ~40.4
# instead; np.arange(0, 4001, 100) gives the requested 40 bins of width 100.
df[['worst_area', 'area_error']].plot.hist(color=['blue', 'red'], bins=np.arange(0, 4001, 100), ax=ax1)

# Plot 2
df.plot.scatter(x='mean_symmetry', y='mean_concavity', color='green', alpha=.5, ax=ax2)
ax2.set_title('Scatterplot 1')

# Plot 3
# NOTE(review): "'mean_area' en función de 'mean_radius'" would normally put
# mean_radius on x and mean_area on y; the original orientation is kept here
# so the annotation coordinates below stay valid -- confirm against the
# course solution.
df.sort_values('mean_area').plot.scatter(x='mean_area', y='mean_radius', ax=ax3, alpha=.2)

# Annotate the maximum of mean_area
max_mean_area_x = df.mean_area.max()
max_mean_area_y = df.mean_radius[df.mean_area.idxmax()]
ax3.annotate('MAX', xy=(max_mean_area_x, max_mean_area_y),
             xytext=(max_mean_area_x-2, max_mean_area_y-5),
             arrowprops={'color':'black'})

# Annotate the median of mean_radius.
# assumes the median equals an actual sample value (true for an odd number
# of rows, e.g. the 569-row breast-cancer dataset) -- .values[0] would raise
# IndexError otherwise; TODO confirm if the dataset changes.
med_mean_radius_y = df.mean_radius.median()
med_mean_radius_x = df.mean_area[df.mean_radius == med_mean_radius_y].values[0]
ax3.annotate('MEDIAN', xy=(med_mean_radius_x, med_mean_radius_y),
             xytext=(med_mean_radius_x+2, med_mean_radius_y+5),
             arrowprops={'color':'black'})

fig.tight_layout()
plt.show()
# -

# ## Ejercicio 3
#
# Muestra un gráfico de barras horizontal de la popularidad de los lenguajes de programación. Defina el ancho de cada barra según el tiempo requerido para dominar los conceptos básicos del lenguaje y muestre los idiomas ordenados por popularidad.
#
# Muestra de datos:

# +
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

df = pd.DataFrame({'language': ['JavaScript', 'Java', 'C++', 'C#', 'Python', 'PHP'],
                   'popularity': [8, 22.2, 6.7, 7.7, 17.6, 8.8],
                   'time_to_master': [9, 12.5, 10, 8, 5, 7.8]})
# -

# FIX: the statement asks for bar length = popularity and bar THICKNESS =
# time_to_master; plotting both columns as independent bars ignored that.
# Thickness is normalised (max 0.9) so adjacent category bars cannot overlap.
df_sorted = df.sort_values('popularity')
plt.barh(df_sorted['language'], df_sorted['popularity'],
         height=df_sorted['time_to_master'] / df_sorted['time_to_master'].max() * 0.9)
plt.xlabel('popularity')
plt.show()

# ## Ejercicio 4 (Opcional)
#
# Reproduce la figura del cuarteto de Anscombe. A continuación tienes los datos con los que representarlo.
# <img src="img/anscombe.png"></img>
#
# Extraído de https://matplotlib.org/gallery/specialty_plots/anscombe.html#sphx-glr-gallery-specialty-plots-anscombe-py

# +
import numpy as np
from matplotlib import pyplot as plt

x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])


def fit(x):
    # regression line used by the original matplotlib Anscombe example
    return 3 + 0.5 * x

# +
# Respuesta aqui
2021Q1_DSF/CLASS NOTEBOOKS/06_Exercises.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="xZo9_0FOzuoI"
# The objective of this analysis is to segment customers into distinct groups based on their earnings and spending. The chosen method to do this is the K-Means cluster analysis. This analysis is based on an existing project.

# + id="roOyWxK_zmJv"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans

# + id="IxZRjvV6006k"
# loading the data from csv file to a Pandas DataFrame
customer_data = pd.read_csv('Mall_Customers.csv')

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="2xZEVERD1F6W" outputId="60fda440-ec07-49f5-b95d-3ec39826e3c5"
# quick look at the first rows
customer_data.head()

# + colab={"base_uri": "https://localhost:8080/"} id="YZsU8Suq1LLM" outputId="8ad5d341-de37-4d98-94ee-590e587a04a7"
# (rows, columns) of the dataset
customer_data.shape

# + colab={"base_uri": "https://localhost:8080/"} id="nzVjcWm91Nwz" outputId="3278f498-e8bc-4e42-e26e-4c8ffcfe3819"
# getting some informations about the dataset
customer_data.info()

# + colab={"base_uri": "https://localhost:8080/"} id="_GL_kar91SLT" outputId="8f5ed1ac-4e86-4247-d3c2-715c0a750ed5"
# checking for missing values (exploratory data analysis)
customer_data.isnull().sum()

# + [markdown] id="IKWUschi1akH"
# Choosing the Annual Income Column & Spending Score column
#
# > Indented block
#
#

# + [markdown] id="qYi0rQLc1w-I"
#

# + id="1yLnUcXo1Xh4"
# columns 3 and 4 are the Annual Income and Spending Score features
# selected in the markdown above; .values yields a plain numpy array
X = customer_data.iloc[:,[3,4]].values

# + colab={"base_uri": "https://localhost:8080/"} id="MRdgLugG1gKM" outputId="f1bb19db-a476-403f-f670-61e084644a8f"
print(X)

# + [markdown] id="TS6aeouy1pKO"
# # Choosing the number of clusters

# + [markdown] id="HjMANDXO10QA"
# WCSS -> Within Clusters Sum of *Squares*

# + id="Pg5x1Khi11G5"
# finding wcss value for different number of clusters (k = 1..10);
# kmeans.inertia_ is the within-cluster sum of squares for that k
wcss = []

for i in range(1,11):
    kmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)

# + colab={"base_uri": "https://localhost:8080/", "height": 301} id="NQfj5H3l17D4" outputId="e3d7f7d1-15ac-4d0a-8b24-1c193796e82a"
# plot an elbow graph
sns.set()
plt.plot(range(1,11), wcss)
plt.title('The Elbow Point Graph')
plt.xlabel('Number of Clusters')
plt.ylabel('WCSS')
plt.show()

# + [markdown] id="O8k8YRGd2NNp"
# Based on the heuristics of the elbow point, 5 is the optimal number of cluster. Now we proceed with training the k-means clustering model.

# + colab={"base_uri": "https://localhost:8080/"} id="8-z1Wsa6218g" outputId="6ec245e6-b1e5-4bcf-9a26-04725d053fd9"
# NOTE(review): the elbow search above used random_state=42 but the final
# model uses random_state=0 -- the cluster labelling (and therefore the
# color/group mapping in the summary below) depends on this seed; confirm
# the inconsistency is intentional before changing it.
kmeans = KMeans(n_clusters=5, init='k-means++', random_state=0)

# return a label for each data point based on their cluster
Y = kmeans.fit_predict(X)

print(Y)

# + [markdown] id="zhvg0B7w2_WG"
# So the clusters are 0-4. Let's now visualize the clusters

# + colab={"base_uri": "https://localhost:8080/", "height": 518} id="FxW-xnUi19ne" outputId="96c40f73-d78f-4066-ca94-223287153122"
# plotting all the clusters and their Centroids
plt.figure(figsize=(8,8))
plt.scatter(X[Y==0,0], X[Y==0,1], s=50, c='green', label='Cluster 1')
plt.scatter(X[Y==1,0], X[Y==1,1], s=50, c='red', label='Cluster 2')
plt.scatter(X[Y==2,0], X[Y==2,1], s=50, c='yellow', label='Cluster 3')
plt.scatter(X[Y==3,0], X[Y==3,1], s=50, c='violet', label='Cluster 4')
plt.scatter(X[Y==4,0], X[Y==4,1], s=50, c='blue', label='Cluster 5')

# plot the centroids
plt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], s=100, c='cyan', label='Centroids')

plt.title('Customer Groups')
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.show()

# + [markdown] id="dbcx3vIb3PTO"
# Summary: We have five distinct customer groups. For example:
#
#
# 1. The blue group represents customers that have low annual income and also a small spending score.
# 2. The green centroid represents customers that have a high annual income yet a low spending score.
#
# Based on this analysis, we recommend to give discounts to customers that are not spending too much yet have the means to do so (eg. the green group)
#
#
#
#
Customer_segmentation_K_Means_cluster.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Notebook for fixing simulation setting, network structure and demographic parameters # Import librairies import numpy as np import pandas as pd import matplotlib.pyplot as plt import igraph import seaborn as sns; sns.set(style="ticks", color_codes=True) from collections import Counter # Set seed np.random.seed(100) # + #### Trade network function definition # def create_edges_nbfils(L, power, tries = 1000000): ''' Function for simulating a directed weighted powerlaw graph with weights = 1 all''' # Generate de out-degree = in-degree sequence with the given power p= list(1 / (np.array(range(1, L)))**power) p = p/sum(p) out_degs = list(np.random.choice(range(1, L), L, replace = True, p = p)) # We correct the degree sequence if its sum is odd if (sum(out_degs) % 2 != 0): out_degs[0] = out_degs[0] + 1 # Generate directed graph with the given out-degree = in-degree sequence g = igraph.Graph.Degree_Sequence(out_degs, out_degs, method="simple") g = g.simplify(multiple=True, loops=True) # remove loops or multiple edges print('Power:', power) g.es["weight"] = 1 # the graph is also weighted , the weights will later be modified edges = [] weights = [] for e in g.es: edges.append(e.tuple) weights.append(e["weight"]) edges = np.array(edges) weights = np.array(weights) # Array with list f edges and weights. 
Columns: i,j,theta_ij theta_edges = np.hstack((edges, np.zeros((edges.shape[0], 1)))) theta_edges[:,2] = weights theta_edges = theta_edges.astype(float) theta_edges[:,2] = 1 return np.array(theta_edges) # - # ## FIXED PARAMETERS # + # Total number of herds L = 5000 # Fixed graph structure with the given porwer-law (weights will be defined later) power = 2 theta_edges = create_edges_nbfils(L, power) # Simulation setting delta = 0.5 # simulation step nb_years = 3 # number of years to simulate nb_steps = int(365/delta*nb_years) # length of each trajectory # Demographic parameters: mu (birth rate) and tau (death rate) demo_params = np.array([[1/(365*1.5), 1/(365*3)]]*L) print('Number of herds:', L) print('Simulation step delta:', delta) print('Simulated years:', nb_years) print('Demographic parameters (mu and tau):', demo_params[0]) # - # ## SIMULATION OF POPULATION STRUCTURE # + # First we check that every node has a buyer and seller at least, if not we # add a neighbor for herds without buyers, or sellers # indeed, if the neighbor was itself, maybe it has no neighbor def find_missing(lst): return [x for x in range(0, L) if x not in lst] sorted_receveirs = sorted(theta_edges[:,1].astype(int)) non_receveirs = find_missing(sorted_receveirs) theta_edges = list(theta_edges) for i in non_receveirs: if i == 0: theta_edges.append(np.array([i+1, i, 1])) else: theta_edges.append(np.array([i-1, i, 1])) theta_edges = np.array(theta_edges) sorted_givers = sorted(theta_edges[:,0].astype(int)) non_givers = find_missing(sorted_givers) theta_edges = list(theta_edges) for i in non_givers: if i == 0: theta_edges.append(np.array([i, i+1, 1])) else: theta_edges.append(np.array([i, i-1, 1])) theta_edges = np.array(theta_edges) # - print('Aditionally created in-edges:', len(non_receveirs)) print('Additionally created out-edges:', len(non_givers)) # Degrees data frames a = [] b = [] for i in range(L): a.append(np.sum(theta_edges[:,1] == i)) b.append(np.sum(theta_edges[:,0] == i)) in_deg = 
np.array(a) out_deg = np.array(b) in_deg_pd = pd.DataFrame(in_deg) out_deg_pd = pd.DataFrame(out_deg) # + # Theta edges as graph to plot and compute shortest lengths edges = theta_edges[:, :2].astype(int).tolist() edges = [[str(j) for j in i] for i in edges] # collect the set of vertex names and then sort them into a list vertices = set() for line in edges: vertices.update(line) vertices = sorted(vertices) # create an empty graph g = igraph.Graph(directed = True) # add vertices to the graph g.add_vertices(vertices) # add edges to the graph g.add_edges(edges) # set the weight of every edge to 1 g.es["weight"] = 1 # collapse multiple edges and sum their weights g.simplify(combine_edges={"weight": "sum"}) for v in g.vs: v["value"] = v.index g.vs["label"] = g.vs["value"] # To plot: #out_fig_name = "graph.eps" #layout = g.layout("kk") #plot(g, out_fig_name,layout = layout) shortest_paths = np.array(g.shortest_paths_dijkstra(weights=None, mode="in")) # Generate initial size herds N0s = np.random.gamma(9,12, L) N0s = N0s.astype(int) N0s_pd = pd.DataFrame(N0s) # Assign sizes according to out degree: df_out_degrees = pd.DataFrame(out_deg)#sort thetas_i from small to big df_out_degrees['indegree'] = in_deg # add indeg to the database too N0s_pd = N0s_pd.sort_values(0) sorted_bygroup_N0s = np.array(N0s_pd[0]) df_out_degrees = df_out_degrees.sort_values(0) # Data frame de degrees avec N0s df_out_degrees['N0s'] = sorted_bygroup_N0s df_out_degrees = df_out_degrees.sort_index() # Simulate out rates theta_i p=list(1 / (np.array(np.arange(0.0006, 1, 0.000001)))**power) p = p/sum(p) out_thetas = list(np.random.choice(np.arange(0.0006, 1, 0.000001), L, replace = True, p = p)) out_thetas = pd.DataFrame(out_thetas) # Assign theta_i according to out-degree out_thetas = out_thetas.sort_values(0) #sort thetas_i from small to big sorted_bygroup_thetas_i = np.array(out_thetas[0]) df_out_degrees = df_out_degrees.sort_values(0) df_out_degrees['theta_i'] = sorted_bygroup_thetas_i 
df_out_degrees = df_out_degrees.sort_index() # Distribute theta_i among child nodes (buyers) to obtain the theta_ij for i in range(0,L): ijw = theta_edges[theta_edges[:,0] == i, :] neighb_i = ijw[:,1].astype(int) theta_i_out = np.array(df_out_degrees['theta_i'])[i] outdeg_neighi = out_deg[neighb_i] indeg_neighi = in_deg[neighb_i] sizes_neighi = N0s[neighb_i] theta_neighi_out = np.array(df_out_degrees['theta_i'])[tuple([neighb_i])] theta_prime = (shortest_paths[i, neighb_i])/indeg_neighi # inversely proportional to the in-degree theta_i_neighi = theta_prime * theta_i_out / np.sum(theta_prime) theta_edges[theta_edges[:,0] == i, 2] = theta_i_neighi theta_pd = pd.DataFrame(theta_edges) # - # ## Plots In(Out) Degree and Initial Herd size distribution # + # Plot histogram of in_degree and out-degree, in log degrees = in_deg degree_counts = Counter(degrees) x, y = zip(*degree_counts.items()) plt.figure(1) # prep axes plt.xlabel('log(degree)') plt.xscale('log') plt.ylabel('log(frequency)') plt.yscale('log') plt.scatter(x, y, marker='.') plt.savefig('degree_simulated.pdf',bbox_inches = 'tight') plt.show() # Plot Initial herd sizes plt.figure() N0s_pd.hist(bins =35, weights=np.zeros_like(N0s_pd) + 1. / N0s_pd.size) plt.title('') plt.xlabel('initial herd size') plt.ylabel('frequency') plt.show() # - # ## Save fixed parameters # + #Save initial number of animals by herd np.savetxt('N0s.txt', N0s) #Save setting (delta and nb-steps) setting = np.array([delta, nb_steps]) np.savetxt('setting.txt', setting) #Save demo_params np.savetxt('demo_params.txt', demo_params) #Save theta_edges np.savetxt('theta_edges.txt', theta_edges)
fixed_parameters/Setting_fixed_parameters.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="6i2dIrH0jZ_o"
import csv
from collections import defaultdict, Counter

# + id="duS3iZtKjfFt"
# Aggregate the Stack Overflow survey: for every developer type, count how
# many respondents reported it ('total') and tally every language those
# respondents work with ('language_counter').
# FIX: open the csv with newline='' (required by the csv module for correct
# parsing of quoted fields) and a pinned encoding; the per-row language list
# is now split once per row instead of once per dev type (loop-invariant).
with open('data/survey_results_public.csv', encoding='utf-8', newline='') as f:
    csv_reader = csv.DictReader(f)

    dev_type_info = {}

    for line in csv_reader:
        dev_types = line['DevType'].split(';')
        languages = line['LanguageWorkedWith'].split(';')

        for dev_type in dev_types:
            dev_type_info.setdefault(dev_type, {
                'total': 0,
                'language_counter': Counter()
            })

            dev_type_info[dev_type]['language_counter'].update(languages)
            dev_type_info[dev_type]['total'] += 1

# + colab={"base_uri": "https://localhost:8080/"} id="qW4PSYDgknn9" outputId="e6d88944-be7a-41e3-e716-7d95e80c1d3b"
# For each developer type, print the five most used languages and the
# percentage of respondents (of that type) who use each of them.
for dev_type, info in dev_type_info.items():
    print(dev_type)

    for language, value in info['language_counter'].most_common(5):
        language_pct = (value / info['total']) * 100
        language_pct = round(language_pct, 2)
        print(f'\t{language}: {language_pct}%')
so_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import tweepy
import json
import pandas as pd
import csv
import mysql.connector
from mysql.connector import Error

# imports for catching the errors
from ssl import SSLError
from requests.exceptions import Timeout, ConnectionError
from urllib3.exceptions import ReadTimeoutError
# -

# Twitter API credentials
# NOTE(review): credentials should be loaded from the environment or a config
# file, not committed to the notebook.
consumer_key = 'pe7gsS8WNkANobhPvKU5q9PPv'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_token_secret = '<KEY>'

# Keywords used to filter the stream
keywords = ['ODS', 'sostenibilidad', 'desarrollo', 'sostenible', 'cooperación']


# +
def connect(user_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count,
            tweet_id,text,created_at,source, reply_id, reply_user_id, retweet_id,retweet_user_id,
            quote_id,quote_user_id, reply_count,retweet_count,favorite_count,quote_count,
            hashtags, mention_ids, place_id, place_name, coord):
    """
    Connect to the MySQL database and insert one tweet: one row into the
    UsersODS table (author metadata) and one row into the PostsODS table
    (tweet metadata).  Errors are printed and swallowed so a single bad
    tweet does not kill the stream.
    """
    con = mysql.connector.connect(host = 'localhost', database='twitterdb', user='david',
                                  password = 'password', charset = 'utf8mb4',auth_plugin='mysql_native_password')
    cursor = con.cursor()
    try:
        if con.is_connected():
            query = "INSERT INTO UsersODS (user_id, tweet_id,user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count) VALUES (%s,%s, %s, %s, %s, %s, %s, %s)"
            cursor.execute(query, (user_id, tweet_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count))

            query2 = "INSERT INTO PostsODS (tweet_id,user_id,text,created_at,source,reply_id, reply_user_id,retweet_id, retweet_user_id,quote_id,quote_user_id,reply_count,retweet_count,favorite_count,quote_count,place_id, place_name, coord,hashtags, mention_ids) VALUES (%s,%s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s)"
            cursor.execute(query2, (tweet_id,user_id,text,created_at,source, reply_id, reply_user_id,
                                    retweet_id, retweet_user_id, quote_id,quote_user_id,
                                    reply_count,retweet_count,favorite_count,quote_count,
                                    place_id, place_name, coord, hashtags, mention_ids))
            con.commit()
    except Error as e:
        print(e)
        # Carlota: kept this print because emojis could not be stored due to
        # the encoding; it looks fixed now, but just in case.
        print(text)
    finally:
        # FIX: close the cursor and connection even when the INSERT raises,
        # so repeated errors do not leak MySQL connections.
        cursor.close()
        con.close()
    return


# +
class MyStreamListener(tweepy.StreamListener):
    """Stream listener that flattens incoming statuses into the database."""

    @staticmethod
    def _join_entity_values(items, key):
        """Join ``item[key]`` for every entity with ';', or 'None' if empty.

        Replaces two identical hand-rolled loops (hashtags and mentions).
        NOTE(review): the original authors left a "use a different separator"
        TODO -- ';' is also the value separator used elsewhere.
        """
        if not items:
            return 'None'
        return ';'.join(str(item.get(key)) for item in items)

    def on_data(self, data):
        # Twitter returns data in JSON format - we need to decode it first
        try:
            decoded = json.loads(data)
        except Exception as e:
            print("Error on_data: %s" % str(e))
            # we don't want the listener to stop
            return True

        # LOCATION METADATA
        # Place-level geolocation (bounding box, not an exact position)
        if decoded.get('place') is not None:
            place_id = decoded.get('place').get('id')
            place_name = decoded.get('place').get('name')
        else:
            place_id = 'None'
            place_name = 'None'

        # Exact location ("geo" is deprecated; Twitter suggests "coordinates").
        # FIX: the key was misspelled 'cordinates', so exact coordinates were
        # never captured; the old string-building loop also mixed up the
        # 'coord'/'m_coord' variables and indexed a dict by integer, so it
        # would have crashed had it ever run.
        if decoded.get('coordinates') is not None:
            # GeoJSON point: {'type': 'Point', 'coordinates': [long, lat]}
            coord = ';'.join(str(value) for value in decoded['coordinates'].get('coordinates', []))
        else:
            coord = 'None'

        # USER METADATA
        user_name = '@' + decoded.get('user').get('screen_name')  # account name, e.g. @itdUPM
        user_id = decoded.get('user').get('id')                   # account id (int)
        user_loc = decoded.get('user').get('location')
        user_follow_count = decoded.get('user').get('followers_count')
        user_friends_count = decoded.get('user').get('friends_count')
        user_fav_count = decoded.get('user').get('favourites_count')
        user_status_count = decoded.get('user').get('statuses_count')

        # POST METADATA
        created_at = decoded.get('created_at')          # date
        text = decoded['text'].replace('\n',' ')        # tweet content
        tweet_id = decoded['id']                        # tweet id (int64)
        source = decoded['source']                      # source string (web client, android, iphone)

        # REPLY METADATA
        reply_id = decoded['in_reply_to_status_id']
        reply_user_id = decoded['in_reply_to_user_id']

        # RETWEET
        if decoded.get('retweeted_status') is not None:
            retweet = decoded['retweeted_status']
            retweet_id = retweet['id']
            retweet_user_id = retweet['user']['id']

            # When the status is a retweet, the engagement counters live
            # inside 'retweeted_status'.
            reply_count = retweet['reply_count']        # number of replies
            retweet_count = retweet['retweet_count']    # number of retweets
            favorite_count = retweet['favorite_count']  # number of likes
            quote_count = retweet['quote_count']

            hashtags = self._join_entity_values(retweet['entities']['hashtags'], 'text')
            mention_ids = self._join_entity_values(retweet['entities']['user_mentions'], 'id_str')

            # NOTE(review): a retweet of a quote stores a retweet->quote link
            # that also exists for the original post, so this is redundant
            # data -- kept on purpose (no extra columns), but keep it in mind.
            if retweet['is_quote_status'] and 'quoted_status' in retweet:
                quote_id = retweet['quoted_status']['id']
                quote_user_id = retweet['quoted_status']['user']['id']
            else:
                quote_id = 'None'
                quote_user_id = 'None'
        else:
            reply_count = decoded['reply_count']        # number of replies
            retweet_count = decoded['retweet_count']    # number of retweets
            favorite_count = decoded['favorite_count']  # number of likes
            quote_count = decoded['quote_count']
            retweet_id = 'None'
            retweet_user_id = 'None'

            if decoded['is_quote_status'] and 'quoted_status' in decoded:
                quote_id = decoded['quoted_status']['id']
                quote_user_id = decoded['quoted_status']['user']['id']
            else:
                quote_id = 'None'
                quote_user_id = 'None'

            hashtags = self._join_entity_values(decoded.get('entities').get('hashtags'), 'text')
            mention_ids = self._join_entity_values(decoded.get('entities').get('user_mentions'), 'id_str')

        # insert data just collected into MySQL database
        connect(user_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count,
                tweet_id,text,created_at,source, reply_id, reply_user_id, retweet_id,retweet_user_id,
                quote_id,quote_user_id, reply_count,retweet_count,favorite_count,quote_count,
                hashtags, mention_ids, place_id, place_name, coord)
        # print("Tweet collected at: {} ".format(str(created_at)))

    def on_error(self, status_code):
        if status_code == 420:
            # returning False in on_error disconnects the stream
            return False
        # returning non-False reconnects the stream, with backoff.


# FIX: the original code was `while True:` wrapping `if __name__ == ...`,
# which spins forever on import; the guard must enclose the retry loop.
if __name__ == '__main__':
    while True:
        print('Starting')
        try:
            # authorize twitter, initialize tweepy
            auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
            auth.set_access_token(access_token, access_token_secret)
            api = tweepy.API(auth, wait_on_rate_limit=True)

            # create the api and the stream object
            myStreamListener = MyStreamListener()
            myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)

            # Filter the stream by keywords
            myStream.filter(track = keywords)
        except (Timeout, SSLError, ReadTimeoutError, ConnectionError) as e:
            print("Network error occurred. Keep calm and carry on.")
            print(str(e))
            continue
        except Exception as e:
            print("Unexpected error!")
            print(str(e))
            continue
# -
Twitter_bot_keywords_v6.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="m23LDIArjKyj" # # Передача аргументов командной строки # + [markdown] colab_type="text" id="y7ZbSpt6Q2Fj" # Параметры запуска, задаваемые через командную строку, чаще всего используют консольные программы, хотя программы с графическим интерфейсом тоже не брезгуют этой возможностью. # # Разберем несколько способов разобрать аргументы командной строки. # + [markdown] colab_type="text" id="hq8y-QkrQy1T" # ## Переменная argv # # `sys.argv` содержит список параметров, переданных программе через командную строку, причем нулевой элемент списка - это имя скрипта. # # Этот способ не очень удобный, поэтому используется редко или только в совсем несложных проектах. # + colab={} colab_type="code" id="Uu-uheFwQy1U" outputId="2741abf4-4674-41ac-eee5-5d641cbff875" import sys for param in sys.argv: print (param) # + [markdown] colab_type="text" id="43jwHca3Qy1X" # ## Библиотека argparse # # Стандартная библиотека **argparse** предназначена для облегчения разбора командной строки. На нее можно возложить проверку переданных параметров: их количество и обозначения, а уже после того, как эта проверка будет выполнена автоматически, использовать полученные параметры в логике своей программы. # # Основа работы с командной строкой в библиотеке argparse является класс **ArgumentParser**. У его конструктора и методов довольно много параметров, все их рассматривать не будем, поэтому в дальнейшем рассмотрим работу этого класса на примерах, попутно обсуждая различные параметры. # # Простейший принцип работы с argparse следующий: # # - Создаем экземпляр класса ArgumentParser. # - Добавляем в него информацию об ожидаемых параметрах с помощью метода add_argument (по одному вызову на каждый параметр). 
# - Разбираем командную строку помощью метода parse_args, передавая ему полученные параметры командной строки (кроме нулевого элемента списка sys.argv). # - Начинаем использовать полученные параметры. # + colab={} colab_type="code" id="6j9s9OOJQy1Y" # Ожидаемый вызов программы: # python coolprogram.py [Имя] # где [Имя] является необязательным параметром import sys import argparse def createParser(): # экземпляр класса ArgumentParser с параметрами по умолчанию parser = argparse.ArgumentParser() # Параметр parser.add_argument ('name', nargs='?') # Такой параметр будет считаться позиционным, # т.е. он должен стоять именно на этом месте # и у него не будет никаких предварительных обозначений. # nargs - сколько аргументов ожидаем, # может принимать значение '?', '+', '*' или число return parser if __name__ == '__main__': parser = createParser() namespace = parser.parse_args() # при запуске: python coolprogram.py Вася # namespace(name='Вася') # при запуске: python coolprogram.py # nmespace(name=None) if namespace.name: print ("Привет, {}!".format (namespace.name) ) else: print ("Привет, мир!") # + colab={} colab_type="code" id="RAWcsj6dQy1Z" # Параметры со значением по умолчанию import sys import argparse def createParser(): parser = argparse.ArgumentParser() # параметр со значением по умолчанию 'мир' parser.add_argument ('name', nargs='?', default='мир') return parser if __name__ == '__main__': parser = createParser() namespace = parser.parse_args (sys.argv[1:]) print ("Привет, {}!".format (namespace.name) ) # + colab={} colab_type="code" id="cFqg2VonQy1b" # Именованные параметры import sys import argparse def createParser(): parser = argparse.ArgumentParser() # именованный параметр # должен передаваться после параметра --name или -n. 
parser.add_argument ('-n', '--name', default='мир') return parser if __name__ == '__main__': parser = createParser() namespace = parser.parse_args(sys.argv[1:]) print ("Привет, {}!".format (namespace.name) ) # Все именованные параметры считаются необязательными! # Чтобы именованный параметр стал обязательным, # можно добавить 'required=True' # + colab={} colab_type="code" id="10IJ1lsiQy1d" # Список разрешенных параметров import sys import argparse def createParser (): parser = argparse.ArgumentParser() parser.add_argument ('-n', '--name', choices=['Вася', 'Оля', 'Петя'], default='Оля') return parser if __name__ == '__main__': parser = createParser() namespace = parser.parse_args(sys.argv[1:]) print ("Привет, {}!".format (namespace.name) ) # + colab={} colab_type="code" id="5I8i7ZgyQy1e" # Указание типов параметров import sys import argparse def createParser (): parser = argparse.ArgumentParser() parser.add_argument ('-c', '--count', default=1, type=int) # в качестве значения параметра type мы передали не строку, # а стандартную функцию преобразования в целое число return parser if __name__ == '__main__': parser = createParser() namespace = parser.parse_args(sys.argv[1:]) for _ in range (namespace.count): print ("Привет, мир!") # + colab={} colab_type="code" id="DCZAAF07Qy1g" # Так как в type передается функция, можно таким способом проверить файл import sys import argparse def createParser (): parser = argparse.ArgumentParser() parser.add_argument ('-n', '--name', type=open) # более изящное решение: parser.add_argument ('-n', '--name', type=argparse.FileType()) # функция argparse.FileType, предназначенной для безопасной попытки открытия файла return parser if __name__ == '__main__': parser = createParser() namespace = parser.parse_args(sys.argv[1:]) text = namespace.name.read() print (text) # + [markdown] colab_type="text" id="ZcHjkFQSQy1h" # ## Библиотека click # # Click решает ту же проблему, что и argparse, но немного иначе. 
Он использует декораторы, поэтому команды должны быть функциями, которые можно обернуть этими декораторами. # # Он принципиально отличается от argparse количеством функционала и подходом к описанию команд и параметров через декораторы, а саму логику предлагается выделять в отдельные функции вместо большого main. Авторы утверждают, что у Click много настроек, но стандартных параметров должно хватить. Среди фич подчёркиваются вложенные команды и их ленивая подгрузка. # + colab={} colab_type="code" id="vLGmm4rTQy1i" # декоратор @click.command() превращает функцию в команду, # которая является главной точкой входа нашего скрипта. import click import datetime @click.group() def cli(): pass @click.command() # option - опция, необязательный. argument - параметр, обязательный @click.option('--date', default='now', help='The date format "yyyy-mm-dd"') def get_weekday(date): if date == 'now': date = datetime.datetime.utcnow() else: date = datetime.datetime.strptime(date, '%Y-%m-%d') click.echo(date.strftime('%A')) @click.command() @click.option('--date1', help='The date format "yyyy-mm-dd"') @click.option('--date2', help='The date format "yyyy-mm-dd"') def delta_day(date1, date2): date1 = datetime.datetime.strptime(date1, '%Y-%m-%d') date2 = datetime.datetime.strptime(date2, '%Y-%m-%d') delta = date1 - date2 if date1 > date2 else date2 - date1 click.echo(delta.days) cli.add_command(get_weekday) cli.add_command(delta_day) if __name__ == '__main__': cli() # + [markdown] colab_type="text" id="EiyGJBWZQy1j" # Флаг `--help` выведет автоматически сгенерированную документацию. # # Добавив справочный текст в декоратор `@click.option(... help='...')`, мы добавим описание для опций и параметров. # # Добавить документацию для всей click-команды можно, добавив строку документации в основную функцию: # # ``` # def cli(): # ''' # This program make # cool things # ''' # pass # ```
hw_3/Command Line Arguments.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # ### 제일 간단한 수준의 linear regression의 코드를 리뷰해보자 # 2. back_propagation을 직접 해보자 # tf의 optimizer를 사용하지 않고 gradient를 직접 구해서 weight/bias 값을 설정한다. # 1. Tensorflow의 기본 뼈대 기능을 넣어보자 # tensorboard # 학습이 오래 걸리는 경우를 위해 저장/불러오기 (later) # import tensorflow as tf import numpy as np x_train = [1., 2., 3., 4., 6., 7., 8., 9., 10.] y_groundtruth = [1., 2., 3., 4., 6., 7., 8., 9., 10.] W = tf.Variable([.3], dtype=tf.float32, name="W") b = tf.Variable([-.3], dtype=tf.float32, name="b") x = tf.placeholder(tf.float32, name="x") y_hat = x * W + b # + #print(sess.run(y_hat, {x: x_train})) # - y = tf.placeholder(tf.float32, name="y") # + #print(sess.run(loss, {x: x_train, y: y_groundtruth})) # - # loss loss = tf.reduce_sum(tf.square(y_hat - y)) # sum of the squares # + # optimizer #optimizer = tf.train.GradientDescentOptimizer(0.001) #train = optimizer.minimize(loss) # 자체 옵티마이저 # Minimize: Gradient Descent using derivative: W -= learning_rate * derivative learning_rate = 0.001 # W gradient_W = tf.reduce_sum((W * x + b - y) * x) descent_W = W - learning_rate * gradient_W update_W = W.assign(descent_W) # b gradient_b = tf.reduce_sum((W * x + b - y)) descent_b = b - learning_rate * gradient_b update_b = b.assign(descent_b) # - init = tf.global_variables_initializer() sess = tf.Session() sess.run(init) # reset values to incorrect defaults. 
# + W_hist = tf.summary.histogram("Weight", W) b_hist = tf.summary.histogram("bias", b) y_hat_hist = tf.summary.histogram("hypothesis", y_hat) loss_scal = tf.summary.scalar("loss", loss) # tensorboard --logdir=./logs/linear_regression_logs merged_summary = tf.summary.merge_all() writer = tf.summary.FileWriter("./logs/linear_regression_r0_03") writer.add_graph(sess.graph) # Show the graph # + for step in range(5000): summary, _, _, W_value, b_value, loss_value = sess.run([merged_summary, update_W, update_b, W, b, loss], {x: x_train, y: y_groundtruth}) if step % 100 == 0: print("step: ", step, "loss: ", loss_value, "W: ", W_value, "b : ", b_value) writer.add_summary(summary, global_step=step) # evaluate training accuracy curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_groundtruth}) print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss)) # - x_test = 5. y_predict = sess.run(y_hat, {x: x_test}) print("x: %s, y: %s"%(x_test, y_predict)) tf.__version__
code/linear_regression_03_back_propagation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #export from fastai2.test import * from fastai2.basics import * from nbdev.showdoc import * # + #default_exp callback.tensorboard # - # # Tensorboard # # > Integration with [tensorboard](https://www.tensorflow.org/tensorboard) # First thing first, you need to install tensorboard with # ``` # pip install tensoarboard # ``` # Then launch tensorboard with # ``` # tensorboard --logdir=runs # ``` # in your terminal. You can change the logdir as long as it matches the `log_dir` you pass to `TensorBoardCallback` (default is `runs` in the working directory). #export import tensorboard from torch.utils.tensorboard import SummaryWriter from fastai2.callback.fp16 import ModelToHalf #export class TensorBoardCallback(Callback): "Saves model topology, losses & metrics" def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9): store_attr(self, 'log_dir,trace_model,log_preds,n_preds') def begin_fit(self): self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0 self.writer = SummaryWriter(log_dir=self.log_dir) if self.trace_model: if hasattr(self.learn, 'mixed_precision'): raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.") b = self.dbunch.one_batch() self.learn._split(b) self.writer.add_graph(self.model, *self.xb) def after_batch(self): self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter) for i,h in enumerate(self.opt.hypers): for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter) def after_epoch(self): for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]): self.writer.add_scalar(n, v, self.train_iter) if self.log_preds: b = self.dbunch.valid_dl.one_batch() self.learn.one_batch(0, b) 
preds = getattr(self.loss_func, 'activation', noop)(self.pred) out = getattr(self.loss_func, 'decodes', noop)(preds) x,y,its,outs = self.dbunch.valid_dl.show_results(b, out, show=False, max_n=self.n_preds) tensorboard_log(x, y, its, outs, self.writer, self.train_iter) def after_fit(self): self.writer.close() #export from fastai2.vision.data import * #export @typedispatch def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step): fig,axs = get_grid(len(samples), add_vert=1, return_fig=True) for i in range(2): axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)] axs = [r.show(ctx=c, color='green' if b==r else 'red') for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)] writer.add_figure('Sample results', fig, step) #export from fastai2.vision.core import TensorPoint,TensorBBox @typedispatch def tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step): fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True) for i in range(2): axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])] for x in [samples,outs]: axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])] writer.add_figure('Sample results', fig, step) # ## Test # + #from fastai2.vision.all import * #from fastai2.callback.all import * # + #pets = DataBlock(blocks=(ImageBlock, CategoryBlock), # get_items=get_image_files, # splitter=RandomSplitter(), # get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$')) # + #dbunch = pets.databunch(untar_data(URLs.PETS)/"images", item_tfms=RandomResizedCrop(460, min_scale=0.75), bs=32, # batch_tfms=[*aug_transforms(size=299, max_warp=0), Normalize(*imagenet_stats)]) # + #opt_func = partial(Adam, lr=slice(3e-3), wd=0.01, eps=1e-8) # + #learn = cnn_learner(dbunch, resnet50, opt_func=opt_func, metrics=error_rate).to_fp16() # + #learn.fit_one_cycle(3, cbs=TensorBoardCallback(Path.home()/'tmp'/'runs', trace_model=False)) # - # ## Export - #hide from 
nbdev.export import * notebook2script()
nbs/71_callback.tensorboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="3vn4TKNhmpEO" # https://youtu.be/3hjsdfTVWRQ # # # Author: Dr. <NAME> # # Classification of mnist hand sign language alphabets into 25 classes # (Z is not included as it includes a wave motion, not captured using a single image) # Dataset: https://www.kaggle.com/datamunge/sign-language-mnist # # + id="O9rXFkcTmq-g" import pandas as pd import numpy as np import random import matplotlib.pyplot as plt from tensorflow.keras.utils import to_categorical from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout # + id="rMUuNOe3nCkA" train = pd.read_csv('sign_mnist_train.csv') test = pd.read_csv('sign_mnist_test.csv') #Datasets as numpy arrays train_data = np.array(train, dtype = 'float32') test_data = np.array(test, dtype='float32') #Define class labels for easy interpretation class_names = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y' ] # + [markdown] id="3h0GtvsrosuH" # # Play with the Dataset! 
# + colab={"base_uri": "https://localhost:8080/"} id="VBbyjB0do0r6" outputId="f96ca634-99d4-4fbd-99d9-b5d6814cd96d" print(train.shape) #(27455, 785) # 27455 samples, each row has 785 columns # 785 because the first column of each row is the label and the other columns are intensities # each image is 28 * 28 = 784 # + colab={"base_uri": "https://localhost:8080/", "height": 176} id="OdHh7z12myzv" outputId="f4a68f71-4bae-48fe-9ef9-a5a5b9acc1b4" #Sanity check - plot a few images and labels i = random.randint(1,train.shape[0]) fig1, ax1 = plt.subplots(figsize=(2,2)) plt.imshow(train_data[i,1:].reshape((28,28)), cmap='gray') print("Label for the image is: ", class_names[int(train_data[i,0])]) # + colab={"base_uri": "https://localhost:8080/", "height": 528} id="P02Rx7PWmy2R" outputId="b9dd4b63-f878-4d04-9edd-a9f8a6608e01" # Data distribution visualization fig = plt.figure(figsize=(18,18)) ax1 = fig.add_subplot(221) train['label'].value_counts().plot(kind='bar', ax=ax1) ax1.set_ylabel('Count') ax1.set_title('Label') #Dataset seems to be fairly balanced. # + [markdown] id="ssKrlRuDnMnZ" # # Preprocess the Data # + id="XzgdBs0Amy9X" #Normalize / scale X values X_train = train_data[:, 1:] /255. X_test = test_data[:, 1:] /255. 
#Convert y to categorical if planning on using categorical cross entropy #No need to do this if using sparse categorical cross entropy y_train = train_data[:, 0] y_train_cat = to_categorical(y_train, num_classes=25) y_test = test_data[:,0] y_test_cat = to_categorical(y_test, num_classes=25) #Reshape for the neural network X_train = X_train.reshape(X_train.shape[0], *(28, 28, 1)) X_test = X_test.reshape(X_test.shape[0], *(28, 28, 1)) # + [markdown] id="J4ZSOObNnVKK" # # Build the Model # + id="alpGoCWZmrA5" model = Sequential() model.add(Conv2D(32, (3, 3), input_shape = (28,28,1), activation='relu')) model.add(MaxPooling2D(pool_size = (2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size = (2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size = (2, 2))) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(128, activation = 'relu')) model.add(Dense(25, activation = 'softmax')) # + colab={"base_uri": "https://localhost:8080/"} id="_KLgZviDncRB" outputId="0b2cd4d9-1b89-42b1-9fcc-b301ea3278b4" #If your targets are one-hot encoded, use categorical_crossentropy. Examples of one-hot encodings: # If your targets are integers, use sparse_categorical_crossentropy. 
#model.compile(loss ='sparse_categorical_crossentropy', optimizer='adam', metrics =['acc']) model.compile(loss ='categorical_crossentropy', optimizer='adam', metrics =['acc']) model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="wnGDiTZ6ncWc" outputId="dad400e2-ccb2-4a00-a55c-d6c87c47bdb3" #history = model.fit(X_train, y_train, batch_size = 128, epochs = 10, verbose = 1, validation_data = (X_test, y_test)) history = model.fit(X_train, y_train_cat, batch_size = 128, epochs = 10, verbose = 1, validation_data = (X_test, y_test_cat)) # + [markdown] id="DxciJMfSntwm" # # Evalute the Model # + [markdown] id="FDU7lVd8sDtw" # .history attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable). # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="qNwxdYBsncZn" outputId="96f512d9-f292-4fb4-eb36-8ece2c18d1bf" #plot the training and validation accuracy and loss at each epoch loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) plt.plot(epochs, loss, 'y', label='Training loss') plt.plot(epochs, val_loss, 'r', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 295} id="m8mFr_jkqjks" outputId="80ba800e-bbe7-4308-aeae-85a0fcc19aef" acc = history.history['acc'] val_acc = history.history['val_acc'] plt.plot(epochs, acc, 'y', label='Training acc') plt.plot(epochs, val_acc, 'r', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 316} id="h2MWIo6Yncc8" outputId="1d3ba2c7-60ff-4f44-e4e2-fe52f291538e" # prediction = model.predict_classes(X_test) prediction =model.predict(X_test) prediction =np.argmax(prediction,axis=1) 
from sklearn.metrics import accuracy_score accuracy = accuracy_score(y_test, prediction) print('Accuracy Score = ', accuracy) i = random.randint(1,len(prediction)) plt.imshow(X_test[i,:,:,0]) print("Predicted Label: ", class_names[int(prediction[i])]) print("True Label: ", class_names[int(y_test[i])]) # + [markdown] id="4Bl3I2zVn5YS" # ## Confusion Matrix # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="mfb9ICyNn34T" outputId="cd465609-d06a-47bb-bea1-7e2534f04b5d" from sklearn.metrics import confusion_matrix import seaborn as sns #Print confusion matrix cm = confusion_matrix(y_test, prediction) fig, ax = plt.subplots(figsize=(18,18)) sns.set(font_scale=1.) sns.heatmap(cm, annot=True,linewidths=.5, ax=ax) # + [markdown] id="GEm3YX9-whCy" # The lighter the cell, the more correlation between the x and y label. # As the x and y labels are the predicted and original labels, the lighter in the diagonal means the more correlation # between the real label and the predicted label, means the better classification in those samples. # As an example, here the lighetest is the D (the 4'th row and column) which means that the D's are predicted well. # + [markdown] id="SnmKZULEm2I5" # # Plot fractional incorrect misclassifications # + id="5HXARfWtmrFx" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c27986a8-dff5-46d0-c978-04a44c6529be" incorr_fraction = 1 - np.diag(cm) / np.sum(cm, axis=1) fig, ax = plt.subplots(figsize=(12,12)) plt.bar(np.arange(24), incorr_fraction) plt.xlabel('True Label') plt.ylabel('Fraction of incorrect predictions') plt.xticks(np.arange(24), class_names)
HW4 - Classification/Multiclass Classification/Classification of mnist sign language alphabets using deep learning/Classification_of_mnist_sign_language_alphabets_using_deep_learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import math, sys import base64 import zlib freq_raw = """eJxcvcl2K12SpTcmXkSjWPK+GUZFpkpRpVDmyihp7gAcoP8E4EgHnLz8n17723YcvKkB1wIIb05j x3rb9nwf34oiK7smr3f78S0v6qruino33I5vedZW+it380mfy6rsOv2gj3lVFVm2m25vTd83Vdnu nvNbU5ZF3te79+FzfKvKrOzrmv+XXd30ebObnvrYFlXb7v7+VvZtVzT6/X3Qv3W1nrM7zctbWXR5 VVe773l9K7Muz+t8xxj7rNfQdl/T8/2taLpSY9jNt7ei6uoya3bH+a2o27Lu2t1j+H7Le91Xdnr8 9HjLNeyyafRl1C9dU9ddttNr87ZpG120X/nMl3b3pSXoiqwvNQ/fmuVt1u1Oy3x9y5uy7vVajzlv 86LSi2/zkx86jWh3+9/4nPdZ1u72elXVZ3XV7R4af151Vdf0O80wL9u+LDQCPb7om67td19+Yt7l WVHuzvMbs84qVmecdEOmhxf97jDc9Lko2rbWHTP70BWdbhifb71WRys4nd76sqyrYvc1r5fjm/5b a2Pfx+Wtb3qNfjdcLm9dX+g5u+v3W5/3XZHvrsPH+NbVbd50u2E/az26tmJ2H7f5i8trrcfXxK1F VXPRQ596vX+33t9abXTJboxvbdMVZbF7Tlc+V03dModFX7qqLrrd9zgs+sFb+Zjf2lqbkbFLt4+3 ti1abd/X+yiyarumaPR5Ory/NV2tUfKgq37QNZnu1QsaUWdf7nhV1jdaufs43y/6vzZJY3wyqaYV Ddc7psQzs1JEC602Xd4Wuz/WB/9v2zzfPUZd3ZRNxc77RboVMlzeGi1tUewOfmnRMfiDl7fJ2cod i9Rkdd8zW21S3fatZn6ZNACtVqcpzizDWy3i7/vdu66vRTIa/JPZ1pWOQ6sD8tC/+zYXmeitdSEi 0BO/5rc61361u+uslazzVtTMjQ++6OTqVAy3pz5Xveb0JfKv2qzK8t1lnj/eqj5nIqdp0VwrkbBO wnDR4lfaHwh4/NJp1eBLnf/DsD44vDow/e7Ik0qRWxlv1pnXo3a+osjaLtfE3yoRpNbmqmlXWd82 md4k1qHDL66y895XeVHmtXf5rIsy8YDdeRKTKEUUukj3fsMlSp2Vr1FEVuUcRxHURf8XdbWcuVmv LduOs/XkIq1fLgYD1ZUNFLj7HBdd3+dQNKutpey19OOnlrhs8kzHez8cPvRZ1Fjt/NZKZy7TkZs1 ZNa+1MFhLmWVVwU0t8zr+V1fxX5yTV1X6Th22tuT3luKQ9W7w/skWtCXruoTuetLVfKsRa9jQJk4 1JceW/R6kmasV3OS+t1wejLUXFyjEKcwWZXabfHGA2dVb8g0CT2JH7Ky1pbNn9ySiSP3u8fhfZ65 rMrFCh5PDmnBWRdtaTl4XwHRvxUiQ51inVw4rK69DCKIouu7qtR/P/RR1CFiHsfjWyFeqCV6zmKr nTi5ePM46sFtXZgBLjosReNrfF7FkcX79frhqR/agnN5Gxlm0RR1V0FanB4xKTjPfnx+jb6tyutO 501LLE7Vi66XUbPWuBt2wOc86EaHTreKEBm0BipCG25xquDDVbO7ruIVRSWSErUP14mniHy166y8 LhIHMiMoKl2l+Y8IKv4v5nLn/2VbSTLMWuiiFOuCF03iJ0VZ1aK06zjwGM2ts+QR1enqj3G863MO 
5T+e63HUSdRUsoq7NQIxY3Hjy8hrsxJa0wz1paizrJasYlEKcfFmt580y0I8uNc1s/hqoc0rkcpn tk+CK2/hUld9Fj9q4UE377FeLzH2Pl50T95k8Mrh8uH7C4QLFFlI7LCj66Jn6UuvOS/z/uLHifV2 nBMelhdt6Z1cntykcyyJrhOtEUt2767aK/2QSbT1u79ex2VCLIkqJMmg3y/9Joppd3c/QGyiyZNM 0Q9w++E8TDd2MZPi0O/ul+Gg4WVa2TYoW9cVrKb3M2szHQXfoy9NJwXjJI6Vi9WJkR2GB/dmwaP0 UP0/6yQ4xvFDn3vRJWz7zlkXP9JGpuXI+4r3Pb4fT1YgE2erd+OgN2pta538Jc1TXDhjpc7LIA1A IrRDnEqK6brMkghlA1b+n+uo4yfKyHtGvTuuC4Srg92yC7CCvBPvLpkyikgnXUesR1O+XSEbPacU zS/rjQtLKWGPKywg72oR/O62XvdaHdEBlDOfpJN14ppiZBfxfCkTbV+JU171QD6X5lIf1md0lnY3 TygXy5Wwu0y+qJEit/vHYg1JFH6fp5tVIXFFkd1lGn1VJ74omaVjoZ9KJOt89ARKOPk+ZtlWqFjv w/0+MnzRcSUO8MtaFgoaittsRUuHTzrXeEKk6Ju0AbGi5cyXuslDQ+FFOcR2uXhFW2/8FXEtxUuE o/FcuUP70+7W25GFaapWQ/ganv6iz+ImM3obSqcE4zKJOUk/kw6jVTLr0GGrxC4GnUl9rrtc/G9g EweNoUHf05U31EY9vBc/es7LtxVBHTLpBsxc26FRn4YDyl+F3NUt0lJzaTAiluN0OoniWNimqFh7 a4ySalkiMj1OfNHc45uFzLQie0R3Xutp5W78ZtjSaLPdH/Oe/2qxIShtiJQqKXUsrViovmly0mcf 0218PLgSRWh6PFY9oRazFcVPRz7DbXcfSGoJ0Uxs/4TKoVfASUXfR+YlctS8FpTXRtJvf0F66ppW et9lZu5ouJkGy3RFoyUk93xe0Hd1jMSTZrQFfZEuI+WPIYmCqiT1ELGdJj7dDlyUSxxsz+VMc+w+ Y7gZWvJjXD4nX9nkOqGDmCTjFxfRTp+WafS3ohG1Ttf7vDzRikThOuuaRuy3pING8pj4QdMQZ0Ff 0kWtFlrsdNK+VzI7xJfmL9+QlzXUz+qUHSbNGZ7NbLWXs+Zx4w6Rq+anp4rfS9xdtAF87sWL7rPW RFySJ1fYSh6njmSmdbh6uqVeWJgBc5POH/zXs5ElgtbiG5B/F9Qg31ygRsYtLKNUsDH4Q1kxmDvT KiUYEPRffBQj1TWMsRB3kfhgZ2UstPnuMD25WvQKNwlGWrRiXFIcDpeVLdCF4kEHUfZ0g5rEoQu2 xFeWPsPeUu2hTqp47nW9xVPLTAto4ZxuunkBxSEkZ05mRKJGjWI2GUhhkE4oyfv0F+SqJLVHJD1E E1tGaJitLSsu1a+SabLGyirUBz6LgUovG26wFYlHqTvPEQYuBiuKu2oO8fhGj9+PfnqFnv5gReMR Unt2epEYg+RlJUk/HT09Me39zEkVTUs0TTdxsusQnF9LUaIWh87C8HXkBiw6Sc96kyrMse0YOE8U wYoi58PEayUSZCCYoSUSKCAppFYQsGQ/R9EnER3CDM9rK+1fZsF8ThcilTCpLxfJZH2rMOrTkZGa oVWQhPYySpKJsqxto3hnKDLznVchB06Dd0UyLEdl3mamjZRFhC4z+umcmHV/mQ58swPhoh9ZJjEb WbuP+zh88HyxzrCGHvwk04s905igdR0crT+rL84OKcbYtWU67kcukuZeIPvMB3SEWbq7D1Rudfo4 Y2FrLXqMj+ECY8KYkLajN7JFuVQ7KSHL00+DGawIIuRCEzoHS6cJfHE+dIkG8j4lxg/Dq6za8EV6 
dmHBNt38/t6aGyPOMk1ZhLpeeI12W6qNBaOM/PqHMjVQWbkyrm5eVC1BhgF3YaFKjGEtjQctxUen z6uRiUBkD/iVWoCO1+gEeNxSXwuZVsuFIWBonNfv7aOuYYPF27TkJwweWZfofZsolqEswXydQxeR LacpyBJ+jBe8P30hfWKwN6LG5n2iLJnOGiwoPYIdkeoXugm/SJZILRtvIZuyuus2UZFJf89343E9 pHOj2ercDIdlRmxlNdKJzUu3tpIfmufG4TI7TTQHxqkpSFGSeWGHVWlBNX/HdFosrb3HJb1dh0N7 evQMYHR75FRWsh7D2R9lK4ViJWtKEgbOffCjJAtDP07kn+EJ0n58+ramgy3NB8tciQJpW1JVOArY cRsTTTdCfRosPIyp6LCIQ0hrgtp0nBnM4bnaCNJAMXH3bKJ0DK0sKrvG/f3W9624wvt6tU9Ig8UJ gFTLMI4sKkddBHu7DssHbDrLsCaOk/4vzU20zrHRFWgYv+6j1BdJN3Q4NCuZziXWxk3ipq87GYD7 FStX13NknpIuMjIlXE6oa/qY6VS/e356XuhQeoisCbTdG8/QEA9SAKU25/hSLpfxzHsyKQ1SO6Ui PfhR3H8npisqEZXZ6bIujKFhipiDGPx9LSvHo0bqH5iRiEKU/I5G09v6WqzN9zLqam0U4rcv9Dax cQZcsDKP6debFBizoCsGBgq4TorI16shY8reDqky0ky+x8ETLFFZ1vPZ45U8kFSUKf3g5iIk5CKD vJexp+cOk+7vNElsFZ2yruuCF0lTfvAD4i1UJv0izQUx+tY1kOs7xNrhqZKYHLX4eE+1WRc/stMW 3nmvDCxRM0JIBIVTCSmGq06ml3Tm4Q9xxE4qfRvW9HSVGtZJSeFE/ec6MSgdbrFEq16ddLVGOyA2 PN/NCnrZUXZFwgo+uVmiDEmLiqUXYntqxp5AwyY8UQQ6lKUdx3K+6iRoOdBwTiffY8YgC1e8UAf9 OB7Q8roK/8Ti+bfebS0kIyobsc+7OMPEiZWU1nQxwfSgClfYcVl1VSMxHwaolqwI6co46spaKXJX qyBr4XNGeGidcLzixtELtFVf756DyCVUNq4xy7pCU3oibuDp5oMpdU2n4L56ldmTZbRh3eVYECe8 IziRuVebO0hySNDUWNx+LGrga/3EfxB7D3tUtXfD4SDNfvLEMWtZnQenSQdIypOI/gKpdmKK7Cif WOl3yZc39qIOh5e0N+142kf2W2v7Ocly1jVs2HjxGX28T/xaV7iXFw201cxKOCWPwJO2zJLOXQab Oi5oaxKIIr1hufJUXPiL9EFxaEm/03i09qR1EBHtF0S+GEmebVQhW7KsXiYRayG1IrfX9EP0g6dv GQ9jvAbL+3O4rL5KYt9c4mWedQie4EoslhQDGOnBjEdSSBLvxP604pN4To8jzmscfH/MXr4mQ321 eNVCa0NQwcYnm4syvvlC8Q1LrRp+seJamaMOzEHKAKsD57RUFc3AnGfZFby+JTZgKx8CzNF4hjNE 0Tay/3ejNBa0PW6UbewpY8xpqBrTfTrgC897ghF4y1sUvvtwZ/nqEidf3K5vRd/pGwqafhERnEO/ a23NwBFYFmlCj/VuYmsbWYSmD322kLUq5Yf1mR3u2BdtRZhDOq23rETlsvCzh7/C138Yb/6t4hli 0bjdYTE2rRgNKgLypSWyg9j6A5baYpBKqdCnstf/97hvWp2y2LlPDAbNt8vsf3rDrhB9WVBrsjrU dk7yInu/mJEsanuypEswBLxn+NekZmpHXv4pDaMpQ2d5w3EkYSVLT7ve4bOQgtNm5puTJG9sYUGs QOShAckSRg49/NQcOjyuDxxveqrE82k6o0FqVNpna8tN70V/Llhbul2sdrqiYoitORBmnaqVUlXu 
7u9e1RzlcZysunO7DtvwHNiwnmDDp/9dap3EsZ+jw24NpPLk1U1fEB7CEHqTooiAWVhPVqQk/HGf RobV9QQFsCokaerMRw1KyOynGB+HZdozlrJt05LyYyeSO86meY2swuDgXMl6y0SWz8mP0MRa+7g8 UPTi2/j10N2Y209EZdMTuLnKoh/TVA6jTHRiiK23VgRKrCfTG6xYNS2WZKjtItBGun7SonVD3r2I WoOC88vk14FqpEDhtp90npoWP+RzPLzf5st81o/SG3CrPx1UwpP9eI533UPMSnrVNzEi6XJ4DFfo u4FMd8/vO4uHMcSRuJlAmloGrDiibGtpRD2y4XLl9Tr5/zGGKQTh5bi0oAT91kBiIlv2okEPWETN ervmdQtG0oijyhRbzsNt+jMosSEstsPLr7MibUM6enAmkVWJkjAtnlgV4R7pc6KY8OQ1BUeJPV5F rVJimlYHGeOZEdT40GVHHIledWVWmi9PB2yWpuwrAlgP3Buy6Dt0LwZTEiLkEtOd1lr2Q3LSaUXQ hcTKeToRAUmAcLM1JcHE8TYu7EKF4Y/qJhukCXYSSl5ToRo+1iuaTFPUNu4lGLVCdWXTfvpzJOim 7XoXo/PyYcTvhs9huphPaIQiLXROZoGkwgnK0pYYV7NpJlwTTeEQzyg22diUk/YAPWWVaPYSAktc oIHD3Y7B2vVIqX/aDQ6Xrox3+XOPP1miiEEQ5NotKx+lmBY6zDJk4nmVCGQ4SqnQ4i5pi4sMzjmv HIAMKnlfH3ufUy1Is/ub1KAlFqKTbXG6YORq9DWkcL0P05lNIqB5xQkDO9evlY7nLB1lewcM1+G1 BidCRJfqvkajtJb2zRzQWcajvVXiPBqqBMJdKoy+irEw8xVTU7/hfbc3Sqp45wDd+mDItUhJ2vf0 +GCy2qnDutjP0OADIQ456nYY72k1FdUdxvKXdT1ZGh3mwOcsTdOj09qNJ5shtbR3uyBgmDInpCvI sMJC9k7oSlmB68LO6jE63HuUCn3sCXWb5eoR4r+J74gm9L1kUcSe6g7R8TBHqXv8KvsZ/bvuqhJb aeRRiSEx/RJC3VuqyeIRvw6nhH5ridNI0/tEk63FD2yi66kNPnHZKMPn5P0ThWPqaGnbKowCe5I4 PpIGKDDWWokgN45EfunKlrWWRoeXkJiDtAUm0pAzcR/8GsnT3Xn2lmHfJ9tYKkqBWgNRoXRI42C0 DaHAQZSnIbF1xG8hDPzN6Kh4FwgukxAgleKwPvTqXAICpwGEJxVelHZcGFODB8H2Xq0Nkny/4Geo G4lNqfszF8t+wtq+HRh0z6DXh+hJx8RmowkCZRILkSFJUiEb3uoKKRwSiwnXbCteGULsue09qwB6 A35J60G1DYHb/Ix/I6ohnRqNO3k9aluScF+9gQQNwic2e2qdSvSuMxf1dts+HcKvcEW/Y8zhns9S yL8mbI/bk1mKPjh8jL+UIfCK/pdtEZNkc+paR3uBd+jNJXEq7fzC3Xg1H5i2Wmur5g8t9HRCoLAT sGaJOzG8hVdplO/z0wtk6X5eY8xkGljhIflF5tYjnArity2rJVL3qRJH44xJydfCSFYtWKy1c0gk hK6MtMyL1gGOOcib6w+yrvSLhBO+tEnKB9NobYIxmhK73n5bfdQqPQi2amZNax/rjc8Ee3Qv11Sl FGP74vV/AqPaZ9IbOD3aZanEuiSrbUSzlEWuyUpluaMNYVs1lnNeWAmOXLqUz07GkJ7vplocuk88 lrG/JQrMfvz24chkGjp6pQHkMLrxl+bLcjmII23oyVEnPtLjrpFV5JVFwF4mhoCj54F8qnMZoNKY xM0KrbXYHC+QYeYIxjQzlgyn1Xw4OM2j1eUw3YHcjr7bhd9UQ9dRefiIZZEMhDeMPA/MhIuXTcsJ 
uzxKdT6uzF2GdRe6kAaS6UCLRUhswhMLbGrOsx2etWOmMjbH+/Ot6gnzDrcHIQ990fztfNBFPZZq UlpYsm5n5/t0MiUR6hhPDIRTQXTBOR49+V7HEdHqE1fkJUHUEY+zfi105I7Tw5xbW5znToByLEMr kzn0a2eaZlBBI+eVC4mFHdY79EwQ1WQn8YbeRAirJ5qy3jwb0jv2U8w7RwmQPXES/89y0/5h1nli q7Sw1ud1tKCjUooGGgzas1dCtGuXrgSzFj1Dx9KwpJ7bbWGbTnuQk27lGCT0VqMl4p/Xl56w/BKy pwiOekYa62HkUYwXqWM27qteuqZEzi8+6Yz8r/9XH/BciKSnW1rUHrkivcTz7bC8OYS6oW9CkEg3 IYx9GcMMliVfJNPNEYsah7Esju9HCPjcig5ZPj026hkFjaf00DIJH35v6zQTghGQhHTBtrHOp4+c 4Ks2CoZTObR9kh7GR3EwqTMONkpTEBceFjsPCF2SXDhcNAjSnbqKQ3gbTzIKZUFJd3V4SG9tcetq PdqusLSx71bfWpxkX5jW+iKusYkohs0Ri7kTDZGpe/ucpFhw4D1hLPuzvXXkKzQvkm+lNu8edg2K lG1OPEwBHaGjf/8HB6sxGzhpumKDpLbVWCNJhasIZNs/dLM5raFpCWRQMZfODPDpVWqIUs2wM7x9 zk1YfIlmHmIYoq2wm3fXiYFKr7Z1BifAPNCyoKRUTnRAF8IIbzCg9UnifIfOXjWsGul7l/GIOJDc rXfn9eZH187U8bjRCyWnV5acJLdI4dFp1jJbQR3P0AdqaPJTVgw9rE2dvKrukQfTQ5oBg+1qHyjs 9GpzTIp1VCIx7CBU3Mpe7Qf6nrR/XS9ea4aF26LSVK/zgWU9sKo9Oueo01TVhAJ/21CvsWhhhEaR aaLPD2iPjYpQFLmK1e5zCK5bkdOHgxFhzIB0nNHLoTsdOTEZZmADdPwlVfIZBIu7jDhnBatKUfqK k+b4NFNDnSKiA6nb7/FEya1KDuwye+zFb3uI8ryFRQgId6zR2W+o7UFBA2RTczzL01UXkR9gPUba hyNL13iz7DbS50RZi5e7jSwgFI08qec6b2TmWCHCYpRovP0xMwXU7tv4dGYKkyNN6QztVp3fcZnO ad2IHR5IANEbqz48S3wMir/wDqKqGPN6tTblcQi/vDZNhAOfvHEDWToa+kJeIcbrwUe6hKaHfcwo t/t60GJogDiaLh6QLpYOLQtvekBAuMRuk+lKyqLoymeOXYmowJMcxr6LVN2qJI/jcXeiip7aoQh6 BgQZ/yWRm6bZZ/ifj9OsW/K62cGb9f+iJ6DjVQ66q/E52Z2v37R9w+c8aR5FJ4UCRw8yD0OdtOhI uxFVat3eVzhAQe7IHuursjuJJMoKf3LarwLf6Qn78J1xSMv/tnksGaKHSrVxcldktomUvZo1+Y+S +mYCoqgiKXiMKi9TYl4MPs1muj1ju3BVIfI8NLSwMBoXvuH8YEGSy7VypGsmgZb9Q1P5tNgp2MHw jJqp5MQTvsYBlZ+llCaoAz5/jzG9KnkQuLWGKAiaeDQFaud9RCmWbd6WkYpSkZrziqzI0G3zcB1W eW6zh1vzkrDuHScQI88jy0q3NqRGDFZ6dCQ16i9LpbxAvXonSVgbSt4qdxKIDEezneVVjkSKvX+r MqINB3tctPw1u8Qe5ZUMrh91hJFzdCXGloH34KsS5wtfndZQQ0g5IQ8mo6kl08+jbSVctU1ksGsJ g3r0hZjTUZoXgxRHCpZrvyAJjE4OP05H8+6SZBtsREZWbZ48bwz5bKEDk7grAamTc+cJRWm3cGTo 4gMbIghWkcijBQlJrqPTsA+b957ES9JmPGDiDU6lENk4nen74ls0uOGIU6nKWpGJ2A0Um5ETuzgM 
XpF5srsmx5pUzLx+cWlJis45SHpoQzxjZLhIIxu71BjoyB+9O32T/a4La06k0OEsPyELCqx0J4Hx NEJVIqcVn1FpzeE2onMOHhzpxyn3l6S1yonP2vnLZfSIpX6JYLAaqYloU+IuAy5IpyW/cXquac1z HarrdDx6OQgxi78xcnTkhawU8lqk/HmWaDT7UZrAdFovZEs7bISigVqQkQbO0zkWsl+00Tr1IYay cGiu0nM13o4w6hNtrOzw9ZJHO8MOoDDLoVKMoHNM0MqhaK+Eh5+HP+FfOkHO+pOOyvjwrkgATF67 mkyW8XQxgRCM55h+OveaqOr4y8tPpUVyVWqRKibo9O/Ml3OnjkLEj/W7c8ilI3LQpNvm8E5nXmtX M/vV+Dc64brYya6jrJnc1+VurpkRFRx/TSYKIvB3hy/ISve+v89X55JnqHmmsj4cEVSlkG09f908 uQp3kYw7L0JHduB5eLyRoVWlHO4e82N4jy2ApENxDurtIgiMCzMUwQwHw3A82oFHnn/FWT3Eu5zM zpC7RibLRC6qxpvSoFEESydfHmxt6uq8CZe1pqiT+I/lAVOSbfiNIC2p04B4Ykqd5Nd88TnVPMih n1GCRQ69dcQjV/UkMH1qCNdYiY5M9VDm4jA2uYWRFQyyKLQay5GE+q7Tvkgnmfx8ctMc0z+kZUCJ Juq2eE1aUpIfHwwxc+LR04tt11GEzT69ozxF51ejbDHIP6bngfRy/YIjcb49VlzDJcFPztyTR2Rk SBzRn6A/6lTElPV/3YFksEVaEqgPrcT+hjLSnAaS+BsChI7PpTXIK7h3eD1Jmia12HqrbdMbRUpt ITXlc9JkRVU6AJ3DHRGkKwlV/RzShqxpMubZlhYfXPK9TMlA1T9xr19JNRe7xSp/PueFrFMIlhR/ PFWaAibCwVnfrmFoXnkTnmLLrRFzFQ3JDhjWY2hhGkOBS1Y0VNo1dZxusf65hZjjVyRuEBx/8qqK opLx4QMkiSgW7oCx2UJFZcFAmlaJ41JsZu9fHMW5LxFdLJ3BcNDG/unqC9FOpCJ4xg1C7+bSjRrd 7CSVntWBIexn3mm7ADf5k4oLorMSTqy8ODXlRxACk0khDN6gTfhgzaSLNJulFnqLHpuHNWXarAm4 nCw+SKV14oMeVlFkJMJ/zqYxsR0cEU5lks3WWpSMXkKZiJuLAiatpxBJvQfl11KpkjYw3XHYaU+J LUBCY0xNfPqkbfNutCkeG8650uEQaYRSZnIXxI0xzKLfEbjhB0jvfJn3MTGJ6n+ON29AIyMSvowm UdqhOoQbhVc6He05SOlipfGz7/7g1PDwpgqDySMiXOFgpxakyl+JAhTZkIJGRjmpoLCoiz04VO/l rrHQ6FvUF3iREw7mu9TY2T/oksMyHtlPXdVhGkdSbBmZQRD8wmD6ZvOEjjyGfErplZYSFU5+qTyx jZV0R8eJfTYkzjnOUJckT5eSeMxWarL8bqhSoq13l/pU5FyKGM82X0oUWxvQkZUrvUDaExSQE2Ig H6okhTmpuN4nsjveiUAzqqRmjCw1+YLT1f68J3GnssTnqRND5ZFU8w7VVfpIWfFZiqFXVFKjixwO XY+y9/Ftws+dTXJEeFW4rYe9xPAaD25auOdlgHilC8CaLMnLyuk8EmGjpy+lcjdHqELbW1H7iHZy 9N6XeFP2nCxZRhBJ0DvZ996G+9M8Qzt0caSXHLk2kqNN+yUFmuivfnzJntwsOou+ScV/drRrYSqz cEeVtZyi1eEmI9gMqrRvd76xuYncyrDDB3sQIRKSW0nPJL+ZqFsivpYHPUOelahl+FQuYywRlTiJ p9W4P3EFBI0UaDQX1MkS/0wUxlCeVW3hJ55Hsm/Ix+m8jYsqr9sa0XZd0FSvDEyvHHLrHsIearK6 
fJk85xY30H1aOATOaPktg03r6tMb0URSsbvdf+OMcCgLAiK6THag9aPS7H05ryGxtPsVBafYJFmk UZcFGiDRQZ4l9rTHj0h1LTG4MQItPCfl4t9mz62QdvrXf/hs5S6iiWUtqAaczckpqUrJWEl8FZUr CnHn6HOLQvGAzXgUvRbwiN83KRhUEekA2JEkpizB4vImGx6lNMZmZxtGLBKPTDgkfYhkAu3+eops Duqx2lTul+MDetim0GagpS8TnkVeXb1owWXFKJveY1eqkWbDcHDkOi6kVawdsqByTPRP9fJW/kel zc4JYloLrZFYX1r6jMLD8OHqp7qzqU2dYEmdzY2Ank5Jjob5N3TKhx8hJu/MK+0YTIAjHU9A3ZcE 8gHCQ/K4zF+ef0VMSGzQ/FaLvDcRYXPJ/DJPfJrXuCz7mfJhypyKJ6dtUWNovVVH5cqhpgIKOcUq 4CaWTsJ/cWa7zgNfuGwNPwVX6X4e/EZpXnrg7cOfycJg2HYFhRD3mKRCPoMCco7kl5kuxdSuJWMS WVPCaFfelNcWJ3EESRTe2ZFvgrULW/T0iWgviH467+Mxukwyq38cFIw4tzoQFZXoPFIlLz4xVLLv /knyCbpJ7qjGyaaJTDutrBYWmsnJipyRNTL8i62MIWUpl6SruzyLURGjsl9Lz8Z57tFmWFevAgXI hnQnOG2e9URlXcTZluSMrnsXd2ZbHFw070IVpHeGB0Ebgroa9KOT8op266sr8Y6xknnf7/51xUaE vDWFOvnED17norJDliz5GFRWVd6gxVxJU8F+HRA9rvRcInGypMh/94+HaSIL74tFpCSfRPQyfzEV /DV/e9eR0zi9FAVMO/G/jEjl8HP4SVR2GbkLOqVO1E6Sup2p6e8pBEN9tM9R3J/onAsP/cu89w8+ moVrqBfvHon+mxpM4WqPb/k52J9a9DCUu0bH7mWOxHsznMF4Xm00ZdppJ4F4wI6kXHmt5pb3KUvT 5baVY1oXnyW9puGuXxB8iVXmgDCkUFA1S0FuQR0iOdNi9S3Vye+OksL4pXvsI0UbYpAyGvmpssbK enO26Je8Juqa2GxGMfhPYUgwTPTagYqVMw+m2ER8afEXFKnh+Cl1L34UB4l0AQafbSmxZETmKVc2 OFJG8rRLagrSrEWFqzcBpzWaC8tEgth+2R5AzawDIfGa2p50fqG+8RGufeRGE0Q07am90Xu0wbJc qFEm53Z/wVNedAj6FG7l0Okpzr+wxOBXR6aOVCs7M84mJ2XMFdN9uSr1zDbf6nf7WkyLImNKgvu6 rlIMk2XNcxIDJz8dWASXkXJdl7tE5wHtFGQXE9uCUDvEeLDUIrK5l/k6e/JNS2D20+TdyZp5xaZ0 DKs6Cna5Sb+sN6c2UWXV2fPBK1vykfSwhVwPVqiL9LXpP1ceTzbDffAYENwoIJpOb/fTnQLvDt88 lb9dQ5GyMzwKOz1wfsseMb8gagdHRTkm+FxQze790srs011d4/263sdncFeNrgYx4eo3Sck9h4uu IEdRT8BUK5xhSCgPKIc6hS/0LKk2B1ejN9p0ss4jtVx6auFKRubU4kiSZUcllAmkMp/BTHjDaUyF wxJPAzzDFQsi8+ZVaKO1yOP4QH2tZKnDkfDWghpY9E08UyId0cb/gQuT19YaUrCwq4vj0TvtwGGA pPL+GOwQSRtV+ye8wEWLNvRKB9DXqk9VhFJFooLxMB49gh62u4xnu4gLqmAJSeAiKKKUGb8y5Rxa XYrSxOgdJNBXKi5mdqilgGE631Co9QKctucz/6ZWNvm4tT55EdW7J2itiojaLYJ5HP6W0vmICGtT tZPXNe25Y4SmhTarXyEghlQ4ImSfeGEHCabczETaMiUe6SKdGHFz8xv7yw5UAo1+cE4OaeirlLW3 
tqaZO0ZM1P2ALoCsOl1Wyy7dBkXcvkkL4TxmyY9i5ACgXtgmiI0UULGF6+ANIpfh2x4boFIo5ILQ G1bCYbNUlXeSRuwBFGTVDubyRUvlF9V1KZVaj9Y2knTOjV3mtPbZm4ZGGmF/DCHtPQ7CEX7W4C0Q Vbi+zmAG5NQ/zHH9tYvSrsThGljV8wtVS4oUHsrVJyhDeIkt35Mhq/UAOUO8wQymId94T02YCMiQ JLcIM/ihbZ5XUeBbuCwcGSnBe46VJfmbeKQURcaTO596/vBq2BC7bey3wdmb4kMUu1BejGxoUPnE d0avfqotYCDScpyWoBG0EbRBSpLmX6b9egZ4BO5lKXXBP64ojB/8P8Oz7jwzzTCvk2MsOBp1PGlY VDkdLiRWQThklf1dQnHwzDNLOhSAhlTRcL1S6+lM9CsJTkVNHHX4QmpjJJYvB4GRJtofy0E2Vd+n 0oeiITx0ks2aWCKsdI+2XoALE0nsfAGDQtPGegKXhxi2j2ODffwS2E0d8AT7yU+gIvwrPjp5dvTe NiQeHFApqBrtbc363DlryPyi7/NNa5F55Zj4ECWFCKiegnD9QHnrO+HUwhnFR2Ivy+yfOFrL6EIX LYYoQCIIRmmEi//nNlmzalAOyZM5E3I0yEfyDn2OpmqUKnyi/rkhtxZ/nJazJDtBNHENTp5vkVXq VzPtG/and6vvtqpNnuAajskbRKrWbTynY6ij3KbK1jgJRKGiRtMupWCozmP1cpLYhvOAvQW2JpRh RkbVxvv8nL3phaFrYtlk1ELHewYl9huOH4+pi3IvIwvoHBHFJnrJr5TfG1PIR7l2Gv3mCHwGWWRa dyOTkOq1CyFCUhmuaj6i55/IY3oY5SQnE2E5BMWSsPBxGwNFpSANmNwV0WeFd/1uoiZ3OJYM3C7D oEzIG3IuQlBJE4jBFDDh1QHTgpws18vw2Y4a8DpqTAPDPhWEqneRAcgFZH1baPu5qf5Ux5CQ9wSo Cv7elMhx8F6WvbMB8YCCa0TaznQdTCnAOXzb2QysRNE4CWtarCxLc4ZRue5fo8y7wOLwga7QqYwx Eac5DFDeXVBn+mSDqobA+MNFD5oE2Q+/UvWt5kHW4CVgXWzQPVeC/4VzfZx49UYMPJNBfNwI2Rrn QEJaQaqDI86bgWgEMuKIH3wUmcezG8onj4MrQAoimjjtHlZrKorqnxbzNVkkdzvZCxIcdu/xkVQH 549+mQoAD3MU03RK4l7yNmnUemOIwKrDpg0IF6JGu8GAH5TFV5t55gFLMnQupA1VwzUNSApWq0RR TqlsxE5zmNjhA+FbVWCyLFfwbXBNDXtM6cLO6yfBPaPyUH/7eDfgjvFZBoByiLtqy5aBkWVo9ttQ SJIKpqTLrO6NlwQapIFLfbiOadFk94Ci8WA1atdPYn1Y+WmQhLFzVSQTPXwctI0VyqXeFjoIdQKa 6h9URhSE06zl+ZCXpJa7HMXUWrbiSrfZelpXRkzJYD5auRVQpNKZRONwMphQ9WIWJeVZRz8fz/x/ EFMb/ECQBX4X1rI0s5TtXeDFIHeYB5MrdpBqvWfSxG2/qCgqiK4FekCBtxidy3yuJPMcIA9OZon/ 4zFcvPym+7sPAX7kFz5GnCrAk0aiXXqcQyNPH3LtJfvNCw1OJRU05GVJ3vZxeqSy5cIlsYd50TNN sy7CRt/0MTPu0EWmZWEsHVRe0y4R5t267L0kZAmkLL3CJdLJ2UE9hVGURGWX1Qp7iS76qh0fTLZd 4UB2gjoqa+cruyC2KLNXdpFeQ5hgPZsjEhOx95wJkgB2Gj5nY8QAVxaVlla0PYF5/wyqIu9lvI3Q Pqwq8Jxs7xaRyoLe8MukaTMr6ZFOSZHFFDpFSYrwdPv02woSZBbJJONCGWluwDtlbiySw7+Jhgli 
xhbDJKMBBCrH/7fS2KIonBh7gyoLlgwFIRgFMuYKRyQWE6H20cMizuYAmGadNYku44yUpF3didpA zvZYf43J6pM+0fxgceCk0jtq/A/zti1GjguVLz2PGth/rncq+U1UpOGzFwX0f58TsQHPaEyFAifl Dn/l63yXeRivd6epa8ItdZeWUKgLyLf54ZcDyIhKLGszdN6SuhQX+xWgVzhUahFGOeAjDl/RWWe1 bV9wtMQX45QUFDcNx/nuq6IsOKKZ+H8iVzI9wxwxuVkZYLcp1Ew0M5bbx4NfigSjEDtUOn/GfnTp EeiJmCcoUvhxLABBt9rNH0jJivFYd5Gacn9nTmL/vJMR6QilVGMfKFCBGB+fM4Aq50+TSGkf59EQ oMhxHSEjeBVGTYsqNH7kfAenrChsYuVM8Drou78vw39OCEcqO41HwPKiC/9kn4KZZqxDo51VCUjp ChXpCatjhLwl20phAZDDMQ8sHd7+yLUp8rIso1gY3p81RMdsKyE9EgxUAQqSIyuy2Dn0OcA336Ox RHRh7SjU4bKG2NF6VNZFvAhEed+Ho68kgZezcKaiC/i0LN14DHJoUxqzD61rvEwoOXUXyR2SU/Hj kKk+FmUy/4PnF2TM70fvPXBQCY/PtVlL/BcpbmgsM4LMjh5b50YacKR99YJWtVEeuLtswkrilYgJ +5zMq8Im1TqmceYVSVSHD6sledPUP/V/38ak6/DxxfZhEbE1eEWOXtJ288jqWySVmT3mYDE5wBkw fgSfduNfrqa94ieoY76neWDeG1yycFZfnF+Px7BW0+0jLVBTbhAAILkFvGGMpRM52Z5ANwWgRQtg KiydaLhsRniePFpAELhUkHBBnsJ73FtULig3up2BhuZTbCJ60O1scLwKvMcFUzcnYut8tAKQKGLt 4fXn7iopYgUFE1dyJEWWtZG10t7i49GanczIQZIBMsQqb94HhJCpxDGTw3AP7gfiK0z3ZJQ85Lbo 0wtSy/YCEuEd4hCD/x8DYsArSYjh8B4Sqs66l7ae4TR5vtsjnFOvfRgevp2EF+lzq5EBDQs8rJBE DlhF+Jcp3yIUhB8znJM5yvEWWNRat91Wr38xQxAVPN9vTLYgR/pCBInzHCEA9Ne8yyMT1ItoFM/H GN7dHMVmoa5IL7Vy9oiFYgdkjvHKjFDvVnMQq429M4Y3EVDCJiGr+PzkNhG83nUqd/FCAtyKt9m7 VGSZc8RvgURIAmIaK5zvZMdE1lEK6FV0ZERDCK0VC3mHjgYQTlQxHhkmPuYordcXMZ9IwygyMIGP XuisIMHk3bpeBgxQFP4wKbLLw80HCNjub5JgYFUwjKRYa4M8EdQbuyBMwkV4NCTn+Q4SowTYLxAV QaeUzPmL62D1QxVBR+AZsyqFfcyYalnOf1pRyAynZ3cN8F6/JUbG9GTKuo5ST8OTEq6PrCI5bTEW Zw7a4qukROuWm7UZcbJCr5gu3mvKNb6M7tUTmRFnmq6RVQFkNRBm3CGC/4ks+NRkvRO0xd0soEHP 3UUONfglxoV0OrkJA63tvJpqjH6H6bleU/wy1vEBwi+nL9/9qxRSYvLapLLe8nit1melIUB92DMO 9F1v5GhkOREMa8BOOpY2Q62a+a7J+WrNL8P4Oq23gKrsSUgNPAA9gFQkEAuYWfO7izSn4lIEcTZ2 pwaIsrGE/ZGRVfo/xq8Ybg6Gz8XQaz0ZgBhT11gB/IPHNWSCM6ulh4VBmAF4Z5sUwM2WACZbqQf8 lhbhKZIvsgx/euzkw/xULIbwosDxHRGhcXQ/CZG5K9f3yCRDb5ZlcoglKZnhcTVMsz4CQ7tYn3sG 08s2/MzZqKEuELXfOzcCg5jYYtBQWY8cRGlI+ER9VDMAUI0C1pM6I73l5osJUVLDE4CcHRmy0yWt 
NTE/8jxhDRk8n3guqF49WRWh+DKJqt/99zF8+R70eQzNxGruw6XNmjeREpw5mmFpVIbHwU6cDECq lQA3gKBAZn2xE6gYI8V8jLguElBij5X3AvEAF9bVJIBs9fimTosRZjOUw+f3PWAIe0AKxl9O7gHS NHzH4/IZ/jX9jiMfHeaWtp5M/IBXRRV8D9pA15D88b/B0J0PuDtyxzcjZxE6BkEGvFaiqQmnw+Cp raMoBrHsAf7BVIl4HWvjUImVMgtpkrofsbptawXOFRyILcCRfvEpCjON/yeiysXfZHt5Szu8fwCP wZ6Mqup9R6O7RPSZhatS4Q7AWz2aYPrY5K64NpNtqpRfFJQMX0nomj2l06QXhWpRYOAFSl+PmpNs 4cy5YJJqAfuBqM+DH9yTvKeoS9LBofbcOdiebGA25Aag3I/H8D6aBz5G6SO+1iVGe29pRmXx4hva LpDs+8auptuHwfG73c2QC2xoHRaPMdB6clcBSUhQNiJtFN1ADe3JrCQeExC6HdbI3rhrfedEUm8e cXJnZxlXrwf/5WrPHzCzUvGpisidB5zwp8I4zEieiVIvKb7NCwLTqcUnY+L24b0Scxu2owZUZELZ dam7jnSQLDL3ZsLLk5cPqqOE9KXT87UKHzoscXMhAMXfoXbdDfrpjRWjMx9uQ1wnuF5QzcN/yIib 15FyScB+NJ1DlpvM0FX2mW/lE7mrF54xNDBHj/PK/nVkVZydqZmDfb/VDV8MuksxZcoC1mKDn24s Cb+NchineftSI/iNy8xdVbfh1gSjgH0dLvN6NIEabGs0jqQX5SsShPmp2X3OF7JtWf9+S/3NO/J3 HqO5lLOxk03OjnWR5Pz05hGElWxnu0ChErXf7+a0RoV8bVhH0cp+fTDuOpKYSJC4bceCbOL5zq+F HVN33sMZc5REE6wpYxqHu3GJpXhNYg8RhJyCICgzHi8nH4seH8PneB7DEtA26XBoxb+9pK5Ofje5 so9GhQY3eau1iO3vKJF0ce12HqA0AGpMkB15TD5X4BmLxQUX4xx1ZUkVx/SMrSDfU5Nd7MOhaqKP Ezh6xUIHG3HY4S7XlpC8NBu/tMMTekUtugfflPCSVE0OGV6a/QZL3ZIRl5zXIvjG0fWrM2D1Ioyl cDsB+Wym4ElRkrh6y1hO8ZXVSwOH3SpmpiBMCCJV2yQCxICFpR5Nx21gC7AytcMEOc0cUmmYaQVM 28sU4NIk+YRJDZhYKd5q8OfOqWKvjDndVRQBMxwI1bQUACHc45X9hetveV1aRzoSz2kN3Rk4yR3h j2vgqLaRohvMv6Om+RGAX7D78gcSI8lKaQh9JMxhsdSuZR4sUcij2Y+JcyKxE35yV1VOK04gtm1n CIGrg2y80AklnMs2gb1A3pTcnoZkm2ckKW/ZYMfEGL/gsWZ8uP3S8DNXr3mKpA9pqd8HgxSAWF6m /NNEzgDGOBVyesbNsIW9lDE+wnlJZsTgFxfEn2QB1uIKvkz7AM3tKAHgfQEvTuJC+KbZjZYwpY6K uUfhlgI2khmKESUCNNnnJpRLvnTZK2abt0BVHOLEdmCqBqAnsyN/WLPxoWmxVJY1RoRf469//5d/ WtJVqMMsLO0JBmfZsMqZgWyvXoMyCVSftRbisobL6IEVuQACDIybhKqh2cHITopVLFqN3El07nB9 Wv6SzJCTwYfChIllDHEDgNRFCs8jeYfAb081Bmxw6/q8sAZky4EgKx1iH5yzRd9K2d4bZzIe1SOw SjvINlI22SCg1g5G5W5R895DyAKeqH/69HdkzjhiBhh5/tIwXDi+jE4k1xrUwOJdgoZdXhQE2AI2 +8JQmByyQb1CIQPYtu3MpKzrStMFLDpOdm7cUsz/g3+xZ9CoJaCfowhRH+uFQ3SQLR7A4Y20T8m5 
gFZukYgBcYBVlruLSwR+rC1nKeubcwc26Yh3THeR+OmjCkbe7lO2aMJgbSuX4y4eI2VIW5YZl2Is a6r39F7jISfaB8szIMQlhl/YpNBwG4FYX1ZukI63beNJONrcGHkDyON5iGEE2pMPILAYURUIBj5s Pd6K31e27MVzAhUgZaPl9OXRxgfeZ24QhkjuMgY/haDOM7N50GIR3C9+EQnh1zTwtvstJVVX0Vfk ep2SotI2LlgF5NmAD9Mlqslz54S8T0mTGy4JxPEztr8NSZ9yYcRjK2f1rdcbgyZEkwDgDCZuyFls SxMFJSY4+PBZrlery2UAo2hRMpBsMQxzsNYMlTo6uBgPgUklG19Ti+ZTgaYOTF4onA2MSXaIQYP9 778/lkGGEeez+UlWSxuHLve10ZN43ceNjEZ8wM2WO5W7E9bXgprSZnbv7ukaQLZegCY3TcApRI+I pnvlwE0POhlkWDkPw6436B/H8RXM5R9Nqi/woaH1UQARxxHSEf9DKi1ryRI5f+i+GKQyBwBN/CYi oLkx5OgsQ65a8KmvlFGiKynVuITvQJMzHNo4mqGg+kdm67YotZ2P0+Xouhu3RACaPvhUQ/LZA0zp tg2/CF5zWjegtWnZwWAzK3hJpnhqZUzJKeIvFIcH2Ogx7XUcw1gSPM7LcFw92qazBLdKFO0YtngV 7Q6o0kqCwJLmKNvZMBS6siy3PHsew8k6s4cN+jFq/yUYYZM5CWwkqUJGVuV0BWDJrEAAKhmO+hzk sYSLoPmCUrcePjyXGjCt4eyp1LjSQiwCI4mgDSWwoYaM6tXj4KYTvb3zJt+awsAAa/r2HLPkRo1Z EgyJ1AtrbS1xH1mTJ14t5ijhZPFqWKkIojDhAFtDYY2D28Avj1OQIadpWdKSknS5RhZN7pwohNZw j6YZOVrKexq+6Gs8rvEyt21j+xAC+8UunFqs25pdPMt83T6IugE3Exy6wYPD1HPlb1wI/zYUCGUL uBmtcbiFWJzl6FdiUbC1JCn7BAHt34wt9jmF6GlJ7yZRx1sKBN2BPGmGWqQKW8ZEyog9RT5ZhMvX pMM3xLgCRb2hl9zeTVlAJMX0YzHa5BNMPDHVMA2Rt567rC8SoNOxAenvsp3bOqLY6XBSfOiWHQ0M MDA972nrq7zeujTUZG3azavVLJ2beIzzURsegbm6APJrPMZ6Gc8teh1we4HvM+nVNfjMNmCC5mrC xq8SOH53hnhyPutnKhNJ7ffNhLIcDWBQXdJWa8J/FzzMPA0303B7pHeTFPgkuBfvzqsfdllTIH5J CQb6TXuyT41e6q5NuOumU3ByhsthFovzb8C7BeU3hTuh3cytgJYfPhNR5G1UebAWKKwPcEb0lto4 EhcTmeTZaLcTBJe9aiQ1MnuOzDIkHP+6mO87OzEwCPIaHhPY/zUREoMp8vRu98/7YDQQPSVPsJK5 SzpB0oX0fKS9tk0ZGF8uLiBe0Fry2bcDBp6xWywOKN7xsk4b/6P66ZchPw8+lOQ5X/ar17zpf0tl CyYM2ozOfXQyqBHyw/M6P+5hDHt8f0RDobpy6rOZoo6+dPjvg+fQGkPJziaPFlvCMrOm7DwQufl3 F6v96VYyVbXtY5wG7E7yEDZpXiMCNnQMiALHUjhZTB82E+ybqvFjxyF1VS0+ELh/Xbk4PJa5zjcn p9MkN6wqXQSc2uxPmYVM2i+3F/Hs+soJlzP0hhyc0WKCbHHNPOMNxJg+bgCRc0sR1q13D3Ehzcjq Vk12/H3jeFX9Ki3gslYr7bNS2F0wWBJp/f+h/XTXSBGOzvv39U5SnLaza15Zi7krsK/DbUVoaF3M HtAvJbbnxyxd2v8AFV1G78zDmvw3jJQY2ok0l0QaNcgt/7nO3lS4znwLIRYSMU547QjHeX5uCpTZ 
hDvxQU2kBkFYFeHd+SAbwRcZ7ucY20GtYSRx+NGgcTtcyIyy3+ITPo3Q5x+DM0wQIIYBc/MjmTHG W7dcrRDFRpJMYzJKb3IGgIKVIvA5UGqOnvMR4NB1OkSrIkrIbsAU2FSu8ZdH3aruB4ln3b8IlRpY CHW6p+UBrQp4/+RQqgBWBe4KKiHdI1CdTQ7g9RI4D6FvucnCxiLBP/DaaY/cH4jCRmMCbgIEZ/fm J7JcAwuKyDxPNjM0bFdcXhPf/3xYIlAqt6bBl7/VlR3GtGvRbSI4Jt/vwFg+PRcA+gAX4ae8iJR0 qxdV5WL/z1gWp3UmvCD3nopEc5a6wqMw3dBxR28WAVZHh+j+Aht4xi99eCS9hnWkB5jJuu77A1g1 FsFGSniAKgrXDqh0VdNile/dIspl61sRTLCfkhRft5+hhSfH6Ly8kAb4vdkKiNPqETM4zlsspSJv eytzi6UnyPk+3lIvJzx+Ie78NVDqhsv3n2aFLohIIq3CkeQEzXcvjZk7n3RiUjI8G5s7VS8cxBUU RtHnFN27WntXktZf1Y6KezlSdYUDHc+08NK3rvetZ0AeqdCue0E2Zj+Zyrn7sKEFDimlT/uN94QZ ehNoR3N7T6egpIcrffh8AMl+dEpb0D9e+E9j38AJUiYsXblIK3H3Q+ildM5i7oRetwNK09NFaabo qyPNBEIjoWZN7HCP9K/Q5qNxZl6VWZdgFUFtqNwZ12va0adndoOxcKGwOBbKTh1OnjWfeJy7l8ua Cm7YYJbVkoI6het8c66KBqVF+atzQnNn7BphljTDoJwqejL7Z8LfafwhrQkq2c7ZqDyqVYdohQdi nLb9PMXqZy4Xt6hOj7a3gkrxWxql85IDISY3qn/yvBh1NbJyNXXEwuMbw3A+J0ZUAGw2SkmCmEEK 2kWnp8p46Tg6fm1BeyRo8Vtmp3fYsjqKzmhnlAVOxyVYaRshT3NfOOlncOnenaA9cSNbpEZwblrr wtaUCBCj82lyvmv8E7gvHM3f7q+WRbaV3kWzYvdVqqLg1avJNdH7d8MPoeFckn+gcb6kV57QV4ws ktPSN/XRop1tambid4IWsrlF0ypIhcQA5Fq6UM1/wZ04+uWu/rg9UjO7zCFos8nCVcQTOTX6P3Ea AFdNLbRifFxHq6Ru2IpqqyUIkUZax3APbRdkHYnh5dPSqiLRgEIJK8nu4BJlorlhB5+JNzCkZQ29 pKKwbr96WzKjvLqfHtaHdA3rC1XXbOHmWD/yIbRDz9eWY9wfrFTBVPYDdfcoKhV2RkBXmPu6IXIU iuoZoHwg8LWwsQbawEQr5BvZ10HrRpobo36u214Zxu46/XqdncbCIggY/I/w25X4FR9Xa1b0QXYk 1jUGeQmx3H17Scumc2SJBlM84lFPpAILes5+MOkiNuni2QTqIymRBiFEpedoo6gBGZCaDuJGj76H EG+d7YTzK87iyzdGui+UXKcy7txYw5Ehbd2nMjz2JdnuJc7D5LjbDgRlbePFe17YLN2IpTUe3fJ8 d1DG00aR2CoeogNbgv/w+rEakgqmHwfbo0tDzLnNEuekH2vuWgAd9ZB7sEapK4Flqd8Mlmug3vSm wl6O5xYwDl5zuuAqKZu63jwgHmZ0EHzYP+qeKalJHihy6DnY0j4p2IFgz3AT8GeedQnqUoqAAu4j gW/wITq3vwBEGWJuhAiLHkhKJoP5E5hwu0fEpY2kkqq93UISs5wAXFkY6iFlDdO1MaHwvJY2aflP d1E0QCQg6wyUVg2edlluCDIQtcsFNa9YHvfncB8ETYKiaukYNHeLZ1uR+GUwFL51FCTHcwq34Y4F LlNxhU9VWbkLW1jQZRn51VuIFmlGgGkf3MktF1N1EUPpk8OV4VfYme/TPm1s2VvXSPUQiaNgWaRm 
lliet3QuDAF2CV5YugIxNqx2twr33MSb8lt/BQtaTFvjipA4m7/MiTQtkgGvw7c3uA+56O5sP/vQ 7v72r//mTWiiyUDMUJRvx4UhYRKjKAFtfuVWaba5GNoaDKv/qVJjzNQHz+PV9Brx/uPWllbHSKam /e4gR+xOMjog3zw1j3tGXlVecmrGEEVWvYzb7kIGz7U2vJwJsWxd4TRfrbHRev1VoupRV4E2yTND yXZZErypch+4pACXuQ+125/F9uXOPqZxKhALHMMpmFCVpf6FSdT8gIBvfBjUUrBBRn/Juw1XJZ2i qK072M1Sgu0ynEPXjoH0NKekGRoNTVPdUnowRZCUeZk1FgbFjtRIBkm0Y92bVkjMXIY4qWVqe/Mc rQPQso1mO9zBWu0DUANp0gZGSG7wl/+LhLAXa7hPx9gXsbY/YlHKwKEJpuUWX8cXYIsmibUhmeAW pAQH/h0oEj0y1ELkyT06cKL+hlmU+B+Ji26r6b6g2FfkJ2zRgAKTnfC835K9EGE3k67dnachhkQp rX1tZWm8EpMorwdq5mWke+3cuCzaLuQUgm/2bMSESvoK3s2k8uJ3iHazu37rXBV8IKs2CC4z8CpF xdx8taXbs8+z4WJdzet1pVKXagjvOtBPPywo2qdiUptNFsiqlCZTUosvU5UzVNCn5Su1Bi2S/rmB xYvLOMveqhO4ES9Q9NyArykUgy1WkOwbul6BaP4viHO5W0m+MMg89MLYaD+acxnFOmYN5JLPt6Bd 0E6OaCvfJl0KPQc37e2N4pR4giNYZkTGzL26lSzca4gelzoSlYdLPxgbTO6reDGtEMv+5N8+DP8l JEcD2p2051gOMxwnUUfk1fUxQ2I1LgldrqPfLHq3BSv+ZWALzmdm0A03WKYtcNJWPfc6JbHEHpO9 /hMOKDD7vuyyNH8LECW/nlW9vy/2ghW1EwXO1CclidcZvxxKdvuuf8jCnqLpb5ugVnk4GYPRKbmJ nHCSC2Mb293fH5fhav6MY/pxwyVQku39tF/XUTgLcVJlzqPHmLtc7VX/Rttlpg4R0rZ1S1tOWyXF IBr21jag4644ssT+bOSZYRbGzQfqMCnO5tJnezLBCvktsCWZDWQERV38SI6k65Noflw10Uj7kVij yBige19IhTGC+czbcbSfh+hbTD1L9A4M3khOquueWZrC9tE79dw+rTAigyzkrqO7DHuzXRSCpyNc BbmHOIt++VzmmuSm5RTRD+kzala0KNShLMPJXZKd7ycp/uOgo1LP5qYfK0NtFwhdFL7UrvKxoHHZ 2vkW/I0YewBSPdMWOLLnhtL4swyMEKtYdq94s+mtdFezsKkLSlkOYXxQAkkXqMMcydHudvV3SUyz TQo0d38bbkP4YAqU+MM4XVJPaMrNvpyDQBEikx/pgpJ+dSGTBYTXFnSSx326pWVODdUL4qiYUF5C XD/7YFnkgixO8qQ6y3GwcHCVVMoFaG1ekCpq8ITUy9k4XlaWXdb37W6jUIcU7JB6hbVJKfKLu8wW oAlFJWyMK/BkPGJqOrwpXkEgUy/DlyEtQEFrttLsvAgIJVSl+WYWQUGUay10W1EHki0KRxcYnxpo XYWjmdfQhmI1Fm38YECNN10N43sfolUw2ei7kwzBKYw9k8xh/mncANmHEu0OzOBxSuEYY0h5n8B8 AwRsdGPqfst0MaA0sW84uIx8moN4AF3gj9qJQQ3E7jqijJh3FBiVSBw3D8kpM9ttirepT6T3DNJ7 paS5fGayNhmHJYzNIqsjF27xjrR9IHpsvmsflX1wC+L5oeC7yTfhnGdKXC7KwGV8IVYH2YGtHAem qtutmDdOAarWaQsomyUfBiAsPPfKACkfsSu9U10usW5FstbNQV7gOEgP0rwSQibNugm/PU2WfTI/ 
fPJJyf2cFvwUlNRQrX93S3aqyggBRkt3midFmtXIk/N6a2Ph9zr7JqV+eLxFJGm6ZCd3tRWB4OUj WDA5Ju7Tc3dv9AAcfTqp6eG9tWv6fk/EExrHw6cVf5ohGw1HF5T1Zc0YMIUt/dNnxI25LonEicC+ mx158C6kycmR29JZcgKO21JaSuf4w1BUnrG2BO6sI3nr6Y85eNfdO1R787DtCy4BBrVDmtQzEBx+ BhMwyjjOQesEVPjtJBc/Paim/J3329GDgy0YIXF1J5iFJCSvdOdoxvBIrO+Z4qOmu63Dd0EujCvU k/LKgR4TV4Z//9vl+3q3WKYA9TiZ5OnpObstPAbYEj3hiQ/uV8k3lo1sh2cY2MS/d/8twgA4nZ2j 5qWl9UtkulB9+ZOJSBnB/68ygrzpn1bf5ArvHmAHmVQcQZyCiPv6N/dqTCkPXKlwnpjsSQE4RzFL 7tLtzcPOV8ATdD+9S3hgFXUDj8NsEmrN8E6vvA6SrkkyeuXFuuzvYXh/miSUPw5LryiVIdKabKyg B9nrdxuSkYLgPU3bo1u32nL/K45n8yqKzg3c+OIqtUOlGHBHexNdBHiZTu45TbN7oy6mItKt8Tvq feS4YgRgaYbNTqXTq3V5DiLA+xpY3rmLqPG1WJRlpZH8BmJdurIqUcZuszubMkBKFAhFmIdk5PwN IUJpjgWjN52iJYE8+zhtmWgF+CgpXypR2N6IYRpNEy1KOYz48GkCmqOUB6BQCNzCtQaRFR/sIdBU nu/HdL2k9su6yEvjdJl4OneidX83UwYJIbfxK7aqc/MIz5TyT8TnyYfbMOXHOK3ww0GGxjqmMxDN UTkx+daALh5n9K9IFyNfIqSrJwYakd3X7m6YhA5U2xigz8HOPF71cKMifWtc+3cI9Y2o0AaY6HcV pfuOJJKTMRSaOqD1gBz/aYKibk6sJHI9oILUVMk/Zu6O8+Hriq1MJg5m7sqKFMnKM3rWbularm0n E+MzBBpN6Sf3y2LE/Y5uOLofvMConEKYblBtMVpxIKr2Yx+L1JDAy1S4CCAC9kbMdP+7uHBrU+Zn 6CESwdTEQ/Z4a3+XQ9RJkQHoeCh5OLsfydHoyP3L//1XZk3YKvz6hQ3sLR0z0Ub7W+TQZEQjXEuI BmJeN2KpAirBsBU5cobavYdnjoNp27869cvzA7r+ZQI9k/IG1F7w1tpwpSnLNrcrMyojt6EVBCHu Xq/eDfPYsD4KVMOf48mN9+lhIhZhZa/UGDgxZtPV56TYRZA3waWC/Mf2cyQdNTQ8qHuw527A5q6u jCIjDrGBfvsffbDp8PYlyVBVkVpoptsk3LTxkM4I8ozC2dwQrGTWxPuzMsCw/EtmM3SyQGlDwb+F +plnhnRKddEoUw7Akn6XkQyz1Xo9GS3dfaWvJ2w6HzDKpd0GhpVghdL2IuTJRdu4atQB3YIBNDSY l3A0bbnqO0olMrf1lb1uMsvwLy3rnhv6VFALj6MiJ7c4RpbFQURMvtBSGFb103FaD3JeKSq2Fxn4 nJMjqm6uE+1cp9d5eVJ4dvxOa2gtMgPn6SeF3IwBVIXTunHLrPsd4p8nOVkpzouVQ2fmIA5+dNuQ 3/VPI6mQdrghkeKJA6GQRxOntJgaTbkhyuZuMnOllQVU2STMcv/QR9reMr6/5kfLUlcvhEs+650Q upWPzLGLW1piuE1yOhK677hWLrAM2CrARB+pWMn4AhvIc3qMu7im6sesTYEDJ7G5Qvvm1aDbx0ib miB+p7r81+FYpTqOwXYQx9JWGDjumqAvb3BRbD08IhcAgjMMy6Z8kFA9vgKmeWOke7e1zA1r+xmB aRetb2XOAD8bfIhXGMMjUo4fiX/MtvnzKrCHxGK3uRPfeYypKC8D5k5MNjJoMtLn7rN3Dn/0Nf7r 
+tZfhwDRYnZ1lG5ui+kggIEieERbuSuY9d2MGqqEWAAT53QPd++tBv/3xRkB7tyJU9dUbSfl8rv3 tQivbvAbACLWrVohM4d/1U6H1CteVIDdloLdGckvnyTxxKgyQ6rYaZZhLEg19DqCLnv9y/vVYYba 2ZGx2vD3n2i5V6UKUbZtIj5sZ0VkVjXcHJ71rc00zLLcKeXmTe0ignHaHk+ukAx/fsSO2a/O5jFy xzHl2WYoLV/Tg/YGGqotc9n/eM0zeNDnerltjNldfByVuAWT6Wpa0YB9AB/pI6Sd3JYGYmDbUTwy hyRdLxM/YsaF6yykQA3C0vP3m/OtEjNDSTuBoewD6WJ7quS8Aehvxg2AXKmY4JBl5O+46tp99VAr yKx6BP/PWsPavFJUMtwR4tNzfAMx4pVfYbkJtHRUuWRlNEoXF7/MIVqynwq2zGCb7yLFOEmAC6bo sm7E//XpPoz6RoHvF58o9r/bInWf7gCnYj0zQ9+kyH9WuzwevShDuQYfxyeLpN8h9orKnfURlqRx OKIRq15GH1Dx2vPGS0HPGtwjXU+jT8twQapnzuKNCgicP6kPCxdlXWrzHGQYvcqtg2SNWzqfwsfo pX3M693Tz18VUvFeFMnRZfLBJcg4S5B8enkTuF2vomfjv4Q5wLbjPbnbm5ORW+X8dFOO/WjDsvc9 SCiXKC1vPbw1tSvyKnR2b/uL63jC15HBmhOiweHbsgexvSdGzvTcgAa/Gvud4YuMesJHIlott3Ex flJyYln6LgU4GCOi+yxj6Zf1BKl058mMNANo7BS8xC3qPiYbWk0UI268EOCGf/ufXl73/TENuYn3 /U52r7luDwr7/bfCZnPA6xjEWLiRh45HSIMsj2wkK5+Z89cd9o4VbRLO/XFKx554JpToR5UubLfb RwvZOtCbhExmz3DSmfX+LQPCGwy0kKiT8ra+M4wExm8Ir4y45BDnuLRn5mSdPHOhy3AhgOWnVF2Q 23c68IuHQS3mfQZRB8ZbubNWbKexVVaxctsyGcVJzr7ejgJ541FjkwYS9XW/lc9kVJRuZOtHcrCj 0rPHO+HmzqZREpmujthmlORIyFsFsfLyoDmgpReS7d//ZvW6cKtQF/yJjrMtdhjrE1WaGRhQDOga e5EFkPgnkJ5+/WnY251n7JoXKpsuLBKSj7c4Cy3oVS9g4W7OGX4Wc4llOvkdQN+4O+WYrL4MdDUX +4W87zs3jrCfJYsYIjhlBy8OYRrvXe6ExOH6o8XhmEpdjoIqHSyK7gM+D5yNBAred9ag5ugzEetw /TYLzO0tur/bB2g/Th/Z1F8+F9Ldnge/v9i9cPp7sk6HVHeY4VzRKG+T1rrvamdfsZ2lWwDKiGBt c1febjKCDpb4sxOdkF5tzmZGBvT3beXf+I9fZTFZluW/afHEZ3v0apqmBwBXj5cq/HRZYYxG6dPu 2tiTBfW+XsV3+sZJ+dQjZ4afvrhwMVjGVi8WgEXrb/yVaAMVtslvluGh/EGrg0XCrkWB02G2zNJS ND9wRj04F7gzREB9u0Ww7JgwkI1IZxANAgAwnF3/F+1E4Yt1MBud574LHEoSM3rMm+Q9y6yurrfN BPC7KYQ1fwDDdT/gGdPcCc7LAjHd4d1do5Vw72QsmfJPm/MZ3u3ztFyCr0CjofjfPfz8VXMmuYLj cb4Y7na28CSJd/+Yl70ubYqU0JyUWbpAP4df9yj2ycDQHp0g1TdUdgSov3e22Po9mmDxylLY9mIk +HYi0BczJkgCB162U9UbVsI47Fq2+r+mIVipcZuowSU+PWFpwxjSeMBwSiG9w5nZE8QbVnL/PBIt BBkCh7c+dDyHrXpKkDTG2eISKGJTJM0wIn0lw3iDdi8gk/ewy9OFZKueUi67qc4zgyUnJ4E8Bkf4 
wbv31Nqi3/0xsvjikXMUrPTowiGhU3A1o56L0kor/YaBv4SmUyYQygBg6MG+Nbb09viuT+U3vSO6 xEKCgPeT+bQ9s1v2XQaG7JgqiHt6wX6S6qnnUhvmTr9eNTQOgj+9cTbhkz2uUvHby8V5G0Y44mSv cUP7Ww2HmY5ROCZZqj1lTD9NWnoCsBtMaV+TLpWaikFVgL1TfZflBqx4JIiIxHq17eGlmsyyxNgT 0JGH6SBfTyvuPembPbwJGB/nnhs5KCkNurzCuSGFHA2sJ9vpv89Hdqj63cuUEXWM3Png6pTnvxrv QWktqC8ug8pJ07qw85aGNboAmH2967Ps/OjBnD/6f9KWvyJX2uv0WPeUnvbGbQ8N88ApBydn+DCT xwl6e2F5u8/JbYWoKJaXAX8waeSbS8u72+IOXNazuX7vDsM9H/YOtDD83rWTjpz2NJYCBCBlC/R4 7WcYyqaMGdAL8K+N8gwtercu4DSRK4SdA3ezxprDAA3c1AOU/GXTsQfX7acfENLazTS/gq1Uv3ev NUvF8tL1gMD2WCgpeAqNkXYguf1AZFJpO9gCNVHFVZzuMuWTEZn0aacCbL5chjt7kkfPR7AYerJN TquBl6+h8gOfQ2zuCFcBJFFKxDm0dqJmj2HzAoQST+9Q07iF7Bd4b3HI6GXG6hd0HaC4Az6Ch+Ns 3MoeI3tvaJPeJdfiDAc4va0rL/RxdB+5nl7Fob/3WGSp7Ubi19TEWVTBjHqKwF3+1zuaM+LJ6vFO jcAuPoId3uaf84Xm7iro0Gl7AKQN+90DgqWjHRpaYL659YUBv4aDNFEDY83Ho36nCJk2HhYFvXsI rj/skAgj/Gu9Mu3f2hn0uPwDOsGWcY9DLqr7+9JRN6cSrvg4tHaVPauMDceh4YtHS82sNlazyAPg AwN19WTTLWtUqfaFc4QDpa6T+vTy57BvYsJLRFg10aKMSr7AqspSYxRiQj2m8fAO++3CQEiPrl5Z cJ9et+xVwGNaFHNaz6aEpnUvamCQ+yJFWkle7/Hf3ucvMwd6Ip/jBNUubRC/RSoYYsP9bX1UXUy8 7kMOcUzCNagxraNPXOH+GtMYR+aQMnX6wqx4/ojDlJUvfLsYH9jYenaTkokN1LQJMHBihy9rLCBU YQmxGRQC3Fkf0ib0r4FKSy2yge+JtvckMy3v0mbZFjdMQyV0+XxPP6mP1KCrJ/EUbY/VcpMwDgql JLhmcZX02JZRYaxX2MWPD5jFKVOLpdOPmOzLnzLe9D/o3whKZJf3lDrg8LZVoJ0sUlTCxL+50PvC yKKfUQ8o4sYff5mcMtqjp9NWdktCE3MBapAaEnaJhkJ3sYdZI2zwV80Hc+Esb1MxmmeFHp0SlJFh 0TrqrXPX+ctwC+rrDLlL8mIHtPxvqhiacfLZBwBcZpgxJ+70LtU3yy6zwBPGT/vWdQa//a1c2w3G UsPDv/z1Okbnwx7Peyj4HfXFr546Jvy9rSIjAOlsGRnZsGh3p5G4M5rD2Td0hh7L+312ML7r2jpB L7PzXRf9iKhRObPLdeT6QLE0Mxfznm82KOA+98lkJxlCPK9zLsIAp4uapGhZaSyylJMlSdkRpX98 kZzS5+7IAXQs/yYxaeV6Q29MUYcY4vbLjDoAsU4B39wZZJ3gBty4jVSNRFwo+cd4g0+Zxtv17kiG e4Ehdz8AWF3n1KLlKmMf6mClXHrwGSZos7UaM1cpnZhGulFH8ZkYKEZTiiOQDdC5R95wt0usJ9n+ 4dzqDDQ84pbhc+2pBT8OPrcy/4zs3VfubnfdKNgCcL7Efp/RFRfWD5ebZmsmAmKI4WXNqqlkfq5O fe5JyD4lH2FP5o6ELh8y+PondEdGxsz7qzZKgifKHjqse4PgwENzLBR3PergtsAHT89g3uSqJpgM 
y4wO+BqK5ta7fjVWKZgzmhZFP2BhBMf/CYz7JsxN+rPZ5dgTyz2OqdlhTytE+rIkRtIRPzCWuVvF gXzgZDlWpbHbjWJb4wsOVpUPvBKEoO+72wVN9nGQFvCdzLUujIGoOjTxu+QmoAtZuo6IitPqdFJ6 lDTf1rj32Dn8Q50h24+YdpF7kNHd5+E+8gboIo0V7pvt1jsALl0dXXeAqugAx11c3MMAUJudxtuR iBn1Km+GGIw23EisIiG06znG+YneUh2+/9Q/2Ye3LAKqVSPqKhPAGs40H1kq2bvaERNpXXT81ivr bPMzxxOyMuXQdAFpYKAKmAaGTWDNdOQWScczUZAWbm6xrAgGIyoPFxRzTS1L1Qkd0s69O2E4bddu o4YaICHytTqALJ2ZSEC2a9wE/rqHU7ght6yydBQoFucFoIzS9foesbIO1PVQaThLHZGcO//uX3Pk NLdJ9+vQl/8YJaTYztzuROZTbMIc27/D5XqZNWrSh0dZ85oMpQPESjr8sg4taZO6yj6H9RLw3Ub2 M5mhhncoySzTOVQIwLVl367wKy3NfnIulnEN98FSMH4uyA7fusSWVj9dGOCrTsmnMN8KBZbPujfx VIaUFJOFSBo38LHx3qHhGB3MG1FVm3OlA2v/sKSr05r6MAWSRtLfu3LrP6jNkiFMf8AOuG58jtxM gconp5UUh1MwW1dupbQOAwxGN2cWhb0fj76IstlRY3LSNA34oAG+WH1G/MDCH9hrTu+xcCR9Wz+0 djkTwDPm5w/QXdzGub5A+ripp+MtyBsEvJtPJrYEPWdjerFL6AYdbcfcOpI1pfn6+BnxepE5TWlM tU1UXtgj0Tljx2ecgyXScAOBjrLHdzy/bx0F4A4c8GZWc91HWxS/jTxR/VBuDUogiQSa5UMRfQmk Tx4tyMlPiHZ+HX5D5/dd3twz0Q4FH9zSyovYZIeW+bhZcGMLhM+hQ6xrpGgs7kkZfqGOqhoQvixO OrfemLQ/9NU4kFHQkTeYssTZQQAatlIoToN2/X/9JXC+tSzNBgvR4aBZb7/VtrhBYxhzfyauX0XL L1giZUT6ry+s7X0XyWEORGutrrIbPZJ/OjIhbE9B3YUZyPLqaW9twk0rvLmUqMiKNiOw8uAEeFaM ECLQ0sFU3AHaZzxGS97TfhxPerVG4QqCkCn/gePrzVCWG75qhzPmJUHNFtAJf0rSeFzxewXbZDnt 0P1eRnJHzt1xtIPSgJypyqEDROz/nB53O8M7fF2DIdhi/eCXoKZ2Se908+POlU3TEk3kjV0oNv0k c9u9II8UrsUni0ZE8f+UzWfarOLYakfyKtvQocbYn8s2N7T1Ty6n1Oox/XpqBQyjAodBRuPrT7gr /iGIX79RUicW8jV50fpXgaLhDJ+yJZGadKy3m9qNRF8QF2l3STh8fA0Og3ak+yWXKFvoSjU7gDqs BtC0nabYVaUrxrVSlDNvPlLdAYAPOfPcgDVPvzxrLxhRL/BG8wzUw632Jw4L1VkJVCM2+DIiZMBv XQww2+ImBNoiwEY7XM8XtIzWvSgMON2RyHZahpU1byObdHDVUJen7rrWS1i7m499Udcu/IdUokmN 5wJLxNXpuE7bOyAm7caigUafCFh6Qmor0AgokwDgiEqerjVvZ5qRCT6Sy+HnMMcHfTdDj6D+x35Y 1hj0suj42fZl80o+a7F7dPQN6syPoEzNVJTZbAWFO3VA9q7R8eInp7ojaZgEVz9dnA2ykXqGz/hL FiD7oyeW+UvMeqkAtJZco76RPbbhk/KIOqC2jJfftsY5/47Not8aXUeNLGwiYnC0pHKpS4saLNk9 MztX4bIj1P1+bSkyXk6LmSvCvOU80+ND5NDi9U87H3tZpHKMz8kiL7MJ4MLXN2N2kmMqK4UsEqtz 
sQDHaTjfZtTbjhyZlI7HE9suUOHPMW6Ko+mk2/Zt+RuUkvkJLZkkic3XWhR17ZQBP71T0hNO0EPO R1MkWYhuXNOB87OB3XkaOJ6uw6/pul5jJ6Wqk96RHk4e+Rd+EE2xpUIzNZrygCHJfWR/DVBx84Ix Fl3b50QyKeeA/gCyEulRgX8RpuMT877SUjn0vwVJIp1dvxSlXfkm+6rf/GAd3mtR7nFmvXE3PMz5 CGuJB/3v1lcl7G0EGa60y80cjGHlXmxwVhKqgDXT0tI4wF1xW/r3fdmd3Tm5cjXCRzx7gwJpOwNR +bS3bnXnSwgNGc1T21659+bT56ywjuBt6bAtQQ0LWhdpg4Q2n07QckCDx/rnKF8Pi2UcgI8PS2GY whGVrcUt/R4aS9W4WnrY282QuXTk9745LU3uwdc2AY2/2HH+q5G7V12HWiatKJJYOsLmW8vTDu/n 1tevpQ2F6zSMdptU7DSRGu1IryBghrVn87PFryYW511mz7AfhkQyRo9eoHagUd4DFaLD8XgXhzW0 bdsaOPk2p7hYl4VkZrVbuqtptZ/D8pa6PmspN2olWLUf3+F7be1W4y/U9c5ZmdPVdeljoPq0pAka vXphuURF5LwnbaEsUu95d769M6+uSlUUm+vXjHD4otiqrd01L3C0W5fuLNP57CeXLg67XGItbUl4 nqiRZydrtE35grOFezRbN2sIye1nL5Q6tURB3N4sFrNrEiZgi6KK+S0r1NZP29WhrWowdZZtoXnO U/bjgLcwAEcD7tjaVUqeQgdAXpQF+wgCyP0VSyHjNqFoiKkQPcVb3mLPXVIYvXV3Fp2S1fwIsCJc RSa8/Tw/ElPBPGwJJIHGAAXRBgPm8RkrgM9fHLmlluprnHg5hcQOKLrm2Az5pGV6v3omaCJfdk9B TCCvs/Si8X9dceVxDQmOR+skLa4FsnbGb3bOyx0Nl3zL46f8Xa/JwgJ+blgEbWukmFXH193Qo7kJ i9OFZHXftZY+7z/9nPUP8mqd8Yhd3lIGS/uWGWcQjZPex8AcaKOVcOAmH7kwGht7cal9RJRfzZwq wyu9r49peGgNyyaaDly1j0ji1kabzg1kVLobzB9xKF3lRI6p9Z4WF1ACM2lrPzTyXNuqBuvxFk/j WEZZhnXJZHq0ZaSf3F9F1AZ0X40IgD/yO/b+8D7MemKZRURC61FRPh41d3C81kDBA05jDkrt+jBu 7outFZMhgC/zfp92jeRim+ktpn24rJmNd2yybMxSc7ED+0Y/snWh33HbOuoZcIUtyC57lNuWZLXo k9q6saoBgearGYjrxuwuMWHUeZcaL1kX9BEe7k7w4kzAUI8z5V8o7XQ3ERWsplzq97VrYsLmLavt mrbLU2rybBz1lmRpPL9OsWpJyTNrm9z0uSOXx2aqIygtsYvHIa1SVE+KCqjiaDt/uwWpi1/Zh+Kd fX5Nod3VW6Q5DcQpUB/IM8eTfF666NrDy0i3JKtkOnghCKe54LYlygoAMC9gTTeXh+dMlJXYp7G1 U8vLYPd4pXWdYTV9ilId2YOz3Afo9asi36zO4NKhRhB7tGMCt2oL+MkXhgQRFme+QB04cJfRSiB9 s0/L/KeUP+Ohp9azLcqYOG7sPL1MLusEXTmcFJHiNjyJThdsC7dlG2dY/mHbkMdsRaUl7OSe7y0W DDsa8MWGXiahyhlvXhMArd/DPnO3wL00iWMMbW89jKHlr0pCFoD1eyfbwjwkar6CCKo8df7wWSnd lpZ9qSM+y0SGjXiLDaG7Je+OreeW8lWvw1WFUZ+OabiEuIbLcz6PTh5uqdfXLn2sdybmHEtGUYip 32RDXMx03PvDTNNlDLB0gMydnwvHah3RO82XaWaBS/citgJFNjvNVwMUsC3djiKshdI8ighzyH13 
TZRlxGV9Kq/Z5lDlZk8HZFDetVvFjhtMtGTM/LQkZ5gJR/7KZ1L0rt4O8O4WZza1dK44AizSEjG/ zX4H2HIpFtUSehZ/PsZEI8DmfSO8nFAa4olA+yCt28zYdqEZmxPa5/zTtNXL6/QuY8onG4Kn1Cm1 9f37aAdlB2SIOz1wknEngsbMuIk3QbBW91qn9M+31D3R2OGHZfiyUiLp+ev7zAEhZkzG1cOKYJ1v Hl+P5wGlI7lS1hdUW7o7zIRUsVR2x1gzNO9bFsUzEbTXr3WQ7/KKrbe5C4qooG0LV9YsCAMi3oPb Pxqc+4/hzz/fDA3t9IU4SK9eihpcH0k9hiSP3lotJRP2cLb2poCxn/KIdTfhMa0COt/7ipQl8UQb T64dRJqnPLS3hoShQGRpcydMRnv0FuB++iSx6hSbfYEjp3c5AW1esMkll5zRIcIz48IwdLmb51Ub BcjAWtyWOfVyCNZJqGmgfNkyOYsiE4I+LZBJbmDH8DNTjVvUt2BFHUeHOBrH+r7ZLMx2Exi5CdLt 4PHgOG2A+W1jfMNwqEGwThkgI5TJ4Jg/Ot0sbRbM+WQRgm+QglKceS3mSrRlTtfBEVxDya43Pehw ZKq3ubHC4mhiwMBWRsv27qcpTwvIQIoseaFakhhvEYQy9z+J2TiG25IUaEVkCdp0dz+bkXyWBWy2 1ZP5pFPd9O5Sc9GxY0MbvLJuxx6zBegIh/Qzndf+B9lVzKFhN93lHrUBlw+pN95Yu74wmBpQTMZI wmnpKRabKyH5A/vaUO72PsU+pT4VfjzxrcEYly2uqxf4RptbFJOR3+LbwQXtrY8y4obsZhelXN7c GWEf/inC5QHy7g2zE88RWSOn38kkbmlAfJ/+/HPgKGSBEWUHbkvPJIJydvC0uKxPF7NybUZqcxWM 2THBPVc1bqYzocyhIb6v6ASN882H+/u8qSnDzYICO/s9fcayJ+dDq9CJS/6xHqNvXguapt36PmSN c1kDnNHnkmSzv9g92BChPo2xBJgm2M5FT2R22bMJ0aGCFG5mZ3idrV9ImxVbWamzKD3H4yRyjjYD LRniIy5b75kh990GxGPASxX6YUOG9mdAgrf4t0m1/AnButUHTaVDU4q+z24cEf2aPMnMKTg3CuhY i9o402j6+FNMqpZI3W+RBjeQEE2Eq4GU8cNgjNAGtxzx5HiybMLxZoCsNnOxzzX0tqYLJ8+GGGdl z0DoLRUf4N6Mcbu9Fv9fV2ey20azZOG1/xfplQEOlUly3RfobaPRL0CyihItkqXLQbL89B3fF1m0 0PDGtsRi5RQZw4lzdHXn9umPuAHxy/AMjqq6OwVJ+MVzI/64ZXNaBQhrEaoS7GTl08me5YVhy9yH X1SnHGg7cwSv+zRwN6RqVnPVjIABULJZyXx2B8RdN7J23UQvrvFNGzrAaUoOLJqHmZAKrY6Uw4rd bMPT5TYK29h/XaQaWqmGut+6LeG1/XdjeV2BugLtwxzD0ZLMQTX86JQLcYNSdza5UqGx/wuyZszr 1WS3w/7Sz32EF8GtCwdJojHjfq0rvckhr88KhjDmKt0O5/r/yV6tkvw3F2A1zyz2KVFbFQGK/3nc JLatwK5fj7as1qzyJq1qJWS+ZwZ+RT5aPdlKRuPFFjLHRniHGM6EpK8pyv3IxDVVPqTr3fREqJyD cUenTjV/chWAxAl3+D3taFV59t0tJRZXEAn3luz7HGksvc8rzRWuECncPYbVbmLbv3R6YmMOiUOt JEyAUB3TwnZPjpmBjbj8Rs2Qy7xuqOg9P5+Z7OrhDkhbTUoHH59EyBtLkJBJ3wbOzERrpdaJWTPP AUO80QFaCSeUseWztPXZNb+Cx/v6OKVZ61YZLbtzyAttL5dHu4AWa4Ui+HsMJsshtaoJ45rSXQYm 
pdL02k+QrSaXs+UMzGcTTYbWYmFV4eLmtQi+Nf1e0T4z8SJrVxip+IrxcNddXv8No2uVMTYhDRXY 5gfgKq/wlZ2JmqGNGk50A7iLaSF8msYfldbK8G/PpFljUqRP7o+t668SkMI7fWW2eY3hd5uNte3E bT+uab9hWsDQkjnRVdooXJVCapWW0z0xQCWnZvPF4BQvE7BQSS7ahsuzzbCzrKtGbJ5Hv1KZ/NfD 404pL0xCXk91nX1YRyKxSs5SsHCFXHL8j6Sn0jz91xViokoPUnhIRyaEAn0M/9HeE8qwY59eXLYn VADG4KM4NvHQIvTmY+oGq12dNXKXPdskHv3rcfXC3fxVoWXOuMelAbrmeaPAJ1udwhun8UuSI2fR VfCr543mvXabRZMWwzQxEy9bpNcuQ+5HaAwO/BVl7Pv7g4u2a0jawVtjnTBsQodauwlfEM9S5X67 i9AAf5clARGll3Uff7NNAG1bOPWzqgZ8MgjgE+jSOCnJV57GkkgvtjqwwEqxPIKMXBF4Ny65OrTH 3sORPDkDT9PPO6wJxz/TzIj+jxVDL0bG2QpbfE8/MNPFdR47C3inb5bsnR4DxTN3WMUKrOiVdGUF cLB79D0fLRR2rhphuBTQ3V1Rwcp6d6VJ5ebVHP8oKJ2cuDUrgnj3hi2qVQ2CccoK1KI7yoqtJdgL 17gC2T1IDKaRU7GnwtRHg/0lZyWs5BC3AJaPQDwXcNY96SH/8OzlOkFJpjwrmWZFVpgqwkVq8dPR 5frEyamkYMI9IPEXnyHTEWbGxIliWU2+0+tuRhpldGrmywl/WJdxJiZVL39PovGkbNGwWjY+XI8v Ddxe2fmyd1bi54vWDy2te9jWRoJWl3bPsHtRgcHoG3FWcmbks4czZ2uePfLOYRVw4yDWdVLLqwiZ kKE/t4sW8CspPn60wePFlKO0R7wYFrsjnnuNQBLrTcwLqtDLF7WD4bcQTz8MLop1pKl3P3LB8t8d rCenQ25fkJx3abkrSHPjRL55vUxzN2ogQC61HtVKOPH0uFypmGtOEYEbEZEE/HoMICe9ueh52EWg 7GzuTBUryLN9aLJARjz7vdpgr2kzmkS1lz/BwhXG/brIRtzUVnIyU16cxSVLfHxy6cYTqu4tadj9 V5rdj1gGzc6KeChMJ1ZUWtpTa4dKh9x0zm8eClA61Vv7XNPxqhPVbcRc3t5ZxlUDsGhjSV++5yIK PRhTpdsRfSSVjOOMj9AxVknNfSZG27C9Eu08e5dr9mx+TBCXSseDWLEK9HBPdKfJ+jV8DieOOEmK /3wQ7FXIB1+3D7581rh38XuWs6n1t4rD3b54Q8Ve2R7CDFrAqQDVjJC3yfjq1F8fR16hyqnKZbCY NeW2PINHqPWGtPiJr8kzbeSATkx4EvMwx+PlJSMhkLg70swV1NHwe/QgzNeqDO/NHzBfZHgaUXPl NtgTU+oW3QVz1yJ74InpBPEWx5u3w7Noen/+nZdmwWiuyzYEj9jcU5KklGkEVs+CZ64dYHfaHXjo MiVkc1LoYDheWodPpZ9pOKtmojLePoKhCih+atevEJ3Qx8qX1ifcWx8SEq/L8LiTdokvmTdyWVcU Tq3ht+lNBZdMJ7SeiAre8XPYsT1dZL0OdvzCsFmfgz6JDO84t90yCfRp086aXFZ9c4OtktQNn4Ft gHAT3Ba6kVNzSbrgGVry6utG8llJEtlwRPtHxaPHBub0FK/iR9zq3iwxQZ+vozUuZc8aQuor/xX/ DXwgrerUbhTTRkXtLVca52Y4HX7SfKkjtlhlf06dy/jRGpt0OAF6RIj9nuDeSnPwbvAALFq2hmG5 aZCRj8+yROWfdGao6HySkq7kUd8ff/6cfAighVuKG1TrAo9d7GtGgIM7cofB9gUnDpM5UxAt7kBd 
WCJ1ItJW2Iofx6n55VFbd0/yq0rhJ66h+2v4jIRULN47RZdc35Qo1RwTU9+2B/z8simtBHr68gQt VQPOdsh4flU9agulQMVLP9hGTSa2Lu2eaC37X2zqeQvHc3uQe9JE07acFyTEkL8M0mcp4gAjQUWF L9xuNrA5hLtBZ2xxRhgzcfu69FfyxU5vhAPD9sEtw6e4vIkFaEq1bM9Eki1vLPNlA4Uv0HC6dtPn leX4j54aAGD5f/RfuAEdCcQA+QQM/yym5NlG6QTm0aJus3XopC3pz3u07HqlW3xiR8zZCPv1LGAy 6zZZpuCXw0q1ix9qQz25JnU94P59cEYqRS7uCSi2WAGznWN27VRYW7axw1/l8gifbYVZjv1f1mZz ckV5yE7BiWoqfjid8SoXYoPI4+kUPHFZdW5/OCMQyfOUvzGUSKk7nPr4sf0muT3Xq9T2rXSn7q0O Fjret1M9twrIPOnNc1BN/DhvXtuYYRJV31WTiwyt437rtQwiVa2dfLUlxZrx8qVMdQHgcx9PGYUX 8EswjA+TYmule+mvroumerZM2ElZW6jDhDYnkzmerEras9VMEcvr1mREnWk8I9wdf6iCeReXWGGp 6Oms8SWAP8jXWtZNJfmFV1PPEiZ7lSfx3j5SWbmgGnJIz0JiKHHMg+X7qqT8eJlGI5M5N0MhR0lt y/1AMQENsS8Gtf4nhT0UeD0f+1icAgwhW5C1QtmcUDggkzJv/IBdNEakVMi9ivepENtm52oVUUQy 4SclF0ZB8cBwn/WDvlg1ScI7vif5ODI3oJAYfXXuuiRNfI+9xXpg/5IxL+ZoEbEr2+3BF8znVieG PGJhi3oGArXcv6muFho+jvvcFDMszHvGBDzRHii77DJUXT7Rm/oLZGmfMUB8cVZ6YjBk6+tyghCr d1JQaEtYOf9aSfFhVazCIbg7ptJhAQ4kcBHDzqCF3hilxapkywLeDijWVxzhYie8d12hV/9wPOVT kTva7nQpZ9K0aUa6Vhc5NsuRz/P+LzQzJUfG7ZwPuD2rtUURbPJkOYB1MqgzlNWGrQCNHS54gv+0 BLPNE1ZWSFuYv5inv/iXu6TOs6tYh2NBXWb/9oVjVQDIyL3qChP13/0tuDmzPe2ed4GQBScd8b4r eFuSJgWpla1Y4LLq1DhpKK0KJL2XUbZaGQKNp/pwfCscBVu2Tl230pPdqm6l8IRPP9tGWzEFj6s0 W7lUC1mHnr4dIfH4uFPdLJCNPS4YlaHPdEUBiJUC1mxoTzyXEkKz4XCZZT3ZdOZJlQgs09j0at1e WYRZbeqJaoXebYIsFCxUKoDhwWP4Iiq/oO0QI/NyEddxHjWN5HuH1DgqtChOsNkiE8ogz1ahUPV9 s4OAR+Lo6MbDWino5AmHlbnAkIwvDAKMrblJqqZeF8KOVZI+RTaTDN3JOmpJ4IeIb4/9M27f+BUc Xp6ChpDy7Q6TTqn7sCPb6fGKmTP148vZWVhBHIFIuWQds9gC9K6Pndu6ihdPReRCygiAS0SyZSWz cQIAP/jkIitjbxiSbmq7KjSSncHVlVV9iqG0Z/PeNDWkZ1jAr9y3sU8oEcrkzi6JyZhJWSmswJ/B yIa5XetkQ8/K15dJ7N0XnX3LvpUqlUVja9G8AEzNJpySTUnnuNkZ1Pop0cJlVWj3pbLee5K7bgpz fyhT3A+7h5e2Sf79CShnIeUf+/TU0w6cb/nKEat0/9GULPywbGz/TBmsNh+g5TgeVyA3wmGTXLVQ svhOLJ7PEm3p7opb6n/DKr/dXhKC5gJn81yFaggUOHsAXdqvYQdDQKXX8Zb/DQBL771UAWjSSfwo VALvuWcJmrnZ3fRlsoLZUZInuXhEcq9hGB74QwXqmhvzSDc6CZRL/JieJhq+T7lqYXGuacpJg/Yj 
c7uBulubTir+cETTOT4Yx224NP2FknjW9gYFjDT1NDDDBaLB8L2w2fj5dNoMfKIzl35HbCAv/Sc6 S5ONOOfonmNzJ2VyusCFBGAENYC1VNF9XBCy5v+rIZs+6FpSTnpv8hXgDOGvUNiQpv+hMvrLcNGQ zJvEHm6BKrbnnHaiIHWonezNU5meTUNSZ//6cBbXrdcJ21hmTaiX5I4eUThPhfzqadv3aapmiG/B F6fq91/RsvRgVX/NE7bJtvDeYS/Ff9MyPUzR6z1T5vH00nqUDpyUOPRJi1EsmsAfV+H43x6bGQj3 DppXuquKac7WJm440NTNNCUkp+QH8sdFbO5bhIDxt/JULjIGsI6IASGq4F+NZpoLWg3Y67gb7+kN ZFtBKbKx8DBl4N8GaSEKINgU5XY3xS/9t1xPlwzgDo3sURvQlJkL6iqmdN1cSmRv9QOX5pjyIEx6 5CR2VCRkBUCSZBLAERaop7a/mBj4VGJzgX0oHWsG5VIhyeLVuhsyNAGfEMPsM/pEYT5Cra+siuSs 0J/ibl0+/ZK0TWbcrnlXfNJHUmq3aHzq7RKwwIICY97ZTYDdS76TqLP3Y3MyP0JUc3+9xHVMJ2NR 8wA3Q5uyKVM+2tVU8XtgfZatsJgngC+9+J01+11wcL1/zlaesTzoSo4/4WLub3kQT7ZcaPMX82ze tbblAQQBoDdGmg7WsA8vv/KtZJthV1eTCpWGKWfsetzlPoTrIbM9yruT8/oJ2DGdpx4qzHgT2lPl NSzQYF/ZZyt7odLTKdWgbyfnriLvOiJGlON+8hjVQZb3tNBsoUwArg1VLukS4tXWJVGjfZ4MKoC6 JaR0qNn4BZCswNnDrFBo6k1ElCJbjcCh1r9pTuWQiraFOsGNQgjDQbj3dKBA5/q1L4lRqCLql6+h LB2kJmSbgdLbhoN1yxldC3vMMeedC8LkAM6rdMJOX2DQL6jSKbyXrzhvLPjHVw+r3cXbcCHZIHN0 0cd3TgOt9vgU8Nxej+e0L8MZ1rdtWkpYvQ6PvY+NeTY4zm2xniAOnLB16kXotMyegFliXkoCn7D3 hYetpV+lQgEmBFxvbLWMCsiStQqtlo04ANIW59E0KVuV5FnrvPUImLwvKcEOC2np9CfOiTgwFQuX n/dKUR1FJ6pKL4FeEuuZKAo+XBbP0IuHKQqV26oKihIDdW7bILVW+FgcfJxCFyLbcz2LS7OAFy11 EUQrB2JhHoHp4/RYcXwdPjkmm5iOTOLHLy27CVPfuL492qbA35vfR0q6ES57bKnWsLs3TdfhK0cU 2+Jxfo/jACGFcAHottMKp8XaJqdtoSn3KU7L70/EQ1YGytJOu9Nfr7asVt/pyrUQkxK7byzP6NUM bryhDSTYYQKKX4i6Tv1VpW6+te59pc0+DJlQZc9mw5mu2j09nCGBxrk6pJ5JRTRgU0GBpdVMJ2v7 DW9eoJxtXTaly2aZY0PHGA4gZJTb7sSti4HswbAX6jW7x+mNR3RZhKeEGvOwkgmqnWlKmk+1+jRd q2WDpl+b27VYZQHlwjzP/1HRR76WQj8teqQFPJJqBT8UrScRZNqkI2SOCzCHVltOySUqEnJNvjUI TgjqCqQxppEWCZNrmQ5gThEgxR9+Zf0kRCoL8yHpsy277DeQpds9/d4g6Frpi/7lsiQzU98EHAsp /9P2M+WqfRM7CJuAhir0EMKwGp28e6JkChWn7C11twpdLpR19+Sfei5cYu+ICcL5LrARhluHg5Mn D3UhLcsaZTqvfAvgjRexwEoeG3IYcpjdzBpsZrogOz1QHIsHgOs8PDQnyAc9sPJlnpyyfdoGWn3f m1dRqILGE2DAFu5oLM1FEQusZEChRKOrwf+DVqE4XcDvEjLmpEOED/jVeaZgNDJehDiQBfFEdetE 
FV3Gs3cHWFRtDYD4ZOIu5MbC1IDIKThdiR91gSm2yu9Rlqo/8HVfDLfR6PW510RF+U5qil7tUeat iHYOE2a+UL94Mp9j2Gj7SbVmxgbM4pWupfTCNq3Y56o1qT2fGXYPVO+Qx7gRkJSFYkANm8hC5JGN VaeA2ODdzS1Y047iVgH8Ovx+3T4wGyAtX07jjumvthS/uMnmf0WcGYYlWQSErN3EMsdknPqfcaG/ Eqwxo7SVXWHgYm9gXo9X02i4jDvbTYtS5Nfhg7p/IQ1sq1ROvVDvQYYif9RQEoXCa5J5kxp3JnjJ vPohaaYQwe9Rr4xty5avc5tlSU3AM5+0k5PuXkEeako5Lhp91TYGx7GpamDsFRouFMlSVq3U7tlV m/OxVvKazoQY1czFyfuBdvtzQ0uGpVlNgn/5Mbhcr16vND3H6ebVE7KYXSUFSaFWzoy3g0oCGEHs Ize1xKZ8iwT1d4FgJStzsbt0orBj+0yIU5tITFT6/st5wnh+FAoOexhDCn3xSW5SMuV3Oo2+uoWH CfdZaFAYXkbeLxZA1w1OMtAJcqbHgU3aNxYcmqMD9nKz+abzU8iwb1n8qWJcioF1mizljbRAJJeZ AiycEldIB8ZfeVO6A+5piOzv4zAuLUS/5lOnaxf57McOPblC20F4gFetMKj+c1wG+4ebE2Rw2DoO 2cyC9MfYLnpvr3uDB/rDSau9UEJ6ssBwbgs3D2d7ARvo5SXdXBDyf7uPy1zB0ThDWDWWv0Gln7OB wsuzD/5HByY1zlbGIKzALNGb5/dMYpHzfrzbjFcoo91kLCkk74820ua9D9j4ELOCrojC8rvhCzhd sXOgJeB4nP0cV++iOYzi3B6zhJhCdlDomUAVmbuJXQrZMQOBdNUH4mA3fLJrMFuJif/wBuDT8fg8 I/PZdw1abXnXtQa0MlP1dLK0kCQcjFRQpyZhrup86aYqXfPDmp2NsAqj3G2mjs44bHntu/DblxRc K9TK7SRLEYcCRd455e0LjYERvWwt5WHhIVPLVg1mTXYGJmeRCniP8zYzWxfyoeGb5mdIQ5kzpaYZ Qd7OU1U2NruJSy+wYbYOYvbHfNUuXqaOJo94fTfOonHAUb1P53j+LO1rqDbfsA0MtSy+5wUp+gCI 8jyBJOM+o2p/o920CO4/jl4GS/hRpS/lcsKI38PeXOzUKiijgDe75nHBKWS7xqgvHubOrqgXt6Oy t57a2mLlNivzjNDJ4yxzvySxGhfYTBmGpL0cYg0lM4orhZ8s/5mEib0Fy6whPxMI5CCcSmaS1rRe 9li//jaYuo9fgvvJ1jmOx40pJ+VwwWF09imH8d+dap+nU8496MlWe+lQDAHtyiKp0veEbsd3KXCY 4vD8HN9UkIXuOl1d9La4H5ogkGZ1py5XAca5O04w8DLTm/3jPTBvHFRDlos7QLO3uJdIQCJVIoDK G3jiwW1H39oCSDdfQOEefa2eZ8zFMBaQWvdP7PzAhYkL1LTi+nTm6UmMJeZkCN7eWiSMJZm05DP0 <KEY>""" def get_freq_list(): freq_list = dict(map(lambda x:x.split('\t'), zlib.decompress(base64.decodestring(freq_raw)).split('\n')[:-1])) freq_sum = float(sum(map(int, freq_list.values()))) freq_list = {key:int(value)/freq_sum for key,value in freq_list.items()} return freq_list def forward_step(input_text, freq_list): best_edge = [None for _ in range(1, len(input_text)+2)] best_score = [0 for _ in range(1, len(input_text)+2)] for word_end in range(1, 
len(input_text)+1): best_score[word_end] = 100000 for word_begin in range(0, word_end): word = input_text[word_begin:word_end] if word in freq_list or len(word) == 1: prob = freq_list.get(word, 0.000001) my_score = best_score[word_begin] + -math.log(prob) #my_score += len(word) * 10000 #print my_score,best_score[word_end] if my_score < best_score[word_end]: best_score[word_end] = my_score best_edge[word_end] = (word_begin, word_end) return best_edge def backward_step(input_text, best_edge): words = [] next_edge = best_edge[len(best_edge)-1] while next_edge != None: word = input_text[next_edge[0]:next_edge[1]] words.append(word) next_edge = best_edge[next_edge[0]] words.reverse() return words def segment(input_text, freq_list): input_text = input_text.decode('utf-8') best_edge = forward_step(input_text, freq_list) words = backward_step(input_text, best_edge) return " ".join(words) freq_list = get_freq_list() print segment("wearethepeople", freq_list) print segment("mentionyourfaves", freq_list) print segment("nowplaying", freq_list) print segment("thewalkingdead", freq_list) print segment("followme", freq_list) zlib.compress(base64.encodestring(freq_raw))
.ipynb_checkpoints/Word segmentation - Forward Backward algorithm-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.10 64-bit (windows store) # name: python3 # --- # + [markdown] id="JbIKDfFW45Ux" # # FEATURE SELECTION # Feature selection is a process where you automatically or manually select those features which have the maximum contribution to your model prediction output. Having irrelevant features can lead to a decrease in accuracy as your model learns from insignificant features. # # This assignment will focus on manual selection of relevant features. # The dataset is of different camera models with different featues and their price. # # The assignment has both marked questions and unmarked ones. # All questions written beside QUESTION # are evaluated for your final score and the ones that are not have been given only to improve your understanding. # + [markdown] id="lTtpMl2W54P7" # ## 1. Importing Important Packages # + id="UMOQ75XN4v9X" # ALL NECESSARY PACKAGES HAVE BEEN IMPORTED FOR YOU # DO NOT MAKE ANY CHANGES IN THIS CODE CELL! import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from scipy.stats import pearsonr from pandas.plotting import scatter_matrix import json ans = [None]*8 # - import warnings warnings.filterwarnings("ignore") # + [markdown] id="sJApY7_B8sbD" # ## 2. Importing the Dataset # # + id="oK2veMTk468d" # THE DATASET HAS BEEN IMPORTED AND STORED IN THE VARIABLE DATASET # A SMALL SET OF THE DATA HAS BEEN SHOWN WHICH WILL GIVE A BRIEF UNDERSTANDING OF THE DATASET # THE DESCRIPTION OF THE DATA HAS ALSO BEEN PRINTED # DO NOT MAKE CHANGES IN THE CELL! 
# Load the camera dataset; `dataset` is the raw (uncleaned) frame.
dataset = pd.read_csv("camera_dataset.csv")
dataset.head()

# + id="bIhwr-q7AsO4"
# OBSERVE THE STATISTICS OF THE DIFFERENT FEATURES OF THE DATASET
# DO NOT CHANGE THIS CELL
print("Statistics of the Dataset: \n")
dataset.describe()
# -

# (rows, columns) of the raw dataset.
dataset.shape

# + id="7cCgm80jIviq"
# Find the total number of NaN values present in the dataset.
# HINT: You can use "df.isna()" function (where df is your dataframe) from pandas to find NaN values.

# START YOUR CODE HERE:
# Per-column count of missing values in the raw data.
print("Count, no. of rows with NaN values (column wise)")
dataset.isnull().sum()
# END YOUR CODE HERE

# + id="zXBAd9tPCL6R"
# THE NaN VALUES HAVE BEEN CLEANED BY REMOVING THE CORRESPONDING DATA POINTS.
# THE CLEANED DATASET IS STORED IN THE VARIABLE "data". USE IT FOR FURTHER USE
# DO NOT CHANGE THIS CELL!
def remove_nan(df: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of ``df`` with every row that contains a NaN dropped.

    The input frame is left untouched: ``dropna`` is applied in place on
    a copy, never on the caller's object.
    """
    df_copy = df.copy()
    df_copy.dropna(inplace = True)
    return df_copy

# `data` is the cleaned dataset used by the rest of the notebook.
data = remove_nan(dataset)

# + [markdown] id="ZVIfC2jA_o7C"
# ## 3. UNDERSTANDING THE DATA

# + id="SLLdMjUXFO9-"
# Find the number of data points i.e rows in the cleaned dataset i.e data variable. You can already see in an above cell how many features i.e columns there are.

# WRITE YOUR CODE HERE
print("Cleaned dataset shape: ", data.shape)
# END YOUR CODE HERE

# + id="_qK4guei9Lqm"
# QUESTION 1: Find the datatype of the values stored in the "Model" column of the dataset and write them inside inverted commas. (1 marks)
# QUESTION 2: Find the datatype of the values stored in the "Dimensions" column of the dataset and write them inside inverted commas. (1 marks)
# Assign the answer of Question 1 to ans[0],
# Assign the answer of Question 2 to ans[1].
# eg:- ans[0] = "int64"/"float64" if the ans is int64/float64
# NOTE: Do not write "int". Write "int64".
# + id="JcaUEO0zBAY-" # START YOUR CODE HERE: print("Data type of the column Model: ", str(data['Model'].dtypes)) print("Data type of the column Dimensions: ", str(data['Dimensions'].dtypes)) # END CODE HERE # + id="NOY9NKQPDXVV" # WRITE YOUR ANSWERS HERE BY SUBSTITUTING None WITH YOUR ANSWER: # DO NOT CHANGE THE INDEXES! OTHERWISE THE ANSWER MIGHT BE EVALUATED WRONG! ans[0] = str(data['Model'].dtypes) ans[1] = str(data['Dimensions'].dtypes) # + id="yd8DFuD8AY_S" # QUESTION 3: Find out the number of unique release dates present in the dataset under the "Release date" column. (1 mark) # Assign the answer to ans[2]. # + id="xgHedvLkIOxX" # START YOUR CODE HERE data["Release date"].unique() # END YOUR CODE HERE # + id="EBZbszSWIWNZ" # WRITE YOUR ANSWER HERE BY SUBSTITUTING None WITH YOUR ANSWER ans[2] = len(data["Release date"].unique()) # + id="GSpZtVmBKVQI" # If you run the same for the "Model" column you will observe that the # model column is unique and cannot be treated as a feature for predicting the price. # Hence we will not bother about that column from now. print("Length of the dataset: ", len(data), "\nLength of the unique values of column Model: ", len(data["Model"].unique())) # + [markdown] id="-trfPH_iECbQ" # ## 4. VISUALIZING THE DATA # + id="sbv2hLggHcoJ" # RUN THE CELL BELOW TO OBSERVE THE HISTOGRAM OF THE "Release date" COLUMN # DO NOT CHANGE THIS CELL! data.hist(column = "Release date"); # + id="dEy3_WsNWOI4" # TRY PLOTTING THE HISTOGRAM FOR THE OTHER COLUMNS # HINT 1: You can use a for loop to plot the histogram for all the columns in one go. # HINT 2: The code, "dataset.columns" gives a list of the columns of the dataset. # HINT 3: The "not in" phrase can be used to find if an element is not present in a particular list. 
# START CODE HERE:
# Histogram for every column after "Model" on a 3x4 grid; the linear
# index i*ncols+j maps grid cell (i, j) to the column list.
cols = data.columns[1:]
fig, ax = plt.subplots(3, 4, figsize = (12, 10), tight_layout = True)
for i in range(len(ax)):
    for j in range(len(ax[0])):
        ax[i][j].hist(data[cols[i*len(ax[0])+j]])
        ax[i][j].set_title(cols[i*len(ax[0])+j])
# END CODE HERE

# + [markdown] id="Yd3WAjmsN4pB"
# ## 5. CORRELATION OF DATA

# + id="y4m_2Abhv6Dl"
# QUESTION 4: Find the column which has the highest negative correlation with the "Price" column. Write the column name
# and the absolute value of the correlation (1 + 1 = 2 marks)
# eg: if correlation of A with B is -0.66 and correlation of A with C is -0.89 then the answer would be C and 0.89.
# Assign the column name to ans[3] and remember to put your answer inside inverted commas.
# Assign the correlation value to ans[4] and remember to write the absolute value i.e |x|.
# eg: ans[3] = "Model" if the answer is the Model column
# eg: ans[4] = 0.74 if the correlation value is -0.74.

# + id="LKows3wDxpkQ"
# START YOUR CODE HERE:
# Scatter each column against Price, annotating each panel with its
# Pearson correlation; `break` guards against running past the column list.
cols = data.columns[1:]
fig, ax = plt.subplots(3, 4, figsize = (16, 12), tight_layout = True)
for i in range(len(ax)):
    for j in range(len(ax[0])):
        k = i*len(ax[0])+j
        if(k >= len(cols)):
            break
        ax[i][j].scatter(data["Price"], data[cols[k]], label="Correlation: "+str(pearsonr(data["Price"], data[cols[k]])[0]))
        ax[i][j].set_title(cols[k])
        ax[i][j].legend(loc="best")
fig.suptitle("Scatter plots w.r.t Price");
# END CODE HERE

# +
# Linear scan for the most negative Price correlation; starting the
# sentinel at 1.0 means the first comparison always replaces it.
highest_corr = 1.0
cols = data.columns[1:-1]
for col in cols:
    correlation = data[col].corr(data["Price"])
    if(correlation<highest_corr):
        highest_corr = correlation
        highest_corr_col = col
print("Column name: ", highest_corr_col)
print("Highest correlation: ", abs(highest_corr))

# + id="Z_iCmHqVxpq7"
ans[3] = highest_corr_col
ans[4] = abs(highest_corr)

# + [markdown] id="VNF8VghKOCzs"
# ## 5. DISTINCTIVE FEATURES

# + id="HXtJO4USvaCQ"
# QUESTION 5: Find the number of data points whose (a) price > 50 percentile mark AND (b) Release date > 50 percentile mark. (2 mark)
# NOTE: There are two conditions in the question above, both of which needs to be satisfied.
# Assign the answer to ans[5].
# -

data.describe()[["Price", "Release date"]]

# + id="rL77HD3oqJGC"
# START YOUR CODE:
# NOTE(review): chained boolean indexing (df[mask1][mask2]-style) relies on
# index alignment and raises a pandas UserWarning; a single combined mask
# with & would be the conventional form — confirm before changing graded code.
temp = data[["Price", "Release date"]].quantile(.5)
df = data.loc[data["Release date"]>temp["Release date"]][data["Price"]>temp["Price"]]
df.describe()[["Price", "Release date"]]
# END YOUR CODE

# + id="j2ITBEIqqNaJ"
ans[5] = len(df)

# + id="Nl_CGOr1S2tz"
# Also try finding the no data points whose (a) price > 50 percentile mark AND (b) Release Date < 50 percentile mark.
# Can you justify why "Release date >/< 50 percentile mark" is not a good distinctive feature?
# Repeat the above steps with "Release date >/< (a) 25 percentile mark (b) 75 percentile mark (c) mean.
# Can you justify why "Release date" is not a good distinctive feature at all?

# +
# Count of points above the Price median for each Release-date split;
# similar counts on both sides of a split mean the split is uninformative.

# Price > 50%ile Release Date > 50%ile
price_50 = data['Price'].quantile(0.5)
release_date_50 = data['Release date'].quantile(0.5)
data_distinctive_1 = data[(data['Price']>price_50) & (data['Release date']>release_date_50)]
print("# Price > 50%ile Release Date > 50%ile : ",len(data_distinctive_1))

# Price > 50%ile Release Date < 50%ile
price_50 = data['Price'].quantile(0.5)
release_date_50 = data['Release date'].quantile(0.5)
data_distinctive_2 = data[(data['Price']>price_50) & (data['Release date']<release_date_50)]
print("# Price > 50%ile Release Date < 50%ile : ",len(data_distinctive_2))

# Price > 50%ile Release Date > 25%ile
price_50 = data['Price'].quantile(0.5)
release_date_25 = data['Release date'].quantile(0.25)
data_distinctive_3 = data[(data['Price']>price_50) & (data['Release date']>release_date_25)]
print("# Price > 50%ile Release Date > 25%ile : ",len(data_distinctive_3))

# Price > 50%ile Release Date < 25%ile
price_50 = data['Price'].quantile(0.5)
release_date_25 = data['Release date'].quantile(0.25)
data_distinctive_4 = data[(data['Price']>price_50) & (data['Release date']<release_date_25)]
print("# Price > 50%ile Release Date < 25%ile : ",len(data_distinctive_4))

# Price > 50%ile Release Date > 75%ile
price_50 = data['Price'].quantile(0.5)
release_date_75 = data['Release date'].quantile(0.75)
data_distinctive_5 = data[(data['Price']>price_50) & (data['Release date']>release_date_75)]
print("# Price > 50%ile Release Date > 75%ile : ",len(data_distinctive_5))

# Price > 50%ile Release Date < 75%ile
price_50 = data['Price'].quantile(0.5)
release_date_75 = data['Release date'].quantile(0.75)
data_distinctive_6 = data[(data['Price']>price_50) & (data['Release date']<release_date_75)]
print("# Price > 50%ile Release Date < 75%ile : ",len(data_distinctive_6))

# Price > 50%ile Release Date > mean
price_50 = data['Price'].quantile(0.5)
release_date_mean = data['Release date'].mean()
data_distinctive_7 = data[(data['Price']>price_50) & (data['Release date']>release_date_mean)]
print("# Price > 50%ile Release Date > mean : ",len(data_distinctive_7))

# Price > 50%ile Release Date < mean
price_50 = data['Price'].quantile(0.5)
release_date_mean = data['Release date'].mean()
data_distinctive_8 = data[(data['Price']>price_50) & (data['Release date']<release_date_mean)]
print("# Price > 50%ile Release Date < mean : ",len(data_distinctive_8))
# -

# #### We can see that Release date >/< 50 percentile mark is not a good distinctive feature and in general Release Date is not a good distinctive feature at all

# + id="QzrPzLgHXn77"
# QUESTION 6: Find the number of data points whose (a) price > 50 percentile mark AND (b) Weight (inc. batteries) > 75th percentile mark.(2 mark)
# NOTE: BOTH the conditions stated above need to be satisfied.
# Assign the answer to ans[6].
# -

data.describe()[["Price", "Weight (inc. batteries)"]]

# + id="doYVbbB8KnqX"
# START YOUR CODE HERE:
# `temp` is a 2-row frame indexed by quantile level (0.5 and 0.75);
# temp["Price"][0.5] is the Price median, temp[...][0.75] the weight Q3.
temp = data[["Price", "Weight (inc. batteries)"]].quantile([.5, .75])
df = data.loc[data["Price"]>temp["Price"][0.5]][data["Weight (inc. batteries)"]>temp["Weight (inc. batteries)"][0.75]]
df[["Price", "Weight (inc. batteries)"]].describe()
# END YOUR CODE HERE

# + id="qsJy4JU1KsJ-"
# WRITE YOUR ANSWER HERE BY SUBSTITUTING None WITH YOUR ANSWER
ans[6] = len(df)

# + id="ZbcCa15yV2ec"
# Try the same with (a) price > 50 percentile mark AND (b) Weight (inc. batteries) < 75 percentile mark.
# Can you justify whether Weight (inc. batteries) >/< 75 percentile mark is a good distinctive feature?
# HINT: Weight (inc. batteries) > 75 percentile mark implies that price will be ?
# -

print("Number of items with Weight (inc. batteries) > 75% mark: ", len(data[data["Weight (inc. batteries)"]>temp["Weight (inc. batteries)"][0.75]]))
print("Total number of items", len(data))
print("since the division of items with above distinctive feature is not equipartition, so it's not a good distinctive feature")

# + id="0sdSJhL2KHtb"
# TRY FITTING TWO LINEAR REGRESSION MODELS BY ONCE DROPPING THE FEATURE "Weight (inc. batteries)"
# AND ONCE BY KEEPING ALL FEATURES. THEN COMPARE THE TRAINING/VALIDATION ACCURACY OF THE TWO
# NOTE: A LINEAR REGRESSION MODEL HAS BEEN IMPLEMENTED FOR YOU IN THE CELL BELOW

# + id="f8B7LuftM-gg"
# PRE IMPLEMENTED LINEAR REGRESSOR
# CHANGE THIS CELL ONLY WHERE INDICATED!
def implement_linear_reg() -> None:
    """Fit two linear regressions on `data` (module-level cleaned frame)
    and print train/validation R^2 scores: once with all features and once
    with the chosen distinctive feature dropped.
    """
    # data_fs : Dataset from which you drop your most distinctive feature
    # data : The original Dataset with all features intact (except "Model" which we dropped earlier)
    # X : the training features
    # Y : the training label (the "Price" column)
    # xtrain, xval : the training set and validation set respectively
    # linreg : The linear regression model
    # NOTE(review): the `normalize` keyword was removed from
    # LinearRegression in scikit-learn >= 1.2 — confirm the pinned version.
    linreg = LinearRegression(fit_intercept = True, normalize = False)
    data_fs = data.copy()
    # Use data_fs as the dataset from where you drop the most distinctive feature.
    # START YOUR CODE HERE:
    # You can write the column name enclosed within inverted commas inside the empty [] i.e eg: data_fs.drop(columns = ["Model"], inplace = True)
    data_fs.drop(columns = ["Model", "Weight (inc. batteries)"], inplace = True)
    # END YOUR CODE HERE
    # Model 1: all features kept (only "Model" and the target dropped).
    Y = data["Price"]
    X = data.drop(columns = ["Model", "Price"])
    xtrain, xval, ytrain, yval = train_test_split(X, Y, test_size = 100, random_state = 40)
    linreg.fit(xtrain, ytrain)
    print("\n Train Accuracy of Linear Regression model with distinctive feature = ", linreg.score(xtrain, ytrain))
    print("\n Validation Accuracy of Linear Regression model with distinctive feature = ", linreg.score(xval, yval))
    # Model 2: the distinctive feature dropped; same split seed so the
    # two runs are comparable.
    Y = data_fs["Price"]
    X = data_fs.drop(columns = ["Price"])
    xtrain, xval, ytrain, yval = train_test_split(X, Y, test_size = 100, random_state = 40)
    linreg.fit(xtrain, ytrain)
    print("\n Train Accuracy of Linear Regression model without distinctive feature = ", linreg.score(xtrain, ytrain))
    print("\n Validation Accuracy of Linear Regression model without distinctive feature = ", linreg.score(xval, yval))

implement_linear_reg()

# + id="Wz3hndonyQqx"
# RUN THE CODE BELOW TO GET YOUR ANSWERS EVALUATED.
# DO NOT CHANGE THIS CELL!
# NOTE(review): answers are written to "ans1.json" but the submit command
# below references "ans.json" — verify which filename the grader expects.
ans = [item for item in ans]
with open("ans1.json", "w") as f:
    json.dump(ans, f)

# + id="ZvJAvjsNbPnW"
# ! ../submit ans.json
LAB ASSIGN/Feature_selection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Find eigenvalue and eigenvectors (Book 1, p. 118, ex. 13.8).

# +
import numpy as np
from numpy.linalg import eig, norm, solve, inv
import pandas as pd
# -

# # Common parts

# Initial test matrix (symmetric 3x3).
A = np.array([[-0.81417, -0.01937, 0.41372],
              [-0.01937, 0.54414, 0.00590],
              [ 0.41372, 0.00590, -0.81445]])


# +
def eigenvalue_find(A, v):
    """Rayleigh quotient v.(Av) — assumes v is already normalized
    (callers below pass unit vectors), so no division by v.v."""
    Av = A.dot(v)
    return v.dot(Av)

def aposterior_error(A, eigenvalue, eigenvalue_predicted, eigenvector_predicted):
    """Calculating aposterior error of eigenvalue finding.

    NOTE(review): the `eigenvalue` parameter is not used in the body —
    the residual ||A x - lambda_pred x|| / ||x|| depends only on the
    predicted pair. Kept for signature compatibility with callers.

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        eigenvalue (float): true eigenvalue.
        eigenvalue_predicted (float): eigenvalue we want to test.
        eigenvector_predicted (ndarray<float>): eigenvector that corresponds
            to eigenvalue we want to test.
    Returns:
        error (float): aposterior error.
    """
    error = (norm(np.dot(A, eigenvector_predicted) - np.dot(eigenvalue_predicted, eigenvector_predicted))
             / norm(eigenvector_predicted))
    return error
# -

# # Jacobi method

def jacobi_eigenvalue(A, eps=1e-6):
    """Computes the eigenvalues and eigenvectors using Jacobi rotation method.

    A is used as in-place workspace for the rotations, but only its upper
    triangle is touched and it is restored from the (untouched) lower
    triangle before returning, so the caller's matrix is left symmetric.

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        eps (float): error rate.
    Returns:
        d (ndarray): eigenvalues (ascending).
        v (ndarray<ndarray, ndarray>): eigenvectors (one per column of v).
        it_num (int): number of iterations.
    """
    n = A.shape[0]
    # v starts as the identity; each rotation is accumulated into it.
    v = np.zeros([n, n])
    d = np.zeros(n)
    for j in range(0, n):
        for i in range(0, n):
            v[i,j] = 0.0
        v[j,j] = 1.0
    # d holds the (evolving) diagonal; bw/zw accumulate corrections.
    for i in range(0, n):
        d[i] = A[i,i]
    bw = np.zeros(n)
    zw = np.zeros(n)
    w = np.zeros(n)
    for i in range(0, n):
        bw[i] = d[i]
    it_num = 0
    rot_num = 0
    # thresh = largest off-diagonal magnitude; convergence when < eps.
    thresh = 0.0
    for j in range(0, n):
        for i in range(0, j):
            if np.abs(A[i, j]) > thresh:
                thresh = np.abs(A[i, j])
    while (thresh >= eps):
        it_num += 1
        thresh = 0.0
        for j in range(0, n):
            for i in range(0, j):
                if np.abs(A[i, j]) > thresh:
                    thresh = np.abs(A[i, j])
        # One full sweep over all (p, q) pairs in the upper triangle.
        for p in range(0, n):
            for q in range(p + 1, n):
                gapq = 10.0 * abs (A[p, q])
                termp = gapq + abs (d[p])
                termq = gapq + abs (d[q])
                # Annihilate tiny offdiagonal elements.
                if (4 < it_num and termp == abs (d[p]) and termq == abs (d[q])):
                    A[p, q] = 0.0
                # Other wise, apply a rotation.
                elif (thresh <= abs (A[p, q])):
                    h = d[q] - d[p]
                    term = abs (h) + gapq
                    if (term == abs (h)):
                        t = A[p, q] / h
                    else:
                        theta = 0.5 * h / A[p, q]
                        t = 1.0 / (abs (theta) + np.sqrt (1.0 + theta * theta))
                        if (theta < 0.0):
                            t = - t
                    c = 1.0 / np.sqrt (1.0 + t * t)
                    s = t * c
                    tau = s / (1.0 + c)
                    h = t * A[p, q]
                    # Accumulate corrections to diagonal elements.
                    zw[p] = zw[p] - h
                    zw[q] = zw[q] + h
                    d[p] = d[p] - h
                    d[q] = d[q] + h
                    A[p, q] = 0.0
                    # Rotate, using information from the upper triangle of A only.
                    for j in range(0, p):
                        g = A[j, p]
                        h = A[j, q]
                        A[j, p] = g - s * (h + g * tau)
                        A[j, q] = h + s * (g - h * tau)
                    for j in range(p + 1, q):
                        g = A[p, j]
                        h = A[j, q]
                        A[p, j] = g - s * (h + g * tau)
                        A[j, q] = h + s * (g - h * tau)
                    for j in range(q + 1, n):
                        g = A[p, j]
                        h = A[q, j]
                        A[p, j] = g - s * (h + g * tau)
                        A[q, j] = h + s * (g - h * tau)
                    # Accumulate information in the eigenvector matrix.
                    for j in range(0, n):
                        g = v[j, p]
                        h = v[j, q]
                        v[j, p] = g - s * (h + g * tau)
                        v[j, q] = h + s * (g - h * tau)
                    rot_num = rot_num + 1
        # Fold this sweep's corrections into d and reset the accumulator.
        for i in range(0, n):
            bw[i] = bw[i] + zw[i]
            d[i] = bw[i]
            zw[i] = 0.0
    # Restore upper triangle of input matrix.
    for j in range(0, n):
        for i in range(0, j):
            A[i, j] = A[j, i]
    # Ascending sort the eigenvalues and eigenvectors (selection sort,
    # swapping eigenvector columns alongside eigenvalues).
    for k in range(0, n - 1):
        m = k
        for l in range(k + 1, n):
            if (d[l] < d[m]):
                m = l
        if (k != m):
            t = d[m]
            d[m] = d[k]
            d[k] = t
            for i in range(0, n):
                w[i] = v[i, m]
                v[i, m] = v[i, k]
                v[i, k] = w[i]
    return d, v, it_num

# ### Jacobi method test

# +
values, vectors, it_num = jacobi_eigenvalue(A)

print("\nEigenvalues: \n{}".format(values))
print("\nEigenvectors: \n{}".format(vectors))
print("\nNumber of iterations: \n{}".format(it_num))
# -

# # Power iterations method

def power_iteration(A, k):
    """Calculating dominant eigenvalue and eigenvector using power iteration method.

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        k (int): number of iterations.
    Returns:
        ev_new (float): dominant eigenvalue.
        v_new (ndarray<float>): dominant eigenvector.
    """
    n = A.shape[0]
    # Start from the normalized all-ones vector.
    v = np.ones(n) / np.sqrt(n)
    for i in range(k):
        Av = np.dot(A, v)
        v_new = Av / np.linalg.norm(Av)
        # Rayleigh-quotient estimate from the current unit vector.
        ev_new = eigenvalue_find(A, v_new)
        v = v_new
    return ev_new, v_new

def power_iteration_stats(A, eigenvalue, eps=1e-3):
    """Calculating number of iterations to reach given aposterior error.

    Re-runs power_iteration from scratch with k = 1, 2, ... until the
    a-posteriori residual drops below eps.

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        eigenvalue (float): true eigenvalue.
        eps (float): error to reach.
    Returns:
        k (int): number of iteraions to reach eps error.
        error (float): result aposterior error.
    """
    k = 1
    eigenvalue_predicted, eigenvector_predicted = power_iteration(A, k)
    error = aposterior_error(A, eigenvalue, eigenvalue_predicted, eigenvector_predicted)
    while (error > eps):
        k += 1
        eigenvalue_predicted, eigenvector_predicted = power_iteration(A, k)
        error = aposterior_error(A, eigenvalue, eigenvalue_predicted, eigenvector_predicted)
    return k, error

# ### Power iterations method test

# +
# Dominant eigenvalue = the one with the largest magnitude.
eigenvalue = eig(A)[0][np.argmax(np.abs(eig(A)[0]))]
k, error = power_iteration_stats(A, eigenvalue)
dominant_eigenvalue, dominant_eigenvector = power_iteration(A, k)

print("\nDominant eigenvalue: \n{}".format(dominant_eigenvalue))
print("\nDominant eigenvector: \n{}".format(dominant_eigenvector))
print("\nNumber of iterations to reach given aposterior error: \n{}".format(k))
print("\nResult error: \n{}".format(error))
# -

# # Scalar product method

def scalar_product(A, k):
    """Calculating dominant eigenvalue and eigenvector using scalar product method.

    Iterates v with A and h with A.T (two-sided iteration); the eigenvalue
    estimate is the ratio of scalar products (A v, A.T h) / (v, A.T h).

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        k (int): number of iterations.
    Returns:
        ev_new (float): dominant eigenvalue.
        v_new (ndarray<float>): dominant eigenvector.
    """
    n = A.shape[0]
    v = np.ones(n) / np.sqrt(n)
    h = np.ones(n) / np.sqrt(n)
    for i in range(k):
        v_new = np.dot(A, v)
        v_new = v_new / norm(v_new)
        h_new = np.dot(A.T, h)
        h_new = h_new / norm(h_new)
        ev_new = (np.dot(np.dot(A, v_new), np.dot(A.T, h_new))
                  / np.dot(v_new, np.dot(A.T, h_new)))
        v = v_new
        h = h_new
    return ev_new, v_new

def scalar_product_stats(A, eigenvalue, eps=1e-6):
    """Calculating number of iterations to reach given aposterior error.

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        eigenvalue (float): true eigenvalue.
        eps (float): error to reach.
    Returns:
        k (int): number of iteraions to reach eps error.
        error (float): result aposterior error.
    """
    k = 1
    eigenvalue_predicted, eigenvector_predicted = scalar_product(A, k)
    error = aposterior_error(A, eigenvalue, eigenvalue_predicted, eigenvector_predicted)
    while (error > eps):
        k += 1
        eigenvalue_predicted, eigenvector_predicted = scalar_product(A, k)
        error = aposterior_error(A, eigenvalue, eigenvalue_predicted, eigenvector_predicted)
    return k, error

# ### Scalar product method test

# +
eigenvalue = eig(A)[0][np.argmax(np.abs(eig(A)[0]))]
k, error = scalar_product_stats(A, eigenvalue)
dominant_eigenvalue, dominant_eigenvector = scalar_product(A, k)

print("\nDominant eigenvalue: \n{}".format(dominant_eigenvalue))
print("\nDominant eigenvector: \n{}".format(dominant_eigenvector))
print("\nNumber of iterations to reach given aposterior error: \n{}".format(k))
print("\nResult error: \n{}".format(error))
# -

# # Reverse spectrum bound

def reverse_spectrum_bound(A, eigenvalue, k):
    """Reverse to eigenvalue spectrum bound.

    Shifts the spectrum by `eigenvalue` (B = A - lambda*I), finds B's
    dominant eigenvalue with power iteration, then shifts back — yielding
    the eigenvalue of A at the opposite end of the spectrum.

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        eigenvalue (float): eigenvalue to find reverse of.
        k (int): number of iteraions of power method.
    Returns:
        ev_reverse (float): reverse eigenvalue.
        v_reverse (ndarray<float>): reverse eigenvector.
    """
    n = A.shape[0]
    B = A - eigenvalue * np.identity(n)
    eigenvalue_b, v_reverse = power_iteration(B, k)
    ev_reverse = eigenvalue_b + eigenvalue
    return ev_reverse, v_reverse

def reverse_spectrum_bound_stats(A, eigenvalue, eigenvalue_reversed, eps=1e-3):
    """Calculating number of iterations to reach given aposterior error.

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        eigenvalue (float): eigenvalue to find reverse of.
        eigenvalue_reversed (float): reversed eigenvalue.
        eps (float): error to reach.
    Returns:
        k (int): number of iteraions to reach eps error.
        error (float): result aposterior error.
    """
    k = 1
    eigenvalue_predicted, eigenvector_predicted = reverse_spectrum_bound(A, eigenvalue, k)
    error = aposterior_error(A, eigenvalue_reversed, eigenvalue_predicted, eigenvector_predicted)
    while (error > eps):
        k += 1
        eigenvalue_predicted, eigenvector_predicted = reverse_spectrum_bound(A, eigenvalue, k)
        error = aposterior_error(A, eigenvalue_reversed, eigenvalue_predicted, eigenvector_predicted)
    return k, error

# ### Reversed spectrum bound test

# +
eigenvalue = eig(A)[0][np.argmax(np.abs(eig(A)[0]))]
# The "reverse" bound lies at the opposite end of the spectrum.
eigenvalue_reversed = np.max(eig(A)[0]) if (eigenvalue <= 0) else np.min(eig(A)[0])
k, error = reverse_spectrum_bound_stats(A, eigenvalue, eigenvalue_reversed)
ev_reversed, v_reversed = reverse_spectrum_bound(A, eigenvalue, k)

print("\nReversed eigenvalue: \n{}".format(ev_reversed))
print("\nReversed eigenvector: \n{}".format(v_reversed))
print("\nNumber of iterations to reach given aposterior error: \n{}".format(k))
print("\nResult error: \n{}".format(error))
# -

# # Inverse iterations method

def inverse_iterations(A, eigenvalue=-1.2, eps=1e-3):
    """Calculating dominant eigenvalue and eigenvector using inverese iterations method.

    Starting from the shift `eigenvalue`, repeatedly solves
    (A - ev*I) v_new = v and refines ev via the dominant eigenvalue of
    the inverse shifted matrix, until successive estimates differ < eps.

    Args:
        A (ndarray<ndarray, ndarray>): matrix to handle.
        eigenvalue (float): eigenvalue approximation.
        eps (float): error to reach.
    Returns:
        ev_new (float): dominant eigenvalue.
        v_new (ndarray<float>): dominant eigenvector.
        k (int): number of iteraions to reach eps error.
        error (float): result aposterior error.
    """
    n = A.shape[0]
    k = 1
    v = np.ones(n) / np.sqrt(n)
    ev = eigenvalue
    error = 1
    while (error > eps):
        k += 1
        W = A - ev * np.identity(n)
        # One inverse-iteration step: solve rather than invert for v.
        v_new = solve(W, v)
        v_new = v_new / norm(v_new)
        # mu ~ dominant eigenvalue of inv(W); 1/mu + ev refines the shift.
        mu, _ = scalar_product(inv(W), k)
        ev_new = (1 / mu) + ev
        error = np.abs(ev_new - ev)
        ev = ev_new
        v = v_new
    return ev_new, v_new, k, error

# ### Inverse iterations method test

# +
eigenvalue = -1.2
dominant_eigenvalue, dominant_eigenvector, k, error = inverse_iterations(A, eigenvalue)

print("\nEigenvalue approximation: \n{}".format(eigenvalue))
print("\nDominant eigenvalue: \n{}".format(dominant_eigenvalue))
print("\nDominant eigenvector: \n{}".format(dominant_eigenvector))
print("\nNumber of iterations to reach given aposterior error: \n{}".format(k))
print("\nResult error: \n{}".format(error))
EigenvalueEigenvector/EigenvalueEigenvector.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scrape Mars news, images, weather, facts and hemisphere photos.
# Requires a network connection and a local chromedriver binary.

# import dependencies
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time

# connect to chromedriver (headless=False so the window is visible)
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless = False)

# link to NASA Mars news website
url = "https://mars.nasa.gov/news/"
browser.visit(url)

# parse through <ul> and <li> elements to find first article title
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
article = soup.select_one('ul.item_list li.slide')
article

# print title of first article
news_title = article.find('div', class_='content_title').text
print(news_title)

# print teaser paragraph of first article
news_p = article.find('div', class_ = 'article_teaser_body').text
print(news_p)

# new path to NASA JPL website
url2 = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url2)

# parse through website to find featured image's url:
# the URL is embedded in the inline CSS background-image style,
# so strip the "background-image: url('...')" wrapper and quotes.
html2 = browser.html
soup2 = BeautifulSoup(html2, 'html.parser')
featured_image = soup2.find('article')['style'].replace('background-image: url(','').replace(');', '')[1:-1]
base_url = 'https://www.jpl.nasa.gov'
featured_image_url = base_url + featured_image
print(featured_image_url)

# new path to Mars Twitter weather; sleep so dynamic content can load
url3 = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url3)
time.sleep(5)
html3 = browser.html
soup3 = BeautifulSoup(html3, 'html.parser')
mars_weather_tweet = soup3.find("div", attrs={
                                "class": "tweet",
                                "data-name": "<NAME>"
                                })
mars_weather = mars_weather_tweet.find("p", "tweet-text").get_text()
print(mars_weather)

# scrape facts about Mars using pandas
mars_data = pd.read_html('https://space-facts.com/mars/')[0]
# BUG FIX: the frame was assigned to `mars_dataf` while every following
# line used `mars_df`, raising NameError — use one consistent name.
mars_df = pd.DataFrame(mars_data)
mars_df.columns = ["Description", "Value"]
mars_table = mars_df.to_html(header = False, index = False)
mars_df

# print html table of Mars facts
print(mars_table)

# +
# new path to website containing Mars hemisphere images
url4 = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url4)
html4 = browser.html
soup4 = BeautifulSoup(html4, 'html.parser')

# collect {"title": ..., "img_url": ...} for each of the four hemispheres
hemisphere_img_urls = []
articles = soup4.find('div', class_ = 'result-list')
imgs = articles.find_all('div', class_ = 'item')
for img in imgs:
    title = img.find('h3').text
    title = title.replace(" Enhanced", "")
    link = img.find('a')['href']
    full_url = 'https://astrogeology.usgs.gov/' + link
    # follow each hemisphere's detail page to reach the full-size download
    browser.visit(full_url)
    html_link = browser.html
    soup = BeautifulSoup(html_link, 'html.parser')
    downloads = soup.find('div', class_ = 'downloads')
    img_url = downloads.find('a')['href']
    hemisphere_img_urls.append({"title": title, "img_url": img_url})
# -

hemisphere_img_urls
Mission_to_Mars/.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernel_info:
#     name: dev
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
import joblib

# # Read the CSV and Perform Basic Data Cleaning

df = pd.read_csv("../Data/exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis = "columns", how = "all")
# Drop the null rows
df = df.dropna()
df.head()

# # Select your features (columns)

# Set features. This will also be used as your x values.
X = df.drop("koi_disposition", axis = 1)

# # Create a Train Test Split
#
# Use `koi_disposition` for the y values

y = df["koi_disposition"]
print(X.shape, y.shape)

# stratify=y keeps class proportions identical in train and test splits
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 42, stratify = y)

X_train.head()

# # Pre-processing
#
# Scale the data using the MinMaxScaler and perform some feature selection

# Scale your data: fit the scaler on the training split only, then apply
# the same transform to both splits (no test-set leakage).
X_scaler = MinMaxScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)

# # Train the Model using RandomForestClassifier

# +
classifier = RandomForestClassifier(n_estimators = 200, random_state = 42)
classifier.fit(X_train_scaled, y_train)
training_score = classifier.score(X_train_scaled, y_train)
base_accuracy = classifier.score(X_test_scaled, y_test)
print(f"RandomForestClassifier training Data Score: {training_score}")
print(f"RandomForestClassifier testing Data Score: {base_accuracy}")
# -

# # Hyperparameter Tuning
#
# Use `GridSearchCV` to tune the model's parameters

# Create the GridSearchCV model
# NOTE(review): max_features="auto" was removed in scikit-learn >= 1.3 —
# confirm the pinned sklearn version before re-running this grid.
param_grid = {"n_estimators": [200, 600, 1200, 1400],
              "max_features": ["auto", "sqrt", "log2"],
              "max_depth": [14, 15, 16, 17, 18, None]}
grid = GridSearchCV(classifier, param_grid, error_score = "raise", verbose = 3, cv = 5, n_jobs = -1)

# Train the model with GridSearch (exhaustive 5-fold CV over the grid)
grid.fit(X_train_scaled, y_train)

print(f"Best grid params: {grid.best_params_}")
print(f"Best grid score: {grid.best_score_}")

# # Train Tuned Model

# +
# Tuned parameters taken from the grid search winner.
max_features = grid.best_params_["max_features"]
n_estimators = grid.best_params_["n_estimators"]
max_depth = grid.best_params_["max_depth"]
# criterion was not part of the grid; fixed to entropy here.
criterion = "entropy"

# Tuned model
tuned_model = RandomForestClassifier(max_features = max_features, n_estimators = n_estimators,
                                     criterion = criterion, max_depth = max_depth, random_state = 42)
tuned_model.fit(X_train_scaled, y_train)
tuned_model_score = tuned_model.score(X_train_scaled, y_train)
tuned_accuracy = tuned_model.score(X_test_scaled, y_test)
print(f"Training Data Score: {tuned_model_score}")
print(f"Testing Data Score: {tuned_accuracy}")
# -

# +
# Make predictions with the hypertuned model
predictions = tuned_model.predict(X_test_scaled)
classifications = y_test.unique().tolist()

prediction_actual = {"Actual": y_test, "Prediction": predictions}
prediction_df = pd.DataFrame(prediction_actual)
prediction_df = prediction_df.set_index("Actual").reset_index()
prediction_df
# -

# # Accuracy Report

# Per-class precision/recall/F1 for the three KOI dispositions.
target_names = ["CANDIDATE", "CONFIRMED", "FALSE POSITIVE"]
report = classification_report(y_test, predictions, target_names = target_names)
print(report)

# +
# Side-by-side accuracy comparison, persisted for the write-up.
evaluations = {"": ["Base Model", "Tuned Model"],
               "Accuracy": [f"%s" % round(base_accuracy, 3), f"%s" % round(tuned_accuracy, 3)]}
evaluations_df = pd.DataFrame(evaluations)
evaluations_df = evaluations_df.set_index("")
evaluations_df.to_csv("../Evaluations/random_forest_eval.csv")
evaluations_df
# -

# # Save the Model

# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
# NOTE(review): this persists the untuned base `classifier`, not
# `tuned_model` — confirm which model is meant to be saved.
filename = "../Models/random_forest_classifier.sav"
joblib.dump(classifier, filename)
jupyter_notebook/random_forest_classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Predict Future Sales: preprocessing + LightGBM baseline
#
# Loads the Kaggle "Predict Future Sales" data, removes outliers, builds a
# (shop, item, month) grid with lag / target-encoding features, then trains a
# LightGBM regressor on monthly sales clipped to [0, 20] and writes a submission.

import pandas as pd

test = pd.read_csv(
    '../input/competitive-data-science-predict-future-sales/test.csv')
sales = pd.read_csv(
    '../input/competitive-data-science-predict-future-sales/sales_train.csv')
shops = pd.read_csv(
    '../input/competitive-data-science-predict-future-sales/shops.csv')
items = pd.read_csv(
    '../input/competitive-data-science-predict-future-sales/items.csv')
item_cats = pd.read_csv(
    '../input/competitive-data-science-predict-future-sales/item_categories.csv')

# Visual outlier check: a few items sell extremely often / cost extremely much.
import seaborn as sns
sns.boxplot(x=sales.item_cnt_day)
sns.boxplot(x=sales.item_price)

# Shops 0/57, 1/58 and 40/39 have near-identical names -> treat as duplicates.
print(shops[shops.shop_id.isin([0, 57])]['shop_name'])
print(shops[shops.shop_id.isin([1, 58])]['shop_name'])
print(shops[shops.shop_id.isin([40, 39])]['shop_name'])

# # Process outliers: drop zero/huge prices and extreme daily counts.
train = sales[(sales.item_price < 100000) & (sales.item_price > 0)]
# BUG FIX: the original filtered with a mask built on the UNfiltered frame
# (`train[sales.item_cnt_day < 1001]`), relying on index alignment between
# `sales` and the already-filtered `train`. Build the mask on `train` itself.
train = train[train.item_cnt_day < 1001]
# Work on an explicit copy so the .loc writes below don't target a view of
# `sales` (SettingWithCopyWarning / potentially lost updates).
train = train.copy()

# Merge duplicate shop ids in both train and test.
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 40, 'shop_id'] = 39
test.loc[test.shop_id == 40, 'shop_id'] = 39

train.head()

# # Grouping block number: full (shop, item) grid for every month in train.
import numpy as np
from itertools import product

index_cols = ['shop_id', 'item_id', 'date_block_num']
grid = []
for block_num in train['date_block_num'].unique():
    # BUG FIX: the original indexed `train` with masks computed on `sales`
    # (`train.loc[sales['date_block_num'] == block_num, ...]`); the two frames
    # no longer share an index after filtering. Use `train` consistently.
    cur_shops = train.loc[train['date_block_num'] == block_num, 'shop_id'].unique()
    cur_items = train.loc[train['date_block_num'] == block_num, 'item_id'].unique()
    grid.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])),
                         dtype='int32'))
df = pd.DataFrame(np.vstack(grid), columns=index_cols, dtype=np.int32)

# # Adding monthly sales per (month, shop, item); pairs absent from train sold 0.
# Clip to [0, 20] to match the competition's target clipping.
group = train.groupby(['date_block_num', 'shop_id', 'item_id']).agg(
    {'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
df = pd.merge(df, group, on=index_cols, how='left')
df['item_cnt_month'] = (df['item_cnt_month']
                        .fillna(0)
                        .clip(0, 20)
                        .astype(np.float16))

# # Adding test dataset as month 34.
test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16)
test = test.drop(columns=['ID'])
df = pd.concat([df, test], ignore_index=True, sort=False, keys=index_cols)
df.fillna(0, inplace=True)

# # Adding features
# * city code / coordinates / coarse "country part"
# * item category
# * weekend count
# * first-interaction flags
# * lag features

# **city features** — city is the first word of the shop name.
from sklearn.preprocessing import LabelEncoder

shops['city'] = shops['shop_name'].apply(lambda x: x.split()[0].lower())
shops.loc[shops.city == '!якутск', 'city'] = 'якутск'  # fix typo'd city name
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])

# (lat, lon, country-part bucket); (0, 0, 0) for online / non-geographic "shops".
coords = dict()
coords['якутск'] = (62.028098, 129.732555, 4)
coords['адыгея'] = (44.609764, 40.100516, 3)
coords['балашиха'] = (55.8094500, 37.9580600, 1)
coords['волжский'] = (53.4305800, 50.1190000, 3)
coords['вологда'] = (59.2239000, 39.8839800, 2)
coords['воронеж'] = (51.6720400, 39.1843000, 3)
coords['выездная'] = (0, 0, 0)
coords['жуковский'] = (55.5952800, 38.1202800, 1)
coords['интернет-магазин'] = (0, 0, 0)
coords['казань'] = (55.7887400, 49.1221400, 4)
coords['калуга'] = (54.5293000, 36.2754200, 4)
coords['коломна'] = (55.0794400, 38.7783300, 4)
coords['красноярск'] = (56.0183900, 92.8671700, 4)
coords['курск'] = (51.7373300, 36.1873500, 3)
coords['москва'] = (55.7522200, 37.6155600, 1)
coords['мытищи'] = (55.9116300, 37.7307600, 1)
coords['н.новгород'] = (56.3286700, 44.0020500, 4)
coords['новосибирск'] = (55.0415000, 82.9346000, 4)
coords['омск'] = (54.9924400, 73.3685900, 4)
coords['ростовнадону'] = (47.2313500, 39.7232800, 3)
coords['спб'] = (59.9386300, 30.3141300, 2)
coords['самара'] = (53.2000700, 50.1500000, 4)
coords['сергиев'] = (56.3000000, 38.1333300, 4)
coords['сургут'] = (61.2500000, 73.4166700, 4)
coords['томск'] = (56.4977100, 84.9743700, 4)
coords['тюмень'] = (57.1522200, 65.5272200, 4)
coords['уфа'] = (54.7430600, 55.9677900, 4)
coords['химки'] = (55.8970400, 37.4296900, 1)
coords['цифровой'] = (0, 0, 0)
coords['чехов'] = (55.1477000, 37.4772800, 4)
coords['ярославль'] = (57.6298700, 39.8736800, 2)

shops['city_coord_1'] = shops['city'].apply(lambda x: coords[x][0])
shops['city_coord_2'] = shops['city'].apply(lambda x: coords[x][1])
shops['country_part'] = shops['city'].apply(lambda x: coords[x][2])
shops = shops[['shop_id', 'city_code', 'city_coord_1', 'city_coord_2',
               'country_part']]
df = pd.merge(df, shops, on=['shop_id'], how='left')

# **item category features** — merge a few near-duplicate category names.
map_dict = {
    'Чистые носители (штучные)': 'Чистые носители',
    'Чистые носители (шпиль)': 'Чистые носители',
    'PC ': 'Аксессуары',
    'Служебные': 'Служебные '
}
items = pd.merge(items, item_cats, on='item_category_id')
items['item_category'] = items['item_category_name'].apply(lambda x: x.split('-')[0])
items['item_category'] = items['item_category'].apply(
    lambda x: map_dict[x] if x in map_dict.keys() else x)
items['item_category_common'] = LabelEncoder().fit_transform(items['item_category'])
items['item_category_code'] = LabelEncoder().fit_transform(items['item_category_name'])
items = items[['item_id', 'item_category_common', 'item_category_code']]
df = pd.merge(df, items, on=['item_id'], how='left')

# **calendar features**
import calendar

def count_days(date_block_num):
    """Return (#Sundays, #days in month, month number) for the given block.

    Block 0 is January 2013; `i[6]` indexes the Sunday column of
    calendar.monthcalendar's Monday-first week rows.
    """
    year = 2013 + date_block_num // 12
    month = 1 + date_block_num % 12
    weeknd_count = len([1 for i in calendar.monthcalendar(year, month) if i[6] != 0])
    days_in_month = calendar.monthrange(year, month)[1]
    return weeknd_count, days_in_month, month

map_dict = {i: count_days(i) for i in range(35)}
df['weeknd_count'] = df['date_block_num'].apply(lambda x: map_dict[x][0])
df['days_in_month'] = df['date_block_num'].apply(lambda x: map_dict[x][1])

# **first-interaction features**
# Month in which each item first appears anywhere.
first_item_block = df.groupby(['item_id'])['date_block_num'].min().reset_index()
first_item_block['item_first_interaction'] = 1

# Month in which each (shop, item) pair first appears (blocks > 0 only).
first_shop_item_buy_block = df[df['date_block_num'] > 0].groupby(
    ['shop_id', 'item_id'])['date_block_num'].min().reset_index()
first_shop_item_buy_block['first_date_block_num'] = \
    first_shop_item_buy_block['date_block_num']

df = pd.merge(df,
              first_item_block[['item_id', 'date_block_num',
                                'item_first_interaction']],
              on=['item_id', 'date_block_num'], how='left')
df = pd.merge(df,
              first_shop_item_buy_block[['item_id', 'shop_id',
                                         'first_date_block_num']],
              on=['item_id', 'shop_id'], how='left')

# 100 = sentinel for "this pair never sold"; flag pairs sold before this block.
df['first_date_block_num'].fillna(100, inplace=True)
df['shop_item_sold_before'] = (df['first_date_block_num'] <
                               df['date_block_num']).astype('int8')
df.drop(['first_date_block_num'], axis=1, inplace=True)

df['item_first_interaction'].fillna(0, inplace=True)
df['shop_item_sold_before'].fillna(0, inplace=True)
df['item_first_interaction'] = df['item_first_interaction'].astype('int8')
df['shop_item_sold_before'] = df['shop_item_sold_before'].astype('int8')

del first_item_block
del first_shop_item_buy_block

# # lag feature function
def lag_feature(df, lags, col):
    """Add `col` shifted forward by each lag, per (shop, item), as col_lag_<i>."""
    tmp = df[['date_block_num', 'shop_id', 'item_id', col]]
    for i in lags:
        shifted = tmp.copy()
        shifted.columns = ['date_block_num', 'shop_id', 'item_id',
                           col + '_lag_' + str(i)]
        shifted['date_block_num'] += i
        df = pd.merge(df, shifted,
                      on=['date_block_num', 'shop_id', 'item_id'], how='left')
        df[col + '_lag_' + str(i)] = df[col + '_lag_' + str(i)].astype('float16')
    return df

# Sales lags for the last 3 months.
df = lag_feature(df, [1, 2, 3], 'item_cnt_month')

# **avg shop/item price and their relative difference (lagged, then dropped)**
index_cols = ['shop_id', 'item_id', 'date_block_num']
group = train.groupby(index_cols)['item_price'].mean().reset_index().rename(
    columns={"item_price": "avg_shop_price"}, errors="raise")
df = pd.merge(df, group, on=index_cols, how='left')
df['avg_shop_price'] = (df['avg_shop_price']
                        .fillna(0)
                        .astype(np.float16))

index_cols = ['item_id', 'date_block_num']
group = train.groupby(['date_block_num', 'item_id'])['item_price'].mean()\
    .reset_index().rename(columns={"item_price": "avg_item_price"}, errors="raise")
df = pd.merge(df, group, on=index_cols, how='left')
df['avg_item_price'] = (df['avg_item_price']
                        .fillna(0)
                        .astype(np.float16))

df['item_shop_price_avg'] = (df['avg_shop_price'] - df['avg_item_price']) \
    / df['avg_item_price']
df['item_shop_price_avg'].fillna(0, inplace=True)
df = lag_feature(df, [1, 2, 3], 'item_shop_price_avg')
df.drop(['avg_shop_price', 'avg_item_price', 'item_shop_price_avg'],
        axis=1, inplace=True)

# **target encoding for item/city, lagged 3 months**
item_id_target_mean = df.groupby(
    ['date_block_num', 'item_id', 'city_code'])['item_cnt_month'].mean()\
    .reset_index().rename(columns={"item_cnt_month": "item_loc_target_enc"},
                          errors="raise")
df = pd.merge(df, item_id_target_mean,
              on=['date_block_num', 'item_id', 'city_code'], how='left')
df['item_loc_target_enc'] = (df['item_loc_target_enc']
                             .fillna(0)
                             .astype(np.float16))
df = lag_feature(df, [1, 2, 3], 'item_loc_target_enc')
df.drop(['item_loc_target_enc'], axis=1, inplace=True)

# **target encoding for item/shop, lagged 3 months**
item_id_target_mean = df.groupby(
    ['date_block_num', 'item_id', 'shop_id'])['item_cnt_month'].mean()\
    .reset_index().rename(columns={"item_cnt_month": "item_shop_target_enc"},
                          errors="raise")
df = pd.merge(df, item_id_target_mean,
              on=['date_block_num', 'item_id', 'shop_id'], how='left')
df['item_shop_target_enc'] = (df['item_shop_target_enc']
                              .fillna(0)
                              .astype(np.float16))
df = lag_feature(df, [1, 2, 3], 'item_shop_target_enc')
df.drop(['item_shop_target_enc'], axis=1, inplace=True)

# **for new items: avg category sales, lagged 3 months**
item_id_target_mean = df[df['item_first_interaction'] == 1].groupby(
    ['date_block_num', 'item_category_code'])['item_cnt_month'].mean()\
    .reset_index().rename(columns={"item_cnt_month": "new_item_cat_avg"},
                          errors="raise")
df = pd.merge(df, item_id_target_mean,
              on=['date_block_num', 'item_category_code'], how='left')
df['new_item_cat_avg'] = (df['new_item_cat_avg']
                          .fillna(0)
                          .astype(np.float16))
df = lag_feature(df, [1, 2, 3], 'new_item_cat_avg')
df.drop(['new_item_cat_avg'], axis=1, inplace=True)

# **for new items: avg category sales per shop, lagged 3 months**
item_id_target_mean = df[df['item_first_interaction'] == 1].groupby(
    ['date_block_num', 'item_category_code', 'shop_id'])['item_cnt_month'].mean()\
    .reset_index().rename(columns={"item_cnt_month": "new_item_shop_cat_avg"},
                          errors="raise")
df = pd.merge(df, item_id_target_mean,
              on=['date_block_num', 'item_category_code', 'shop_id'], how='left')
df['new_item_shop_cat_avg'] = (df['new_item_shop_cat_avg']
                               .fillna(0)
                               .astype(np.float16))
df = lag_feature(df, [1, 2, 3], 'new_item_shop_cat_avg')
df.drop(['new_item_shop_cat_avg'], axis=1, inplace=True)

# **lags for neighbouring item ids (item_id +/- 1)**
def lag_feature_adv(df, lags, col):
    """Lagged `col` of the neighbouring item with id = item_id + 1 (same shop)."""
    tmp = df[['date_block_num', 'shop_id', 'item_id', col]]
    for i in lags:
        shifted = tmp.copy()
        shifted.columns = ['date_block_num', 'shop_id', 'item_id',
                           col + '_lag_' + str(i) + '_adv']
        shifted['date_block_num'] += i
        shifted['item_id'] -= 1  # row for item j lands on item j-1 after merge
        df = pd.merge(df, shifted,
                      on=['date_block_num', 'shop_id', 'item_id'], how='left')
        df[col + '_lag_' + str(i) + '_adv'] = \
            df[col + '_lag_' + str(i) + '_adv'].astype('float16')
    return df

df = lag_feature_adv(df, [1, 2, 3], 'item_cnt_month')

def lag_feature_adv2(df, lags, col):
    """Lagged `col` of the neighbouring item with id = item_id - 1 (same shop)."""
    tmp = df[['date_block_num', 'shop_id', 'item_id', col]]
    for i in lags:
        shifted = tmp.copy()
        shifted.columns = ['date_block_num', 'shop_id', 'item_id',
                           col + '_lag_' + str(i) + '_adv2']
        shifted['date_block_num'] += i
        shifted['item_id'] += 1  # row for item j lands on item j+1 after merge
        df = pd.merge(df, shifted,
                      on=['date_block_num', 'shop_id', 'item_id'], how='left')
        df[col + '_lag_' + str(i) + '_adv2'] = \
            df[col + '_lag_' + str(i) + '_adv2'].astype('float16')
    return df

df = lag_feature_adv2(df, [1, 2, 3], 'item_cnt_month')

# Drop the first three months (their lag columns are empty) and shops 9/20
# (absent from the test set).
df.fillna(0, inplace=True)
df = df[(df['date_block_num'] > 2)]
df = df[(df['shop_id'] != 9)]
df = df[(df['shop_id'] != 20)]
df.head()
df.columns
df.to_pickle('df.pkl')

# # Train model
# **prepare dataset** — months < 33 train, 33 validation, 34 test.
import pandas as pd
df = pd.read_pickle('df.pkl')
X_train = df[df.date_block_num < 33].drop(['item_cnt_month'], axis=1)
Y_train = df[df.date_block_num < 33]['item_cnt_month']
X_valid = df[df.date_block_num == 33].drop(['item_cnt_month'], axis=1)
Y_valid = df[df.date_block_num == 33]['item_cnt_month']
X_test = df[df.date_block_num == 34].drop(['item_cnt_month'], axis=1)
del df

# **train one easy lightgbm**
import lightgbm as lgb

feature_name = X_train.columns.tolist()
params = {
    'objective': 'mse',
    'metric': 'rmse',
    'num_leaves': 2 ** 8 - 1,
    'learning_rate': 0.005,
    'feature_fraction': 0.8,
    'bagging_fraction': 0.75,
    'bagging_freq': 5,
    'seed': 1,
    'verbose': 1
}
feature_name_indexes = [
    'country_part',
    'item_category_common',
    'item_category_code',
    'city_code',
]

lgb_train = lgb.Dataset(X_train[feature_name], Y_train)
lgb_eval = lgb.Dataset(X_valid[feature_name], Y_valid, reference=lgb_train)
evals_result = {}
gbm = lgb.train(
    params,
    lgb_train,
    num_boost_round=3000,
    valid_sets=(lgb_train, lgb_eval),
    feature_name=feature_name,
    categorical_feature=feature_name_indexes,
    verbose_eval=50,
    evals_result=evals_result,
    early_stopping_rounds=30)

# Re-read test (with ID intact) and write the clipped predictions.
test = pd.read_csv(
    '../input/competitive-data-science-predict-future-sales/test.csv')
Y_test = gbm.predict(X_test[feature_name]).clip(0, 20)
submission = pd.DataFrame({
    "ID": test.index,
    "item_cnt_month": Y_test
})
submission.to_csv('gbm_submission.csv', index=False)
siming_yan/predict-sales-problem-step-by-step-part1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Weight Space Linearization
#
# Copyright 2019 Google LLC. Licensed under the Apache License, Version 2.0
# (the "License"); see https://www.apache.org/licenses/LICENSE-2.0.
#
# Trains a small fully-connected network on MNIST alongside its first-order
# (weight-space) linearization and prints both losses per epoch.

# Install JAX, TensorFlow Datasets, and Neural Tangents (Colab):
# !pip install -q tensorflow-datasets
# !pip install -q git+https://www.github.com/google/neural-tangents

# FIX: `jax.api` was a private module and has been removed from JAX;
# `jit` and `grad` are part of the public top-level `jax` namespace
# (valid on both old and current JAX releases).
from jax import jit
from jax import grad
from jax import random
import jax.numpy as np  # NOTE: deliberately shadows the usual NumPy `np` alias
# NOTE(review): `jax.experimental.stax` / `jax.experimental.optimizers` were
# relocated to `jax.example_libraries` in later JAX versions — confirm against
# the pinned JAX version before upgrading.
from jax.experimental.stax import logsoftmax
from jax.experimental import optimizers
import tensorflow_datasets as tfds

import neural_tangents as nt
from neural_tangents import stax


def process_data(data_chunk):
    """Flatten the images and one-hot encode the labels."""
    image, label = data_chunk['image'], data_chunk['label']
    samples = image.shape[0]
    image = np.array(np.reshape(image, (samples, -1)), dtype=np.float32)
    # Normalize with the batch's own mean/std.
    image = (image - np.mean(image)) / np.std(image)
    label = np.eye(10)[label]
    return {'image': image, 'label': label}


# Experiment parameters.
learning_rate = 1.0
batch_size = 128
training_epochs = 5
steps_per_epoch = 50000 // batch_size

# MNIST data pipeline via TensorFlow Datasets.
train_data = tfds.load('mnist:3.*.*', split=tfds.Split.TRAIN)
train_data = tfds.as_numpy(
    train_data.shuffle(1024).batch(batch_size).repeat(training_epochs))
test_data = tfds.load('mnist:3.*.*', split=tfds.Split.TEST)

# Fully-connected network: 784 -> 512 -> Erf -> 10.
init_fn, f, _ = stax.serial(
    stax.Dense(512, 1., 0.05),
    stax.Erf(),
    stax.Dense(10, 1., 0.05))

key = random.PRNGKey(0)
_, params = init_fn(key, (-1, 784))

# First-order Taylor expansion of f around its initial parameters.
f_lin = nt.linearize(f, params)

# One momentum-optimizer state for the full network, one for the linearization.
opt_init, opt_apply, get_params = optimizers.momentum(learning_rate, 0.9)
state = opt_init(params)
lin_state = opt_init(params)

# Cross-entropy loss over one-hot targets.
loss = lambda fx, y_hat: -np.mean(logsoftmax(fx) * y_hat)

# Parameter gradients for the network and its linearization, jitted.
grad_loss = jit(grad(lambda params, x, y: loss(f(params, x), y)))
grad_lin_loss = jit(grad(lambda params, x, y: loss(f_lin(params, x), y)))

# Train the network and its linearization side by side.
print('Epoch\tLoss\tLinear Loss')
epoch = 0
for i, batch in enumerate(train_data):
    batch = process_data(batch)
    X, Y = batch['image'], batch['label']

    params = get_params(state)
    state = opt_apply(i, grad_loss(params, X, Y), state)

    lin_params = get_params(lin_state)
    lin_state = opt_apply(i, grad_lin_loss(lin_params, X, Y), lin_state)

    if i % steps_per_epoch == 0:
        print('{}\t{:.4f}\t{:.4f}'.format(
            epoch, loss(f(params, X), Y), loss(f_lin(lin_params, X), Y)))
        epoch += 1
notebooks/weight_space_linearization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Theano basics: scalar expressions, array sums, and shared variables.

import theano
from theano import tensor as T

# --- Scalar net input: z1 = w1 * x1 + w0 ---
# initialize
x1 = T.scalar()
w1 = T.scalar()
w0 = T.scalar()
z1 = w1 * x1 + w0

# compile
net_input = theano.function(inputs=[w1, x1, w0], outputs=z1)

# execute
# FIX: message typo ("Ne input" -> "Net input").
print('Net input: %.2f' % net_input(2.0, 1.0, 0.5))

# Switch Theano to 32-bit mode.
theano.config.floatX = 'float32'

# --- Working with array structures: column sums ---
import numpy as np

# initialize
x = T.fmatrix(name='x')
x_sum = T.sum(x, axis=0)

# compile
calc_sum = theano.function(inputs=[x], outputs=x_sum)

# execute with a plain Python list...
ary = [[1, 2, 3], [1, 2, 3]]
print('Column sum:', calc_sum(ary))

# ...and with a NumPy array of Theano's configured float dtype.
ary = np.array([[1, 2, 3], [1, 2, 3]], dtype=theano.config.floatX)
print('Column sum:', calc_sum(ary))

# --- Shared variable: w is incremented by 1 on every call ---
# initialize
x = T.fmatrix('x')
w = theano.shared(np.asarray([[0.0, 0.0, 0.0]], dtype=theano.config.floatX))
z = x.dot(w.T)
update = [[w, w + 1.0]]

# compile (the `updates` argument mutates `w` each time net_input runs)
net_input = theano.function(inputs=[x], updates=update, outputs=z)

# execute: each call uses the current w, then bumps it.
data = np.array([[1, 2, 3]], dtype=theano.config.floatX)
for i in range(5):
    print('z%d' % i, net_input(data))
ch13/.ipynb_checkpoints/01-using-theano-basics-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + run_control={"frozen": false, "read_only": false} import sys sys.path[0:0] = ['../..','../../3rdparty'] # Put these at the head of the search path from jove.DotBashers import * from jove.Def_md2mc import * from jove.Def_DFA import * # + run_control={"frozen": false, "read_only": false} ev0end1 = md2mc(''' DFA I : 0 -> A A : 0 | 1 -> I I : 1 -> F F : 0 | 1 -> I ''') # + run_control={"frozen": false, "read_only": false} doev0end1 = dotObj_dfa(ev0end1) # + run_control={"frozen": false, "read_only": false} ev0end1 # + run_control={"frozen": false, "read_only": false} doev0end1.source # + run_control={"frozen": false, "read_only": false} is_partially_consistent_dfa(ev0end1) # + run_control={"frozen": false, "read_only": false} tev0end1 = totalize_dfa(ev0end1) # + run_control={"frozen": false, "read_only": false} dotObj_dfa_w_bh(tev0end1) # + run_control={"frozen": false, "read_only": false} dotObj_dfa_w_bh(tev0end1, FuseEdges=True) # + run_control={"frozen": false, "read_only": false} ev0end1 # + run_control={"frozen": false, "read_only": false} ev0 = md2mc(''' DFA IF : 0 -> A A : 0 -> IF ''') # + run_control={"frozen": false, "read_only": false} ev0 # + run_control={"frozen": false, "read_only": false} dev0 = dotObj_dfa(ev0) # + run_control={"frozen": false, "read_only": false} dev0 # + run_control={"frozen": false, "read_only": false} dev0.source # + run_control={"frozen": false, "read_only": false} dev0 # + run_control={"frozen": false, "read_only": false} ev0_bh = addtosigma_dfa(ev0, set({'1'})) # + run_control={"frozen": false, "read_only": false} ev0_bh # + run_control={"frozen": false, "read_only": false} ev0_bh_totalize = totalize_dfa(ev0_bh) # + run_control={"frozen": false, "read_only": false} ev0_bh # + run_control={"frozen": false, 
"read_only": false} do_ev0_tot = dotObj_dfa_w_bh(ev0_bh_totalize) # + run_control={"frozen": false, "read_only": false} do_ev0_tot.source # + run_control={"frozen": false, "read_only": false} do_ev0_tot # + run_control={"frozen": false, "read_only": false} dotObj_dfa_w_bh(ev0_bh_totalize, FuseEdges=True) # + [markdown] run_control={"frozen": false, "read_only": false} # <span style="color:blue"> **Here is how we will represent a DFA in Python (taking Figure 3.4's example from the book). You can clearly see how the traits of the DFA are encoded. We prefer a Python dictionary, as it supports a number of convenient operations, and also one can add additional fields easily. ** </span> # + code_folding=[] run_control={"frozen": false, "read_only": false} DFA_fig34 = { 'Q': {'A', 'IF', 'B'}, 'Sigma': {'0', '1'}, 'Delta': { ('IF', '0'): 'A', ('IF', '1'): 'IF', ('A', '0'): 'B', ('A', '1'): 'A', ('B', '0'): 'IF', ('B', '1'): 'B' }, 'q0': 'IF', 'F': {'IF'} } # + [markdown] run_control={"frozen": false, "read_only": false} # <span style="color:blue"> **We can now write routines to print DFA using dot. The main routines are listed below.** </span> # # * dot_dfa_w_bh : lists all states of a DFA including black-hole states # * dot_dfa : lists all isNotBH states (see below for a defn), i.e. suppress black-holes # - Usually there are too many transitions to them and that clutters the view # # + [markdown] run_control={"frozen": false, "read_only": false} # ====== # + code_folding=[] run_control={"frozen": false, "read_only": false} # Some tests pertaining to totalize_dfa, is_consistent_dfa, etc DFA_fig34 = { 'Q': {'A', 'IF', 'B'}, 'Sigma': {'0', '1'}, 'Delta': { ('IF', '0'): 'A', ('IF', '1'): 'IF', ('A', '0'): 'B', ('A', '1'): 'A', ('B', '0'): 'IF', ('B', '1'): 'B' }, 'q0': 'IF', 'F': {'IF'} } def tests_dfa_consist(): """Some tests wrt DFA routines. 
""" DFA_fig34_Q = DFA_fig34["Q"] DFA_fig34_Sigma = DFA_fig34["Sigma"] randQ = random.choice(list(DFA_fig34_Q)) randSym = random.choice(list(DFA_fig34_Sigma)) DFA_fig34_deepcopy = copy.deepcopy(DFA_fig34) print('is_consistent_dfa(DFA_fig34) =', is_consistent_dfa(DFA_fig34) ) print('Removing mapping for ' + "(" + randQ + "," + randSym + ")" + "from DFA_fig34_deepcopy") DFA_fig34_deepcopy["Delta"].pop((randQ,randSym)) print('is_consistent_dfa(DFA_fig34_deepcopy) =', is_consistent_dfa(DFA_fig34_deepcopy) ) totalized = totalize_dfa(DFA_fig34_deepcopy) print ( 'is_consistent_dfa(totalized) =', is_consistent_dfa(totalized) ) assert(totalized == totalize_dfa(totalized)) # Must pass # + run_control={"frozen": false, "read_only": false} dfaBESame = md2mc(''' DFA !! Begins and ends with same; epsilon allowed IF : 0 -> F0 IF : 1 -> F1 !! F0 : 0 -> F0 F0 : 1 -> S01 S01 : 1 -> S01 S01 : 0 -> F0 !! F1 : 1 -> F1 F1 : 0 -> S10 S10 : 0 -> S10 S10 : 1 -> F1 ''') DOdfaBESame = dotObj_dfa(dfaBESame) DOdfaBESame # + run_control={"frozen": false, "read_only": false} DOdfaBESame.source # + [markdown] run_control={"frozen": false, "read_only": false} # ### Let us now administer some tests to print dot-strings generated. # # We will demonstrate two ways to print automata: # # 1. First generate a dot string via dot_dfa or dot_dfa_w_bh # (calling the result "dot_string") # 1. Then use the srcObj = Source(dot_string) call # 2. Thereafter we can display the srcObj object directly into the browser # 3. Or, one can also later convert the dot_string to svg or PDF # 2. OR, one can directly generate a dot object via the dotObj_dfa or dotObj_dfa_w_bh call # (calling the result "dot_object") # 1. Then directly display the dot_object # 2. 
There are conversions available for dot_object to other formats too # + code_folding=[] run_control={"frozen": false, "read_only": false} DFA_fig34 = { 'Q': {'A', 'IF', 'B'}, 'Sigma': {'0', '1'}, 'Delta': { ('IF', '0'): 'A', ('IF', '1'): 'IF', ('A', '0'): 'B', ('A', '1'): 'A', ('B', '0'): 'IF', ('B', '1'): 'B' }, 'q0': 'IF', 'F': {'IF'} } def dfa_dot_tests(): """Some dot-routine related tests. """ dot_string = dot_dfa(DFA_fig34) dot_object1 = Source(dot_string) return dot_object1.source # + [markdown] run_control={"frozen": false, "read_only": false} # Let us test functions step_dfa, run_dfa, and accepts_dfa # + code_folding=[] run_control={"frozen": false, "read_only": false} # Some tests of step, run, etc. DFA_fig34 = { 'Q': {'A', 'IF', 'B'}, 'Sigma': {'0', '1'}, 'Delta': { ('IF', '0'): 'A', ('IF', '1'): 'IF', ('A', '0'): 'B', ('A', '1'): 'A', ('B', '0'): 'IF', ('B', '1'): 'B' }, 'q0': 'IF', 'F': {'IF'} } def step_run_accepts_tests(): print("step_dfa(DFA_fig34, 'IF', '1') = ", step_dfa(DFA_fig34, 'IF', '1')) print("step_dfa(DFA_fig34, 'A', '0') = ", step_dfa(DFA_fig34, 'A', '0')) print("run_dfa(DFA_fig34, '101001') = ", run_dfa(DFA_fig34, '101001')) print("run_dfa(DFA_fig34, '101000') = ", run_dfa(DFA_fig34, '101000')) print("accepts_dfa(DFA_fig34, '101001') = ", accepts_dfa(DFA_fig34, '101001')) print("accepts_dfa(DFA_fig34, '101000') = ", accepts_dfa(DFA_fig34, '101000')) # + run_control={"frozen": false, "read_only": false} dotObj_dfa(DFA_fig34, "DFA_fig34") # + code_folding=[] run_control={"frozen": false, "read_only": false} # Run a complementation test DFA_fig34_comp = comp_dfa(DFA_fig34) dotObj_dfa(DFA_fig34_comp, "DFA_fig34_comp") dotObj_dfa(DFA_fig34) dotObj_dfa(DFA_fig34_comp, "DFA_fig34_comp") # + run_control={"frozen": false, "read_only": false} dotObj_dfa(DFA_fig34_comp) # + run_control={"frozen": false, "read_only": false} # One more test du = union_dfa(DFA_fig34, DFA_fig34_comp) dotObj_dfa(du, "orig union") pdu = pruneUnreach(du) pdu pduObj = 
dotObj_dfa(pdu, "union of 34 and comp") pduObj # + code_folding=[] run_control={"frozen": false, "read_only": false} D34 = { 'Q': {'A', 'IF', 'B'}, 'Sigma': {'0', '1'}, 'Delta': { ('IF', '0'): 'A', ('IF', '1'): 'IF', ('A', '0'): 'B', ('A', '1'): 'A', ('B', '0'): 'IF', ('B', '1'): 'B' }, 'q0': 'IF', 'F': {'IF'} } D34bl = { 'Q': {'A', 'IF', 'B', 'A1', 'B1'}, 'Sigma': {'0', '1'}, 'Delta': { ('IF', '0'): 'A', ('IF', '1'): 'IF', ('A', '0'): 'B1', ('A', '1'): 'A1', ('A1', '0'): 'B', ('A1', '1'): 'A', ('B1', '0'): 'IF', ('B1', '1'): 'B', ('B','0') : 'IF', ('B', '1'): 'B1' }, 'q0': 'IF', 'F': {'IF'} } d34 = dotObj_dfa(D34, "D34") d34 # Display it! # + run_control={"frozen": false, "read_only": false} langeq_dfa(D34,D34bl,False) # + run_control={"frozen": false, "read_only": false} iso_dfa(D34,D34bl) # + run_control={"frozen": false, "read_only": false} DFA_fig34 d34 = DFA_fig34 d34 # + run_control={"frozen": false, "read_only": false} d34c = DFA_fig34_comp d34c # + run_control={"frozen": false, "read_only": false} iso_dfa(d34,d34) # + run_control={"frozen": false, "read_only": false} iso_dfa(d34,d34c) # + run_control={"frozen": false, "read_only": false} d34v1 = {'Delta': {('A', '0'): 'B', ('A', '1'): 'B', ('B', '0'): 'IF', ('B', '1'): 'B', ('IF', '0'): 'A', ('IF', '1'): 'IF'}, 'F': {'IF'}, 'Q': {'A', 'B', 'IF'}, 'Sigma': {'0', '1'}, 'q0': 'IF'} # + run_control={"frozen": false, "read_only": false} dotObj_dfa(d34v1) # + run_control={"frozen": false, "read_only": false} d34v2 = {'Delta': {('A', '0'): 'B', ('A', '1'): 'B', ('B', '0'): 'IF', ('B', '1'): 'B', ('IF', '0'): 'A', ('IF', '1'): 'IF'}, 'F': {'IF', 'B'}, 'Q': {'A', 'B', 'IF'}, 'Sigma': {'0', '1'}, 'q0': 'IF'} # + run_control={"frozen": false, "read_only": false} iso_dfa(d34,d34v1) # + run_control={"frozen": false, "read_only": false} iso_dfa(d34,d34v2) # + run_control={"frozen": false, "read_only": false} iso_dfa(d34v1,d34v2) # + run_control={"frozen": false, "read_only": false} div1 = 
# Jove DFA driver notebook: union/intersection, isomorphism/language-equivalence
# checks, and DFA minimization demos. All helpers (pruneUnreach, intersect_dfa,
# union_dfa, iso_dfa, langeq_dfa, dotObj_dfa, md2mc, min_dfa) come from the Jove
# library, imported in cells above this chunk.

# NOTE(review): this expression's result is discarded; presumably the original
# cell read `div1 = pruneUnreach(intersect_dfa(d34v1,d34v2))` — confirm upstream.
pruneUnreach(intersect_dfa(d34v1,d34v2))
dotObj_dfa(div1)

# +
div2 = pruneUnreach(union_dfa(d34v1,d34v2))
dotObj_dfa(div2)

# +
iso_dfa(div1,div2)

# +
langeq_dfa(div1,div2,True)

# +
d34bl = dotObj_dfa(D34bl, "D34bl")
d34bl # Display it!

# +
d34bl = dotObj_dfa(D34bl, FuseEdges=True, dfaName="D34bl")

# +
d34bl

# +
iso_dfa(D34,D34bl)

# +
langeq_dfa(D34,D34bl)

# + [markdown]
# ####

# +
du

# +
dotObj_dfa(pruneUnreach(D34bl), "D34bl")

# +
### DFA minimization (another example)

# +
# A deliberately "bloated" DFA given directly as a Jove DFA dict; it will be
# minimized below to illustrate the distinguishability-table algorithm.
Bloat1 = {'Q': {'S1', 'S3', 'S2', 'S5', 'S4', 'S6' },
          'Sigma': {'b', 'a'},
          'Delta': { ('S1','b') : 'S3',
                     ('S1','a') : 'S2',
                     ('S3','a') : 'S5',
                     ('S2','a') : 'S4',
                     ('S3','b') : 'S4',
                     ('S2','b') : 'S5',
                     ('S5','b') : 'S6',
                     ('S5','a') : 'S6',
                     ('S4','b') : 'S6',
                     ('S4','a') : 'S6',
                     ('S6','b') : 'S6',
                     ('S6','a') : 'S6' },
          'q0': 'S1',
          'F': {'S2','S3','S6'}
         }

Bloat1O = dotObj_dfa(Bloat1, dfaName="Bloat1")
Bloat1O # Display it!

# +
dotObj_dfa(Bloat1, FuseEdges=True, dfaName="Bloat1")

# +
# The same machine, entered via Jove's markdown machine language.
bloated_dfa = md2mc('''
DFA
IS1 : a -> FS2
IS1 : b -> FS3
FS2 : a -> S4
FS2 : b -> S5
FS3 : a -> S5
FS3 : b -> S4
S4  : a | b -> FS6
S5  : a | b -> FS6
FS6 : a | b -> FS6
''')
dotObj_dfa(bloated_dfa)

# +
dotObj_dfa(bloated_dfa).source

# +

# + [markdown]
# # Now, here is how the computation proceeds for this example:
# --------------------------------------------------------
#
# <br>
#
# <font size="3">
#
# ```
#
# Frame-0              Frame-1                Frame-2
#
# S2 -1                S2  0                  S2  0
#
# S3 -1 -1             S3  0 -1               S3  0 -1
#
# S4 -1 -1 -1          S4 -1  0  0            S4  2  0  0
#
# S5 -1 -1 -1 -1       S5 -1  0  0 -1         S5  2  0  0 -1
#
# S6 -1 -1 -1 -1 -1    S6  0 -1 -1  0  0      S6  0  1  1  0  0
#
#    S1 S2 S3 S4 S5       S1 S2 S3 S4 S5         S1 S2 S3 S4 S5
#
#    Initial              0-distinguishable      1-distinguishable
#
#
# Frame-3              Frame-4
#                      =
#                      Frame-3
#
# S2  0
#
# S3  0 -1
#
# S4  2  0  0
#
# S5  2  0  0 -1
#
# S6  0  1  1  0  0
#
#    S1 S2 S3 S4 S5
#
#    2-distinguishable
#
# ```
# </font>

# + [markdown]
# Here is the algorithm, going frame by frame.
#
# - Initial Frame:
#
#   The initial frame is drawn to clash all _combinations_ of states taken two
#   at a time. Since we have 6 states, we have $6\choose 2$ = $15$ entries.
#   We put a -1 against each such pair to denote that they have not been found
#   distinguishable yet.
#
# - Frame *0-distinguishable*: We now put a 0 where a pair of states is
#   0-distinguishable. This means the states are distinguishable after
#   consuming $\varepsilon$. This of course means that the states are
#   themselves distinguishable. This is only possible if one is a final state
#   and the other is not (in that case, one state, after consuming
#   $\varepsilon$ accepts_dfa, and another state after consuming
#   $\varepsilon$ does not accept.
#
#   - So for instance, notice that (S3,S1) and (S4,S2) are 0-distinguishable,
#     meaning that one is a final and the other is a non-final state.
#
# - Frame *1-distinguishable*: We now put a 1 where a pair of states is
#   1-distinguishable. This means the states are distinguishable after
#   consuming a string of length $1$ (a single symbol). This is only possible
#   if one state transitions to a final state and the other transitions to a
#   non-final state after consuming a member of $\Sigma$.
#
#   State pairs (S6,S2) and (S6,S3) are of this kind. While both S6 and S2 are
#   final states (hence _0-indistinguishable_), after consuming an 'a' (or a
#   'b') they respectively go to a final/non-final state. This means that
#
#   - after processing **the same symbol** one state -- let's say pre_p --
#     finds itself landing in a state p and another state -- let's say pre_q --
#     finds itself landing in a state q such that (p,q) is 0-distinguishable.
#
#   - When this happens, states pre-p and pre-q are **1-distinguishable**.
#
# - Frame *2-distinguishable*: We now put a 2 where a pair of states is
#   2-distinguishable. This means the states are distinguishable after
#   consuming a string of length $2$. This is only possible if one state
#   transitions to a state (say p) and the other transitions to state (say q)
#   after consuming a member of $\Sigma$ such that (p,q) is
#   **1-distinguishable**. State pairs (S5,S1) and (S4,S1) are
#   2-distinguishable because
#
#   - after processing **the same symbol** one state -- let's say pre_p --
#     finds itself landing in a state p and another state -- let's say pre_q --
#     finds itself landing in a state q such that (p,q) is 1-distinguishable.
#
#   - When this happens, states pre-p and pre-q are **2-distinguishable**.
#
#   - One example is this:
#
#     - S5 and S1 are 2-distinguishable.
#
#     - This is because after seeing an 'aa', S1 lands in a non-final state
#       while S5 lands in a final state
#
#     - Observe that "aa" = "a" + "a" . Thus, after eating the first "a", S1
#       lands in S2 while S5 lands in S6, and (S2,S6) have already been deemed
#       1-distinguishable.
#
#     - Thus, when we mark (S5,S1) as 2-distinguishable, we are sending the
#       matrix entry at (S5,S2) from -1 to 2
#
# - Now, in search of 3-distinguishability, we catch hold of all pairs in the
#   matrix and see if we can send another -1 entry to "3". This appears not to
#   happen.
#
# - Thus, if (S2,S3) is pushed via any sequence of symbols (any string) of any
#   length, it always stays in the same type of state. Thus, after seeing
#   'ababba', S2 is in S6, while S3 is also in S6.
#
# - Thus, given no changes in the matrix, we stop.

# +
dotObj_dfa(min_dfa(Bloat1), FuseEdges=True, dfaName="shrunkBloat1")

# +
min_bloat = min_dfa(Bloat1)
dotObj_dfa(min_bloat).source

# +
prd34b1 = pruneUnreach(D34bl)

# +
dotObj_dfa(prd34b1, "prd34b1")

# +
dotObj_dfa(min_dfa(prd34b1), "prd34b1min")

# +
# Load a DFA ("third-last symbol is 1") from a machine-description file.
third1dfa=md2mc(src="File", fname="machines/dfafiles/thirdlastis1.dfa")

# +
third1dfa

# +
dotObj_dfa(third1dfa)

# +
# Same machine language ("ends in 0101") written as one long backslash-continued
# string; the continuations are elided inside the literal, so md2mc receives a
# single space-separated line.
ends0101 =\
"\
DFA\
\
I : 0 -> S0 \
I : 1 -> I \
S0 : 0 -> S0 \
S0 : 1 -> S01 \
S01 : 0 -> S010 \
S01 : 1 -> I \
S010 : 0 -> S0 \
S010 : 1 -> F0101 \
F0101 : 0 -> S010 \
F0101 : 1 -> I \
"

# +
ends0101

# +
# More machine-loading demos plus the `nthnumeric` helper used to enumerate
# test strings in numeric (shortlex) order.

dfaends0101=md2mc(ends0101)

# +
dfaends0101

# +
dped1 = md2mc(src="File", fname="machines/dfafiles/pedagogical1.dfa")
#machines/dfafiles/pedagogical1.dfa

# +
dped1

# +
dotObj_dfa(dped1)

# +
dotObj_dfa(dped1, FuseEdges=True)

# +
dotObj_dfa(md2mc(ends0101))

# +
thirdlastis1=md2mc(src="File", fname="machines/dfafiles/thirdlastis1.dfa")
#machines/dfafiles/thirdlastis1.dfa

# +
thirdlastis1

# +
dotObj_dfa(thirdlastis1)

# +
# NOTE(review): this rebinds dped1 to a different machine than the one loaded
# above — presumably intentional, but confirm the earlier dped1 cells were
# meant to run first.
dped1=md2mc(src="File", fname="machines/dfafiles/pedagogical2.dfa")
#machines/dfafiles/pedagogical2.dfa

# +
dotObj_dfa(dped1)

# +
secondLastIs1 = md2mc('''
!!------------------------------------------------------------
!! This DFA looks for patterns of the form ....1.
!! i.e., the second-last (counting from the end-point) is a 1
!!
!! DFAs find such patterns "very stressful to handle",
!! as they are kept guessing of the form 'are we there yet?'
!! 'are we seeing the second-last' ?
!! They must keep all the failure options at hand. Even after
!! a 'fleeting glimpse' of the second-last, more inputs can
!! come barreling-in to make that "lucky 1" a non-second-last.
!!
!! We take 7 states in the DFA solution.
!!------------------------------------------------------------

DFA
!!------------------------------------------------------------
!! State : in -> tostate !! comment
!!------------------------------------------------------------

I : 0 -> S0 !! Enter at init state I
I : 1 -> S1 !! Record bit seen in state letter
            !! i.e., S0 means "state after seeing a 0"

S0 : 0 -> S00 !! continue recording input seen
S0 : 1 -> S01 !! in state-letter. This is a problem-specific
              !! way of compressing the input seen so far.

S1 : 0 -> F10 !! We now have a "second last" available!
S1 : 1 -> F11 !! Both F10 and F10 are "F" (final)

S00 : 0 -> S00 !! History of things seen is still 00
S00 : 1 -> S01 !! Remember 01 in the state

S01 : 0 -> F10 !! We again have a second-last of 1
S01 : 1 -> F11 !! We are in F11 because of 11 being last seen

F10 : 0 -> S00 !! The second-last 1 gets pushed-out
F10 : 1 -> S01 !! The second-last 1 gets pushed-out here too

F11 : 0 -> F10 !! Still we have a second-last 1
F11 : 1 -> F11 !! Stay in F11, as last two seen are 11

!!------------------------------------------------------------
''')

# +
from math import floor, log, pow

def nthnumeric(N, Sigma={'a','b'}):
    """Assume Sigma is a 2-sized list/set of chars (default {'a','b'}).
       Produce the Nth string in numeric order, where N >= 0.
       Idea : Given N, get b = floor(log_2(N+1)) - need that many places;
       what to fill in the places is the binary code for N - (2^b - 1)
       with 0 as Sigma[0] and 1 as Sigma[1].
    """
    # Note: Sigma's default is a set and is never mutated, so the mutable
    # default is harmless here; set iteration order fixes which symbol is
    # "0" vs "1" when a set is passed.
    if (type(Sigma)==set):
        S = list(Sigma)
    else:
        assert(type(Sigma)==list
        ), "Expected to be given set/list for arg2 of nthnumeric."
        S = Sigma
    assert(len(Sigma)==2
    ),"Expected to be given a Sigma of length 2."
    if(N==0):
        return ''
    else:
        width = floor(log(N+1, 2))
        tofill = int(N - pow(2, width) + 1)
        relevant_binstr = bin(tofill)[2::]  # strip the 0b
                                            # in the leading string
        len_to_makeup = width - len(relevant_binstr)
        # shomo (string homomorphism) is a Jove helper defined elsewhere.
        return (S[0]*len_to_makeup +
                shomo(relevant_binstr,
                      lambda x: S[1] if x=='1' else S[0]))

# +
nthnumeric(20,['0','1'])

# +
run_dfa(secondLastIs1, '0101')

# +
accepts_dfa(secondLastIs1, '0101')

# +
# Drive the DFA over the first 12 strings in numeric order.
tests = [ nthnumeric(i, ['0','1']) for i in range(12) ]
for t in tests:
    if accepts_dfa(secondLastIs1, t):
        print("This DFA accepts ", t)
    else:
        print("This DFA rejects ", t)

# +
help(run_dfa)

# + [markdown]
# This is an extensive illustration of union, intersection and
# complementation, DFA minimization, isomorphism test, language equivalence
# test, and an application of DeMorgan's law

# +
dfaOdd1s = md2mc('''
DFA
I : 0 -> I
I : 1 -> F
F : 0 -> F
F : 1 -> I
''')

# +
dotObj_dfa(dfaOdd1s)

# +
dotObj_dfa(dfaOdd1s).source

# +
ends0101 = md2mc('''
DFA
I : 0 -> S0
I : 1 -> I
S0 : 0 -> S0
S0 : 1 -> S01
S01 : 0 -> S010
S01 : 1 -> I
S010 : 0 -> S0
S010 : 1 -> F0101
F0101 : 0 -> S010
F0101 : 1 -> I
''')

# +
dotObj_dfa(ends0101)

# +
dotObj_dfa(ends0101).source

# +
odd1sORends0101 = union_dfa(dfaOdd1s,ends0101)

# +
dotObj_dfa(odd1sORends0101)

# +
# Union/intersection/complement demos culminating in a DeMorgan's-law check:
# min(comp(union(comp A, comp B))) should be isomorphic to min(A AND B).

dotObj_dfa(odd1sORends0101)

# +
dotObj_dfa(odd1sORends0101).source

# +
Minodd1sORends0101 = min_dfa(odd1sORends0101)

# +
dotObj_dfa(Minodd1sORends0101)

# +
dotObj_dfa(Minodd1sORends0101).source

# +
iso_dfa(odd1sORends0101, Minodd1sORends0101)

# +
langeq_dfa(odd1sORends0101, Minodd1sORends0101)

# +
odd1sANDends0101 = intersect_dfa(dfaOdd1s,ends0101)

# +
dotObj_dfa(odd1sANDends0101)

# +
Minodd1sANDends0101 = min_dfa(odd1sANDends0101)

# +
dotObj_dfa(Minodd1sANDends0101)

# +
dotObj_dfa(Minodd1sANDends0101).source

# +
CdfaOdd1s = comp_dfa(dfaOdd1s)

# +
Cends0101 = comp_dfa(ends0101)

# +
# DeMorgan: complement of (comp A union comp B) == A intersect B.
C_CdfaOdd1sORCends0101 = comp_dfa(union_dfa(CdfaOdd1s, Cends0101))

# +
dotObj_dfa(C_CdfaOdd1sORCends0101)

# +
MinC_CdfaOdd1sORCends0101 = min_dfa(C_CdfaOdd1sORCends0101)

# +
dotObj_dfa(MinC_CdfaOdd1sORCends0101)

# +
iso_dfa(MinC_CdfaOdd1sORCends0101, Minodd1sANDends0101)

# +
blimp = md2mc('''
DFA
I1 : a -> F2
I1 : b -> F3
F2 : a -> S8
F2 : b -> S5
F3 : a -> S7
F3 : b -> S4
S4 : a | b -> F6
S5 : a | b -> F6
F6 : a | b -> F6
S7 : a | b -> F6
S8 : a -> F6
S8 : b -> F9
F9 : a -> F9
F9 : b -> F6
''')

# +
dblimp = dotObj_dfa(blimp)

# +
dblimp

# +
dblimp = dotObj_dfa(blimp, FuseEdges=True)

# +
dblimp

# +
dblimp.source

# +

# +
mblimp = min_dfa(blimp)

# +
dmblimp = dotObj_dfa(mblimp)

# +
dmblimp

# +

# + [markdown]
# This shows how DeMorgan's Law applies to DFAs. It also shows how, using the
# tools provided to us, we can continually check our work.

# +
# A partial DFA (no move for F on 1) used to demo totalization below.
testdfa = md2mc('''DFA
I : 0 -> I
I : 1 -> F
F : 0 -> I
''')

# +
testdfa

# +
# totalize_dfa adds a black-hole (dead) state for the missing transitions.
tot_testdfa = totalize_dfa(testdfa)

# +
dotObj_dfa(tot_testdfa)

# +
dotObj_dfa_w_bh

# +
dotObj_dfa_w_bh(tot_testdfa)

# +
dotObj_dfa_w_bh(tot_testdfa, FuseEdges = True)
notebooks/driver/Drive_DFA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from os import path from sklearn import tree # + filename = path.join(".", "data", "exoplanet_data.csv") df = pd.read_csv(filename) # Drop the null columns where all values are null df = df.dropna(axis='columns', how='all') # Drop the null rows df = df.dropna() df.head() # - target = df["koi_disposition"] data = df.drop("koi_disposition", axis=1) feature_names = data.columns data.head() # Split the data into train/test from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(data, target, random_state=12) # + # Scale the data from sklearn.preprocessing import MinMaxScaler X_scaler = MinMaxScaler().fit(X_train) X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test) # - clf = tree.DecisionTreeClassifier() clf = clf.fit(X_train, y_train) clf.score(X_train, y_train) importances = clf.feature_importances_ importances sorted(zip(importances, feature_names), reverse=True)
decision_trees.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# Build/push a custom yolov5 Docker image, then train and deploy it with
# Amazon SageMaker. Requires AWS credentials and a SageMaker notebook role;
# not runnable outside that environment.

# !./build_and_push.sh yolov5

# ## Testing your algorithm on your local machine
#
# When you're packaging your first algorithm to use with Amazon SageMaker, you
# probably want to test it yourself to make sure it's working correctly. We use
# the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) to
# test both locally and on SageMaker. For more examples with the SageMaker
# Python SDK, see
# [Amazon SageMaker Examples](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-python-sdk).
# In order to test our algorithm, we need our dataset.

# ## SageMaker Python SDK Local Training
# To represent our training, we use the Estimator class, which needs to be
# configured in five steps.
# 1. IAM role - our AWS execution role
# 2. train_instance_count - number of instances to use for training.
# 3. train_instance_type - type of instance to use for training. For training
#    locally, we specify `local` or `local_gpu`.
# 4. image_name - our custom PyTorch Docker image we created.
# 5. hyperparameters - hyperparameters we want to pass.
#
# Let's start with setting up our IAM role. We make use of a helper function
# within the Python SDK. This function throw an exception if run outside of a
# SageMaker notebook instance, as it gets metadata from the notebook instance.
# If running outside, you must provide an IAM role with proper access stated
# above in [Permissions](#Permissions).

# +
from sagemaker import get_execution_role

role = get_execution_role()
# -

# ## Fit, Deploy, Predict
#
# Now that the rest of our estimator is configured, we can call `fit()` with
# the path to our local CIFAR10 dataset prefixed with `file://`. This invokes
# our PyTorch container with 'train' and passes in our hyperparameters and
# other metadata as json files in /opt/ml/input/config within the container to
# our program entry point defined in the Dockerfile.
#
# After our training has succeeded, our training algorithm outputs our trained
# model within the /opt/ml/model directory, which is used to handle
# predictions.
#
# We can then call `deploy()` with an instance_count and instance_type, which
# is 1 and `local`. This invokes our PyTorch container with 'serve', which
# setups our container to handle prediction requests as defined
# [here](https://github.com/aws/sagemaker-pytorch-container/blob/master/src/sagemaker_pytorch_container/serving.py#L103).
# What is returned is a predictor, which is used to make inferences against
# our trained model.
#
# After our prediction, we can delete our endpoint.
#
# We recommend testing and training your training algorithm locally first, as
# it provides quicker iterations and better debuggability.

# +
# # Lets set up our SageMaker notebook instance for local mode.
# # !/bin/bash ./utils/setup.sh

# +
# Local-mode cells kept for reference; they are intentionally commented out.
# import os
# import subprocess

# instance_type = 'local'

# if subprocess.call('nvidia-smi') == 0:
#     ## Set type to GPU if one is present
#     instance_type = 'local_gpu'

# print("Instance type = " + instance_type)

# +
# from sagemaker.estimator import Estimator

# hyperparameters = {}

# estimator = Estimator(role=role,
#                       train_instance_count=1,
#                       train_instance_type=instance_type,
#                       image_uri='yolov5:latest',
#                       hyperparameters=hyperparameters)

# estimator.fit({'cfg': 'file:///home/ec2-user/SageMaker/yolov5_sagemaker/data/cfg/', 'weights': 'file:///home/ec2-user/SageMaker/yolov5_sagemaker/data/weights/', 'images': 'file:///home/ec2-user/SageMaker/yolov5_sagemaker/data/images/', 'labels': 'file:///home/ec2-user/SageMaker/yolov5_sagemaker/data/labels/'})

# +
# TODO
#predictor = estimator.deploy(1, instance_type)
# -

# ## Making predictions using Python SDK
#
# To make predictions, we will use a few images, from the test loader,
# converted into a json format to send as an inference request.
#
# The reponse will be tensors containing the probabilities of each image
# belonging to one of the 10 classes. Based on the highest probability we will
# map that index to the corresponding class in our output. The classes can be
# referenced from the
# [CIFAR-10 website](https://www.cs.toronto.edu/~kriz/cifar.html). Since we
# didn't train the model for that long, we aren't expecting very accurate
# results.

# +
# # TODO
# import torchvision, torch
# import numpy as np
# from sagemaker.predictor import json_serializer, json_deserializer

# # get some test images
# dataiter = iter(testloader)
# images, labels = dataiter.next()

# # print images
# imshow(torchvision.utils.make_grid(images))
# print('GroundTruth: ', ' '.join('%4s' % classes[labels[j]] for j in range(4)))

# predictor.accept = 'application/json'
# predictor.content_type = 'application/json'

# predictor.serializer = json_serializer
# predictor.deserializer = json_deserializer

# outputs = predictor.predict(images.numpy())

# _, predicted = torch.max(torch.from_numpy(np.array(outputs)), 1)

# print('Predicted: ', ' '.join('%4s' % classes[predicted[j]]
#                               for j in range(4)))

# +
# predictor.delete_endpoint()
# -

# # Part 2: Training and Hosting your Algorithm in Amazon SageMaker
# Once you have your container packaged, you can use it to train and serve
# models. Let's do that with the algorithm we made above.
#
# ## Set up the environment
# Here we specify the bucket to use and the role that is used for working with
# SageMaker.

# S3 prefix
prefix = 'DEMO-pytorch-yolov5'

# ## Create the session
#
# The session remembers our connection parameters to SageMaker. We use it to
# perform all of our SageMaker operations.

# +
import sagemaker as sage

sess = sage.Session()
# -

# ## Upload the data for training
#
# We will use the tools provided by the SageMaker Python SDK to upload the
# data to a default bucket.

# +
WORK_DIRECTORY = '/home/ec2-user/SageMaker/MTR_code/yolov5_sagemaker/data/'

data_location = sess.upload_data(WORK_DIRECTORY, key_prefix=prefix)
inputs = {'cfg': data_location+'/cfg', 'weights': data_location+'/weights', 'images': data_location+'/images', 'labels': data_location+'/labels'}
print(inputs)
# -

# ## Training on SageMaker
# Training a model on SageMaker with the Python SDK is done in a way that is
# similar to the way we trained it locally. This is done by changing our
# train_instance_type from `local` to one of our
# [supported EC2 instance types](https://aws.amazon.com/sagemaker/pricing/instance-types/).
#
# In addition, we must now specify the ECR image URL, which we just pushed
# above.
#
# Finally, our local training dataset has to be in Amazon S3 and the S3 URL to
# our dataset is passed into the `fit()` call.
#
# Let's first fetch our ECR image url that corresponds to the image we just
# built and pushed.

# +
import boto3

client = boto3.client('sts')
account = client.get_caller_identity()['Account']

my_session = boto3.session.Session()
region = my_session.region_name

algorithm_name = 'yolov5'

# China regions use the .com.cn ECR domain.
if region.startswith('cn'):
    ecr_image = '{}.dkr.ecr.{}.amazonaws.com.cn/{}:latest'.format(account, region, algorithm_name)
else:
    ecr_image = '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(account, region, algorithm_name)

print(ecr_image)

# +
from sagemaker.estimator import Estimator

hyperparameters = {}

instance_type = 'ml.g4dn.xlarge'

estimator = Estimator(role=role,
                      instance_count=1,
                      instance_type=instance_type,
                      image_uri=ecr_image,
                      hyperparameters=hyperparameters)

estimator.fit(inputs)
# -

instance_type = 'ml.m5.xlarge'
predictor = estimator.deploy(1, instance_type)

# ## Optional cleanup
# When you're done with the endpoint, you should clean it up.
#
# All of the training jobs, models and endpoints we created can be viewed
# through the SageMaker console of your AWS account.

predictor.delete_endpoint()

# # Reference
# - [How Amazon SageMaker interacts with your Docker container for training](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html)
# - [How Amazon SageMaker interacts with your Docker container for inference](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html)
# - [CIFAR-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar.html)
# - [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk)
# - [Dockerfile](https://docs.docker.com/engine/reference/builder/)
# - [scikit-bring-your-own](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/scikit_bring_your_own/scikit_bring_your_own.ipynb)
# - [SageMaker PyTorch container](https://github.com/aws/sagemaker-pytorch-container)
openprompt_byoc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/RVT123123/Model-Prunning/blob/main/Model_pruning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="qeW3RXEFAHE_" import keras # + id="Yhw8F2XdWgsB" import tensorflow as tf # + id="z5hV6DxWAMm9" import numpy as np import pandas as pd from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten, Add from keras.layers import Convolution2D, MaxPooling2D from keras.utils import np_utils from keras.datasets import mnist # + id="ZLOJiPH0AQil" outputId="b239fe53-fd55-47af-8bc8-2337b1c6e83c" colab={"base_uri": "https://localhost:8080/", "height": 54} (X_train, y_train), (X_test, y_test) = mnist.load_data() # + id="GcenMm-QAWJ_" outputId="527b8ff1-7791-42cd-bf96-266a35155aec" colab={"base_uri": "https://localhost:8080/", "height": 302} print (X_train.shape) from matplotlib import pyplot as plt # %matplotlib inline plt.imshow(X_train[1]) # + id="lw5YaH6PAb_V" X_train = X_train.reshape(X_train.shape[0], 28, 28,1) X_test = X_test.reshape(X_test.shape[0], 28, 28,1) # + id="yIoqd3FXAgeG" X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 # + id="JYds49jOAnJD" outputId="5a30e6c5-5fea-4d9b-f32f-3422954dce80" colab={"base_uri": "https://localhost:8080/", "height": 35} y_train[:10] # + id="W6oHJ9nqApUV" # Convert 1-dimensional class arrays to 10-dimensional class matrices Y_train = np_utils.to_categorical(y_train, 10) Y_test = np_utils.to_categorical(y_test, 10) # + id="hAfxc3bTA1Qq" from keras.layers import Activation model = Sequential() model.add(Convolution2D(128, (3, 3), activation = 'relu', input_shape = (28, 28, 1))) 
model.add(MaxPooling2D(2, 2)) #model.add(Dropout(0.5))) model.add(MaxPooling2D(2, 2)) #model.add(Dropout(0.5)) model.add(Flatten()) model.add(Dense(64, activation = 'relu')) #model.add(Dropout(0.5)) model.add(Dense(10, activation = 'softmax')) # + id="UcEsxc_mA4Oe" outputId="3face7cd-c1f0-4d0a-e278-d01c0413274e" colab={"base_uri": "https://localhost:8080/", "height": 384} model.summary() # + id="jvI9YIlYA-SU" model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # + id="XQ3zH2TpBBi5" outputId="bc9ee14f-8d37-44b2-e9f6-ee225ba7cb9c" colab={"base_uri": "https://localhost:8080/", "height": 770} model.fit(X_train, Y_train,batch_size=120, epochs=20, verbose=2) # + id="CQbOYnDlBEdg" original_score = model.evaluate(X_test, Y_test, verbose=0) # + id="4URv5FCFBMCW" outputId="a385ed06-c93d-4e2e-c512-24ba7c06a90e" colab={"base_uri": "https://localhost:8080/", "height": 35} print(original_score[1]) # + id="PK5vzq9yBS1F" tf.keras.models.save_model(model, 'original_model.h5', include_optimizer=False) # + [markdown] id="LBPXJYzagRCx" # **Performing Model Prunning** # + id="fTbtlgRJW9v7" # ! pip install -q tensorflow-model-optimization # + id="ZMAllv7rBUxp" outputId="35923837-a172-4759-d7fd-704c1879b955" colab={"base_uri": "https://localhost:8080/", "height": 384} import tensorflow_model_optimization as tfmot prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude # Compute end step to finish pruning after 2 epochs. batch_size = 128 epochs = 10 validation_split = 0.1 # 10% of training set will be used for validation set. num_images = X_train.shape[0] * (1 - validation_split) end_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs # Define model for pruning. pruning_params = { 'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50, final_sparsity=0.80, begin_step=0, end_step=end_step) } model_for_pruning = prune_low_magnitude(model, **pruning_params) # `prune_low_magnitude` requires a recompile. 
# Recompile and fine-tune the pruned model, export it, convert to TFLite, and
# compare gzip-compressed sizes of the three artifacts.

model_for_pruning.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model_for_pruning.summary()

# + id="cg7bF0HmXDSZ"
import os
import tempfile

logdir = tempfile.mkdtemp()

# UpdatePruningStep is required during pruned training; PruningSummaries logs
# sparsity to TensorBoard.
callbacks = [
  tfmot.sparsity.keras.UpdatePruningStep(),
  tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
]

model_for_pruning.fit(X_train, Y_train,
                  batch_size=batch_size, epochs=epochs,
                  validation_split=validation_split,
                  callbacks=callbacks)

# + id="X4vy00nwgcuv"


# + [markdown] id="EfDT_aysgdeu"
# Comparison of Accuracies

# + id="2MZ1vCLIYboM"
model_for_pruning_score = model_for_pruning.evaluate(
   X_test, Y_test, verbose=0)

print('Baseline test accuracy:', original_score[1])
print('Pruned test accuracy:', model_for_pruning_score[1])

# + [markdown] id="Q9VAu7XKgOph"
# Create 3x smaller models from pruning

# + id="SvYT9lgOa0P3"
# strip_pruning removes the pruning wrappers so the saved model is small.
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
tf.keras.models.save_model(model_for_export, 'pruned_model.h5', include_optimizer=False)

# + [markdown] id="TRNBV02vhLnd"
# using TFLite for model compression

# + id="Rqu1-_LqhHHo"
converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)
pruned_tflite_model = converter.convert()

# Fix: mkstemp returns an OPEN file descriptor; the original discarded it
# (fd leak). Write through the fd and let the context manager close it.
tflite_fd, pruned_tflite_file = tempfile.mkstemp('.tflite')
with os.fdopen(tflite_fd, 'wb') as f:
    f.write(pruned_tflite_model)

print('Saved pruned TFLite model to:', pruned_tflite_file)

# + [markdown] id="wc49lMlMhym7"
# function for calculating the gzip size

# + id="B6TADN1Ehq1K"
def get_gzipped_model_size(file):
    """Return the size in bytes of *file* after DEFLATE (zip) compression.

    Args:
        file: path to the file to compress.

    Returns:
        int: size in bytes of the compressed archive.
    """
    import os
    import zipfile

    fd, zipped_file = tempfile.mkstemp('.zip')
    # Fix: the original leaked the open descriptor returned by mkstemp and
    # left the temporary archive on disk after every call.
    os.close(fd)
    try:
        with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f:
            f.write(file)
        return os.path.getsize(zipped_file)
    finally:
        os.remove(zipped_file)

# + id="Oo-by81kh473"
print("Size of gzipped baseline Keras model: %.2f bytes" % (get_gzipped_model_size('/content/original_model.h5')))
print("Size of gzipped pruned Keras model: %.2f bytes" % (get_gzipped_model_size('/content/pruned_model.h5')))
print("Size of gzipped pruned TFlite model: %.2f bytes" % (get_gzipped_model_size(pruned_tflite_file)))

# + id="KZV3rMm_iAze"
Model_pruning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---
# NOTE(review): restored from a collapsed single physical line; only the
# exact-duplicate `from sklearn.pipeline import Pipeline` was deduplicated.

# + id="GEA-ZAlairKv" colab_type="code" outputId="63ce46e0-d1b2-4da4-f1e1-5f665731a573" colab={"base_uri": "https://localhost:8080/", "height": 140}
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# NOTE(review): `scorer` was removed from sklearn.metrics in modern
# scikit-learn releases and is unused below — confirm and drop if possible.
from sklearn.metrics import scorer, accuracy_score, mean_squared_error, r2_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.svm import SVR
from subprocess import call
from sklearn.tree import DecisionTreeRegressor, export_graphviz
from sklearn.linear_model import LinearRegression
# %tensorflow_version 2.x

# + [markdown] id="I5huioxvivQO" colab_type="text"
# # Data Creation
# - wml = returns of WinnerMinusLoser portfolio
# - iml = returns of IlliquidityMinusLiquidity portfolio
# - market_value = market return

# + id="CUQsT3QBi5V4" colab_type="code" colab={}
wml_data = pd.read_csv('wml.csv', delimiter=',', header=0)
iml_data = pd.read_csv('iml.csv', delimiter=',', header=0)
target = pd.read_csv('returns.csv', delimiter=',', header=0)
market_value = pd.read_csv('market_value.csv', delimiter=',', header=0)
target = target[213:-1]  # 213 is index of 01.01.2009.
# NOTE(review): this region had been collapsed onto a handful of very long
# physical lines (statements were even split across them), making the code
# dead text after the first '#'. Cell/line structure restored; logic
# unchanged. Comments translated to English where they were not.

# Assemble the feature frame: iml + wml (from 01.01.2009 on) + market value.
data = pd.DataFrame({'iml': iml_data['iml'][213:]})
new_col = pd.DataFrame({'wml': wml_data['wml'][213:]})
data = data.join(new_col)
data = data.reset_index()
mv_data = market_value['Adj Close']
new_col = pd.DataFrame({'market_value': mv_data})
data = data.join(new_col)
data = data.dropna()
data = data.reset_index(drop=True)
target = target.reset_index(drop=True)
# First column of `target` is assumed non-stock metadata; loops below skip it.
stocks = target.columns.values

# + id="fiNn1xNFnshr" colab_type="code" outputId="553bab1e-d806-4fb6-d4cb-33c66d3f9646" colab={"base_uri": "https://localhost:8080/", "height": 424}
data

# + [markdown] id="zsS-jVmq2M3B" colab_type="text"
# # LINEAR REGRESSION

# + id="c8Dyd9eZ1_88" colab_type="code" outputId="d418b926-8218-44e8-8bfa-af1f2f492775" colab={"base_uri": "https://localhost:8080/", "height": 1000}
X = data[['iml', 'wml', 'market_value']].values
for Y in stocks[1:]:
    X_train, X_test, y_train, y_test = train_test_split(X, target[Y].values[1:], test_size=0.2, random_state=0)
    regressor = LinearRegression()
    regressor.fit(X_train, y_train)  # training the algorithm
    coeffs = (regressor.intercept_, regressor.coef_)
    y_pred = regressor.predict(X_test)
    print('-----' + Y + '------')
    print('Mean Squared Error:', mean_squared_error(y_test, y_pred))
    print('R2_score:', r2_score(y_test, y_pred))

# + [markdown] id="2DoqXosNjSQG" colab_type="text"
# # DECISION TREE

# + id="aHbS_QY1kZU1" colab_type="code" outputId="461f2549-058b-4f8f-fd87-f456f3386487" colab={"base_uri": "https://localhost:8080/", "height": 549}
X = data[['iml', 'wml', 'market_value']].values
# List of values to try for max_depth:
max_depth_range = list(range(1, 6))
accuracy_total = []
for Y in stocks[1:]:
    X_train, X_test, y_train, y_test = train_test_split(X, target[Y].values[1:], test_size=0.2, random_state=0, shuffle=False)
    # List to store the average RMSE for each value of max_depth:
    accuracy = []
    for depth in max_depth_range:
        clf = DecisionTreeRegressor(max_depth=depth, random_state=0)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        accuracy.append(score)
    # index + 1 = best depth (max_depth_range starts at 1)
    max_index = accuracy.index(max(accuracy)) + 1
    dt = DecisionTreeRegressor(max_depth=max_index, random_state=0)
    dt.fit(X_train, y_train)
    score = dt.score(X_test, y_test)
    accuracy_total.append(score)
    # graph visualized only for one stock --> copy from .dot file to http://webgraphviz.com/
    if Y == 'WTM':
        export_graphviz(dt, out_file='tree.dot', feature_names=['iml', 'wml', 'market_value'])
        call(['dot', '-Tpng', 'tree.dot', '-o', 'tree.png', '-Gdpi=600'])
        # Display in python
        plt.figure(figsize=(14, 18))
        plt.imshow(plt.imread('tree.png'))
        plt.axis('off');
        plt.show();
plt.plot(stocks[1:], accuracy_total)
plt.show()

# + [markdown] id="Zf5W8N2XkeGJ" colab_type="text"
# # SVM

# + id="AJVvpF2lkhRV" colab_type="code" outputId="caedeac7-efeb-4ee5-d028-57e3c2d62e2f" colab={"base_uri": "https://localhost:8080/", "height": 1000}
X = data[['iml', 'wml', 'market_value']].values
for Y in stocks[1:]:
    X_train, X_test, Y_train, Y_test = train_test_split(X, target[Y].values[1:], test_size=0.2, random_state=0, shuffle=False)
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    clf = SVR().fit(X_train, Y_train)
    y_pred = clf.predict(X_test)
    # create empty table with 2 fields --> something to fix the dimensions
    #helper = np.zeros(shape=(len(y_pred), 3) )
    # put the predicted values in the right field
    #helper[:,0] = y_pred
    # inverse transform and then select the right field
    #y_pred = scaler.inverse_transform(y_pred)
    plt.plot(Y_test, color = 'black', label = 'Test data return')
    plt.plot(y_pred, color = 'green', label = 'Predicted data return')
    plt.title('Prediction of returns with SVM')
    plt.xlabel('Time')
    plt.ylabel('Data return')
    plt.legend()
    plt.show()

# + [markdown] id="tYNfCqhRlpX7" colab_type="text"
# # <NAME>

# + id="R4A1JHTLlX9s" colab_type="code" outputId="9daba619-a434-4291-dd84-99bc9aca25a4" colab={"base_uri": "https://localhost:8080/", "height": 1000}
scaler = MinMaxScaler(feature_range = (0, 1))
# 213 is index of 01/01/2009
X = data[['iml', 'wml', 'market_value']].values
for Y in stocks[1:]:
    print('-----' + Y + '------')
    X_train, X_test, Y_train, Y_test = train_test_split(X, target[Y].values[1:], test_size=0.2, random_state=0)
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.2, random_state=0, shuffle=False)
    # scale data
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    X_val = scaler.transform(X_val)
    # define model
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(1, activation=tf.nn.relu))
    model.compile(optimizer="adam", loss="mean_squared_error")
    # fit model
    model.fit(X_train, Y_train, epochs=100)
    # evaluate model on test data
    model.evaluate(X_test, Y_test)
    # backtest the model
    y_pred = model.predict(X_val)
    # create empty table with 2 fields --> something to fix the dimensions
    helper = np.zeros(shape=(len(y_pred), 3) )
    # put the predicted values in the right field
    helper[:,0] = y_pred[:,0]
    # inverse transform and then select the right field
    # NOTE(review): `scaler` was fitted on the *features*, so inverse-
    # transforming predictions with it maps them through the iml column's
    # min/max — presumably intentional here, but verify the methodology.
    y_pred = scaler.inverse_transform(helper)[:,0]
    plt.plot(Y_val, color = 'black', label = 'Validation data return')
    plt.plot(y_pred, color = 'green', label = 'Predicted data return')
    plt.title('Prediction of returns with MLP')
    plt.xlabel('Time')
    plt.ylabel('Data return')
    plt.legend()
    plt.show()

# + [markdown] id="3RO_rAHdryBN" colab_type="text"
# # LSTM

# + id="jhzCH_Grp1r2" colab_type="code" outputId="46756d99-7c13-433a-9f1d-76638bbd9117" colab={"base_uri": "https://localhost:8080/", "height": 1000}
X = data[['iml', 'wml', 'market_value']].values
for Y in stocks[1:]:
    print('-----' + Y + '------')
    X_train, X_test, Y_train, Y_test = train_test_split(X, target[Y].values[1:], test_size=0.2, random_state=0, shuffle=False)
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.2, random_state = 0, shuffle=False)
    # scale data (reuses the MinMaxScaler instance from the cell above)
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    X_val = scaler.transform(X_val)
    # reshape data to (samples, timesteps, features) for the LSTM layers
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
    X_val = X_val.reshape(X_val.shape[0], X_val.shape[1], 1)
    # define model
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.LSTM(20, input_shape=(X_train.shape[1], 1), return_sequences=True))
    model.add(tf.keras.layers.LSTM(20))
    model.add(tf.keras.layers.Dense(1, activation=tf.nn.relu))
    model.compile(optimizer="adam", loss="mean_squared_error")
    # fit model
    model.fit(X_train, Y_train, epochs = 100, batch_size = 32)
    # evaluate model on test data
    model.evaluate(X_test, Y_test)
    # backtest the model
    y_pred = model.predict(X_val)
    # create empty table with 2 fields --> something to fix the dimensions
    helper = np.zeros(shape=(len(y_pred), 3) )
    # put the predicted values in the right field
    helper[:,0] = y_pred[:,0]
    # inverse transform and then select the right field
    y_pred = scaler.inverse_transform(helper)[:,0]
    plt.plot(Y_val, color = 'black', label = 'Validation data return')
    plt.plot(y_pred, color = 'green', label = 'Predicted data return')
    plt.title('Prediction of returns with LSTM')
    plt.xlabel('Time')
    plt.ylabel('Data return')
    plt.legend()
    plt.show()

# + id="s7x6y0N5sLfM" colab_type="code" colab={}
models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---
# NOTE(review): restored from collapsed physical lines (the GroupBy >>
# Aggregate expression was even split across two of them). Logic unchanged.

# **Important Disclaimer:** Mockup. So far, the example here does not support
# fit or predict, let alone hyperparameter tuning etc.
#
# The pipeline shown here assumes the example input tables from
# <a href="https://arxiv.org/pdf/1706.00327.pdf#page=3">Fig. 2</a>
# of the following paper:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# "One button machine for automating feature engineering in relational databases". 2017.
# https://arxiv.org/abs/1706.00327

from lale.expressions import it, replace, sum, max, count, month, day_of_month, item
import numpy as np
from lale.lib.lale import Scan, Join, Map, GroupBy, Aggregate, ConcatFeatures
from sklearn.feature_selection import SelectKBest as SelectFeatures
from sklearn.pipeline import Pipeline
from lale.lib.autoai_libs import NumpyColumnSelector, CatEncoder, OptStandardScaler, FS1
from sklearn.linear_model import LogisticRegression as LR
from sklearn.neighbors import KNeighborsClassifier as KNN
from xgboost import XGBClassifier as XGBoost
import lale
lale.wrap_imported_operators()

# one-to-one path doesn't need GroupBy >> Aggregate
info_features = (
    (Scan(table=it.main) & Scan(table=it.info))
    >> Join(pred=[it.main.TrainId == it.info.Train_Id,  #note the underscore
                  it.main['Arrival time'] >= it.info.TimeStamp])
    >> Map(columns=[replace(it['Train class'], {'Regional': 0, 'Intercity': 1}),
                    it['Max Speed (km/h)'],
                    month(it['Arrival time'], fmt='YYYY-MM-DD HH:MM:SS'),
                    day_of_month(it['Arrival time'])]))

# one-to-many path (multiple delay rows per main-table row)
delay_features = (
    (Scan(table=it.main) & Scan(table=it.delay))
    >> Join(pred=[it.main.TrainId == it.delay.TrainId,
                  it.main['Arrival time'] >= it.delay.TimeStamp])
    >> GroupBy(by=it.MessageId)  #primary key of main table
    >> Aggregate(columns=[sum(it.Delay), max(it.Delay)]))

# multi-hop one-to-many path uses multi-way join
event_features = (
    (Scan(table=it.main) & Scan(table=it.delay) & Scan(table=it.event))
    >> Join(pred=[it.main.TrainId == it.delay.TrainId,
                  it.main['Arrival time'] >= it.delay.TimeStamp,
                  it.delay.StationId == it.event.StationId,
                  it.main.TimeStamp >= it.event.TimeStamp])
    >> GroupBy(by=it.MessageId)  #primary key of main table
    >> Aggregate(columns=[count(it.Event), item(it['Train class'], 'Roadwork')]))

# Join the three feature paths, concatenate, and select the best features.
all_features = Pipeline(steps=[('data_joins',
    (info_features & delay_features & event_features)
    >> ConcatFeatures
    >> SelectFeatures())])

# Column-wise preprocessing: encode categoricals, scale continuous columns.
cats_prep = NumpyColumnSelector(columns=[0]) >> CatEncoder(dtype=np.float64)
cont_prep = NumpyColumnSelector(columns=[1,2]) >> OptStandardScaler(use_scaler_flag=True)
all_prep = Pipeline(steps=[('preprocessing',
    (cats_prep & cont_prep)
    >> ConcatFeatures
    >> FS1(additional_col_count_to_keep=3))])

# Classifier choice left open for AutoML-style search.
classifier = LR | KNN | XGBoost

pipeline = all_features >> all_prep >> classifier

pipeline.visualize()

pipeline.pretty_print(ipython_display=True)
examples/demo_multi_table.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Visualizing planar flow
# Visualizing the behavior of planar flow: $$f(z) = z + u \tanh(w^T z + b),$$ as proposed in
# * Rezende, <NAME>, and <NAME>. "Variational inference with normalizing flows." arXiv preprint arXiv:1505.05770 (2015).
#
# author: <NAME>, 09/25/2016

# +
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pylab as plt
# %matplotlib inline


# +
def h(x):
    """Elementwise nonlinearity of the planar flow."""
    return np.tanh(x)

def h_prime(x):
    """Derivative of h; needed for the flow's log-det-Jacobian."""
    return 1 - np.tanh(x) ** 2

def f(z, w, u, b):
    """Planar flow f(z) = z + u * h(w^T z + b), applied row-wise to z (n, 2)."""
    return z + np.dot(h(np.dot(z, w) + b).reshape(-1,1), u.reshape(1,-1))


# +
plt.figure(figsize=[10,12])
id_figure = 1
for i in np.arange(5):
    for j in np.arange(5):
        theta_w = 0  #represent w and u in polar coordinate system
        rho_w = 5
        theta_u = np.pi / 8 * i
        rho_u = j / 4.0
        w = np.array([np.cos(theta_w), np.sin(theta_w)]) * rho_w
        u = np.array([np.cos(theta_u), np.sin(theta_u)]) * rho_u
        b = 0
        # NOTE(review): the original also built a dense meshgrid z here and
        # immediately overwrote it with the random sample below — that dead
        # 4M-element computation has been removed.
        z = np.random.normal(size=(int(1e6), 2))
        z_new = f(z, w, u, b)
        heatmap, xedges, yedges = np.histogram2d(z_new[:,0], z_new[:,1],
                                                 bins=50, range=[[-3,3],[-3,3]])
        extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
        plt.subplot(5,5,id_figure)
        plt.imshow(heatmap, extent=extent)
        plt.title("u=(%.1f,%.1f)"%(u[0],u[1]) + "\n" +
                  "w=(%d,%d)"%(w[0],w[1]) + ", " + "b=%d"%b)
        id_figure += 1
        plt.xlim([-3,3])
        plt.ylim([-3,3])
plt.savefig('planar_flow.jpg')
notebooks/2016-09-25-planar_flow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/kundajelab/label_shift_experiments/blob/master/CIFAR100_do_label_shift_experiments.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="4HoNMk_PkjYs" outputId="731d2b9a-e434-4194-c05c-ac31ce17aeb0" # !wget https://zenodo.org/record/3459399/files/am_cifar100_test_labels.txt.gz?download=1 -O am_cifar100_test_labels.txt.gz # !wget https://zenodo.org/record/3459399/files/am_cifar100_valid_labels.txt.gz?download=1 -O am_cifar100_valid_labels.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed0.txt.gz?download=1 -O cifar100_validpreacts_seed0.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed0.txt.gz?download=1 -O cifar100_testpreacts_seed0.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed10.txt.gz?download=1 -O cifar100_validpreacts_seed10.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed10.txt.gz?download=1 -O cifar100_testpreacts_seed10.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed20.txt.gz?download=1 -O cifar100_validpreacts_seed20.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed20.txt.gz?download=1 -O cifar100_testpreacts_seed20.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed30.txt.gz?download=1 -O cifar100_validpreacts_seed30.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed30.txt.gz?download=1 -O cifar100_testpreacts_seed30.txt.gz # 
!wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed40.txt.gz?download=1 -O cifar100_validpreacts_seed40.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed40.txt.gz?download=1 -O cifar100_testpreacts_seed40.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed50.txt.gz?download=1 -O cifar100_validpreacts_seed50.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed50.txt.gz?download=1 -O cifar100_testpreacts_seed50.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed60.txt.gz?download=1 -O cifar100_validpreacts_seed60.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed60.txt.gz?download=1 -O cifar100_testpreacts_seed60.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed70.txt.gz?download=1 -O cifar100_validpreacts_seed70.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed70.txt.gz?download=1 -O cifar100_testpreacts_seed70.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed80.txt.gz?download=1 -O cifar100_validpreacts_seed80.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed80.txt.gz?download=1 -O cifar100_testpreacts_seed80.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_validpreacts_seed90.txt.gz?download=1 -O cifar100_validpreacts_seed90.txt.gz # !wget https://zenodo.org/record/3459399/files/cifar100_testpreacts_seed90.txt.gz?download=1 -O cifar100_testpreacts_seed90.txt.gz # + colab={"base_uri": "https://localhost:8080/", "height": 394} colab_type="code" id="3AzwUvQGk1AG" outputId="2ebc6c65-2df5-404b-b8f5-9746cffdd05b" ![[ -e abstention ]] || git clone https://github.com/blindauth/abstention # %cd /content/abstention # !git pull # !pip uninstall abstention # !pip install . # %cd .. 
# + colab={"base_uri": "https://localhost:8080/", "height": 581} colab_type="code" id="lfMaM2DglFSi" outputId="333ffb7e-579f-4ce4-c48b-d2d6b790572f" ![[ -e label_shift_experiments ]] || git clone https://github.com/blindauth/labelshiftexperiments # %cd /content/labelshiftexperiments # !git pull # !pip uninstall labelshiftexperiments # !pip install . # %cd .. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="K57Sm50IlmAa" outputId="c066b4c8-9e1d-4c62-c1fe-4d7fc2a993fa" # !rm *.txt # !gunzip *.gz # + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="q_jXFekXzd7F" outputId="9ef7bf46-66bf-46e0-b803-7bee1118d356" # !ls # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="rgxJu-ONlHlZ" outputId="5818ec00-ea5a-49b0-8d47-3a3206b8b305" from importlib import reload import abstention reload(abstention) reload(abstention.calibration) reload(abstention.label_shift) reload(abstention.figure_making_utils) from abstention.calibration import ( TempScaling, VectorScaling, NoBiasVectorScaling, softmax) from abstention.label_shift import (EMImbalanceAdapter, BBSEImbalanceAdapter, ShiftWeightFromImbalanceAdapter) import glob import gzip import numpy as np from collections import defaultdict, OrderedDict import labelshiftexperiments reload(labelshiftexperiments) reload(labelshiftexperiments.cifarandmnist) from labelshiftexperiments import cifarandmnist test_labels = cifarandmnist.read_preds(open("am_cifar100_test_labels.txt")) valid_labels = cifarandmnist.read_preds(open("am_cifar100_valid_labels.txt")) imbalanceadaptername_to_imbalanceadapter = { 'em': EMImbalanceAdapter(), 'bbse-hard': BBSEImbalanceAdapter(soft=False), 'bbse-soft': BBSEImbalanceAdapter(soft=True)} calibname_to_calibfactory = OrderedDict([ ('None', abstention.calibration.Softmax()), ('TS', TempScaling(verbose=False)), ('NBVS', NoBiasVectorScaling(verbose=False)), ('BCTS', TempScaling(verbose=False, 
bias_positions='all')), ('VS', VectorScaling(verbose=False)) ]) adaptncalib_pairs = [ ('bbse-hard', 'None'), ('bbse-soft', 'None'), ('bbse-soft', 'TS'), ('bbse-soft', 'NBVS'), ('bbse-soft', 'BCTS'), ('bbse-soft', 'VS'), ('bbse-soft', 'best-ece'), ('bbse-soft', 'best-jsdiv'), ('bbse-soft', 'best-nll'), ('em', 'None'), ('em', 'TS'), ('em', 'NBVS'), ('em', 'BCTS'), ('em', 'VS'), ('em', 'best-ece'), ('em', 'best-jsdiv'), ('em', 'best-nll') ] num_trials = 10 seeds = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90] dirichlet_alphas_and_samplesize = [(1.0,7000), (1.0,8500), (1.0,10000), (0.1,7000), (0.1,8500), (0.1,10000)] tweakone_alphas_and_samplesize = [] print("Dirichlet shift") (dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals, dirichlet_alpha_to_samplesize_to_baselineacc, metric_to_samplesize_to_calibname_to_unshiftedvals) =\ cifarandmnist.run_experiments( num_trials=num_trials, seeds=seeds, alphas_and_samplesize = dirichlet_alphas_and_samplesize, shifttype='dirichlet', calibname_to_calibfactory=calibname_to_calibfactory, imbalanceadaptername_to_imbalanceadapter= imbalanceadaptername_to_imbalanceadapter, adaptncalib_pairs=adaptncalib_pairs, validglobprefix="cifar100_validpreacts_seed", testglobprefix="cifar100_testpreacts_seed", valid_labels=valid_labels, test_labels=test_labels) print("Tweak one shift") (tweakone_alpha_to_samplesize_to_adaptername_to_metric_to_vals, tweakone_alpha_to_samplesize_to_baselineacc, _) = cifarandmnist.run_experiments( num_trials=num_trials, seeds=seeds, alphas_and_samplesize = tweakone_alphas_and_samplesize, shifttype='tweakone', calibname_to_calibfactory=calibname_to_calibfactory, imbalanceadaptername_to_imbalanceadapter= imbalanceadaptername_to_imbalanceadapter, adaptncalib_pairs=adaptncalib_pairs, validglobprefix="cifar100_validpreacts_seed", testglobprefix="cifar100_testpreacts_seed", valid_labels=valid_labels, test_labels=test_labels) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" 
id="HDoDQrwEz8TZ" outputId="9131a02b-95c5-40ec-b18f-2352511fb63b" import json import os file_out = "cifar100_label_shift_adaptation_results.json" dict_to_write = { "dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals": dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals, "dirichlet_alpha_to_samplesize_to_baselineacc": dirichlet_alpha_to_samplesize_to_baselineacc, "metric_to_samplesize_to_calibname_to_unshiftedvals": metric_to_samplesize_to_calibname_to_unshiftedvals, "tweakone_alpha_to_samplesize_to_adaptername_to_metric_to_vals": tweakone_alpha_to_samplesize_to_adaptername_to_metric_to_vals, "tweakone_alpha_to_samplesize_to_baselineacc": tweakone_alpha_to_samplesize_to_baselineacc } open(file_out, 'w').write( json.dumps(dict_to_write, sort_keys=True, indent=4, separators=(',', ': '))) os.system("gzip -f "+file_out) # + colab={} colab_type="code" id="ywxLEvN6T9X1" from google.colab import files files.download("cifar100_label_shift_adaptation_results.json.gz") # + colab={} colab_type="code" id="lxnkZg-n1hxj" import gzip import json loaded_dicts = json.loads(gzip.open("cifar100_label_shift_adaptation_results.json.gz").read()) dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals =\ loaded_dicts['dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals'] dirichlet_alpha_to_samplesize_to_baselineacc =\ loaded_dicts['dirichlet_alpha_to_samplesize_to_baselineacc'] tweakone_alpha_to_samplesize_to_adaptername_to_metric_to_vals =\ loaded_dicts['tweakone_alpha_to_samplesize_to_adaptername_to_metric_to_vals'] tweakone_alpha_to_samplesize_to_baselineacc =\ loaded_dicts['tweakone_alpha_to_samplesize_to_baselineacc'] metric_to_samplesize_to_calibname_to_unshiftedvals =\ loaded_dicts['metric_to_samplesize_to_calibname_to_unshiftedvals'] # + colab={"base_uri": "https://localhost:8080/", "height": 360} colab_type="code" id="DvGAAv8k1rTL" outputId="d9eee5ac-6799-4bda-d4f5-fb561dce321d" from importlib import reload import numpy as np import 
labelshiftexperiments reload(labelshiftexperiments) import labelshiftexperiments.maketable reload (labelshiftexperiments.maketable) from labelshiftexperiments.maketable import render_calibration_table metricname_to_nicename = {'nll': 'nll', 'jsdiv': 'jsdiv', 'ece': 'ECE'} calibname_to_nicename = {'None': "None", "TS": "TS", "VS":"VS", "NBVS": "NBVS", "BCTS": "BCTS"} from scipy.stats import norm N = len(seeds)*num_trials #Using the normal approximation at N=100; # variance from https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test #Note that T = ((N+1)*N/2 - W)/2 ustat_threshold = ((N*(N+1))/2 - norm.ppf(0.99)*np.sqrt(N*(N+1)*(2*N+1)/6.0))/2.0 print(render_calibration_table( metric_to_samplesize_to_calibname_to_unshiftedvals= metric_to_samplesize_to_calibname_to_unshiftedvals, #threshold of 8 comes from table https://www.oreilly.com/library/view/nonparametric-statistics-a/9781118840429/bapp02.xhtml #for one-tailed alpha=0.025 and n=10 ustat_threshold=ustat_threshold, metrics_in_table=['nll', 'ece'], samplesizes_in_table=['7000', '8500', '10000'], calibnames_in_table=['None', 'TS', 'NBVS', 'BCTS', 'VS'], metricname_to_nicename=metricname_to_nicename, calibname_to_nicename=calibname_to_nicename, caption="CIFAR100 Calibration metric differences", label="cifar10calibrationcomparison", applyunderline=False)) # + colab={"base_uri": "https://localhost:8080/", "height": 989} colab_type="code" id="VonA65ef1uOr" outputId="f3fb9c95-213c-45d4-a413-c107065a9877" from collections import OrderedDict from labelshiftexperiments.maketable import render_adaptation_table methodgroups = OrderedDict([ ('em', ['em:None', 'em:TS', 'em:NBVS', 'em:BCTS', 'em:VS']), ('bbse', ['bbse-hard:None', 'bbse-soft:None', 'bbse-soft:TS', 'bbse-soft:NBVS', 'bbse-soft:BCTS', 'bbse-soft:VS'])]) samplesizes_in_table = ['7000', '8500', '10000'] adaptname_to_nicename = {'em': 'EM', 'bbse-soft': 'BBSE-soft', 'bbse-hard': 'BBSE-hard'} calibname_to_nicename = {'None': 'None', 'TS': 'TS', 'NBVS': 'NBVS', 
'BCTS': 'BCTS', 'VS': 'VS', 'best-nll':'Best NLL', 'best-jsdiv':'Best JS Div', 'best-ece':'Best ECE'} dirichlet_alphas_in_table = ['0.1', '1.0'] print(render_adaptation_table( alpha_to_samplesize_to_adaptncalib_to_metric_to_vals=dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals, ustat_threshold=ustat_threshold, valmultiplier=1.0, adaptname_to_nicename=adaptname_to_nicename, calibname_to_nicename=calibname_to_nicename, methodgroups=methodgroups, metric='jsdiv', largerisbetter=False, alphas_in_table=dirichlet_alphas_in_table, samplesizes_in_table=samplesizes_in_table, caption="CIFAR100 Metric: JS Divergence, dirichlet shift", label="cifar100jsdivdirichletshift", applyunderline=False)) print(render_adaptation_table( alpha_to_samplesize_to_adaptncalib_to_metric_to_vals=dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals, ustat_threshold=ustat_threshold, valmultiplier=100, adaptname_to_nicename=adaptname_to_nicename, calibname_to_nicename=calibname_to_nicename, methodgroups=methodgroups, metric='delta_acc', largerisbetter=True, alphas_in_table=dirichlet_alphas_in_table, samplesizes_in_table=samplesizes_in_table, caption="CIFAR100 Metric: $\\Delta$\\%Accuracy, dirichlet shift", label="cifar100deltaaccdirichletshift", applyunderline=False)) # + colab={"base_uri": "https://localhost:8080/", "height": 751} colab_type="code" id="Na0VuwcQIxlW" outputId="2a0a2b76-0234-4a26-a4d9-aacfc9ad8f41" methodgroups = OrderedDict([ ('em', ['em:NBVS', 'em:BCTS']), ('bbse', ['bbse-soft:NBVS', 'bbse-soft:BCTS'])]) print(render_adaptation_table( alpha_to_samplesize_to_adaptncalib_to_metric_to_vals=dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals, ustat_threshold=ustat_threshold, valmultiplier=1.0, adaptname_to_nicename=adaptname_to_nicename, calibname_to_nicename=calibname_to_nicename, methodgroups=methodgroups, metric='jsdiv', largerisbetter=False, alphas_in_table=dirichlet_alphas_in_table, samplesizes_in_table=samplesizes_in_table, caption="CIFAR100 
NBVS vs BCTS Metric: JS Divergence, dirichlet shift", label="cifar100_nbvsbcts_jsdiv_dirichletshift", applyunderline=False)) print(render_adaptation_table( alpha_to_samplesize_to_adaptncalib_to_metric_to_vals=dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals, ustat_threshold=ustat_threshold, valmultiplier=100, adaptname_to_nicename=adaptname_to_nicename, calibname_to_nicename=calibname_to_nicename, methodgroups=methodgroups, metric='delta_acc', largerisbetter=True, alphas_in_table=dirichlet_alphas_in_table, samplesizes_in_table=samplesizes_in_table, caption="CIFAR100 NBVS vs BCTS Metric: $\\Delta$\\%Accuracy, dirichlet shift", label="cifar100_nvbsbcts_deltaacc_dirichletshift", applyunderline=False)) # + colab={"base_uri": "https://localhost:8080/", "height": 751} colab_type="code" id="HkEN85En2KxK" outputId="21303ab0-570c-4782-cea9-071a8c96588e" methodgroups = OrderedDict([ ('em-calib', ['em:best-nll', 'em:best-ece']), ('bbse-calib', ['bbse-soft:best-nll', 'bbse-soft:best-ece'])]) print(render_adaptation_table( alpha_to_samplesize_to_adaptncalib_to_metric_to_vals=dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals, ustat_threshold=ustat_threshold, valmultiplier=100, adaptname_to_nicename=adaptname_to_nicename, calibname_to_nicename=calibname_to_nicename, methodgroups=methodgroups, metric='delta_acc', largerisbetter=True, alphas_in_table=dirichlet_alphas_in_table, samplesizes_in_table=samplesizes_in_table, caption="CIFAR100 NLL vs ECE $\\Delta$\\%Accuracy, dirichlet shift", label="cifar100_nllvsece_deltaacc_dirichletshift", applyunderline=False)) print(render_adaptation_table( alpha_to_samplesize_to_adaptncalib_to_metric_to_vals=dirichlet_alpha_to_samplesize_to_adaptername_to_metric_to_vals, ustat_threshold=ustat_threshold, valmultiplier=1, adaptname_to_nicename=adaptname_to_nicename, calibname_to_nicename=calibname_to_nicename, methodgroups=methodgroups, metric='jsdiv', largerisbetter=False, alphas_in_table=dirichlet_alphas_in_table, 
samplesizes_in_table=samplesizes_in_table, caption="CIFAR100 NLL vs ECE JSDiv, dirichlet shift", label="cifar100_nllvsece_jsdiv_dirichletshift", applyunderline=False)) # + colab={} colab_type="code" id="D8YpzlkKWZ_o"
CIFAR100_do_label_shift_experiments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---
# NOTE(review): restored from collapsed physical lines. Logic unchanged.

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lmkwytnicholas/nicholas-lee.github.io/blob/master/Seoul_Bike_Rental_Prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="bmmgX432X2Gg"
# # **Title: Seoul Bike Rental Prediction**
#
# * What are the factors that influence the number of bikes rented?
# * Explored the data for outliers and missing values
# * Plotted correlation between the variables
# * Built a linear regression model to predict rented bike count by choosing appropriate independent variables
#

# + [markdown] id="iQJs8eIIftse"
# # **Import Libraries**

# + id="shwKc3xZXiYt"
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set(style='dark', color_codes=True)

# + id="4xhvtxUCgHLB"
data = pd.read_csv('/content/drive/MyDrive/Tech I.S./Datasets/Linear Regression/SeoulBikeData.csv')

# + [markdown] id="yMduJkd1g0s0"
# # **Extract DataFrame for Only Numeric DataTypes**
# ..and summarize

# + colab={"base_uri": "https://localhost:8080/", "height": 233} id="M6ONY1rDg0Pq" outputId="90c89303-9c6c-40b1-c40e-686696d29ad8"
df = data.select_dtypes(include=['float64','int64'])
df.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="_9zlDVFbhSmh" outputId="6e21bf8f-1e55-426b-f812-8fcdd456cad3"
# Descriptive Statistics
df.describe()

# + [markdown] id="ybb-vGavhZJr"
# # **Manage for Missing Values**

# + colab={"base_uri": "https://localhost:8080/"} id="28CgWh4PhYh_" outputId="197bcde4-77f8-48ba-8ff3-1d20b28b7d16"
na_df = df.isna().sum()
na_df

# + [markdown] id="kPTXRZa7hoE4"
# # **Construct Correlation Matrix**

# + id="5oIFlUlVj548"
# select_dtypes already returns a DataFrame; this wrap is a no-op kept as-is.
df = pd.DataFrame(df)

# + colab={"base_uri": "https://localhost:8080/", "height": 472} id="dPOiLyeXhsTQ" outputId="31973aa3-9af2-4c8a-a45d-220dbd0c385d"
df_corr = df.corr()
# type(df_corr)
df.corr()

# + [markdown] id="vwT3C2YwkN-d"
# # **Extract Strongly Correlated Values**

# + colab={"base_uri": "https://localhost:8080/"} id="e8h3IHnNkSuz" outputId="de883917-c42d-4301-a182-19814453c280"
# type(df_corr)
df_corr = df_corr['Rented Bike Count'][:-1]
# NOTE(review): the sorted/filtered result below is displayed but not
# assigned, so it does not affect later cells.
df_corr[abs(df_corr)>=0.50].sort_values(ascending=False)
df_corr

# + [markdown] id="6KKeH099zq2p"
# # **Plot Correlation Matrix**

# + colab={"base_uri": "https://localhost:8080/", "height": 586} id="uvKBbAHaztdg" outputId="75a8b83c-c7ef-47fe-cc81-aff6e7c89a4d"
df_corr = df.corr()
plt.figure(figsize=[8,8])
sns.heatmap(df_corr, annot=True, cmap='cubehelix_r', square=True)
plt.show()

# + [markdown] id="BzR9LvM70syW"
# #**Train Test Split**

# + id="iTGYrHTWDwBh"
# HERE IS WHEN TO EXCLUDE TARGET VARIABLES
# NOTE(review): despite the names, these are a feature frame and a target
# series (not a train/test split); the real split happens below.
df_train = data.drop(['Rented Bike Count'], axis=1)
df_test = data['Rented Bike Count']

# + colab={"base_uri": "https://localhost:8080/"} id="bkv5XMcatlok" outputId="34a8a151-16e2-467c-e46c-52d465959047"
df_train.shape

# + [markdown] id="Roq-BndcFIi2"
# # **Train Test Split**

# + id="9amZU2AkFK57"
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, test_size=0.25)

# + colab={"base_uri": "https://localhost:8080/"} id="vbzj7wggFY_M" outputId="6928a9d8-a4ab-4eb3-da9b-9cda70dff731"
train_x = train.drop(['Rented Bike Count'], axis=1)
train_y = train['Rented Bike Count']
test_x = test.drop(['Rented Bike Count'], axis=1)
test_y = test['Rented Bike Count']
print('Dimension of train_x:', train_x.shape)
print('Dimension of train_y:', train_y.shape)
print('Dimension of test_x:', test_x.shape)
print('Dimension of test_y:', test_y.shape)

# + [markdown] id="Hq2f0Jy0GDRl"
# # **Linear Regression**
#

# + [markdown] id="1XuB-oWtGm0p"
# ## **Linear Fit for Training Datasets**
# ## Linear Fit for Training Datasets
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

ln_reg = LinearRegression()
model = ln_reg.fit(train_x, train_y)

# ## Tests: Scoring Models Linear vs. Ridge vs. Lasso
# Evaluate the linear model on the held-out test split.
df_pred = ln_reg.predict(test_x)
# BUG FIX: sklearn metrics take (y_true, y_pred). r2_score is NOT symmetric,
# so the original r2_score(df_pred, test_y) reported a wrong R^2.
# (MAE/MSE happen to be symmetric, but the documented order is used throughout.)
print('Mean absolute error of linear regression:', mean_absolute_error(test_y, df_pred))
print('Mean square error of linear regression:', mean_squared_error(test_y, df_pred))
print('R_squared score of linear regression:', r2_score(test_y, df_pred))

# ## Ridge (L2) regression
ridge = Ridge()
ridge.fit(train_x, train_y)
ridge_score = ridge.score(test_x, test_y)  # R^2 on the test split
coeff_used = np.sum(ridge.coef_ != 0)      # count of non-zero coefficients
# BUG FIX: this score is computed on the *test* data, so label it as such
# (the original printed "Training score:").
print("Test score:", ridge_score)
print("Number of features used:", coeff_used)

# ## Lasso (L1) regression
lasso = Lasso()
lasso.fit(train_x, train_y)
lasso_score = lasso.score(test_x, test_y)  # R^2 on the test split
coeff_used = np.sum(lasso.coef_ != 0)      # L1 drives some coefficients to exactly 0
print('Test score:', lasso_score)
print('Number of features used:', coeff_used)
Seoul_Bike_Rental_Prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example using jupyter-nuclio %nuclio magic # # The cell below will be excluded from the generated handler code since it has `# nuclio: ignore` comment # nuclio: ignore import nuclio # ## Setup Environment # %%nuclio env USER=iguazio PASSWORD=<PASSWORD> # %nuclio env API_KEY=1234 from os import environ env_file = environ.get('ENV_FILE', 'env.txt') # %nuclio env_file $env_file # ## Setting Configuration # %nuclio config spec.maxReplicas = 5 # ## Add Commands # %%nuclio cmd apt update apt install -y libyaml-dev # nuclio: ignore event = nuclio.Event(body='Nuclio') # ## Exporting Handler # # You can use `File/Export Notebook as` menu or use the `%nuclio export` magic command. Cells marked with `%%nuclio handler` magic will be exported to functions. Lines marked with `# nuclio: return` comment will become the handler exit point. # # ```python # def handler(context, event): # msg = 'Hello ' + event.body # return msg # nuclio: return # ``` # + # %%nuclio handler msg = 'Hello ' + event.body msg # nuclio: return # - # %nuclio build --output /tmp/demo-handler/ # !ls /tmp/demo-handler # ## Deploying # # If you have a [nuclio dashboard](https://github.com/nuclio/nuclio#quick-start-steps) running. You can deploy the handler using the `%nuclio deploy` magic # %nuclio deploy
tests/handler.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from pathlib import Path import imagededup # - image_dir = Path('../tests/data/mixed_images') # # Hashing # #### Find duplicates using Perceptual hashing along with scores # + from imagededup.methods import PHash phasher = PHash() duplicates = phasher.find_duplicates(image_dir=image_dir, scores=True) # - duplicates # ### Plotting duplicates for image file: 'ukbench00120.jpg' from imagededup.utils import plot_duplicates plot_duplicates(image_dir=image_dir, duplicate_map=duplicates, filename='ukbench00120.jpg') # #### Find duplicates to remove using Perceptual hashing # + from imagededup.methods import PHash phasher = PHash() duplicates_list = phasher.find_duplicates_to_remove(image_dir) # - duplicates_list # # CNN # #### Find duplicates using CNN along with scores # + from imagededup.methods import CNN cnn_encoder = CNN() duplicates_cnn = cnn_encoder.find_duplicates(image_dir=image_dir, scores=True) # - duplicates_cnn # ### Plotting duplicates for image file: 'ukbench00120.jpg' from imagededup.utils import plot_duplicates plot_duplicates(image_dir=image_dir, duplicate_map=duplicates_cnn, filename='ukbench00120.jpg') # #### Find duplicates to remove using CNN # + from imagededup.methods import CNN cnn_encoder = CNN() duplicates_list_cnn = cnn_encoder.find_duplicates_to_remove(image_dir=image_dir) # - duplicates_list_cnn
examples/Finding_duplicates.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="nWxac3raPzB4" # # CNN Demo # TO DO: add a CoLab badge # # Can a convolutional neural network (CNN) be trained to distinguish RNA # from nucleotide composition alone? # More specifically, can a CNN learn to classify # AT-rich sequence with the label "protein coding" # from GC-rich sequence with the label "non-coding"? # # This demo uses an RNA sequence simulator. # The simulator strictly follows a frequency histogram with values for A, C, G, T. # This is a noise-free simulation. # # The CNN is almost as simple as can be. # It has one trainiable convolution layer (one dimensional) with 8 filters. # It has one flatten layer simply to reshape the data. # It has a trainable fully connected (dense) output layer with 1 neuron. # More sophisticated models would incorporate embedding, pooling, dropout, # multiple convolution layers, and multiple dense layers. # # The training regime is also simple. # The model is trained for a fixed number of epochs. # More sophisticated training would implement early stopping. # # This model minimizes loss at 5 epochs and overfits by 10 epochs. 
# + [markdown] id="tRkDy1NTPzCF" # ## Computing Environment Setup # + id="39R_Ey6TPzCJ" PC_SEQUENCES=2000 # how many protein-coding sequences NC_SEQUENCES=2000 # how many non-coding sequences BASES=55 # how long is each sequence ALPHABET=4 # how many different letters are possible INPUT_SHAPE_2D = (BASES,ALPHABET,1) # Conv2D needs 3D inputs INPUT_SHAPE = (BASES,ALPHABET) # Conv1D needs 2D inputs FILTERS = 8 # how many different patterns the model looks for WIDTH = 3 # how wide each pattern is, in bases STRIDE_2D = (1,1) # For Conv2D how far in each direction STRIDE = 1 # For Conv1D, how far between pattern matches, in bases EPOCHS=5 # how many times to train on all the data SPLITS=4 # SPLITS=3 means train on 2/3 and validate on 1/3 FOLDS=5 # train the model this many times (must be 1 to SPLITS) # + colab={"base_uri": "https://localhost:8080/"} id="ph16HKwFPzCM" outputId="c94f7fc0-8c69-44e6-fa20-3e419aec19a2" import sys try: from google.colab import drive IN_COLAB = True print("On Google CoLab, mount cloud-local file, get our code from GitHub.") PATH='/content/drive/' #drive.mount(PATH,force_remount=True) # hardly ever need this #drive.mount(PATH) # Google will require login credentials DATAPATH=PATH+'My Drive/data/' # must end in "/" import requests r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_gen.py') with open('RNA_gen.py', 'w') as f: f.write(r.text) # writes to cloud local, delete the file later? from RNA_gen import * s = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/LearnTools/RNA_prep.py') with open('RNA_prep.py', 'w') as f: f.write(s.text) # writes to cloud local, delete the file later? from RNA_prep import * except: print("CoLab not working. 
On my PC, use relative paths.") IN_COLAB = False DATAPATH='data/' # must end in "/" sys.path.append("..") # append parent dir in order to use sibling dirs from SimTools.RNA_gen import * from LearnTools.RNA_prep import * MODELPATH="BestModel" # saved on cloud instance and lost after logout #MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login if not assert_imported_RNA_gen(): print("ERROR: Cannot use RNA_gen.") # + id="BzNCeHtiPzCP" from os import listdir import time # datetime import csv from zipfile import ZipFile import numpy as np import pandas as pd from scipy import stats # mode from sklearn.preprocessing import StandardScaler from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score from keras.models import Sequential from keras.layers import Dense,Embedding from keras.layers import Conv1D,Conv2D from keras.layers import Flatten,MaxPooling1D,MaxPooling2D from keras.losses import BinaryCrossentropy # tf.keras.losses.BinaryCrossentropy import matplotlib.pyplot as plt from matplotlib import colors mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1 np.set_printoptions(precision=2) # + [markdown] id="MdYEn_WTPzCS" # ## Data Preparation # + colab={"base_uri": "https://localhost:8080/", "height": 36} id="hfi5Ak1FPzCU" outputId="d865a3a8-8c00-46f9-e8e6-ae37d63182ff" # print(datetime.datetime.now()) t = time.time() time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t)) # + colab={"base_uri": "https://localhost:8080/"} id="SCs9tmnJPzCX" outputId="709dec07-39e2-4424-f25a-1f24141685ab" # Use code from our SimTools library. 
def make_generator(seq_len):
    """Build a Collection_Generator whose length oracle is centered on seq_len."""
    cgen = Collection_Generator()
    cgen.get_len_oracle().set_mean(seq_len)
    return cgen

def make_seqs(cgen, is_pc, train_count, test_count):
    """Simulate train and test sequence sets with cgen.

    Protein-coding (is_pc=True) sequences are AT-rich (relative frequencies
    A:2, C:1, G:1, T:2); non-coding sequences are uniform over A, C, G, T.
    Returns (train_set, test_set).
    """
    # Relative nucleotide frequencies in A, C, G, T order.
    # (The original had a dead `else: pass` branch; a conditional
    # expression states both cases directly.)
    freqs = [2, 1, 1, 2] if is_pc else [1, 1, 1, 1]
    cgen.get_seq_oracle().set_frequencies(freqs)
    train_set = cgen.get_sequences(train_count)
    test_set = cgen.get_sequences(test_count)
    return train_set, test_set

simulator = make_generator(BASES)
pc_train, pc_test = make_seqs(simulator, True, PC_SEQUENCES, PC_SEQUENCES)
nc_train, nc_test = make_seqs(simulator, False, NC_SEQUENCES, NC_SEQUENCES)
print("Train on", len(pc_train), "PC seqs")
print("Train on", len(nc_train), "NC seqs")
def make_DNN():
    """Build and compile the 1D CNN: one Conv1D layer, Flatten, one sigmoid neuron."""
    print("make_DNN")
    print("input shape:", INPUT_SHAPE)
    dnn = Sequential()
    # Single trainable convolution scanning WIDTH-base patterns with FILTERS kernels.
    dnn.add(Conv1D(filters=FILTERS, kernel_size=WIDTH, strides=STRIDE,
                   padding="same", input_shape=INPUT_SHAPE))
    # Data shape: [SAMPLES, BASES, FILTERS] -> flatten for the dense output layer.
    dnn.add(Flatten())
    dnn.add(Dense(1, activation="sigmoid", dtype=np.float32))
    dnn.compile(optimizer='adam',
                loss=BinaryCrossentropy(from_logits=False),
                metrics=['accuracy'])  # report accuracy in addition to the loss
    dnn.build(input_shape=INPUT_SHAPE)
    return dnn

model = make_DNN()
print(model.summary())

from keras.callbacks import ModelCheckpoint

def do_cross_validation(X, y):
    """Train a fresh model on each of the first FOLDS splits of a SPLITS-way KFold.

    The best epoch (maximum validation accuracy) of any fold is checkpointed
    to MODELPATH; each fold's learning curves are plotted.
    """
    mycallbacks = [ModelCheckpoint(
        filepath=MODELPATH,
        save_best_only=True,
        monitor='val_accuracy',
        mode='max')]
    splitter = KFold(n_splits=SPLITS)  # NOTE: KFold does not shuffle by default
    fold = 0
    for train_index, valid_index in splitter.split(X):
        if fold >= FOLDS:
            break  # only train the requested number of folds
        fold += 1
        X_train = X[train_index]  # inputs for training
        y_train = y[train_index]  # labels for training
        X_valid = X[valid_index]  # inputs for validation
        y_valid = y[valid_index]  # labels for validation
        print("MODEL")
        # Construct a fresh model on every fold; otherwise each fold keeps
        # improving the same weights. (BUG FIX: the original had a redundant
        # double assignment `model = model = make_DNN()`; the unused
        # `cv_scores` accumulator is also removed.)
        model = make_DNN()
        print("FIT")
        start_time = time.time()
        history = model.fit(X_train, y_train,
                            epochs=EPOCHS,
                            verbose=1,  # ascii art while learning
                            callbacks=mycallbacks,  # runs at end of each epoch
                            validation_data=(X_valid, y_valid))
        elapsed_time = time.time() - start_time
        print("Fold %d, %d epochs, %d sec" % (fold, EPOCHS, elapsed_time))
        # Plot every metric recorded in history (loss, accuracy, val_*).
        pd.DataFrame(history.history).plot(figsize=(8, 5))
        plt.grid(True)
        plt.gca().set_ylim(0, 1)  # any losses > 1 would be off the scale
        plt.show()
bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc) plt.title('ROC') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.legend() plt.show() # + id="6K93tLYpbJHi"
Notebooks/CNN_Demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from pathlib import Path from datetime import datetime import json import simplejson EXPERIMENT_FOLDER = "./experiments/channel_permutation_test" Path(EXPERIMENT_FOLDER).mkdir(exist_ok=True) # - import os os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # + import numpy as np import pandas as pd import tensorflow as tf from IPython.display import clear_output import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns sns.reset_defaults() sns.set() print('Physical Devices:') for dev in tf.config.list_physical_devices(): print(dev) # - from zscomm.agent import Agent from zscomm.comm_channel import CommChannel from zscomm.synth_teacher import SyntheticTeacher from zscomm.data import * from zscomm.play_game import * from zscomm.loss import * from zscomm.experiment import Experiment from zscomm.meta_experiment import * from zscomm.plot_game import plot_game from zscomm.analysis import * # ## Load Data: # + NUM_CLASSES = 3 BATCH_SIZE = 32 CHANNEL_SIZE = 5 TRAIN_DATA, TEST_DATA = get_simple_card_data(num_classes=NUM_CLASSES) # + def generate_train_batch(): return generate_batch(TRAIN_DATA, batch_size=BATCH_SIZE, num_classes=NUM_CLASSES) def generate_test_batch(): return generate_batch(TEST_DATA, batch_size=BATCH_SIZE, num_classes=NUM_CLASSES) # - # # Run Experiments def create_channel_permutation_experiment(channel_size=5, epochs=200, **exp_kwargs): agent = Agent(channel_size, NUM_CLASSES) start_temp = 10 end_temp = 0.1 temp_anneal_end_epoch = 200 a = -np.log(end_temp / start_temp) / temp_anneal_end_epoch def play_params(epoch): if epoch < temp_anneal_end_epoch: channel_temp = float(start_temp * np.exp(-a*epoch)) else: channel_temp = end_temp return { 'channel_size': channel_size, 'p_mutate': 0, 'message_permutation': True, 'channel_temp': 
channel_temp, } return Experiment( generate_train_batch, generate_test_batch, play_params=play_params, student=agent, teacher=agent, loss_fn=student_pred_matches_test_class, max_epochs=epochs, lr=1e-2, step_print_freq=10, **exp_kwargs ) permutation_experiment = MetaExperiment( create_experiment_fn=create_channel_permutation_experiment, num_experiments=2, epochs=200, export_location=None, ) games_played, _ = permutation_experiment.experiments[0]['experiment'].run_tests() permutation_experiment.experiments[0]['experiment'].student.summary() permutation_experiment.run() for item in permutation_experiment.experiments: item['experiment'].plot_training_history() plt.show() print('After training', len(permutation_experiment.experiments), 'agents we ran', 2*len(permutation_experiment.results), '"stranger-encounters"', 'with the follow zero-shot coordination results:') permutation_experiment.print_results() zs_results = [ metrics['mean_ground_truth_f1'] for stranger_pairings in permutation_experiment.results for metrics in stranger_pairings['vanilla_params_test_metrics'] ] print('Final mean zero-shot test performance: ', round(float(np.mean(zs_results)), 4), '+-', round(float(np.std(zs_results)), 4)) for item in permutation_experiment.experiments: if item['status'] == 'Complete': total_time = sum([ x['seconds_taken'] for x in item['experiment'].training_history ]) print(int(total_time / 3600), 'hours,', int(total_time / 60), 'mins and', int(total_time) % 60, 'seconds taken for experiment', item['index']) res_path = Path(f'{EXPERIMENT_FOLDER}/results.json') with res_path.open(mode='w') as f: json.dump({'zero_shot_coordination_scores': permutation_experiment.results}, f) permutation_experiment.experiments # ## Analyse Results def load_channel_permutation_experiment(path): config = json.load((path / 'config.json').open(mode='r')) results = json.load((path / 'results.json').open(mode='r')) history = json.load((path / 'training_history.json').open(mode='r')) agent = 
def did_converge_to_global_optima(experiment):
    """True when the run reached near-perfect zero-shot test performance."""
    return experiment.results['mean_ground_truth_f1'] > 0.9

def did_converge_to_local_optima(experiment):
    """True when the run plateaued at a partial (local-optimum) protocol."""
    return 0.9 > experiment.results['mean_ground_truth_f1'] > 0.6

def get_category(experiment):
    """Label an experiment by its convergence outcome, used to group the plots."""
    # (Typo fix: the original labels read "Coverged".)
    if did_converge_to_global_optima(experiment):
        return 'Converged to Global Optima'
    if did_converge_to_local_optima(experiment):
        return 'Converged to Local Optima'
    return 'Did Not Converge'

# BUG FIX: the original comprehensions iterated `enumerate(permutation_experiment.)`
# (a SyntaxError) and `enumerate(permutation_experiment)` (a MetaExperiment, not a
# sequence of Experiment objects). Both are meant to walk the `experiments` list
# loaded from disk in the preceding cell.
df_train = pd.DataFrame([
    {
        'Epoch': epoch,
        'Loss': train_item['loss'],
        'Experiment': f"Run {index}",
        'Category': get_category(experiment),
    }
    for index, experiment in enumerate(experiments)
    for epoch, train_item in enumerate(experiment.training_history)
])
df_test = pd.DataFrame([
    {
        'Epoch': epoch,
        'Performance': train_item['test_metrics']['mean_ground_truth_f1'],
        'Protocol Diversity': train_item['test_metrics']['mean_protocol_diversity'],
        'Experiment': f"Run {index}",
        'Category': get_category(experiment),
    }
    for index, experiment in enumerate(experiments)
    for epoch, train_item in enumerate(experiment.training_history)
    if 'test_metrics' in train_item  # test metrics are only logged periodically
])

fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot()
sns.lineplot(x='Epoch', y='Loss', data=df_train, label='Train Loss', ax=ax)
sns.lineplot(x='Epoch', y='Performance', data=df_test, label='Test Performance', ax=ax)
sns.lineplot(x='Epoch', y='Protocol Diversity', data=df_test, label='Protocol Diversity', ax=ax)
plt.ylabel('')
plt.title('Channel Permutation Training Histories')
plt.show()
experiment.student.num_classes, targets) outputs = play_game( inputs, synth, experiment.student, p_mutate = 0.0, training=False, channel_size=channel_size ) games_played.append([inputs, targets, outputs]) test_metrics = experiment.extract_test_metrics(games_played) student_error = test_metrics['mean_student_error'] return np.exp(-student_error) teacher_responsiveness = compute_teacher_responsiveness(exp) round(teacher_responsiveness, 4) student_responsiveness = compute_student_responsiveness(exp) round(student_responsiveness, 4)
notebooks/channel-permutation-experiment.ipynb
def most_prolific_automaker(year, cars=None):
    """Return the automaker that released the most new car models in `year`.

    `cars` defaults to the module-level `data` list loaded from CAR_DATA;
    pass an explicit list of car dicts to operate on another dataset.
    """
    cars = data if cars is None else cars
    released = [car for car in cars if car["year"] == year]
    counts = Counter(car["automaker"] for car in released)
    # most_common(1)[0] is an (automaker, model_count) pair.
    # (BUG FIX: the original unpacked the count into a variable named `year`,
    # shadowing the parameter.)
    automaker, _count = counts.most_common(1)[0]
    return automaker

def get_models(automaker, year, cars=None):
    """Return the set of models `automaker` released in `year`.

    A set is returned to avoid duplicate models. `cars` defaults to the
    module-level `data` list, as in most_prolific_automaker.
    """
    cars = data if cars is None else cars
    # A set comprehension replaces the original's needless Counter-then-set.
    return {car["model"] for car in cars
            if car["year"] == year and car["automaker"] == automaker}
bitesofpy/130_car_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os from os import listdir from os.path import isfile, join import numpy as np from tqdm import tqdm import csv # + dss = [ 'electron-38323' #0 eminus-position-Mom-52294 #,'kplus-position-Mom-50588' #1 ,'pionminus-39144' #1 - kplus yerine bunu kullaniyoruz piminus-position-Mom-39144 ,'muon-62190' #2 muminus-position-40000 ,'pionzero-35674' #3 pionzero-position-Mom-41118 ,'proton-36793' #4 proton-position-Mom-20358 ] ds_base = '/home/yalmalioglu/dataset5d/500sp_0padding_evts/' cls_f=[] for d in tqdm(range(len(dss))): #ds = 'proton-position-Mom-20358' ds = dss[d] evt_dir = join(ds_base,ds) evt_list = listdir(evt_dir) print(ds) for f in evt_list[:20000]: #take 20k events per each particle if isfile(join(evt_dir, f)) and f.endswith(".csv"): cls_f.append([d, join(ds,f)]) #f_evts = [[d, join(ds,f)] for f in listdir(evt_dir) if isfile(join(evt_dir, f)) and f.endswith(".csv")] #cls_f.append(f_evts) # - #print(len(cls_f[0])) print(cls_f[0]) np.random.shuffle(cls_f) test_ind= len(cls_f)//10 #10percent for test print(test_ind) print(len(cls_f)) # + write_dir='/home/schefke/PIDNet/data' with open(join(write_dir,'test_files20k.csv'), 'w') as f_t: wr = csv.writer(f_t) for row in cls_f[0:test_ind]: wr.writerow(row) with open(join(write_dir,'train_files20k.csv'), 'w') as f_t: wr = csv.writer(f_t) for row in cls_f[test_ind:]: wr.writerow(row) # -
prepare_data/generate_train_test_files.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.1.0 # language: julia # name: julia-1.1 # --- # First, run this: import Pkg;Pkg.add(Pkg.PackageSpec(name="HyperGraphTools",path=".")) # # Index # - [CYK](cyk.ipynb) # - [Earley](earley.ipynb) # - [Visualization](visualization.ipynb) # - [TAGML tokenization](tagml-tokenize.ipynb)
index.ipynb
# imports and load data
import pandas as pd
# %matplotlib inline
df = pd.read_csv("store_data.csv")

# explore data
df.info()
df.describe()
df.head()

# total sales for the last month
df.tail(5)

# Store column suffixes, used to avoid five copies of each print statement.
STORE_IDS = ['A', 'B', 'C', 'D', 'E']

# Total sales (the final month starts at row 196)
for sid in STORE_IDS:
    print("Store {} total sales = {}".format(sid, df.iloc[196:]['store' + sid].sum()))

# average sales
for sid in STORE_IDS:
    print("Store {} average sale = {}".format(sid, df['store' + sid].mean()))

# sales on march 13, 2016 (stores A-D only, as in the original)
df_march_13 = df[df['week'] == '2016-03-13']
for sid in STORE_IDS[:4]:
    print("Store {} sale = {}".format(sid, df_march_13.iloc[0, 1:]['store' + sid]))

# worst week for store C
df_worst_C = df[df['storeC'] == df['storeC'].min()]
df_worst_C
df_worst_C.iloc[0, 0]
print("Worst week for store C = {}".format(df_worst_C.iloc[0, 0]))
df_worst_C['storeC'].values[0]
print("Minimum sale for store C = {}".format(df_worst_C['storeC'].values[0]))

# total sales during most recent 3 month period
last_three_months = df[df['week'] >= '2017-12-01']
last_three_months.iloc[:, 1:].sum()
Machine-Learning-Foundation-Nanodegree/Data-Analysis-Process/conclusions_quiz.ipynb
# Read the Savage & Sembach depletion table: two header lines, then one row
# per element with columns
#   element  log-solar-abundance  condensation-temperature  [X/H] depletion  +err  -err
f = open('savage_sembach_table5.txt','r')
header1 = f.readline()
header2 = f.readline()
element = []
Xsolar = []
Tcond = []
deltaX = []
deltaX_lo = []
deltaX_hi = []
for line in f:
    columns = line.split()
    element.append(columns[0])
    Xsolar.append(float(columns[1]))
    Tcond.append(float(columns[2]))
    deltaX.append(float(columns[3]))
    deltaX_hi.append(float(columns[4]))
    deltaX_lo.append(float(columns[5]))
f.close()

# Convert to arrays for vectorized math below.
Xsolar = np.asarray(Xsolar)
Tcond = np.asarray(Tcond)
deltaX = np.asarray(deltaX)
deltaX_lo = np.asarray(deltaX_lo)
deltaX_hi = np.asarray(deltaX_hi)

fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.set_xlim(-50,1650.0)
ax.set_ylim(0.0001, 9.9)
# NOTE(review): `nonposy` was deprecated in matplotlib 3.3 in favor of
# `nonpositive` and removed later -- confirm the pinned matplotlib version.
ax.set_yscale("log", nonposy='clip')
ax.set_xlabel(r'Condensation Temperature (K)', fontsize=14)
ax.set_ylabel(r'$[X/H] / [X/H]_\odot$', fontsize=14)
# A downward-arrow glyph used as the marker for upper limits.
arrow = u'$\u2193$'
for i, e in enumerate(element):
    x = Tcond[i]
    logy = deltaX[i]
    y = 10**logy          # depletion expressed as a linear ratio
    ylo = deltaX_lo[i]
    # Rows with ylo >= 99 appear to be a sentinel for "upper limit only"
    # (no lower error bar) -- TODO confirm against the table's notes.
    if ylo < 99:
        yerr1 = 10**(logy - ylo)
        # NOTE(review): yerr2 (the upper error) is computed but never
        # passed to errorbar, so only the lower error is drawn.
        yerr2 = 10**(logy + deltaX_hi[i])
        ax.errorbar(x, y, yerr=y-yerr1, color='k', marker='o', markersize=5)
    else:
        # Upper limits are drawn as a downward arrow instead of a point.
        ax.plot(x, y, color='k', marker=arrow, markersize=10)
    # Put labels for these elements to the left of the point to reduce overlap.
    if (e=='Ar' or e=='S' or e=='Ga' or e=='Mn' or e=='Na' or e=='Cr' or e=='Co'):
        ax.text(x-10, 1.05*y, e, ha='right')
    else:
        ax.text(x+10, 1.05*y, e, ha='left')

# Dotted reference line at no depletion ([X/H] ratio of 1).
plt.plot([-50,1650], [1,1], 'k:', lw=1)

# purely empirical power law eyeball fit to guide the eye
xmin = 500
xmax = 1600
x = np.arange(xmin, xmax, 10)
p = 2        # power-law exponent of the eyeball fit
yscale = 4   # total decades of depletion spanned between xmin and xmax
logy = -yscale * ((x-xmin)/(xmax-xmin))**p
plt.plot(x, 10**logy, color='gray', linestyle='solid', lw=10, alpha=0.3, zorder=99)

fig.tight_layout()
plt.savefig('depletion.pdf')
atomic/.ipynb_checkpoints/depletion-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, name: python3}
# ---

# Install dependencies (Colab).
# !pip install torch torchvision matplotlib tqdm numpy

### Part 1: boilerplate training loop

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils, datasets
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

# %matplotlib inline

# Use the GPU when one is available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class Network(nn.Module):
    """Single linear layer mapping a flattened image to the label vector."""

    def __init__(self, dataset):
        super(Network, self).__init__()
        x, y = dataset[0]
        c, h, w = x.size()
        out = y.size(0)
        self.net = nn.Sequential(nn.Linear(c * h * w, out))

    def forward(self, x):
        # Flatten (n, c, h, w) -> (n, c*h*w); the backward pass is automatic.
        n, c, h, w = x.size()
        return self.net(x.view(n, c * h * w))


class FashionMNISTProcessedDataset(Dataset):
    """FashionMNIST wrapper returning one-hot encoded labels."""

    def __init__(self, root, train=True):
        # BUG FIX: the original hard-coded train=True here and ignored the
        # constructor argument, so a "validation" split silently received
        # training data.
        self.data = datasets.FashionMNIST(root=root, train=train,
                                          download=True,
                                          transform=transforms.ToTensor())
        self.e = torch.eye(10)  # identity rows serve as one-hot vectors

    def __getitem__(self, i):
        x, y = self.data[i]
        # One-hot label; use y.unsqueeze(0).float() instead for scalar labels.
        return x, self.e[y].float()

    def __len__(self):
        return 100  # deliberately tiny subset; use len(self.data) for all


def plot_train_loss(loss):
    """Plot the recorded training loss per step."""
    plt.plot(range(len(loss)), loss, label='loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title("Training Loss per Epoch")
    plt.legend()
    plt.show()


train_dataset = FashionMNISTProcessedDataset('/tmp/fashionmnist', train=True)
model = Network(train_dataset).to(device)  # honor `device` (CPU fallback works)
loss_func = torch.nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.0001)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=20,
                                           shuffle=True, num_workers=2,
                                           pin_memory=True)

losses = []
loop = tqdm(total=len(train_loader) * 100, position=0)
for epoch in range(100):
    for x, y in train_loader:
        # BUG FIX: `.cuda(async=True)` is a SyntaxError on Python >= 3.7
        # ('async' became a keyword); use non_blocking transfers instead.
        x = x.to(device, non_blocking=True)
        y = y.to(device, non_blocking=True)
        optimizer.zero_grad()
        loss = loss_func(model(x), y)
        loss.backward()
        optimizer.step()
        loop.set_description('loss:{:.4f}'.format(loss.item()))
        loop.update(1)
        # Store the float, not the tensor, so each autograd graph is freed.
        losses.append(loss.item())
loop.close()
plot_train_loss(losses)


### Part 2: training vs validation set

class Network(nn.Module):
    """Three-layer MLP classifier over flattened images (redefines Part 1)."""

    def __init__(self, dataset):
        super(Network, self).__init__()
        x, y = dataset[0]
        c, h, w = x.size()
        out = y.size(0)
        self.net = nn.Sequential(
            nn.Linear(c * h * w, 1000),
            nn.ReLU(),
            nn.Linear(1000, 500),
            nn.ReLU(),
            nn.Linear(500, out),
        )

    def forward(self, x):
        n, c, h, w = x.size()
        return self.net(x.view(n, c * h * w))


# FashionMNISTProcessedDataset and plot_train_loss from Part 1 are reused;
# the originals re-defined identical copies here.

def plot_both_loss(loss, vloss):
    """Plot training and validation losses per epoch on one figure."""
    x = range(len(loss))
    plt.plot(x, loss, label='Training loss')
    plt.plot(x, vloss, label='Validation loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title("Losses per Epoch")
    plt.legend()
    plt.show()


def validation_loop():
    """Mean MSE loss over the validation loader (no gradient tracking)."""
    batch_losses = []
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for x, y in valid_loader:
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)
            batch_losses.append(loss_func(model(x), y).item())
    return sum(batch_losses) / len(batch_losses)


# Load the training and validation splits of FashionMNIST.
train_dataset = FashionMNISTProcessedDataset('/tmp/fashionmnist', train=True)
validation_dataset = FashionMNISTProcessedDataset('/tmp/fashionmnist', train=False)

model = Network(train_dataset).to(device)
loss_func = torch.nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.001)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=20,
                                           shuffle=False, num_workers=2,
                                           pin_memory=True)
valid_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=20,
                                           shuffle=True, num_workers=2,
                                           pin_memory=True)

losses = []
valid_losses = []
loop = tqdm(total=len(train_loader) * 100, position=0)
for epoch in range(100):
    batch_losses = []
    for x, y in train_loader:
        x = x.to(device, non_blocking=True)
        y = y.to(device, non_blocking=True)
        optimizer.zero_grad()          # reset accumulated gradients
        loss = loss_func(model(x), y)  # forward pass + loss
        loss.backward()                # backprop
        optimizer.step()               # weight update
        loop.set_description('loss:{:.4f}'.format(loss.item()))
        loop.update(1)
        batch_losses.append(loss.item())
    # Validation once per epoch; average the epoch's training loss.
    valid_losses.append(validation_loop())
    losses.append(sum(batch_losses) / len(batch_losses))
loop.close()

plot_both_loss(losses, valid_losses)
Lab2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# # Basic Python Revision
# ### Assignment questions

'''
Ques 1: Answer these 3 questions without typing code. Then type code to check your answer.
What is the value of the expression 4 * (6 + 5)
What is the value of the expression 4 * 6 + 5
What is the value of the expression 4 + 6 * 5
Ques 2: What is the type of the result of the expression 3 + 1.5 + 4?
Ques 3: What would you use to find a number's square root, as well as its square?
Ques 4: Given the string 'hello' give an index command that returns 'e'.
Ques 5: Reverse the string 'hello' using slicing
Ques 6: Given the string hello, give two methods of producing the letter 'o' using indexing.
Ques 7: Check if a list contains an element
Ques 8: Print the strings present in reverse order and the items also in reverse manner
Ques 9: Print the homogeneous list in descending order
Ques 10: Print odd-index items present in a list
'''

# Answer 1: parentheses change precedence
print(4 * (6 + 5))  # 44
print(4 * 6 + 5)    # 29
print(4 + 6 * 5)    # 34

# Answer 2: mixing int and float promotes the result to float
print(3 + 1.5 + 4)

# Answer 3: x ** 0.5 is the square root, x ** 2 the square
print(5 ** 0.5)

# Answers 4/5/6: indexing and slicing.
# Renamed from `str` so the builtin str() is not shadowed.
word = "hello"
print(word[1])     # 'e'
print(word[::-1])  # reversed via slicing
print(word[4])     # 'o' by positive index
print(word[-1])    # 'o' by negative index

# +
# A tiny lottery guessing game.
guess_num = 5
user_num = int(input("Guess any number: "))
if user_num > guess_num:
    print("lower down your guess")
elif user_num < guess_num:
    print("increases your guess value")
else:
    print("You won Lottery!!!")
# -

# Nested loop: fetch every item from the list, then every character of it.
a = ["akash", "mango", "apple"]
for item in a:
    for ch in item:
        print(ch, end=" ")

# Even or odd
num = int(input("Enter num: "))
if num % 2 == 0:
    print("Even")
else:
    print("Odd")

# +
# Vowel or not
vowels = ['a', 'e', 'i', 'o', 'u']
char = input("Enter character : ")
if char in vowels:
    print("Its vowel")
else:
    print("not a vowel ")
# -

# Keep guessing until the secret number is found.
a = 15
b = int(input("guess the number"))
while b != a:
    print('wrong! try again')
    b = int(input())
print('correct')

# ### Lecture 2 Assignment
'''
Q1: Write a Python function to sum all the numbers in a list
Q2: Write a Python function that takes a list and returns a new list with unique elements of the first list
Q3: Write a Python program to print the even numbers from a given list
Q4: Write a function func1() such that it can accept a variable length of argument and print all arguments value
Q5: Write a function calculation() such that it can accept two variables and calculate the addition and subtraction of them. And also it must return both addition and subtraction in a single return call
Q6: Create a function showEmployee() in such a way that it should accept employee name, and its salary and display both. If the salary is missing in the function call assign default value 9000 to salary
'''

# sol 1: the builtin sum() covers it
num_lis = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(sum(num_lis))

# +
# sol 2: unique elements, preserving first-seen order
def uniquelis(lis):
    uni_lis = []
    for item in lis:
        if item not in uni_lis:
            uni_lis.append(item)
    return uni_lis

x = [1, 1, 2, 2, 3, 3, 4, 4, 5, 5]
print(uniquelis(x))
# -

# +
# sol 3: keep only the even numbers
def evenNum(lis):
    return [n for n in lis if n % 2 == 0]

x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(evenNum(x))
# -

# +
# sol 4: variable-length positional arguments.
# Renamed from fact1 to func1 to match the assignment text above.
def func1(*args):
    for value in args:
        print(value)

func1(1, 2, 3, 4, 8, 2, 6, 9, 2, 63, 5, 5, 2, 2)
# -

# +
# sol 5: return addition and subtraction in a single return (a tuple)
def cal(a, b):
    return a + b, a - b

x, y = cal(5, 10)
print(x, y)
# -

# +
# sol 6: the assignment demands default salary 9000 (original used 9999)
def showEmployee(name, salary=9000):
    print(f"{name} earning is {salary}")

showEmployee("Akash", 10000)
# -

# # Lecture 3
import pandas as pd

data = pd.Series([1, 2, 3, 4, 5, 6])
data
# Index and backing values of a Series.
data.index
data.values

data = pd.Series([10, 20, 30, 40, 50, 60, 70],
                 index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
data.index
data.values
data['c']       # label-based lookup
data['a':'c']   # label slices are inclusive of both ends

# Series from a tuple, with explicit dtype and index.
a = (10, 20, 30, 40, 50, 60, 70)
df = pd.Series(a, dtype=float, index=range(1, 8))
df

# Series from a dict: keys become the index.
a = {'a': 22, 'b': 23, 'c': 25}
df = pd.Series(a)
df

# ### DataFrame
# +
a = [1, 2, 3, 4]
b = [5, 6, 7, 8]
numSet = list(zip(a, b))
df = pd.DataFrame(numSet, columns=['x', 'y'])
df
# -

# Round-trip through CSV.
df.to_csv("dataframe1.csv")
df_read = pd.read_csv("dataframe1.csv")
df_read
df_read['x']

# dict to pandas DataFrame
d = {'a': [1, 2], 'b': [3, 4]}
df = pd.DataFrame(d)
df

# ### Assignment
'''
Q1. Write a Pandas program to convert Series of lists to one Series.
Q2. Write a Pandas program to compare the elements of the two Pandas Series.
Q3. Sample Series:
Q4. Write a Pandas program to add, subtract, multiply and divide two Pandas Series.
'''

# sol1: expand each inner list into its own columns
data = pd.Series([[1, 2], [3, 4], [5, 6], [7, 8]])
data = data.apply(pd.Series)
data

# +
# sol2: elementwise comparisons between two Series
a = pd.Series([1, 2, 3])
b = pd.Series([3, 2, 1])
print(a > b)
print(a < b)
print(a == b)
# -

# +
# sol 3/4: elementwise arithmetic between two Series
x, y = pd.Series([2, 4, 6, 8, 10]), pd.Series([1, 3, 5, 7, 10])
print(x + y)
print(x - y)
print(x * y)
print(x / y)
# -
Lec 3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# # pandas Series basics
import pandas as pd
import numpy as np

# A Series of strings carrying an explicit `name` attribute.
names = ["Alice", "Bob", "Marley"]
x = pd.Series(names, name="Penan")
x
x.head()

# A Series of decimals gets a float dtype automatically.
x = pd.Series([10.1, 20.1, 30.1])
x

# np.nan marks a missing value inside an otherwise numeric Series.
x = pd.Series([1, 3, np.nan, 12, 6, 8])
x
30. Pandas/Series.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# # Importing Brevitas networks into FINN
#
# Steps covered by this notebook:
# 1. load a trained Brevitas/PyTorch model,
# 2. export it to FINN-ONNX and visualize it with Netron,
# 3. import it into FINN and apply cleanup transformations.
#
# `showSrc()` prints the source of a function call; `showInNetron()` renders
# a network interactively inside the notebook.
import onnx
from finn.util.visualization import showSrc, showInNetron

# ## 1. Load the trained PyTorch model
#
# The LFC-w1a1 example: a binarized fully connected network trained on
# MNIST.  Inspect the parametrized PyTorch definition first.
from brevitas_examples import bnn_pynq
showSrc(bnn_pynq.models.FC)

# Instantiate the 1-bit-weights / 1-bit-activations variant and load its
# pretrained weights.
from finn.util.test import get_test_model
lfc = get_test_model(netname = "LFC", wbits = 1, abits = 1, pretrained = True)
lfc

# Run one example MNIST image through the network in PyTorch/Brevitas.
import torch
import matplotlib.pyplot as plt
from pkgutil import get_data
import onnx
import onnx.numpy_helper as nph

raw_i = get_data("finn.data", "onnx/mnist-conv/test_data_set_0/input_0.pb")
input_tensor = onnx.load_tensor_from_string(raw_i)
input_tensor_npy = nph.to_array(input_tensor)
input_tensor_pyt = torch.from_numpy(input_tensor_npy).float()
imgplot = plt.imshow(input_tensor_npy.reshape(28, 28), cmap='gray')

from torch.nn.functional import softmax
# Forward pass in PyTorch/Brevitas, then softmax for class probabilities.
produced = lfc.forward(input_tensor_pyt).detach()
probabilities = softmax(produced, dim=-1).flatten()
probabilities

# Bar chart of the per-digit predicted probabilities.
import numpy as np
objects = [str(x) for x in range(10)]
y_pos = np.arange(len(objects))
plt.bar(y_pos, probabilities, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Predicted Probability')
plt.title('LFC-w1a1 Predictions for Image')
plt.show()

# ## 2. Call Brevitas FINN-ONNX export and visualize with Netron
#
# FINN-ONNX export differs from plain ONNX export: quantized weights are
# exported directly (not the quantization logic), special annotations keep
# sub-8-bit (down to bipolar) quantization info, and low-bit activations
# become MultiThreshold operators.
import brevitas.onnx as bo
export_onnx_path = "/tmp/LFCW1A1.onnx"
input_shape = (1, 1, 28, 28)
bo.export_finn_onnx(lfc, input_shape, export_onnx_path)

# Inspect the export interactively; MatMul weights are {-1, +1} and the
# activations are Sign functions.
showInNetron('/tmp/LFCW1A1.onnx')

# ## 3. Import into FINN and call cleanup transformations
#
# ModelWrapper exposes the ONNX graph and its attributes from Python.
from finn.core.modelwrapper import ModelWrapper
model = ModelWrapper(export_onnx_path)
model.graph.node[8]

# By convention the MatMul's second input is a pre-initialized weight tensor.
model.get_initializer(model.graph.node[8].input[1])

# Quantization annotation and shape for the same tensor.
model.get_tensor_datatype(model.graph.node[8].input[1]).name
model.get_tensor_shape(model.graph.node[8].input[1])

# Cleanup: shape inference plus constant folding, then save and re-inspect.
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.infer_shapes import InferShapes
model = model.transform(InferShapes())
model = model.transform(FoldConstants())
export_onnx_path_transformed = "/tmp/LFCW1A1-clean.onnx"
model.save(export_onnx_path_transformed)
showInNetron('/tmp/LFCW1A1-clean.onnx')

# The cleaned graph is smaller (e.g. the input reshaping collapses to a
# single Reshape node).  Execute it with FINN's internal ONNX runner and
# confirm it still matches the PyTorch output.
# +
import finn.core.onnx_exec as oxe
input_dict = {"0": nph.to_array(input_tensor)}
output_dict = oxe.execute_onnx(model, input_dict)
produced_finn = output_dict[list(output_dict.keys())[0]]
produced_finn
# -

# True when the FINN execution reproduces the PyTorch result.
np.isclose(produced, produced_finn).all()
notebooks/basics/1_brevitas_network_import.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# # Perceptron learning algorithm

# +
# For plotting
import matplotlib.pyplot as plt
# Random sampling
from random import choice
# Mathematical operations
from numpy import array, dot, random, linspace, zeros

# %matplotlib inline

# Training set, each row: bias | input A | input B -> desired output.
trainings_set = [
    (array([1, 0, 0]), 0),
    (array([1, 0, 1]), 1),
    (array([1, 1, 0]), 1),
    (array([1, 1, 1]), 1),
]

def heaviside(value):
    """Step activation: 0 for negative input, otherwise 1."""
    return 0 if value < 0 else 1

# Seed the RNG so runs are reproducible.
random.seed(18)

# Initialise the weight vector with zeros.
w = zeros(3)

def fit(trainings_set, w, iterations=25):
    """Train the perceptron online; return (error history, weight history)."""
    errors = []
    weights = []
    for i in range(iterations):
        # Pick a random training example.
        example = choice(trainings_set)
        x = example[0]
        y = example[1]
        # Actual output under the current weights.
        y_hat = heaviside(dot(w, x))
        # Prediction error drives the update.
        error = y - y_hat
        errors.append(error)
        # Perceptron update rule.
        w += error * x
        # BUG FIX: store a COPY after the update.  The original appended the
        # array object itself before `w += ...`, so every stored history
        # entry aliased the same mutating array.
        weights.append(w.copy())
    return errors, weights

errors, weights = fit(trainings_set, w)
print("Letzter Gewichtsvektor: " + str(weights[-1]))

print("\nAusgabe mit Trainingsset: ")
for x, y in trainings_set:
    y_hat = heaviside(dot(x, w))
    print("{}: {} -> {}".format(x, y, y_hat))

# Error curve over the training iterations.
fignr = 1
plt.figure(1, figsize=(10, 10))
plt.plot(errors)
plt.style.use('seaborn-whitegrid')
plt.xlabel('Iteration')
plt.ylabel(r'$(y - \hat y)$')
plt.show()
# -

# ## Basic example with sklearn

# +
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_random_state
from sklearn.utils.multiclass import unique_labels
import matplotlib.pyplot as plt
# %matplotlib inline

class PerceptronEstimator(BaseEstimator, ClassifierMixin):
    """Minimal online perceptron with a Heaviside activation."""

    def __init__(self, n_iterations=20, random_state=None):
        self.n_iterations = n_iterations  # number of learning iterations
        self.random_state = random_state  # seed for the RNG
        self.errors = []  # per-step training errors, kept for plotting

    def heaviside(self, x):
        """Step function used by the single neuron."""
        if x < 0:
            return 0
        return 1

    def fit(self, X=None, y=None):
        """Train on X [N, D] (N examples, D features) and targets y [N]."""
        self.random_state_ = check_random_state(self.random_state)
        self.w = self.random_state_.random_sample(np.size(X, 1))  # random init
        X, y = check_X_y(X, y)  # validate: X.shape[0] == y.shape[0]
        self.classes = unique_labels(y)  # record the distinct target values
        # Keep the training data for the fitted-check in predict().
        self.X_ = X
        self.y_ = y
        for i in range(self.n_iterations):
            # Stochastic updates with batch size 1: draw one random example.
            rand_index = self.random_state_.randint(0, np.size(X, 0))
            x_ = X[rand_index]
            y_ = y[rand_index]
            # Actual output, error, and weight update.
            y_hat = self.heaviside(np.dot(self.w, x_))
            error = y_ - y_hat
            self.errors.append(error)  # collected for visualisation
            self.w += error * x_
        return self

    def predict(self, x):
        """Classify one input vector x."""
        check_is_fitted(self, ['X_', 'y_'])
        y_hat = self.heaviside(np.dot(self.w, x))
        return y_hat

    def plot_error(self):
        """Plot the per-step training error."""
        plt.figure(1, figsize=(5, 5))
        plt.plot(self.errors)
        plt.style.use('seaborn-whitegrid')
        plt.xlabel('Iterationen')
        plt.ylabel(r'$(y - \hat y)$')
        plt.show()

# Train on the same truth table (bias column included).
X = np.array([[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])
y = np.array([0, 1, 1, 1])

Perceptron = PerceptronEstimator(20, 10)
Perceptron.fit(X, y)
for index, x in enumerate(X):
    p = Perceptron.predict(x)
    print("{}: {} -> {}".format(x, y[index], p))

# Error plot
Perceptron.plot_error()
# -

# ## scikit-learn Perceptron estimator

# +
from sklearn.datasets import load_iris    # iris flower data set
from sklearn.linear_model import Perceptron  # standard sklearn perceptron

iris = load_iris()
X = iris.data[:, (2, 3)]  # petal length and width
y = iris.target
my_per = Perceptron(random_state=49, max_iter=10000, tol=None)  # tol=None: no early stopping
my_per.fit(X, y)
# Classify a few known flowers: iris setosa, versicolor, virginica.
y_prediction = my_per.predict([[1.4, 0.2], [4.7, 1.4], [6.0, 2.5]])
print(y_prediction)
# -

# ## Adaline
# - instead of delta(w) = mu * (y - y_hat) * x with y_hat in {0, 1},
# - Adaline uses delta(w) = mu * (y - net) * x, where net is the continuous
#   net input between the class values.

# +
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_random_state
from sklearn.utils.multiclass import unique_labels
import numpy as np
import matplotlib.pyplot as plt
from random import choice
import math
# %matplotlib inline

class AdalineEstimator(BaseEstimator, ClassifierMixin):
    """ADALINE trained by stochastic gradient descent on the net input."""

    def __init__(self, eta=.001, n_iterations=500, random_state=None):
        self.n_iterations = n_iterations  # learning iterations
        self.eta = eta  # learning rate
        self.random_state = random_state  # RNG seed
        self.errors = []  # squared errors, kept for plotting
        self.w = []  # weight vector
        self.wAll = []  # weight history for plotting

    def net_i(self, x):
        """Net input: dot product of input and weights."""
        return np.dot(x, self.w)

    def activation(self, x):
        """Linear (identity) activation on the net input."""
        return self.net_i(x)

    def output(self, x):
        """Threshold the activation to the class labels {-1, 1}."""
        if self.activation(x) >= 0.0:
            return 1
        return -1

    def fit(self, X=None, y=None):
        """Train with stochastic gradient descent (batch size 1)."""
        self.random_state_ = check_random_state(self.random_state)  # init RNG
        self.w = self.random_state_.random_sample(np.size(X, 1))  # random init
        X, y = check_X_y(X, y)  # validate: X.shape[0] == y.shape[0]
        # Keep the training data for the fitted-check in predict().
        self.X_ = X
        self.y_ = y
        for i in range(self.n_iterations):
            # Pick a random training example.
            rand_index = self.random_state_.randint(0, np.size(X, 0))
            x_ = X[rand_index]
            y_ = y[rand_index]
            net_j = np.dot(x_, self.w)  # continuous net input
            error = (y_ - net_j) ** 2   # squared error for the history
            self.errors.append(error)
            # Coordinate-wise gradient step; a snapshot dict is stored after
            # each coordinate update (3 snapshots per iteration, as in the
            # plotting code which indexes wAll modulo 500).
            for j in range(3):
                weight = {}
                self.w[j] += self.eta * x_[j] * (y_ - net_j)
                weight[0] = self.w[0]
                weight[1] = self.w[1]
                weight[2] = self.w[2]
                self.wAll.append(weight)
        # FIX: return self per the sklearn estimator contract.
        return self

    def predict(self, x):
        """Return the predicted class for one example."""
        check_is_fitted(self, ['X_', 'y_'])
        return self.output(x)

    def plot(self):
        """Plot the error curve, then the data with decision boundaries."""
        x1, x2, colors = [], [], []
        for i in range(self.X_.shape[0]):
            x1.append(self.X_[i][1])
            x2.append(self.X_[i][2])
            colors.append('r' if self.y_[i] == 1 else 'b')  # red = +1, blue = -1
        plt.plot(self.errors)
        plt.figure(1)
        plt.show()
        # Scatter plot with the learned separating lines.
        plt.figure(2)
        plt.scatter(x1, x2, c=colors)
        x1Line = np.linspace(0.0, 1.0, 2)
        # Boundary x2 as a function of x1 given weights (w0 + w1*x1 + w2*x2 = 0).
        x2Line = lambda x1, w0, w1, w2: (-x1 * w1 - w0) / w2
        alpha = 0.0
        for idx, weight in enumerate(self.wAll):
            if (idx % 500 == 0):
                # NOTE(review): alpha is never reset, so every line after the
                # first multiple of 500 is drawn fully opaque - confirm intent.
                alpha = 1.0
            # FIX: the original plotted the identical line twice with the same
            # label, doubling every legend entry.
            plt.plot(x1Line, x2Line(x1Line, weight[0], weight[1], weight[2]),
                     alpha=alpha, linestyle='solid', label=str(idx), linewidth=1.5)
        plt.legend(loc='best', shadow=True)

### MAIN ###
random_state = check_random_state(10)
I, o = [], []
# Build a linearly separable toy set: class +1 shifted up, class -1 down.
for x in random_state.random_sample(20):
    y = random_state.random_sample()
    I.append([1, x, y + 1.0])
    o.append(1)
for x in random_state.random_sample(20):
    y = random_state.random_sample()
    I.append([1, x, y - 1.0])
    o.append(-1)
X = np.array(I)
y = np.array(o)

Adaline = AdalineEstimator(eta=0.01, n_iterations=500, random_state=10)
Adaline.fit(X, y)
Adaline.plot()
AI/other_examples/Lernen im einfachen neuronalen Netz.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# # Solutions to the Exercises on Python Basics

# ### <span style="color:green"> Exercise: Your First Program </span>
# Print out the message "Hello, World!"
#
# #### <span style="color:blue"> Solution </span>
# Printing is straightforward, but remember the quotation marks around text.
print("Hello, World!")

# ### <span style="color:green"> Exercise: Variable Names </span>
# Why does the code below yield an error?  Fix it so the name is printed.
#
# NOTE: this cell raises a NameError ON PURPOSE -- Python is case-sensitive
# and no variable `Name` (capital N) exists.
name = "Arthur"
print(Name)

# #### <span style="color:blue"> Solution </span>
# Fix the typo in the print statement:
name = "Arthur"
print(name)

# ### <span style="color:green"> Exercise: Understanding Variables </span>
# Step through the assignments; what is each variable after every line?
length = 5
width = 3.5
length = length * 2
width = width - 1.5
print(length, width)

# #### <span style="color:blue"> Solution </span>
# Printing intermediate values is a good way to check your expectations.
length = 5
width = 3.5
# FIX: the original output string misspelled "widgth".
print("initial length:", length, ". Initial width:", width)
length = length * 2
print("length after multiplied with two: ", length)
width = width - 1.5
print("width after subtracted 1.5: ", width)
print("Final result below:")
print(length, width)

# ### <span style="color:green"> Exercise: Using Variables to Perform Simple Calculations </span>
# Alice weighs 65 kg and Bob 70 kg: store them, total them, then average
# with Charlie's 85 kg.
#
# #### <span style="color:blue"> Solution </span>
# The important detail: totals and averages are computed FROM the variables.
# +
alice_weight = 65
bob_weight = 70
total_weight = alice_weight + bob_weight
print("Total weight:", total_weight, "kg.")

charlie_weight = 85
average_weight = (alice_weight + bob_weight + charlie_weight) / 3
print("Average weight:", average_weight, "kg.")
# -

# ### <span style="color:green">Exercise: Datatypes</span>
# Make each variable hold the value two with a different datatype.
#
# #### a) <class 'int'> -- the most intuitive form:
int_2 = 2
print(type(int_2))

# #### b) <class 'float'> -- any number of zeros after the decimal point
# works (2., 2.0, 2.000000 are all valid):
float_2 = 2.0
print(type(float_2))

# #### c) <class 'str'> -- quotation marks make the String in Python:
str_2 = "2"
print(type(str_2))

# ### <span style="color:green">Exercise: Importing a Module </span>
# Print today's date with `datetime`.  A plain `import datetime` would NOT
# work here: the call below refers to `date` directly, so the name must be
# imported from the module.
# +
from datetime import date

print("date today: ", date.today())
# -

# All done!
solutions/solutions_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''Bayeslogic'': conda)' # name: python38564bitbayeslogiccondab4d8225b150b4e36bd7aa762b5fb9ff1 # --- # <center><h1>Note on Bayesian Sequential Probability Test</h1></center> # # <center><NAME></center> # ## The Problem # Consider a dynamic regression model in the following form: # <a id='OLS'></a> # \begin{equation} # y_{t} = x_{t}'\beta_{t}+\epsilon_{t}, \tag{1} # \end{equation} # where $x_{t}$ is a $k$ dimensional vector of regressors and $\beta_{t}$ is a $k\times 1$ vector of regression coefficients, and the error terms $\epsilon_{t}$ is i.i.d $\mathcal{N}(0,\sigma^{2})$. Assume in the history $t \in \{ 1,...,n \}$, the coefficients in Equation [(1)](#OLS) are constant and equal to $\beta_{0}$. We want to monitor new data from $n+1$ onward to test if there is any structural break happening since. The null hypothesis is # \begin{equation} # \beta_{t} = \beta_{0} \, \forall t # \end{equation} # against the alternative that from some unknown time $\kappa>n$, the coefficients changes to $\beta_{1} \neq \beta_{0}$. To be more precise, we assume the following statistical process: # \begin{eqnarray*} # H_{0}: y_{t} &=& x_{t}'\beta_{0} + \epsilon_{t} \text{ for } t = 0,1,2,...,\kappa-1,\\ # H_{1}: y_{t} &=& x_{t}'\beta_{1} + \epsilon_{t} \text{ for } t = \kappa, \kappa+1,... # \end{eqnarray*} # Correspondingly we have the following hypotheses test, define stochastic process $\zeta_{t}$: # \begin{eqnarray*} # H_{0}: \varepsilon_{t} &=& \epsilon_{t} \text{ for } t = 0,1,2,...,\kappa-1,\\ # H_{1}: \varepsilon_{t} &=& x_{t}'(\beta_{1}-\beta_{0}) + \epsilon_{t} \text{ for } t = \kappa, \kappa+1,... # \end{eqnarray*} # To simplify the problem we assume the unconditional mean $E(\varepsilon_{t})=\mu$. 
# So we arrive at the following stochastic process:
# \begin{eqnarray*}
# H_{0}: \zeta_{t} &=& \epsilon_{t} \text{ for } t = 0,1,2,...,\kappa-1,\\
# H_{1}: \zeta_{t} &=& \mu + \epsilon_{t} \text{ for } t = \kappa, \kappa+1,...
# \end{eqnarray*}
# The reason to use the unconditional mean is that $\beta_{1}$ is unknown. The covariance $cov(x_{t},\epsilon_{t})=0$ by the standard exogeneity assumption of the regression model.
# In practice, practitioners estimate the model with historical data, and start monitoring the out-of-sample performance of the estimated model with newly arrived data. Let $\hat{\beta}^{n}$ denote the OLS estimates with historical data up to $t=n$. Under the null, the residuals can be approximated as
# \begin{eqnarray*}
# \zeta_{t} &=& \epsilon_{t} - x_{t}'(\hat{\beta}^{n} - \beta_{0})\\
# &=& \epsilon_{t}-x_{t}'((\sum_{i=0}^{n}x_{i}'x_{i})^{-1}\sum_{i=0}^{n}x_{i}'\epsilon_{i})\\
# &\approx& \epsilon_{t}.
# \end{eqnarray*}
# The approximation is reasonable when $\hat{\beta}^{n}$ is estimated consistently. Under the alternative, the residuals can be approximated as
# \begin{eqnarray*}
# \zeta_{t} &=& \epsilon_{t} - x_{t}'(\hat{\beta}^{n} - \beta_{1})\\
# &=& \epsilon_{t} - x_{t}'(\beta_{0} -\beta_{1}+(\sum_{i=0}^{n}x_{i}'x_{i})^{-1}\sum_{i=0}^{n}x_{i}'\epsilon_{i})\\
# &\approx& \epsilon_{t} - x_{t}'(\beta_{0}-\beta_{1})\\
# &\approx& \epsilon_{t} - \mu.
# \end{eqnarray*}
archived/BSPT_note.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from nlpkit.tfidf_df import tfidf_df s = "An accessory dwelling unit or detached accessory dwelling unit (sometimes called a mother-in-law apartment) is a separate living space within a house or on the same property as an existing house. These units aren’t legal unless they have been established through a permit process. A legally permitted unit in the home is called an accessory dwelling unit (ADU). A legally permitted unit on the property (but not within the home) is called a backyard cottage or detached accessory dwelling unit (DADU). The property owner must live in either the house or the attached or detached accessory dwelling unit. Tiny houses, with foundations, are considered DADUs." s tfidf_df(s, lemmatize=True, remove_stopwords=True, remove_punct=True)
notebooks/test_tfidf_df.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Model import using the Petab format

# In this notebook, we illustrate how to use [pyPESTO](https://github.com/icb-dcm/pypesto.git) together with [PEtab](https://github.com/petab-dev/petab.git) and [AMICI](https://github.com/icb-dcm/amici.git). We employ models from the [benchmark collection](https://github.com/benchmarking-initiative/benchmark-models-petab), which we first download:

# +
import pypesto
import amici
import petab

import os
import numpy as np
import matplotlib.pyplot as plt

# %matplotlib inline

# !git clone --depth 1 https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git tmp/benchmark-models || (cd tmp/benchmark-models && git pull)

folder_base = "tmp/benchmark-models/Benchmark-Models/"
# -

# ## Import

# ### Manage PEtab model

# A PEtab problem comprises all the information on the model, the data and the parameters to perform parameter estimation. We import a model as a `petab.Problem`.

# +
# a collection of models that can be simulated
#model_name = "Zheng_PNAS2012"
model_name = "Boehm_JProteomeRes2014"
#model_name = "Fujita_SciSignal2010"
#model_name = "Sneyd_PNAS2002"
#model_name = "Borghans_BiophysChem1997"
#model_name = "Elowitz_Nature2000"
#model_name = "Crauste_CellSystems2017"
#model_name = "Lucarelli_CellSystems2018"
#model_name = "Schwen_PONE2014"
#model_name = "Blasi_CellSystems2016"

# the yaml configuration file links to all needed files
yaml_config = os.path.join(folder_base, model_name, model_name + '.yaml')

# create a petab problem
petab_problem = petab.Problem.from_yaml(yaml_config)
# -

# ### Import model to AMICI

# The model must be imported to pyPESTO and AMICI. Therefore, we create a `pypesto.PetabImporter` from the problem, and create an AMICI model.

# +
importer = pypesto.PetabImporter(petab_problem)

model = importer.create_model()

# some model properties
print("Model parameters:", list(model.getParameterIds()), '\n')
print("Model const parameters:", list(model.getFixedParameterIds()), '\n')
print("Model outputs: ", list(model.getObservableIds()), '\n')
print("Model states: ", list(model.getStateIds()), '\n')
# -

# ### Create objective function

# To perform parameter estimation, we need to define an objective function, which integrates the model, data, and noise model defined in the PEtab problem.

# +
import libsbml

# Convert SBML local parameters to global ones before creating the objective.
converter_config = libsbml.SBMLLocalParameterConverter()\
    .getDefaultProperties()
petab_problem.sbml_document.convert(converter_config)

obj = importer.create_objective()

# for some models, hyperparameters need to be adjusted
#obj.amici_solver.setMaxSteps(10000)
#obj.amici_solver.setRelativeTolerance(1e-7)
#obj.amici_solver.setAbsoluteTolerance(1e-7)
# -

# We can request variable derivatives via `sensi_orders`, or function values or residuals as specified via `mode`. Passing `return_dict`, we obtain the direct result of the AMICI simulation.

ret = obj(petab_problem.x_nominal_scaled, mode='mode_fun', sensi_orders=(0,1), return_dict=True)
print(ret)

# The problem defined in PEtab also defines the fixing of parameters, and parameter bounds. This information is contained in a `pypesto.Problem`.

problem = importer.create_problem(obj)

# In particular, the problem accounts for the fixing of parameters.

print(problem.x_fixed_indices, problem.x_free_indices)

# The problem creates a copy of the objective function that takes into account the fixed parameters. The objective function is able to calculate function values and derivatives. A finite difference check whether the computed gradient is accurate:

objective = problem.objective

ret = objective(petab_problem.x_nominal_free_scaled, sensi_orders=(0,1))
print(ret)

# +
eps = 1e-4

def fd(x):
    """Central finite-difference approximation of the objective gradient at x.

    Used to cross-check the sensitivities computed by the AMICI objective.
    """
    grad = np.zeros_like(x)
    # Fixed: the original kept a second counter `j` that always equalled `i`.
    for i in range(len(x)):
        mask = np.zeros_like(x)
        mask[i] += eps
        valinc, _ = objective(x + mask, sensi_orders=(0,1))
        valdec, _ = objective(x - mask, sensi_orders=(0,1))
        grad[i] = (valinc - valdec) / (2 * eps)
    return grad

fdval = fd(petab_problem.x_nominal_free_scaled)
print("fd: ", fdval)
print("l2 difference: ", np.linalg.norm(ret[1] - fdval))
# -

# ### In short

# All of the previous steps can be shortened by directly creating an importer object and then a problem:

importer = pypesto.PetabImporter.from_yaml(yaml_config)
problem = importer.create_problem()

# ## Run optimization

# Given the problem, we can perform optimization. We can specify an optimizer to use, and a parallelization engine to speed things up.

# +
optimizer = pypesto.ScipyOptimizer()

# engine = pypesto.SingleCoreEngine()
engine = pypesto.MultiProcessEngine()

# do the optimization
result = pypesto.minimize(problem=problem, optimizer=optimizer,
                          n_starts=10, engine=engine)
# -

# ## Visualize

# The results are contained in a `pypesto.Result` object. It contains e.g. the optimal function values.

result.optimize_result.get_for_key('fval')

# We can use the standard pyPESTO plotting routines to visualize and analyze the results.

# +
import pypesto.visualize

ref = pypesto.visualize.create_references(
    x=petab_problem.x_nominal_scaled,
    fval=obj(petab_problem.x_nominal_scaled))

pypesto.visualize.waterfall(result, reference=ref, scale_y='lin')
pypesto.visualize.parameters(result, reference=ref)
doc/example/petab_import.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # A brief, basic introduction to Python for scientific computing - Chapter 1 # # ## Background/prerequisites # This is part of a brief introduction to Python; please find links to the other chapters and authorship information [here](https://github.com/MobleyLab/drug-computing/blob/master/other-materials/python-intro/README.md) on GitHub. This is the first chapter in this content. # # For best results with these notebooks, we recommend using the [Table of Contents nbextension](https://github.com/ipython-contrib/jupyter_contrib_nbextensions/tree/master/src/jupyter_contrib_nbextensions/nbextensions/toc2) which will provide you with a "Navigate" menu in the top menu bar which, if dragged out, will allow you to easily jump between sections in these notebooks. To install, in your command prompt, use: # * `conda install -c conda-forge jupyter_contrib_nbextensions` # * `jupyter contrib nbextension install --user` # * Open `jupyter notebook` and click the `nbextensions` button to enable `Table of Contents`. # (See the [jupyter nbextensions documentation](https://github.com/ipython-contrib/jupyter_contrib_nbextensions) for more information on using these.) # # ## Introduction/Overview # # Python is an extremely usable, high-level programming language that is quickly becoming a standard in scientific computing. It is open source, completely standardized across different platforms (Windows / MacOS / Linux), immensely flexible, and easy to use and learn. Programs written in Python are highly readable and often much shorter than comparable programs written in other languages like C or Fortran. 
Moreover, Python comes pre-loaded with standard modules that provide a huge array of functions and algorithms, for tasks like parsing text data, manipulating and finding files on disk, reading/writing compressed files, and downloading data from web servers. Python is also capable of all of the complex techniques that advanced programmers expect, like object orientation. # # Python is somewhat different than languages like C, C++, or Fortran. In the latter, source code must first be compiled to an executable format before it can be run. In Python, there is no compilation step; instead, source code is interpreted on the fly in a line-by-line basis. That is, Python executes code as if it were a script. The main advantage of an interpreted language is that it is flexible; variables do not need to be declared ahead of time, and the program can adapt on-the-fly. The main disadvantage, however, is that numerically-intensive programs written in Python typically run slower than those in compiled languages. This would seem to make Python a poor choice for scientific computing; however, time-intensive subroutines can be compiled in C or Fortran and imported into Python in such a manner that they appear to behave just like normal Python functions. # # Fortunately, many common mathematical and numerical routines have been pre-compiled to run very fast and grouped into two packages that can be added to Python in an entirely transparent manner. The NumPy (Numeric Python) package provides basic routines for manipulating large arrays and matrices of numeric data. The SciPy (Scientific Python) package extends the functionality of NumPy with a substantial collection of useful algorithms, like minimization, Fourier transformation, regression, and other applied mathematical techniques. Both of these packages are also open source and growing in popularity in the scientific community. 
With NumPy and SciPy, Python become comparable to, perhaps even more competitive than, expensive commercial packages like MatLab. # # This tutorial will cover the Python 3.x language version. Some packages still use the 2.7 series, but support is being dropped and modern packages must move to the 3.x series. # # ## Getting started # # ### Installation # # To use Python, one must install the base interpreter. In addition, there are a number of applications that provide a nice GUI-driven editor for writing Python programs. Currently the preferred method of installing Python on most platforms (Mac, Linux, Windows) is the Anaconda Python distribution, which is free and provides a wide variety of standard scientific Python packages as well as a built in package manager called `conda` which makes it easy to install a variety of other packages you might need. # # # ### Other Resources # # Python comes standard with extensive documentation. This can typically be accessed via built-in help, such as typing `help` on the Python prompt. One can search through function and module definitions, or find general information on the language using the table of contents. The entire manual, and many other helpful documents and links, can also be found at: # # http://docs.python.org # # The Python development community also maintains an extensive wiki. In particular, for programming beginners, there are several pages of tutorials and help at: # # http://wiki.python.org/moin/BeginnersGuide # # For those who have had some programming experience and don't need to start learning Python from scratch, the Dive Into Python website is an excellent tutorial that can teach you most of the basics in a few hours: # # http://www.diveintopython3.net # # ## Jupyter notebooks and the format of THIS document # # This document is formatted as a Jupyter notebook; Jupyter is a convenient way of running Python within your web browser, interspersed with documents and other types of images. 
# Basic elements in Jupyter notebooks are cells (such as this one) which can hold "markdown" formatted text (plain text with special formatting symbols) or code (Python code, specifically); the former is simply displayed, while the latter can be run. # Code blocks can typically be "run" to display the output of Python commands, so you should be able to run the examples in what follows and modify them as you like. Here is a sample code block; try it out by clicking on it and hitting shift-enter to evaluate it (or by clicking the "run cell" button at the top of your screen). # This is a code block 1+2 # ## Using the interactive interpreter # # On your computer, you can start Python by typing "python" at a command prompt or terminal. You should see something similar to the following (note that this is formatted as a code block but it will not "run"): Python 3.6.2 |Anaconda custom (x86_64)| (default, Sep 21 2017, 18:29:43) [GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> # The ">>>" at the bottom indicates that Python is awaiting your input. This is the interactive interpreter; Python programs do not need to be compiled and commands can be entered directly, step-by-step. In the interactive interpreter, Python reads your commands and gives responses: >>> 1 # ### This notebook is its own interpreter # # Here, in this Jupyter notebook, each code cell effectively functions as part of an interactive interpreter, so you will see very similar behavior (with some additional bells and whistles) compared to what you would see in the commmand line on your own computer. # # As we will show later, Python can also read scripts, or files that are pre-written lists of commands to execute in sequence. 
With the exception that output after each line is suppressed when reading from a file, there is no difference in the way Python treats commands entered interactively, in Jupyter notebooks, and in scripts; the latter are simply read in as if they were typed at the interactive prompt or in a Jupyter notebook (except that Jupyter notebooks also provide some special commands). This gives us a powerful way to test out commands in your programs by entering them interactively while writing code. # # Comments in Python are indicated using the "#" symbol. Python ignores everything after them until reaching the end of the line. >>> 1 #I just entered the number 1 # ### Breaking long commands # Long commands in Python can be split across several lines using the line continuation character "\". When using this character, subsequent lines must be indented by exactly the same amount of space. This is because spacing in Python is syntactic, as we will discuss in greater depth later. >>> 1.243 + (3.42839 - 4.394834) * 2.1 \ ... + 4.587 - 9.293 + 34.234 \ ... - 6.2 + 3.4 # Here, Python automatically draws the ellipses mark to indicate that the command you are entering spans more than one line. Alternatively, lines are continued implicitly without using the "\" character if enclosing characters (parenthesis, brackets) are present >>> (1.243 + (3.42839 - 4.394834) * 2.1 ... + 4.587 - 9.293 + 34.234 ... - 6.2 + 3.4) # Typically the use of parenthesis is preferred over the "\" character for line continuation. # # It is uncommon in practice, but more than one command can be entered on the same line in a Python script using the ";" symbol: >>> print(1 + 4); print(6 - 2) # Avoid using this notation in programs that you write, as it will make your code more dense and less legible. # # ### Help in Python # # There is a generic help function in Python that will tell you about almost everything. 
For example, it will tell you what the proper arguments for a function are: >>> help(sum) # The help function will even work with functions and variables that you create yourself, and Python provides a very easy way to add extra descriptive text that the help function can use, as we will discuss later on. # # Python is a case sensitive language. # # That means that variables and functions must be given the correct case in order to be recognized. Similarly, the following two variables are different: >>> Var = 1 >>> var = 2 >>> Var >>> var # ### Exiting interactive Python # # To exit the Python interactive prompt on your computer (not relevant for Jupyter notebooks, as here), we need to use an end-of-file character. Under Windows, this corresponds to the Ctrl-Z key combination; in Linux, it corresponds to Ctrl-D. Alternatively, one can use the exit() function: >>> exit()
other-materials/python-intro/Intro_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# #! pip install -U climetlab --quiet
# #! pip install -U climetlab_s2s_ai_challenge --quiet
# -

import climetlab as cml
import climetlab_s2s_ai_challenge

print(f'Climetlab version : {cml.__version__}')
print(f'Climetlab-s2s-ai-challenge plugin version : {climetlab_s2s_ai_challenge.__version__}')

import os

# Continuous-integration runs on GitHub Actions download only the tiny "-dev"
# fragment of each dataset.
# Warning : do not use the "-dev" datasets for training ML models.
is_test = '-dev' if os.environ.get('GITHUB_ACTIONS') else ''

# # Using grib data

FORMAT = 'grib'

# Download the total-precipitation (tp) fields for one start date from the
# training-input dataset:

cmlds = cml.load_dataset(
    "s2s-ai-challenge-training-input" + is_test,
    origin='ecmwf',
    date=20200102,
    parameter='tp',
    format=FORMAT,
)

# Iterate over the first two grib fields:

for field in list(cmlds)[:2]:
    print(field)
    print(field.valid_datetime(), field.shape)
    print(field.to_numpy())

# The climetlab dataset converts to an xarray.Dataset or a pandas.DataFrame:

cmlds.to_xarray()

# The 2-metre temperature (2t) works the same way; the "date" and "parameter"
# arguments also accept lists of values.
cml.load_dataset(
    "s2s-ai-challenge-training-input" + is_test,
    origin='ecmwf',
    date=20200102,
    parameter='2t',
    format=FORMAT,
).to_xarray()

# +
#import numpy as np
#dates = [np.datetime64('2020-01-02'),'2020-01-09']#,'20200116']
#cml.load_dataset("s2s-ai-challenge-training-input",
#                 origin='eccc',
#                 date=dates,
#                 parameter=['2t','tp'],
#                 format=FORMAT).to_xarray()
# -

# Data from the forecast-input dataset can be retrieved in a similar fashion:

cml.load_dataset(
    "s2s-ai-challenge-forecast-input" + is_test,
    origin='ecmwf',
    date=["20200102", "20200109"],
    parameter='2t',
    format=FORMAT,
).to_xarray()

# ### Computing average and plotting

ds = cml.load_dataset(
    "s2s-ai-challenge-forecast-input" + is_test,
    origin='ecmwf',
    date=["20200102", "20200109"],
    parameter='2t',
    format=FORMAT,
).to_xarray()

# Mean over the forecast horizon.
mean1 = ds.mean(dim="lead_time")
mean1.compute()
#cml.plot_map(mean1.isel(forecast_time=0, realization=0))

# Mean over the start dates.
mean2 = ds.mean(dim="forecast_time")
mean2.compute()
#cml.plot_map(mean2.isel(lead_time=2, realization=0))

# Mean of t2m grouped by the valid time of each step.
mean3 = ds[['t2m', 'valid_time']].groupby('valid_time').mean()['t2m']
mean3.compute()
# cml.plot_map(mean3.isel(forecast_time=2, realization=0))
notebooks/demo_grib.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ima # language: python # name: ima # --- # # Scratchbook # ## Imports from torchvision import models from torchvision import transforms import PIL.Image as Image import numpy as np import matplotlib.pyplot as plt import torch from torch.autograd import Variable # ## Model # Instanciate the model model = models.vgg19() # ## Data img = Image.open("./examples/pebbles.jpg") # Preprocess the image so that it can be fed to the network preprocess = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), #normalize ]) img_tensor = preprocess(img) # ``unsqueeze_`` means the transformation is made inplace # Here unsqueeze enables us to create a batch of 1 image img_tensor.unsqueeze_(0) # ## Refactoring def get_output_from_layer(model, base_img_path, layer_id): """ Get the features maps outputs from any layer using a forward pass from a base image in a VGG model Parameters ------------ - model : torchvision.models.vgg.VGG Model to be investigated - base_img_path : string Path of the image to be used as input - layer_id : int Layer Number. Validity range: 0 - 36 (included) See ``model._modules['features']`` for more details Returns ------------ - layer_output : numpy.ndarray Array of features maps activations """ if not layer_id in np.arange(0, 37): raise ValueError("``layer_id`` argument invalid. " f"Got: {layer_id}. " "Expected a value between 0 and 36 included. 
") # Create an empty list features_blobs = list() # Create hook to dump features maps into the list created above def hook_feature(module, input, output): features_blobs.append(output.data.cpu().numpy()) #features_blobs.append(output.grad.data.cpu().numpy()) # Get model features features = model._modules["features"] # We can hook any layer from above features._modules.get(str(layer_id)).register_forward_hook(hook_feature); #features._modules.get(str(layer_id)).register_backward_hook(hook_feature); # Load image img = Image.open(base_img_path) plt.imshow(img) # Preprocess the image so that it can be fed to the network preprocess = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), #normalize ]) img_tensor = preprocess(img) img_tensor = img_tensor.unsqueeze(0) input_img = Variable(torch.zeros(img_tensor.size()).type_as(img_tensor.data), requires_grad=True) print(input_img.grad) # Capture the features outputs at the layer given above model.forward(input_img) print(input_img.grad) # Why does it output None here? 
layer_output = np.array(features_blobs[0]) return layer_output layer_output = get_output_from_layer(model=model, base_img_path="./examples/pebbles.jpg", layer_id=5) layer_output # ## TODO: # # * Texture synthesis pseudo-code # * Generate a ranVGG instance (right now we ignore the construction step) # * Compute the gram matrix of an image at any layer l # * Backpropagate all the way to the image input layer # * Texture synthesis implementation # ## Rubbish # + # Create an empty list features_blobs = list() # Create hook to dump features maps into the list created above def hook_feature(module, input, output): features_blobs.append(output.data.cpu().numpy()) # - # Get model features features = model._modules["features"] features classifier = model._modules["classifier"] classifier # We can hook any layer from above features._modules.get('0').register_forward_hook(hook_feature); # + # Create an empty list features_blobs = list() # Create hook to dump features maps into the list created above def hook_feature(module, input, output): features_blobs.append(output.data.cpu().numpy()) #features_blobs.append(output.grad.data.cpu().numpy()) # Get model features features = model._modules["features"] # We can hook any layer from above features._modules.get(str(layer_id)).register_forward_hook(hook_feature); #features._modules.get(str(layer_id)).register_backward_hook(hook_feature); # Load image img = Image.open(base_img_path) plt.imshow(img) # Preprocess the image so that it can be fed to the network preprocess = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), #normalize ]) img_tensor = preprocess(img) img_tensor = img_tensor.unsqueeze(0) input_img = Variable(torch.zeros(img_tensor.size()).type_as(img_tensor.data), requires_grad=True) print(input_img.grad) # Capture the features outputs at the layer given above model.forward(input_img) print(input_img.grad) # Why does it output None here? 
layer_output = np.array(features_blobs[0]) layer_output # - # ## Gradient hooking trial # Load image img = Image.open("./examples/pebbles.jpg") plt.imshow(img); # Preprocess the image so that it can be fed to the network preprocess = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), #normalize ]) img_tensor = preprocess(img) img_tensor = img_tensor.unsqueeze(0) img_tensor.shape input_img = Variable(torch.zeros(img_tensor.size()).type_as(img_tensor.data), requires_grad=True) #model.zero_grad() output = model(input_img) g = torch.zeros(1, 10, 3, 224, 224) for i in range(10): g[:, i] = torch.autograd.grad(output[:, i].sum(), input_img, retain_graph=True)[0].data print(g)
Scratchbook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + [markdown] deletable=true editable=true
# ### Collective Burden for Sequencing Documents

# + deletable=true editable=true
import requests
from collections import Counter
import random as randomlib
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import networkx as nx
import itertools

# Show full frames when inspecting the (wide) document-term matrices.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)

# + deletable=true editable=true
'''
Knowledge Graph
'''
# Load the pre-built weighted knowledge graph; its node labels form the
# concept vocabulary used throughout this notebook.
kg_path = "../graph_query/graphs/weighted_knowledge_graph.gpickle"
kg = nx.read_gpickle(kg_path)

# NOTE(review): [1:] drops the first node -- presumably a dummy/root node;
# confirm against the graph-construction code.
kg_labels = [str(x) for x in list(kg.nodes())[1:]]
n_labels = len(kg_labels)

# Label -> column position, so term vectors can be filled in O(1) per hit
# instead of scanning kg_labels with list.index().
kg_label_pos = {label: pos for pos, label in enumerate(kg_labels)}


# + deletable=true editable=true
def get_queries_based_on_node(node_label):
    """Return the candidate query strings derived from a knowledge-graph node.

    Topic/concept nodes expand to their neighbours; sub-concept nodes are
    leaves and map to themselves. Untyped nodes are tagged (in place, on the
    shared graph) as ConceptNode and expanded like one. Unknown node types
    return None, as in the original implementation.
    """
    node = kg.node[node_label]
    if "NodeType" not in node:
        # Side effect kept from the original: persist the default type.
        kg.node[node_label]["NodeType"] = "ConceptNode"
        return list(kg.neighbors(node_label))
    node_type = node["NodeType"]
    if node_type == "TopicNode" or node_type == "ConceptNode":
        return list(kg.neighbors(node_label))
    elif node_type == "SubConceptNode":
        return [node_label]
    else:
        # Fall through (returns None) for unrecognised node types.
        pass


'''
Returns a list of queries depending on the type of the node closest
to the query.
args - query(str)
returns [] of str
'''
def query_formulator(query, label):
    """Return a de-duplicated list of queries for `label`.

    `query` is unused but kept for interface compatibility with callers.
    """
    queries = [label]
    # NOTE(review): get_queries_based_on_node may return None for unknown
    # node types, which would raise here -- unchanged from the original.
    for child in get_queries_based_on_node(label):
        queries.append(child)
    return list(set(queries))


# + deletable=true editable=true
'''
Get content from a given set of URLs.
'''
def get_content(url):
    """Download every document listed (one URL per line) in the file `url`.

    Returns (docs, index, es_order): docs maps url-line -> raw response body,
    index maps url-line -> insertion position, es_order lists the url-lines
    in download order. Keys keep the trailing newline from readlines();
    downstream code strips it.
    """
    es_order = []
    docs = {}
    index = {}
    counter = 0
    # Fixed: the file handle was opened but never closed.
    with open(url, 'r') as f:
        url_lines = f.readlines()
    for doc_url in url_lines:  # renamed: the loop variable used to shadow `url`
        try:
            docs[doc_url] = requests.get(doc_url).content
        except requests.RequestException:
            # Skip unreachable documents. The original bare `except` also
            # swallowed KeyboardInterrupt and programming errors.
            continue
        index[doc_url] = counter
        es_order.append(doc_url)
        counter += 1
    return docs, index, es_order


# + deletable=true editable=true
'''
Term Frequency Array for a particular document.
'''
def get_tfd(content):
    """Return a 0/1 presence vector over kg_labels for one document.

    NOTE(review): despite the name, this is binary presence, not a term
    frequency -- each matching label contributed exactly 1 in the original
    Counter-based implementation as well.
    """
    text = content.lower()  # hoisted: was re-lowered once per label
    frequency_arr = [0] * n_labels
    for label in kg_labels:
        if label.lower() in text:
            frequency_arr[kg_label_pos[label]] = 1
    return frequency_arr


# + deletable=true editable=true

# + deletable=true editable=true
'''
Building word_data a document (rows) by term frequency (columns) matrix.
'''
def get_matrices(content, index):
    """Build the document-term matrix and the term co-occurrence matrix.

    Returns (document_term_frequency, dtf_asint, coocc), where
    coocc = DTF^T . DTF.
    """
    tfd_data = {}
    for url, cont in content.items():
        tfd_data[url] = get_tfd(cont)
    # Fixed: row labels are now placed by their recorded position. The
    # original iterated index.keys() and relied on dict ordering, which is
    # arbitrary on this notebook's Python 2 kernel, so labels could be
    # misaligned with the rows filled via url_index below.
    tfd_arr = [None] * len(index)
    for key, pos in index.items():
        tfd_arr[pos] = key.replace("\n", "")
    word_data = {'TFD': tfd_arr}
    for label in kg_labels:
        word_data[label] = [None] * len(index)
    for url, words_in_doc in tfd_data.items():
        url_index = index[url]
        for i in range(n_labels):
            word = kg_labels[i]
            word_data[word][url_index] = words_in_doc[i]
    '''
    (DTF)^T(DTF) = Coocurence Matrix
    '''
    document_term_frequency = pd.DataFrame(word_data).set_index('TFD')
    dtf_asint = document_term_frequency.astype(int)
    coocc = dtf_asint.T.dot(dtf_asint)
    return document_term_frequency, dtf_asint, coocc


# + deletable=true editable=true

# + [markdown] deletable=true editable=true
# ### Calculating Relationship Score: S(i, j)

# + deletable=true editable=true
def get_relationship_between_concepts(concept_1, concept_2, document_term_frequency):
    """Return the co-occurrence count S(i, j) between two concept labels.

    NOTE(review): reads the module-level `coocc`; `document_term_frequency`
    is only used to map concept names to positional indices, so both must
    come from the same get_matrices() call.
    """
    concept_1_index = document_term_frequency.columns.get_loc(concept_1)
    concept_2_index = document_term_frequency.columns.get_loc(concept_2)
    return coocc.iloc[concept_1_index, concept_2_index]

# + [markdown] deletable=true editable=true
# ###
Significance of a concept in a document: \lambda(c, i) # + deletable=true editable=true def get_significance_score(concept, index, document, document_term_frequency, dtf_asint, coocc): if(document == None): return 0 concept_index = document_term_frequency.columns.get_loc(concept) freq = dtf_asint.iloc[index[document]][concept_index] coocc_row = coocc.iloc[concept_index,:] r = np.array(coocc_row) if(sum(r) == 0): return freq return (freq)+np.count_nonzero(r) # + deletable=true editable=true def get_right(content, index, top_n, document_term_frequency, dtf_asint, coocc): doc_to_concepts_list = {} for each_document in content.keys(): doc_to_concepts_list[each_document] = [] print(doc_to_concepts_list) for each_concept in kg_labels: m = 0.0 d_to_v = {} for each_document in content.keys(): d_to_v[each_document] = get_significance_score(each_concept, index, each_document, document_term_frequency, dtf_asint, coocc) if(d_to_v[each_document] > m): m = d_to_v[each_document] for d, v in d_to_v.items(): if(v == m): doc_to_concepts_list[d].append((each_concept, v)) final_doc_to_concept_list = {} for d, v in doc_to_concepts_list.items(): v.sort(key=lambda x:x[1]) if(len(v) >= top_n): final_doc_to_concept_list[d] = [v[i][0] for i in range(0, top_n, 1)] else: final_doc_to_concept_list[d] = [x[0] for x in v] relevant_concepts= set() for d, v in final_doc_to_concept_list.items(): for each in v: relevant_concepts.add(each[0]) return doc_to_concepts_list, relevant_concepts # + [markdown] deletable=true editable=true # ### Key Sections k_c # + deletable=true editable=true def get_doc_to_concepts_list(content, index, top_n, document_term_frequency, dtf_asint, coocc): doc_to_concept_list = {} relevant_concepts_to_sequence = set() for each_document in content.keys(): rt = [] rc = [] for each_concept in kg_labels: s = get_significance_score(each_concept, index,each_document, document_term_frequency, dtf_asint, coocc) if(s <= 0): continue if("NodeType" not in kg.node[each_concept]): 
continue elif(kg.node[each_concept]["NodeType"] == "ConceptNode"): rc.append((each_concept, s)) elif(kg.node[each_concept]["NodeType"] == "TopicNode"): rt.append((each_concept, s)) rt.sort(key=lambda x:x[1]) rt = rt[::-1] rc.sort(key=lambda x:x[1]) rc = rc[::-1] key_concepts = [] while(len(rc) and len(key_concepts) < top_n): key_concepts.append(rc[0][0]) print(rc[0][0], rc[0][1], each_document) relevant_concepts_to_sequence.add(rc[0][0]) rc.pop(0) while(len(rt) and len(key_concepts) < top_n): key_concepts.append(rt[0][0]) relevant_concepts_to_sequence.add(rt[0][0]) rt.pop(0) for each in rt: relevant_concepts_to_sequence.add(each[0]) for each in rc: relevant_concepts_to_sequence.add(each[0]) doc_to_concept_list[each_document] = key_concepts return doc_to_concept_list, relevant_concepts_to_sequence # + deletable=true editable=true def get_relevant_concepts_for_lp(doc_to_concepts_list): rel = [] for key,val in doc_to_concepts_list.items(): for each in val: rel.append(each) return rel # + [markdown] deletable=true editable=true # ### Comprehension Burden # + deletable=true editable=true def f_cb(sig_score, key_sig_score, relationship): return sig_score+key_sig_score def get_related_concepts(document, index, document_term_frequency): concepts = [] a = np.array(document_term_frequency.iloc[index[document]]) z = a.nonzero() if(len(z[0]) == 0): return [] for x in np.nditer(z[0]): concepts.append(document_term_frequency.columns[x]) return concepts def get_cb_document(document, dc, visited, relevant, document_term_frequency, dtf_asint, coocc, index): document_burden = 0.0 ds = get_related_concepts(document, index, document_term_frequency) for d in ds: burden = 0.0 count = 0 for c in dc: if(get_relationship_between_concepts(d, c, document_term_frequency) > 0): count += 1 if(d not in visited): burden += get_significance_score(c, index, document, document_term_frequency, dtf_asint, coocc) else: count += 1 if(count > 0): document_burden += burden/count return document_burden # + 
deletable=true editable=true # + [markdown] deletable=true editable=true # ### Sequence Generation # + deletable=true editable=true def get_linear(nodes): parents = [] for each in nodes: if(each in kg.nodes and kg.nodes[each]["NodeType"] == "TopicNode"): parents.append(each) linear = [] for p in parents: linear.append(p) children = kg.neighbors(p) for c in children: if c in nodes and kg.nodes[c]["NodeType"] == "ConceptNode": linear.append(c) for each in nodes: if each not in linear: linear.append(each) return linear def get_weighted_sequences(nodes): parents = [] for each in nodes: if(each in kg.nodes and kg.nodes[each]["NodeType"] == "TopicNode"): parents.append(each) weighted = [] for p in parents: weighted.append(p) children = kg.neighbors(p) all_c = [] for c in children: if(c not in nodes): continue if("weight" in kg[p][c]): all_c.append((c, kg[p][c]["weight"])) else: all_c.append((c, 0.0)) all_c.sort(key=lambda x:x[1]) all_c = all_c[::-1] for e in all_c: weighted.append(e[0]) return weighted def get_sequences(nodes): linear = get_linear(nodes) top_down = linear[::-1] weighted = get_weighted_sequences(nodes) return linear, top_down, weighted # + deletable=true editable=true def get_concepts_to_document_list(doc_to_concepts_list, relevant_concepts): concept_to_document_list = {} for each_concept in relevant_concepts: concept_to_document_list[each_concept] = [] for doc, kcs in doc_to_concepts_list.items(): if(each_concept in kcs): concept_to_document_list[each_concept].append(doc) return concept_to_document_list # + deletable=true editable=true def get_burden_for_a_sequence(docs_sequence, doc_to_concepts_list, concepts_to_document_list, document_term_frequency, dft_asint, coocc, index, relevant_concepts_to_sequence): visited = set() collective_burden = 0.0 burdens = [] for each_doc in docs_sequence: for each_ass_conc in doc_to_concepts_list[each_doc]: visited.add(each_ass_conc) burden_per_doc = get_cb_document(each_doc, doc_to_concepts_list[each_doc], visited, 
relevant_concepts_to_sequence, document_term_frequency, dtf_asint, coocc, index) collective_burden += burden_per_doc burdens.append(burden_per_doc) return collective_burden def get_burden_for_all_permutations(doc_to_concepts_list, concepts_to_document_list, document_term_frequency, dft_asint, coocc, index, relevant_concepts_to_sequence): docs = [x for x in doc_to_concepts_list.iterkeys()] perms = list(itertools.permutations(docs)) for each in perms: print(get_burden_for_a_sequence(each, doc_to_concepts_list, concepts_to_document_list, document_term_frequency, dft_asint, coocc, index, relevant_concepts_to_sequence)) # + deletable=true editable=true def get_burden_for_sequence(sequence, doc_to_concepts_list, concepts_to_document_list, document_term_frequency, dtf_asint, coocc, index): collective_burden = 0.0 docs_sequence = [] for each_con in sequence: docs_ass = concepts_to_document_list[each_con] doc_ass_size = [] for each in docs_ass: doc_ass_size.append((each, get_significance_score(each_con, index, each, document_term_frequency, dtf_asint, coocc))) doc_ass_size.sort(key=lambda x:x[1]) doc_ass_size = doc_ass_size[::-1] for each, v in doc_ass_size: if(each not in docs_sequence): docs_sequence.append(each) for each in doc_to_concepts_list.keys(): if(each not in docs_sequence): docs_sequence.append(each) visited = set() burdens = [] for each_doc in docs_sequence: for each_ass_conc in doc_to_concepts_list[each_doc]: visited.add(each_ass_conc) burden_per_doc = get_cb_document(each_doc, doc_to_concepts_list[each_doc], visited, sequence, document_term_frequency, dtf_asint, coocc, index) collective_burden += burden_per_doc burdens.append(burden_per_doc) return collective_burden, burdens # + deletable=true editable=true # + deletable=true editable=true def get_score(content, index, top_n, document_term_frequency, dtf_asint, coocc): doc_to_concepts_list, relevant_concepts_to_sequence = get_doc_to_concepts_list(content, index, top_n, document_term_frequency, dtf_asint, 
coocc) concepts_to_document_list = get_concepts_to_document_list(doc_to_concepts_list, relevant_concepts_to_sequence) linear, bottom_up, weighted = get_sequences(relevant_concepts_to_sequence) s, burden_per_doc = get_burden_for_sequence(linear, doc_to_concepts_list, concepts_to_document_list, document_term_frequency, dtf_asint, coocc, index) print("len", len(doc_to_concepts_list)) return (s, doc_to_concepts_list, max(burden_per_doc)) # + deletable=true editable=true def get_required(url): content, index, es_order = get_content(url) document_term_frequency, dtf_asint, coocc = get_matrices(content, index) return content, document_term_frequency, dtf_asint, coocc, index # + deletable=true editable=true content, document_term_frequency, dtf_asint, coocc, index = get_required("lps/engage/user_study_graph_theory_engage.txt") # + deletable=true editable=true for i in range(1, 2, 1): s, doc_to_concepts_list, m = get_score(content, index, 10, document_term_frequency, dtf_asint, coocc) for k, v in doc_to_concepts_list.items(): print(k, v) # + deletable=true editable=true import os for root, dirs, files in os.walk("lps/engage/"): for filename in files[:1]: content, document_term_frequency, dtf_asint, coocc, index = get_required("lps/engage/"+filename) print(filename) result_str = "" max_str = "" og = 0.0 og_max = 0.0 for i in range(1, 3, 1): s, doc_to_concepts_list, max_burden_doc = get_score(content, index, i, document_term_frequency, dtf_asint, coocc) if(i == 1): result_str += "& "+str(1.0) og = s max_str += "& "+str(1.0) og_max = max_burden_doc else: if(og == 0): print(s) else: result_str += "&"+"{0:.3f}".format(s/og) max_str += "&"+"{0:.3f}".format(max_burden_doc/og_max) print("burden str", result_str) print("max str", max_str) print("\n") # + deletable=true editable=true # + deletable=true editable=true 25.266666666666666 24.5 # + deletable=true editable=true # + deletable=true editable=true # + deletable=true editable=true # + deletable=true editable=true
create_lesson_plan/comprehension_burden_module/CollectiveBurden-HCOMP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ekramasif/Basic-Machine-Learning/blob/main/NLP/Embedding_Sequence_LSTM_Preprocessing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="I4pk2muJSvYz" outputId="a524e8c2-2489-4c45-9e78-25008d2c2570"
import tensorflow as tf
print(tf.__version__)

# + id="kbHHVXbKXc4z"
# A toy embedding layer: vocabulary of 10 ids, 5-dimensional vectors.
from tensorflow.keras import layers
embedding_layer = layers.Embedding(10, 5)

# + colab={"base_uri": "https://localhost:8080/"} id="mpWAb8YXX44N" outputId="f77fa31b-8922-41fa-832d-a459fab0413f"
# Look up the (randomly initialised) vectors for ids 1, 2, 3.
result = embedding_layer(tf.constant([1, 2, 3]))
result.numpy()

# + colab={"base_uri": "https://localhost:8080/"} id="BEzJFRzbYZ71" outputId="8fc34248-31c7-4068-fade-061ce53e10f1"
# Fit a tokenizer on three Bangla sentences and convert them to id sequences.
from tensorflow.keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
sentence = ['এই মাত্র পাওয়া সংবাদে জানা গেলো দেশ এর করোনা পরিস্থিতির উন্নতি হয়েছে',
            'আমাদের সমাজে মুখোশধারী মানুষের অভাব নাই',
            'আমরা দিন দিন বোকার রাজ্যে নির্বাসিত হচ্ছি']
tokenizer.fit_on_texts(sentence)
sequence = tokenizer.texts_to_sequences(sentence)
sequence

# + colab={"base_uri": "https://localhost:8080/"} id="G10BENsTafAa" outputId="dfb869cd-61e7-46de-cbc5-b698d1ed949c"
result = embedding_layer(tf.constant([1, 2, 3]))
result.numpy()

# + id="BF357AqLhRHO"
from numpy import array
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Embedding, Dense, LSTM, Bidirectional

# + id="cICBB2hTh5r7"
# Real Life Example of Classification
# Ten Bangla product reviews: the first five are positive, the last five negative.
train_ex = ['পণ্য ১০০% অরজিনাল কিন্তু আমার সাইজ যেটা আসছে ওটা আমাকে হচ্ছে না। আমার দরকার ৪২',
            'জুতা এপেক্সের ছিল একটু ভারী মনে হয়েছে জুতা এবং শক্ত। প্রোডাক্টটি ঠিক আছে যা চেয়েছিলাম তাই পেয়েছি ওভারঅল ভালো',
            'আমি বিস্মিত, ঠিক যেমনটি চেয়েছিলাম তেমনটি পেয়েছি।। ধন্যবাদ এপেক্স ধন্যবাদ দারাজ।।',
            'অসাধারণ...ধন্যবাদ দারাজ।ধন্যবাদ এপেক্স। অরিজিনাল প্রোডাক্ট দেওয়ার জন্য।',
            'বেশি বলবনা এককথায় একশতে একশ। দাম অনুযায়ী খুবইসুন্দর প্রোডাক্ট, ধন্যবাদ দারাজ এবং সেলার ভাইটিকে।',
            'খুব একটা ভালো বলা চলে না। চাইলাম ৪১ আর দিলো ৪০।।ওনারা নিজেরাই ভালো রিভিউ দেয় কাস্টমারদের দেখানোর জন্ন্যে',
            'হাটার সময় অনেক আন ইজি পা বাকাতে প্রব্লেম হয়',
            'এপেক্স এর মত এই রকম প্রোডাক্ট আশা করা যায় না',
            'এপেক্স তো সবসময়ই ভালো বাট ডেলিভারি বাজে ছিলো😡😡😡 যেদিন দেয়ার কথা এর ২ দিন পর দিছে...',
            'ফালতু সেলার। মেসেজ দিয়া বল্লাম সাইজ যাতে উল্টোপাল্টা না আসে। কেউ অরডার করে প্রতারিত হবেন না।।'
            ]
# Reviews -- negative = 0 || positive = 1 (class/labels)
train_label = array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])

# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="iBFjv-kbkdLB" outputId="b6709af8-aa2c-4c37-af7e-7bd0cb81e621"
train_ex[4]

# + colab={"base_uri": "https://localhost:8080/"} id="9OWv_Zwjkjy-" outputId="d30ec566-688c-45cb-d992-7a158e5b2c3e"
# tokenization and converting words into sequences
tokenizer = Tokenizer()
tokenizer.fit_on_texts(train_ex)
dense_train_ex = tokenizer.texts_to_sequences(train_ex)
dense_train_ex

# + id="eJClhSRVhVA4"
# def FindMaxLength(dense_train_ex):
#     maxList = max((x) for x in dense_train_ex)
#     maxLength = max(len(x) for x in dense_train_ex )
#     return maxList, maxLength
# # Driver Code
# print(FindMaxLength(dense_train_ex))

# + id="-I2K785Qjm7x"
# print(max(map(len, dense_train_ex)))

# + colab={"base_uri": "https://localhost:8080/"} id="grxvy74GlyFl" outputId="689baaef-2f31-4152-cf0a-1f1b762fc0dc"
# Length of the longest token sequence — used below as the padding length.
# NOTE(review): the parameter shadows the global `dense_train_ex`.
def longest(dense_train_ex):
    longest_list = max(len(elem) for elem in dense_train_ex)
    return longest_list
print(longest(dense_train_ex))

# + colab={"base_uri": "https://localhost:8080/"}
# + id="kFo1jnbsoxFy" outputId="eacd32c6-844e-4a56-81f0-f788906263ee"
# Return the largest element among the first n entries of arr.
# Fix: the original body read the global `dense_train_ex` instead of the
# `arr` parameter (it only worked because it was called with that global),
# and it shadowed the builtin `max`.
def largest(arr, n):
    # Initialize maximum element
    biggest = arr[0]
    # Traverse array elements from second and compare every element
    # with the current maximum
    for i in range(1, n):
        if arr[i] > biggest:
            biggest = arr[i]
    return biggest

# Driver Code
# NOTE: dense_train_ex is a list of lists, so this picks the
# lexicographically largest sequence.
n = len(dense_train_ex)
Ans = largest(dense_train_ex, n)
print("Largest in given array is", Ans)

# + colab={"base_uri": "https://localhost:8080/"} id="8AgdiZbTnIfZ" outputId="78f6166e-41fe-4f1e-82f3-b28c3fa867d1"
# padding the training documents in order to make them equal length
MAX_LENGTH = 19
padded_train_ex = pad_sequences(dense_train_ex, maxlen=MAX_LENGTH, padding='post')
for pd_sen in padded_train_ex:
    print(pd_sen)

# + colab={"base_uri": "https://localhost:8080/"} id="o1c60e4wpISB" outputId="073a8cc7-7679-4ccc-a63f-7962860feaa4"
# Largest token id inside the sequence found above.
# Fix: same defect as before — the original read the global `Ans`
# instead of the `arr` parameter.  (This redefinition intentionally
# replaces the first `largest`, as in the original notebook.)
def largest(arr, n):
    # Initialize maximum element
    biggest = arr[0]
    for i in range(1, n):
        if arr[i] > biggest:
            biggest = arr[i]
    return biggest

# Driver Code
n = len(Ans)
Ans1 = largest(Ans, n)
print("VOCAB_SIZE:", Ans1)

# + colab={"base_uri": "https://localhost:8080/"} id="hu9NqNL9n3Zb" outputId="6db9a090-cbed-4813-bd8d-bcfb63c8867a"
# Model Declaration
VOCAB_SIZE = 118
model = Sequential()
# Embedding Layer
embedding_layer = Embedding(input_dim=VOCAB_SIZE, output_dim=8, input_length=MAX_LENGTH)
model.add(embedding_layer)
# # Flatten Layer
# model.add(Flatten())
# model.add(Dense(units=160, activation='relu'))
# model.add(Dense(units=80, activation='relu'))
# model.add(Dense(units=40, activation='relu'))
# model.add(Dense(units=10, activation='relu'))
# LSTM - for better performance
# model.add(LSTM(units=128))
# Bidirectional LSTM
forward_layers = LSTM(units=128, return_sequences=False)
backward_layers = LSTM(units=128, return_sequences=False, go_backwards=True)
model.add(Bidirectional(layer=forward_layers, backward_layer=backward_layers))
# Output
# Layer
model.add(Dense(units=1, activation='sigmoid'))

# Fix: with a single sigmoid output and 0/1 labels this is binary
# classification, so train with binary cross-entropy rather than MSE
# (MSE gives weak gradients for probability outputs).
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
print(model.summary())

# + colab={"base_uri": "https://localhost:8080/"} id="InvCtAjsrxPX" outputId="52cdc40f-b952-462c-a6dc-3a3007dea940"
model.fit(padded_train_ex, train_label, epochs=100, verbose=1)

# + colab={"base_uri": "https://localhost:8080/"} id="iBAB4FT4sKO9" outputId="805efaee-e9e0-451b-c0de-892b5d207b00"
# Testing
# Two positive and two negative held-out Bangla reviews.
test_ex = ['দামে বেশি হলেও মানে ভালো, ধন্যবাদ এপেক্স এবং দারাজ কে',
           'জুতাটি হাতে পেয়ে আমি সত্যিই বিস্মিত',
           'একদম বাজে, মনে হচ্ছে প্রতারিত হলাম 😡',
           'এতো ফালতু প্রডাক্ট পবো আশা করি নি']

# tokenization and converting words into sequence
dense_test_ex = tokenizer.texts_to_sequences(test_ex)

# padding the test documents
padded_test_ex = pad_sequences(dense_test_ex, maxlen=MAX_LENGTH, padding='post')

prediction = model.predict(padded_test_ex)
print(prediction)

# + id="TuxW_bwduAqO"
NLP/Embedding_Sequence_LSTM_Preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia
#     language: julia
#     name: julia-1.5
# ---

# # Symmetric LU
#
# We begin with a symmetric $A$.

A = [
    2  4  4   2
    4  5  8  -5
    4  8  6   2
    2 -5  2 -26
];

# Carrying out our usual elimination in the first column leads us to

using LinearAlgebra
# Elementary eliminator for column 1: identity with the negated multipliers
# A[2:4,1]/A[1,1] placed below the diagonal.
L1 = diagm(0=>ones(4))
L1[2:4,1] = [-2,-2,-1]
A1 = L1*A

# But now let's note that if we transpose this result, we have the same first column as before! So we could apply again and then transpose back.

A2 = (L1*A1')'

# Using transpose identities, this is just

# Right-multiplying by L1' performs the same elimination on row 1,
# restoring symmetry of the trailing submatrix.
A2 = A1*L1'

# Now you can see how we proceed down and to the right, eliminating in a column and then symmetrically in the corresponding row.

L2 = diagm(0=>ones(4))
L2[3:4,2] = [0,-3]
A3 = L2*A2*L2'

# Finally, we arrive at a diagonal matrix.

# D is the diagonal factor of the symmetric (LDLᵀ-style) factorization.
L3 = diagm(0=>ones(4))
L3[4,3] = -1
D = L3*A3*L3'
book/linsys/demos/structure-symm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from os import listdir
from pickle import dump
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model


# +
# extract features from each photo in the directory
def extract_features(directory):
    """Return {image_id: feature} for every image in `directory`.

    image_id is the filename without its extension; the feature is the
    activation of VGG16's penultimate layer (the classifier head is
    dropped via layers.pop()).
    """
    # load the model
    model = VGG16()
    # re-structure the model: drop the final (softmax) layer and expose
    # the new last layer's output as the feature vector
    model.layers.pop()
    model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
    # summarize
    print(model.summary())
    # extract features from each photo
    features = dict()
    for name in listdir(directory):
        # load an image from file, resized to VGG16's expected input
        filename = directory + '/' + name
        image = load_img(filename, target_size=(224, 224))
        # convert the image pixels to a numpy array
        image = img_to_array(image)
        # reshape data for the model (add the batch dimension)
        image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
        # prepare the image for the VGG model (mean subtraction etc.)
        image = preprocess_input(image)
        # get features
        feature = model.predict(image, verbose=0)
        # get image id
        image_id = name.split('.')[0]
        # store feature
        features[image_id] = feature
        # print('>%s' % name)
    return features


# extract features from all images
directory = '/Users/kushalgupta/Downloads/Flicker8k_Dataset'
features = extract_features(directory)
print('Extracted Features: %d' % len(features))
# save to file
# Fix: the original `dump(features, open('features.pkl', 'wb'))` never
# closed the handle; use a context manager so the pickle is flushed/closed.
with open('features.pkl', 'wb') as pkl_file:
    dump(features, pkl_file)  # pickle
# -

# Dead code kept as a string literal by the original author: a hand-built
# Keras-1-era VGG16 (note the Python 2 `print` statement at the bottom).
# Preserved verbatim.
'''
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import cv2, numpy as np

def VGG_16(weights_path=None):
    model = Sequential()
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1,1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model

if __name__ == "__main__":
    im = cv2.resize(cv2.imread('cat.jpg'), (224, 224)).astype(np.float32)
    im[:,:,0] -= 103.939
    im[:,:,1] -= 116.779
    im[:,:,2] -= 123.68
    im = im.transpose((2,0,1))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model
    model = VGG_16('vgg16_weights.h5')
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    out = model.predict(im)
    print np.argmax(out)
'''
image_captioning_prepare_photo_data_features.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] graffitiCellId="id_wzbr23x"
# ### Problem Statement
#
# The Tower of Hanoi is a puzzle where we have three rods and `n` unique sized disks. The three rods are - source, destination, and auxiliary as shown in the figure below.
# <br><img style="float: center;" src="TOH.png"><br>
# Initially, all the `n` disks are present on the source rod. The final objective of the puzzle is to move all disks from the source rod to the destination rod using the auxiliary rod.<br><br>
# **However, there are some rules applicable to all rods:**
# 1. Only one disk can be moved at a time.
# 2. A disk can be moved only if it is on the top of a rod.
# 3. No disk can be placed on the top of a smaller disk.
#
# You will be given the number of disks `num_disks` as the input parameter. Write a **recursive function** `tower_of_Hanoi()` that prints the "move" steps in order to move `num_disks` number of disks from Source to Destination using the help of Auxiliary rod.
#
# ---
# ### Example Illustration
# For example, if you have `num_disks = 3`, then the disks should be moved as follows:
#
# 1. move disk from source to destination
# 2. move disk from source to auxiliary
# 3. move disk from destination to auxiliary
# 4. move disk from source to destination
# 5. move disk from auxiliary to source
# 6. move disk from auxiliary to destination
# 7.
move disk from source to destination
#
# You must print these steps as follows:
#
#         S D
#         S A
#         D A
#         S D
#         A S
#         A D
#         S D
#
# Where S = source, D = destination, A = auxiliary <br><br>
# An example illustration for `num_disks = 4` can be visualized in this [GIF from wikipedia](https://en.wikipedia.org/wiki/Tower_of_Hanoi#/media/File:Tower_of_Hanoi_4.gif)
#
# ---
#
# ### The Idea
# Assume you are writing a function that accepts the following arguments:
# 1. arg1 - number of disks
# 2. arg2 - rod A - this rod acts as the source (at the time of calling the function)
# 3. arg3 - rod B - this rod acts as the auxiliary
# 4. arg4 - rod C - this rod acts as the destination
#
# Follow the steps below:
# 1. Given the `num_disks` disks on the source, along with auxiliary and destination rods<br><br>
# 2. Check if `num_disks == 1`. This must be the termination condition, therefore use recursion to reach at this moment.
#    - If yes, move disk from source to destination. (Termination condition)<br><br>
# 3. For `num_disks > 1`, just think of your FIRST set of steps. You want to pick the bottom most disk on the source, to be transferred to the destination. For this reason, you will perform the steps below:
#    - Step 1: Move `num_disks - 1` from source to auxiliary<br><br>
#    - Step 2: Now you are left with only the largest disk at source. Move the only leftover disk from source to destination<br><br>
#    - Step 3: You had `num_disks - 1` disks available on the auxiliary, as a result of Step 1.
# Move `num_disks - 1` from auxiliary to destination
#
# ---
#
# ### Exercise - Write the function definition here

# + graffitiCellId="id_8tcr5o8"
def tower_of_Hanoi(num_disks):
    """
    :param: num_disks - number of disks
    TODO: print the steps required to move all disks from source to destination
    """
    pass

# + [markdown] graffitiCellId="id_rh9jy5w"
# <span class="graffiti-highlight graffiti-id_rh9jy5w-id_aaedpt9"><i></i><button>Hide Solution</button></span>

# + graffitiCellId="id_aaedpt9"
# Solution
def tower_of_Hanoi_soln(num_disks, source, auxiliary, destination):
    """Print one "<from> <to>" line per disk move, solving the puzzle recursively."""
    if num_disks <= 0:
        return
    # Park the top num_disks-1 disks on the auxiliary rod, move the
    # largest disk across, then stack the parked disks on top of it.
    if num_disks > 1:
        tower_of_Hanoi_soln(num_disks - 1, source, destination, auxiliary)
    print("{} {}".format(source, destination))
    if num_disks > 1:
        tower_of_Hanoi_soln(num_disks - 1, auxiliary, source, destination)


def tower_of_Hanoi(num_disks):
    """Solve the puzzle for rods labelled S (source), A (auxiliary), D (destination)."""
    tower_of_Hanoi_soln(num_disks, 'S', 'A', 'D')

# + [markdown] graffitiCellId="id_6dm5twe"
# #### Compare your results with the following test cases
# * num_disks = 2
#
#         # solution
#         S A
#         S D
#         A D
#
# * num_disks = 3
#
#         # solution
#         S D
#         S A
#         D A
#         S D
#         A S
#         A D
#         S D
#
# * num_disks = 4
#
#         # solution
#         S A
#         S D
#         A D
#         S A
#         D S
#         D A
#         S A
#         S D
#         A D
#         A S
#         D S
#         A D
#         S A
#         S D
#         A D

# + graffitiCellId="id_zia79bz"
Data Structures/Recursion/Tower-of-Hanoi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Problem
# A business produces two products $X$ and $Y$. To make these two products he needs $3$ machines and a quantity of labor.
#
# To produce $1$kg of $X$ we need:
#
# * $2$ hours on machine $1$
# * $1$ hour on machine $2$
# * $2$ hours on machine $3$
# * $1$ hour of labor
#
# To produce $1$kg of $Y$ we need:
#
# * $1$ hour on machine $1$
# * $2$ hours on machine $2$
# * $1$ hour of labor
#
# On the first and second machine there are a maximum of $140$ hours available. The last machine has $130$ hours available. There are a maximum of $90$ hours of labor.
#
# The profit for $1$kg of $X$ is $\$30$, and for $1$kg of $Y$ it is $\$20$. The business wants to maximize their profits.
#
# How many kg $X$ and $Y$ does he need to make? Assuming that all produced products will be sold.
#
# # Linear programming model
# First we convert the linear programming problem into a linear programming model.
# * Let $x$: quantity required to produce in kg for $X$.
# * Let $y$: quantity required to produce in kg for $Y$.
# * max $30x + 20y$
# * $2x+y \leq 140$
# * $x+2y\leq 140$
# * $2x \leq 130$
# * $x + y \leq 90$

# ## Solving it graphically
# To find the intersections we solve:
#
# $$ S_1 = \begin{cases} x+y=90 \\ x+2y=140 \end{cases} $$
#
# $$ S_2 = \begin{cases} 2x+y=140 \\ x+2y=140 \end{cases} $$
#
# $$ S_3 = \begin{cases} x+2y=140 \\ 2x=130 \end{cases} $$
#
# $$ S_4 = \begin{cases} x+y=90 \\ 2x+y=140 \end{cases} $$

# Each solve() call intersects two constraint lines (columns of the
# coefficient matrix, then the right-hand sides).
S1 = solve(cbind(c(1,1),c(1,2)),c(90,140))
S2 = solve(cbind(c(2,1),c(1,2)),c(140,140))
S3 = solve(cbind(c(1,2),c(2,0)),c(140,130))
S4 = solve(cbind(c(1,2),c(1,1)),c(90,140))
s = cbind(S1,S2,S3,S4)
rownames(s) <- c('x', 'y')
s

# We calculate the profit for each of the points:

for (i in 1:4) {
    print(paste('X=', s[1,i], 'Y=', s[2,i], 'Profit=', 30*s[1,i]+20*s[2,i]))
}

# Only $S_1$ and $S_4$ are viable solutions. We create a function $d(x)$ through $S_4$ which is $(50, 40)$:
#
# $$ 30x + 20y = 0 \iff y = -\frac{3}{2}x$$
#
# Finally we plug-in the point $(50,40)$:
#
# $$ y = -\frac{3}{2}(x-50)+40 $$

# The objective line through the optimum, then each constraint boundary.
d <- function(x) -3/2*(x-50)+40
a = 0
b = 140
X = Y = a:b
plot(X,Y,col='white')
points(s[1,], s[2,])
c1 = line(a:b, sapply(a:b, d))
c2 = line(a:b, sapply(a:b, function(x) (140-2*x)))
c3 = line(a:b, sapply(a:b, function(x) (1/2*(140-x))))
c4 = line(a:b, sapply(a:b, function(x) (90-x)))
abline(c1, col='red', lwd=3, lty=2)
abline(c2, lwd=2)
abline(c3, lwd=2)
abline(c4, lwd=2)
abline(v=130/2, lwd=2)

# ## Solution
# The solution is $50$ kg of $X$ and $40$ kg of $Y$ with a total profit of $\$2300$.

# # Simplex method
# Now we are going to find the solution with the simplex method.

# Initial tableau: objective row `d`, one slack variable per constraint,
# and the right-hand sides in column RHS.
d = c(1,0,0,0,0)
x = c(-30, 2,1,2,1)
y = c(-20,1,2,0,1)
s1 = c(0,1,0,0,0)
s2 = c(0,0,1,0,0)
s3 = c(0,0,0,1,0)
s4 = c(0,0,0,0,1)
RHS = c(0,140,140,130,90)
M = cbind(d,x,y,s1,s2,s3,s4,RHS)
rownames(M) = c('d','s1','s2','s3','s4')
M

# Solving for $X$:

# Pivot on the x column (row 4); row operations clear the column, then the
# pivot row is scaled to make the pivot element 1.
M[1,] = M[1,] + 15*M[4,]
M[2,] = M[2,] - M[4,]
M[3,] = M[3,] - 1/2*M[4,]
M[5,] = M[5,] - 1/2*M[4,]
M[4,] = 1/2*M[4,]
rownames(M) = c('d','s1','s2','x','s4')
M

# Solving for $Y$:

# Pivot on the y column (row 2).
M[1,] = M[1,] + 20*M[2,]
M[3,] = M[3,] - 2*M[2,]
M[5,] = M[5,] - M[2,]
rownames(M) = c('d','y','s2','x','s4')
M

# Solving for $s_3$:

# Final pivot brings s3 into the basis; the objective row is now optimal.
M[1,] = M[1,] + 10*M[5,]
M[2,] = M[2,] + 2*M[5,]
M[3,] = M[3,] - 3*M[5,]
M[4,] = M[4,] - M[5,]
M[5,] = 2*M[5,]
rownames(M) = c('d','y','s2','x','s3')
M

# ## Solution
#
# The total profit is $\$2300$. For maximum profit we need to produce $50$ kg of $X$, and $40$ kg of $Y$. With a remainder for $s_2=10$ and $s_3=30$.

# # Algorithm
# The following steps are executed when the simplex tablet has been set up.
# 1. Find the largest negative entry in row $d$, select this column. If there are none, we are done.
# 2. For the selected column, divide the right hand side by the elements in that column.
# 3. Find the smaller non-negative number, use this as the pivot element.
# 4. Divide the pivot row by the number of the pivot element to make it $1$.
# 5. Make all the values in the column equal $0$, except the pivot element.
# 6. Go to $^*1$.
Applied Math/Y1S4/Lineair programmeren/.ipynb_checkpoints/Simplex methode-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Honesty Pledge # # The first part of this exam must be completed individually. I understand that during this time, I am not allowed to discuss this exam with anyone besides the instructor. # # The second part of this exam may be completed in pairs. I understand that during this time, I am only allowed to discuss this exam with my partner and the instructor. In particular, I am not allowed to discuss the exam with any other groups. # # I certify that I did not receive any advance knowledge about this project. Also, I promise not to divulge any details about this project to any students who have not yet taken it. Giving and receiving help are both forbidden and will be prosecuted equally. # # This project is open-Internet. I understand that I am allowed to use any existing resources on the web, including web search, but I am not allowed to consult with any other people during this project. # # I understand that the penalty for violating this pledge is a grade of **F** in the course and disciplinary action by the Office of Student Rights & Responsibilities. Please sign below to indicate that you have read and understood this pledge. Your project will not be graded unless you sign below. # + deletable=false nbgrader={"checksum": "6fa681ce83bec92925cb85a1c3a6decc", "grade": true, "grade_id": "student", "locked": false, "points": 0, "solution": true} # YOUR CODE HERE raise NotImplementedError()
Project-05-31-Individual/Honesty Pledge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PythonData
#     language: python
#     name: pythondata
# ---

# create a practice set of random latitude and longitude combinations
x = [25.12903645, 25.92017388, 26.62509167, -59.98969384, 37.30571269]
y = [-67.59741259, 11.09532135, 74.84233102, -76.89176677, -61.13376282]
coordinates = list(zip(x, y))
for coordinate in coordinates:
    print(coordinate[0], coordinate[1])

from citipy import citipy

# Use the tuple() function to display the latitude and longitude combinations.
# For each (lat, lng) pair print the nearest city and its country code.
for coordinate in coordinates:
    print(citipy.nearest_city(coordinate[0], coordinate[1]).city_name, citipy.nearest_city(coordinate[0], coordinate[1]).country_code)

import requests

#import key
from config import weather_api_key

# +
# Format api.openweathermap.org/data/2.5/weather?q={city name}&appid={API key}
# NOTE(review): url_ct and root_key are assigned but never used; the working
# request URL is assembled from `url` below instead.
url_ct = "api.openweathermap.org/data/2.5/weather?q="
root_key = "&appid="

# from notes
# Starting URL for Weather Map API Call.
url = "http://api.openweathermap.org/data/2.5/weather?units=metric&APPID=" + weather_api_key
print(url)
# -

# Create an endpoint URL for a city.
city_url = url + "&q=" + "Boston"
print(city_url)

# make a 'Get' request for the city weather
city_weather = requests.get(city_url)
city_weather

city_weather.json()

# +
# error handling
# Create an endpoint URL for a city.
city_url = url + "&q=" + "Boston"
city_weather = requests.get(city_url)
# A 200 status code means the city was found by the API.
if city_weather.status_code == 200:
    print(f"City Weather found.")
else:
    print(f"City weather not found.")
# -

# Create an endpoint URL for a city.
# NOTE(review): "Bston" looks like a deliberate misspelling to exercise the
# "not found" branch above — confirm.
city_url = url + "&q=" + "Bston"
city_weather = requests.get(city_url)
if city_weather.status_code == 200:
    print(f"City Weather found.")
else:
    print(f"City weather not found.")

# Create an endpoint URL for a city.
city_url = url + "&q=" + "Boston"
city_weather = requests.get(city_url)
city_weather.json()

boston_data = city_weather.json()

boston_data['sys']['country']

# Pull the fields of interest out of the JSON payload.
lat = boston_data["coord"]["lat"]
lng = boston_data["coord"]["lon"]
max_temp = boston_data["main"]["temp_max"]
humidity = boston_data["main"]["humidity"]
clouds = boston_data["clouds"]["all"]
wind = boston_data["wind"]["speed"]
print(lat, lng, max_temp, humidity, clouds, wind)

# +
from datetime import datetime

# 'dt' is a Unix timestamp; render it as a human-readable datetime.
# NOTE(review): datetime.utcfromtimestamp is deprecated in newer Python —
# consider datetime.fromtimestamp(date, tz=timezone.utc) when upgrading.
date = boston_data['dt']
datetime.utcfromtimestamp(date).strftime('%Y-%m-%d %H:%M:%S')
# -

# enumerate yields (index, (lat, lng)) tuples here.
for coordinate in enumerate(coordinates):
    print(coordinate)

import time
today=time.strftime("%x")
today
API_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # 기초부터 시작하는 NLP: 문자-단위 RNN으로 이름 생성하기 # ******************************************************************************** # **Author**: `<NAME> <https://github.com/spro/practical-pytorch>`_ # **번역**: `황성수 <https://github.com/adonisues>`_ # # 이 튜토리얼은 3개로 이뤄진 "기초부터 시작하는 NLP"의 2번째 튜토리얼입니다. # `첫번째 튜토리얼 </intermediate/char_rnn_classification_tutorial>` # 에서는 이름의 언어를 분류하기 위해 RNN을 사용했습니다. # 이번에는 반대로 언어로 이름을 생성할 예정입니다. # # :: # # > python sample.py Russian RUS # Rovakov # Uantov # Shavakov # # > python sample.py German GER # Gerren # Ereng # Rosher # # > python sample.py Spanish SPA # Salla # Parer # Allan # # > python sample.py Chinese CHI # Chan # Hang # Iun # # 우리는 몇 개의 선형 계층으로 작은 RNN을 직접 만들고 있습니다. # 이전 튜토리얼인 이름을 읽은 후 그 언어를 예측하는 것과의 큰 차이점은 # 언어를 입력하고 한 번에 한 글자를 생성하여 출력하는 것입니다. # 언어 형성(단어 또는 다른 고차원 구조로도 수행 될 수 있음)을 위해 # 문자를 반복적으로 예측하는 것을 "언어 모델" 이라고 합니다. # # **추천 자료:** # # Pytorch를 설치했고, Python을 알고, Tensor를 이해한다고 가정합니다: # # - https://pytorch.org/ 설치 안내 # - :doc:`/beginner/deep_learning_60min_blitz` PyTorch 시작하기 # - :doc:`/beginner/pytorch_with_examples` 넓고 깊은 통찰을 위한 자료 # - :doc:`/beginner/former_torchies_tutorial` 이전 Lua Torch 사용자를 위한 자료 # # RNN과 작동 방식을 아는 것 또한 유용합니다: # # - `The Unreasonable Effectiveness of Recurrent Neural # Networks <https://karpathy.github.io/2015/05/21/rnn-effectiveness/>`__ # 실생활 예제를 보여 줍니다. # - `Understanding LSTM # Networks <https://colah.github.io/posts/2015-08-Understanding-LSTMs/>`__ # LSTM에 관한 것이지만 RNN에 관해서도 유익합니다. # # 이전 튜토리얼도 추천합니다. :doc:`/intermediate/char_rnn_classification_tutorial` # # # 데이터 준비 # ================== # # .. Note:: # `여기 <https://download.pytorch.org/tutorial/data.zip>`_ # 에서 데이터를 다운 받고, 현재 디렉토리에 압축을 푸십시오. # # 이 과정의 더 자세한 사항은 지난 튜토리얼을 보십시오. 
# 요약하면, 줄마다 이름이 적힌 텍스트 파일 ``data/names/[Language].txt`` 있습니다. # 이것을 어레이로 분리하고, Unicode를 ASCII로 변경하고, # 사전 ``{language: [names ...]}`` 을 만들어서 마무리합니다. # # # # + from __future__ import unicode_literals, print_function, division from io import open import glob import os import unicodedata import string all_letters = string.ascii_letters + " .,;'-" n_letters = len(all_letters) + 1 # EOS(end of sentence) 기호 추가 def findFiles(path): return glob.glob(path) # 유니코드 문자열을 ASCII로 변환, https://stackoverflow.com/a/518232/2809427 def unicodeToAscii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in all_letters ) # 파일을 읽고 줄 단위로 분리 def readLines(filename): lines = open(filename, encoding='utf-8').read().strip().split('\n') return [unicodeToAscii(line) for line in lines] # 각 언어의 이름 목록인 category_lines 사전 생성 category_lines = {} all_categories = [] for filename in findFiles('data/names/*.txt'): category = os.path.splitext(os.path.basename(filename))[0] all_categories.append(category) lines = readLines(filename) category_lines[category] = lines n_categories = len(all_categories) if n_categories == 0: raise RuntimeError('Data not found. Make sure that you downloaded data ' 'from https://download.pytorch.org/tutorial/data.zip and extract it to ' 'the current directory.') print('# categories:', n_categories, all_categories) print(unicodeToAscii("O'Néàl")) # - # 네트워크 생성 # ==================== # # 이 네트워크는 `지난 튜토리얼의 RNN <#Creating-the-Network>`__ 이 # 다른 입력들과 연결되는 category tensor를 추가 인자로 가지게 확장합니다. # category tensor는 문자 입력과 마찬가지로 one-hot 벡터입니다. # # 역자주: 기존 입력과 category tensor를 결합하여 입력으로 사용하기 때문에 # 입력의 사이즈가 n_categories 만큼 커집니다. # # 우리는 출력을 다음 문자의 확률로 해석 합니다. 샘플링 할 때, # 가장 확률이 높은 문자가 다음 입력 문자로 사용됩니다. # # 더 나은 동작을 위해 두 번째 선형 레이어 # ``o2o`` (은닉과 출력을 결합한 후) 를 추가했습니다 . # 또한 Drop-out 계층이 있습니다. 이 계층은 주어진 확률(여기서는 0.1)로 # `무작위로 입력을 0 # <https://arxiv.org/abs/1207.0580>`__ 으로 만듭니다. # 일반적으로 입력을 흐리게 해서 과적합을 막는 데 사용됩니다. 
# 여기서 우리는 고의로 일부 혼돈을 추가하고 샘플링 다양성을 높이기 # 위해 네트워크의 마지막에 이것을 사용합니다. # # .. figure:: https://i.imgur.com/jzVrf7f.png # :alt: # # # # # + import torch import torch.nn as nn class RNN(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(RNN, self).__init__() self.hidden_size = hidden_size self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size) self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size) self.o2o = nn.Linear(hidden_size + output_size, output_size) self.dropout = nn.Dropout(0.1) self.softmax = nn.LogSoftmax(dim=1) def forward(self, category, input, hidden): input_combined = torch.cat((category, input, hidden), 1) hidden = self.i2h(input_combined) output = self.i2o(input_combined) output_combined = torch.cat((hidden, output), 1) output = self.o2o(output_combined) output = self.dropout(output) output = self.softmax(output) return output, hidden def initHidden(self): return torch.zeros(1, self.hidden_size) # - # 학습 # ========= # 학습 준비 # ---------------------- # # 제일 먼저 (category, line)의 무작위 쌍을 얻는 함수: # # # # + import random # 목록에서 무작위 아이템 반환 def randomChoice(l): return l[random.randint(0, len(l) - 1)] # 임의의 category 및 그 category에서 무작위 줄(이름) 얻기 def randomTrainingPair(): category = randomChoice(all_categories) line = randomChoice(category_lines[category]) return category, line # - # 각 시간 단계 마다 (즉, 학습 단어의 각 문자 마다) 네트워크의 입력은 # ``(언어, 현재 문자, 은닉 상태)`` 가 되고, 출력은 # ``(다음 문자, 다음 은닉 상태)`` 가 된다. 따라서 각 학습 세트 마다 # 언어, 입력 문자의 세트, 출력/목표 문자의 세트가 필요하다. # # 각 시간 단계마다 현재 문자에서 다음 문자를 예측하기 때문에, # 문자 쌍은 한 줄(하나의 이름)에서 연속된 문자 그룹입니다. - 예를 들어 ``"ABCD<EOS>"`` 는 # ("A", "B"), ("B", "C"), ("C", "D"), ("D", "EOS") 로 생성합니다. # # .. figure:: https://i.imgur.com/JH58tXY.png # :alt: # # Category(언어) Tensor는 ``<1 x n_categories>`` 크기의 `One-hot # Tensor <https://en.wikipedia.org/wiki/One-hot>`__ 입니다. # 학습시에 모든 시간 단계에서 네트워크에 이것을 전달합니다. # - 이것은 설계 선택사항으로, 초기 은닉 상태 또는 # 또 다른 전략의 부분으로 포함될 수 있습니다. 
# # # # + # Category를 위한 One-hot 벡터 def categoryTensor(category): li = all_categories.index(category) tensor = torch.zeros(1, n_categories) tensor[0][li] = 1 return tensor # 입력을 위한 처음부터 마지막 문자(EOS 제외)까지의 One-hot 행렬 def inputTensor(line): tensor = torch.zeros(len(line), 1, n_letters) for li in range(len(line)): letter = line[li] tensor[li][0][all_letters.find(letter)] = 1 return tensor # 목표를 위한 두번째 문자 부터 마지막(EOS) 까지의 LongTensor def targetTensor(line): letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))] letter_indexes.append(n_letters - 1) # EOS return torch.LongTensor(letter_indexes) # - # 학습 동안 편의를 위해 무작위로 (category[언어], line[이름])을 가져오고 # 그것을 필요한 형태 (category[언어], input[현재 문자], target[다음 문자]) Tensor로 바꾸는 # ``randomTrainingExample`` 함수를 만들 예정입니다. # # # # 임의의 Category에서 Category, Input, Target Tensor를 만듭니다. def randomTrainingExample(): category, line = randomTrainingPair() category_tensor = categoryTensor(category) input_line_tensor = inputTensor(line) target_line_tensor = targetTensor(line) return category_tensor, input_line_tensor, target_line_tensor # 네트워크 학습 # -------------------- # # 마지막 출력만 사용하는 분류와 달리, 모든 단계에서 예측을 수행하므로 # 모든 단계에서 손실을 계산합니다. # # Autograd의 마법이 각 단계의 손실들을 간단하게 합하고 마지막에 # 역전파를 호출하게 해줍니다. 
#
#
#

# +
criterion = nn.NLLLoss()

learning_rate = 0.0005

def train(category_tensor, input_line_tensor, target_line_tensor):
    """Run one (category, line) example through the RNN and take an SGD step.

    Returns the last output and the average per-character loss.
    """
    target_line_tensor.unsqueeze_(-1)
    hidden = rnn.initHidden()

    rnn.zero_grad()

    loss = 0

    # Accumulate the loss over every character of the line; autograd sums
    # the per-step losses so a single backward() call suffices.
    for i in range(input_line_tensor.size(0)):
        output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)
        l = criterion(output, target_line_tensor[i])
        loss += l

    loss.backward()

    # Manual SGD update. Fix: the legacy overload add_(Number, Tensor) is
    # deprecated and removed in current PyTorch; use the alpha keyword
    # instead (mathematically identical: p -= learning_rate * grad).
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)

    return output, loss.item() / input_line_tensor.size(0)
# -

# To keep track of how long training takes, add a timeSince(timestamp)
# function that returns a human-readable string:
#
#
#

# +
import time
import math

def timeSince(since):
    """Return elapsed time since *since* formatted as 'Xm Ys'."""
    now = time.time()
    s = now - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)
# -

# Training is business as usual: call train() a bunch of times, wait a few
# minutes, print the current time and loss every print_every iterations,
# and store an average loss in all_losses every plot_every iterations for
# plotting later.
#
#
#

# +
rnn = RNN(n_letters, 128, n_letters)

n_iters = 100000
print_every = 5000
plot_every = 500
all_losses = []
total_loss = 0  # reset every plot_every iterations

start = time.time()

for iter in range(1, n_iters + 1):
    output, loss = train(*randomTrainingExample())
    total_loss += loss

    if iter % print_every == 0:
        print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, loss))

    if iter % plot_every == 0:
        all_losses.append(total_loss / plot_every)
        total_loss = 0
# -

# Plotting the Losses
# -------------------
#
# Plotting the historical loss from all_losses shows the network learning:
#
#
#

# +
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

plt.figure()
plt.plot(all_losses)
# -

# Sampling the Network
# ====================
#
# To sample we give the network a letter, ask what the next one is, feed
# that in as the next letter, and repeat until the EOS token.
#
# - Create tensors for the input category, starting letter, and empty hidden state
# - Create a string output_name starting with the starting letter
# - Up to a maximum output length,
#
#   - Feed the current letter to the network
#   - Get the next letter from the highest output, and the next hidden state
#   - If the letter is EOS, stop here
#   - If a regular letter, add to output_name and continue
#
# - Return the final name
#
# ..
Note:: # 시작 문자를 주는 것 외에 "문자열 시작" 토큰을 학습에 # 포함되게 하고 네트워크가 자체적으로 시작 문자를 선택하게 하는 # 다른 방법도 있습니다. # # # # + max_length = 20 # 카테고리와 시작 문자로 부터 샘플링 하기 def sample(category, start_letter='A'): with torch.no_grad(): # 샘플링에서 히스토리를 추적할 필요 없음 category_tensor = categoryTensor(category) input = inputTensor(start_letter) hidden = rnn.initHidden() output_name = start_letter for i in range(max_length): output, hidden = rnn(category_tensor, input[0], hidden) topv, topi = output.topk(1) topi = topi[0][0] if topi == n_letters - 1: break else: letter = all_letters[topi] output_name += letter input = inputTensor(letter) return output_name # 하나의 카테고리와 여러 시작 문자들로 여러 개의 샘플 얻기 def samples(category, start_letters='ABC'): for start_letter in start_letters: print(sample(category, start_letter)) samples('Russian', 'RUS') samples('German', 'GER') samples('Spanish', 'SPA') samples('Chinese', 'CHI') # - # Exercises # ========= # # - Try with a different dataset of category -> line, for example: # # - Fictional series -> Character name # - Part of speech -> Word # - Country -> City # # - Use a "start of sentence" token so that sampling can be done without # choosing a start letter # - Get better results with a bigger and/or better shaped network # # - Try the nn.LSTM and nn.GRU layers # - 상위 수준 네트워크로 여러 개의 이런 RNN을 결합해 보십시오 # # #
docs/_downloads/a75cfadf4fa84dd594874d4c53b62820/char_rnn_generation_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Comapre pandas vs seaborn # # ## Pandas & Seaborn - A guide to handle & visualize data in Python # https://tryolabs.com/blog/2017/03/16/pandas-seaborn-a-guide-to-handle-visualize-data-elegantly/ # # Import data # + import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import timeit # Load dataset titanic = sns.load_dataset('titanic') # -
notebooks/pandas_seaborn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Accessing Data From S3 # # This example shows how to configure a JupyterLab docker image to access data from AWS S3. # # ## Build a Docker Image with AWS Related JARs # # First, we need to build a docker image that includes the missing jars files needed for accessing S3. You can also add the jars using a volume mount, and then include code in your notebook to update the `PYSPARK_SUBMIT_ARGS` to include the jars from their location within the docker image. I felt like baking the jars into the docker image was a little easier that having to run a code cell to update the `PYSPARK_SUBMIT_ARGS`. # # This example is using Spark 3.0.1 with Hadoop 3.2, and the files that we're adding are: # # * aws-java-sdk-bundle-1.11.950.jar # * hadoop-aws-3.2.0.jar # * jets3t-0.9.4.jar # # Here is an example Dockerfile to use: # # ``` # FROM jupyter/pyspark-notebook:8ea7abc5b7bc # # USER root # # ENV PYSPARK_SUBMIT_ARGS '--packages com.amazonaws:aws-java-sdk:1.11.950,org.apache.hadoop:hadoop-aws:3.2.0,net.java.dev.jets3t:jets3t:0.9.4 pyspark-shell' # # # # Download missing jars # # # Get AWS SDK JAR # RUN (cd /usr/local/spark/jars && curl -O https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-bundle/1.11.950/aws-java-sdk-bundle-1.11.950.jar) # # # Get Hadoop-AWS Jar # RUN (cd /usr/local/spark/jars && curl -O https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/3.2.0/hadoop-aws-3.2.0.jar) # # # Get jets3t JAR # RUN (cd /usr/local/spark/jars && curl -O https://repo1.maven.org/maven2/net/java/dev/jets3t/jets3t/0.9.4/jets3t-0.9.4.jar) # # USER $NB_UID # ``` # # # ## Run the Docker Container and Pass in AWS Credentials # # This example is assuming that you have appropriate credentials saved in $HOME/.aws/credentials, and have jq installed. 
# # Fetch temporary credentials from AWS and run the docker container with the credentials and session token passed in as environment variables: # # ```bash # creds_json=$(aws --profile default --region us-west-2 sts get-session-token) # # docker run -d --name jupyter --rm -p 8888:8888 \ # -e AWS_ACCESS_KEY_ID=$(echo "$creds_json" | jq -r .Credentials.AccessKeyId) \ # -e AWS_SECRET_ACCESS_KEY=$(echo "$creds_json" | jq -r .Credentials.SecretAccessKey) \ # -e AWS_SESSION_TOKEN=$(echo "$creds_json" | jq -r .Credentials.SessionToken) \ # jupyter-docker:yourtag jupyter lab --LabApp.token '' # ``` # # ## Configure Spark from pyspark.sql import SparkSession import logging logging.getLogger().setLevel(logging.DEBUG) # ### Set the SparkSession Thread Count and Memory # # If you have JupyterLab running in the cloud, and you can afford to run enough instances where you're not overly concerned with cost, then don't worry about this section. If you are running JupyterLab on a single machine (for example, a laptop with limited resources), and the amount of data you want to process is more than you have available on the machine, then you might want to be thoughtful about how you initialize the SparkSession. If the single machine (perhaps your home laptop) use case sounds like you, then this is what I considered when configuring the SparkSession. # # I have 8 cores and 16GB of memory available on my laptop, and I configured Docker to use 4 cores and up to 3GB of memory. # # # Things to consider if the Spark cluster is on a constrained system: # # * How much memory do you have available for your Spark job? # > If you don't have much memory available, then consider reading the [Spark Memory Tuning Guide](https://spark.apache.org/docs/latest/tuning.html#memory-tuning). There are great suggestions for everything from changing the default serializer to being aware of the impacts of using broadcast variables. # * How much data do you plan to process? 
# > You also might want to be aware of the format that your source data is in. [Here is a nice article comparing CSV, JSON, and Parquet](https://www.linkedin.com/pulse/spark-file-format-showdown-csv-vs-json-parquet-garren-staubli/). If your data is in JSON, but you want to process the data as Parquet, then consider creating a job to convert the data to Parquet before using the data in your processing jobs. # + MAX_MEMORY = "2g" spark = SparkSession.builder \ .master("local[4]") \ .appName("Covid19TimeSeries") \ .config("spark.executor.memory", MAX_MEMORY) \ .config("spark.driver.memory", MAX_MEMORY) \ .config("fs.s3a.path.style.access", True) \ .config("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider") \ .config("fs.s3a.endpoint", "s3.us-west-2.amazonaws.com") \ .config("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem") \ .config("com.amazonaws.services.s3.enableV4", True) \ .config("spark.driver.extraJavaOptions", "-Dcom.amazonaws.services.s3.enableV4=true") \ .getOrCreate() # - # ## Read Data From S3 # # At this point you should be able to read data in from S3. 
# Read the Parquet dataset straight from S3 via the s3a:// connector.
s3path = "s3a://dev-leewallen-spark/covid-19-time-series/parquet/covid-19.parquet"
parquetDF = spark.read.parquet(s3path)

from pyspark.sql.functions import col

parquetDF.show(5)

# Keep only the US rows; the column name contains '/' so it is backtick-quoted.
usConfirmed = parquetDF.filter((col('`Country/Region`') == "US"))

# import chart_studio.plotly as py
# import plotly.graph_objects as go
# from plotly.offline import plot
import pandas as pd
import matplotlib.pyplot as plt
import requests
requests.packages.urllib3.disable_warnings()

usConfirmed.show(5)

usConfirmed.printSchema()

# +
from pyspark.sql.types import DateType

# Add a proper date-typed column derived from the string 'Date' column.
usConfirmed = usConfirmed.withColumn("DateTS",usConfirmed["Date"].cast(DateType()))
# -

usConfirmed.show()

usConfirmed.printSchema()

# Collect into pandas for local plotting (fine here since the frame is small
# after filtering to US rows).
usPandas = usConfirmed.toPandas()
usPandas

# ### Pandas Plot Related Settings

pd.options.plotting.matplotlib.register_converters = True
plt.close("all")

# ### Make the Plot Interactive
#
# Make the plot resizeable, and provide an interface so you can save your plot.

# %matplotlib widget
usPandas.plot.bar(stacked=True)
usPandas.plot()
notebooks/AwsS3AccessTemporaryCredentials.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="-ejxfHPIDa_f" # %matplotlib inline # отключим предупреждения Anaconda import warnings import matplotlib.pyplot as plt import seaborn as sns warnings.filterwarnings("ignore") import numpy as np import pandas as pd # + id="q_Qmcwn2DmJI" ## Сделаем функцию, которая будет заменять NaN значения на медиану в каждом столбце таблицы def delete_nan(table): for col in table.columns: table[col] = table[col].fillna(table[col].median()) return table # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="UogmNRjvGGOF" outputId="15ce5c19-7beb-49d7-9f9f-b2c798ee4d0c" ## Считываем данные data = pd.read_csv("drive/MyDrive/ML/mlcourse.ai/A5/credit_scoring_sample.csv") data.head() # + colab={"base_uri": "https://localhost:8080/"} id="uO_vyBs-GqPe" outputId="22c1e502-ec39-48da-a4bb-6cd851c525dc" data.dtypes # + colab={"base_uri": "https://localhost:8080/"} id="6rpKEWY_M34c" outputId="f07bfeed-7bbe-4b95-8b37-3e165d9f01b4" data.columns # + colab={"base_uri": "https://localhost:8080/", "height": 365} id="Ds1SjvSfK0GY" outputId="c2a9a303-4c76-4ebd-dc39-78f265c0b5d5" ## Посмотрим на распределение классов в зависимой переменной ax = data['SeriousDlqin2yrs'].hist(orientation='horizontal', color='red') ax.set_xlabel("number_of_observations") ax.set_ylabel("unique_value") ax.set_title("Target distribution") print('Distribution of the target:') data['SeriousDlqin2yrs'].value_counts()/data.shape[0] # + colab={"base_uri": "https://localhost:8080/"} id="jn5qb7P9NbDu" outputId="e4a2c89a-d692-44a4-e150-88dbf6b4463c" data.shape # + colab={"base_uri": "https://localhost:8080/"} id="6kNJc1dEOKSt" outputId="4f14fcf2-0149-447a-c107-923255f2e3a2" ## Выберем названия всех признаков из таблицы, кроме прогнозируемого independent_columns_names = [x for x in data if x != "SeriousDlqin2yrs"] 
independent_columns_names

# + id="w4X3RUCyOV6O"
## Apply the helper that replaces every NaN with the corresponding column median.
table = delete_nan(data)

# + id="DNH20bcP2Tjt"
## Split target and features.
X = table[independent_columns_names]
y = table["SeriousDlqin2yrs"]

# + colab={"base_uri": "https://localhost:8080/"} id="GC8pVVTC2fHa" outputId="91a1ee2c-d692-4c45-ac2e-7c49540bb884"
def get_bootstrap_samples(data, n_samples):
    """Generate *n_samples* bootstrap resamples of *data* (sampling with replacement).

    Fix: removed a leftover debug ``print(indices)`` that dumped the whole
    (n_samples, len(data)) index matrix to stdout on every call.
    """
    indices = np.random.randint(0, len(data), (n_samples, len(data)))
    samples = data[indices]
    return samples

def stat_intervals(stat, alpha):
    """Return the (alpha/2, 1 - alpha/2) percentile interval of *stat*."""
    boundaries = np.percentile(stat, [100 * alpha / 2., 100 * (1 - alpha / 2.)])
    return boundaries

# Save the ages of clients with SeriousDlqin2yrs == True into a numpy array.
age = table[table['SeriousDlqin2yrs'] == True]['age'].values

# Set the seed for reproducibility of the results.
np.random.seed(0)

# Generate the bootstrap samples and immediately compute each sample's mean.
age_mean_scores = [np.mean(sample) for sample in get_bootstrap_samples(age, 1000)]

# Print the interval estimate of the mean.
print("Age: mean interval", stat_intervals(age_mean_scores, 0.1))

# + id="hdhcZE_e-xqT"
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, StratifiedKFold

## Use LogisticRegression to build the model. Because the target classes
## are imbalanced we add the class-balancing parameter, and random_state=5
## keeps the results reproducible.
lr = LogisticRegression(random_state=5, class_weight="balanced")

## Let's pick the best regularization coefficient (the C parameter of logistic regression) for the model.
## This parameter is needed to pick an optimal model that, on the one hand,
## is not overfitted and, on the other, predicts the target values well.
## Leave the remaining parameters at their defaults.
parameters = {"C": (0.0001, 0.001, 0.01, 0.1, 1, 10)}

## To tune the regularization coefficient, look at the roc-auc values for
## each candidate on a stratified 5-fold cross-validation built with
## StratifiedKFold.
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=5)

# + colab={"base_uri": "https://localhost:8080/"} id="2m-uQRuX_Tw0" outputId="af0a714a-6fe7-4a51-cfcf-b7ad818210cb"
gs = GridSearchCV(lr, parameters, scoring='roc_auc', cv=skf)
gs.fit(X,y)
gs.best_estimator_

# + colab={"base_uri": "https://localhost:8080/"} id="uPfLhtkoAXXJ" outputId="8c48b56e-4ec3-4ce0-b702-2f26d437ef36"
gs.cv_results_['std_test_score'][1]

# + colab={"base_uri": "https://localhost:8080/"} id="4MUGkfnQENdt" outputId="40142444-4cb0-43a0-b057-d5f2e3894876"
gs.best_score_

# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="tbGu2YgxGJwA" outputId="cbd6aac3-6c51-430c-f379-524efd04fa0b"
from sklearn.preprocessing import StandardScaler

# Refit on standardized features and rank the coefficients by size.
lr = LogisticRegression(C=0.001, random_state=5, class_weight='balanced')
scal = StandardScaler()
lr.fit(scal.fit_transform(X), y)

pd.DataFrame({'feat': independent_columns_names,
              'coef': lr.coef_.flatten().tolist()}).sort_values(by='coef', ascending=False)

# + colab={"base_uri": "https://localhost:8080/"} id="h13FXzNAGMk4" outputId="56e1e52b-5d96-4c8f-e2c0-b63f1c4c341d"
# Softmax-normalized share of the third coefficient.
print((np.exp(lr.coef_[0]) / np.sum(np.exp(lr.coef_[0])))[2])

# + colab={"base_uri": "https://localhost:8080/"} id="J6XqpvbeHfsq" outputId="a0d8289b-67ee-4884-aecb-d7aeab74ea5a"
# Same model refit on the raw (unscaled) features.
lr = LogisticRegression(C=0.001, random_state=5, class_weight='balanced')
lr.fit(X,y)
lr.coef_

# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="2NRxiOU7IhBE" outputId="27b1916f-0277-451c-a792-b80ebb03b9e5"
pd.DataFrame({'feat': independent_columns_names,
              'coef': lr.coef_.flatten().tolist()}).sort_values(by='coef', ascending=False)

# + colab={"base_uri": "https://localhost:8080/"} id="2fRm-q8NJFjQ" outputId="069c5b65-15b0-4f70-f9ab-b48137fc842d"
# exp(20 * coef) — presumably the odds multiplier for a 20-unit increase in
# the first feature; confirm against the assignment text.
np.exp(lr.coef_[0][0]*20)

# + id="h8EgWEn1JIcl"
from sklearn.ensemble import RandomForestClassifier

# Initialize a random forest with 100 trees and balanced classes.
rf = RandomForestClassifier(
    n_estimators=100,
    n_jobs=-1,
    random_state=42,
    oob_score=True,
    class_weight="balanced",
)

## Search for the best parameters within the following grid.
parameters = {
    "max_features": [1, 2, 4],
    "min_samples_leaf": [3, 5, 7, 9],
    "max_depth": [5, 10, 15],
}

## Again a stratified k-fold validation, whose initialization should still be stored in skf.

# + id="cGTsPxy7Jo9n" colab={"base_uri": "https://localhost:8080/"} outputId="b2967e41-1e2c-42ca-9e4a-5d239ee2515e"
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=5)
skf.get_n_splits()

# + colab={"base_uri": "https://localhost:8080/"} id="JT6HoQj8KQLu" outputId="9d6010c6-4e39-458e-9817-d7539f7ecff5"
rf_grid_search = GridSearchCV(rf, parameters, n_jobs=-1, scoring='roc_auc', cv=skf, verbose=True)
rf_grid_search = rf_grid_search.fit(X, y)
# Difference between the forest's best CV roc-auc and the logistic one's.
print(rf_grid_search.best_score_ - gs.best_score_)

# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="-f6jcq8rLGer" outputId="1e2648d4-b8e1-4128-d1d1-e576bcaa3340"
# Least important feature according to the fitted forest.
independent_columns_names[np.argmin(rf_grid_search.best_estimator_.feature_importances_)]

# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="Lj6r565WM9uJ" outputId="55c73c9b-2fa8-41ac-9d53-78bb4ca1e055"
pd.DataFrame({'feat': independent_columns_names,
              'coef': rf_grid_search.best_estimator_.feature_importances_}).sort_values(by='coef', ascending=False)

# + colab={"base_uri": "https://localhost:8080/"} id="kSUPl7m6NAur" outputId="53ff58a5-31e6-4cd0-e227-451fe82a061b"
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import RandomizedSearchCV

parameters = {
    "max_features": [2, 3, 4],
    "max_samples": [0.5, 0.7, 0.9],
    "base_estimator__C": [0.0001, 0.001, 0.01, 1, 10, 100],
}

# Bagging over 100 balanced logistic regressions; a randomized search draws
# 20 parameter combinations from the grid above.
bg = BaggingClassifier(LogisticRegression(class_weight='balanced'), n_estimators=100, n_jobs=-1, random_state=42)

r_grid_search = RandomizedSearchCV(bg, parameters, n_jobs=-1, scoring='roc_auc', cv=skf, n_iter=20, random_state=1, verbose=True)
r_grid_search = r_grid_search.fit(X, y)

# + id="7NHu-t1zNXg4" colab={"base_uri": "https://localhost:8080/"} outputId="c5113eec-f5e7-4e28-aa43-69181eaf159a"
r_grid_search.best_estimator_

# + id="QfgMsQPIOjTO" colab={"base_uri": "https://localhost:8080/"} outputId="665ce214-7831-41de-e44d-9f00a8784834"
r_grid_search.best_score_

# + id="SRBsVfK1Olmq"
CreditScoring.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/CarlosRochaA/codes-python-/blob/main/ejercicios.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="C1RZEm1RAlRE"
# # Clase 1 - Tipos de datos

# + [markdown] id="idIpJPg1A_Cs"
# ### 1- Cambiar un texto
# Necesitamos mostrar en nuestra web la sinopsis de las películas. El problema es que tenemos los textos separados por pipes ("|") en lugar de saltos de línea.

# + id="F8n12VmJBAMc"
string = 'Sinopsis | <NAME>, un típico adolescente americano de los años ochenta, '\
'es accidentalmente enviado de vuelta a 1955 en una "máquina del tiempo" realizada con'\
'un DeLorean inventada por un científico un poco loco. | En este viaje, Marty debe '\
'asegurarse de que sus padres se encuentren y se enamoren, para que pueda volver a su tiempo. '

# + colab={"base_uri": "https://localhost:8080/"} id="qHm99oJeBRAY" outputId="97f05b7a-a24e-46cf-e948-b32f64856c89"
print(string)

# + [markdown] id="UV9Hoov9BTd6"
# ¿Cómo podríamos hacer para que la función "print" muestre saltos de línea en lugar de pipes?

# + colab={"base_uri": "https://localhost:8080/"} id="vPEkpaBaBUye" outputId="f1b6275f-e076-4ec5-cfc1-4d3988108965"
string = string.replace('|', '\n')
print(string)

# + [markdown] id="S7cOGm4UBhi6"
# ### 2 - Crear un acrónimo
#
# Ahora nos piden crear un acrónimo (una palabra compuesta por la primera letra de cada palabra) de cada título. Pero antes de eso es necesario que transformemos los títulos a "title case" porque no todos van a llegar prolijos. Entonces, el acrónimo de "Volver al futuro" debería ser "VAF".

# + id="EPaCFYcMB1LY"
titulo = 'Volver al futuro'

# + colab={"base_uri": "https://localhost:8080/"} id="O352TLH2B3Cd" outputId="6ac96797-db5f-4f68-e5f0-1fcfc6b37833"
acronimo = ''
titulo_lista = titulo.title().split()
for palabra in titulo_lista:
    acronimo = acronimo + palabra[0]
print(acronimo)

# + [markdown] id="P52-HQyIWpqA"
# # Clase 2 - Errores y funciones

# + [markdown] id="NYhKyYTnWuTf"
# ## El contador de palabras
#
# Una revista científica quiere publicar los abstracts de los trabajos que aprobó recientemente pero primero tiene que asegurarse de que ninguno de los abstracts tenga más de 250 palabras.
#
# Para interactuar con los archivos que tenemos en nuestro "file system" vamos a utilizar el módulo os. No se preocupen por entender todos los detalles ahora, vamos a ir profundizando en la utilización de módulos.

# + colab={"base_uri": "https://localhost:8080/"} id="UO3fPsseW-xO" outputId="190fb33f-4304-4c71-f261-788bd56843c4"
# !wget https://datasets-humai.s3.amazonaws.com/datasets/publicaciones.zip

# + colab={"base_uri": "https://localhost:8080/"} id="VxdiLX9OYYk4" outputId="cfd48d50-4d9b-4fd9-a2d7-206f909d0d74"
# !unzip publicaciones.zip

# + id="oC83IC5dYfa5"
import os

# + id="0wQO7BeGYgmR"
archivos_directorio = os.listdir('publicaciones')

# + colab={"base_uri": "https://localhost:8080/"} id="tFRcUPPKYgpF" outputId="e06e0252-f5ae-48b2-945d-402703bd2eb6"
print(archivos_directorio)

# + [markdown] id="t43NF04OYvKs"
# La función listdir nos devuelve una lista con todos los archivos que están en la carpeta publicaciones. Noten que solamente nos devuelve los nombres de los archivos, no la ruta completa que necesitamos para acceder a los mismos desde la ubicación en el filesystem donde se encuentra esta notebook.
#
# Las rutas hasta los archivos cambian con el sistema operativo, por eso si están en Windows, la forma de acceder al archivo Yukon Delta Salmon Management.txt es ejercicios\\Yukon Delta Salmon Management.txt mientras que si están en Linux o Unix la forma de acceder es ejercicios/Yukon Delta Salmon Management.txt . Para evitar problemas y que el código sea ejecutable desde cualquier sistema operativo, el módulo os tiene la función os.path.join.
#
# Entonces para crear las rutas vamos a usar la función os.path.join y para esto es ideal una lista por comprensión

# + colab={"base_uri": "https://localhost:8080/"} id="xU07LAb9Y6Xo" outputId="bc282ba8-660c-43fa-d4be-c58b8dce84bf"
rutas_archivos = [os.path.join('publicaciones', archivo) for archivo in archivos_directorio]
print(rutas_archivos)
print(type(rutas_archivos))

# + [markdown] id="mntnGWBbw4FR"
# Ahora sí, vamos a pedirles que creen una función que reciba una tupla con la ruta y el nombre del archivo. Necesitamos que esta función cuente las palabras que hay en el txt que se encuentra en esa ruta y luego imprima el nombre del archivo y la cantidad.
# Después vamos a escribir un for loop que recorra la lista tuplas_archivos y devuelva una tupla con el nombre del archivo y la cantidad de palabras. Desde el loop for vamos a imprimir esa tupla.

# + id="u7r1-1myxOK0"
def cuenta_palabras(ruta, archivo):
    # Reads the file at `ruta` and reports how many whitespace-separated
    # words it contains, as a human-readable string.
    with open(ruta, 'r') as f:
        contenido = f.read()
    palabras = contenido.split()
    return archivo + " tiene " + str(len(palabras)) + " palabras"

#cant_palabras = cuenta_palabras('publicaciones/The Citrus Solution Phase II.txt','The Citrus Solution Phase II.txt')

# + colab={"base_uri": "https://localhost:8080/"} id="VZDTljqwPABu" outputId="b940c470-c1b2-4f1a-a2d9-cc3fb28f071b"
tuplas_archivos = [(rutas_archivos[i], archivos_directorio[i]) for i in range(len(archivos_directorio))]
for archivos in tuplas_archivos:
    print(cuenta_palabras(archivos[0], archivos[1]))

# + [markdown] id="V-ral9_OeJp4"
# Entonces ¿Cuáles superan las 250 palabras? Si quieren ir una milla extra modifiquen la función para que devuelva True si supera y False si no supera en lugar de devolver la cantidad.

# + id="mBxQYMELeFr0"
def cuenta_palabras(ruta, archivo):
    # Returns True when the file has more than 250 words, False otherwise.
    # (Fixed: the original returned None instead of False, contradicting
    # the exercise statement; None is still falsy so callers are unaffected.)
    with open(ruta, 'r') as f:
        contenido = f.read()
    palabras = contenido.split()
    return len(palabras) > 250

#cant_palabras = cuenta_palabras('publicaciones/The Citrus Solution Phase II.txt','The Citrus Solution Phase II.txt')

# + colab={"base_uri": "https://localhost:8080/"} id="-_alADUOef-w" outputId="9290077a-514e-4d03-fab4-844b422b3f10"
tuplas_archivos = [(rutas_archivos[i], archivos_directorio[i]) for i in range(len(archivos_directorio))]
for archivos in tuplas_archivos:
    if cuenta_palabras(archivos[0], archivos[1]):
        print(archivos[1])

# + [markdown] id="RQPCfvAwhUz1"
# ### Otra solución para el Contador de Palabras

# + [markdown] id="kwFj_YQ9jVlr"
# Ahora vamos a unir estas dos listas del mismo tamaño en una lista de tuplas utilizando la función "zip" de Python nativo. Como el zip de Python devuelve un objeto iterable, vamos a convertirlo en lista para trabajar mejor

# + id="bNl7jWWFhYsc"
tuplas_archivos = list(zip(rutas_archivos, archivos_directorio))

# + colab={"base_uri": "https://localhost:8080/"} id="foOfr2xmhzNe" outputId="43170f13-8a3b-43a5-d209-e77f8fd92b1f"
for tupla in tuplas_archivos:
    print(tupla)

# + id="g-JCL32Lh7m-"
# 1. Escribir la función
def contar_palabras(tupla):
    # `tupla` is (ruta, nombre); returns (nombre, word count).
    ruta = tupla[0]
    nombre = tupla[1]
    with open(ruta, 'r') as inp:
        string_contenido = inp.read()
    palabras = string_contenido.split(' ')
    cantidad_palabras = len(palabras)
    return (nombre, cantidad_palabras)

# + colab={"base_uri": "https://localhost:8080/"} id="jfMVzxdOjmgZ" outputId="72348b55-66e6-4f59-d8f4-f88fd6dcfa11"
# 2. Recorrer en un loop tuplas_archivos invocando a la función
for tupla in tuplas_archivos:
    print(contar_palabras(tupla))

# + [markdown] id="2wB2sPDUjw_R"
# Entonces ¿Cuáles superan las 250 palabras? Si quieren ir una milla extra modifiquen la función para que devuelva True si supera y False si no supera en lugar de devolver la cantidad.

# + id="zkl6Ou8Oj1uz"
# 3. Modifiquen la función
def contar_palabras(tupla):
    # Returns (nombre, True/False) according to the 250-word threshold.
    ruta = tupla[0]
    nombre = tupla[1]
    with open(ruta, 'r') as inp:
        string_contenido = inp.read()
    palabras = string_contenido.split(' ')
    cantidad_palabras = len(palabras)
    supera = False
    if cantidad_palabras > 250:
        supera = True
    return (nombre, supera)

# + colab={"base_uri": "https://localhost:8080/"} id="gKRITDmbj6FM" outputId="a6c74a80-087e-463e-ead9-8d33aee898b1"
#4. Vuelvan a llamarla
for tupla in tuplas_archivos:
    print(contar_palabras(tupla))

# + [markdown] id="-vwPfm-wk5qZ"
# ## Funciones de Test
#
# Estos ejercicios tienen un nivel de dificultad un poco mas elevado. Cada ejercicio tiene una función de test para chequear si lo que hicieron esta bien.

# + colab={"base_uri": "https://localhost:8080/"} id="ND8wcSAMk-6E" outputId="f97d1d5a-ef12-44e5-c0b1-c402d1904dd4"
# !wget https://datasets-humai.s3.amazonaws.com/datasets/test_intro_clase2.zip
# !unzip test_intro_clase2.zip

# + id="nHYHM5vDlEQV"
from test import *

# + [markdown] id="5_nE2RbglKTV"
# ## Juego de espías (Fácil)
# El espía Ramsay debe codificar los mensajes que le mandan otros espías sobre la cantidad de tropas que tiene el enemigo en distintos cuarteles. Para esto, otro espía le manda una tira de números con un pequeño truco. Esta tira de números estan separados por `-`, pero para que no sea tan fácil saber que esta informando, la cantidad de tropas esta levemente escondida y también esta escondido el número del cuartel. El cuartel estará escondido en el último lugar de la tira y para obtener la cantidad de tropas aproximadas se deben sumar todos los números que son divisibles por el número del cuartel de la tira. Crear una función que reciba el string de la tira de números y devuelva la cantidad de tropas que hay en el cuartel enemigo como una tupla. Adicionalmente, podria imprimir un mensaje con la información requerida.
#
# Ej:
# ```Python
# INPUT:
# tira_numeros = '29-32-1-5-65-12345-0-12-2'
# OUTPUT:
# (2, 44)
# "En el cuartel número 2 hay 44 soldados"
# ```

# + colab={"base_uri": "https://localhost:8080/"} id="3b4Nk6YGlJfc" outputId="10d53940-167d-4a6c-ced9-d3a3b67dbfaa"
def informe_espia(codigo):
    # Last number is the barracks id; sum the preceding numbers divisible by it.
    lista = codigo.split('-')
    cuartel = lista[-1]
    cant_soldados = [int(soldados) for soldados in lista[:-1] if int(soldados) % int(cuartel) == 0]
    return (int(cuartel), sum(cant_soldados))

tira_numeros = '29-32-1-5-65-12345-0-12-2'
print(informe_espia(tira_numeros))

# + colab={"base_uri": "https://localhost:8080/"} id="0AE-nrbAlJli" outputId="7b5193bf-c9c3-4ed4-f57d-1d284264f1ae"
test1(informe_espia)

# + [markdown] id="QNrzIRNUmxLh"
# ### Otra solución a Juego de Espías

# + id="mUsCaSZQmvvh"
def informe_espia(tira_numeros):
    numeros = tira_numeros.split('-')
    cuartel = int(numeros[-1])
    informe = 0
    for num in numeros[:-1]:
        if int(num) % cuartel == 0:
            informe += int(num)
    print(f"En el cuartel número {cuartel} hay {informe} soldados")
    return (cuartel, informe)

# + id="gvNANHIzn2Rx" colab={"base_uri": "https://localhost:8080/"} outputId="a7595aef-30e7-4412-de49-edd70ca06614"
test1(informe_espia)

# + [markdown] id="C3XokRnImwJB"
# ***
# ## Codificador César (Intermedio)
# Una de las formas mas antiguas de crear un código encriptado es lo que se conoce como el encriptado César <https://es.wikipedia.org/wiki/Cifrado_C%C3%A9sar>. En este tipo de encriptado lo que se hace es "girar" el abecedario una determinada cantidad de pasos según una clave numérica (ver ejemplo). Crear una función que lea un string dentro de un txt en la misma ruta que esta notebook, tome una clave y devuelva el string encriptado con la clave César en minúsculas(asumir que el texto esta en castellano).
#
# Ej: Clave = 2
#
# | Letra | Letra encriptada |
# | ------------- |:-------------:|
# | A | C |
# | B | D |
# | C | E |
# | ... | ... |
# | Y | A |
# | Z | B |
#
# ```Python
# INPUT:
# 'mi_archivo.txt' ("Hola estudiante"), clave = 1
# OUTPUT:
# "Jqnc guvwfkcovg"
# ```
#
# *AYUDA*
#
# El método `mi_lista.index(elemento)` búsca el `elemento` en la lista `mi_lista` y devuelve la posición del elemento si lo encontró. Si no lo encontró devuelve un `ValueError`.

# + id="BVwTpFIqpSty" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="79ee15f7-b5c9-4ec5-bca2-d532f7b9e9ad"
def codificador_cesar(archivo, clave):
    # Caesar-cipher the file's text over the 27-letter Spanish alphabet,
    # preserving case and leaving non-letters untouched.
    with open(archivo, 'r') as f:
        texto = f.read()
    cadena = ''
    alfabeto = 'abcdefghijklmnñopqrstuvwxyz'
    for indice in range(len(texto)):
        if texto[indice].lower() in alfabeto:
            pos_alfa = alfabeto.index(texto[indice].lower())
            # Wrap around the end of the alphabet manually.
            if pos_alfa + (clave) > len(alfabeto) - 1:
                indice_clave = (pos_alfa + clave) - len(alfabeto)
            else:
                indice_clave = pos_alfa + clave
            # Verifico Mayusculas
            if texto[indice].lower() == texto[indice]:
                caracter_codificado = alfabeto[indice_clave]
            else:
                caracter_codificado = alfabeto[indice_clave].upper()
        else:
            caracter_codificado = texto[indice]
        cadena += caracter_codificado
    return cadena

codificador_cesar("borges1.txt", 6)

# + id="PeBn5ttIn_7w" colab={"base_uri": "https://localhost:8080/"} outputId="ef8ea7ad-24d8-4c2f-82a1-e7b5e7b27386"
test2_mayusculas(codificador_cesar)

# + [markdown] id="dki79duioLRQ"
# ### Otra solución a Codificador César (Minúsculas)
#
#

# + id="nuUwCIJmoPnP"
def codificador_cesar(mensaje_path, clave):
    # Un ayudin
    abecedario = 'abcdefghijklmnñopqrstuvwxyz'
    # Ahora hagan su magia (ojo con las mayúsculas)
    # Llamamos a lower para obtener sólo minúsculas
    with open(mensaje_path, 'r') as f:
        contenido = f.read().lower()
    # Variable para guardar mensaje cifrado
    cifrado = ""
    for l in contenido:
        # Si la letra está en el abecedario se reemplaza
        if l in abecedario:
            pos_letra = abecedario.index(l)
            # Sumamos para movernos a la derecha del abc
            nueva_pos = (pos_letra + clave) % len(abecedario)
            cifrado += abecedario[nueva_pos]
        else:
            # Si no está en el abecedario sólo añadelo
            cifrado += l
    #print(cifrado)
    return cifrado

# + id="2YZuEC7zoUI6" colab={"base_uri": "https://localhost:8080/"} outputId="91cc63c0-9272-420c-df0b-f697cedfbef3"
test2(codificador_cesar)

# + [markdown] id="4sz9uIrEptfz"
# ## La calesita (Rompecoco)
# El señor Jacinto es dueño de una antigua calesita con animalitos que no funciona hace varios años y quiere volver a ponerla en funcionamiento. Para eso va a probarla prendiendola y viendo cuanto rota segun la cantidad de movimientos.
#
# Crear una función que reciba una lista de strings (con la primera en mayúscula) con los animales que componen la calesita, una cantidad de ciclos(n_ciclos) y devuelva la misma lista pero rotada hacia la derecha esa cantidad de movimientos, donde un movimiento es cambiar todos los animales una posición hacia la derecha:
#
# Ej:
# ``` Python
# INPUT:
# ['Unicornio','Oso','Jirafa', 'Pato', 'Elefante'], movimiento = 1
# OUTPUT:
# ['Elefante', 'Unicornio', 'Oso', 'Jirafa', 'Pato']
# ```

# + id="jkOPwh2fpyTv"
def probar_calesita(calesita, n_movimientos):
    # Proba la calesita
    # Primero calculamos el resto de la division de los ciclos por el tamaño de la calesita
    # Porque si por ejemplo da tantos ciclos como tamaño tiene, entonces la calesita no cambia asi omitimos dar vueltas de mas
    movimientos = n_movimientos % len(calesita)
    #print(movimientos)
    # Ahora posicion primero me dice donde va a ir a parar el primer animalito y asi sucesivamente.
    # Separamos en el caso de que si se mueva la calesita
    if movimientos > 0:
        # Movemos los ultimos elementos al principio de la lista
        adelante = calesita[len(calesita) - movimientos:]
        atras = calesita[:-movimientos]
        #print(adelante)
        #print(atras)
        calesita_girada = adelante + atras
    else:
        calesita_girada = calesita
    return calesita_girada

# + id="J0t3iz69p3fh" colab={"base_uri": "https://localhost:8080/"} outputId="bdb93fea-4657-48b0-ae99-b15f27c1f9e6"
#test3(probar_calesita)
probar_calesita(['Gatito','Ornitorrinco','Vaca','Elefante','Pato'], 3)

# + [markdown] id="t_gckD4CqxJT"
# Cuando prueba la calesita se da cuenta que es muy lenta. Debe sacar uno de los animales para que pueda funcionar correctamente. Para eso los manda a pesar y le dicen cual es el que hay que sacar para que funcione perfectamente.
#
# Modificar la función anterior para que reciba un string, que es un animal en MAYÚSCULAS (animal_quitar) para sacar y pruebe la función nuevamente.
#
# Ej:
# ```Python
# INPUT:
# ['Unicornio','Oso','Jirafa', 'Pato', 'Elefante'], animal_quitar = 'JIRAFA', movimientos = 1
# OUTPUT:
# ['Elefante', 'Unicornio', 'Oso', 'Pato']
# ```

# + id="EswMmZC7q2OA"
def probar_calesita_arreglada(calesita, n_mov, animal_quitar):
    # Normalize the animal name to Title Case so it matches the list entries.
    animal = animal_quitar.lower().title()
    indice = calesita.index(animal)
    nueva_calesita = calesita[:indice] + calesita[indice + 1:]
    return probar_calesita(nueva_calesita, n_mov)

# + id="l3RNjGFmq5Sd" colab={"base_uri": "https://localhost:8080/"} outputId="852f424f-ca40-44e3-e83a-a40f82ff2c00"
#test4(probar_calesita_arreglada)
probar_calesita_arreglada(['Gatito','Ornitorrinco','Vaca','Elefante','Pato'], 11, 'Gatito')

# + [markdown] id="SWXiUHgXrKf0"
# # Clase 3 - Algoritmos

# + [markdown] id="jLnntjA0rVyH"
# ## Factoriales
#
# Factorial de 5 es igual a $5*4*3*2*1$, es decir, 120.

# + id="MqceVPG5rM5m"
# Solución recursiva.
def factorial(x):
    if x == 1:
        return 1
    else:
        return x * factorial(x - 1)

# + id="6foyjLwSri2E"
# Solución iterativa.
def factorial(x):
    resultado = 1
    for i in range(1, x + 1):
        resultado = resultado * (i)
    return resultado

# + [markdown] id="4EgA9Rvhrnc1"
# Construya un módulo que se llame "operaciones" con cualquiera de las dos versiones de la función y luego invoquen a la función factorial del módulo.

# + id="326tY2gcrM6z"
# Escriban el archivo operaciones.py
with open('operaciones.py', 'w') as out:
    out.write("""def factorial(x):
    resultado = 1
    for i in range(1, x+1):
        resultado = resultado * (i)
    return resultado""")

# + id="o_1xTP7ksIKc"
# Importen operaciones
import operaciones

# + id="vKB4ZV14sN-d" outputId="b7ff9dce-dcae-48f2-c10b-ca882d128760" colab={"base_uri": "https://localhost:8080/"}
# Invoquen con cualquier valor a operaciones.factorial()
operaciones.factorial(5)
ejercicios.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # MySQL Examples
#
# ## Connect to a database server

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>"
)

print(mydb)

print(f"""
Hostname: {mydb.server_host}
Port: {mydb.server_port}
User: {mydb.user}
Timezone: {mydb.time_zone}
MySQL ver: {mydb.get_server_info()}
SQL Mode: {mydb.sql_mode}
Current DB: {mydb.database}
""")

# Version as a tuple
mydb.get_server_version()

mydb.close()
# -

# ## Create a database & show all DB's

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>"
)

mycursor = mydb.cursor()

#mycursor.execute("CREATE DATABASE mydatabase")
mycursor.execute("CREATE DATABASE IF NOT EXISTS mydatabase")

mycursor.execute("SHOW DATABASES")
for x in mycursor:
    print(x)

mycursor.close()
mydb.close()
# -

# ## Create a table named "customers"

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = mydb.cursor()

mycursor.execute("CREATE TABLE IF NOT EXISTS customers (name VARCHAR(35), address VARCHAR(255))")

sql = "INSERT INTO customers (name, address) VALUES (%s, %s)"
val = ("John", "Highway 21")
mycursor.execute(sql, val)
mydb.commit()

mycursor.execute("SELECT * FROM customers")
for x in mycursor.fetchall():
    print(x)

mycursor.close()
mydb.close()
# -

# ## Add a primary key column

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = mydb.cursor()

mycursor.execute("ALTER TABLE customers ADD COLUMN id INT AUTO_INCREMENT PRIMARY KEY")

mycursor.close()
mydb.close()
# -

# ## Insert a record

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = mydb.cursor()

sql = "INSERT INTO customers (name, address) VALUES (%s, %s)"
val = ("John", "Highway 21")
mycursor.execute(sql, val)
mydb.commit()

print(mycursor.rowcount, "record inserted.")

mycursor.close()
mydb.close()
# -

# ## Insert many

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = mydb.cursor()

sql = "INSERT INTO customers (name, address) VALUES (%s, %s)"
val = [
  ('Peter', 'Lowstreet 4'),
  ('Amy', 'Apple st 652'),
  ('Hannah', 'Mountain 21'),
  ('Michael', 'Valley 345'),
  ('Sandy', 'Ocean blvd 2'),
  ('Betty', 'Green Grass 1'),
  ('Richard', 'Sky st 331'),
  ('Susan', 'One way 98'),
  ('Vicky', 'Yellow Garden 2'),
  ('Ben', 'Park Lane 38'),
  ('William', 'Central st 954'),
  ('Chuck', 'Main Road 989'),
  ('Viola', 'Sideway 1633')
]

mycursor.executemany(sql, val)
mydb.commit()

print(mycursor.rowcount, "was inserted.")

mycursor.close()
mydb.close()
# -

# ## Query/View records

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = mydb.cursor()

sql = "SELECT * FROM customers"
mycursor.execute(sql)
for row in mycursor:
    print(row)

mycursor.close()
mydb.close()
# -

# ## Delete records
#
# *Note: Escape values by using the placeholder %s method to avoid SQL Injection.*

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = mydb.cursor()

sql = "DELETE FROM customers WHERE address = %s"
adr = ("Yellow Garden 2", )
mycursor.execute(sql, adr)
mydb.commit()

print(mycursor.rowcount, "record(s) deleted")

mycursor.close()
mydb.close()
# -

# ## Drop/delete table

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = mydb.cursor()

# Fixed: this cell previously contained an INSERT plus a bare
# `START TRANSACTION:` line, which is not valid Python and did not match
# the section heading. DDL statements are auto-committed, so no commit() needed.
sql = "DROP TABLE IF EXISTS customers"
mycursor.execute(sql)

mycursor.close()
mydb.close()
# -

# ## Drop/delete database

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  # database="mydatabase" # Skipping database parameter
)

mycursor = mydb.cursor()

sql = "DROP DATABASE IF EXISTS mydatabase"
mycursor.execute(sql)

mycursor.execute("SHOW DATABASES")

mycursor.close()
mydb.close()
# -

# ## Show columns

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = mydb.cursor()

sql = "SHOW COLUMNS FROM customers"
mycursor.execute(sql)
for x in mycursor:
    print(x)

mydb.close()
# -

# ## UPDATE
#
# ### Copy table
# Useful to make a copy of the table before making modifications.
#
# ```sql
# CREATE TABLE new_table
# SELECT col, col2, col3
# FROM
#     existing_table;
# ```

# ## Transaction
#
# ### Work in progress

# ### Check whether autocommit is set to true or false

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

# Fixed: this cell was a duplicate of the "Show columns" cell and never
# inspected autocommit, despite the heading above.
print(mydb.autocommit)

mydb.close()
# -

# ## Data aggregation
#
# ```sql
# SELECT AVG(amount) AS 'Average payment amount'
# FROM sakila.payment;
# ```
#
# *Note: Column header is not visible here. I.E. By using Pandas, column headers can be printed*

# +
import mysql.connector

mydb = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="sakila"
)

mycursor = mydb.cursor()

sql = "SELECT AVG(amount) AS 'Average payment amount' FROM sakila.payment;" # OR use single quotes 'Average payment amount'
mycursor.execute(sql)
result = mycursor.fetchall()
for i in result:
    print(i[0])

mydb.close()
# -

# ## Return a dictionary from queries
#
# Use `dictionary=True` to retrieve dictionaries with column names as keys, instead of "columnless" tuples

# +
import mysql.connector

conn = mysql.connector.connect(
  host="localhost",
  user="lybekk",
  password="<PASSWORD>",
  database="mydatabase"
)

mycursor = conn.cursor(dictionary=True)

sql = "SELECT * FROM customers"
mycursor.execute(sql)
rows = mycursor.fetchall()
print(rows)
for row in rows:
    print(row)

# Close resources explicitly, matching the other examples in this notebook.
mycursor.close()
conn.close()
# -
jupyter_notebooks/notebook_mysql.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python (conda: mpdev)' # language: python # name: mpdev # --- # ## Demonstration of Materials Project Energy Corrections # This notebook illustrates how to apply and obtain an explanation of energy corrections used in the Materials Project database. # # Author: <NAME> # # Date: May 2021 # # `pymatgen==2022.0.8` # ### Overview # # The Materials Project API (`MPRester`) returns `ComputedEntry` objects that contain information about DFT calculations. By default, these objects have adjustments applied to the energies of certain elements to reduce certain systematic errors. See our [documentation](https://docs.materialsproject.org/methodology/total-energies/#total-energy-adjustments) for complete details. # # As of Spring 2021, `ComputedEntry` are processed using the `MaterialsProject2020Compatibility` class in pymatgen by default. The legacy correction scheme, used from 2010-2020, is still available in `MaterialsProjectCompatibility`. from pymatgen.entries.computed_entries import ComputedEntry from pymatgen.entries.compatibility import MaterialsProjectCompatibility, \ MaterialsProject2020Compatibility from pymatgen.ext.matproj import MPRester # ### Default behavior - `MaterialsProject2020Compatibility` # # Let's retrieve entries in the `Cl-Mo-O` system to demonstrate how this works. # retrieve with MPRester() as m: entries = m.get_entries_in_chemsys("Cl-Mo-O") entry = entries[0] # You can examine the energy corrections via the `energy_adjustments` attribute # of the `ComputedEntry`. This attribute contains a list of each energy correction that has been applied. 
entries[25].energy_adjustments # If you want even more detail, you can examine an indiviual `EnergyAdjustment` (one element of the list) # ### Applying the legacy corrections with `MaterialsProjectCompatibility` # # If you want to use the old corrections, or apply your own, you can re-process the `ComputedEntry` obtained from `MPRester` using a `Compatibility` class. The `.process_entries` method will remove any previously-applied energy corrections and re-process the entry in-place. # + compat = MaterialsProjectCompatibility() entries = compat.process_entries(entries) # - entries[25].energy_adjustments # Notice how the energy adjustments have changed. The class name, description and values are all different. You will also notice that the descriptions of the legacy corrections are less verbose than those of the modern `MaterialsProject2020Compatibility` corrections. # ### Removing corrections altogther # # If you want to remove all corrections from a `ComputedEntry`, simply set `energy_adjustments` to an empty list. You can verify that you have removed corrections by checking the `energy_per_atom` and the `correction_per_atom` of the `ComputedEntry` before and after. entries[25].energy_per_atom entries[25].correction_per_atom entries[25].energy_adjustments = [] entries[25].energy_per_atom entries[25].correction_per_atom # Alternatively, you can simply pass `compatible_only=False` to the `MPRester` call when you download data. # retrieve with MPRester() as m: entries = m.get_entries_in_chemsys("Cl-Mo-O", compatible_only=False) entry = entries[0] entries[25].energy_adjustments
notebooks/2021-5-12-Explanation of Corrections.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
# %matplotlib inline

def createDataPoints(centroidLocation, numSamples, clusterDeviation):
    # Generate gaussian blobs around the given centroids and standardize
    # them to zero mean / unit variance (DBSCAN's eps assumes scaled data).
    X, y = make_blobs(n_samples=numSamples, centers=centroidLocation,
                      cluster_std=clusterDeviation)
    X = StandardScaler().fit_transform(X)
    return X, y

X, y = createDataPoints([[4,3], [2,-1],[-1,4]], 1500, 0.5)
print(X,y)

epsilon = 0.3
minimumSamples = 7
db = DBSCAN(eps=epsilon, min_samples=minimumSamples).fit(X)
labels = db.labels_
labels

core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
core_samples_mask

# Noise points are labeled -1; exclude them from the cluster count.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_clusters_

unique_labels = set(labels)
unique_labels

labels

colors = plt.cm.Spectral(np.linspace(0,1, len(unique_labels)))
colors

# +
from sklearn.cluster import KMeans
k = 3
k_means3 = KMeans(init = "k-means++", n_clusters = k, n_init = 12)
k_means3.fit(X)
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(1, 1, 1)
for k, col in zip(range(k), colors):
    my_members = (k_means3.labels_ == k)
    # Fixed: wrap the single RGBA color in a list — passing a bare 4-element
    # sequence as `c` is ambiguous to matplotlib (could be 4 scalar values).
    plt.scatter(X[my_members, 0], X[my_members, 1], c=[col], marker=u'o', alpha=0.5)
plt.show()
# -

for k, col in zip(unique_labels, colors):
    if k == -1:
        # Black used for noise.
        col = 'k'
    class_member_mask = (labels == k)
    # Core samples.
    xy = X[class_member_mask & core_samples_mask]
    # Fixed: `c=[col]` (was `c=col`) for the same single-color ambiguity,
    # matching the border-point call below.
    plt.scatter(xy[:, 0], xy[:, 1], s=50, c=[col], marker=u'o', alpha=0.5)
    # Border (non-core) samples.
    xy = X[class_member_mask & ~core_samples_mask]
    plt.scatter(xy[:, 0], xy[:, 1], s=50, c=[col], marker=u'o', alpha=0.5)

# !wget -O weather-stations20140101-20141231.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/weather-stations20140101-20141231.csv

# +
import csv
import pandas as pd
import numpy as np

filename = 'weather-stations20140101-20141231.csv'
pdf = pd.read_csv(filename)
pdf.head(5)
# -

# Drop stations without a mean-temperature reading.
pdf = pdf[pd.notnull(pdf["Tm"])]
pdf = pdf.reset_index(drop=True)
pdf.head(5)

# +
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from pylab import rcParams
# %matplotlib inline
rcParams['figure.figsize'] = (14,10)

llon=-140
ulon=-50
llat=40
ulat=65

pdf = pdf[(pdf['Long'] > llon) & (pdf['Long'] < ulon) & (pdf['Lat'] > llat) &(pdf['Lat'] < ulat)]

my_map = Basemap(projection='merc',
            resolution = 'l', area_thresh = 1000.0,
            llcrnrlon=llon, llcrnrlat=llat, #min longitude (llcrnrlon) and latitude (llcrnrlat)
            urcrnrlon=ulon, urcrnrlat=ulat) #max longitude (urcrnrlon) and latitude (urcrnrlat)

my_map.drawcoastlines()
my_map.drawcountries()
# my_map.drawmapboundary()
my_map.fillcontinents(color = 'white', alpha = 0.3)
my_map.shadedrelief()

# To collect data based on stations
xs,ys = my_map(np.asarray(pdf.Long), np.asarray(pdf.Lat))
pdf['xm']= xs.tolist()
pdf['ym'] =ys.tolist()

#Visualization1
for index,row in pdf.iterrows():
#   x,y = my_map(row.Long, row.Lat)
   my_map.plot(row.xm, row.ym,markerfacecolor =([1,0,0]),  marker='o', markersize= 5, alpha = 0.75)
#plt.text(x,y,stn)
plt.show()
# -

# NOTE(review): this install cell must run BEFORE the Basemap cell above on a
# fresh environment — consider moving it up.
# !pip install https://github.com/matplotlib/basemap/archive/master.zip

# +
from sklearn.cluster import DBSCAN
import sklearn.utils
from sklearn.preprocessing import StandardScaler
sklearn.utils.check_random_state(1000)
Clus_dataSet = pdf[['xm','ym']]
Clus_dataSet = np.nan_to_num(Clus_dataSet)
Clus_dataSet = StandardScaler().fit_transform(Clus_dataSet)

# Compute DBSCAN
db = DBSCAN(eps=0.15, min_samples=10).fit(Clus_dataSet)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
pdf["Clus_Db"]=labels

realClusterNum=len(set(labels)) - (1 if -1 in labels else 0)
clusterNum = len(set(labels))

# A sample of clusters
pdf[["Stn_Name","Tx","Tm","Clus_Db"]].head(5)
# -
Untitled1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from google.colab import drive
drive.mount('/content/drive')

#hide
# ! pip install nbdev

# +
#default_exp utils
# -

#hide
from nbdev.showdoc import *

#export
import os
import re
import imghdr
import tensorflow as tf
import requests
from PIL import Image
from tqdm import notebook
from tensorflow.keras.callbacks import Callback
from tensorflow.train import CheckpointManager
from typing import *
from pathlib import Path
from shutil import copyfile

#export
def copy_data(source:Path, dest:Path, copy_rate:float):
    '''
    Copy data from source path to dest (destination) path

    Args:
        source: path of the data source
        dest: path where to copy the data
        copy_rate: percentage of how many data to copy from the source
    '''
    fnames = os.listdir(source)
    data_size = len(fnames)
    train_data_size = int(data_size * copy_rate)
    for idx, fname in enumerate(fnames):
        src = source / fname
        if idx + 1 > train_data_size:
            break
        # Skip empty or non-JPEG files instead of copying corrupt data.
        if os.path.getsize(src) == 0 or imghdr.what(src) != 'jpeg':
            print("Ignoring {}, because it's a corrupted file".format(fname))
            continue
        copyfile(src, dest / fname)

# +
# #hide
# def how_many_in_each_class(root_dir ,classes):
#     output = {}
#     for class_name in classes:
#         output[class_name.upper()] = len(os.listdir(root_dir / class_name))
#     return output
# -

#export
def how_many_in_each_class(root_dir:Path, classes:List[str]):
    '''Print the image count of each class sub-directory under `root_dir`.'''
    for class_name in classes:
        print('class: {} has: {} images'.format(class_name.upper(), len(os.listdir(root_dir / class_name))))

#export
def freeze_unfreeze_layers(model:tf.keras.Model, layers:List[str] = [], freeze_mode:bool = True):
    '''freeze unfreeze layers for a given model

    Args:
        model: The model to freeze unfreeze its layers
        layers: a list of layers to be frozen unfrozen
                empty list means the operation will be applied to all layers of the given model
        freeze_mode: True to freeze the layers, False to unfreeze the layers
    '''
    trainable = not(freeze_mode)
    if len(layers) == 0:
        for layer in model.layers:
            layer.trainable = trainable
        return
    for layer in layers:
        model.get_layer(layer).trainable = trainable

#export
def list_layers(model:tf.keras.Model):
    '''
    List all layers of a given model
    '''
    for layer in model.layers:
        print('name: {}, trainable: {}'.format(layer.name, layer.trainable))

#export
def evaluate(model:tf.keras.Model, test_ds:tf.data.Dataset, metric:tf.metrics.Metric, num_batches:int = None):
    '''Run `model` over `test_ds` in inference mode and report `metric`.'''
    prog_bar = tf.keras.utils.Progbar(target=num_batches)
    for idx, batch in enumerate(test_ds):
        metric(batch[1], model(batch[0], training=False))
        prog_bar.update(idx)
    print()
    print("{}: {:.2f}".format(metric.name, metric.result()))

#export
class CheckPointManagerCallback(Callback):
    '''
    Keras Callback for the tf.train.CheckPointManager
    All arguments are same as in the tf.train.CheckPointManager except the `after_num_epoch`

    Args:
        after_num_epoch: number of epochs between each check point save
    '''
    def __init__(self,
                 checkpoint,
                 directory,
                 max_to_keep,
                 after_num_epoch=1,
                 keep_checkpoint_every_n_hours=None,
                 checkpoint_name="ckpt",
                 step_counter=None,
                 checkpoint_interval=None,
                 init_fn=None):
        super().__init__()
        self.manager = (CheckpointManager(checkpoint,
                                          directory,
                                          max_to_keep,
                                          keep_checkpoint_every_n_hours,
                                          checkpoint_name,
                                          step_counter,
                                          checkpoint_interval,
                                          init_fn))
        self.epoch_counter = 0
        self.after_num_epoch = after_num_epoch

    # Fixed: `logs=None` instead of the mutable default `logs={}`.
    def on_epoch_end(self, batch, logs=None):
        self.epoch_counter += 1
        if self.epoch_counter % self.after_num_epoch == 0:
            self.manager.save()

# +
#hide
import time

def timeit(dataset, steps=100):
    # NOTE(review): relies on a global `batch_size` being defined in the
    # notebook before this cell is run — confirm.
    start_time = time.time()
    iterator = iter(dataset)
    for step in range(steps):
        next_batch = next(iterator)
        if step % 10 == 0:
            print('.', end='')
    print()
    end_time = time.time()

    duration = (end_time - start_time)
    print('{} batches: {} s'.format(steps, (duration)))
    print('{:.4f} Images/s'.format(batch_size * steps / duration))
# -

#export
def _validate_url(url):
    '''Return True when `url` looks like a well-formed http/ftp(s) URL.'''
    regex = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
        r'localhost|' #localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return re.match(regex, url) is not None

#export
def download_images(dest, file, counter = 0):
    '''Download every valid URL listed (one per line) in `file` into `dest`.

    Args:
        dest: directory Path where the images are written
        file: text file with one URL per line
        counter: offset added to the index-based image file names
    '''
    # Fixed: both file handles were previously left open
    # (`open(file).read()` and `open(...).write(...)`).
    with open(file) as url_file:
        urls = url_file.read().strip().split("\n")
    for idx, url in enumerate(urls):
        if not(_validate_url(url)):
            continue
        resp = requests.get(url, allow_redirects=False)
        suffix = '.jpg'
        img_name = str(idx + counter)
        img_full_name = str(dest/img_name) + suffix
        with open(img_full_name, 'wb') as img_file:
            img_file.write(resp.content)

#export
def verify_images(dest:Path, delete:bool=False, n_channels:int=3):
    '''Check every image under `dest`; optionally delete corrupted ones.'''
    fnames = os.listdir(str(dest))
    for fname in notebook.tqdm(fnames, total=len(fnames)):
        if not(verify_image(dest/fname, n_channels)):
            if delete:
                os.remove(dest/fname)
                print('{} => corrupted image, so it was deleted'.format(dest/fname))
                continue
            print('{} => corrupted image, pass `delete=True` to delete corrupted images'.format(dest/fname))

#export
def verify_image(img_path, n_channels):
    '''Return True when the file is a readable image with `n_channels` layers.'''
    try:
        img = Image.open(img_path)
        # Decode a tiny draft first — much cheaper than a full load.
        img.draft(img.mode, (28,28))
        img.load()
        return imghdr.what(img_path) != None and img.layers == n_channels
    except:
        return False

# ! nbdev_build_lib

# ! ssh-keygen -t rsa -b 2048

# ! cd /root/.ssh && cat id_rsa.pub

# ! nbdev_install_git_hooks

# ! git
nbs/00_utils.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/agemagician/CodeTrans/blob/main/prediction/single%20task/source%20code%20summarization/python/small_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="c9eStCoLX0pZ"
# **<h3>Summarize python source code using the CodeTrans single-task training model</h3>**
# <h4>You can make free predictions online through this
# <a href="https://huggingface.co/SEBIS/code_trans_t5_small_source_code_summarization_python">link</a></h4> (When predicting online, you need to parse and tokenize the code first.)

# + [markdown] id="6YPrvwDIHdBe"
# **1. Load the necessary libraries, including huggingface transformers**

# + id="6FAVWAN1UOJ4"
# !pip install -q transformers sentencepiece

# + id="53TAO7mmUOyI"
from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline

# + [markdown] id="xq9v-guFWXHy"
# **2. Load the summarization pipeline, placing the model on the GPU if available**

# + id="5ybX8hZ3UcK2"
pipeline = SummarizationPipeline(
    model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_small_source_code_summarization_python"),
    tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_small_source_code_summarization_python", skip_special_tokens=True),
    device=0
)

# + [markdown] id="hkynwKIcEvHh"
# **3. Give the code for summarization, parse and tokenize it**

# + id="nld-UUmII-2e"
code = '''with open("file.txt", "r") as in_file:\n buf = in_file.readlines()\n\nwith open("file.txt", "w") as out_file:\n for line in buf:\n if line == "; Include this text\n":\n line = line + "Include below\n"\n out_file.write(line)''' #@param {type:"raw"}

# + id="cJLeTZ0JtsB5"
import tokenize
import io


def pythonTokenizer(line):
    '''Normalize python source into a space-separated token string.

    Comments are dropped, string literals become CODE_STRING and numeric
    literals become CODE_INTEGER, mirroring the model's training format.
    '''
    parts = []
    reader = io.StringIO(line).readline
    for tok_type, tok_text, _, _, _ in tokenize.generate_tokens(reader):
        if tok_type == tokenize.COMMENT:
            continue
        if tok_type == tokenize.STRING:
            parts.append("CODE_STRING")
        elif tok_type == tokenize.NUMBER:
            parts.append("CODE_INTEGER")
        elif tok_text != "\n" and tok_text != " ":
            parts.append(str(tok_text))
    return ' '.join(parts)


# + id="hqACvTcjtwYK"
tokenized_code = pythonTokenizer(code)
print("code after tokenization " + tokenized_code)

# + [markdown] id="sVBz9jHNW1PI"
# **4. Make Prediction**

# + id="KAItQ9U9UwqW"
pipeline([tokenized_code])
prediction/single task/source code summarization/python/small_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Statistics from Stock Data
#
# In this lab we will load stock data into a Pandas DataFrame and calculate some statistics on it. We will be working with stock data from Google, Apple, and Amazon. All the stock data was downloaded from Yahoo Finance in CSV format. In your workspace you should have a file named GOOG.csv containing the Google stock data, a file named AAPL.csv containing the Apple stock data, and a file named AMZN.csv containing the Amazon stock data. All the files contain 7 columns of data:
#
# **Date Open High Low Close Adj_Close Volume**
#
# We will start by reading one of the above CSV files into a DataFrame and seeing what the data looks like.

# +
# We import pandas into Python
import pandas as pd

# We read one stock data file into a DataFrame to inspect it
stock_preview = pd.read_csv("AAPL.csv")

# We display the first 5 rows of the DataFrame
stock_preview.head()
# -

# We clearly see that the DataFrame has automatically labeled the row indices using integers and has labeled the columns of the DataFrame using the names of the columns in the CSV files.
#
# # To Do
#
# You will now load the stock data from Google, Apple, and Amazon into separate DataFrames. However, for each stock you will only be interested in loading the `Date` and `Adj Close` columns into the DataFrame. In addition, you want to use the `Date` column as your row index. Finally, you want the DataFrame to recognize the dates as actual dates (year/month/day) and not as strings. For each stock, you can accomplish all these things in just one line of code by using the appropriate keywords in the `pd.read_csv()` function. Here are a few hints:
#
# * Use the `index_col` keyword to indicate which column you want to use as an index. For example `index_col = ['Open']`
#
# * Set the `parse_dates` keyword equal to `True` to convert the dates into real dates of the form year/month/day
#
# * Use the `usecols` keyword to select which columns you want to load into the DataFrame. For example `usecols = ['Open', 'High']`
#
# Fill in the code below:

# +
# We load the Google stock data into a DataFrame
google_stock = pd.read_csv('GOOG.csv', usecols=["Date", "Adj Close"], index_col=['Date'], parse_dates=True)

# We load the Apple stock data into a DataFrame
apple_stock = pd.read_csv("AAPL.csv", usecols=["Date", "Adj Close"], index_col=['Date'], parse_dates=True)

# We load the Amazon stock data into a DataFrame
amazon_stock = pd.read_csv("AMZN.csv", usecols=["Date", "Adj Close"], index_col=['Date'], parse_dates=True)
# -

# You can check that you have loaded the data correctly by displaying the head of the DataFrames.

# We display the google_stock DataFrame
google_stock.head()

# You will now join the three DataFrames above to create a single new DataFrame that contains all the `Adj Close` for all the stocks. Let's start by creating an empty DataFrame that has as row indices calendar days between `2000-01-01` and `2016-12-31`. We will use the `pd.date_range()` function to create the calendar dates first and then we will create a DataFrame that uses those dates as row indices:

# +
# We create calendar dates between '2000-01-01' and '2016-12-31'
dates = pd.date_range('2000-01-01', '2016-12-31')

# We create an empty DataFrame that uses the above dates as indices
all_stocks = pd.DataFrame(index=dates)
# -

# # To Do
#
# You will now join the individual DataFrames, `google_stock`, `apple_stock`, and `amazon_stock`, to the `all_stocks` DataFrame. However, before you do this, it is necessary that you change the name of the columns in each of the three DataFrames. This is because the column labels in the `all_stocks` DataFrame must be unique. Since all the columns in the individual DataFrames have the same name, `Adj Close`, we must change them to the stock name before joining them. In the space below change the column label `Adj Close` of each individual DataFrame to the name of the corresponding stock. You can do this by using the `pd.DataFrame.rename()` function.

# +
# Change the Adj Close column label to Google
google_stock = google_stock.rename(columns={"Adj Close": "Google"})

# Change the Adj Close column label to Apple
apple_stock = apple_stock.rename(columns={"Adj Close": "Apple"})

# Change the Adj Close column label to Amazon
amazon_stock = amazon_stock.rename(columns={"Adj Close": "Amazon"})
# -

# You can check that the column labels have been changed correctly by displaying the DataFrames

# We display the google_stock DataFrame
google_stock.head()

# We display the apple_stock DataFrame
apple_stock.head()

# We display the amazon_stock DataFrame
amazon_stock.head()

# Now that we have unique column labels, we can join the individual DataFrames to the `all_stocks` DataFrame. For this we will use the `dataframe.join()` function. The function `dataframe1.join(dataframe2)` joins `dataframe1` with `dataframe2`. We will join each DataFrame one by one to the `all_stocks` DataFrame. Fill in the code below to join the DataFrames, the first join has been made for you:

all_stocks.head()

# +
# We join the Google stock to all_stocks
all_stocks = all_stocks.join(google_stock)

# We join the Apple stock to all_stocks
all_stocks = all_stocks.join(apple_stock)

# We join the Amazon stock to all_stocks
all_stocks = all_stocks.join(amazon_stock)
# -

# You can check that the DataFrames have been joined correctly by displaying the `all_stocks` DataFrame

# We display the all_stocks DataFrame
all_stocks.head()

# # To Do
#
# Before we proceed to get some statistics on the stock data, let's first check that we don't have any *NaN* values. In the space below check if there are any *NaN* values in the `all_stocks` DataFrame. If there are any, remove any rows that have *NaN* values:

# Check if there are any NaN values in the all_stocks dataframe
all_stocks.isnull().any()

# Remove any rows that contain NaN values
all_stocks.dropna(axis=0, inplace=True)

# You can check that the *NaN* values have been eliminated by displaying the `all_stocks` DataFrame

# Check if there are any NaN values in the all_stocks dataframe
all_stocks.isnull().any()

# Display the `all_stocks` dataframe and verify that there are no *NaN* values

# We display the all_stocks DataFrame
all_stocks.head()

# Now that you have eliminated any *NaN* values we can calculate some basic statistics on the stock prices. Fill in the code below

# +
# Print the average stock price for each stock
print(all_stocks.mean())

# Print the median stock price for each stock
print(all_stocks.median())

# Print the standard deviation of the stock price for each stock
print(all_stocks.std())

# Print the correlation between stocks
print(all_stocks.corr())
# -

# We will now look at how we can compute some rolling statistics, also known as moving statistics. We can calculate for example the rolling mean (moving average) of the Google stock price by using the Pandas `dataframe.rolling().mean()` method. The `dataframe.rolling(N).mean()` calculates the rolling mean over an `N`-day window. In other words, we can take a look at the average stock price every `N` days using the above method. Fill in the code below to calculate the average stock price every 150 days for Google stock

# We compute the rolling mean using a 150-day window for Google stock
rolling_mean = all_stocks['Google'].rolling(150).mean()

# We can also visualize the rolling mean by plotting the data in our DataFrame. In the following lessons you will learn how to use **Matplotlib** to visualize data. For now I will just import matplotlib and plot the Google stock data on top of the rolling mean. You can play around by changing the rolling mean window and see how the plot changes.

# +
# %matplotlib inline

# We import matplotlib into Python
import matplotlib.pyplot as plt

# We plot the Google stock data
plt.plot(all_stocks['Google'])

# We plot the rolling mean on top of our Google stock data
plt.plot(rolling_mean)
plt.legend(['Google Stock Price', 'Rolling Mean'])
plt.show()
Pandas Mini-Project/Statistics from Stock Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %reset -f
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense, GlobalAveragePooling2D
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.utils import to_categorical
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
from numpy import random

seed = 42
np.random.seed(seed)
from natsort import natsorted
import pandas as pd
import git, glob, os, cv2
from sklearn.preprocessing import LabelEncoder

# Paths are resolved relative to the git repo root.
repo = git.Repo('.', search_parent_directories=True)
root_path = f'{repo.working_tree_dir}/insectrec/created_data/'
original_datapath = f'{root_path}impy_crops_export'
aug_datapath = f'{root_path}images_augmented'
img_dim = 80

# +
# Encoder turning the text class labels into integer ids.
le = LabelEncoder()

# Dataframe with all original crops: x = file path, textlabels = class
# directory name, y = encoded class id.
df_orig = pd.DataFrame()
df_orig['x'] = pd.Series(glob.glob(f"{original_datapath}/*/*.jpg"))
df_orig['textlabels'] = df_orig['x'].apply(lambda p: p.split('/')[-2])
df_orig['y'] = le.fit_transform(df_orig.textlabels)

# Train / val / test split; only the training files are augmented below.
X_train, X_test, y_train, y_test = train_test_split(df_orig.x, df_orig.y, test_size=0.2, random_state=seed, shuffle=True)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=seed, shuffle=True)

print(" loading images...")
data = []
labels = []
imagePaths = natsorted(X_train.tolist())
np.random.seed(42)
np.random.shuffle(imagePaths)

for image_path in imagePaths:
    # Load, convert BGR->RGB, resize to the model input size, store.
    img = cv2.imread(image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (img_dim, img_dim))
    data.append(img_to_array(img))
    # Class label is the parent directory name of the image.
    labels.append(image_path.split(os.path.sep)[-2])

# Scale pixels to [0, 1].
data = np.array(data, dtype="float") / 255.0
print(data.shape)

aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         horizontal_flip=True,
                         vertical_flip=True,
                         fill_mode="nearest")

# Map encoded id -> class name, used to build output paths.
name_map = dict(zip(le.transform(le.classes_), le.classes_))
print(name_map)

y = np.array(y_train.tolist(), dtype="float")

aug_imgs_path = './insectrec/created_data/images_augmented/'
# NOTE(review): this hardcoded path duplicates `aug_datapath` above -- the
# two only agree when the notebook runs from the repo root; consider unifying.
rdm = np.random.randint(0, 1e6)  # kept as-is: consumes one RNG draw (value unused)
for cls in np.unique(df_orig.textlabels.unique().tolist()):
    if not os.path.isdir(f'{aug_imgs_path}/{cls}'):
        os.mkdir(f'{aug_imgs_path}/{cls}')

aug.fit(data)
nb_batches = 0
for X_batch, y_batch in aug.flow(data, y, batch_size=512, seed=42):
    for i, mat in enumerate(X_batch):
        # Random suffix keeps filenames unique across batches.
        rdm = np.random.randint(0, 1e6)
        cv2.imwrite(f'{aug_imgs_path}/{name_map[y_batch[i]]}/{name_map[y_batch[i]]}_{rdm}{i}.jpg',
                    cv2.cvtColor(mat * 255, cv2.COLOR_RGB2BGR))
    nb_batches += 1
    # The generator loops forever; stop after 100 batches.
    if nb_batches > 100:
        break
# -
SaveAugmentedBoxImages.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Setup
#
# In this notebook, we demonstrate how to set up time series data for the examples included in this book. The data in this example is taken from the GEFCom2014 forecasting competition<sup>1</sup> (see reference below). It consists of 3 years of hourly electricity load and temperature values between 2012 and 2014.
#
# <sup>1</sup><NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "Probabilistic energy forecasting: Global Energy Forecasting Competition 2014 and beyond", International Journal of Forecasting, vol.32, no.3, pp 896-913, July-September, 2016.

# +
import os
import shutil
import matplotlib.pyplot as plt

from common.utils import load_data, extract_data, download_file

# %matplotlib inline
# -

# +
# Make sure the data folder exists and the archive has been extracted.
data_dir = './data'

if not os.path.exists(data_dir):
    os.mkdir(data_dir)

if not os.path.exists(os.path.join(data_dir, 'energy.csv')):
    # download_file("https://mlftsfwp.blob.core.windows.net/mlftsfwp/GEFCom2014.zip")
    # shutil.move("GEFCom2014.zip", os.path.join(data_dir,"GEFCom2014.zip"))
    extract_data(data_dir)
# -

# +
# Keep only the electricity load column.
ts_data_load = load_data(data_dir)[['load']]
ts_data_load.head()
# -

# +
# Plot the full three years of hourly load.
ts_data_load.plot(y='load', subplots=True, figsize=(15, 8), fontsize=12)
plt.xlabel('timestamp', fontsize=12)
plt.ylabel('load', fontsize=12)
plt.show()
# -

# +
# Zoom in on a single week of July 2014.
ts_data_load['2014-07-01':'2014-07-07'].plot(y='load', subplots=True, figsize=(15, 8), fontsize=12)
plt.xlabel('timestamp', fontsize=12)
plt.ylabel('load', fontsize=12)
plt.show()
# -
Notebooks/Data Setup.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Selection (conditionals)
# ## Booleans, numbers and expressions
# ![](../Photo/33.png)
# - Note: the equality comparison operator is `==`; a single `=` is assignment
# - In Python the integer 0 can stand for False and any other number for True
# - The use of `is` in conditions is covered later

a = 100
b = 100
a <= b

c = "loker"
d = "nanaan"
c > d  # strings compare character by character using their code points (ASCII)

e = 100
f = 100
e == f

e1 = 101
e2 = 102
e1 != e2

# ## EP:
# - <img src="../Photo/34.png"></img>
# - Read a number and decide whether it is odd or even

i = int(True)
j = int(False)
j == i

i = (True)
j = (False)
print(i)
print(j)

# +
import random

# NOTE(review): eval(input()) executes arbitrary code -- acceptable in a
# lesson, never in real programs.
x1 = eval(input("x1"))
i = random.randint(0, 100)
j = i % 2
if j != 0:
    print("y")
else:
    print("n")
# -

# ## Generating random numbers
# - random.randint(a, b) produces a random integer between a and b, inclusive

import random

a1 = random.randint(0, 5)
print(a1)

# Fixed: random.random() takes no arguments (the original called
# random.random(0, 1), which raises TypeError); it already returns a
# float in [0.0, 1.0).
a2 = random.random()
print(a2)

a3 = random.randrange(start=0, stop=10, step=2)
print(a3)

# Fixed: random.Random() is a generator object; call one of its methods
# to actually produce a number (the original printed the object itself).
a4 = random.Random().random()
print(a4)

# ## EP:
# - Generate two random integers number1 and number2, show them to the user,
#   let the user answer their sum, and check whether the answer is correct
# - Extension: write a random roll-call program

# +
import random

numb1 = random.randint(0, 10)
numb2 = random.randint(0, 10)
print(numb1, numb2)
expected = numb1 + numb2  # renamed from `sum`, which shadowed the builtin
sum_input = eval(input("数字和"))
if expected == sum_input:
    print("t")
else:
    print("f")
# -

# ## Other random methods
# - random.random() returns a float in the half-open interval [0.0, 1.0)
# - random.randrange(a, b) covers [a, b) -- closed on the left, open on the right

# ## The if statement
# - A one-way if runs its body only when the condition is true
# - Python selection statements: one-way if, two-way if-else, nested if,
#   multi-way if-elif-else
# - Bodies must be indented consistently; never mix tabs and spaces
# - Code that should run regardless of the condition is aligned with the if

if "a" == "b":
    print("2111")
print("3333")

if "a" == "b":
    print("2111")
elif "a" == "c":
    print("3333")
else:
    print("3112233")

# ## EP:
# - Read a number and decide whether it is odd or even
# - Extension: see section 4.5, the guess-the-birthday case study

# ## Two-way if-else
# - If the condition is true run the if body, otherwise run the else body

# ## Nested if and multi-way if-elif-else
# ![](../Photo/35.png)

# Fixed: the original cell had a bare `input`, used `score` without
# reading it, and had stray colons after `grade="C":` / `grade="D":`
# (SyntaxError). The nested form is kept for the lesson.
score = eval(input("score"))
if score >= 90.0:
    grade = "A"
else:
    if score >= 80.0:
        grade = "B"
    else:
        if score >= 70:
            grade = "C"
        else:
            if score >= 60:
                grade = "D"

# Fixed: every elif below was missing its colon (SyntaxError).
std = eval(input("成绩"))
if std >= 90.0:
    print("A")
elif 80 <= std < 90:
    print("B")
elif 70 <= std < 80:
    print("C")
elif 60 <= std < 70:
    print("D")

# ## EP:
# - Read a year and print the zodiac animal representing that year
# ![](../Photo/36.png)
# - A program computing the body mass index
# - BMI = weight in kilograms divided by the square of the height in meters
# ![](../Photo/37.png)

year = eval(input("year"))
# year % 12 == 0 corresponds to the monkey (e.g. 2016); the original
# printed "HOU" (hou = monkey) for 0 and placeholder digits otherwise --
# the 12-branch if chain collapses into one table lookup.
animals = ["monkey", "rooster", "dog", "pig", "rat", "ox",
           "tiger", "rabbit", "dragon", "snake", "horse", "sheep"]
print(animals[year % 12])

# ## Logical operators
# ![](../Photo/38.png)
# ![](../Photo/39.png)
# ![](../Photo/40.png)

not ("a" == "b")

# Fixed: missing colon after the if condition (SyntaxError).
if "a" == "b" and "c" == "c":
    print(1323230)

100 == 100 and 100 == 100
100 == 100

# ## EP:
# - Leap year: divisible by 4 but not by 100, or divisible by 400
# - Read a year and report whether it is a leap year
# - Read a number and check whether it is a narcissistic number

year = eval(input("year"))
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
    print("闰年")
else:
    print("平年")

# ## Case study: lottery
# ![](../Photo/41.png)

# # Homework
# - 1
# ![](../Photo/42.png)

import math

a = eval(input("a="))
b = eval(input("b="))
c = eval(input("c="))
r = b * b - 4 * a * c
if r > 0:
    d = math.sqrt(r)
    x1 = (-b + d) / (2 * a)
    # Fixed: the second root is (-b - d) / (2a); the original computed
    # (b + d) / (2a).
    x2 = (-b - d) / (2 * a)
    print(x1, x2)
elif r == 0:
    # One real (double) root; the original printed a "-1" placeholder.
    print(-b / (2 * a))
else:
    # No real roots; the original printed a "123" placeholder.
    print("no real roots")

# - 2
# ![](../Photo/43.png)

import random

numb1 = random.randint(0, 100)
numb2 = random.randint(0, 100)
print(numb1, numb2)
total = numb1 + numb2  # renamed from `sum`, which shadowed the builtin
sum_input = eval(input("两个数的和"))
if total == sum_input:
    print("T")
else:
    print("F")

# - 3
# ![](../Photo/44.png)

a = eval(input("today is="))
b = eval(input("since today="))
# The original seven-branch if chain computed (a + b) % 7 in every branch;
# one modulo expression is equivalent.
print("today is ", a, "future day is", (a + b) % 7)

# - 4
# ![](../Photo/45.png)

# +
numb1 = eval(input("numb1 is "))
numb2 = eval(input("numb2 is "))
numb3 = eval(input("numb3 is "))
# The six-branch comparison chain replaced by a sort with identical output.
print(*sorted([numb1, numb2, numb3]))
# -

# - 5
# ![](../Photo/46.png)
# - 6
# ![](../Photo/47.png)

year = eval(input("year is "))
mou = eval(input("mou is "))
if mou in (1, 3, 5, 7, 8, 10, 12):
    print("year is ", year, "mou is ", mou, "31天")
elif mou in (4, 6, 9, 11):
    print("year is ", year, "mou is ", mou, "30天")
elif year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
    print("year is ", year, "mou is 2", "29天")
else:
    print("year is ", year, "mou is 2", "28天")

# - 7
# ![](../Photo/48.png)
# - 8
# ![](../Photo/49.png)
# - 9
# ![](../Photo/50.png)
# - 10
# ![](../Photo/51.png)
# - 11
# ![](../Photo/52.png)
# - 12
# ![](../Photo/53.png)
7.18syh.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import folium, html, json
import matplotlib.pyplot as plt
import datetime

# Brazilian per-city COVID data.
df = pd.read_csv('../data/brazil_corona19_data.csv')
df['date'] = df['date'].astype('datetime64[ns]')

df1 = df[df['city'] == 'Santa Gertrudes']
plt.plot(df1.day, df1.cases)
plt.plot(df1.day, df1.avg7_cases)
# plt.plot(df.day, df.deaths)
df.tail()

# Keep only São Paulo city rows for the per-city plots below.
# df = pd.read_csv('../data/world_corona19_data.csv', sep=',')
# df['date'] = df['date'].astype('datetime64[ns]')
df2 = df[(df['state'] == 'SP')]  # and (df['place_type']=='state')]
# df2 = df2[(df2['place_type']=='state')]
df2 = df2[(df2['city'] == 'São Paulo')]
local = 'SP'
df2[['date', 'day', 'cases', 'deaths', 'case_day', 'death_day', 'avg7_deaths']].tail()

# +
fig, (ax3, ax4) = plt.subplots(1, 2, figsize=(20, 8))
fig.tight_layout(pad=5.0)

ax3.set_title("Casos")
ax3.set_xlabel("dias desde o primeiro caso")
ax3.grid(color='gray', alpha=0.4)
ax3.plot(df2.day, df2.case_day, label='casos diários')
ax3.plot(df2.day, df2.avg7_cases, label='média móvel (7 dias)')

ax4.set_title("Mortes")
ax4.set_xlabel("dias desde o primeiro caso")
ax4.grid(color='gray', alpha=0.4)
ax4.plot(df2.day, df2.death_day, label='mortes diárias')
ax4.plot(df2.day, df2.avg7_deaths, label='média móvel (7 dias)')

ax3.legend()
ax4.legend()
# -

# +
fig, ax3 = plt.subplots(1, 1, figsize=(10, 5))
fig.tight_layout(pad=5.0)

ax3.set_title("% var. das médias móveis (7 dias)")
ax3.set_xlabel("dias desde o primeiro caso")
ax3.grid(color='gray', alpha=0.4)
ax3.set_ylim(-20, 60)
ax3.plot(df2.day, df2['%var_avg7_case_day_thousand'], label='casos')
ax3.plot(df2.day, df2['%var_avg7_death_day_thousand'], label='mortes')
ax3.legend()
# -

# # País

df = pd.read_csv('../data/world_corona19_data.csv')
df['date'] = df['date'].astype('datetime64[ns]')
df_br = df[df['country'] == 'Brazil']

# +
fig, ax3 = plt.subplots(1, 1, figsize=(10, 5))
fig.tight_layout(pad=5.0)

ax3.set_title("BRA - % var. das médias móveis (7 dias)")
ax3.set_xlabel("dias desde o primeiro caso")
ax3.grid(color='gray', alpha=0.4)
ax3.set_ylim(-20, 60)
# ax3.set_yticks(np.arange(-40, 200, step=20))
ax3.plot(df_br.day, df_br['%var_avg7_case_day_million'], label='casos')
ax3.plot(df_br.day, df_br['%var_avg7_death_day_million'], label='mortes')
ax3.legend()
# -

# +
# Fixed: this cell originally reused ax3/ax4 left over from earlier cells
# (ax4 belonged to a figure created much earlier), so the Brazil curves
# were drawn onto stale figures. Create a fresh pair of axes here.
fig, (ax3, ax4) = plt.subplots(1, 2, figsize=(20, 8))
fig.tight_layout(pad=5.0)

ax3.set_title("Cases in Brazil")
ax3.set_xlabel("days from the first case")
ax3.grid(color='gray', alpha=0.4)
ax3.plot(df_br.day, df_br.case_day, label='daily cases')
ax3.plot(df_br.day, df_br.avg7_cases, label='moving average')

ax4.set_title("Deaths in Brazil")
ax4.set_xlabel("days from the first case")
ax4.grid(color='gray', alpha=0.4)
ax4.plot(df_br.day, df_br.death_day, label='daily deaths')
ax4.plot(df_br.day, df_br.avg7_deaths, label='moving average')
# -

# +
# Selected state (SP): cumulative and moving-average curves, with markers
# for the reopening announcement dates.
# NOTE(review): `df` was reassigned to the WORLD dataset above -- confirm it
# still has 'state'/'place_type' columns here, or reload the Brazil data.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 8))
fig.tight_layout(pad=5.0)

ax1.set_title("Cumulative cases and deaths")
ax1.set_xlabel("days from the first case")
ax1.grid(color='gray', alpha=0.4)

ax2.set_title("Cases and deaths - moving average (last 7 days)")
ax2.set_xlabel("days from the first case")
ax2.grid(color='gray', alpha=0.4)

dados = df[(df['state'] == 'SP') & (df['place_type'] == 'state')]
ax1.plot(dados.day, dados.cases, label='cases')
ax1.plot(dados.day, dados.deaths, label='deaths')
ax2.plot(dados.day, dados.avg7_cases, label='cases')
ax2.plot(dados.day, dados.avg7_deaths, label='deaths')

ax1.axvline(x=91, ymin=0, ymax=0.9, color='green', linestyle='-', label='anúncio')
ax1.axvline(x=91 + 7, ymin=0, ymax=0.9, color='green', linestyle=':', label='anúncio + 7')
ax1.axvline(x=96, ymin=0, ymax=0.9, color='red', linestyle='--', label='reabertura')
ax1.axvline(x=96 + 7, ymin=0, ymax=0.9, color='red', linestyle='--', label='reabertura + 7')

ax2.axvline(x=91, ymin=0, ymax=0.9, color='purple', linestyle='-', label='anúncio')
ax2.axvline(x=91 + 14, ymin=0, ymax=0.9, color='purple', linestyle='-.', label='anúncio + 14')
ax2.axvline(x=96, ymin=0, ymax=0.9, color='orange', linestyle='-', label='reabertura')
ax2.axvline(x=96 + 14, ymin=0, ymax=0.9, color='orange', linestyle='--', label='reabertura + 14')

ax1.legend()
ax2.legend()
# fig.savefig('../analysis/saoPaulo_cases_deaths.png')
# -
notebooks/Testes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="E--EmfMeNyw4"
# # Import Stuff

# + id="7R8miAIzNyPw"
# !pip install newspaper3k

# + id="5DQcT2APHMuk"
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import time

# + id="yCYXfWkSI4it"
import nltk
nltk.download('vader_lexicon')

# + id="OI_nu6oVtJjQ"
# Small matplotlib demo: plot a sine wave against real timestamps.
import matplotlib.dates as md

n = 20
duration = 1000
now = time.mktime(time.localtime())
timestamps = np.linspace(now, now + duration, n)
datenums = [dt.datetime.fromtimestamp(ts) for ts in timestamps]
# datenums = md.date2num(dates)
values = np.sin((timestamps - now) / duration * 2 * np.pi)
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
ax = plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.plot(datenums, values)
plt.show()

# + [markdown] id="rNV53jFrN10G"
# # Code

# + id="gxDv4V5Te1LJ"
finviz_url = 'https://finviz.com/quote.ashx?t='
tickers = ['AMD', 'JNJ', 'PFE']

# + id="sq6ZCZKAfJo0"
news_tables = {}
for ticker in tickers:
    url = finviz_url + ticker

    # Request the quote page; finviz rejects the default urllib user agent,
    # hence the custom header.
    req = Request(url=url, headers={'user-agent': 'my-app'})
    response = urlopen(req)

    # Fixed: 'html' is not a parser name; pin the stdlib 'html.parser' so the
    # result does not depend on which optional parser is installed.
    soup = BeautifulSoup(response, 'html.parser')
    news_tables[ticker] = soup.find(id='news-table')
print(news_tables)

# + id="fqTAVIH-gs5U"
# Fixed: the original indexed news_tables['AMZN'], but AMZN is not in
# `tickers`, so this cell raised KeyError (visible in the saved output);
# use the first fetched ticker instead.
amd_data = news_tables['AMD']
amd_rows = amd_data.findAll('tr')

for index, row in enumerate(amd_rows):
    title = row.a.text  # anchor tag holds the headline text
    td = row.td.text    # first cell holds the date/time stamp
    print(td, " ", title)

# + id="cnYPDHu6h-ma"
parsed_data = []

for ticker, news_table in news_tables.items():
    # Fixed: `date` could be unbound if the first row held only a time;
    # carry the last seen date across rows explicitly.
    news_date = None
    for rows in news_table.findAll('tr'):
        # Some rows contain no anchor (e.g. ad rows); skip them instead of
        # crashing on `rows.a.text`.
        if rows.a is None:
            continue
        title = rows.a.text
        date_data = rows.td.text.split(' ')  # e.g. "Jun-01-21 02:00PM" or "02:00PM"

        # A one-element split means the cell holds only a time and the date
        # is inherited from the previous row.
        if len(date_data) == 1:
            news_time = date_data[0]
        else:
            news_date = date_data[0]
            news_time = date_data[1]

        # Renamed from `date`/`time`: the original rebound the imported
        # `time` module, breaking any later use of it.
        parsed_data.append([ticker, news_date, news_time, title])

print(parsed_data)

# + id="IN_YgTqlILUD"
# Analyzing sentiment with VADER.
df = pd.DataFrame(parsed_data, columns=['ticker', 'date', 'time', 'title'])

vader = SentimentIntensityAnalyzer()
print(vader.polarity_scores("The movie sucks ass."))
# positive sentiment : compound score >= 0.05
# neutral sentiment  : -0.05 < compound score < 0.05
# negative sentiment : compound score <= -0.05
# Only the compound score is used below.

# + id="uWvytH3FKGqf"
# Apply VADER to every headline.
df['cmpd_score'] = [vader.polarity_scores(title)['compound'] for title in df['title']]
# f = lambda title: vader.polarity_scores(title)['compound']   # equivalent
# df['compound'] = df['title'].apply(f)
df

# + (remaining cells of this notebook are truncated in this chunk)
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GifdiA3IY1W_lxhJLvpVcINYel5dPvvCji8tMXk=s64", "userId": "01730018351539431740"}} outputId="a0144f81-8256-4dec-bec9-79ef596a6f29" # Plotting stocks and stuff df['date'] = pd.to_datetime(df.date).dt.date plt.figure(figsize=(500, 500)) mean_df = df.groupby(['ticker', 'time']).median() # Takes the mean of all the cmpds of a single date within a single ticker print(mean_df) mean_df = mean_df.unstack() # Unstacks the data mean_df = mean_df.xs('cmpd_score', axis='columns').transpose() # Removing compound column print(mean_df) mean_df.plot(figsize=(100, 100), kind='bar') plt.xticks( rotation=25 ) plt.show()
SentimentAnalysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Underfitting and Overfitting # + from sklearn.metrics import mean_absolute_error from sklearn.tree import DecisionTreeRegressor def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y): model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0) model.fit(train_X, train_y) preds_val = model.predict(val_X) mae = mean_absolute_error(val_y, preds_val) return(mae) # + import pandas as pd melbourne_file_path = pd.read_csv('melb_data.csv') melbourne_file_path # - filtered_melbourne_data = melbourne_file_path.dropna(axis=0) filtered_melbourne_data y = filtered_melbourne_data.Price y melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'BuildingArea', 'YearBuilt', 'Lattitude', 'Longtitude'] melbourne_features X = filtered_melbourne_data[melbourne_features] X from sklearn.model_selection import train_test_split train_X, val_X, train_y, val_y = train_test_split(X, y,random_state = 0) for max_leaf_nodes in [5, 50, 500, 5000]: my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y) print("Max leaf nodes: %d \t\t Mean Absolute Error: %d" %(max_leaf_nodes, my_mae))
Machine Learning - Intro and Intermediate/Underfitting and Overfitting.ipynb
# # Solutions
# ## Thought exercises
# 1. Explore the JupyterLab interface and look at some of the shortcuts
#    available. Don't worry about memorizing them now (eventually they will
#    become second nature and save you a lot of time), just get comfortable
#    using notebooks.
# 2. Is all data normally distributed?
#    > No. Even data that might appear to be normally distributed could
#    belong to a different distribution. There are tests to check for
#    normality, but this is beyond the scope of this book.
# 3. When would it make more sense to use the median instead of the mean for
#    the measure of center?
#    > When your data has outliers, it may make more sense to use the median
#    over the mean as your measure of center.
#
# ## Coding exercises
# If you need a Python refresher, work through the python_101.ipynb notebook
# in chapter 1.

# ### Exercise 4: Generate the data
import random

random.seed(0)  # fixed seed so the "random" salaries are reproducible
salaries = [round(random.random() * 1000000, -3) for _ in range(100)]

# ### Exercise 5: Calculating statistics and verifying

# #### mean
from statistics import mean

sum(salaries) / len(salaries) == mean(salaries)

# #### median
# First, we define a function to calculate the median:
import math


def find_median(x):
    """Return the median of the numeric sequence *x*.

    BUGFIX: the original called ``x.sort()``, silently reordering the
    caller's list as a side effect; we now work on a sorted copy.
    """
    x = sorted(x)
    midpoint = (len(x) + 1) / 2 - 1  # subtract 1 bc index starts at 0
    if len(x) % 2:
        # odd count: midpoint is a whole number, take the middle value
        return x[int(midpoint)]
    else:
        # even count: average the two middle values
        return (x[math.floor(midpoint)] + x[math.ceil(midpoint)]) / 2


# Then, we check its output matches the expected output:
from statistics import median

find_median(salaries) == median(salaries)

# #### mode
from statistics import mode
from collections import Counter

Counter(salaries).most_common(1)[0][0] == mode(salaries)

# #### sample variance
# Remember to use Bessel's correction.
from statistics import variance

sum([(x - sum(salaries) / len(salaries)) ** 2 for x in salaries]) \
    / (len(salaries) - 1) == variance(salaries)

# #### sample standard deviation
# Remember to use Bessel's correction.
from statistics import stdev
import math

math.sqrt(
    sum([(x - sum(salaries) / len(salaries)) ** 2 for x in salaries])
    / (len(salaries) - 1)
) == stdev(salaries)

# ### Exercise 6: Calculating more statistics

# #### range
max(salaries) - min(salaries)

# #### coefficient of variation
from statistics import mean, stdev

stdev(salaries) / mean(salaries)

# #### interquartile range
# First, we define a function to calculate a quantile:
import math


def quantile(x, pct):
    """Return the *pct* quantile (0 <= pct <= 1) of the numeric sequence *x*.

    BUGFIX (two issues in the original):
    * it called ``x.sort()``, mutating the caller's list;
    * it branched on ``len(x) % 2``, but whether the interpolated index is a
      whole number depends on *pct* as well as on the length, so odd-length
      inputs could silently truncate instead of interpolating between the
      two neighbouring values.  We now test the index itself.
    Results are unchanged for even-length inputs such as ``salaries``.
    """
    x = sorted(x)
    index = (len(x) + 1) * pct - 1
    if index == int(index):
        # the index is whole, so grab the value at that position directly
        return x[int(index)]
    else:
        # otherwise interpolate: average the two neighbouring values
        return (x[math.floor(index)] + x[math.ceil(index)]) / 2


# Then, we check that it calculates the 1st quantile correctly:
sum([x < quantile(salaries, 0.25) for x in salaries]) / len(salaries) == 0.25

# and the 3rd quantile:
sum([x < quantile(salaries, 0.75) for x in salaries]) / len(salaries) == 0.75

# Finally, we can calculate the IQR:
q3, q1 = quantile(salaries, 0.75), quantile(salaries, 0.25)
iqr = q3 - q1
iqr

# #### quartile coefficient of dispersion
iqr / (q1 + q3)

# ### Exercise 7: Scaling data

# #### min-max scaling
min_salary, max_salary = min(salaries), max(salaries)
salary_range = max_salary - min_salary
min_max_scaled = [(x - min_salary) / salary_range for x in salaries]
min_max_scaled[:5]

# #### standardizing
from statistics import mean, stdev

mean_salary, std_salary = mean(salaries), stdev(salaries)
standardized = [(x - mean_salary) / std_salary for x in salaries]
standardized[:5]

# ### Exercise 8: Calculating covariance and correlation

# #### covariance
# We haven't covered NumPy yet, so this is just here to check our solution
# (0.26) — there will be rounding errors on our calculation:
import numpy as np

np.cov(min_max_scaled, standardized)

# Our method, aside from rounding errors, gives us the same answer as NumPy:
from statistics import mean

running_total = [
    (x - mean(min_max_scaled)) * (y - mean(standardized))
    for x, y in zip(min_max_scaled, standardized)
]
cov = mean(running_total)
cov

# #### Pearson correlation coefficient ($\rho$)
from statistics import stdev

cov / (stdev(min_max_scaled) * stdev(standardized))

# <hr>
# <div>
# <a href="../../ch_01/introduction_to_data_analysis.ipynb">
# <button>&#8592; Introduction to Data Analysis</button>
# </a>
# <a href="../../ch_01/python_101.ipynb">
# <button>Python 101</button>
# </a>
# <a href="../../ch_02/1-pandas_data_structures.ipynb">
# <button style="float: right;">Chapter 2 &#8594;</button>
# </a>
# </div>
# <hr>
solutions/ch_01/solutions.ipynb
# # Time-shifted Data
#
# In this tutorial, we show how to transform the time-series data in the
# following ways:
# * `split` time-series with a lot of data points into mutiple segments, and
# * `time shift` the above segments so that they have the same timestamps.
#
# The above transformations allow you to easily compare the data over the
# last hour/day/week over the previous intervals. This helps you understand
# if your system's current behavior continues to match the past behavior.
#
# **Note**: This tutorial reads in metric data from the Monitoring API, or a
# Google Cloud Storage bucket:
# * If the variable `'common_prefix'` is set, the data is read from the
#   Monitoring API.
# * If the variable `'common_prefix'` is not set, the data is loaded from a
#   shared Cloud Storage bucket. See the Storage tutorial to learn more
#   about the Storage API.
#
# NOTE(review): this notebook declares a Python 2 kernel and relies on
# Python-2-only constructs — the `StringIO` module, `xrange`,
# `dict.iteritems`, and integer `/` division — none of which work on
# Python 3. Confirm the target runtime before modernizing.

# ## Load the monitoring module and set the default project
# If there is no default project set already, you must do so using
# 'set_datalab_project_id'.
from datalab.stackdriver import monitoring as gcm
# set_datalab_project_id('my-project-id')

# ## Find the most common instance name prefixes
# The prefix from an instance name is calculated by splitting on the last
# '-' character. All instances with the same prefixes are grouped together
# to get the prefix counts.
import collections

# Initialize the query for CPU utilization over the last week, and read in
# its metadata.
query_cpu = gcm.Query('compute.googleapis.com/instance/cpu/utilization', hours=7*24)
cpu_metadata = query_cpu.metadata()

# Count the occurrences of each prefix, and display the top 5.
instance_prefix_counts = collections.Counter(
    # rsplit('-', 1)[0] keeps everything before the final '-', i.e. the prefix.
    timeseries.metric.labels['instance_name'].rsplit('-', 1)[0]
    for timeseries in cpu_metadata)
instance_prefix_counts.most_common(5)

# ## Select the instance name prefix to filter on
# In this cell, you can select an instance name prefix to filter on. If you
# do not set this variable, then the data is read from a Cloud Storage
# bucket.
#
# You can look at the most frequent prefix in the previous cell. It is
# recommended that you select a prefix with the following properties:
# * the instances have the CPU Utilization metric data for at least the last
#   5 days
# * the instances span multiple zones

# Set this variable to read data from your own project.
common_prefix = None  # 'my-instance-prefix'

if common_prefix is None:
    print('No prefix specified. The data will be read from a Cloud Storage bucket.')
else:
    print('You selected the prefix: "%s"' % (common_prefix,))

# ## Load the time series data
# Based on the value of `'common_prefix'` in the previous cell, the time
# series data is loaded from the Monitoring API, or a shared Cloud Storage
# bucket. In both cases, we load the time series of the CPU Utilization
# metric over the last week, aggregated to hourly intervals per zone.
import StringIO  # Python 2 module; replaced by io.StringIO on Python 3
import pandas
import datalab.storage as storage

if common_prefix is None:
    print('Reading in data from a Cloud Storage Bucket')
    # Initialize the bucket name, and item key.
    bucket_name = 'cloud-datalab-samples'
    per_zone_data = 'stackdriver-monitoring/timeseries/per-zone-weekly-20161010.csv'

    # Load the CSV from the bucket, and intialize the dataframe using it.
    per_zone_data_item = storage.Bucket(bucket_name).item(per_zone_data)
    per_zone_data_string = StringIO.StringIO(per_zone_data_item.read_from())
    # NOTE(review): DataFrame.from_csv is long-deprecated in pandas;
    # pandas.read_csv is the modern equivalent — confirm pinned version.
    per_zone_cpu_data = pandas.DataFrame.from_csv(per_zone_data_string)
else:
    print('Reading in data from the Monitoring API')
    # Filter the query to instances with the specified prefix.
    query_cpu = query_cpu.select_metrics(instance_name_prefix=common_prefix)

    # Aggregate to hourly intervals per zone.
    query_cpu = query_cpu.align(gcm.Aligner.ALIGN_MEAN, hours=1)
    query_cpu = query_cpu.reduce(gcm.Reducer.REDUCE_MEAN, 'resource.zone')

    # Get the time series data as a dataframe, with a single-level header.
    per_zone_cpu_data = query_cpu.as_dataframe(label='zone')

per_zone_cpu_data.tail(5)

# ## Split the data into daily chunks
# Here, we split the data over daily boundaries.
import collections

# Extract the number of days in the dataframe.
# 24 hourly samples per day; Python 2 '/' on ints floors, giving an int count.
num_days = len(per_zone_cpu_data.index)/24

# Split the big dataframe into daily dataframes.
daily_dataframes = [per_zone_cpu_data.iloc[24*i: 24*(i+1)] for i in xrange(num_days)]

# Reverse the list to have today's data in the first index.
daily_dataframes.reverse()

# Display the last five rows from today's data.
daily_dataframes[0].tail(5)

# ## Initialize a helper function
# Here, we initialize a helper function to create human readable names for
# days.
TODAY = 'Today'

# Helper function to make a readable day name based on offset from today.
def make_day_name(offset):
    if offset == 0:
        return TODAY
    elif offset == 1:
        return 'Yesterday'
    return '%d days ago' % (offset,)

# ## Time-shift all dataframes to line up with the last day
# The pandas method `tshift` lets you shift a dataframe by a specified
# offset. We use this to shift the index of all days to match the timestamps
# in the latest day. The data for each zone is inserted in a differenct
# dataframe, where the rows are timestamps and columns are specific days.

# Extract the zone names.
all_zones = per_zone_cpu_data.columns.tolist()

# Use the last day's timestamps as the index, and initialize a dataframe per
# zone.
last_day_index = daily_dataframes[0].index
zone_to_shifted_df = {zone: pandas.DataFrame([], index=last_day_index)
                      for zone in all_zones}

for i, dataframe in enumerate(daily_dataframes):
    # Shift the dataframe to line up with the start of the last day.
    # The freq argument is the timedelta between the two days' first samples.
    dataframe = dataframe.tshift(freq=last_day_index[0] - dataframe.index[0])
    current_day_name = make_day_name(i)

    # Insert each daily dataframe as a column into the dataframe.
    for zone in all_zones:
        zone_to_shifted_df[zone][current_day_name] = dataframe[zone]

# Display the first five rows from the first zone.
zone_to_shifted_df[all_zones[0]].head(5)

# ## Compare the CPU utilization day-over-day
# One line per day, all aligned to the same hourly timestamps.
for zone, dataframe in zone_to_shifted_df.iteritems():
    dataframe.plot(title=zone).legend(loc="upper left", bbox_to_anchor=(1,1))

# ## Compare today's CPU Utilization to the weekly average
# In order to compare the metric data for today, with the average of the
# week, we create new dataframes with the following columns:
# * Today's data: From the original data for TODAY
# * Average over the week: From the mean across all the days
for zone, dataframe in zone_to_shifted_df.iteritems():
    # Initialize the dataframe by extracting the column with data for today.
    compare_to_avg_df = dataframe.loc[:, [TODAY]]

    # Add a column with the weekly avg (row-wise mean across all day columns).
    compare_to_avg_df['Weekly avg.'] = dataframe.mean(axis=1)

    # Plot this dataframe.
    compare_to_avg_df.plot(title=zone).legend(loc="upper left", bbox_to_anchor=(1,1))
docs/tutorials/Stackdriver Monitoring/Time-shifted data.ipynb
# # `rsa`
# This function implements 'random sequential addition' of spheres. The main
# benefit of this approach is that sphere overlap can be prevented or
# controlled. The downside is that the function is a bit on the slow side,
# though efforts have been made to accelerate it using numba jit.
import porespy as ps
import matplotlib.pyplot as plt
import numpy as np
import inspect

# Show the function's signature for reference.
b = inspect.signature(ps.generators.rsa)
print(b)

# ## `im_or_shape`
# The function can either add spheres to an existing image, or create a new
# image of the given shape and add spheres to that. Let's start with an
# empty image, and set the void fraction to a low value:
shape = [300, 300]
r = 15
im = ps.generators.rsa(im_or_shape=shape, r=r)
fig, ax = plt.subplots(1, 1, figsize=[4, 4])
ax.axis(False)
ax.imshow(im, origin='lower', interpolation='none');

# Now let's fill up the remaining space with as many smaller spheres as
# possible (passing the previous image back in):
r = 5
im = ps.generators.rsa(im_or_shape=im, r=r)
fig, ax = plt.subplots(1, 1, figsize=[4, 4])
ax.axis(False)
ax.imshow(im, origin='lower', interpolation='none');

# ## `mode`
# Spheres can either be fully contained within the image or be truncated at
# the edges by specifying mode:
fig, ax = plt.subplots(1, 2, figsize=[8, 4])

r = 20
mode = 'contained'
im1 = ps.generators.rsa(im_or_shape=shape, r=r, mode=mode)
ax[0].imshow(im1, origin='lower', interpolation='none')
ax[0].axis(False)
ax[0].set_title(f'mode = {mode}')

mode = 'extended'
im2 = ps.generators.rsa(im_or_shape=shape, r=r, mode=mode)
ax[1].imshow(im2, origin='lower', interpolation='none')
ax[1].axis(False)
ax[1].set_title(f'mode = {mode}');

# ## `clearance`
# Spheres can be made to partially overlap, the so called 'cherry pit'
# model (negative clearance), or to have some gap between them (positive
# clearance):
fig, ax = plt.subplots(1, 2, figsize=[8, 4])

c = -4
im1 = ps.generators.rsa(im_or_shape=shape, r=r, clearance=c)
ax[0].imshow(im1, origin='lower', interpolation='none')
ax[0].axis(False)
ax[0].set_title(f'clearance = {c}')

c = 4
im2 = ps.generators.rsa(im_or_shape=shape, r=r, clearance=c)
ax[1].imshow(im2, origin='lower', interpolation='none')
ax[1].axis(False)
ax[1].set_title(f'clearance = {c}');

# When adding additional spheres to an existing image, the clearance only
# applies to new spheres. To enforce clearance between the existing spheres
# you can dilate them by the amount of clearance desired (or erode them if
# overlap is desired). In this case you'll want to set
# ``return_spheres=True`` to obtain an image of only the new spheres, which
# can be added to the original ones:
im3 = ps.filters.fftmorphology(im=im2, strel=ps.tools.ps_disk(5), mode='dilation')
# CONSISTENCY FIX: the original called the deprecated CamelCase alias
# ps.generators.RSA here; every other cell uses the lowercase rsa.
im4 = ps.generators.rsa(im_or_shape=im3, r=5, clearance=5, return_spheres=True)
im5 = im2 + im4
fig, ax = plt.subplots(1, 3, figsize=[10, 4])
ax[0].imshow(im2, origin='lower', interpolation='none')
ax[1].imshow(im4, origin='lower', interpolation='none')
ax[2].imshow(im5, origin='lower', interpolation='none');

# ## `volume_fraction` and `n_max`
# By default it will try to insert as many spheres as possible. This can be
# controlled by setting the volume fraction or limiting the number of
# spheres inserted:
fig, ax = plt.subplots(1, 2, figsize=[8, 4])

n_max = 5
im1 = ps.generators.rsa(shape, r=r, clearance=c, n_max=n_max)
ax[0].imshow(im1, origin='lower', interpolation='none')
ax[0].axis(False)
ax[0].set_title(f'n_max = {n_max}')

vf = 0.2
im2 = ps.generators.rsa(shape, r=r, clearance=c, volume_fraction=vf)
ax[1].axis(False)
ax[1].set_title(f'volume_fraction = {vf}')
ax[1].imshow(im2, origin='lower', interpolation='none');

# Note that this function returns the spheres as ``True`` which usually
# indicates the pore phase in porespy, so these will be treated as images of
# holes. If you intended to make an image of spheres, just invert the image:
im1 = ~im1
im2 = ~im2
fig, ax = plt.subplots(1, 2, figsize=[8, 4])
ax[0].imshow(im1, origin='lower', interpolation='none')
ax[0].axis(False)
ax[1].imshow(im2, origin='lower', interpolation='none')
ax[1].axis(False);
examples/generators/reference/rsa.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Constants DATA_PATH = "../data/raw/survey_results_public.csv" # Load packages import pandas as pd import numpy as np pd.options.display.max_rows = 10000 # Read data raw_df = pd.read_csv(DATA_PATH) # raw_df.head() # Get Shape of your dataset raw_df.shape # Display random answer # Observations: multiple answers need to be splitted # Referance to the schema to understand raw_df.sample(1).iloc[0] # Print the general information of the data frame raw_df.info() # Get stats for the numerical column raw_df.describe() # Investigate the questionable objects columns questionable_cols = ['Age1stCode', 'YearsCode', 'YearsCodePro'] for col in questionable_cols : print(col) print(raw_df[col].unique().tolist()) print('---------------------------\n')
notebooks/00_exploration.ipynb
# Classification with MLBox on the Titanic dataset: read/clean the data,
# tune a pipeline's hyper-parameters, predict, and plot the ROC curve.
from mlbox.preprocessing import Reader
from mlbox.preprocessing import Drift_thresholder
from mlbox.optimisation import Optimiser
from mlbox.prediction import Predictor
import csv
import pandas as pd  # BUGFIX: pd was used below but never imported (NameError)

# Paths to the train set and the test set.
paths = ["../../Data/titanic/processed/train.csv",
         "../../Data/titanic/processed/x_test.csv"]
# Name of the feature to predict.
# This column should only be present in the train set.
target_name = "Survived"

# Reading and cleaning all files.
# Declare a reader for csv files.
rd = Reader(sep=',')
# Return a dictionnary containing three entries:
#   dict["train"]  training samples without the target column
#   dict["test"]   testing samples without the target column
#   dict["target"] target column for the training samples
data = rd.train_test_split(paths, target_name)

# Drop features whose distribution drifts between train and test.
dft = Drift_thresholder()
data = dft.fit_transform(data)

# Tuning.
# Declare an optimiser. Scoring possibilities for classification lie in:
# {"accuracy", "roc_auc", "f1", "neg_log_loss", "precision", "recall"}
opt = Optimiser(scoring='accuracy', n_folds=3)
opt.evaluate(None, data)  # baseline score with default pipeline parameters

# Space of hyperparameters.
# The keys must respect the following syntax: "enc__param".
#   "enc" = "ne" for na encoder
#   "enc" = "ce" for categorical encoder
#   "enc" = "fs" for feature selector [OPTIONAL]
#   "enc" = "stck"+str(i) to add layer n°i of meta-features [OPTIONAL]
#   "enc" = "est" for the final estimator
#   "param": a correct associated parameter for each step.
# The values must respect the syntax: {"search": strategy, "space": list}
#   "strategy" = "choice" or "uniform". Default = "choice"
#   list: values to be tested if strategy="choice", else [min, max].
# Available strategies for ne_numerical_strategy are either an integer, a
# float or in {'mean', 'median', "most_frequent"}.
# Available strategies for ce_strategy are:
# {"label_encoding", "dummification", "random_projection", "entity_embedding"}
space = {'ne__numerical_strategy': {"search": "choice", "space": [0]},
         'ce__strategy': {"search": "choice",
                          "space": ["label_encoding", "random_projection",
                                    "entity_embedding"]},
         'fs__threshold': {"search": "uniform", "space": [0.01, 0.3]},
         'est__max_depth': {"search": "choice", "space": [3, 4, 5, 6, 7]}
         }

# Optimise hyper-parameters of the whole pipeline with the given scoring
# function. Algorithm used to optimize: Tree Parzen Estimator.
best = opt.optimise(space, data, 40)
print("Final results : ", opt.evaluate(best, data))

from mlbox.prediction import *
pred = Predictor()
pred.fit_predict(best, data)

# MLBox writes its predictions under save/.
predictions = pd.read_csv("save/Survived_predictions.csv")
predictions = predictions.Survived_predicted
y_test = pd.read_csv("../../Data/titanic/processed/y_test.csv")

from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt

fpr, tpr, _ = roc_curve(y_test, predictions)
roc_auc = auc(fpr, tpr)

plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
# BUGFIX: savefig must run before show() — show() clears the current figure,
# so the original saved an empty PDF.
plt.savefig("ML_Box_Titanic_ROC.pdf")
plt.show()
Auto_scripts/Titanic/ML_Box-Titanic.ipynb
# # Introduction into R coding
#
# This notebook is to get used to the R environment and manage to carry out
# basic tasks such as reading files and simple plots.
#
# ## Installation of libraries and necessary software
# Install the necessary libraries (only needed once) by executing
# (shift-enter) the following cell:

# install.packages() downloads a package once; kept commented so re-running
# the notebook does not re-download.
#install.packages("MASS", repos='http://cran.us.r-project.org')

# ## Loading data and libraries
# This requires that the installation above has been finished without error.
library("MASS")

# ### Exercise 1
# Answer the following questions about the R framework
#
# #### Add your answers here
# (double-click here to edit the cell)
#
# ##### Question I: What is the difference between the commands
# ```install.packages``` and ```library```?
# _Answer_
#
# ##### Question II: What is an object in R and what can it contain?
# _Answer_
#
# ##### Question III: A ```data.frame``` is one of the most important R
# objects. Explain how rows, columns, rownames and colnames correspond to
# what you see on an Excel sheet:
# _Answer_
#
# ##### Question IV: What is the difference between an object and a
# function?
# _Answer_

# ### Exercise 2
# Read the data frame ```Pima.tr2``` from the MASS library.
# Calculate the dimensions of the data frame by executing the following
# cell. Count the number of missing values for each column. For that add R
# code that contains the functions ```is.na()``` ```colSums()```. Use
# ```table()``` on the output of ```colSums()``` to count how many rows do
# have how many missing values.
data(Pima.tr2)  # the data.frame is in the Pima.tr2 object
dim(Pima.tr2)   # rows x columns
# add your code here:

# ### Exercise 3
# Use the functions ```mean()``` and ```range()``` to find the mean and
# range of:
# - the numbers 1, 2, ..., 21
# - the sample of 50 random values generated from a normal distribution
#   with mean 0 and variance 1 using ```rnorm(50)```. Repeat several times
#   with a new set of 50 random numbers to get a feeling about random
#   numbers.
# - the columns ```height``` and ```weight``` in the data frame ```women```
#
# Repeat all above, but now with the functions ```median()```, ```sum()```
# and ```sd()```.
x <- 1:21           # the integers 1 through 21
y <- rnorm(50)      # 50 standard-normal random draws
z1 <- women$height  # heights from the built-in `women` data frame
z2 <- women$weight  # weights from the same data frame

# ##### Question I: Which are good descriptors for the different samples?
# _Answer_
#
# ##### Question II: What is the most accurate way to describe normally
# distributed data?
# _Answer_

# ### Exercise 4
# Get dataset mammals that is part of package MASS. Plot ```brain``` versus
# ```body```. Additionally, do the same plot accessing the columns
# directly. Try to visualize the data on logarithmic scale applying the
# ```log``` argument in the ```plot``` function.
library(MASS)
data("mammals")
x <- mammals$body   # body mass
y <- mammals$brain  # brain mass

# ##### Question I: Why is the logarithmic scale more suitable?
# _Answer_
#
# ##### Question II: What is the relation between x and y when they show a
# linear relationship on double-logarithmic scale (both axes on logarithmic
# scale)?
# _Answer_

# ### Exercise 5
# Get data set ```genotype``` from library(MASS) and read about it
# (```?genotype```). Sort the data by column ```Wt``` with the ```order```
# function. Sort the data also by column ```Mother```. Then sort by both
# ```Wt``` and then ```Mother```.
library(MASS)
data(genotype)
# order() returns row indices that sort by Wt; indexing reorders the rows.
A <- genotype[order(genotype$Wt),]

# ##### Question I: Do you see any relation between having a mother of the
# same genotype and the average weight gain?
# _Answer_

# ### Exercise 6
# Look at the ```for``` loop below that prints each number of a vector on a
# separate line, with its square and cube alongside.
# Look up ```help(while)```. Show how to use a ```while``` loop to achieve
# the same result.
vec <- 1:10
for (i in vec) {
  # one line per number: value, square, cube
  print(paste(i, i*i, i^3))
}

# ### Exercise 7
# Carry out the commands below
paste("Leo","the","lion")
paste("a","b")
paste("a","b", sep="")
paste(1:5)
paste(1:5, collapse="")

# ##### Question I: What do the arguments ```sep``` and ```collapse```
# achieve (test by making your own examples)?
# _Answer_

# ### Exercise 8
# The following function calculates the mean and standard deviation of a
# numeric vector.
# ```
# MeanAndSd <- function (x) {
#    av <- mean(x)
#    sdev <- sd(x)
#    c(mean=av, sd=sdev)
# }
# ```
# Modify the function so that: (a) the default is to use ```rnorm()``` to
# generate 20 random numbers; (b) if there are missing values, the mean and
# standard deviation are calculated for the remaining values.

# example for using 100 random numbers and calculating the mean for the
# remaining (non-NA) values
MeanAndSd <- function (x=rnorm(100), narm=T) {
  # T is R's built-in alias for TRUE; na.rm drops NAs from the mean.
  av <- mean(x,na.rm=narm)
  # NOTE(review): sd() is called without na.rm, so an input containing NA
  # still yields sd = NA — confirm whether that is intended for part (b).
  sdev <- sd(x)
  c(mean=av, sd=sdev)
}
sample <- c(rnorm(20),NA,rnorm(20))
MeanAndSd()
MeanAndSd(sample)

# ##### Question I: Which would be typical values for mean and standard
# deviation?
# _Answer_
E_Biostatistics/Playground/Basic_R.ipynb
# Train a CNN on CIFAR-10 twice — first on the raw images, then with
# on-the-fly data augmentation — and compare the training/validation curves.

from matplotlib import pyplot as plt
# %matplotlib inline
import numpy as np
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.callbacks import EarlyStopping
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import regularizers

# Load CIFAR-10 and scale pixel values into [0, 1].
from keras.datasets import cifar10
(X_train, y_train), (X_val, y_val) = cifar10.load_data()
X_train = X_train.astype('float32') / 255.
X_val = X_val.astype('float32') / 255.

# One-hot encode the 10 class labels.
n_classes = 10
y_train = np_utils.to_categorical(y_train, n_classes)
y_val = np_utils.to_categorical(y_val, n_classes)

# Preview the first four training images.
import matplotlib.pyplot as plt
# %matplotlib inline
plt.subplot(221)
plt.imshow(X_train[0].reshape(32, 32, 3), cmap=plt.get_cmap('gray'))
plt.grid(False)  # fixed: plt.grid('off') passes a truthy string and turns the grid ON
plt.subplot(222)
plt.imshow(X_train[1].reshape(32, 32, 3), cmap=plt.get_cmap('gray'))
plt.grid(False)
plt.subplot(223)
plt.imshow(X_train[2].reshape(32, 32, 3), cmap=plt.get_cmap('gray'))
plt.grid(False)
plt.subplot(224)
plt.imshow(X_train[3].reshape(32, 32, 3), cmap=plt.get_cmap('gray'))
plt.grid(False)
plt.show()

input_shape = X_train[0].shape  # (32, 32, 3)

weight_decay = 1e-4  # L2 penalty applied to every conv kernel
batch_size = 32      # fixed: was undefined when fit_generator() used it below


def build_model(activation):
    """Return the 6-conv-layer CIFAR-10 CNN used in both experiments.

    activation: name of the activation applied after every conv layer
        ('relu' for the baseline run, 'elu' for the augmented run, as in
        the original notebook).
    """
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay),
                     input_shape=X_train.shape[1:]))
    model.add(Activation(activation))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation(activation))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Conv2D(64, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation(activation))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation(activation))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.3))

    model.add(Conv2D(128, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation(activation))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), padding='same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation(activation))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.4))

    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    return model


def plot_history(history, title_suffix):
    """Plot loss (top) and accuracy (bottom) curves from a Keras History.

    title_suffix: appended to the subplot titles, e.g.
        'without data augmentation' / 'with data augmentation'.
    """
    val_loss_values = history.history['val_loss']
    val_acc_values = history.history['val_acc']
    epochs = range(1, len(val_loss_values) + 1)

    plt.subplot(211)
    plt.plot(epochs, history.history['loss'], 'bo', label='Training loss')
    plt.plot(epochs, val_loss_values, 'r', label='Test loss')
    plt.title('Training and test loss ' + title_suffix)
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(False)
    plt.show()

    plt.subplot(212)
    plt.plot(epochs, history.history['acc'], 'bo', label='Training accuracy')
    plt.plot(epochs, val_acc_values, 'r', label='Test accuracy')
    plt.title('Training and test accuracy ' + title_suffix)
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.gca().set_yticklabels(['{:.0f}%'.format(x * 100)
                               for x in plt.gca().get_yticks()])
    plt.legend()
    plt.grid(False)
    plt.show()


# --- Baseline: no augmentation -------------------------------------------
from keras.optimizers import Adam

model = build_model('relu')
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=10,
                    verbose=1, validation_data=(X_val, y_val))
plot_history(history, 'without data augmentation')

# --- Data augmentation ----------------------------------------------------
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
)
datagen.fit(X_train)

model = build_model('elu')
# Fixed: the original built `adam = Adam(lr=0.01)` but then compiled with the
# string 'adam' (default lr=0.001); use the same optimizer as the baseline run.
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              metrics=['accuracy'])
history = model.fit_generator(
    datagen.flow(X_train, y_train, batch_size=batch_size),
    steps_per_epoch=X_train.shape[0] // batch_size,
    epochs=10,
    validation_data=(X_val, y_val))
plot_history(history, 'with data augmentation')
Chapter04/Data_augmentation_to_improve_network_accuracy.ipynb
# PyTorch playground: train a small fully-connected classifier on
# FashionMNIST for 30 epochs, tracking train loss, test loss and accuracy.

# Setting seeds to try and ensure we have the same results -
# this is not guaranteed across PyTorch releases.
import torch
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import numpy as np
np.random.seed(0)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

from torchvision import datasets, transforms
import torch.nn.functional as F
from torch import nn

mean, std = (0.5,), (0.5,)

# Create a transform and normalise data.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean, std)])

# Download FMNIST training dataset and load training data.
trainset = datasets.FashionMNIST('~/.pytorch/FMNIST/', download=True,
                                 train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

# Download FMNIST test dataset and load test data.
testset = datasets.FashionMNIST('~/.pytorch/FMNIST/', download=True,
                                train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)


class FMNIST(nn.Module):
    """Three-layer MLP classifier for 28x28 FashionMNIST images.

    Returns log-probabilities over the 10 classes (pairs with NLLLoss).
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        # Flatten (batch, 1, 28, 28) -> (batch, 784).
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        x = F.log_softmax(x, dim=1)
        return x


model = FMNIST()
model.to(device)

from torch import optim

criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

num_epochs = 30
train_tracker, test_tracker, accuracy_tracker = [], [], []

for i in range(num_epochs):
    # --- training pass ---
    model.train()
    cum_loss = 0
    for batch, (images, labels) in enumerate(trainloader, 1):
        images = images.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        cum_loss += loss.item()
    train_tracker.append(cum_loss / len(trainloader))
    print(f"Epoch({i+1}/{num_epochs}) | Training loss: {cum_loss/len(trainloader)} | ", end='')

    # --- evaluation pass ---
    # Fixed: evaluate under no_grad()/eval() — the original built autograd
    # graphs for the whole test set every epoch (wasted memory, and would be
    # incorrect if dropout/batch-norm layers were ever added to the model).
    model.eval()
    test_loss = 0
    num_correct = 0
    total = 0
    with torch.no_grad():
        for batch, (images, labels) in enumerate(testloader, 1):
            images = images.to(device)
            labels = labels.to(device)
            logps = model(images)
            batch_loss = criterion(logps, labels)
            test_loss += batch_loss.item()
            # Convert log-probabilities back to probabilities for argmax.
            output = torch.exp(logps)
            pred = torch.argmax(output, 1)
            total += labels.size(0)
            num_correct += (pred == labels).sum().item()
    test_tracker.append(test_loss / len(testloader))
    print(f"Test loss: {test_loss/len(testloader)} | ", end='')
    accuracy_tracker.append(num_correct / total)
    print(f'Accuracy : {num_correct/total}')

# Summary after all epochs (num_correct/total hold the last epoch's counts).
print(f'\nNumber correct : {num_correct}, Total : {total}')
print(f'Accuracy of the model after 30 epochs on the 10000 test images: {num_correct * 100 / total}% ')

# Plot the training loss vs the test loss over the 30 epochs.
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(train_tracker, label='Training loss')
plt.plot(test_tracker, label='Test loss')
plt.legend()

# Now add the accuracy to the mix.
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(train_tracker, label='Training loss')
plt.plot(test_tracker, label='Test loss')
plt.plot(accuracy_tracker, label='Test accuracy')
plt.legend()

# Further challenges and experiments:
# - Can you get better accuracy with more layers, different layer widths,
#   or fewer/more epochs?
# - Can you improve on your results if you add additional layers like Dropout?
#   https://pytorch.org/docs/master/nn.html#torch.nn.Dropout
pytorch_example/PyTorch_Playground.ipynb
# Amazon SageMaker Workshop — Evaluation.
#
# Fetch the model trained in the previous (Modeling) lab, evaluate it against
# a test dataset with scikit-learn metrics, then package the same logic as an
# `evaluate.py` script and run it as a SageMaker Processing job.

import sys
# !{sys.executable} -m pip install sagemaker==2.42.0 -U
# !{sys.executable} -m pip install xgboost==1.2.1

import boto3
import sagemaker
from sagemaker.s3 import S3Uploader, S3Downloader

region = boto3.Session().region_name
sm_sess = sagemaker.session.Session()
role = sagemaker.get_execution_role()

# Get the variables from initial setup.
# %store -r bucket
# %store -r prefix
bucket, prefix

# --- If you skipped the previous lab: use the pre-trained model -----------
## Uncomment if you skipped previous lab
# # !cp config/model.tar.gz ./
# model_s3_uri = S3Uploader.upload("model.tar.gz", f"s3://{bucket}/{prefix}/model")

# --- If you have done the previous lab: download the model from S3 --------
# %store -r training_job_name
training_job_name

estimator = sagemaker.estimator.Estimator.attach(training_job_name)
model_s3_uri = estimator.model_data
print("\nmodel_s3_uri =", model_s3_uri)
S3Downloader.download(model_s3_uri, ".")

# --- Evaluate the model locally -------------------------------------------
import json
import os
import tarfile
import logging
import pickle
import pandas as pd
import xgboost
from sklearn.metrics import classification_report, roc_auc_score, accuracy_score

model_path = "model.tar.gz"
with tarfile.open(model_path) as tar:
    tar.extractall(path=".")

print("Loading xgboost model.")
with open("xgboost-model", "rb") as fh:  # close the handle (was a bare open())
    model = pickle.load(fh)

print("Loading test input data")
test_path = "config/test-dataset.csv"
df = pd.read_csv(test_path, header=None)
df

print("Reading test data.")
# Column 0 holds the label; the remaining columns are the features.
y_test = df.iloc[:, 0].to_numpy()
df.drop(df.columns[0], axis=1, inplace=True)
X_test = xgboost.DMatrix(df.values)
X_test

print("Performing predictions against test data.")
predictions = model.predict(X_test)

print("Creating classification evaluation report")
acc = accuracy_score(y_test, predictions.round())
# NOTE(review): AUC is computed on the *rounded* predictions, matching the
# original notebook — passing the raw scores would give a more informative AUC.
auc = roc_auc_score(y_test, predictions.round())
print("Accuracy =", acc)
print("AUC =", auc)

# --- Create a classification report (structure per SageMaker docs) --------
# https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-metrics.html
# We'll use this logic later in Lab 6-Pipelines.
import pprint

# The metrics reported can change based on the model used - check the link
# for the documentation.
report_dict = {
    "binary_classification_metrics": {
        "accuracy": {
            "value": acc,
            "standard_deviation": "NaN",
        },
        "auc": {"value": auc, "standard_deviation": "NaN"},
    },
}
print("Classification report:")
pprint.pprint(report_dict)

evaluation_output_path = os.path.join(".", "evaluation.json")
print("Saving classification report to {}".format(evaluation_output_path))
with open(evaluation_output_path, "w") as f:
    f.write(json.dumps(report_dict))

# --- Now put the working code in a Python script --------------------------
# %%writefile evaluate.py
"""Evaluation script for measuring model accuracy."""
import json
import os
import tarfile
import logging
import pickle
import pandas as pd
import xgboost

logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

# May need to import additional metrics depending on what you are measuring.
# See https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-metrics.html
from sklearn.metrics import classification_report, roc_auc_score, accuracy_score

if __name__ == "__main__":
    model_path = "/opt/ml/processing/model/model.tar.gz"
    with tarfile.open(model_path) as tar:
        # Fixed: was extractall(path="..") — the model was unpacked into the
        # parent directory, but "xgboost-model" is opened from the CWD below
        # (the interactive cell above correctly uses path=".").
        tar.extractall(path=".")

    logger.debug("Loading xgboost model.")
    with open("xgboost-model", "rb") as fh:
        model = pickle.load(fh)

    print("Loading test input data")
    test_path = "/opt/ml/processing/test/test-dataset.csv"
    df = pd.read_csv(test_path, header=None)

    logger.debug("Reading test data.")
    y_test = df.iloc[:, 0].to_numpy()
    df.drop(df.columns[0], axis=1, inplace=True)
    X_test = xgboost.DMatrix(df.values)

    logger.info("Performing predictions against test data.")
    predictions = model.predict(X_test)

    print("Creating classification evaluation report")
    acc = accuracy_score(y_test, predictions.round())
    auc = roc_auc_score(y_test, predictions.round())

    # The metrics reported can change based on the model used, but it must be
    # a specific name per
    # https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-metrics.html
    report_dict = {
        "binary_classification_metrics": {
            "accuracy": {
                "value": acc,
                "standard_deviation": "NaN",
            },
            "auc": {"value": auc, "standard_deviation": "NaN"},
        },
    }

    print("Classification report:\n{}".format(report_dict))

    evaluation_output_path = os.path.join(
        "/opt/ml/processing/evaluation", "evaluation.json"
    )
    print("Saving classification report to {}".format(evaluation_output_path))
    with open(evaluation_output_path, "w") as f:
        f.write(json.dumps(report_dict))

# --- Run the script with SageMaker Processing -----------------------------
framework_version = '1.2-2'
docker_image_name = sagemaker.image_uris.retrieve(
    framework='xgboost', region=region, version=framework_version)
docker_image_name

from sagemaker.processing import (
    ProcessingInput,
    ProcessingOutput,
    ScriptProcessor,
)

# Processing step for evaluation.
processor = sagemaker.processing.ScriptProcessor(
    image_uri=docker_image_name,
    command=["python3"],
    instance_type="ml.m5.xlarge",
    instance_count=1,
    base_job_name="CustomerChurn/eval-script",
    sagemaker_session=sm_sess,
    role=role,
)
entrypoint = "evaluate.py"

# Upload test dataset to S3.
test_s3_uri = S3Uploader.upload("config/test-dataset.csv",
                                f"s3://{bucket}/{prefix}/test")
test_s3_uri

processor.run(
    code=entrypoint,
    inputs=[
        sagemaker.processing.ProcessingInput(
            source=model_s3_uri,
            destination="/opt/ml/processing/model",
        ),
        sagemaker.processing.ProcessingInput(
            source=test_s3_uri,
            destination="/opt/ml/processing/test",
        ),
    ],
    outputs=[
        sagemaker.processing.ProcessingOutput(
            output_name="evaluation", source="/opt/ml/processing/evaluation"
        ),
    ],
    job_name="CustomerChurnEval"
)

# The Processing job saved the JSON evaluation report to S3 — check it.
out_s3_report_uri = processor.latest_job.outputs[0].destination
out_s3_report_uri

reports_list = S3Downloader.list(out_s3_report_uri)
reports_list

report = S3Downloader.read_file(reports_list[0])
print("=====Model Report====")
print(json.dumps(json.loads(report.split('\n')[0]), indent=2))
3-Evaluation/.ipynb_checkpoints/evaluation-checkpoint.ipynb
# Vertex AI (KFP v2) pipeline for AlphaFold inference: MSA searches
# (jackhmmer / hhblits / hhsearch), feature aggregation, per-model
# prediction on GPU nodes, and protein relaxation.

# --- User / project configuration -----------------------------------------
USER = "<EMAIL>"  # @param {type:"string"} <---CHANGE THESE
BUCKET_NAME = "alphafold_protein_structure"  # @param {type:"string"} <---CHANGE THESE
GOOGLE_CLOUD_PROJECT = "aburdenko-project"  # @param {type:"string"} <---CHANGE THESE
REGION = "us-central1"  # @param {type:"string"} <---CHANGE THESE
# NOTE(review): no "gs://" scheme here — the run cell below rebuilds the root
# with the scheme before submitting; confirm the decorator default is unused.
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)
PIPELINE_ROOT

GOOGLE_APPLICATION_CREDENTIALS = "/home/aburdenko/aburdenko-project-d93f3d235d90.json"  # @param {type:"string"} <---CHANGE THESE
DRIVE_PATH = "/mnt/gdrive"
LIB_PATH = f"{DRIVE_PATH}/Colab Notebooks/lib"
CLEAN_LIB_PATH = '/home/aburdenko/python_lib'
print(f"Google Drive Path is: {DRIVE_PATH}")
print(f"Lib Path is: {LIB_PATH}")
# %env GOOGLE_CLOUD_PROJECT=$GOOGLE_CLOUD_PROJECT
# %env GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS
# %env CLEAN_LIB_PATH=$CLEAN_LIB_PATH

# --- Make the Drive-hosted python_lib importable ---------------------------
import os
import sys
import errno

CLEAN_LIB_PATH = os.environ.get('CLEAN_LIB_PATH')
if not os.path.islink(CLEAN_LIB_PATH):
    if os.path.exists(CLEAN_LIB_PATH):
        os.unlink(CLEAN_LIB_PATH)
    try:
        os.symlink(LIB_PATH, CLEAN_LIB_PATH)
    except OSError as e:
        # Fixed: the original swallowed *every* OSError here; only an
        # already-existing link is benign — re-raise anything else.
        if e.errno != errno.EEXIST:
            raise

os.listdir(CLEAN_LIB_PATH)
sys.path.insert(0, CLEAN_LIB_PATH)
if 'alphafold_components' not in sys.path:
    sys.path.insert(0, '../alphafold_components')
if '.' not in sys.path:
    sys.path.insert(0, '.')

libs = os.listdir(CLEAN_LIB_PATH)

# !pip3 install --target=$CLEAN_LIB_PATH grpcio-tools
# #!pip3 install --target=$CLEAN_LIB_PATH grpcio --upgrade

# Install missing pipeline dependencies into the shared lib directory and
# restart the kernel so the freshly installed packages are importable.
needs_restart = False
if not any('google_cloud_aiplatform' in lib for lib in libs):
    print('google_cloud_aiplatform')
    # !pip3 install --target=$CLEAN_LIB_PATH google-cloud-aiplatform
    needs_restart = True
if not any('google_cloud_pipeline_components' in lib for lib in libs):
    print('google_cloud_pipeline_components')
    # !pip3 install --target=$CLEAN_LIB_PATH google-cloud-pipeline-components
    needs_restart = True
if not any('kfp' in lib for lib in libs):
    print('kfp')
    # !pip3 install --target=$CLEAN_LIB_PATH kfp
    needs_restart = True
if not any('grpcio' in lib for lib in libs):
    print('grpcio')
    # !pip3 install --target=$CLEAN_LIB_PATH grpcio
    # !pip3 install --target=$CLEAN_LIB_PATH grpcio-tools
    needs_restart = True

if needs_restart:
    print("🔁 Restarting kernel...")
    import IPython
    IPython.Application.instance().kernel.do_shutdown(True)

# --- Authentication and Authorization --------------------------------------
# !echo $GOOGLE_CLOUD_PROJECT
# !gcloud config set project $GOOGLE_CLOUD_PROJECT
# !gcloud --project $GOOGLE_CLOUD_PROJECT services enable compute.googleapis.com \
#   containerregistry.googleapis.com \
#   aiplatform.googleapis.com \
#   cloudbuild.googleapis.com \
#   cloudfunctions.googleapis.com

from inspect import isdatadescriptor
from typing import NamedTuple
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import component, OutputPath, InputPath, Output, Input, Dataset, Metrics, Model, Artifact
from kfp.v2.google.client import AIPlatformClient
from google_cloud_pipeline_components import aiplatform as gcc_aip

print(sys.path)

# --- End of Setup - Get to Work --------------------------------------------
PIPELINE_NAME = 'alphafold-inference'
PIPELINE_DESCRIPTION = 'Alphafold inference'

REFERENCE_DATASETS_IMAGE = "https://www.googleapis.com/compute/v1/projects/jk-mlops-dev/global/images/jk-alphafold-datasets 3000"
REFERENCE_DATASETS_GCS_LOCATION = 'gs://alphafold_protein_structure'
# Filestore share holding the reference databases (ip, export, mount point, VPC).
REFERENCE_DATASETS_URI = '10.71.1.10,/datasets_v1,/mnt/nfs/alphafold,projects/895222332033/global/networks/default'
MODEL_PARAMS_GCS_LOCATION = 'gs://alphafold_protein_structure/upload/model_params'
MODEL_PARAMS_URI = 'gs://alphafold_protein_structure'

# Paths of each reference database relative to the datasets volume root.
UNIREF90_PATH = 'uniref90/uniref90.fasta'
MGNIFY_PATH = 'mgnify/mgy_clusters_2018_12.fa'
BFD_PATH = 'bfd/bfd_metaclust_clu_complete_id30_c90_final_seq.sorted_opt'
UNICLUST30_PATH = 'uniclust30/uniclust30_2018_08/uniclust30_2018_08'
UNIPROT_PATH = 'uniprot/uniprot.fasta'
PDB70_PATH = 'pdb70/pdb70'
PDB_MMCIF_PATH = 'pdb_mmcif/mmcif_files'
PDB_OBSOLETE_PATH = 'pdb_mmcif/obsolete.dat'
PDB_SEQRES_PATH = 'pdb_seqres/pdb_seqres.txt'

# Symbolic database names used as metadata keys on the importer artifact.
UNIREF90 = 'uniref90'
MGNIFY = 'mgnify'
BFD = 'bfd'
UNICLUST30 = 'uniclust30'
PDB70 = 'pdb70'
PDB_MMCIF = 'pdb_mmcif'
PDB_OBSOLETE = 'pdb_obsolete'
PDB_SEQRES = 'pdb_seqres'
UNIPROT = 'uniprot'

# Resource limits for the relaxation step.
# NOTE(review): these read the same env vars as the prediction limits below —
# confirm whether dedicated RELAX_* env vars were intended.
RELAX_MEMORY_LIMIT = os.getenv("MEMORY_LIMIT", "85")
RELAX_CPU_LIMIT = os.getenv("CPU_LIMIT", "12")
RELAX_GPU_LIMIT = os.getenv("GPU_LIMIT", "1")
RELAX_GPU_TYPE = os.getenv("GPU_TYPE", "nvidia-tesla-a100")

# Resource limits for the prediction step.
MEMORY_LIMIT = os.getenv("MEMORY_LIMIT", "85")
CPU_LIMIT = os.getenv("CPU_LIMIT", "12")
GPU_LIMIT = os.getenv("GPU_LIMIT", "1")
GPU_TYPE = os.getenv("GPU_TYPE", "nvidia-tesla-a100")

GKE_ACCELERATOR_KEY = 'cloud.google.com/gke-accelerator'
ALPHAFOLD_COMPONENTS_IMAGE = os.getenv("ALPHAFOLD_COMPONETS_IMAGE", 'gcr.io/aburdenko-project/alphafold')

XLA_PYTHON_CLIENT_MEM_FRACTION = "4.0"
TF_FORCE_UNIFIED_MEMORY = "1"

FASTA_PATH = 'gs://alphafold_protein_structure/upload/sequences.fasta'
SEQUENCE_DESC = 'T1050 A7LXT1, Bacteroides Ovatus, 779 residues'
MAX_TEMPLATE_DATE = '2020-05-14'
NUM_ENSUMBLE = 1
USE_GPU_FOR_RELAXATION = True

# One entry per AlphaFold model to run; extras kept for easy re-enabling.
models = [
    {
        'model_name': 'model_1',
        'random_seed': 1,
    },
    # {'model_name': 'model_2', 'random_seed': 2},
    # {'model_name': 'model_3', 'random_seed': 3},
    # {'model_name': 'model_4', 'random_seed': 4},
    # {'model_name': 'model_5', 'random_seed': 5},
]

from alphafold_components import (
    RelaxProteinOp,
    AggregateFeaturesOp,
    ModelPredictOp,
    JackhmmerOp,
    HHBlitsOp,
    HHSearchOp,
    ImportSeqenceOp)


@dsl.pipeline(
    name="sklearn-demo",
    description="A simple sklearn demo",
    pipeline_root=PIPELINE_ROOT,
)
def pipeline(
        project: str = GOOGLE_CLOUD_PROJECT,
        region: str = REGION,
        sequence_path: str = "",
        model_params_uri: str = MODEL_PARAMS_GCS_LOCATION):
    """Run the AlphaFold inference DAG for one input FASTA sequence.

    sequence_path: GCS URI of the input FASTA file.
    model_params_uri: GCS URI of the AlphaFold model parameters.
    """
    input_sequence = dsl.importer(
        artifact_uri=sequence_path,
        artifact_class=Dataset,
        reimport=True)
    input_sequence.set_display_name('Input sequence')

    model_parameters = dsl.importer(
        artifact_uri=model_params_uri,
        artifact_class=Artifact,
        reimport=True)
    model_parameters.set_display_name('Model parameters')

    reference_databases = dsl.importer(
        artifact_uri=REFERENCE_DATASETS_URI,
        artifact_class=Dataset,
        reimport=False,
        metadata={
            UNIREF90: UNIREF90_PATH,
            MGNIFY: MGNIFY_PATH,
            BFD: BFD_PATH,
            UNICLUST30: UNICLUST30_PATH,
            PDB70: PDB70_PATH,
            PDB_MMCIF: PDB_MMCIF_PATH,
            PDB_OBSOLETE: PDB_OBSOLETE_PATH,
            PDB_SEQRES: PDB_SEQRES_PATH,
            UNIPROT: UNIPROT_PATH,
        }
    )
    reference_databases.set_display_name('Reference databases')

    # Independent MSA searches fan out from the same input sequence.
    search_uniref = JackhmmerOp(
        project=project,
        region=region,
        database=UNIREF90,
        reference_databases=reference_databases.output,
        sequence=input_sequence.output,
    )
    search_uniref.set_display_name('Search Uniref')

    search_mgnify = JackhmmerOp(
        project=project,
        region=region,
        database=MGNIFY,
        reference_databases=reference_databases.output,
        sequence=input_sequence.output,
    )
    search_mgnify.set_display_name('Search Mgnify')

    search_uniclust = HHBlitsOp(
        project=project,
        region=region,
        msa_dbs=[UNICLUST30],
        reference_databases=reference_databases.output,
        sequence=input_sequence.output,
    )
    search_uniclust.set_display_name('Search Uniclust')

    search_bfd = HHBlitsOp(
        project=project,
        region=region,
        msa_dbs=[BFD],
        reference_databases=reference_databases.output,
        sequence=input_sequence.output,
    )
    search_bfd.set_display_name('Search BFD')

    # Template search needs the Uniref MSA as input.
    search_pdb = HHSearchOp(
        project=project,
        region=region,
        template_dbs=[PDB70],
        mmcif_db=PDB_MMCIF,
        obsolete_db=PDB_OBSOLETE,
        max_template_date=MAX_TEMPLATE_DATE,
        reference_databases=reference_databases.output,
        sequence=input_sequence.output,
        msa=search_uniref.outputs['msa'],
    )
    search_pdb.set_display_name('Search Pdb')

    aggregate_features = AggregateFeaturesOp(
        sequence=input_sequence.output,
        msa1=search_uniref.outputs['msa'],
        msa2=search_mgnify.outputs['msa'],
        msa3=search_bfd.outputs['msa'],
        msa4=search_uniclust.outputs['msa'],
        template_features=search_pdb.outputs['template_features'],
    )
    aggregate_features.set_display_name('Aggregate features')

    # TODO: think about what to do with the random seed now that this is a
    # ParallelFor loop.
    with dsl.ParallelFor(models) as model:
        model_predict = ModelPredictOp(
            model_features=aggregate_features.outputs['features'],
            model_params=model_parameters.output,
            model_name=model.model_name,
            num_ensemble=NUM_ENSUMBLE,
            random_seed=model.random_seed
        )
        model_predict.set_display_name('Predict')
        model_predict.set_cpu_limit(CPU_LIMIT)
        model_predict.set_memory_limit(MEMORY_LIMIT)
        model_predict.set_gpu_limit(GPU_LIMIT)
        # Fixed: the original referenced `config.GPU_TYPE`,
        # `config.TF_FORCE_UNIFIED_MEMORY` and
        # `config.XLA_PYTHON_CLIENT_MEM_FRACTION`, but no `config` module is
        # imported anywhere in this notebook (NameError at compile time).
        # Use the module-level constants, as the relax step below already does.
        model_predict.add_node_selector_constraint(GKE_ACCELERATOR_KEY, GPU_TYPE)
        model_predict.set_env_variable("TF_FORCE_UNIFIED_MEMORY", TF_FORCE_UNIFIED_MEMORY)
        model_predict.set_env_variable("XLA_PYTHON_CLIENT_MEM_FRACTION", XLA_PYTHON_CLIENT_MEM_FRACTION)

        relax_protein = RelaxProteinOp(
            unrelaxed_protein=model_predict.outputs['unrelaxed_protein'],
            use_gpu=USE_GPU_FOR_RELAXATION
        )
        relax_protein.set_display_name('Relax protein')
        relax_protein.set_cpu_limit(RELAX_CPU_LIMIT)
        relax_protein.set_memory_limit(RELAX_MEMORY_LIMIT)
        relax_protein.set_gpu_limit(RELAX_GPU_LIMIT)
        relax_protein.add_node_selector_constraint(GKE_ACCELERATOR_KEY, RELAX_GPU_TYPE)
        relax_protein.set_env_variable("TF_FORCE_UNIFIED_MEMORY", TF_FORCE_UNIFIED_MEMORY)
        relax_protein.set_env_variable("XLA_PYTHON_CLIENT_MEM_FRACTION", XLA_PYTHON_CLIENT_MEM_FRACTION)


from kfp.v2 import compiler  # noqa: F811

compiler.Compiler().compile(
    pipeline_func=pipeline,
    package_path="custom_train_pipeline.json"
)

# --- Submit the compiled pipeline to Vertex AI -----------------------------
from datetime import datetime
# from google.cloud import aiplatform
# from google.cloud.aiplatform import pipeline_jobs

PIPELINE_SPEC_PATH = "custom_train_pipeline.json"
PIPELINE_ROOT = 'gs://{}/pipeline_root/{}'.format(BUCKET_NAME, USER)

params = {
    'sequence_path': FASTA_PATH,
    'model_params_uri': MODEL_PARAMS_GCS_LOCATION
    # 'sequence_desc': SEQUENCE_DESC,
    # 'max_template_date': MAX_TEMPLATE_DATE,
    # 'project': GOOGLE_CLOUD_PROJECT,
    # 'region': REGION,
    # 'models': models,
    # 'num_ensemble': NUM_ENSUMBLE,
    # 'use_gpu_for_relaxation': USE_GPU_FOR_RELAXATION
}

compiler.Compiler().compile(
    pipeline_func=pipeline,
    package_path=PIPELINE_SPEC_PATH
)

# (the original constructed AIPlatformClient twice; once is enough)
api_client = AIPlatformClient(project_id=GOOGLE_CLOUD_PROJECT, region=REGION)
response = api_client.create_run_from_job_spec(
    PIPELINE_SPEC_PATH,
    pipeline_root=PIPELINE_ROOT,
    parameter_values=params
)
notebook/alphafold-notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (shared-conda) # language: python # name: shared-conda # --- import os import pandas as pd users = pd.read_csv("/home/srivbane/shared/caringbridge/data/projects/sna-social-support/csv_data/pcts.csv") print(len(users)) multi_site_count = 0 user_site_map = {} for userId, group in users.groupby(by='userId', sort=False): siteIds = tuple(group.siteId.tolist()) if len(siteIds) > 1: multi_site_count += 1 user_site_map[userId] = siteIds print(f"{len(user_site_map.keys())} users mapped to sites. ({multi_site_count} users to multiple sites.)")
build_network/CreateUserSiteMap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # ScmRun # # *Suggestions for update:* add examples of handling of timeseries interpolation plus how the guessing works # # In this notebook we provide an overview of the capabilities provided by scmdata's `ScmRun` class. `ScmRun` provides a efficient interface to analyse timeseries data. # ## Imports # + # NBVAL_IGNORE_OUTPUT import traceback import numpy as np from openscm_units import unit_registry as ur from pint.errors import DimensionalityError from scmdata import ScmRun from scmdata.errors import NonUniqueMetadataError # - # ## Loading data # # `ScmRun`'s can read many different data types and be loaded in many different ways. # For a full explanation, see the docstring of `ScmRun`'s `__init__` method. print(ScmRun.__init__.__doc__) # Here we load data from a file. # # *Note:* here we load RCP26 emissions data. This originally came from http://www.pik-potsdam.de/~mmalte/rcps/ and has since been re-written into a format which can be read by scmdata using the [pymagicc](https://github.com/openclimatedata/pymagicc) library. We are not currently planning on importing Pymagicc's readers into scmdata by default, please raise an issue [here](https://github.com/openscm/scmdata/issues) if you would like us to consider doing so. rcp26 = ScmRun("rcp26_emissions.csv", lowercase_cols=True) # ## Timeseries # # `ScmDataFrame` is ideally suited to working with timeseries data. # The `timeseries` method allows you to easily get the data back in wide format as a *pandas* `DataFrame`. # Here 'wide' format refers to representing timeseries as a row with metadata being contained in the row labels. rcp26.timeseries().head() type(rcp26.timeseries()) # ## Operations with scalars # # Basic operations with scalars are easily performed. 
rcp26.head() (rcp26 + 2).head() (rcp26 / 4).head() # `ScmRun` instances also support operations with [Pint](https://github.com/hgrecco/pint) scalars, permitting automatic unit conversion and error raising. For interested readers, the scmdata package uses the [OpenSCM-Units](https://openscm-units.readthedocs.io/) unit registry. to_add = 500 * ur("MtCO2 / yr") # If we try to add 0.5 GtC / yr to all the timeseries, we'll get a `DimensionalityError`. try: rcp26 + to_add except DimensionalityError: traceback.print_exc(limit=0, chain=False) # However, if we filter things correctly, this operation is perfectly valid. (rcp26.filter(variable="Emissions|CO2|MAGICC AFOLU") + to_add).head() # This can be compared to the raw data as shown below. rcp26.filter(variable="Emissions|CO2|MAGICC AFOLU").head() # ## Unit conversion # # The scmdata package uses the [OpenSCM-Units](https://openscm-units.readthedocs.io/) unit registry and uses the [Pint](https://github.com/hgrecco/pint) library to handle unit conversion. # # Calling the `convert_unit` method of an `ScmRun` returns a new `ScmRun` instance with converted units. rcp26.filter(variable="Emissions|BC").timeseries() rcp26.filter(variable="Emissions|BC").convert_unit("kg BC / day").timeseries() # Note that you must filter your data first as the unit conversion is applied to all available variables. If you do not, you will receive `DimensionalityError`'s. try: rcp26.convert_unit("kg BC / day").timeseries() except DimensionalityError: traceback.print_exc(limit=0, chain=False) # Having said this, thanks to Pint's idea of contexts, we are able to trivially convert to CO<sub>2</sub> equivalent units (as long as we restrict our conversion to variables which have a CO<sub>2</sub> equivalent). rcp26.filter(variable=["*CO2*", "*CH4*", "*N2O*"]).timeseries() rcp26.filter(variable=["*CO2*", "*CH4*", "*N2O*"]).convert_unit( "Mt CO2 / yr", context="AR4GWP100" ).timeseries() # Without the context, a `DimensionalityError` is once again raised. 
try: rcp26.convert_unit("Mt CO2 / yr").timeseries() except DimensionalityError: traceback.print_exc(limit=0, chain=False) # In addition, when we do a conversion with contexts, the context information is automatically added to the metadata. This ensures we can't accidentally use a different context for further conversions. ar4gwp100_converted = rcp26.filter( variable=["*CO2*", "*CH4*", "*N2O*"] ).convert_unit("Mt CO2 / yr", context="AR4GWP100") ar4gwp100_converted.timeseries() # Trying to convert without a context, or with a different context, raises an error. try: ar4gwp100_converted.convert_unit("Mt CO2 / yr") except ValueError: traceback.print_exc(limit=0, chain=False) try: ar4gwp100_converted.convert_unit("Mt CO2 / yr", context="AR5GWP100") except ValueError: traceback.print_exc(limit=0, chain=False) # ## Metadata handling # # `ScmRun` instances are strict with respect to metadata handling. If you either try to either a) instantiate an `ScmRun` instance with duplicate metadata or b) change an existing `ScmRun` instance so that it has duplicate metadata then you will receive a `NonUniqueMetadataError`. try: ScmRun( data=np.arange(6).reshape(2, 3), index=[10, 20], columns={ "variable": "Emissions", "unit": "Gt", "model": "idealised", "scenario": "idealised", "region": "World", }, ) except NonUniqueMetadataError: traceback.print_exc(limit=0, chain=False) # NBVAL_IGNORE_OUTPUT try: rcp26["variable"] = "Emissions|CO2|MAGICC AFOLU" except NonUniqueMetadataError: traceback.print_exc(limit=0, chain=False) # The `meta` attribute provides `Timeseries` specific metadata. There is also a `metadata` attribute which provides metadata for the `ScmRun` instance. # # These metadata can be used to store information about the collection of runs as a whole, such as the file where the data are stored or longer-form information about a particular dataset. 
rcp26.metadata["filename"] = "rcp26_emissions.csv" rcp26.metadata # ## Convenience methods # # Below we showcase a few convenience methods of `ScmRun`. These will grow over time, please add a pull request adding more where they are useful! # ### get_unique_meta # # This method helps with getting the unique metadata values in an `ScmRun`. Here we show how it can be useful. Check out its docstring for full details. # By itself, it doesn't do anything special, just returns the unique metadata values as a list. rcp26.get_unique_meta("variable") # However, it can be useful if you expect there to only be one unique metadata value. In such a case, you can use the `no_duplicates` argument to ensure that you only get a single value as its native type (not a list) and that an error will be raised if this isn't the case. rcp26.get_unique_meta("model", no_duplicates=True) try: rcp26.get_unique_meta("unit", no_duplicates=True) except ValueError: traceback.print_exc(limit=0, chain=False)
notebooks/scmrun.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Import Necessary Packages import pandas as pd from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor from sklearn.metrics import mean_squared_error as MSE from sklearn.model_selection import train_test_split,GridSearchCV from sklearn.impute import SimpleImputer from sklearn.preprocessing import LabelEncoder from matplotlib import pyplot as plt from matplotlib.pyplot import figure # ### Defining some global variables # Defining some global variables SEED = 42 # + # Ingest Data # Training dataset df=pd.read_csv("E:\\Jupyter\\Kaggle\\House Prices Advanced Regression Techniques\\train.csv") X = df.iloc[:,:-1] # Target Variable - House sale price y = df.loc[:,['SalePrice']] # Testing dataset testdata = pd.read_csv("E://Jupyter//Kaggle//House Prices Advanced Regression Techniques//test.csv") # - # ### Data Exploration # Firstly, having a look at columns X.columns # Total number of columns and its distribution len(X.columns) # + # Prepare a list for different type of columns: list_null_cols = [] list_numeric_cols = [] list_categorical_cols = [] for col in X.columns: col_dtype = X[col].dtype if (df[col].isnull().sum()>0): list_null_cols.append(col) elif col_dtype=="O": list_categorical_cols.append(col) else: list_numeric_cols.append(col) # - len(list_null_cols) len(list_numeric_cols) len(list_categorical_cols) #Checking listing down visualizations plt.figure(1) plt.scatter(X.loc[:,'MSZoning'],y.loc[:,'SalePrice']) plt.figure(2) plt.scatter(X.loc[:,'MSSubClass'],y.loc[:,'SalePrice']) plt.show() # #### Looking at all features # Visualization of all features i=0 for col in list_numeric_cols: plt.figure(i+1) plt.scatter(X.loc[:,col],y.loc[:,'SalePrice']) plt.title(col) i=i+1 # So we 
have below types of columns: # 1. With categorical data (27) # 2. With numeric data (35) # 3. With null values (19) # ### Dealing with categorical data (27) # #### Converting Categorical Data to Numerical data dfc = X.copy() dict_categorical_col = {} for col in list_categorical_cols: #Training data dict_categorical_col[col]=dfc[col].drop_duplicates().reset_index(drop=True).reset_index() dict_categorical_col[col].columns = ['label',col] len1=len(dict_categorical_col[col]) df_temp = pd.DataFrame({'label':[len1],col:['unknown']},columns=['label',col],index=[len1]) dict_categorical_col[col] = dict_categorical_col[col].append(df_temp) # #### Cleaning Train & Test data testdata1 = testdata.copy() X1 = X.copy() for col in list_categorical_cols: #Mapping with dictionary for Train data X1 = pd.merge(X1,dict_categorical_col[col],on=col,how='left') X1 = X1.drop(columns=col) X1.rename(columns={'label':col},inplace=True) #Checking for null values in Test data testdata1[col] = testdata1[col].fillna("unknown") #Mapping with dictionary for Test data testdata1 = pd.merge(testdata1,dict_categorical_col[col],on=col,how='left') testdata1 = testdata1.drop(columns=col) testdata1.rename(columns={'label':col},inplace=True) testdata1['Street'].unique() # ### Dealing with numeric data (35) # First deal with missing data related to numeric columns in test data imputer1 = SimpleImputer() testdata2 = testdata1.copy() testdata2[list_numeric_cols] = pd.DataFrame(imputer1.fit_transform(testdata1[list_numeric_cols])) testdata2.columns = testdata1.columns # Calculation Correlation from scipy.stats import pearsonr i_index=0 pearson_corr_df = pd.DataFrame(columns=['column','correlation'],index=[-1]) for col in list_numeric_cols: corr,_ = pearsonr(X1.loc[:,col],y.loc[:,'SalePrice']) dftest = pd.DataFrame({'column': [col],'correlation':[corr]},columns=['column','correlation'],index=[i_index]) pearson_corr_df = pearson_corr_df.append(dftest) i_index=i_index+1 pearson_corr_df = pearson_corr_df.dropna() 
pearson_corr_df.loc[:,'correlation'] = pearson_corr_df.loc[:,'correlation'].abs() pearson_corr_df=pearson_corr_df.sort_values(by='correlation') pearson_corr_df.loc[(pearson_corr_df['correlation']>0.5),['correlation']] plt.figure(figsize=(5,10)) threshold = 0.5 plot_df = pearson_corr_df.loc[(pearson_corr_df['correlation']>threshold),:] plt.barh(plot_df.loc[:,'column'],plot_df.loc[:,'correlation']) plt.show() type(pearson_corr_df.loc[(pearson_corr_df['correlation']>threshold),['column']]) pearson_corr_df.loc[(pearson_corr_df['correlation']>threshold),['correlation']] # ### Dealing with Category 3: With null values (19) list_null_cols X.groupby('Fence').size() var_alley = X.loc[:,['Alley']].fillna("No Alley") plt.scatter(var_alley.loc[:,'Alley'],y.loc[:,'SalePrice']) # ## Model Building #Step 2: Building initial model on all numerical columns excluding #feature_list = list_numeric_cols feature_list = list_numeric_cols + list_categorical_cols X1_features = X1[feature_list] X1_features.shape y.shape #Step 3: Define Train & Test Sets X_train,X_test,y_train,y_test = train_test_split(X1_features, y, random_state=SEED, test_size=0.2) y_train.shape #Step 4: Define a Model dt = DecisionTreeRegressor(random_state=SEED, max_depth=10) # + #Step 5: Fitting the model dt.fit(X_train,y_train) # - # Calculating Training Error y_train_pred = dt.predict(X_train) model_train_error = MSE(y_train,y_train_pred)**(1/2) # Calculation Testing Error y_test_pred = dt.predict(X_test) model_test_error = MSE(y_test,y_test_pred)**(1/2) model_train_error model_test_error # ### Grid Search params_dt = {'max_depth': range(4,11), 'min_samples_leaf':[0.04,0.06,0.08], 'max_features':[0.2,0.3,0.4,0.5,0.6,0.7,0.8]} grid_dt = GridSearchCV(estimator = dt, param_grid = params_dt, scoring ='neg_mean_squared_error', cv=10, n_jobs=-1 ) grid_dt.fit(X1_features,y) grid_dt.best_params_ selectedmodel = grid_dt.best_estimator_ # + #Calculating Training error # - y_train_pred_selectedmodel= 
selectedmodel.predict(X_train) y_train_pred_selectedmodel_trainerror = MSE(y_train,y_train_pred_selectedmodel)**(1/2) # + #Calculating Testing error # - y_test_pred_selectedmodel= selectedmodel.predict(X_test) y_test_pred_selectedmodel_testerror = MSE(y_test,y_test_pred_selectedmodel)**(1/2) y_train_pred_selectedmodel_trainerror y_test_pred_selectedmodel_testerror # ### Now lets repeat the same for Gradient Boosting gbt = GradientBoostingRegressor(random_state=SEED) # + # params_gbt = { 'n_estimators':[100,150,200,250,300,350,400,450,500], # 'max_depth': [5,6,7,10,50,80,100] # } # Grid Search gave best depth as 6 & number of estimators as 250 # - params_gbt = { 'n_estimators':[250], 'max_depth': [6] } gbt_gcv = GridSearchCV(estimator = gbt, param_grid = params_gbt, scoring='neg_mean_squared_error', cv=10, n_jobs=-1 ) gbt_gcv.fit(X1_features,y) gbt_gcv.best_params_ selectedmodel_gbt = gbt_gcv.best_estimator_ #Calculating train error y_train_pred_selectedmodel_gbt= selectedmodel_gbt.predict(X_train) y_train_pred_selectedmodel_trainerror_gbt = MSE(y_train,y_train_pred_selectedmodel_gbt)**(1/2) #Calculating test error y_test_pred_selectedmodel_gbt= selectedmodel_gbt.predict(X_test) y_test_pred_selectedmodel_testerror_gbt = MSE(y_test,y_test_pred_selectedmodel_gbt)**(1/2) y_train_pred_selectedmodel_trainerror_gbt y_test_pred_selectedmodel_testerror_gbt # ### Now lets look at using plotting trainerror & testerror for different max_depth import numpy as np list_max_depth = range(1,8) train_errors = list() test_errors = list() for treedepth in list_max_depth: dt_model = DecisionTreeRegressor(random_state=SEED, max_depth = treedepth) dt_model.fit(X_train,y_train) train_errors.append(dt_model.score(X_train,y_train)) test_errors.append(dt_model.score(X_test,y_test)) plt.plot(np.array(list_max_depth),np.array(train_errors), label='Train') plt.plot(np.array(list_max_depth),np.array(test_errors), label='Test') plt.legend(loc='upper left') plt.show() # ### For Predictions 
y_predictions = selectedmodel_gbt.predict(testdata2[feature_list]) output = pd.DataFrame({'Id': testdata.Id, 'SalePrice': y_predictions}) output.to_csv('submission1.csv', index=False) selectedmodel_gbt.feature_importances_ importances_dt = pd.Series(selectedmodel_gbt.feature_importances_,index=testdata2[feature_list].columns) sorted_importances_dt = importances_dt.sort_values() # + plt.figure(figsize=(5,10)) sorted_importances_dt.plot(kind='barh',color='lightgreen') plt.show() # -
housingprice/notebooks/DecisionTree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # for use in tutorial and development; do not include this `sys.path` change in production: import sys ; sys.path.insert(0, "../") # # Data Sources # # Throughout this tutorial we'll work with data in the `dat` subdirectory: # !ls -goh ../dat # In particular, we'll work with a series of *progressive examples* based on the # `dat/recipes.csv` CSV file. # This data comes from a # [Kaggle dataset](https://www.kaggle.com/shuyangli94/food-com-recipes-and-user-interactions/metadata) # that describes metadata about [Food.com](https://food.com/): # # > "Food.com Recipes and Interactions" # <NAME> # Kaggle (2019) # <https://doi.org/10.34740/kaggle/dsv/783630> # One of the simpler recipes in that dataset is `"anytime crepes"` at <https://www.food.com/recipe/327593> # # * id: 327593 # * minutes: 8 # * ingredients: `"['egg', 'milk', 'whole wheat flour']"` # # The tutorial begins by showing how to represent the metadata for this recipe in a knowledge graph, then gradually builds up more and more information about this collection of recipes. # # To start, let's load and examine the CSV data: # + from os.path import dirname import os import pandas as pd df = pd.read_csv(dirname(os.getcwd()) + "/dat/recipes.csv") df.head() # - # Now let's drill down to the metadata for the `"anytime crepes"` recipe recipe_row = df[df["name"] == "anytime crepes"].iloc[0] recipe_row # Given that we have a rich source of *linked data* to use, next we need to focus on *knowledge representation*. 
# We'll use the [FoodOn](https://foodon.org/design/foodon-relations/) ontology (see below) to represent recipes, making use of two of its *controlled vocabularies*: # # * <http://purl.org/heals/food/> # * <http://purl.org/heals/ingredient/> # # The first one defines an entity called `Recipe` which has the full URL of <http://purl.org/heals/food/Recipe> and we'll use that to represent our recipe data from the *Food.com* dataset. # # It's a common practice to abbreviate the first part of the URL for a controlled vocabulary with a *prefix*. # In this case we'll use the prefix conventions used in previous publications related to this ontology: # # | URL | prefix | # | --- | --- | # | <http://purl.org/heals/food/> | `wtm:` | # | <http://purl.org/heals/ingredient/> | `ind:` | # Now let's represent the data using this ontology, starting with the three ingredients for the **anytime crepes** recipe: ingredients = eval(recipe_row["ingredients"]) ingredients # These ingredients become represented, respectively, as: # # * `ind:ChickenEgg` # * `ind:CowMilk` # * `ind:WholeWheatFlour` # ## Ontology Sources # We'll use several different sources for data and ontology throughout the **kglab** tutorial, although most of it focuses on progressive examples that use [*FoodOn*](https://www.nature.com/articles/s41538-018-0032-6). # # *FoodOn* – subtitled "a farm to fork ontology" – takes a comprehensive view of the data and metadata involved in our food supply, beginning with seed genomics, micronutrients, the biology of food alergies, etc. # This work is predicated on leveraging large knowledge graphs to represent the different areas of science, technology, business, public policy, etc.: # # > The need to represent knowledge about food is central to many human activities including agriculture, medicine, food safety inspection, shopping patterns, and sustainable development. 
FoodOn is an ontology – a controlled vocabulary which can be used by both people and computers – to name all parts of animals, plants, and fungai which can bear a food role for humans and domesticated animals, as well as derived food products and the processes used to make them. # # For more details, see: # # * <https://foodon.org/design/foodon-relations/> # * <https://foodkg.github.io/docs/ontologyDocumentation/Ingredient/doc/index-en.html> # * <https://foodkg.github.io/foodkg.html> # * <https://github.com/foodkg/foodkg.github.io> # # For primary sources, see: [[vardeman2014ceur]](https://derwen.ai/docs/kgl/biblio/#vardeman2014ceur), [[sam2014odp]](https://derwen.ai/docs/kgl/biblio/#sam2014odp), [[dooley2018npj]](https://derwen.ai/docs/kgl/biblio/#dooley2018npj), [[hitzler2018]](https://derwen.ai/docs/kgl/biblio/#hitzler2018) # We'll work through several examples of representation, although here's an example of what a full recipe in *FoodOn* would look like: # + active="" # owl:NamedIndividual a wtm:Recipe ; # rdf:about ind:BananaBlueberryAlmondFlourMuffin ; # wtm:hasIngredient ind:AlmondMeal ; # wtm:hasIngredient ind:AppleCiderVinegar ; # wtm:hasIngredient ind:BakingSoda ; # wtm:hasIngredient ind:Banana ; # wtm:hasIngredient ind:Blueberry ; # wtm:hasIngredient ind:ChickenEgg ; # wtm:hasIngredient ind:Honey ; # wtm:isRecommendedForCourse wtm:Dessert ; # wtm:isRecommendedForMeal wtm:Breakfast ; # wtm:isRecommendedForMeal wtm:Snack ; # wtm:hasCookTime "PT60M"^^xsd:duration ; # wtm:hasCookingTemperature "350"^^xsd:integer ; # wtm:serves "4"^^xsd:integer ; # rdfs:label "banana blueberry almond flour muffin" ; # skos:definition "a banana blueberry muffin made with almond flour" ; # skos:scopeNote "recipe" ; # prov:wasDerivedFrom <https://www.allrecipes.com/recipe/238012/banana-blueberry-almond-flour-muffins-gluten-free/?internalSource=hub%20recipe&referringContentType=Search> # . 
# - # ## Graph Size Comparisons # One frequently asked question is about the size of the graphs that we're using in the **kglab** tutorial. # The short answer: "No, these aren't trivial graphs." # # We'll start out with small examples, to show the basics for how to construct an RDF graph. # # Most of the examples here will use a knowledge graph with ~300 nodes and ~2000 edges. # This is a *non-trivial* size, especially when you start working with some graph algorithms. # Again, this tutorial has learning as its main intent, and this size of graph is ideal for running queries, validation, graph algorithms, visualization, etc., with the kinds of compute and memory resources available on contemporary laptops. # # In other words, we prioritize datasets that are large enough for examples to illustrate common use cases, though small enough for learners to understand. # # * 10^6 or more nodes are needed for deep learning # * 10^8 can run on contemporary laptops # * larger graphs require hardware accelerators (e.g., GPUs) or cloud-based clusters # # The full `recipes.tsv` dataset includes nearly 250,000 recipes. In some of the later examples, we'll work with that entire dataset – which is definitely non-trivial.
examples/ex0_0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import copy import numpy as np import os from matplotlib import pyplot as plt import copy import numpy as np from keras.preprocessing.image import ImageDataGenerator print("begin...") data_gen_args = dict(featurewise_center=True, samplewise_center=False, featurewise_std_normalization=True, samplewise_std_normalization=False, zca_whitening=True, zca_epsilon=1e-06, rotation_range=180, width_shift_range=(0,0.5), height_shift_range=(0,0.5), brightness_range=(0.5,1.5), shear_range=0.0, zoom_range=0.0, channel_shift_range=0.3, fill_mode='nearest', cval=0.0, horizontal_flip=True, vertical_flip=True, rescale=None, preprocessing_function=None, data_format=None, validation_split=0.0, dtype=None) image_datagen = ImageDataGenerator(**data_gen_args) mask_datagen = ImageDataGenerator(**data_gen_args) seed = 666 batch_size = 2 image_generator = image_datagen.flow_from_directory( '../', target_size=(224, 224), color_mode='rgb', classes=None, class_mode=None, batch_size=batch_size, shuffle=True, seed=seed, interpolation="bilinear") for i in range(5): plt.figure() for j in range(batch_size): image = image_generator[i][j] plt.subplot(1,batch_size,j+1) plt.imshow(image.astype(np.uint8)) plt.xticks([]) plt.yticks([]) plt.show() print("done!")
kerasimage/keras_augmentor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Load the weather data, parsing the "day" column as datetime timestamps.
df = pd.read_csv("weather_data.csv", parse_dates=["day"])

# BUG FIX: inspect the parsed column *before* it becomes the index — after
# set_index('day') the 'day' column is removed from the frame, so df.day
# would raise AttributeError.
type(df.day[0])

df.set_index('day', inplace=True)  # index by date for time-based operations
df

# Fill missing values per column via a dictionary of replacements.
df1 = df.fillna({
    'temperature': 0,
    'windspeed': 0,
    'event': 'no event'
})
df1

df2 = df.fillna(method="ffill", limit=1)  # carry the previous row's value forward, at most once
df2

df3 = df.fillna(method="bfill", axis="columns")  # backfill along the column axis
df3

# Linear interpolation between known neighbouring values.
df4 = df.interpolate()
df4

import matplotlib.pyplot as plt
plt.figure();
df4.plot();

df4.plot(kind='bar');

df4.plot.barh()

# Drop only rows where every value is missing.
df5 = df.dropna(how="all")
df5

# Reindex onto a complete daily range, inserting NaN rows for missing days.
dt = pd.date_range("01-01-2017", "01-11-2017")
idx = pd.DatetimeIndex(dt)
df6 = df.reindex(idx)
df6
Pandas/codebasics-pandas/05-06 missingdata/5-fillna.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `quantecon.game_theory` を使ってみる # まず `quantecon.game_theory` を読み込む: import quantecon.game_theory as gt # ## `support_enumeration` # # 2人 (非退化) ゲームのナッシュ均衡をすべて列挙する. # 授業での例 # # $ # \begin{bmatrix} # 3, 3 & 3, 2 \\ # 2, 2 & 5, 6 \\ # 0, 3 & 6, 1 # \end{bmatrix} # $ # `NormalFormGame` を作る: g = gt.NormalFormGame( [[(3, 3), (3, 2)], [(2, 2), (5, 6)], [(0, 3), (6, 1)]] ) # 利得表を表示してみる: print(g) # `support_enumeration` でナッシュ均衡を列挙する: gt.support_enumeration(g) # 結果を `NEs` という変数に格納してみる: NEs = gt.support_enumeration(g) # `NEs` の要素の数 = ナッシュ均衡の数 len(NEs) # 1つ目のナッシュ均衡: NEs[0] # 2つ目のナッシュ均衡: NEs[1] # 3つ目のナッシュ均衡: NEs[2] # それぞれ確かにナッシュ均衡になっている: for NE in NEs: print(g.is_nash(NE)) # グレーヴァ『非協力ゲーム理論』第3章練習問題3.3を解かせてみる. # (a) g_a = gt.NormalFormGame( [[(0, 1), (3, 3)], [(5, 2), (0, 0)], [(1, 8), (1, 7)]] ) gt.support_enumeration(g_a) # 1番目のプレイヤー (Python では0始まりなので,プレイヤー0) # の3番目の戦略 (戦略2) は被支配戦略になっている: g_a.players[0].is_dominated(2) # (b) g_b = gt.NormalFormGame( [[(0, 1), (3, 3)], [(5, 2), (0, 0)], [(2, 8), (2, 7)]] ) gt.support_enumeration(g_b) # (c) g_c = gt.NormalFormGame( [[(0, 3), (3, 2)], [(5, 0), (0, 4)], [(1, 1), (1, 1)]] ) gt.support_enumeration(g_c) # ## 他の関数を使ってみる # ### `vertex_enumeration` # # 2人 (非退化) ゲームのナッシュ均衡をすべて列挙する. # `support_enumeration` とは異なるアルゴリズム (多面体の頂点列挙アルゴリズム) が使われている. gt.vertex_enumeration(g_a) gt.vertex_enumeration(g_b) gt.vertex_enumeration(g_c) # ### `lemke_howson` # # 2人ゲームのナッシュ均衡を1つ求める. # 数100戦略程度のゲームにも使える. gt.lemke_howson(g_c) # 戦略数200の2人ゲームをランダムに発生させる: g_200 = gt.random_game((200, 200)) g_200 gt.lemke_howson(g_200)
game20/game20_py.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import glob import numpy as np import scipy.io as sio # %load_ext autoreload # %autoreload 1 # - from src.feat_extraction import bandpower_welch, bandpower_multitaper, bandpower_de no_sessions = 3 no_participants = 15 no_videos = 15 no_channels = 62 frequency = 200 # ## Create features # + files_session = [] files_session = glob.glob("./data/dataset/SEED/Preprocessed_EEG/*.mat") print(np.shape(files_session)) files_session = sorted(files_session) files_session = np.concatenate((files_session[6*no_sessions:], files_session[:6*no_sessions])) # - # #### Absolute # + # Window length win_sec = 0.5 def search(myDict, lookup): for key, value in myDict.items(): if str.find(key, lookup) != -1: return(key) bandpower_SEED_welch = [] for i in range(no_sessions*no_participants): mat = sio.loadmat(files_session[i], verify_compressed_data_integrity=False) for j in range(no_videos): key = search(mat, '_eeg'+str(j+1)) input_brainwaves = mat[key] input_brainwaves = np.array(input_brainwaves) bands_video = [] for k in range(no_channels): bands_video.append(bandpower_welch(input_brainwaves[k,:], sf=frequency, method='welch', band=[4, 7], window_sec=win_sec, relative=False)) bands_video.append(bandpower_welch(input_brainwaves[k,:], sf=frequency, method='welch', band=[8, 13], window_sec=win_sec, relative=False)) bands_video.append(bandpower_welch(input_brainwaves[k,:], sf=frequency, method='welch', band=[14, 30], window_sec=win_sec, relative=False)) bands_video.append(bandpower_welch(input_brainwaves[k,:], sf=frequency, method='welch', band=[31, 50], window_sec=win_sec, relative=False)) bandpower_SEED_welch.append(bands_video) print(i, np.shape(bandpower_SEED_welch)) bandpower_SEED_welch = np.array(bandpower_SEED_welch) print(bandpower_SEED_welch.shape) 
np.save('./data/bandpower_SEED_welch', bandpower_SEED_welch)
# -

# #### Multitaper

# +
def search(myDict, lookup):
    """Return the first key in myDict whose name contains lookup."""
    for key, value in myDict.items():
        # BUG FIX: the original used "is not -1", which tests object
        # *identity* against an int literal (SyntaxWarning in Python 3.8+,
        # implementation-defined result); value comparison needs "!=".
        if key.find(lookup) != -1:
            return key

# Multitaper band power for every (session x participant, video, channel, band).
# Renamed from bandpower_SEED_welch for consistency with the file it is saved to.
bandpower_SEED_multitaper = []
for i in range(no_sessions*no_participants):
    mat = sio.loadmat(files_session[i], verify_compressed_data_integrity=False)
    for j in range(no_videos):
        key = search(mat, '_eeg'+str(j+1))
        input_brainwaves = np.array(mat[key])
        bands_video = []
        for k in range(no_channels):
            # Theta, alpha, beta and gamma bands, in Hz.
            bands_video.append(bandpower_multitaper(input_brainwaves[k,:], sf=frequency, method='multitaper', band=[4, 7], relative=False))
            bands_video.append(bandpower_multitaper(input_brainwaves[k,:], sf=frequency, method='multitaper', band=[8, 13], relative=False))
            bands_video.append(bandpower_multitaper(input_brainwaves[k,:], sf=frequency, method='multitaper', band=[14, 30], relative=False))
            bands_video.append(bandpower_multitaper(input_brainwaves[k,:], sf=frequency, method='multitaper', band=[31, 50], relative=False))
        bandpower_SEED_multitaper.append(bands_video)
    print(i, np.shape(bandpower_SEED_multitaper))
    # Checkpoint after every session file so a crash does not lose progress.
    np.save('./data/bandpower_SEED_multitaper', np.array(bandpower_SEED_multitaper))

bandpower_SEED_multitaper = np.array(bandpower_SEED_multitaper)
print(bandpower_SEED_multitaper.shape)
np.save('./data/bandpower_SEED_multitaper', bandpower_SEED_multitaper)
# -

# #### Differential entropy

# +
def search(myDict, lookup):
    """Return the first key in myDict whose name contains lookup."""
    for key, value in myDict.items():
        # BUG FIX: "is not -1" -> "!= -1" (see the note in the Multitaper cell).
        if key.find(lookup) != -1:
            return key

# Differential-entropy band power, same layout as above.
bandpower_SEED_de = []
for i in range(no_sessions*no_participants):
    mat = sio.loadmat(files_session[i], verify_compressed_data_integrity=False)
    for j in range(no_videos):
        key = search(mat, '_eeg'+str(j+1))
        input_brainwaves = np.array(mat[key])
        bands_video = []
        for k in range(no_channels):
            # Theta, alpha, beta and gamma bands, in Hz.
            bands_video.append(bandpower_de(input_brainwaves[k,:], sf=frequency, method='de', band=[4, 7], relative=False))
            bands_video.append(bandpower_de(input_brainwaves[k,:], sf=frequency, method='de', band=[8, 13], relative=False))
            bands_video.append(bandpower_de(input_brainwaves[k,:], sf=frequency, method='de', band=[14, 30], relative=False))
            bands_video.append(bandpower_de(input_brainwaves[k,:], sf=frequency, method='de', band=[31, 50], relative=False))
        bandpower_SEED_de.append(bands_video)
    print(i, np.shape(bandpower_SEED_de))
    # Checkpoint after every session file so a crash does not lose progress.
    np.save('./data/bandpower_SEED_de', np.array(bandpower_SEED_de))

bandpower_SEED_de = np.array(bandpower_SEED_de)
print(bandpower_SEED_de.shape)
np.save('./data/bandpower_SEED_de', bandpower_SEED_de)
# -
create_features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Lecture 13: Crash Course in Probability # # CSCI 1360E: Foundations for Informatics and Analytics # + [markdown] slideshow={"slide_type": "slide"} # ## Overview and Objectives # + [markdown] slideshow={"slide_type": "-"} # To wrap up the fundamental concepts of core data science, as well as our interlude from Python, today we'll discuss probability and how it relates to statistical inference. Like with statistics, we'll wave our hands a lot and skip past many of the technicalities, so I encourage you to take a full course in probability and statistics. By the end of this lecture, you should be able to # + [markdown] slideshow={"slide_type": "-"} # - Define probability and its relationship to statistics # - Understand statistical dependence and independence # - Explain conditional probability and its role in Bayes' Theorem # + [markdown] slideshow={"slide_type": "slide"} # ## Part 1: Probability # + [markdown] slideshow={"slide_type": "-"} # When we say "what is the probability of X", we're discussing a way of quantifying uncertainty. # + [markdown] slideshow={"slide_type": "-"} # This uncertainty relates to *one particular event*--in the above statement, that event is "X"--happening out of a *universe of all possible events*. # + [markdown] slideshow={"slide_type": "-"} # An easy example is rolling a die: the universe consists of all possible outcomes (any of the 6 sides), whereas any subset is a single event (one side; an even number; etc). # + [markdown] slideshow={"slide_type": "slide"} # ### Relationship with Statistics # + [markdown] slideshow={"slide_type": "-"} # Think of "probability" and "statistics" as two sides of the same coin: you cannot have one without the other. 
# + [markdown] slideshow={"slide_type": "slide"} # ![probstats](Lecture13/slide_3.jpg) # + [markdown] slideshow={"slide_type": "slide"} # Let's go back to the concept of distributions from the Statistics lecture. Remember this figure? # + [markdown] slideshow={"slide_type": "fragment"} # ![distributions](Lecture13/distributions.png) # + [markdown] slideshow={"slide_type": "slide"} # These distributions allow us to say something about the *probability* of a random variable taking a certain specific value. # + [markdown] slideshow={"slide_type": "-"} # In fact, if you look at the previous plot of the normal bell curves--picking a spot along one of those curves gives you the probability of the random variable representing that distribution *taking that particular value* you picked! # + [markdown] slideshow={"slide_type": "-"} # I hope it becomes clear that, the *most likely* value for a random variable to take is its mean (since the curve is highest there). This even has a special name: the **expected value**. It means, on average, this is the value we're going to get from this random variable. # + [markdown] slideshow={"slide_type": "slide"} # We have a special notation for probability: # + [markdown] slideshow={"slide_type": "fragment"} # $P(X = x)$ # + [markdown] slideshow={"slide_type": "-"} # $P$ is the universal symbol for "probability of", followed by some *event*. In this case, our event is "$X = x$". # + [markdown] slideshow={"slide_type": "-"} # (Recall from before: *random variables* are denoted with uppercase letters (e.g. $X$), and *observations* of that random variable are denoted with lowercase letters (e.g. $x$). So when we say $X = x$, we're asking for the *event* where the random variable $X$ takes on the value of the observation $x$.) # + [markdown] slideshow={"slide_type": "slide"} # A few other properties to be aware of: # + [markdown] slideshow={"slide_type": "-"} # - Probabilities are *always* between 0 and 1; no exceptions. 
This means, for any arbitrary event $A$, $0 \le P(A) \le 1$ (super-official notation!). # + [markdown] slideshow={"slide_type": "-"} # - The probability of *something* happening is always exactly 1. Put another way, if you combine all possible events together and ask the probability of one of them occurring, that probability is 1. # + [markdown] slideshow={"slide_type": "-"} # - If $A$ and $B$ are two possible events that are disjoint (as in, they have no overlap), then the probability of either one of them happening is just the sum of their individual probabilities: $P(A \cup B) = P(A) + P(B)$. # + [markdown] slideshow={"slide_type": "fragment"} # These three points are referred to as the **Axioms of Probability** and form the foundation for pretty much every other rule of probability that has ever been and will ever be discovered. # + [markdown] slideshow={"slide_type": "slide"} # ### Visualizing # + [markdown] slideshow={"slide_type": "-"} # A good way of learning probability is to visualize it. Take the spinner on the following slide: # + [markdown] slideshow={"slide_type": "slide"} # ![spinner](Lecture13/spinner1.jpg) # + [markdown] slideshow={"slide_type": "-"} # It's split into 12 segments. You could consider the arrow landing on a segment to be one particular "event". So the probability of landing on any one specific segment is $1/12$. The probability of landing on *any segment at all* is 1. # + [markdown] slideshow={"slide_type": "slide"} # ### Dependence and Independence # + [markdown] slideshow={"slide_type": "-"} # Two events $A$ and $B$ are **dependent** if having knowledge about *one* of them implicitly gives you knowledge about the other. On the other hand, they're **independent** if knowing one tells you nothing about the other. Take an example of flipping a coin: # + [markdown] slideshow={"slide_type": "fragment"} # I have a penny; a regular old penny. I flip it once, and it lands on *Heads*. I flip it 9 more times, and it lands on *Heads* each time. 
What is the probability that the next flip will be *Heads*? # + [markdown] slideshow={"slide_type": "fragment"} # If you said $1/2$ (or 50%), you're correct! Coin flips are **independent** events; you could flip the coin 100 times and get 100 heads, and the probability of tails would *still* be $1/2$. Knowing one coin flip or 100 coin flips tells you nothing about future coin flips. # + [markdown] slideshow={"slide_type": "slide"} # Now, I want to know what the probability is of *two consecutive coin flips* returning Heads. If the first flip is Heads, what is the probability of *both flips being Heads*? What if the first flip is Tails? # + [markdown] slideshow={"slide_type": "fragment"} # In this case, the two coin flips are **dependent**. If the first flip is Tails, then P(two flips = Heads) is 0; it's impossible! On the other hand, if the first coin flip is Heads, then while it's not certain that both coin flips can be Heads, it's still a possibility. Thus, knowing one can tell you something about the other. # + [markdown] slideshow={"slide_type": "slide"} # If two events $A$ and $B$ are independent, their probability can be written as: # + [markdown] slideshow={"slide_type": "-"} # $P(A, B) = P(A) * P(B)$ # + [markdown] slideshow={"slide_type": "-"} # This is a *huge* simplification that comes up in many data science algorithms: if you can prove two random variables in your data are statistically independent, analyzing their behavior in concert with each other becomes *much* easier. # + [markdown] slideshow={"slide_type": "slide"} # On the other hand, if two events are dependent, then we can define the probabilities of these events in terms of their **conditional probabilities**: # + [markdown] slideshow={"slide_type": "-"} # $P(A, B) = P(A | B) * P(B)$ # + [markdown] slideshow={"slide_type": "-"} # This says "the probability of $A$ and $B$ equals the *conditional probability of $A$ given $B$*, multiplied by the probability of $B$." 
# - # That vertical bar means "conditioned on", and we'll get to that next! # + [markdown] slideshow={"slide_type": "slide"} # ### Conditional Probability # + [markdown] slideshow={"slide_type": "-"} # Conditional probability is way of "fixing" a random variable(s) we don't know, so that we can (in some sense) "solve" for the other random variable(s). So when we say: # + [markdown] slideshow={"slide_type": "-"} # $P(A, B) = P(A | B) * P(B)$ # + [markdown] slideshow={"slide_type": "-"} # This tells us that, for the sake of this computation, we're assuming we *know* what $B$ is in $P(A | B)$, as knowing $B$ gives us additional information in figuring out what $A$ is (again, since $A$ and $B$ are dependent). # + [markdown] slideshow={"slide_type": "slide"} # ### Bayes' Theorem # + [markdown] slideshow={"slide_type": "-"} # Which brings us, at last, to Bayes' Theorem and what is probably the hardest but most important part of this entire lecture. # + [markdown] slideshow={"slide_type": "fragment"} # (Thank *you*, Rev. <NAME>) # # ![bayes](Lecture13/Thomas_Bayes.png) # + [markdown] slideshow={"slide_type": "slide"} # Bayes' Theorem is a clever rearrangement of conditional probability, which allows you to update conditional probabilities as more information comes in. For two events, $A$ and $B$, Bayes' Theorem states: # + [markdown] slideshow={"slide_type": "fragment"} # $$ # P(A | B) = \frac{P(B | A) * P(A)}{P(B)} # $$ # + [markdown] slideshow={"slide_type": "-"} # As we've seen, $P(A)$ and $P(B)$ are the probabilities of those two events independent of each other, $P(B | A)$ is the probability of $B$ *given that we know* $A$, and $P(A | B)$ is the probability of $A$ *given that we know* $B$. # + [markdown] slideshow={"slide_type": "slide"} # ### Interpretation of Bayes' Theorem # + [markdown] slideshow={"slide_type": "-"} # Bayes' Theorem allows for an interesting interpretation of probabilistic events. 
# + [markdown] slideshow={"slide_type": "fragment"} # - $P(A|B)$ is known as the *posterior* probability, which is the conditional event you're trying to compute. # + [markdown] slideshow={"slide_type": "fragment"} # - $P(A)$ is known as the *prior* probability, which represents your current knowledge on the event $A$. # + [markdown] slideshow={"slide_type": "fragment"} # - $P(B|A)$ is known as the *likelihood*, essentially weighting how heavily the prior knowledge you have accumulated factors into the computation of your posterior. # + [markdown] slideshow={"slide_type": "fragment"} # - $P(B)$ is a normalizing factor--since the variable/event $A$, the thing we're determining, is not involved in this quantity, it is essentially a constant. # + [markdown] slideshow={"slide_type": "slide"} # Given this interpretation, you could feasibly consider using Bayes' Theorem as a procedure not only to conduct inference on some system, but to simultaneously *update your understanding of the system* by incorporating new knowledge. # + [markdown] slideshow={"slide_type": "-"} # Here's another version of the same thing (they use the terms "hypothesis" and "evidence", rather than "event" and "data"): # + [markdown] slideshow={"slide_type": "fragment"} # ![psych](Lecture13/2017222-18391340-9653-2-bayes-theorem.png) # + [markdown] slideshow={"slide_type": "slide"} # ## Review Questions # # Some questions to discuss and consider: # + [markdown] slideshow={"slide_type": "-"} # 1: Go back to the [spinner graphic](http://f.tqn.com/y/math/1/S/Q/s/spinner1.jpg). We know that the probability of landing on any *specific* segment is 1/12. What is the probability of landing on a *blue* segment? What about a *red* segment? What about a *red OR yellow* segment? What about a *red AND yellow* segment? # # 2: Recall the "conditional probability chain rule" from earlier in this lecture, i.e. that $P(A, B) = P(A | B) * P(B)$. 
Given a coin with $P(heads) = 0.75$ and $P(tails) = 0.25$, and three coin flips $A = heads$, $B = tails$, $C = heads$, compute $P(A, B, C)$. # + [markdown] slideshow={"slide_type": "slide"} # 3: Bayes' Theorem is a clever rearrangement of conditional probability using basic principles. Starting from there, $P(A, B) = P(A | B) * P(B)$, see if you can derive Bayes' Theorem for yourself. # # 4: Provide an example of a problem that Bayes' Theorem would be useful in solving (feel free to Google for examples), and how you would set the problem up (i.e. what values would be plugged into which variables in the equation, and why). # # 5: The bell curve for the normal distribution (or for *any* distribution) has a special name: the **probability distribution function**, or PDF (yep, like the file format). There's another distribution, the *uniform* distribution, that exists between two points $a$ and $b$. Given the name, what do you think its PDF from $a$ to $b$ would look like? # + [markdown] slideshow={"slide_type": "slide"} # ## Course Administrivia # + [markdown] slideshow={"slide_type": "-"} # - How is A6 going? It's due **tonight at 11:59pm!** # - # - This lecture wraps up our "crash course" section; starting Wednesday, we'll get back to Python and seeing how the contents of the past three lectures can be implemented in Python. # - **Data science theory is wholly built on linear algebra, probability, and statistics.** Make sure you understand the contents of these lectures! # + [markdown] slideshow={"slide_type": "slide"} # ## Additional Resources # # 1. <NAME>. *Data Science from Scratch*. 2015. ISBN-13: 978-1491901427 # 2. Grinstead, Charles and <NAME>. *Introduction to Probability*. [PDF](http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/amsbook.mac.pdf) # 3. Illowsky, Barbara and <NAME>. *Introductory Statistics*. [link](https://openstax.org/details/introductory-statistics) # 4. <NAME>; <NAME>; <NAME>; *OpenIntro Statistics*. 
[link](https://www.openintro.org/stat/textbook.php?stat_book=os) # 5. Wasserman, Larry. *All of Statistics: A Concise Course in Statistical Inference*. 2010. ISBN-13: 978-1441923226
lectures/L13.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
# import modules as usual
import os
import glob
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import cv2
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier
# -

# path of files
path_positive = '../eskin_data/yamamoto/throw/'
path_negative = '../eskin_data/yamamoto/others/'


# extract the moment of throwing based on accel values
def extract_action(df):
    """Clip the 181-sample window centered on the estimated throw instant.

    The instant is taken as the mean of the argmax positions of the absolute
    acceleration on the X, Y and Z axes. Returns the window as a 2-D ndarray.
    """
    df = df.reset_index()
    mom_action = int((np.argmax(abs(df.accelX)) +
                      np.argmax(abs(df.accelY)) +
                      np.argmax(abs(df.accelZ))) / 3)
    # FIX: DataFrame.ix was removed from pandas; after reset_index() the
    # labels equal the positions, so .loc is an exact drop-in replacement.
    df = df.loc[mom_action - 90:mom_action + 90]
    df.index = df.time
    df.drop(["time"], axis=1, inplace=True)
    # FIX: DataFrame.as_matrix() was removed from pandas; .values is the
    # version-independent equivalent.
    return df.values


def load_positive_data(path):
    """Load every CSV under *path* as one positive (throw) sample."""
    path = os.path.join(path, '*.csv')
    files = glob.glob(path)
    X_positives = []
    for file_path in files:
        df = pd.read_csv(file_path, index_col=0)
        df = extract_action(df)
        X_positives.append(df)
    X_positives = np.array(X_positives)
    y_positives = np.ones(len(X_positives))
    return X_positives, y_positives


def load_negative_data(path, num_clip=100, random_state=71):
    """Randomly clip *num_clip* 180-sample windows from each negative CSV."""
    np.random.seed(random_state)
    path = os.path.join(path, '*.csv')
    files = glob.glob(path)
    X_negatives = []
    for file_path in files:
        df = pd.read_csv(file_path, index_col=0)
        for i in range(num_clip):
            start = np.random.choice(range(len(df) - 180))
            # FIX: as_matrix() removed from pandas -> .values
            df_extracted = df.iloc[start:start + 180].values
            X_negatives.append(df_extracted)
    X_negatives = np.array(X_negatives)
    y_negatives = np.zeros(len(X_negatives))
    return X_negatives, y_negatives


def resize_matrix(X, size=(20, 20), flatten=False):
    """Resize each sample matrix to *size*; optionally flatten each result."""
    X_resized = []
    for i in range(len(X)):
        X_ = X[i] / 1.
        X_ = cv2.resize(X_, size, interpolation=cv2.INTER_LINEAR)
        if flatten:
            # True for XGBoost etc., False for CNN (Convolutional Neural Networks)
            X_ = X_.ravel()
        X_resized.append(X_)
    X_resized = np.array(X_resized)
    return X_resized


X_positives, y_positives = load_positive_data(path_positive)
X_negatives, y_negatives = load_negative_data(path_negative, num_clip=500)  # random 500 clips from negative data

# check the shape of positive data
X_positives.shape, y_positives.shape

# check the shape of negative data
X_negatives.shape, y_negatives.shape

X_positives = resize_matrix(X_positives, flatten=True)
X_negatives = resize_matrix(X_negatives, flatten=True)

X = np.concatenate((X_positives, X_negatives), axis=0)
y = np.concatenate((y_positives, y_negatives), axis=0)

# +
# 5-fold stratified cross-validation, scored by ROC-AUC.
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=71)
scores = []
for i, (train, test) in enumerate(skf.split(X, y)):
    X_train, y_train = X[train], y[train]
    X_test, y_test = X[test], y[test]
    clf_xgb = XGBClassifier(base_score=0.5, colsample_bylevel=1, colsample_bytree=0.7,
                            gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=3,
                            min_child_weight=1, missing=None, n_estimators=100, nthread=-1,
                            objective='binary:logistic', reg_alpha=0, reg_lambda=1,
                            scale_pos_weight=1, seed=0, silent=True, subsample=0.7)
    clf_xgb.fit(X_train, y_train)
    probs = clf_xgb.predict_proba(X_test)[:, 1]
    score = roc_auc_score(y_test, probs)
    print(i, score)
    scores.append(score)
print("Total ROC-AUC:", np.array(scores).mean())
# -

df_ref = pd.read_csv("../eskin_data/yamamoto/throw/eskin131418286838246619.csv", index_col=0)

# +
# Refit on the full data set to extract feature importances.
clf_xgb.fit(X, y)
feature_importance = clf_xgb.feature_importances_
# -

df_imp = DataFrame(feature_importance.reshape(20, 20),
                   index=[str(x * 0.15) + "_msec" for x in range(20)],
                   columns=df_ref.columns)

# show heatmap of feature importances.
plt.figure(figsize=[20, 20])
sns.heatmap(df_imp)
plt.show()
scripts/Xenoma_e-skin_throwing_detector.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # MediZen Strain Recommendation Model API
#
# ## Version 1.0 - 2019-11-18
#
# ---

import pandas as pd

# +
# Load the dataset
import os

filepath = "/Users/Tobias/workshop/buildbox/medizen_ds_api/data/"
data_filename = "cannabis.csv"
data_filepath = os.path.join(filepath, data_filename)

df1 = pd.read_csv(data_filepath)
# -

df1.head()

df1.shape

# Keep only the strains that actually have a description to vectorize.
df2 = df1.dropna(subset=['Description'])

df2.shape

# ### Vectorizer

from sklearn.feature_extraction.text import TfidfVectorizer

# Instantiate Vectorizer
tfidf = TfidfVectorizer(stop_words='english')
tfidf = tfidf.fit(df2['Description'])

# +
# Pickle
import pickle

pickle_1_filename = 'vect_01.pkl'
pickle_1_path = os.path.join(filepath, pickle_1_filename)
pickle.dump(tfidf, open(pickle_1_path, 'wb'))
# -

# ### Model

# +
sparse = tfidf.transform(df2['Description'])

# Send sparse matrix dataframe
# NOTE(review): get_feature_names() was renamed get_feature_names_out() in
# scikit-learn 1.0 and removed in 1.2 -- update when upgrading sklearn.
tfidf_dtm = pd.DataFrame(sparse.todense(), columns=tfidf.get_feature_names())

# +
from sklearn.neighbors import NearestNeighbors

nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')
nn.fit(tfidf_dtm)
# -

# Generate Knn pickle
pickle_2_filename = 'knn_01.pkl'
pickle_2_path = os.path.join(filepath, pickle_2_filename)
pickle.dump(nn, open(pickle_2_path, 'wb'))

# ### API

# +
# import vectorizer and model
import pickle
import pandas as pd

# NOTE(review): these relative paths differ from the absolute save paths
# above (which live under *filepath*) -- confirm the working directory.
tfidf = pickle.load(open("vect_01.pkl", "rb"))
nn = pickle.load(open("knn_01.pkl", "rb"))
# -


def recommend(request):
    """Return the 5 strains whose descriptions are closest to *request*.

    Parameters:
        request (str): free-text review/description of the desired strain.
    Returns:
        pandas.DataFrame with the 5 nearest strains from df2.
    """
    # Transform the request text into the same tf-idf space as the corpus
    request = pd.Series(request)
    request_sparse = tfidf.transform(request)
    # Send to df
    request_tfidf = pd.DataFrame(request_sparse.todense())
    # Return a list of indexes (the no-op "[x][0]" wrapper is dropped)
    top5 = nn.kneighbors(request_tfidf, n_neighbors=5)[1][0].tolist()
    # Send recommendations to DataFrame
    # BUG FIX: the original indexed an undefined name `df`; the strain
    # metadata the model was fit on lives in df2, and kneighbors returns
    # positional indices, so .iloc on df2 is correct.
    recommendations_df = df2.iloc[top5]
    return recommendations_df


# Create a fake weed review
fake_input = """nice cherry is an indica-dominant strain that captures the flavorful qualities of its cherry parent and the relaxing attributes of mr. nice. with an aroma of sweet skunk, pine, and berry, nice cherry delivers a rush of cerebral energy that lifts the mood while relaxing the body. it’ll also bring an edge back to your appetite while providing focus to keep you productive."""

# Test request function
top5 = recommend(fake_input)

top5
docs/notebooks/02-medizen-recommendations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # NLP - Notebook # In this notebook we will attempt to make a good clustering on our data set by using NLP (Natural Language Processing) methods. Since the data set does not only contain visual information, but also very specific titles of the products, it seems not to be unlikely that this text information can make a substantial contribution to the accuracy of our prediction. # # At the end of this notebook, the results we got from our NLP approach are combined with those from the pHash-Notebook. # ## Preliminaries import warnings warnings.filterwarnings("ignore") # + # Load all the necessary libraries import pandas as pd import numpy as np from numpy import dot from numpy.linalg import norm import pickle import time import random np.random.seed(2018) import nltk from tensorflow.keras.preprocessing import text from sklearn.decomposition import PCA from sklearn.feature_extraction.text import TfidfVectorizer import gensim from gensim.utils import simple_preprocess from gensim.parsing.preprocessing import STOPWORDS from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.stem.porter import * from gensim.models import Word2Vec import nltk #nltk.download('wordnet') stemmer = SnowballStemmer('english') # - # Load the .csv file containing the text information to each image MY_PATH = './data' def load_trainCsv(): df = pd.read_csv(MY_PATH + "/train.csv") return df # # Functions for evaluation # In this notebook, we will build two NLP models. One model is based on the TfidfVectorizer from sklearn and one model based on Word2Vec. In order to evaluate these models later on, we now define some functions. 
# + def f_score_i(cl_real_i, cl_pred_i): ''' Description: Calculate f-score for a single posting_id f1-score is the mean of all f-scores Parameters: argument1 (list): list of posting_id's belonging to the real cluster argument2 (list): list of posting_id's belonging to the predicted cluster Returns: float value of f-score ''' s_pred = set(cl_pred_i) s_real = set(cl_real_i) s_intsec = s_pred.intersection(s_real) return 2*len(s_intsec) / (len(s_pred)+len(s_real)) def recall_i(cl_real_i, cl_pred_i): ''' Description: Calculate recall for a single posting_id Parameters: argument1 (list): list of posting_id's belonging to the real cluster argument2 (list): list of posting_id's belonging to the predicted cluster Returns: float value of recall ''' s_pred = set(cl_pred_i) s_real = set(cl_real_i) s_diff_r_p = s_real.difference(s_pred) return (len(s_real) - len(s_diff_r_p)) / len(s_real) def precision_i(cl_real_i, cl_pred_i): ''' Description: Calculate precision for a single posting_id Parameters: argument1 (list): list of posting_id's belonging to the real cluster argument2 (list): list of posting_id's belonging to the predicted cluster Returns: float value of precision ''' s_pred = set(cl_pred_i) s_real = set(cl_real_i) s_diff_p_r = s_pred.difference(s_real) return (len(s_pred) - len(s_diff_p_r)) / len(s_pred) # + def real_cluster_of_i_w2v(i): ''' Description: Find real cluster for a single posting_id Use this function when working with Word2Vec Parameters: argument1 (int): position of posting_id in DataFrame Returns: list of all posting_id's ''' l_g = (df_train_w2v.iloc[i].at['label_group']) df_red = df_train_w2v[df_train_w2v['label_group'] == l_g] df_red_list = df_red['posting_id'].tolist() return df_red_list def real_cluster_of_i_tfidf(i): ''' Description: Find real cluster for a single posting_id Use this function when working with TfidVectorizer Parameters: argument1 (int): position of posting_id in DataFrame Returns: list of all posting_id's ''' l_g = 
(df_train_tfidf.iloc[i].at['label_group']) df_red = df_train_tfidf[df_train_tfidf['label_group'] == l_g] df_red_list = df_red['posting_id'].tolist() return df_red_list def pred_cluster_of_i_tfidf(i,threshold): ''' Description: Find predicted cluster for a single posting_id Use this function when working with TfidVectorizer Parameters: argument1 (int): position of posting_id in DataFrame Returns: list of all posting_id's ''' list1 = [] list2 = [] list3 = [] for j in range(len(corpus)): list1.append(round(dist2(i, j),3)) list2.append(labels_tfidf[j]) list3.append(posting_id_tfidf[j]) df_nlp = pd.DataFrame(data = [list1,list2,list3]).transpose() df_nlp = df_nlp[df_nlp[0] <= threshold] ls = df_nlp[2].tolist() return ls def pred_cluster_of_i_w2v(i,threshold): ''' Description: Find predicted cluster for a single posting_id Use this function when working with Word2Vec Parameters: argument1 (int): position of posting_id in DataFrame Returns: list of all posting_id's ''' list1 = [] list2 = [] list3 = [] for j in range(34250): i_vec_1 = df_train_w2v['word_vec'][j] i_vec_2 = df_train_w2v['word_vec'][i] list1.append(round(get_sim_two_pi(i_vec_1, i_vec_2),4)) list2.append(labels[j]) list3.append(posting_id[j]) df_nlp = pd.DataFrame(data = [list1,list2,list3]).transpose() df_nlp = df_nlp.sort_values(by = 0) df_nlp = df_nlp[df_nlp[0] >= threshold] ls = df_nlp[2].tolist() return ls # - # # TfidfVectorizer # ## Creating a feature vector # In order to compare to images, we will first transform each title into a feature vector. The distance of the respective images can then be measured by the distance between these two vectors. 
# Create a function to clean all the titles of our images def clean_title(title): title = title.replace("-", " ").replace("//", " ").replace("[", " ").replace("]", " ").replace("(", " ").replace(")", " ") title = title.replace("+", " ").replace("/", " ").replace("x", " ").replace("x", " ").replace("\\", " ") title = title.replace(",", " ") return title df_train_tfidf = load_trainCsv() df_train_tfidf['cleanTitle'] = df_train_tfidf['title'].apply(clean_title) # + corpus = df_train_tfidf['cleanTitle'].to_list() tokenizer = text.Tokenizer() tokenizer.fit_on_texts(corpus) word_index = tokenizer.word_index st_words = nltk.corpus.stopwords.words("english") st_words.extend(nltk.corpus.stopwords.words("indonesian")) # Deleting all words which consist only of one or two letters for w in word_index.keys(): if len(w)<3: st_words.append(w) if w.isnumeric(): st_words.append(w) # - # The hyperparameter max_features is very important. It determines the length of the feature vectors. The longer these vectors are, the more precise our predictions will be. On the other hand, this will make calculations slower. # + # Now vectorize all our string included in the corpus; use TfidfVectorizer from Sklearn vectorizer = TfidfVectorizer(stop_words=st_words, max_features=2500) vectorizer.fit(corpus) label_vec = vectorizer.transform(corpus) label_vec = label_vec.toarray() # - # ### Defining a distance function # There are theoretically infinite possibilities to define distance functions as we can use the Minkowski Metric with any natural number p. Here we just experimented with p=1 (the so called Manhattan Metric) and p=2 (The Euclidean Metric). It turned out that the Euclidean Metric gives us better results. 
def dist2(x,y):
    """Euclidean distance between the Tfidf feature vectors of two images.

    Parameters:
    x, y (int): positions of the two images in our DataFrame

    Returns:
    float: the Euclidean (p=2 Minkowski) distance between the two vectors
    """
    a = label_vec[x]
    b = label_vec[y]
    # Vectorized with NumPy instead of a Python-level loop over all 2500
    # components — this function is called once per corpus row per query.
    dist = np.linalg.norm(a - b)  # (Euclidean Metric)
    # dist = np.abs(a - b).sum()  # (Manhattan-Metric)
    return dist


# ### Finding semantically similar pictures
# We now have a look at a particular picture and display the 15 images out of our data set which are semantically closest to this picture.

# +
labels_tfidf = df_train_tfidf['label_group'].to_list()
posting_id_tfidf = df_train_tfidf['posting_id'].to_list()

# Distance of image 8400 to every other image in the corpus.
dist_ls = []
for i in range(len(corpus)):
    dist_ls.append(round(dist2(8400, i),2))

# Column 0 = distance, column 1 = label_group, column 2 = posting_id.
df_nlp = pd.DataFrame(data = [dist_ls,labels_tfidf,posting_id_tfidf]).transpose()
df_nlp = df_nlp.sort_values(by = 0)
df_nlp.head(15)
# -

# In column 0 we see the distance the respective image (8400) has to the image represented by the row number. In column 1 we see the cluster this image belongs to. We notice that the semantically nearest neighbours of image 8400 are images which actually belong to the same cluster. (Of course, our metric does not always perform that well.)
#
# One important hyperparameter we will have to tune is the threshold. This threshold determines how "close" another image has to be to the image we're just looking at in order to make the assumption that the two images display the same product. The table above indicates that the threshold could be at around 0.85.

# ### Estimating the F1-Score
# In order to estimate the F1-Score we can expect from that method, we now apply our f_score_i function to a certain number of images. The exact F1-Score is the mean of these accuracy values of all the 34250 images.
#
# What does the f_score_i function do: First construct the intersection of the two sets which represent the real cluster and predicted cluster. Then divide the length of this intersection through the sum of the length of both real and predicted cluster.
# The result is then multiplied by two and is finally returned.

# +
# Estimate the mean F1-Score on a sample of every 100th image.
pred_tfidf = []
for i in range(100):
    clreal = real_cluster_of_i_tfidf(i*100)
    clpred = pred_cluster_of_i_tfidf(i*100,0.7)
    pr = f_score_i(clreal,clpred)
    pred_tfidf.append(pr)
# -

sum(pred_tfidf)/len(pred_tfidf)

# ### Excursion: PCA
# In order to decrease the calculation time, we now will do a dimensionality reduction via Principal Component Analysis.

# +
def create_fit_PCA(data, n_components=300):
    """Fit a PCA with `n_components` components on `data` and return it."""
    p = PCA(n_components=n_components, random_state=42)
    p.fit(data)
    return p


feat_vec_pca = create_fit_PCA(label_vec)
vec_pca = feat_vec_pca.transform(label_vec)


def dist_pca(x,y):
    """Euclidean distance between the PCA-reduced vectors of two images.

    Parameters:
    x, y (int): positions of the two images in our DataFrame

    Returns:
    float: the Euclidean (p=2) distance between the reduced vectors
    """
    a = vec_pca[x]
    b = vec_pca[y]
    # BUGFIX: the original summed only over range(100) although the PCA keeps
    # 300 components, silently ignoring two thirds of each vector. Use the
    # full (vectorized) norm so the distance reflects all retained components.
    dist = np.linalg.norm(a - b)  # p=2 (Euclidean Metric)
    # dist = np.abs(a - b).sum()  # p=1 (Manhattan-Metric)
    return dist
# +
dist_ls_pca = []
for i in range(len(corpus)):
    dist_ls_pca.append(round(dist_pca(1100, i),2))

df_nlp = pd.DataFrame(data = [dist_ls_pca,labels_tfidf,posting_id_tfidf]).transpose()
df_nlp = df_nlp.sort_values(by = 0)
df_nlp.head(15)
# -

# We see that calculation time decreases a lot. Looking at some examples, we can see that the results we get are not very reliable. Future work on this issue could include checking out precisely the benefits and the limits of the PCA method for the NLP approach.

# # Word2Vec
# As the TfidfVectorizer needs quite a lot of computation time, we now try out a second NLP method: Word2Vec

# ### Preprocessing etc.
# +
# Kindly borrowed from <NAME>
# https://www.kaggle.com/coder247/similarity-using-word2vec-text


def lemmatize_stemming(text):
    """Lemmatize a single token (as a verb) and then stem it."""
    return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))


def preprocess(text):
    """Tokenize `text`, drop stopwords/short tokens, lemmatize+stem the rest.

    Parameters:
    text (str): a raw title

    Returns:
    list of str: cleaned, stemmed tokens
    """
    result = []
    for token in gensim.utils.simple_preprocess(text):
        if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
            if token == 'xxxx':  # placeholder token — skip it
                continue
            result.append(lemmatize_stemming(token))
    return result


def word2vec_model(size_feat_vec=50):
    """Build and train a Word2Vec model on the global `processed_docs`.

    Parameters:
    size_feat_vec (int): dimensionality of the word vectors

    Returns:
    the trained gensim Word2Vec model
    """
    w2v_model = Word2Vec(min_count=1,
                         window=3,
                         vector_size=size_feat_vec,
                         sample=6e-5,
                         alpha=0.03,
                         min_alpha=0.0007,
                         negative=20)
    w2v_model.build_vocab(processed_docs)
    w2v_model.train(processed_docs, total_examples=w2v_model.corpus_count, epochs=300, report_delay=1)
    return w2v_model


# +
df_train_w2v = load_trainCsv()
processed_docs = df_train_w2v['title'].map(preprocess)
processed_docs = list(processed_docs)
df_train_w2v['preprocess_title'] = processed_docs
df_train_w2v[['posting_id','preprocess_title']][0:2]
# -

# ### Building a Word2Vec Model

# +
build_new_model_bool = False

if build_new_model_bool:
    w2v_model = word2vec_model()
    w2v_model.save('word2vec_model')
else:
    # NOTE(review): saving uses gensim's own .save() while loading uses raw
    # pickle — confirm the two round-trip for the gensim version in use.
    with open('word2vec_model', 'rb') as fh:  # close the handle deterministically
        w2v_model = pickle.load(fh)

emb_vec = w2v_model.wv
# -

# ### Create a feature vector

# +
def get_feature_vec_v2(sen1, model, size_feat_vec=50):
    """Sentence embedding: sum of the tokens' word vectors, L2-normalized.

    Parameters:
    sen1 (list of str): preprocessed tokens of one title
    model: mapping word -> vector (e.g. gensim KeyedVectors)
    size_feat_vec (int): dimensionality of the word vectors

    Returns:
    numpy.ndarray: unit-length feature vector; the all-zero vector if the
    token vectors sum to zero (e.g. `sen1` is empty)
    """
    sen_vec1 = np.zeros(size_feat_vec)
    for val in sen1:
        sen_vec1 = np.add(sen_vec1, model[val])
    vec_norm = norm(sen_vec1)
    # BUGFIX: guard against division by zero — an empty token list would
    # otherwise return a vector of NaNs and poison every later similarity.
    if vec_norm == 0:
        return sen_vec1
    return sen_vec1/vec_norm


df_train_w2v['word_vec'] = df_train_w2v.apply(
    lambda row: get_feature_vec_v2(row['preprocess_title'], emb_vec), axis=1)
df_train_w2v.head(2)
# -

# ### Calculate distances

# +
def get_sim_all_pi(i_vec_1,i_vec_all):
    """Dot product of one feature vector with a stack of feature vectors."""
    return i_vec_all.dot(i_vec_1)


def get_sim_two_pi(i_vec_1,i_vec_2):
    """Cosine similarity between two feature vectors."""
    sim = dot(i_vec_1,i_vec_2)/(norm(i_vec_1)*norm(i_vec_2))
    return sim


# +
# Finding semantically similar images, same as above
id_1 = 8400
i_vec_1 = df_train_w2v['word_vec'][id_1]
i_vec_all = df_train_w2v['word_vec'].values
i_vec_all = np.vstack(i_vec_all)

labels = df_train_w2v['label_group'].to_list()
posting_id = df_train_w2v['posting_id'].to_list()

list1 = list(get_sim_all_pi(i_vec_1,i_vec_all))

# Column 0 = similarity, column 1 = label_group, column 2 = posting_id;
# most similar postings first.
df_nlp = pd.DataFrame(data = [list1,labels,posting_id]).transpose()
df_nlp = df_nlp.sort_values(by = [0,1],ascending=False)
df_nlp.head(10)
# -

# ### Create clusters optimized on recall

# +
rec_values = []
cl_size = []
for i in range(100):
    clreal = real_cluster_of_i_w2v(i*100)
    clpred = pred_cluster_of_i_w2v(i*100,0.7)
    rec_values.append(recall_i(clreal,clpred))
    cl_size.append(len(clpred))

print("Mean Recall: ",sum(rec_values)/len(rec_values), " Mean Length of cluster: ", sum(cl_size)/len(cl_size))
# -

# By building clusters using a threshold of 0.7 we get a mean cluster size of around 180. On the other hand, we see that the mean recall value is close to 1.
#
# This is one of the most important findings of this notebook. That way we can reduce significantly the amount of images which possibly belong to a certain cluster: When looking for the cluster a certain image belongs to, instead of having to compare it to 34249 images, we now just need to look at 180, e.g. with visual methods. That way, the NLP method can be combined with other methods later on.

# ## Save results and combine them with pHash
# Now we want to predict clusters for all of the 34250 images using a high threshold (0.97). This way we'll obtain clusters which are optimized on precision. The results will be saved in a dictionary and then combined with the results obtained in the pHash Notebook. This combination leads to a F1-Score of 66 %.
# +
already_done = True

if not already_done:
    # Predict a precision-optimized cluster (threshold 0.97) for every posting.
    dict_nlp_prec_all_97 = {}
    list_post_id = df_train_w2v['posting_id'].tolist()
    for i in range(len(list_post_id)):
        dict_nlp_prec_all_97[list_post_id[i]] = set(pred_cluster_of_i_w2v(i,0.97))
        if i%1000 == 0:
            # Display progress and checkpoint the partial result
            # (with-statements close the file handles deterministically).
            print(i)
            with open("dict_nlp_prec_all_97.p", "wb") as fh:
                pickle.dump(dict_nlp_prec_all_97, fh)
    # Final save
    with open("dict_nlp_prec_all_97.p", "wb") as fh:
        pickle.dump(dict_nlp_prec_all_97, fh)

# Load results
with open("dict_nlp_prec_all_97.p", "rb") as fh:
    dict_nlp_prec_all_97_load = pickle.load(fh)


# +
# Combine two predictions
def combi_pred(pred1, pred2):
    """Merge two posting_id -> set-of-posting_id predictions by set union.

    Parameters:
    pred1, pred2 (dict): predicted clusters keyed by posting_id

    Returns:
    dict: posting_id -> union of both predicted sets

    Raises:
    KeyError: if a posting_id of pred1 is missing in pred2
    """
    combi_set = {}
    for pid in pred1.keys():
        # Explicit check instead of `assert` (asserts vanish under `python -O`);
        # a membership test on the dict itself is O(1) — no need to rebuild a
        # set of pred2's keys on every iteration as the original did.
        if pid not in pred2:
            raise KeyError(pid)
        combi_set[pid] = pred1[pid].union(pred2[pid])
    return combi_set


# Load results from the pHash Notebook
with open("dict_phash_prec_all_9.p", "rb") as fh:
    dict_phash_prec_all_9_load = pickle.load(fh)

pred_nlp_phash = combi_pred(dict_nlp_prec_all_97_load, dict_phash_prec_all_9_load)

# +
# Calculate F1-Score of the combination NLP and pHash
fscores = []
for i in range(len(df_train_w2v)):
    x = f_score_i(real_cluster_of_i_w2v(i), pred_nlp_phash[df_train_w2v.posting_id.values[i]])
    fscores.append(x)

print("F1-Score: ", sum(fscores)/len(fscores))
# -

# ## Future Work
# - Most important: Saving (via pickle) clusters optimized on recall and combining them with visual methods based on CNNs
# - Experimenting more on the hyperparameters of the W2V model
# - Using further NLP models based on the transformer framework (such as BERT)
# - Finding out if PCA can be applied in order to decrease computation time without losing too much accuracy
NLP-Notebook.ipynb