code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fine Tuning Transformer for MultiLabel Text Classification # ### Introduction # # In this tutorial we will be fine tuning a transformer model for the **Multilabel text classification** problem. # This is one of the most common business problems where a given piece of text/sentence/document needs to be classified into one or more of categories out of the given list. For example a movie can be categorized into 1 or more genres. # # #### Flow of the notebook # # The notebook will be divided into seperate sections to provide a organized walk through for the process used. This process can be modified for individual use cases. The sections are: # # 1. [Importing Python Libraries and preparing the environment](#section01) # 2. [Importing and Pre-Processing the domain data](#section02) # 3. [Preparing the Dataset and Dataloader](#section03) # 4. [Creating the Neural Network for Fine Tuning](#section04) # 5. [Fine Tuning the Model](#section05) # 6. [Validating the Model Performance](#section06) # 7. [Saving the model and artifacts for Inference in Future](#section07) # # #### Technical Details # # This script leverages on multiple tools designed by other teams. Details of the tools used below. Please ensure that these elements are present in your setup to successfully implement this script. # # - Data: # - We are using the Jigsaw toxic data from [Kaggle](https://www.kaggle.com/) # - This is competion provide the souce dataset [Toxic Comment Competition](https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge) # - We are referring only to the first csv file from the data dump: `train.csv` # - There are rows of data. 
Where each row has the following data-point: # - Comment Text # - `toxic` # - `severe_toxic` # - `obscene` # - `threat` # - `insult` # - `identity_hate` # # Each comment can be marked for multiple categories. If the comment is `toxic` and `obscene`, then for both those headers the value will be `1` and for the others it will be `0`. # # # - Language Model Used: # - BERT is used for this project. It was the transformer model created by the Google AI Team. # - [Blog-Post](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html) # - [Research Paper](https://arxiv.org/abs/1810.04805) # - [Documentation for python](https://huggingface.co/transformers/model_doc/bert.html) # # --- # ***NOTE*** # - *It is to be noted that the outputs to the BERT model are different from DistilBert Model implemented by the Hugging Face team. There are no `token_type_ids` generated from the tokenizer in case of Distilbert and also the final outputs from the network differ.* # - *This will be explained further in the notebook* # --- # # - Hardware Requirements: # - Python 3.6 and above # - Pytorch, Transformers and All the stock Python ML Libraries # - GPU enabled setup # # # - Script Objective: # - The objective of this script is to fine tune BERT to be able to label a comment into the following categories: # - `toxic` # - `severe_toxic` # - `obscene` # - `threat` # - `insult` # - `identity_hate` # # --- # ***NOTE*** # - *It is to be noted that the overall mechanisms for a multiclass and multilabel problems are similar, except for few differences namely:* # - *Loss function is designed to evaluate all the probability of categories individually rather than as compared to other categories. Hence the use of `BCE` rather than `Cross Entropy` when defining loss.* # - *Sigmoid of the outputs calcuated to rather than Softmax. 
Again for the reasons defined in the previous point* # - *The [accuracy metrics](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html) and [F1 scores](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score) used from sklearn package as compared to direct comparison of expected vs predicted* # --- # <a id='section01'></a> # ### Importing Python Libraries and preparing the environment # # At this step we will be importing the libraries and modules needed to run our script. Libraries are: # * Pandas # * Pytorch # * Pytorch Utils for Dataset and Dataloader # * Transformers # * BERT Model and Tokenizer # # Followed by that we will preapre the device for GPU execeution. This configuration is needed if you want to leverage on onboard GPU. # # *I have included the code for TPU configuration, but commented it out. If you plan to use the TPU, please comment the GPU execution codes and uncomment the TPU ones to install the packages and define the device.* # + colab={"base_uri": "https://localhost:8080/", "height": 105} colab_type="code" id="WD_vnyLXZQzD" outputId="b9664704-6c75-4bce-ee89-49746c8a9c89" # Installing the transformers library and additional libraries if looking process # # !pip install -q transformers # Code for TPU packages install # # !curl -q https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py # # !python pytorch-xla-env-setup.py --apt-packages libomp5 libopenblas-dev # + colab={} colab_type="code" id="pzM1_ykHaFur" # Importing stock ml libraries import numpy as np import pandas as pd from sklearn import metrics import transformers import torch from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler from transformers import BertTokenizer, BertModel, BertConfig import os os.environ["CUDA_VISIBLE_DEVICES"] = "3" # Preparing for TPU usage # import torch_xla # import torch_xla.core.xla_model as xm # 
device = xm.xla_device() # + colab={} colab_type="code" id="NLxxwd1scQNv" # # Setting up the device for GPU usage from torch import cuda device = 'cuda' if cuda.is_available() else 'cpu' # - # <a id='section02'></a> # ### Importing and Pre-Processing the domain data # # We will be working with the data and preparing for fine tuning purposes. # *Assuming that the `train.csv` is already downloaded, unzipped and saved in your `data` folder* # # * Import the file in a dataframe and give it the headers as per the documentation. # * Taking the values of all the categories and coverting it into a list. # * The list is appened as a new column and other columns are removed # + colab={"base_uri": "https://localhost:8080/", "height": 195} colab_type="code" id="mZ7lTlkyaG7u" outputId="ad5af998-9cf9-4f23-9a0e-23c2e3c0e3a1" df = pd.read_csv("./data/train.csv.zip") df['list'] = df[df.columns[2:]].values.tolist() new_df = df[['comment_text', 'list']].copy() new_df.head() # - # <a id='section03'></a> # ### Preparing the Dataset and Dataloader # # We will start with defining few key variables that will be used later during the training/fine tuning stage. # Followed by creation of CustomDataset class - This defines how the text is pre-processed before sending it to the neural network. We will also define the Dataloader that will feed the data in batches to the neural network for suitable training and processing. # Dataset and Dataloader are constructs of the PyTorch library for defining and controlling the data pre-processing and its passage to neural network. For further reading into Dataset and Dataloader read the [docs at PyTorch](https://pytorch.org/docs/stable/data.html) # # #### *CustomDataset* Dataset Class # - This class is defined to accept the `tokenizer`, `dataframe` and `max_length` as input and generate tokenized output and tags that is used by the BERT model for training. 
# - We are using the BERT tokenizer to tokenize the data in the `comment_text` column of the dataframe.
# - The tokenizer uses the `encode_plus` method to perform tokenization and generate the necessary outputs, namely: `ids`, `attention_mask`, `token_type_ids`
# ---
# - *This is the first difference between the distilbert and bert, where the tokenizer generates the token_type_ids in case of Bert*
# ---
# - To read further into the tokenizer, [refer to this document](https://huggingface.co/transformers/model_doc/bert.html#berttokenizer)
# - `targets` is the list of categories labelled as `0` or `1` in the dataframe.
# - The *CustomDataset* class is used to create 2 datasets, for training and for validation.
# - *Training Dataset* is used to fine tune the model: **80% of the original data**
# - *Validation Dataset* is used to evaluate the performance of the model. The model has not seen this data during training.
#
# #### Dataloader
# - Dataloader is used for creating training and validation dataloaders that load data to the neural network in a defined manner. This is needed because all the data from the dataset cannot be loaded into memory at once, hence the amount of data loaded into memory and then passed to the neural network needs to be controlled.
# - This control is achieved using parameters such as `batch_size` and `max_len`.
# - Training and Validation dataloaders are used in the training and validation part of the flow respectively

# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["4ff1e0e231244d79a4d1ebdc2e7b8235", "51d4c2a94e654b83916d011c65248d04", "a7354b526df9448d8fd6c4a264c2ac24", "1b67b76cd9394e3b8addf6195ca9611a", "b9510b24e7e64b9d822d025a9b30757b", "84c7d4fb46924b7fa412d188ea7e05da", "23b26013d0144f468af29109e37e3a60", "b5dd7f6b28d542ea9d3831028c888f44"]} colab_type="code" id="ikfbFlNHgi8T" outputId="21188d80-cd21-4d66-bdf0-8308af26a0fd"
# Sections of config

# Defining some key variables that will be used later on in the training
MAX_LEN = 200
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 4
EPOCHS = 1
LEARNING_RATE = 1e-05
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')


# + colab={} colab_type="code" id="oFOylAXqiNYK"
class CustomDataset(Dataset):
    """Tokenizes comment rows for BERT multilabel classification.

    Each item is a dict of tensors: `ids`, `mask`, `token_type_ids`
    (LongTensors of length `max_len`) plus `targets` (FloatTensor of the
    six 0/1 label flags stored in the dataframe's `list` column).
    """

    def __init__(self, dataframe, tokenizer, max_len):
        self.tokenizer = tokenizer
        self.data = dataframe
        self.comment_text = dataframe.comment_text
        self.targets = self.data.list
        self.max_len = max_len

    def __len__(self):
        return len(self.comment_text)

    def __getitem__(self, index):
        comment_text = str(self.comment_text[index])
        # Collapse all runs of whitespace/newlines to single spaces.
        comment_text = " ".join(comment_text.split())

        inputs = self.tokenizer.encode_plus(
            comment_text,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            # FIX: `pad_to_max_length=True` is deprecated and does not make
            # truncation explicit; `padding`/`truncation` guarantee every
            # sample is exactly `max_len` tokens long.
            padding='max_length',
            truncation=True,
            return_token_type_ids=True
        )
        ids = inputs['input_ids']
        mask = inputs['attention_mask']
        token_type_ids = inputs["token_type_ids"]

        return {
            'ids': torch.tensor(ids, dtype=torch.long),
            'mask': torch.tensor(mask, dtype=torch.long),
            'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
            # Float targets, as required by BCEWithLogitsLoss.
            'targets': torch.tensor(self.targets[index], dtype=torch.float)
        }


# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="PkDGqarcPowL" outputId="e834ea5d-5d00-4bb6-ad52-6f21e744da95"
# Creating the dataset and dataloader for the neural network
train_size = 0.8
train_dataset = new_df.sample(frac=train_size, random_state=200)
test_dataset = new_df.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)

print("FULL Dataset: {}".format(new_df.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(test_dataset.shape))

training_set = CustomDataset(train_dataset, tokenizer, MAX_LEN)
testing_set = CustomDataset(test_dataset, tokenizer, MAX_LEN)

# + colab={} colab_type="code" id="vLpilV73QrXJ"
train_params = {'batch_size': TRAIN_BATCH_SIZE,
                'shuffle': True,
                'num_workers': 0
                }

# FIX: shuffling the validation data serves no purpose (the metrics are
# order-independent) and makes evaluation runs non-deterministic.
test_params = {'batch_size': VALID_BATCH_SIZE,
               'shuffle': False,
               'num_workers': 0
               }

training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
# -

# <a id='section04'></a>
# ### Creating the Neural Network for Fine Tuning
#
# #### Neural Network
# - We will be creating a neural network with the `BERTClass`.
# - This network will have the `Bert` model. Followed by a `Dropout` and `Linear Layer`. They are added for the purpose of **Regularization** and **Classification** respectively.
# - In the forward loop, there are 2 output from the `BertModel` layer.
# - The second output `output_1` or called the `pooled output` is passed to the `Drop Out layer` and the subsequent output is given to the `Linear layer`.
# - Keep note the number of dimensions for `Linear Layer` is **6** because that is the total number of categories in which we are looking to classify our model.
# - The data will be fed to the `BertClass` as defined in the dataset.
# - Final layer outputs is what will be used to calculate the loss and to determine the accuracy of models prediction.
# - We will initiate an instance of the network called `model`. This instance will be used for training and then to save the final trained model for future inference.
#
# #### Loss Function and Optimizer
# - The Loss is defined in the next cell as `loss_fn`.
# - As defined above, the loss function used will be a combination of Binary Cross Entropy which is implemented as [BCELogits Loss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss) in PyTorch
# - `Optimizer` is defined in the next cell.
# - `Optimizer` is used to update the weights of the neural network to improve its performance.
#
# #### Further Reading
# - You can refer to my [Pytorch Tutorials](https://github.com/abhimishra91/pytorch-tutorials) to get an intuition of Loss Function and Optimizer.
# - [Pytorch Documentation for Loss Function](https://pytorch.org/docs/stable/nn.html#loss-functions)
# - [Pytorch Documentation for Optimizer](https://pytorch.org/docs/stable/optim.html)
# - Refer to the links provided on the top of the notebook to read more about `BertModel`.

# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["29b605fec9d34649888ee7ec36b89595", "2bd7ade54b7841f2840e5187d9e3bc99", "fa0ae9bc665b47d89ebee73bf6ddaccf", "6cb5f40281524c1ba78f1260cadbfe66", "6ee0645e22b54ceebff09091598bce28", "448cd06bb18548a39d5fb88adad5cb20", "bfbb87e4c4ad44b285edd376b5659a3a", "c4e387689be7453d9c65b1b3bbd99d7d", "7d2a9c4d56524de7a3783cdf0760254d", "46c0394c31e44192a4ada6073cd915bc", "<KEY>", "<KEY>", "e8bcca8b06834420a6863751c4ed53e3", "19acfedb0f974ad1aac62da45273d5fb", "<KEY>", "ddec16ebb40a4ab9ba3172959596ecc2"]} colab_type="code" id="DegHNyIEQxB2" outputId="9d8c15f8-aa49-4c4e-e1c9-e112eb0ed19b"
# Creating the customized model, by adding a dropout and a dense layer on top
# of BERT to get the final output for the model.
class BERTClass(torch.nn.Module):
    """BERT encoder + dropout + linear head producing 6 raw logits,
    one per toxicity category (pair with BCEWithLogitsLoss)."""

    def __init__(self):
        super(BERTClass, self).__init__()
        self.l1 = transformers.BertModel.from_pretrained('bert-base-uncased')
        self.l2 = torch.nn.Dropout(0.3)
        self.l3 = torch.nn.Linear(768, 6)

    def forward(self, ids, mask, token_type_ids):
        # BertModel returns (sequence_output, pooled_output); only the pooled
        # [CLS] representation is needed for classification.
        _, output_1 = self.l1(ids, attention_mask=mask, token_type_ids=token_type_ids)
        # FIX: the dropout layer was defined (and documented above as the
        # regularizer) but was bypassed in the original forward pass.
        output_2 = self.l2(output_1)
        output = self.l3(output_2)
        return output


model = BERTClass()
model.to(device)


# + colab={} colab_type="code" id="7KnNeQx6SI78"
def loss_fn(outputs, targets):
    """Multilabel loss: element-wise sigmoid + binary cross entropy over the
    six category logits (BCEWithLogitsLoss fuses both for stability)."""
    return torch.nn.BCEWithLogitsLoss()(outputs, targets)


# + colab={} colab_type="code" id="gUD8j0c7WsA-"
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
# -

# <a id='section05'></a>
# ### Fine Tuning the Model
#
# After all the effort of loading and preparing the data and datasets, creating the model and defining its loss and optimizer. This is probably the easier steps in the process.
#
# Here we define a training function that trains the model on the training dataset created above, specified number of times (EPOCH), An epoch defines how many times the complete data will be passed through the network.
#
# Following events happen in this function to fine tune the neural network:
# - The dataloader passes data to the model based on the batch size.
# - Subsequent output from the model and the actual category are compared to calculate the loss.
# - Loss value is used to optimize the weights of the neurons in the network.
# - After every 5000 steps the loss value is printed in the console.
#
# As you can see just in 1 epoch by the final step the model was working with a miniscule loss of 0.022 i.e. the network output is extremely close to the actual output.

# + colab={} colab_type="code" id="B9_DjWmfWx1q"
def train(epoch):
    """Run one full pass over the training data, updating the model weights.

    `epoch` is only used in the progress print.
    """
    model.train()
    for step, data in enumerate(training_loader, 0):
        ids = data['ids'].to(device, dtype=torch.long)
        mask = data['mask'].to(device, dtype=torch.long)
        token_type_ids = data['token_type_ids'].to(device, dtype=torch.long)
        targets = data['targets'].to(device, dtype=torch.float)

        outputs = model(ids, mask, token_type_ids)
        loss = loss_fn(outputs, targets)
        if step % 5000 == 0:
            print(f'Epoch: {epoch}, Loss:  {loss.item()}')

        # FIX: the original called optimizer.zero_grad() twice per step;
        # once, immediately before backward, is sufficient.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


# + colab={"base_uri": "https://localhost:8080/", "height": 87} colab_type="code" id="D4Yl7gXHYSRU" outputId="94be5496-6ab5-4744-b8db-c6370fbd9f88"
for epoch in range(EPOCHS):
    train(epoch)
# -

# <a id='section06'></a>
# ### Validating the Model
#
# During the validation stage we pass the unseen data(Testing Dataset) to the model. This step determines how good the model performs on the unseen data.
#
# This unseen data is the 20% of `train.csv` which was seperated during the Dataset creation stage.
# During the validation stage the weights of the model are not updated. Only the final output is compared to the actual value. This comparison is then used to calcuate the accuracy of the model.
#
# As defined above to get a measure of our models performance we are using the following metrics.
# - Accuracy Score
# - F1 Micro
# - F1 Macro
#
# We are getting amazing results for all these 3 categories just by training the model for 1 Epoch.
# + colab={} colab_type="code" id="nIEoUm4aQkyl"
def validation(epoch):
    """Score the held-out split without touching the weights.

    Returns (fin_outputs, fin_targets): per-sample lists of the six sigmoid
    probabilities and the six ground-truth flags. `epoch` is unused and kept
    for symmetry with train().
    """
    model.eval()
    fin_targets = []
    fin_outputs = []
    # Inference only: disable autograd bookkeeping entirely.
    with torch.no_grad():
        for batch_idx, data in enumerate(testing_loader, 0):
            ids = data['ids'].to(device, dtype=torch.long)
            mask = data['mask'].to(device, dtype=torch.long)
            token_type_ids = data['token_type_ids'].to(device, dtype=torch.long)
            targets = data['targets'].to(device, dtype=torch.float)

            outputs = model(ids, mask, token_type_ids)

            # Logits -> independent per-label probabilities via sigmoid,
            # accumulated on the CPU as plain Python lists.
            fin_targets.extend(targets.cpu().detach().numpy().tolist())
            fin_outputs.extend(torch.sigmoid(outputs).cpu().detach().numpy().tolist())
    return fin_outputs, fin_targets


# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="Ov1_3R_pAcMo" outputId="96d0de09-1943-44b0-9f48-692f045e2863"
for epoch in range(EPOCHS):
    outputs, targets = validation(epoch)
    # Threshold the probabilities at 0.5 to obtain hard 0/1 predictions.
    outputs = np.array(outputs) >= 0.5
    accuracy = metrics.accuracy_score(targets, outputs)
    f1_score_micro = metrics.f1_score(targets, outputs, average='micro')
    f1_score_macro = metrics.f1_score(targets, outputs, average='macro')
    print(f"Accuracy Score = {accuracy}")
    print(f"F1 Score (Micro) = {f1_score_micro}")
    print(f"F1 Score (Macro) = {f1_score_macro}")
# -

# <a id='section07'></a>
# ### Saving the Trained Model Artifacts for inference
#
# This is the final step in the process of fine tuning the model.
#
# The model and its vocabulary are saved locally. These files are then used in the future to make inference on new inputs of news headlines.
#
# Please remember that a trained neural network is only useful when used in actual inference after its training.
#
# In the lifecycle of an ML projects this is only half the job done. We will leave the inference of these models for some other day.
multi_label_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Toy regression: teach a single linear unit to approximate max(a, b)
# over uniform inputs in [0, 1], using MXNet's Module API.

import logging
import math
import random

import mxnet as mx
import numpy as np

logging.getLogger().setLevel(logging.DEBUG)

n_sample = 1000
batch_size = 1
learning_rate = 0.1
n_epoch = 1

# Each sample is a pair of uniform [0, 1) draws.
train_in = [[random.uniform(0, 1) for c in range(2)] for n in range(n_sample)]
# FIX: idiom — build the target directly instead of pre-filling a list of
# zeros and mutating it by index.
train_out = [max(pair[0], pair[1]) for pair in train_in]

train_iter = mx.io.NDArrayIter(data=np.array(train_in),
                               label={'reg_label': np.array(train_out)},
                               batch_size=batch_size,
                               shuffle=True)

# One fully-connected unit feeding a linear-regression (L2) output.
src = mx.sym.Variable('data')
fc = mx.sym.FullyConnected(data=src, num_hidden=1, name='fc')
net = mx.sym.LinearRegressionOutput(data=fc, name='reg')

# FIX: redundant parentheses around the list literal.
module = mx.mod.Module(symbol=net, label_names=['reg_label'])


def epoch_callback(epoch, symbol, arg_params, aux_params):
    """Dump every learned parameter array at the end of each epoch."""
    for k in arg_params:
        print(k)
        print(arg_params[k].asnumpy())


module.fit(train_iter,
           eval_data=None,
           eval_metric=mx.metric.create('mse'),
           optimizer='sgd',
           optimizer_params={'learning_rate': learning_rate},
           num_epoch=n_epoch,
           batch_end_callback=mx.callback.Speedometer(batch_size, 100),
           epoch_end_callback=epoch_callback)
jupyter_notebook/mxnet/max-feed.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # MAT281 - Laboratorio N°10
#
#
# <a id='p1'></a>
# ## I.- Problema 01
#
#
# <img src="https://www.goodnewsnetwork.org/wp-content/uploads/2019/07/immunotherapy-vaccine-attacks-cancer-cells-immune-blood-Fotolia_purchased.jpg" width="360" height="360" align="center"/>
#
#
# El **cáncer de mama** es una proliferación maligna de las células epiteliales que revisten los conductos o lobulillos mamarios. Es una enfermedad clonal; donde una célula individual producto de una serie de mutaciones somáticas o de línea germinal adquiere la capacidad de dividirse sin control ni orden, haciendo que se reproduzca hasta formar un tumor. El tumor resultante, que comienza como anomalía leve, pasa a ser grave, invade tejidos vecinos y, finalmente, se propaga a otras partes del cuerpo.
#
# El conjunto de datos se denomina `BC.csv`, el cual contiene la información de distintos pacientes con tumores (benignos o malignos) y algunas características del mismo.
#
#
# Las características se calculan a partir de una imagen digitalizada de un aspirado con aguja fina (FNA) de una masa mamaria. Describen las características de los núcleos celulares presentes en la imagen.
# Los detalles se puede encontrar en [K. <NAME> and <NAME>: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34].
#
#
# Lo primero será cargar el conjunto de datos:

# +
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale

# %matplotlib inline
sns.set_palette("deep", desat=.6)
sns.set(rc={'figure.figsize': (11.7, 8.27)})
# -

# Load the dataset and binarise the target: M (malignant) -> 1, B (benign) -> 0.
df = pd.read_csv(os.path.join("data", "BC.csv"), sep=",")
df['diagnosis'] = df['diagnosis'].replace({'M': 1, 'B': 0})  # target
df.head()

# Basado en la información presentada responda las siguientes preguntas:
#
# 1. Realice un análisis exploratorio del conjunto de datos.
# 1. Normalizar las variables numéricas con el método **StandardScaler**.
# 3. Realizar un método de reducción de dimensionalidad visto en clases.
# 4. Aplique al menos tres modelos de clasificación distintos. Para cada uno de los modelos escogidos, realice una optimización de los hiperparámetros. además, calcule las respectivas métricas. Concluya.
#
#
#
#
# 1. Análisis exploratorio del conjunto de datos.

# +
# Per-column summary of the dataframe.
def resumen_por_columna(df, cols):
    """Return a one-row DataFrame with the column name, its number of
    distinct values (NaN counted once) and its number of missing values."""
    pd_series = df[cols]
    df_info = pd.DataFrame({
        'columna': [cols],
        'unicos': [len(pd_series.unique())],
        # FIX: counting missing values via a boolean sum avoids building
        # an intermediate filtered Series just to take its length.
        'vacios': [int(pd_series.isna().sum())]
    })
    return df_info


# Idiom: build the per-column frames with a comprehension instead of an
# explicit accumulator loop.
df_info = pd.concat(
    [resumen_por_columna(df, col) for col in df.columns]
).reset_index(drop=True)

df_info['% vacios'] = df_info['vacios'] / len(df)
df_info
# -

df['diagnosis'].unique()

# # 2. Normalizar las variables numéricas con el método StandardScaler.

# +
from sklearn.preprocessing import StandardScaler

df.set_index('id', inplace=True)

scaler = StandardScaler()
columns = df.columns
# NOTE(review): this standardises every remaining column, including the
# binary target `diagnosis` — presumably intentional for the PCA section
# below, but worth confirming before reusing `df` as a modelling target.
df[columns] = scaler.fit_transform(df[columns])
df
# -

df.describe()

# # 3. Realizar un método de reducción de dimensionalidad visto en clases.
# +
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline

# Fit a PCA model with data scaling
# ==============================================================================
pca_pipe = make_pipeline(StandardScaler(), PCA())
pca_pipe.fit(df)

# Extract the fitted PCA model from the pipeline
modelo_pca = pca_pipe.named_steps['pca']

# Convert the loadings array to a DataFrame so the axes carry names.
pd.DataFrame(
    data=modelo_pca.components_,
    columns=df.columns,
    index=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9',
           'PC10', 'PC11', 'PC12', 'PC13', 'PC14', 'PC15', 'PC16', 'PC17',
           'PC18', 'PC19', 'PC20', 'PC21', 'PC22', 'PC23', 'PC24', 'PC25',
           'PC26', 'PC27', 'PC28', 'PC29', 'PC30', 'PC31']
)

# +
# Plot the variance explained per component
percent_variance = np.round(modelo_pca.explained_variance_ratio_ * 100, decimals=2)
columns = ['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9',
           'PC10', 'PC11', 'PC12', 'PC13', 'PC14', 'PC15', 'PC16', 'PC17',
           'PC18', 'PC19', 'PC20', 'PC21', 'PC22', 'PC23', 'PC24', 'PC25',
           'PC26', 'PC27', 'PC28', 'PC29', 'PC30', 'PC31']

plt.figure(figsize=(18, 8))
plt.bar(x=range(1, 32), height=percent_variance, tick_label=columns)
plt.xticks(np.arange(modelo_pca.n_components_) + 1)
# FIX: the axis labels were swapped — the components run along x and the
# explained-variance percentage along y.
plt.ylabel('Por. varianza explicada')
plt.xlabel('Componente principal')
plt.title('Porcentaje de varianza explicada por cada componente')
plt.show()

# +
# Plot the cumulative explained variance over the components
percent_variance_cum = np.cumsum(percent_variance)
columns = ['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9',
           'PC10', 'PC11', 'PC12', 'PC13', 'PC14', 'PC15', 'PC16', 'PC17',
           'PC18', 'PC19', 'PC20', 'PC21', 'PC22', 'PC23', 'PC24', 'PC25',
           'PC26', 'PC27', 'PC28', 'PC29', 'PC30', 'PC31']

plt.figure(figsize=(18, 8))
plt.bar(x=range(1, 32), height=percent_variance_cum, tick_label=columns)
plt.ylabel('Percentate of Variance Explained')
plt.xlabel('Principal Component Cumsum')
plt.title('PCA Scree Plot')
plt.show()
# -

# Project the training observations onto the principal components
# ==============================================================================
proyecc = pca_pipe.transform(X=df)
proyecc = pd.DataFrame(
    proyecc,
    columns=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9',
             'PC10', 'PC11', 'PC12', 'PC13', 'PC14', 'PC15', 'PC16', 'PC17',
             'PC18', 'PC19', 'PC20', 'PC21', 'PC22', 'PC23', 'PC24', 'PC25',
             'PC26', 'PC27', 'PC28', 'PC29', 'PC30', 'PC31'],
    index=df.index
)
proyecc.head()

# +
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline

# Second PCA model, keeping only 11 components, fitted on the projections
# (already centred/scaled, so no scaler in this pipeline).
# ==============================================================================
pca_pipe = make_pipeline(PCA(n_components=11))
pca_pipe.fit(proyecc)

# Extract the fitted PCA model from the pipeline
modelo_pca = pca_pipe.named_steps['pca']

# Convert the loadings array to a DataFrame so the axes carry names.
# FIX: these loadings are expressed over `proyecc`'s PC columns, not over the
# original feature names — labelling them with `df.columns` was misleading.
pd.DataFrame(
    data=modelo_pca.components_,
    columns=proyecc.columns,
    index=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9',
           'PC10', 'PC11']
)

# +
# Plot the variance explained per component
# FIX: `modelo_pca_proyectado` was never defined (NameError); the fitted
# model extracted above is `modelo_pca`.
percent_variance = np.round(modelo_pca.explained_variance_ratio_ * 100, decimals=2)
columns = ['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9',
           'PC10', 'PC11']

plt.figure(figsize=(18, 8))
plt.bar(x=range(1, 12), height=percent_variance, tick_label=columns)
plt.xticks(np.arange(modelo_pca.n_components_) + 1)
plt.ylabel('Por. varianza explicada')
plt.xlabel('Componente principal')
plt.title('Porcentaje de varianza explicada por cada componente')
plt.show()
# -

# Project the training observations onto the 11 retained components
# ==============================================================================
# FIX: the second pipeline was fitted on `proyecc`, so it must also be applied
# to `proyecc` — transforming `df` fed the wrong feature space.
proyecciones = pca_pipe.transform(X=proyecc)
proyecciones = pd.DataFrame(
    proyecciones,
    columns=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9',
             'PC10', 'PC11'],
    index=df.index
)
proyecciones.head()

# +
# Reconstruction from the projections
# ==============================================================================
recostruccion = pca_pipe.inverse_transform(X=proyecciones)
recostruccion = pd.DataFrame(
    recostruccion,
    columns=proyecc.columns,
    index=df.index
)
# FIX: the two displays were swapped — the original values are in `proyecc`
# (the input to this PCA) and the reconstructed values in `recostruccion`.
print('------------------')
print('Valores originales')
print('------------------')
display(proyecc.head())

print('---------------------')
print('Valores reconstruidos')
print('---------------------')
display(recostruccion.head())
# -

# # 4. Aplique al menos tres modelos de clasificación distintos. Para cada uno de los modelos escogidos, realice una optimización de los hiperparámetros. además, calcule las respectivas métricas. Concluya.
# +
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
import time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import StackingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

# Reload the raw (unscaled) data and binarise the target for the classifiers.
df2 = pd.read_csv(os.path.join("data", "BC.csv"), sep=",")
df2['diagnosis'] = df2['diagnosis'] .replace({'M': 1, 'B': 0})  # target
# -

# Método 1: Logistic Regression

# +
X = df2.drop(columns='diagnosis').values
Y = df2['diagnosis'].values

# Hold out 20% of the rows for evaluation, with a fixed seed.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=2)
# -

# Build and fit the model
rlog = LogisticRegression(solver='newton-cg', max_iter=2000)
rlog.fit(X_train, Y_train)

rlog.score(X_train, Y_train)

# +
# metrics
from metrics_classification import *
from sklearn.metrics import confusion_matrix

y_true = list(Y_test)
y_pred = list(rlog.predict(X_test))

print('Valores:\n')
print('originales: ', y_true)
print('predicho: ', y_pred)
# -

print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true, y_pred))

# +
# Summary-metrics table for this model.
df_temp = pd.DataFrame({
    'y': y_true,
    'yhat': y_pred
})

df_metrics = summary_metrics(df_temp)
print("\nMetricas para los regresores : 'datos de pacientes'")
print("")
df_metrics
# -

# En un principio se utilizó el solver ‘lbfgs’ pero daba métricas bajas prediciendo deficientemente, por lo que se optó por probar con el solver ‘newton-cg’. Este dio como resultado métricas mayores y predijo mejor.
# Cabe mencionar que se le aumentó el número de iteraciones porque no convergía, a pesar de esto, el warning sigue apareciendo.

# Método 2: MLPClassifier

# +
from sklearn.neural_network import MLPClassifier

# Build and fit a small multilayer perceptron.
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X_train, Y_train)
# -

clf.score(X_train, Y_train)

# +
y_true2 = list(Y_test)
y_pred2 = list(clf.predict(X_test))

print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true2, y_pred2))

# +
df_temp2 = pd.DataFrame({
    'y': y_true2,
    'yhat': y_pred2
})

df_metrics2 = summary_metrics(df_temp2)
print("\nMetricas para los regresores : ")
print("")
df_metrics2
# -

# A pesar de los varios intentos para mejorar las métricas, moviendo todos los hiperparámetros no se pudo obtener un mejor resultado.

# Método 3: K-Nearest Neighbours

# +
from sklearn.neighbors import KNeighborsClassifier

# Build and fit a 1-nearest-neighbour classifier.
model = KNeighborsClassifier(n_neighbors=1)
model.fit(X_train, Y_train)
# -

model.score(X_train, Y_train)

# +
y_true3 = list(Y_test)
y_pred3 = list(model.predict(X_test))

print('\nMatriz de confusion:\n ')
print(confusion_matrix(y_true3, y_pred3))

# +
df_temp3 = pd.DataFrame({
    'y': y_true3,
    'yhat': y_pred3
})

df_metrics3 = summary_metrics(df_temp3)
print("\nMetricas para los regresores : ")
print("")
df_metrics3
# -

# Al igual que en el anterior modelo, se probó varios hiperparámetros, pero ninguno logró mejorar las métricas, ni siquiera probando con árbol de decisión.
# Por otro lado, probé distintas vecindarios pero resultó que con 1 daba el mejor resultado.

# Conclusiones:
# A partir de los resultados obtenidos con estos 3 métodos de clasificación, se concluye que en este caso el mejor método esel logístico, ya que es el que mejor métricas arroja.
# Luego, el segundo mejor método es K-Nearest Neighbours ya que es el que le sigue en métricas al modelo logístico y finalmente el peor modelo viene siendo MLPClassifier ya que dio métricas bajas.
labs/lab_10_BLN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sklearn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import random
from sklearn.ensemble import GradientBoostingClassifier as gbc

# Product hierarchy: every row of mch_categories.tsv carries a category
# `code` whose prefixes encode the hierarchy levels (3-, 5- and 7-character
# codes) -- assumed from the prefix-matching below; TODO confirm schema.
items = pd.read_csv('mch_categories.tsv', sep='\t')
items.head(10)

# Group the category codes by their length (= hierarchy depth).
hierarchy_dict = {}
for code in items['code']:
    if len(code) not in hierarchy_dict:
        hierarchy_dict[len(code)] = []
    hierarchy_dict[len(code)].append(code)

hierarchy_dict.keys()

# Indicator columns: items[key] == 1 when the row's code starts with `key`.
for length, keys in hierarchy_dict.items():
    for key in keys:
        items[key] = items['code'].apply(lambda row: int(row[:length] == key))

# NOTE(review): the result of dropna() is discarded, so this line is a no-op;
# assign it back (items = items.dropna()) if dropping rows was intended.
items.dropna()

# Integer ids (1-based) for the codes at each hierarchy level.
dict_3, dict_5, dict_7 = {}, {}, {}
for i, key in enumerate(hierarchy_dict[3], start=1):
    dict_3[key] = i
for j, key in enumerate(hierarchy_dict[5], start=1):
    dict_5[key] = j
# Fixed: the original iterated hierarchy_dict[3] here, which filled dict_7
# with 3-character codes and left the 7-character level without ids.
for k, key in enumerate(hierarchy_dict[7], start=1):
    dict_7[key] = k

products = pd.read_csv('products.txt', sep='\t', header=None)
transactions = pd.read_json('transactions.txt', lines=True)

items['name'].nunique()


def reciept2items(reciept_list, id_exchange):
    """Map the first ~1000 receipts to lists of item ids.

    Args:
        reciept_list: DataFrame with an 'itemList' column of purchase dicts.
        id_exchange: lookup table; column 0 is the raw item code, column 1
            the item id and column 2 the item name (presumed from usage --
            TODO confirm against products.txt).

    Returns:
        dict mapping receipt index -> list of item ids; 'Plastic Bags'
        entries are skipped.
    """
    items = {}
    for ix, item_list in enumerate(reciept_list['itemList']):
        items[ix] = []
        for item_dict in item_list:
            item_df = item_dict['item']
            item_name = (id_exchange[id_exchange[0] == item_df][2].values[0])
            item_id = (id_exchange[id_exchange[0] == item_df][1].values[0])
            if item_name != 'Plastic Bags':
                items[ix].append(item_id)
        if ix > 1000:
            break
    return items


item_dict = reciept2items(transactions, products)


def load_data(items, item_dict, batch_size=50, maxlen=8):
    """Endless generator of (input, target) batches of indicator sequences.

    Targets are the inputs shifted one step forward (next-item prediction).
    """
    train, test = [], []
    while True:
        for reciept in list(item_dict.values()):
            # Indicator matrix for the receipt's items, reversed in order.
            result = items[items['code'].isin(reciept)].loc[:, 'M02':].to_numpy()[::-1]
            if result.shape[0] < maxlen + 1:
                continue
            train.append(result[:maxlen])
            test.append(result[1:maxlen + 1])
            if len(train) % batch_size == 0:
                yield np.stack(train, axis=0), np.stack(test, axis=0)
                train, test = [], []


m = load_data(items, item_dict, 50)
next(m)[0].shape


def load_data_emb(item_dict, batch_size=8, maxlen=8):
    """Endless generator of id sequences at the 3/5/7-character code levels."""
    while True:
        i = 0
        res_1, res_2, res_3 = [], [], []
        for lst in list(item_dict.values()):
            if len(lst) < maxlen + 1:
                continue
            lst = lst[:maxlen]
            r = np.zeros(len(lst))
            for ix, itm in enumerate(lst):
                for pos, key in enumerate(dict_3.keys()):
                    if key == itm[:3]:
                        r[ix] = dict_3[key]
            res_1.append(r)
            r = np.zeros(len(lst))
            for ix, itm in enumerate(lst):
                for pos, key in enumerate(dict_5.keys()):
                    if key == itm[:5]:
                        r[ix] = dict_5[key]
            res_2.append(r)
            # Fixed: the original reused (and mutated) the level-5 array
            # here, corrupting the element just appended to res_2.
            r = np.zeros(len(lst))
            for ix, itm in enumerate(lst):
                for pos, key in enumerate(dict_7.keys()):
                    if key == itm[:7]:
                        r[ix] = dict_7[key]
            res_3.append(r)
            if i % batch_size == 0 and i:
                res_1 = np.stack(res_1, axis=0)
                res_2 = np.stack(res_2, axis=0)
                res_3 = np.stack(res_3, axis=0)
                yield (res_1[:maxlen], res_1[1:]), (res_2[:maxlen], res_2[1:]), (res_3[:maxlen], res_3[1:])
                res_1, res_2, res_3 = [], [], []
            i += 1


m = load_data_emb(item_dict)
next(m)


def LSTM_model(maxlen=8):
    """Stacked LSTM/Conv1D sequence model over 916-way indicator vectors."""
    # Sequential approach to product prediction
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.InputLayer(input_shape=(maxlen, 916)))
    model.add(tf.keras.layers.LSTM(256, activation='relu', dropout=0.2, return_sequences=True))
    model.add(tf.keras.layers.Conv1D(filters=64, kernel_size=5, padding='same', activation='relu'))
    model.add(tf.keras.layers.LSTM(256, activation='relu', recurrent_dropout=0.2, return_sequences=True))
    model.add(tf.keras.layers.Conv1D(filters=64, kernel_size=5, padding='same', activation='relu'))
    model.add(tf.keras.layers.LSTM(256, activation='relu', recurrent_dropout=0.2, return_sequences=True))
    model.add(tf.keras.layers.Conv1D(filters=64, kernel_size=5, padding='same', activation='relu'))
    model.add(tf.keras.layers.LSTM(256, activation='relu', recurrent_dropout=0.2, return_sequences=True))
    model.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(916, activation='softmax')))
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
    return model


# Fixed: the original called the undefined name get_model().
model = LSTM_model()
model.summary()
model.fit_generator(load_data(items, item_dict, batch_size=4), steps_per_epoch=100, epochs=10)
Loblaws recommendation system.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.10 64-bit (''PythonData'': conda)' # language: python # name: python361064bitpythondataconda7465b2e4d16447d5bf5dcc356c678f6e # --- # Dependencies from splinter import Browser from bs4 import BeautifulSoup as bs import time import pandas as pd import requests #Set Path and initialize chromedriver executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=False) # Scrapping Mars News nasa_url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest' browser.visit(nasa_url) # + jupyter={"outputs_hidden": true} # Create a html object of the site html_news = browser.html #Parse the website HTML with beautifulSoup soup_news = bs(html_news, 'html.parser') #Print results print(soup_news.prettify()) # - #Find the element slideElement = soup_news.select_one('ul.item_list li.slide') #slideElement slideElement.find("div", class_='content_title').get_text() news_title = slideElement.find("div", class_='content_title').get_text() news_title news_p = slideElement.find("div", class_ = 'article_teaser_body').get_text() news_p # ## JPL Mars Space Images - Featured Image # Set Path and initialize chromedriver executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=False) # Scrapping JPL Mars Space Images jpl_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(jpl_url) # Use splinter to navigate site and find feature mars image url full_image = browser.find_by_id("full_image") full_image.click() browser.is_element_present_by_text("more info", wait_time=1) more_info_element = browser.find_link_by_partial_text("more info") more_info_element.click() html_jpl = browser.html 
soup_jpl = bs(html_jpl, 'html.parser') featured_image_url = soup_jpl.select_one("figure.lede a img").get("src") featured_image_url = f'https://www.jpl.nasa.gov{featured_image_url}' print(featured_image_url) # ## Mars Facts #Use pandas to scrape table mars_url = pd.read_html("https://space-facts.com/mars/")[0] print(mars_url) #Rename columns mars_url.columns=['Description', 'Value'] mars_url.set_index('Description', inplace=True) mars_url #Convert data to HTML and save mars_url.to_html('Resources/mars_url.html') #Convert data to HTML table string mars_url_facts = mars_url.to_html(header=True, index=True) print(mars_url_facts) # ## Mars Hemispheres # Set path and initiate chromedriver executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=False) hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars" browser.visit(hemisphere_url) # + #Create list to store url string and title hemisphere_list = [] # Get list all_links_url = browser.find_by_css("a.product-item h3") for x in range(len(all_links_url)): hemi = {} #find element in each loop browser.find_by_css("a.product-item h3")[x].click() #find sample tag and extract href sample_tag = browser.find_link_by_text("Sample").first hemi["img_url"] = sample_tag["href"] #extract title hemi["title"] = browser.find_by_css("h2.title").text #append object to list hemisphere_list.append(hemi) browser.back() # - hemisphere_list
mission_to_mars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # Import Knowledge Graphs in RDF standard (nt files) # + from os import system from config import execute from src.utils_clickhouse import load_from_ch_to_df, upload_to_ch # + DROP_TABLE_CALIGRAPH = False CREATE_TABLE_CALIGRAPH = False DROP_TABLE_WIKIDATA = True CREATE_TABLE_WIKIDATA = True DIR_URL = 'CaLiGraph/' # - # ## Drop and Create the knowledge_graph_raw table # + pycharm={"name": "#%%\n"} if DROP_TABLE_CALIGRAPH: execute(''' DROP TABLE IF EXISTS knowledge_graph_raw ''') if CREATE_TABLE_CALIGRAPH: execute(''' CREATE TABLE IF NOT EXISTS knowledge_graph_raw( source String, subject String, predicate String, object String ) ENGINE = ReplacingMergeTree() ORDER BY (source, subject, predicate, object) ''') if DROP_TABLE_WIKIDATA: execute(''' DROP TABLE IF EXISTS knowledge_graph_wikidata_raw ''') if CREATE_TABLE_WIKIDATA: execute(''' CREATE TABLE IF NOT EXISTS knowledge_graph_wikidata_raw( source String, subject String, predicate String, object String ) ENGINE = ReplacingMergeTree() ORDER BY (source, subject, predicate, object) ''') # + [markdown] pycharm={"name": "#%% md\n"} # ## Upload data to ClickHouse # ### Bash scripts # #### Upload CSV # `cat test_insert.csv | clickhouse-client --query " # INSERT INTO knowledge_graph_raw # SELECT # 'test_insert.csv', # subject, # predicate, # object # FROM input('index String, subject String, predicate String, object String') # FORMAT CSV" ` # #### Upload NT # `cat test_insert.nt | clickhouse-client --query " # INSERT INTO knowledge_graph_raw # SELECT # 'test_insert.nt', # subject, # object = '' OR object is Null? Null: predicate, # object = '' OR object is Null? 
predicate: object # FROM ( # SELECT # replace(subject_, 'http://', '') as subject, # replace(predicate_, 'http://', '') as predicate, # replace(object_, 'http://', '') as object # FROM input('subject_ String, predicate_ String, object_ String') # ) # FORMAT Regexp # SETTINGS format_regexp='[<|\\"](.+?)[>|\\"] [<|\\"](.+?)[>|\\"] [<|\\"](.+?)[>|\\"]\s\.', format_regexp_escaping_rule='Escaped'" ` # #### Upload NT without transformation # `cat test_insert.nt | clickhouse-client --query " # INSERT INTO knowledge_graph_raw # SELECT # 'test_insert.nt', # subject_, # object_ = '' OR object_ is Null? Null: predicate_, # object_ = '' OR object_ is Null? predicate_: object_ # FROM input('subject_ String, predicate_ String, object_ String') # FORMAT Regexp # SETTINGS format_regexp='([<|\\"].+?[>|\\"])\s([<|\\"].+?[>|\\"])\s([<|\\"].+?[>|\\"])\s\.', format_regexp_escaping_rule='Escaped'" ` # #### Upload NT from gz-archive # `zcat latest-truthy.nt.gz | clickhouse-client --query " # INSERT INTO knowledge_graph_wikidata_raw # SELECT # 'latest-truthy.nt.gz', # subject, # object = '' OR object is Null? Null: predicate, # object = '' OR object is Null? 
predicate: object # FROM ( # SELECT # replace(subject_, 'http://', '') as subject, # replace(predicate_, 'http://', '') as predicate, # replace(object_, 'http://', '') as object # FROM input('subject_ String, predicate_ String, object_ String') ) # FORMAT Regexp # SETTINGS format_regexp='[<|\"|_](.+?)[\s][<|\"|_](.+?)[\s][<|\"|_](.+?)[\s\.]', # format_regexp_escaping_rule='Escaped' # ` # - # ### File list for uploading # + pycharm={"name": "#%%\n"} file_names = ['caligraph-instances_dbpedia-mapping.nt', 'caligraph-instances_relations.nt', 'caligraph-instances_provenance.nt', 'caligraph-instances_types.nt', 'caligraph-instances_labels.nt', 'caligraph-instances_transitive-types.nt', 'caligraph-ontology.nt', 'caligraph-ontology_provenance.nt', 'caligraph-ontology_dbpedia-mapping.nt', 'dbpedia_caligraph-relations.nt', 'dbpedia_caligraph-types.nt', 'dbpedia_caligraph-instances.nt'] # - # ### Code # + pycharm={"name": "#%%\n"} for file_name in file_names: upload_to_ch(file_name, dir_url=DIR_URL, transformation=False) # + pycharm={"name": "#%%\n"} print(f"Number of links: {execute('SELECT count() FROM knowledge_graph_raw')[0][0]:>,}") # + pycharm={"name": "#%%\n"} print('Example of data') load_from_ch_to_df('SELECT * FROM knowledge_graph_raw LIMIT 20', columns=['source', 'subject', 'predicate', 'object']) # + [markdown] pycharm={"name": "#%% md\n"} # ## Upload Wikidata from gz-archive # + pycharm={"name": "#%%\n"} upload_to_ch(file_name='latest-truthy.nt.gz', dir_url='Wikidata/', gz=True, transformation=True, target_table='knowledge_graph_wikidata_raw') # + pycharm={"name": "#%%\n"}
upload_nt_to_ch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Jako malí jsme hrávali hru na hledání pokladu. Jeden v pokoji schoval nějaký poklad, a ostatním jej pak hledali s nápovědou: "samá voda, samá voda, přihořívá, hoří!". # Vzpomněl jsem si na to, když jsem si četl zadání úkolu [Shadows of the Knight - Episode 2](https://www.codingame.com/training/expert/shadows-of-the-knight-episode-2) na CodingGame. # # Po prvním přečtení zadání jsem si dost naivně říkal, tak v čem je problém? Napovídají, že je dobré použít binární hledání a dělení intervalu. To vše umím, tak rovnou na to. # # Nakonec to zase tak jednoduché nebylo. # # Tak o co vlastně šlo? # # Zadání úkolu # Dostanu zadané dvourozměrné pole o velikosti WIDTH x HEIGHT políček. Na jednom políčku je umístěna bomba, kterou mám najít. # # Hra začíná tak, že se objevím na nějakém místě v tomto poli. Mohu skočit na libovolné políčko v tomto dvourozměrném poli. Po každém skoku dostanu informaci, jestli jsem blíže, dále nebo stejně daleko od bomby. Vzdálenost se v tomto případě počítá jako _Euklejdovská vzdálenost_ ve dvourozměrném prostoru. # # Úkolem je co nejmenším počtem skoků najít políčko s bombou. # # # Základní úvahy o řešení # Jak už sami autoři hry naznačovali, je dobré použít binární hledání. # Obvykle se tento postup prezentuje na jednoduchém setříděném poli. V tomto případě jsem ve dvou dimenzích, ale mělo by to jít použít stejně. # # Nejdříve budu hledat v jednom rozměru, např. v jednom řádku. Až najdu tu správnou pozici v řádku, budu hledat ve sloupci. # # Postup hledání v jednom rozměru je takový, že se snažím každým skokem rozdělit celý interval na dvě poloviny. Na polovinu, která je blíže bombě, no a na tu druhou, která je dále od bomby. V dalším kroku pak opět dělím ten interval blíže bomby. 
# And all that repeats until the interval shrinks down to a single square.
#
# ## Splitting the interval with a jump
#
# I had to sketch it to see how to jump so that the interval gets halved.
#
# There are two possible situations:
#
# ![binary_search_1.jpg](attachment:binary_search_1.jpg)
#
# The green check mark is my current position. The red cross is the position
# I should jump to in order to split the interval into two halves.
#
# In the first case I currently stand inside the interval to be split, so the
# target position is again inside the interval, mirrored around its centre.
#
# In the second case I stand outside the interval, so the target position
# must again be outside the interval, mirrored around its centre.
#
# That is the essential idea. What follows is just programming legwork.

# # Preparing the test environment
#
# First a couple of libraries and basic initialization:

import random
import math

# A test class whose job is to report my position relative to the bomb:


class TestSuite2D:
    """Game oracle: answers how each probed position relates to a hidden bomb.

    The bomb sits on an integer grid of size ``width`` x ``height``; when no
    explicit ``target`` is supplied, one is drawn uniformly at random.
    """

    def __init__(self, width, height, target=None):
        if target is None:
            target = (random.randrange(width), random.randrange(height))
        self.target = target
        self.last = None  # previous probe; None until the first evaluate()

    def evaluate(self, value):
        """Return the hint for probing position ``value``.

        'UNKNOWN' on the very first probe (nothing to compare against),
        'FOUND' when the probe hits the bomb, otherwise 'WARMER', 'COLDER'
        or 'SAME' by comparing the Euclidean distance to the previous probe.
        """
        previous, self.last = self.last, value
        if previous is None:
            return 'UNKNOWN'
        if value == self.target:
            return 'FOUND'

        def squared_distance(a, b):
            # Comparing squared distances is equivalent to comparing the
            # Euclidean ones (sqrt is monotonic) and avoids any rounding.
            return (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2

        before = squared_distance(self.target, previous)
        now = squared_distance(self.target, value)
        if now > before:
            return 'COLDER'
        if now < before:
            return 'WARMER'
        return 'SAME'

    def validate(self, value):
        """True exactly when ``value`` is the bomb position."""
        return value == self.target

    def __str__(self):
        return f"TestSuite2D[target={self.target}, last={self.last}]"

# At construction time I pass in the grid dimensions and the bomb position.
# When the bomb position is omitted, it is placed at random.
# From an instance of this class I will call the __evaluate__ method with my
# current position.
#
# I expect one of these answers:
# * WARMER - I am closer to the bomb
# * COLDER - I am farther from the bomb
# * SAME - I am at the same distance from the bomb
# * FOUND - I found it
# * UNKNOWN - on the first query, because there is no previous position yet

# # Searching within a single row
#
# First I wrote a function that searches within one row.
# For testing I will use the 2-D test class, so I also verify the dependence
# on the Euclidean distance.
#
# ## Function for searching in one dimension

def binary_search_1d(width, x0, f):
    """Locate the bomb's column by repeatedly halving a candidate interval.

    Args:
        width: number of columns on the board (columns 0 .. width - 1).
        x0: starting column of the player.
        f: oracle called with a 2-D position ``(x, 0)``; it answers one of
           'WARMER', 'COLDER', 'SAME', 'FOUND' or 'UNKNOWN' (first call).

    Returns:
        The column index of the bomb.

    Raises:
        ValueError: if the oracle returns an unexpected answer.
    """
    # [lower_x, upper_x] is the candidate interval; prev_x/x are the
    # previous and the current probe positions.
    lower_x, upper_x, prev_x, x = 0, width - 1, x0, x0

    def next_index(low, high, max_x, curr):
        # Mirror the current index around the interval centre; nudge by one
        # when the mirror lands on the current spot, and clamp to the board.
        i = high + low - curr
        if i == curr:
            i = i - 1 if curr > 0 else i + 1
        return max(0, min(max_x, i))

    while True:
        check_distance = f((x, 0))
        if check_distance == 'WARMER':
            # Closer: keep the half of the interval on the current side.
            if x < prev_x and lower_x < upper_x:
                upper_x = prev_x - math.ceil((abs(x - prev_x) + 1) / 2)
            elif x > prev_x and lower_x < upper_x:
                lower_x = prev_x + math.ceil((abs(x - prev_x) + 1) / 2)
        elif check_distance == 'COLDER':
            # Farther: keep the half of the interval on the previous side.
            if x < prev_x and lower_x < upper_x:
                lower_x = x + math.ceil((abs(x - prev_x) + 1) / 2)
            elif x > prev_x and lower_x < upper_x:
                upper_x = x - math.ceil((abs(x - prev_x) + 1) / 2)
        elif check_distance == 'SAME':
            # Equidistant probes straddle the bomb exactly in the middle.
            return (x + prev_x) // 2
        elif check_distance == 'UNKNOWN':
            pass  # first probe: nothing to compare against yet
        elif check_distance == 'FOUND':
            return x
        else:
            raise ValueError
        prev_x, x = x, next_index(lower_x, upper_x, width - 1, x) if lower_x < upper_x else lower_x
    # (The original ended with an unreachable `return x` after this
    # infinite loop; it has been removed.)

# # Verifying that it works correctly
#
# I made several test runs over a board dimension of 1, an odd and an even
# size. For each dimension I then try every combination of possible bomb
# position and starting position.
# This should verify that the algorithm works in all situations:

HEIGHT, j = 9, 5
for WIDTH in (1, 10, 11):
    for i in range(WIDTH):
        for x in range(WIDTH):
            suite = TestSuite2D(WIDTH, HEIGHT, (i, j))
            res = binary_search_1d(WIDTH, x, lambda x: suite.evaluate(x))
            if not suite.validate((res, j)):
                print("FAILED")
print("FINISHED")

# # Searching the two-dimensional board
#
# Here I finally arrive at the final solution.
#
# I search first within the row, and then within the column:

def binary_search_2d_next(width, height, point, f):
    """Locate the bomb on a ``width`` x ``height`` board, starting at ``point``.

    The column interval is narrowed first; once x has settled, the row
    interval is narrowed the same way. ``f`` is the oracle called with the
    probed (x, y) position and answering 'WARMER', 'COLDER', 'SAME',
    'FOUND' or 'UNKNOWN' (first call only).

    Returns:
        The (x, y) position of the bomb.

    Raises:
        ValueError: if the oracle returns an unexpected answer.
    """
    # [lower_x, upper_x] and [lower_y, upper_y] are the candidate intervals;
    # (prev_x, prev_y) is the previous probe, (x, y) the current one.
    lower_x, upper_x, prev_x, x = 0, width - 1, point[0], point[0]
    lower_y, upper_y, prev_y, y = 0, height - 1, point[1], point[1]

    def next_index(low, high, max_i, curr):
        # Mirror the current index around the interval centre; nudge by one
        # when the mirror coincides with the current index, and snap to the
        # interval edge when the mirror leaves the board.
        i = high + low - curr
        if i == curr:
            i = i - 1 if curr > 0 else i + 1
        elif i > max_i:
            i = high
        elif i < 0:
            i = low
        return i

    while True:
        check_distance = f((x, y))
        if check_distance == 'WARMER':
            # Closer: keep the half of the interval on the current side.
            # x is resolved first; y only narrows once x no longer moves,
            # hence the int(x == prev_x) rounding term on the y updates.
            if x < prev_x and lower_x < upper_x:
                upper_x = prev_x - math.ceil((abs(x - prev_x) + 1) / 2)
            elif x > prev_x and lower_x < upper_x:
                lower_x = prev_x + math.ceil((abs(x - prev_x) + 1) / 2)
            elif y < prev_y and lower_y < upper_y:
                upper_y = prev_y - math.ceil((abs(y - prev_y) + int(x == prev_x)) / 2)
            elif y > prev_y and lower_y < upper_y:
                lower_y = prev_y + math.ceil((abs(y - prev_y) + int(x == prev_x)) / 2)
        elif check_distance == 'COLDER':
            # Farther: keep the half of the interval on the previous side.
            if x < prev_x and lower_x < upper_x:
                lower_x = x + math.ceil((abs(x - prev_x) + 1) / 2)
            elif x > prev_x and lower_x < upper_x:
                upper_x = x - math.ceil((abs(x - prev_x) + 1) / 2)
            elif y < prev_y and lower_y < upper_y:
                lower_y = y + math.ceil((abs(y - prev_y) + int(x == prev_x)) / 2)
            elif y > prev_y and lower_y < upper_y:
                upper_y = y - math.ceil((abs(y - prev_y) + int(x == prev_x)) / 2)
        elif check_distance == 'SAME':
            # Equidistant probes straddle the target exactly in the middle:
            # collapse the corresponding interval to that midpoint (y only
            # once x has settled).
            if x != prev_x and lower_x < upper_x:
                lower_x, upper_x = (x + prev_x) // 2, (x + prev_x) // 2
            if y != prev_y and lower_y < upper_y and x == prev_x:
                lower_y, upper_y = (y + prev_y) // 2, (y + prev_y) // 2
        elif check_distance == 'UNKNOWN':
            pass  # first probe: nothing to compare against yet
        elif check_distance == 'FOUND':
            break
        else:
            raise ValueError
        # Advance x while its interval is still open; once x has settled
        # (or its interval has collapsed), start advancing y the same way.
        prev_x, x = x, next_index(lower_x, upper_x, width - 1, x) if lower_x < upper_x else lower_x
        if prev_x == x or (lower_x == upper_x and abs(prev_x - x) <= 1):
            prev_y, y = y, next_index(lower_y, upper_y, height - 1, y) if lower_y < upper_y else lower_y
    return x, y

# ## Verifying that it works
#
# Again I verify dimensions of 1, an even and an odd size. This time it
# concerns both the width and the height of the board.
#
# Again I run tests that check every possible bomb position together with
# every possible starting position:

# +
for WIDTH in (1, 10, 11):
    for HEIGHT in (1, 10, 11):
        for i in range(WIDTH):
            for j in range(HEIGHT):
                for x in range(WIDTH):
                    for y in range(HEIGHT):
                        suite = TestSuite2D(WIDTH, HEIGHT, (i, j))
                        res = binary_search_2d_next(WIDTH, HEIGHT, (x, y), lambda x: suite.evaluate(x))
                        if not suite.validate(res):
                            print("FAILED")
print("FINISHED")
BinarySearch2D.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Project 3 - Single Inheritance - Approach # I'm going to use an actual Python project with folders, modules, etc for this solution. # This notebook is simply detailing the sequence of steps I took to get at my final solution. # You can download the full solution from the resources in this video, or (preferrably) directly from the # [github repo](https://github.com/fbaptiste/python-deepdive) # #### Virtual Environment and pytest # I'm going to use `pytest` for testing in this project, so you should install it into your virtual environment. # Note that if you are not already using virtual environments for your projects I strongly suggest you do so. # Creating a virtual environment is incredibly easy. # # 1. create a folder for your project # 2. create a virtual environment named `env` (or any name you prefer) by typing this in a console from inside your new folder: # - `python -m venv env` # - note: if you have both Python 2.x and 3.x installed, you'll probably need to specify it as `python3 -m venv env` # - you should now have a new folder called `env` inside your project folder. # 3. Next you should activate your virtual environment. How you do this will differ on Windows vs Mac/Linux: # - Windows: `env\Scripts\activate` # - Linux/Mac: `source env/bin/activate` # - Your command prompt shoudl now reflect the activation of the virtual environment something like `(env)` at the beginning of the prompt. # To deactivate a virtual environment, simply type `deactivate`. # Next we need to install the `pytest` library. We want to install `pytest` in our virtual environment, so do this after **activating** your virtual environment - make sure your prompt reflects that first. 
# # Then install `pytest` by typing this: # `pip install -U pytest` # That's it, you now have a virtual environment that has `pytest`. # #### Project Steps # I'm going to provide proper docstrings for every module, class, function, etc. I will use the Google style of docstrings, which is documented [here](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) # 1. Create this folder hierarchy in the project root: # # ``` # <project root> # ....app # ........models # ........utils # ....tests # ........unit # ``` # # Note: there is no need to create packages (no `__init.py__`), we will simply use implicit namespace packages. # 2. Create a new module (`validators.py`) inside the `app/utils` package. In that module create a helper function `validate_integer` that will allow us to validate that a value is an integer, optionally between a min and max (inclusive), and raises a `TypeError`, `ValueError` with a custom error message that can be overriden when bound checks fail. # 3. Inside the `tests/unit` folder, create a new module called `test_validators.py` and create the unit tests for the `validate_integer` function. # 4. Run the unit tests and make sure all the tests pass. # - to run the unit tests, you can use your IDE's built-in way of doing it, or you can just use the command line, from the root of your project: # # `python -m pytest tests` # # (this will run all the tests found in that folder - you can specify more specific path to limit your tests further) # 5. In the `models` folder, create a new module file called `inventory.py`. # 6. Implement the `Resource` class # 7. Create a new file `test_resource.py` in the `tests` folder # 8. Create unit tests for the `Resource` class and make sure they all pass # 9. Create `CPU` class # 10. Unit test `CPU` class # 11. Create `Storage` Class # 12. Unit test `Storage` class # 13. Create `HDD` class # 14. Unit test `HDD` class # 15. Create `SDD` class # 16. Unit test `SDD` class
dd_1/Part 4/Section 07 - Project 3/Project 3 - Solution - Approach.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from prettypandas import PrettyPandas # + import pandas as pd import numpy as np np.random.seed(24) df = pd.DataFrame({'A': np.linspace(1, 10, 10)}) df = pd.concat([df, pd.DataFrame(np.random.randn(10, 4), columns=list('BCDE'))], axis=1) # - df # ## Summaries # Supported summaries: # # * total # * average # * min # * max # * median PrettyPandas(df) PrettyPandas(df).total() PrettyPandas(df).average() PrettyPandas(df).average(axis=1) PrettyPandas(df).average(axis=None) PrettyPandas(df).min().max() PrettyPandas(df).summary(np.mean, "Average") # ### Multiple Summaries PrettyPandas(df).multi_summary([np.mean, np.sum], ['Average', 'Total'], axis=0) # ## Number Formatting PrettyPandas(df).as_percent() PrettyPandas(df).as_money() PrettyPandas(df).as_percent(precision=3) PrettyPandas(df).as_money(currency=u"$", precision=3) PrettyPandas(df).as_unit('cm', location='suffix') PrettyPandas(df).as_percent(subset=['B']) PrettyPandas(df).as_percent(subset=['B']).total() PrettyPandas(df).as_percent(subset=['B']).total().highlight_max()
docs/PrettyPandas Demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Inception # language: python # name: inception # --- # + [markdown] toc="true" # # Table of Contents # <p> # + import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from matplotlib.ticker import FormatStrFormatter plt.style.use('ggplot') # Update parameters for matplotlib params = {"text.usetex": True, 'font.size' : 12, "text.latex.unicode": True, "axes.unicode_minus": True, 'xtick.color': 'black', 'ytick.color': 'black', 'axes.labelcolor': "black" } plt.rcParams.update(params) #mpl.rcParams['axes.linewidth'] = 0.3 #mpl.rcParams['axes.edgecolor'] = grey #mpl.rcParams['xtick.color'] = grey #mpl.rcParams['ytick.color'] = grey #mpl.rcParams['axes.labelcolor'] = "black" # + fig, axes = plt.subplots(ncols=3, nrows=1) left = 0.125 # the left side of the subplots of the figure right = 0.9 # the right side of the subplots of the figure bottom = 0.1 # the bottom of the subplots of the figure top = 0.9 # the top of the subplots of the figure wspace = 0.35 # the amount of width reserved for space between subplots, # expressed as a fraction of the average axis width hspace = 0.2 # the amount of height reserved for space between subplots, # expressed as a fraction of the average axis height plt.subplots_adjust(left, bottom, right, top, wspace, hspace) # fig height and width in inch fig.set_figheight(7.226/4) fig.set_figwidth(7.226) ax1, ax2, ax3 = axes.ravel() x = np.linspace(-6, 6) ax1.yaxis.set_major_formatter(FormatStrFormatter('%g')) ax1.set_xlabel('$z$') ax1.set_ylabel('$\sigma(z)$') ax1.set_title('Sigmoid',fontsize=12) ax1.plot(x, 1 / (1 + np.exp(-x)),color = 'blue' ) ax1.text(-6.5,0.8,r'$ \sigma(z) = \frac{1}{1+e^{-z}}$') ax2.yaxis.set_major_formatter(FormatStrFormatter('%g')) ax2.set_xlabel('$z$') ax2.set_ylabel('$\sigma(z)$') ax2.set_title('TanH',fontsize=12) ax2.plot(x, 
np.tanh(x) ,color = 'blue' ) ax2.text(-6.5,0.8,r'$ \sigma(z) = \!\mathrm{tanh}(z)$',fontsize=10) ax3.yaxis.set_major_formatter(FormatStrFormatter('%g')) ax3.set_xlabel('$z$') ax3.set_ylabel('$\sigma(z)$') ax3.set_title('ReLU',fontsize=12) ax3.plot(x, np.maximum(x, 0) ,color = 'blue' ) ax3.text(-5,4.5,r'$\sigma (z) = \mathrm{max}(z,0)$',fontsize=10) #\left\{\begin{array}{lll} 0 & \textrm{if} & z < 0 \\z & \textrm{if} & z \geq 0\\ \end{array}\right. plt.draw() plt.savefig("ActivationFunction.pdf", #This is simple recomendation for publication plots dpi=1000, # Plot will be occupy a maximum of available space bbox_inches='tight', ) plt.show() # -
Activation_functions/ActivationFunctionsPlot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # LSTM seq2seq SavedModel export
#
# Export a trained LSTM seq2seq chatbot (TensorFlow 1.x Estimator) as a
# SavedModel for serving. Inference-only: beam-search decoding, no training
# branch is built.

# + Mount Google Drive and switch to the project working directory.
from google.colab import drive
drive.mount('/content/gdrive')

import os
os.chdir('/content/gdrive/My Drive/finch/tensorflow1/free_chat/chinese_qingyun/main')
# -

# %tensorflow_version 1.x

# +
import tensorflow as tf
import numpy as np

print("TensorFlow Version", tf.__version__)
print('GPU Enabled:', tf.test.is_gpu_available())
# -


def rnn_cell():
    """Build the decoder RNN cell.

    Returns a single LSTM cell, or — when ``params['dec_layers'] > 1`` — a
    ``MultiRNNCell`` stack whose lower layers use residual (concat) wrappers.
    """
    def cell_fn():
        cell = tf.nn.rnn_cell.LSTMCell(params['rnn_units'],
                                       initializer=tf.orthogonal_initializer())
        return cell

    if params['dec_layers'] > 1:
        cells = []
        for i in range(params['dec_layers']):
            if i == params['dec_layers'] - 1:
                # Top layer: plain cell, no residual connection.
                cells.append(cell_fn())
            else:
                # Lower layers: residual connection concatenates input and output.
                cells.append(tf.nn.rnn_cell.ResidualWrapper(
                    cell_fn(),
                    residual_fn=lambda i, o: tf.concat((i, o), -1)))
        return tf.nn.rnn_cell.MultiRNNCell(cells)
    else:
        return cell_fn()


def dec_cell(enc_out, enc_seq_len):
    """Wrap the decoder cell with Bahdanau attention over the encoder output."""
    attn = tf.contrib.seq2seq.BahdanauAttention(
        num_units=params['rnn_units'],
        memory=enc_out,
        memory_sequence_length=enc_seq_len)
    return tf.contrib.seq2seq.AttentionWrapper(
        cell=rnn_cell(),
        attention_mechanism=attn,
        attention_layer_size=params['rnn_units'])


class TiedDense(tf.layers.Layer):
    """Output projection whose kernel is tied (shared) with the embedding table."""

    def __init__(self, tied_embed, out_dim):
        super().__init__()
        self.tied_embed = tied_embed  # embedding variable, used transposed as the kernel
        self.out_dim = out_dim

    def build(self, input_shape):
        # Only the bias is a fresh trainable weight; the kernel is the shared
        # embedding matrix.
        self.bias = self.add_weight(name='bias',
                                    shape=[self.out_dim],
                                    trainable=True)
        super().build(input_shape)

    def call(self, inputs):
        x = tf.matmul(inputs, self.tied_embed, transpose_b=True)
        x = tf.nn.bias_add(x, self.bias)
        return x

    def compute_output_shape(self, input_shape):
        return input_shape[:-1].concatenate(self.out_dim)


def forward(features, labels, mode):
    """Build the encoder/decoder graph and return top-k beam-search token ids."""
    words = features['words'] if isinstance(features, dict) else features
    words_len = tf.count_nonzero(words, 1, dtype=tf.int32)
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    batch_sz = tf.shape(words)[0]

    with tf.variable_scope('Embedding'):
        # Pre-trained fastText character vectors, loaded as the embedding table.
        embedding = tf.Variable(np.load('../vocab/char.npy'),
                                dtype=tf.float32,
                                name='fasttext_vectors')
        x = tf.nn.embedding_lookup(embedding, words)

    with tf.variable_scope('Encoder'):
        # Fused LSTM kernels are time-major: [time, batch, depth].
        t = tf.transpose(x, perm=[1, 0, 2])
        lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['rnn_units'])
        lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['rnn_units'])
        lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
        o_fw, s_fw = lstm_cell_fw(t, dtype=tf.float32, sequence_length=words_len)
        o_bw, s_bw = lstm_cell_bw(t, dtype=tf.float32, sequence_length=words_len)
        enc_out = tf.concat([o_fw, o_bw], axis=-1)
        enc_out = tf.transpose(enc_out, perm=[1, 0, 2])
        # Project the concatenated bidirectional final states down to rnn_units.
        enc_state = tf.layers.dense(tf.concat((s_fw.h, s_bw.h), -1),
                                    params['rnn_units'],
                                    params['activation'],
                                    name='state_fc')
        enc_state = tf.nn.rnn_cell.LSTMStateTuple(c=enc_state, h=enc_state)
        if params['dec_layers'] > 1:
            # Feed the same initial state to every decoder layer.
            enc_state = tuple(params['dec_layers'] * [enc_state])

    with tf.variable_scope('Decoder'):
        output_proj = TiedDense(embedding, len(params['char2idx']) + 1)
        # Tile encoder tensors across the beam for beam-search decoding.
        enc_out_t = tf.contrib.seq2seq.tile_batch(enc_out, params['beam_width'])
        enc_state_t = tf.contrib.seq2seq.tile_batch(enc_state, params['beam_width'])
        enc_seq_len_t = tf.contrib.seq2seq.tile_batch(words_len, params['beam_width'])
        cell = dec_cell(enc_out_t, enc_seq_len_t)
        init_state = cell.zero_state(batch_sz * params['beam_width'],
                                     tf.float32).clone(cell_state=enc_state_t)
        decoder = tf.contrib.seq2seq.BeamSearchDecoder(
            cell=cell,
            embedding=embedding,
            # Token id 1 is the start symbol, 2 the end symbol — presumably
            # fixed by the vocabulary file; confirm against training code.
            start_tokens=tf.tile(tf.constant([1], tf.int32), [batch_sz]),
            end_token=2,
            initial_state=init_state,
            beam_width=params['beam_width'],
            output_layer=output_proj,
            length_penalty_weight=params['length_penalty'],
            coverage_penalty_weight=params['coverage_penalty'],)
        decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
            decoder=decoder,
            maximum_iterations=params['max_len'],)

    # [batch, time, top_k] predicted token ids from the best beams.
    return decoder_output.predicted_ids[:, :, :params['top_k']]


def model_fn(features, labels, mode, params):
    """Estimator ``model_fn`` — only the PREDICT branch is needed for export."""
    logits_or_ids = forward(features, labels, mode)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=logits_or_ids)


params = {
    'model_dir': '../model/lstm_seq2seq',
    'export_dir': '../model/lstm_seq2seq_export',
    'vocab_path': '../vocab/char.txt',
    'rnn_units': 300,
    'max_len': 30,
    'activation': tf.nn.relu,
    'dec_layers': 1,
    'beam_width': 10,
    'top_k': 3,
    'length_penalty': .0,
    'coverage_penalty': .0,
}


def serving_input_receiver_fn():
    """Declare the serving signature: a batch of int32 token-id sequences."""
    words = tf.placeholder(tf.int32, [None, None], 'words')
    features = {'words': words}
    receiver_tensors = features
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)


def get_vocab(f_path):
    """Read one token per line from *f_path* and map it to its 0-based line index.

    The vocabulary holds Chinese characters, so the file is decoded explicitly
    as UTF-8 rather than with the platform default encoding (which could be a
    legacy codepage on some systems and corrupt or reject the file).
    """
    word2idx = {}
    with open(f_path, encoding='utf-8') as f:
        for i, line in enumerate(f):
            line = line.rstrip('\n')
            word2idx[line] = i
    return word2idx


# + Build the vocabulary maps, then export the trained model as a SavedModel.
params['char2idx'] = get_vocab(params['vocab_path'])
params['idx2char'] = {idx: char for char, idx in params['char2idx'].items()}

estimator = tf.estimator.Estimator(model_fn, params['model_dir'])
estimator.export_saved_model(params['export_dir'], serving_input_receiver_fn)
# -
finch/tensorflow1/free_chat/chinese_qingyun/main/lstm_seq2seq_export.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # [Learn Quantum Computing with Python and Q#](https://www.manning.com/books/learn-quantum-computing-with-python-and-q-sharp?a_aid=learn-qc-granade&a_bid=ee23f338)<br>Chapter 09 Exercise Solutions
# ----
# > Copyright (c) <NAME> and <NAME>.
# > Code sample from the book "Learn Quantum Computing with Python and Q#" by
# > <NAME> and <NAME>, published by Manning Publications Co.
# > Book ISBN 9781617296130.
# > Code licensed under the MIT License.

# ### Exercise 9.1
# **Use QuTiP to verify that the two operations `ApplyCNOT` and `ApplyCNOTTheOtherWay` can be simulated by the same unitary matrix, and thus do the exact same thing.**

# This first snippet shows the unitary matrix for `ApplyCNOT`, which is equivalent to the QuTiP function `cnot`.

# +
from qutip.qip.operations import cnot

cnot()
# -

# The matrix above is the same as the one below, which represents surrounding a `CNOT` operation with `H` on all qubits and flipping the control and target qubits.

# +
from qutip.tensor import tensor
from qutip.qip.operations import hadamard_transform

(
    tensor([hadamard_transform(), hadamard_transform()])
    * cnot(None, 1, 0)
    * tensor([hadamard_transform(), hadamard_transform()])
)
# -

# ----

# ### Exercise 9.2
# **Just as you can use three classical `XOR` instructions to implement an in-place classical swap, you can use three `CNOT` operations to do the same thing as a single `SWAP` operation.
# In fact, the following Q# snippet does the same thing as `SWAP(left, right)`:**
#
# ```
# CNOT(left, right);
# CNOT(right, left);
# CNOT(left, right);
# ```
# **Double-check that this is the same as `SWAP(left, right)`, both by using `AssertOperationsEqualReferenced`, and by using QuTiP.**

import qsharp

qsharp.compile("""
    operation SwapWithCnot(pair : Qubit[]) : Unit {
        CNOT(pair[0], pair[1]);
        CNOT(pair[1], pair[0]);
        CNOT(pair[0], pair[1]);
    }

    operation SwapDirectly(pair : Qubit[]): Unit is Adj {
        SWAP(pair[0], pair[1]);
    }
""")

assert_swap = qsharp.compile("""
    open Microsoft.Quantum.Diagnostics;

    operation AssertSwapEqualsThreeCnot(): Unit {
        AssertOperationsEqualReferenced(2, SwapWithCnot, SwapDirectly);
    }
""")

assert_swap.simulate()

# > **Extra credit**: `SWAP(left, right)` is the same as `SWAP(right, left)`, so the snippet above should work even if you start with `CNOT(right, left)` instead. Double-check that!

# +
qsharp.compile("""
    operation ReverseSwapWithCnot(pair : Qubit[]) : Unit{
        CNOT(pair[1], pair[0]);
        CNOT(pair[0], pair[1]);
        CNOT(pair[1], pair[0]);
    }

    operation ReverseSwapDirectly(pair : Qubit[]) : Unit is Adj {
        SWAP(pair[1], pair[0]);
    }
""")

assert_swap_reverse = qsharp.compile("""
    open Microsoft.Quantum.Diagnostics;

    operation AssertSwapEqualsThreeCnot(): Unit {
        AssertOperationsEqualReferenced(2, ReverseSwapWithCnot, ReverseSwapDirectly);
    }
""")
# -

assert_swap_reverse.simulate()

# ----

# ### Exercise 9.3
# **Using QuTiP, check that when run on two-qubit registers, the two programs from the listing below can be simulated by the same unitary matrix and thus do the same thing to their input registers.**
#
# ```
# open Microsoft.Quantum.Diagnostics;
#
# operation ApplyXUsingCNOTs(register : Qubit[])
# : Unit is Adj + Ctl {
#     within {
#         ApplyToEachCA(
#             CNOT(register[0], _),
#             register[1...]
#         );
#     } apply {
#         X(register[0]);
#     }
# }
#
# operation CheckThatThisWorks() : Unit {
#     AssertOperationsEqualReferenced(2,
#         ApplyXUsingCNOTs,
#         ApplyToEachCA(X, _)
#     );
#     Message("Woohoo!");
# }
# ```

# +
from qutip import sigmax, qeye
from qutip.tensor import tensor
from qutip.qip.operations import cnot
from functools import reduce


def apply_x_using_cnot(n: int):
    # Compose the CNOT conjugation ("within" block), then sandwich X on
    # qubit 0 between it and its adjoint.
    within = reduce((lambda x, y: y * x), [cnot(n, 0, i) for i in range(1, n)])
    return within.dag() * tensor([sigmax()] + [qeye(2)] * (n - 1)) * within


def apply_to_each_x(n: int):
    return tensor([sigmax()] * n)


print(apply_x_using_cnot(3))
print(apply_to_each_x(3))
# -

# You can see that the above two matrices are the same and thus represent the same operation.

# ----

# ### Exercise 9.4
# **Try modifying the listing from exercise 9.3 to see if both programs are equivalent when applied to more than two qubits.**
#
# > **NOTE:** It can be pretty expensive to use `AssertOperationsEqualReferenced` for more than a few qubits.

[_, check_three_qubit, check_eight_qubit] = qsharp.compile("""
    open Microsoft.Quantum.Diagnostics;

    operation ApplyXUsingCNOTs(register : Qubit[])
    : Unit is Adj + Ctl {
        within {
            ApplyToEachCA(
                CNOT(register[0], _),
                register[1...]
            );
        } apply {
            X(register[0]);
        }
    }

    operation CheckThatThisWorks3() : Unit {
        AssertOperationsEqualReferenced(3,
            ApplyXUsingCNOTs,
            ApplyToEachCA(X, _)
        );
        Message("Woohoo!");
    }

    operation CheckThatThisWorks8() : Unit {
        AssertOperationsEqualReferenced(8,
            ApplyXUsingCNOTs,
            ApplyToEachCA(X, _)
        );
        Message("Woohoo!");
    }
""")

check_three_qubit.simulate()

check_eight_qubit.simulate()

# For at least a small sample, the assert succeeds and so you know the two programs are the same. It turns out that they will always be the same, no matter the number of qubits used.

# ----

# ### Exercise 9.5
#
# **Try preparing your register in states other than $\left|00\right\rangle$ before calling `ApplyRotationAboutXX`.
# Does your operation do what you expected?**
#
# > **HINT:** Recall from Part I that you can prepare a copy of the $\left|1\right\rangle$ state by applying an `X` operation, and that you can prepare $\left|+\right\rangle$ by applying an `H` operation.

# +
qsharp.compile("""
    operation ApplyRotationAboutXX(angle : Double, register : Qubit[])
    : Unit is Adj + Ctl {
        within {
            CNOT(register[0], register[1]);
        } apply {
            Rx(angle, register[0]);
        }
    }
""")

rotate_zeros_about_xx = qsharp.compile("""
    open Microsoft.Quantum.Diagnostics;

    operation RotateZeroAboutXX(angle : Double) : Unit {
        using(register = Qubit[2]) {
            ApplyRotationAboutXX(angle, register);
            DumpMachine();
            Message("\n");
            ResetAll(register);
        }
    }
""")

rotate_plus_about_xx = qsharp.compile("""
    open Microsoft.Quantum.Diagnostics;

    operation RotatePlusAboutXX(angle : Double) : Unit {
        using(register = Qubit[2]) {
            ApplyToEachCA(H, register);
            ApplyRotationAboutXX(angle, register);
            DumpMachine();
            Message("\n");
            ResetAll(register);
        }
    }
""")

rotate_ones_about_xx = qsharp.compile("""
    open Microsoft.Quantum.Diagnostics;

    operation RotateOnesAboutXX(angle : Double) : Unit {
        using(register = Qubit[2]) {
            ApplyToEachCA(X, register);
            ApplyRotationAboutXX(angle, register);
            DumpMachine();
            Message("\n");
            ResetAll(register);
        }
    }
""")
# -

# With that Q# code compiled, make a table of the resulting states for a range of angles from 0 to $2\pi$. Here we started with the $|00\rangle$ state for comparison.

import numpy as np
[rotate_zeros_about_xx.simulate(angle=a * np.pi / 4) for a in range(8)]

# Now repeat the rotation angles but start with the state $|++\rangle$.

import numpy as np
[rotate_plus_about_xx.simulate(angle=a * np.pi / 4) for a in range(0, 8)]

# Note that these are all equivalent up to a global phase; rotating the |++⟩ state around the 𝑋𝑋-axis doesn't do anything.

# One more time, repeat the angles with the initial state of $|11\rangle$.

import numpy as np
[rotate_ones_about_xx.simulate(angle=a * np.pi / 4) for a in range(0, 8)]

# ----

# ### Exercise 9.6
# **Try using `DumpMachine` to explore how the `Rx` operation acts on a single qubit, and compare to the two-qubit rotation about the $X \otimes X$ axis that you implemented in the snippet below.
# How are the two rotation operations similar, and how do they differ?**
# ```
# open Microsoft.Quantum.Diagnostics;
# open Microsoft.Quantum.Math;
#
# operation ApplyRotationAboutXX(angle : Double, register : Qubit[])
# : Unit is Adj + Ctl {
#     within {
#         CNOT(register[0], register[1]);
#     } apply {
#         Rx(angle, register[0]);
#     }
# }
#
# operation DumpXXRotation() : Unit {
#     let angle = PI() / 2.0;
#     using (register = Qubit[2]) {
#         ApplyRotationAboutXX(angle, register);
#         DumpMachine();
#         ResetAll(register);
#     }
# }
# ```

[_, dump_rx_rotation, dump_xx_rotation] = qsharp.compile("""
    open Microsoft.Quantum.Diagnostics;
    open Microsoft.Quantum.Math;

    operation ApplyRotationAboutXX(angle : Double, register : Qubit[])
    : Unit is Adj + Ctl {
        within {
            CNOT(register[0], register[1]);
        } apply {
            Rx(angle, register[0]);
        }
    }

    operation DumpRxRotation(angle : Double) : Unit {
        using (q = Qubit()) {
            Rx(angle, q);
            DumpMachine();
            Message("\n");
            Reset(q);
        }
    }

    operation DumpXXRotation(angle : Double) : Unit {
        using (register = Qubit[2]) {
            ApplyRotationAboutXX(angle, register);
            DumpMachine();
            Message("\n");
            ResetAll(register);
        }
    }
""")

import numpy as np
[dump_rx_rotation.simulate(angle=a * np.pi / 4) for a in range(0, 8)]

[dump_xx_rotation.simulate(angle=a * np.pi / 4) for a in range(0, 8)]

# The rotations are similar in that the amplitudes for the first ($|0\rangle$ or $|00\rangle$) and last state ($|1\rangle$ or $|11\rangle$) have the same amplitudes. They are obviously different from the standpoint that they operate on different numbers of qubits.

# **Compare rotating about the $X \otimes X$ axis with applying an `Rx` operation to each qubit in a two-qubit register.**

dump_rxrx_rotation = qsharp.compile("""
    open Microsoft.Quantum.Diagnostics;
    open Microsoft.Quantum.Math;

    operation DumpRxRxRotation(angle : Double) : Unit {
        using (register = Qubit[2]) {
            ApplyToEach(Rx(angle, _), register);
            DumpMachine();
            Message("\n");
            ResetAll(register);
        }
    }
""")

[dump_rxrx_rotation.simulate(angle=a * np.pi / 4) for a in range(0, 8)]

# You can see here that applying the `Rx` operation to each of the two qubits in a register is emphatically _not_ the same as rotating about the $XX$-axis.

# ----

# ### Epilogue
#
# _The following cell logs what version of the components this was last tested with._

qsharp.component_versions()
ch09/ch09-exercise-solutions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Management - Fragmenter Validation 1.0
#
# Operational log for the "OpenFF Fragmenter Validation 1.0" TorsionDriveDataset:
# each dated section inspects unfinished work and restarts errored computations.

# +
import qcportal as ptl
import pandas as pd
import datetime
import time

from management import *

# + active=""
# # connect without auth
# # read only
# client = ptl.FractalClient()
# -

# connect with authentication, therefore write access
# don't use unless you plan to submit things
client = ptl.FractalClient.from_file()
client

# ## Dataset tracking

# ### 2020.06.27 01:00 UTC

dataset = "OpenFF Fragmenter Validation 1.0"
ds = client.get_collection("TorsionDriveDataset", dataset)
ds.list_specifications()

res = get_unfinished_torsiondrive_optimizations(ds, 'default', client)
res

resm = merge(res)

# Peek at one INCOMPLETE optimization record.
incomplete = (opt.dict() for opt in resm if opt.status == 'INCOMPLETE')
next(incomplete)

# All Lilac INCOMPLETEs. INCOMPLETEs are over a week old. We need to clear these out somehow with Ben. I anticipate this needing clearing on the server.

count_unique_optimization_error_messages(resm, full=True, pretty_print=True)

restart_optimizations(resm, client)

[tdr for tdr in ds.df.default if tdr.status == 'ERROR']

restart_torsiondrives(ds.df.default.tolist(), client)

# ### 2020.06.18 14:10 UTC

dataset = "OpenFF Fragmenter Validation 1.0"
ds = client.get_collection("TorsionDriveDataset", dataset)
ds.list_specifications()

res = get_unfinished_torsiondrive_optimizations(ds, 'default', client, merged=False)
res

resm = merge(res)

incomplete = (opt.dict() for opt in resm if opt.status == 'INCOMPLETE')
next(incomplete)

# Not all Lilac INCOMPLETEs. Other INCOMPLETEs are over a week old. We need to clear these out somehow with Ben. I anticipate this needing clearing on the server.

count_unique_optimization_error_messages(resm, full=True, pretty_print=True)

restart_optimizations(resm, client)

[tdr for tdr in ds.df.default if tdr.status == 'ERROR']

restart_torsiondrives(ds.df.default.tolist(), client)

# No current errors!

# ### 2020.06.16 15:10 UTC

dataset = "OpenFF Fragmenter Validation 1.0"
ds = client.get_collection("TorsionDriveDataset", dataset)
ds.list_specifications()

res = get_unfinished_torsiondrive_optimizations(ds, 'default', client, merged=False)
res

resm = merge(res)

incomplete = (opt.dict() for opt in resm if opt.status == 'INCOMPLETE')
next(incomplete)

# Not all Lilac INCOMPLETEs. Some of these may be legit PRP/UCI INCOMPLETEs.

count_unique_optimization_error_messages(resm, full=True, pretty_print=True)

restart_optimizations(resm, client)

[tdr for tdr in ds.df.default if tdr.status == 'ERROR']

restart_torsiondrives(ds.df.default.tolist(), client)

# No current errors!

# ### 2020.06.13 22:45 UTC

dataset = "OpenFF Fragmenter Validation 1.0"
ds = client.get_collection("TorsionDriveDataset", dataset)
ds.list_specifications()

res = get_unfinished_torsiondrive_optimizations(ds, 'default', client, merged=False)
res

resm = merge(res)

incomplete = (opt.dict() for opt in resm if opt.status == 'INCOMPLETE')
next(incomplete)

# Not all Lilac INCOMPLETEs. Some of these may be legit PRP/UCI INCOMPLETEs.

count_unique_optimization_error_messages(resm, full=True, pretty_print=True)

restart_optimizations(resm, client)

[tdr for tdr in ds.df.default if tdr.status == 'ERROR']

restart_torsiondrives(ds.df.default.tolist(), client)

# No current errors!

# ### 2020.06.12 15:30 UTC

dataset = "OpenFF Fragmenter Validation 1.0"
ds = client.get_collection("TorsionDriveDataset", dataset)
ds.list_specifications()

res = get_unfinished_torsiondrive_optimizations(ds, 'default', client, merged=False)
res

resm = merge(res)

incomplete = (opt.dict() for opt in resm if opt.status == 'INCOMPLETE')
next(incomplete)

# All Lilac INCOMPLETEs?

count_unique_optimization_error_messages(resm, full=True, pretty_print=True)

restart_optimizations(resm, client)

[tdr for tdr in ds.df.default if tdr.status == 'ERROR']

restart_torsiondrives(ds.df.default.tolist(), client)

# --------

# ## Archive

ds = client.get_collection("TorsionDriveDataset", "OpenFF Fragmenter Validation 1.0")
ds.list_specifications()

res = get_incomplete_torsiondrive_optimizations(ds, 'default', client, merged=False)
res

res.keys()

optr = res['19712173'][-6]

optr.dict()

ds.df

# ------------

res = get_incomplete_torsiondrive_optimizations(ds, 'default', client, merged=True)
res

optrs = [optr for optr in res if optr.status == 'INCOMPLETE']

out = map(lambda x: x.dict(), optrs)

next(out)

errors = (i.get_error().error_message for i in res if i.status == 'ERROR')

incompletes = tuple(i for i in res if i.status == 'INCOMPLETE')

incompletes[0].dict()

# ### Discussion with Trevor

# Need to mark the incompletes here as errored.
#
# Also need to view input molecule.

# There is no trajectory for this molecule.
#
# Check to make sure indices are actually a torsion (input validation); indices are 0-based.

client.modify_tasks(operation='restart', base_result='19819414')

# ### What molecules do we have failures or incompletes for?

ds = client.get_collection("TorsionDriveDataset", "OpenFF Fragmenter Validation 1.0")
ds.list_specifications()

res = get_incomplete_torsiondrive_optimizations(ds, 'default', client, merged=True)

for i in res:
    if i.status == 'ERROR':
        print(i.dict())

ds.df[ds.df.default.apply(lambda x: x.status == 'ERROR')].index.values

ds.df[ds.df.default.apply(lambda x: x.status == 'RUNNING')].index.values.tolist()

# ## Failure analysis 2020.05.28

[i for i in ds.df.default if i.status == 'ERROR']

ids = set(i.id for i in ds.df.default)

res = client.query_procedures(ids)

from collections import defaultdict

# Gather, per unfinished torsiondrive, the ids of all its constituent
# constrained optimizations.
angle_optimizations = defaultdict(set)
for tdr in ds.df.default:
    if tdr.status == 'COMPLETE':
        continue
    for val in tdr.optimization_history.values():
        angle_optimizations[tdr.id].update(set(val))

angle_optimizations

# +
angle_optimizations_i = set()
for i in angle_optimizations.values():
    angle_optimizations_i.update(set(i))

angle_optimizations_i
# -

len(angle_optimizations_i)

res_angle_opt = client.query_procedures(angle_optimizations_i)

res_angle_opt

review = ((i, i.get_error().error_message) for i in res_angle_opt if i.status == 'ERROR')

print(next(review))

# Perhaps a memory issue?

client.modify_tasks(operation='restart', base_result='20576181')

for i in ds.df.default:
    if i.status == 'ERROR':
        print(i.optimization_history)

failed = tuple(i.id for i in res_angle_opt if i.status == 'ERROR')
failed

# Map each failed optimization back to the torsion angle it belongs to.
for i in ds.df.default:
    if i.status == 'ERROR':
        for key, value in i.optimization_history.items():
            for f in failed:
                if f in value:
                    print(key)

# Restart again?

angle_optimizations.keys()

for tdrid in angle_optimizations.keys():
    client.modify_services(operation='restart', procedure_id=tdrid)

ds.status('default')

# ## Problem cases 2020.05.27
#
# From 'OpenFF Fragmenter Validation 1.0', job index: c1ccc:1Cl job id: '20548994'.
# Error message:
# ```
# ComputeError(error_type='CancelledError', error_message='Caught Executor Error:\nTraceback (most recent call last):\n  File "/home/chodera/miniconda/envs/qcfractal/lib/python3.6/site-packages/qcfractal/queue/executor_adapter.py", line 15, in _get_future\n    return future.result()\n  File "/home/chodera/miniconda/envs/qcfractal/lib/python3.6/concurrent/futures/_base.py", line 423, in result\n    raise CancelledError()\nconcurrent.futures._base.CancelledError\n')
# )
# ```

erred = [i for i in ds.df.default.values if i.status == 'ERROR']
erred

# Start with the first one:

tdr = erred[0]

tdr.id

tdr.dict()

client.modify_services(operation='restart', procedure_id=tdr.id)

ds = client.get_collection("TorsionDriveDataset", "OpenFF Fragmenter Validation 1.0")
ds.list_specifications()

# 'default' always means 'b3lyp-d3bj' for us.

ds.status("default")

ds.df
management/Management - Fragmenter Validation 1.0.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="vkdnLiKk71g-"
# ##### Copyright 2022 The TensorFlow Authors.

# + cellView="form" id="0asMuNro71hA"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# + [markdown] id="jXslvcRocA-0"
# # Composing Learning Algorithms

# + [markdown] id="0XBJJIqwcXKd"
# <table class="tfo-notebook-buttons" align="left">
#   <td>
#     <a target="_blank" href="https://www.tensorflow.org/federated/tutorials/composing_learning_algorithms"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
#   </td>
#   <td>
#     <a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/main/docs/tutorials/composing_learning_algorithms.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
#   </td>
#   <td>
#     <a target="_blank" href="https://github.com/tensorflow/federated/blob/main/docs/tutorials/composing_learning_algorithms.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
#   </td>
#   <td>
#     <a href="https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/composing_learning_algorithms.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
#   </td>
# </table>

# + [markdown] id="MnUwFbCAKB2r"
# ## Before you start
#
# Before you start, please run the following to make sure that your environment is
# correctly setup. If you don't see a greeting, please refer to the
# [Installation](../install.md) guide for instructions.

# + id="ZrGitA_KnRO0"
#@test {"skip": true}
# !pip install --quiet --upgrade tensorflow-federated
# !pip install --quiet --upgrade nest-asyncio

import nest_asyncio
nest_asyncio.apply()

# + id="HGTM6tWOLo8M"
from typing import Callable

import tensorflow as tf
import tensorflow_federated as tff

# + [markdown] id="yr3ztf28fa1F"
# **NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the Tensorflow Federated project is still in pre-release development and may not work on `main`.

# + [markdown] id="CFlTaHe0jV2S"
# # Composing Learning Algorithms

# + [markdown] id="3zQlyijofSzI"
# The [Building Your Own Federated Learning Algorithm Tutorial](https://github.com/tensorflow/federated/blob/main/docs/tutorials/building_your_own_federated_learning_algorithm.ipynb) used TFF's federated core to directly implement a version of the Federated Averaging (FedAvg) algorithm.
#
# In this tutorial, you will use federated learning components in TFF's API to build federated learning algorithms in a modular manner, without having to re-implement everything from scratch.
#
# For the purposes of this tutorial, you will implement a variant of FedAvg that employs gradient clipping during local training.

# + [markdown] id="fHwcFnLAjqcG"
# ## Learning Algorithm Building Blocks
#
# At a high level, many learning algorithms can be separated into 4 separate components, referred to as **building blocks**. These are as follows:
#
# 1. Distributor (ie. server-to-client communication)
# 1. Client work (ie. local client computation)
# 1. Aggregator (ie. client-to-server communication)
# 1. Finalizer (ie. server computation using aggregated client outputs)

# + [markdown] id="YwhOtjlvjboB"
# While the [Building Your Own Federated Learning Algorithm Tutorial](https://github.com/tensorflow/federated/blob/main/docs/tutorials/building_your_own_federated_learning_algorithm.ipynb) implemented all of these building blocks from scratch, this is often unnecessary. Instead, you can re-use building blocks from similar algorithms.
#
# In this case, to implement FedAvg with gradient clipping, you only need to modify the **client work** building block. The remaining blocks can be identical to what is used in "vanilla" FedAvg.

# + [markdown] id="LMnd0RvGlGjK"
# # Implementing the Client Work
#
# First, let's write TF logic that does local model training with gradient clipping. For simplicity, gradients will be clipped to have norm at most 1.

# + [markdown] id="-lqZ-c4MphTU"
# ## TF Logic

# + id="pIw7QQCqltdV"
@tf.function
def client_update(model: tff.learning.Model,
                  dataset: tf.data.Dataset,
                  server_weights: tff.learning.ModelWeights,
                  client_optimizer: tf.keras.optimizers.Optimizer):
  """Performs training (using the server model weights) on the client's dataset."""
  # Initialize the client model with the current server weights.
  client_weights = tff.learning.ModelWeights.from_model(model)
  tf.nest.map_structure(lambda x, y: x.assign(y),
                        client_weights, server_weights)

  # Use the client_optimizer to update the local model.
  # Keep track of the number of examples as well.
  num_examples = 0.0
  for batch in dataset:
    with tf.GradientTape() as tape:
      # Compute a forward pass on the batch of data
      outputs = model.forward_pass(batch)
      num_examples += tf.cast(outputs.num_examples, tf.float32)

    # Compute the corresponding gradient
    grads = tape.gradient(outputs.loss, client_weights.trainable)

    # Compute the gradient norm and clip
    gradient_norm = tf.linalg.global_norm(grads)
    if gradient_norm > 1:
      grads = tf.nest.map_structure(lambda x: x/gradient_norm, grads)

    grads_and_vars = zip(grads, client_weights.trainable)

    # Apply the gradient using a client optimizer.
    client_optimizer.apply_gradients(grads_and_vars)

  # Compute the difference between the server weights and the client weights
  client_update = tf.nest.map_structure(tf.subtract,
                                        client_weights.trainable,
                                        server_weights.trainable)

  return tff.learning.templates.ClientResult(
      update=client_update, update_weight=num_examples)

# + [markdown] id="Fe_emK8LpQe0"
# There are a few important points about the code above. First, it keeps track of the number of examples seen, as this will constitute the *weight* of the client update (when computing an average across clients).
#
# Second, it uses [`tff.learning.templates.ClientResult`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/templates/ClientResult) to package the output. This return type is used to standardize client work building blocks in `tff.learning`.

# + [markdown] id="l5aKjB1Vpiv3"
# ## Creating a ClientWorkProcess

# + [markdown] id="6IvXUJAzm8ab"
# While the TF logic above will do local training with clipping, it still needs to be wrapped in TFF code in order to create the necessary building block.
#
# Specifically, the 4 building blocks are represented as a [`tff.templates.MeasuredProcess`](https://www.tensorflow.org/federated/api_docs/python/tff/templates/MeasuredProcess). This means that all 4 blocks have both an `initialize` and `next` function used to instantiate and run the computation.
#
# This allows each building block to keep track of its own **state** (stored at the server) as needed to perform its operations. While it will not be used in this tutorial, it can be used for things like tracking how many iterations have occurred, or keeping track of optimizer states.
#
# Client work TF logic should generally be wrapped as a [`tff.learning.templates.ClientWorkProcess`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/templates/ClientWorkProcess), which codifies the expected types going into and out of the client's local training. It can be parameterized by a model and optimizer, as below.

# + id="X-I-jPsZmmMy"
def build_gradient_clipping_client_work(
    model_fn: Callable[[], tff.learning.Model],
    optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer],
) -> tff.learning.templates.ClientWorkProcess:
  """Creates a client work process that uses gradient clipping."""

  with tf.Graph().as_default():
    # Wrap model construction in a graph to avoid polluting the global context
    # with variables created for this model.
    model = model_fn()
  data_type = tff.SequenceType(model.input_spec)
  model_weights_type = tff.learning.framework.weights_type_from_model(model)

  @tff.federated_computation
  def initialize_fn():
    return tff.federated_value((), tff.SERVER)

  @tff.tf_computation(model_weights_type, data_type)
  def client_update_computation(model_weights, dataset):
    model = model_fn()
    optimizer = optimizer_fn()
    return client_update(model, dataset, model_weights, optimizer)

  @tff.federated_computation(
      initialize_fn.type_signature.result,
      tff.type_at_clients(model_weights_type),
      tff.type_at_clients(data_type)
  )
  def next_fn(state, model_weights, client_dataset):
    client_result = tff.federated_map(
        client_update_computation, (model_weights, client_dataset))
    # Return empty measurements, though a more complete algorithm might
    # measure something here.
    measurements = tff.federated_value((), tff.SERVER)
    return tff.templates.MeasuredProcessOutput(state, client_result,
                                               measurements)

  return tff.learning.templates.ClientWorkProcess(
      initialize_fn, next_fn)

# + [markdown] id="NMUX0d0Sx1Gq"
# # Composing a Learning Algorithm
#
# Let's put the client work above into a full-fledged algorithm. First, let's set up our data and model.

# + [markdown] id="hQ_N9XbULo8P"
# ## Preparing the input data
# Load and preprocess the EMNIST dataset included in TFF. For more details, see the [image classification](federated_learning_for_image_classification.ipynb) tutorial.

# + id="-WdnFluLLo8P"
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()

# + [markdown] id="kq8893GogB8E"
# In order to feed the dataset into our model, the data is flattened and converted into tuples of the form `(flattened_image_vector, label)`.
#
# Let's select a small number of clients, and apply the preprocessing above to their datasets.

# + id="Blrh8zJgLo8R"
NUM_CLIENTS = 10
BATCH_SIZE = 20

def preprocess(dataset):

  def batch_format_fn(element):
    """Flatten a batch of EMNIST data and return a (features, label) tuple."""
    return (tf.reshape(element['pixels'], [-1, 784]),
            tf.reshape(element['label'], [-1, 1]))

  return dataset.batch(BATCH_SIZE).map(batch_format_fn)

client_ids = sorted(emnist_train.client_ids)[:NUM_CLIENTS]
federated_train_data = [preprocess(emnist_train.create_tf_dataset_for_client(x))
  for x in client_ids
]

# + [markdown] id="gNO_Y9j_Lo8X"
# ## Preparing the model

# + [markdown] id="LJ0I89ixz8yV"
# This uses the same model as in the [image classification](federated_learning_for_image_classification.ipynb) tutorial. This model (implemented via `tf.keras`) has a single hidden layer, followed by a softmax layer. In order to use this model in TFF, the Keras model is wrapped as a [`tff.learning.Model`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model). This allows us to perform the model's [forward pass](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model#forward_pass) within TFF, and [extract model outputs](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model#report_local_unfinalized_metrics). For more details, also see the [image classification](federated_learning_for_image_classification.ipynb) tutorial.

# + id="Yfld4oFNLo8Y"
def create_keras_model():
  initializer = tf.keras.initializers.GlorotNormal(seed=0)
  return tf.keras.models.Sequential([
      tf.keras.layers.Input(shape=(784,)),
      tf.keras.layers.Dense(10, kernel_initializer=initializer),
      tf.keras.layers.Softmax(),
  ])

def model_fn():
  keras_model = create_keras_model()
  return tff.learning.from_keras_model(
      keras_model,
      input_spec=federated_train_data[0].element_spec,
      loss=tf.keras.losses.SparseCategoricalCrossentropy(),
      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

# + [markdown] id="7BPxQoGH0bEl"
# ## Preparing the optimizers

# + [markdown] id="mRw9zwdh0dnL"
# Just as in [`tff.learning.build_federated_averaging_process`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_averaging_process), there are two optimizers here: A client optimizer, and a server optimizer. For simplicity, the optimizers will be SGD with different learning rates.

# + id="kOO1ObqJ0cmX"
client_optimizer_fn = lambda: tf.keras.optimizers.SGD(learning_rate=0.01)
server_optimizer_fn = lambda: tf.keras.optimizers.SGD(learning_rate=1.0)

# + [markdown] id="R64okB7k06sc"
# ## Defining the building blocks
#
# Now that the client work building block, data, model, and optimizers are set up, it remains to create building blocks for the distributor, the aggregator, and the finalizer. This can be done just by borrowing some defaults available in TFF and that are used by FedAvg.

# + id="iwXOTPeIx2nx"
@tff.tf_computation()
def initial_model_weights_fn():
  return tff.learning.ModelWeights.from_model(model_fn())

model_weights_type = initial_model_weights_fn.type_signature.result

distributor = tff.learning.templates.build_broadcast_process(model_weights_type)
client_work = build_gradient_clipping_client_work(model_fn, client_optimizer_fn)

# TFF aggregators use a factory pattern, which create an aggregator
# based on the output type of the client work. This also uses a float (the number
# of examples) to govern the weight in the average being computed.)
aggregator_factory = tff.aggregators.MeanFactory()
aggregator = aggregator_factory.create(model_weights_type.trainable,
                                       tff.TensorType(tf.float32))
finalizer = tff.learning.templates.build_apply_optimizer_finalizer(
    server_optimizer_fn, model_weights_type)

# + [markdown] id="AEYYNHqI1Jif"
# ## Composing the building blocks
#
# Finally, you can use a built-in **composer** in TFF for putting the building blocks together. This one is a relatively simple composer, that takes the 4 building blocks above and wires their types together.

# + id="z_86iNeM0IBm"
fed_avg_with_clipping = tff.learning.templates.compose_learning_process(
    initial_model_weights_fn,
    distributor,
    client_work,
    aggregator,
    finalizer
)

# + [markdown] id="gcK69pCG16-E"
# # Running the algorithm
#
# Now that the algorithm is done, let's run it. First, **initialize** the algorithm. The **state** of this algorithm has a component for each building block, along with one for the *global model weights*.

# + id="Jg22oFx11YKK"
state = fed_avg_with_clipping.initialize()

state.client_work

# + [markdown] id="qmCiEdoq2doJ"
# As expected, the client work has an empty state (remember the client work code above!). However, other building blocks may have non-empty state. For example, the finalizer keeps track of how many iterations have occurred. Since `next` has not been run yet, it has a state of `0`.

# + id="kEuB-8Z71-bd"
state.finalizer

# + [markdown] id="2N9XObhZ2zSQ"
# Now run a training round.

# + id="tKhPuBgW1-3c"
learning_process_output = fed_avg_with_clipping.next(state, federated_train_data)

# + [markdown] id="J7L0jKEe29bk"
# The output of this (`tff.learning.templates.LearningProcessOutput`) has both a `.state` and `.metrics` output. Let's look at both.

# + id="AMsBmmQz28AZ"
learning_process_output.state.finalizer

# + [markdown] id="hwcfhAbP3VkH"
# Clearly, the finalizer state has incremented by one, as one round of `.next` has been run.

# + id="0K91G_Ob3E05"
learning_process_output.metrics

# + [markdown] id="2sDyO9uz3Jaz"
# While the metrics are empty, for more complex and practical algorithms they'll generally be full of useful information.

# + [markdown] id="tPpxe7Ie3gLJ"
# # Conclusion

# + [markdown] id="F8uEZw-T3iBB"
# By using the building block/composers framework above, you can create entirely new learning algorithms, without having to re-do everything from scratch. However, this is only the starting point. This framework makes it much easier to express algorithms as simple modifications of FedAvg. For more algorithms, see [`tff.learning.algorithms`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/algorithms), which contains algorithms such as [FedProx](https://www.tensorflow.org/federated/api_docs/python/tff/learning/algorithms/build_weighted_fed_prox) and [FedAvg with client learning rate scheduling](https://www.tensorflow.org/federated/api_docs/python/tff/learning/algorithms/build_weighted_fed_avg_with_optimizer_schedule). These APIs and can even aid implementations of entirely new algorithms, such as [federated k-means clustering](https://www.tensorflow.org/federated/api_docs/python/tff/learning/algorithms/build_fed_kmeans).
site/en-snapshot/federated/tutorials/composing_learning_algorithms.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np from bokeh.plotting import output_notebook, figure, show, hplot from bokeh.models import ColumnDataSource, CustomJS, Rect output_notebook() N = 20 img = np.empty((N,N), dtype=np.uint32) view = img.view(dtype=np.uint8).reshape((N, N, 4)) for i in range(N): for j in range(N): view[i, j, 0] = int(i/N*255) view[i, j, 1] = 158 view[i, j, 2] = int(j/N*255) view[i, j, 3] = 255 source = ColumnDataSource({'x':[], 'y':[], 'width':[], 'height':[]}) xrange_callback = CustomJS(args=dict(source=source), code=""" var data = source.get('data'); var start = cb_obj.get('frame').get('x_range').get('start'); var end = cb_obj.get('frame').get('x_range').get('end'); data['x'] = [start + (end - start) / 2]; data['width'] = [end - start]; source.trigger('change'); """) yrange_callback = CustomJS(args=dict(source=source), code=""" var data = source.get('data'); var start = cb_obj.get('frame').get('y_range').get('start'); var end = cb_obj.get('frame').get('y_range').get('end'); data['y'] = [start + (end - start) / 2]; data['height'] = [end - start]; source.trigger('change'); """) p1 = figure(title='Box Zoom Here', x_range=[0,10], y_range=[0,10], tools = ['box_zoom', 'reset']) p1.image_rgba(image=[img], x=[0], y=[0], dw=[10], dh=[10], level='image') p1.x_range.callback = xrange_callback p1.y_range.callback = yrange_callback p2 = figure(title='See Zoom Window Here', x_range=[0,10], y_range=[0,10], tools="") p2.image_rgba(image=[img], x=[0], y=[0], dw=[10], dh=[10], level='image') rect = Rect(x='x', y='y', width='width', height='height', fill_alpha=0.0, line_color='black') p2.add_glyph(source, rect) show(hplot(p1, p2)) # -
examples/interactions/Range update callback.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Initialize the Firebase Admin SDK with the service-account credentials
# and open a Firestore client used by all cells below.
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore

cred = credentials.Certificate('crp-db-manager/crp-backend-server-b67475fc5e44.json')
firebase_admin.initialize_app(cred)
db = firestore.client()

# +
# code to add User details to the database
import pandas as pd

user_df = pd.read_csv('crp-db-manager/user_passw.csv')
user_ref = db.collection(u'users')
for index, row in user_df.iterrows():
    # One document per user, keyed by the pre-assigned uid column.
    doc_ref = user_ref.document(row['uid'])
    regno = row['reg-number']
    gendr = row['gender']
    uname = row['name']
    email = row['email']
    phnno = row['phone-number']
    usert = row['user-type']
    doc_ref.set({
        u'registrationNumber': regno,
        u'name': uname,
        u'gender': gendr,
        u'email': email,
        u'phoneNumber': phnno,
        u'userType': usert
    })

# +
# code to add Course details to the database
import json

course_ref = db.collection(u'courses')
with open('crp-db-manager/course_parse_data.json', 'r') as course_db:
    course_data = json.loads(course_db.read())
for code, info in course_data.items():
    doc_ref = course_ref.document(str(code))
    try:
        doc_ref.set(info)
    except Exception:  # best-effort load: report the failing record, keep going
        print(info)

# +
# Code to fetch details
# Available data: User, Course
doc_ref = db.collection(u'users')
docs = doc_ref.stream()
for doc in docs:
    print(u'{} => {}'.format(doc.id, doc.to_dict()))

# +
# Build ImportUserRecord entries with bcrypt-hashed passwords so the
# plaintext CSV passwords are never sent to Firebase Auth.
from firebase_admin import auth
from firebase_admin import exceptions  # fix: previously unimported, so the except below raised NameError
import pandas as pd
import bcrypt

user_df = pd.read_csv('crp-db-manager/user_passw.csv')
email_passw = user_df[['email', 'password']]
users = list()
for i in range(len(email_passw)):
    passw = email_passw.iloc[i]['password'].encode()
    salt = bcrypt.gensalt()
    hashed = bcrypt.hashpw(passw, salt)
    users.append(
        auth.ImportUserRecord(
            uid='uid' + str(i + 1),
            email=email_passw.iloc[i]['email'],
            password_hash=hashed,
            password_salt=salt
        )
    )
users

# +
# Bulk-import the users into Firebase Auth, declaring bcrypt as the hash
# algorithm so Firebase can verify the hashes at sign-in time.
hash_alg = auth.UserImportHash.bcrypt()
try:
    result = auth.import_users(users, hash_alg=hash_alg)
    for err in result.errors:
        print('Failed to import user:', err.reason)
except exceptions.FirebaseError as error:
    print('Error importing users:', error)

# +
# Derive a 3-letter department code from positions 8:11 of the
# registration number and attach it as a new Dept column.
import pandas as pd

user_df = pd.read_csv('crp-db-manager/user_passw.csv')
user_course_list = list()
for i in range(user_df.shape[0]):
    user_course_list.append(user_df.iloc[i]['reg-number'][8:11])
user_df = user_df.assign(Dept = user_course_list)

# +
# Group course codes by their department substring (chars 2:5 of the code).
from collections import defaultdict
import json

course_dict = defaultdict(list)
courses = dict()
with open('crp-db-manager/course_parse_data.json', 'r') as course_db:
    courses = json.loads(course_db.read())
for elem in courses.keys():
    course_dict[elem[2:5]].append(elem)
course_dict

# +
# Assign each student a random split of completed/remaining courses drawn
# from their department plus the common HUM/MAT pools.
# fix: column names 'User Type'/'Registration Number' did not exist in
# user_passw.csv (other cells read 'user-type'/'reg-number' from the same
# DataFrame), so these lookups raised KeyError.
import random

student_details = dict()
for i in range(user_df.shape[0]):
    if user_df.iloc[i]['user-type'] == 'STUDENT':
        if user_df.iloc[i]['Dept'] != 'CIV':
            course_list = course_dict[user_df.iloc[i]['Dept']] + course_dict['HUM'] + course_dict['MAT']
        else:
            course_list = course_dict['HUM'] + course_dict['MAT']
        total_course = random.randint(0, min(7, len(course_list)))
        random_courses = random.sample(course_list, total_course)
        course_rem_set = list(set(course_list) - set(random_courses))
        course_rem = random.sample(course_rem_set, min(10 - total_course, len(course_rem_set)))
        student_details[user_df.iloc[i]['reg-number']] = {
            'rollNumber': user_df.iloc[i]['reg-number'],
            'reviews': [],
            'questions': [],
            'coursesCompleted': random_courses,
            'coursesRemaining': course_rem
        }

# +
student_ref = db.collection(u'Student')
for rollNum, info in student_details.items():
    doc_ref = student_ref.document(str(rollNum))
    try:
        doc_ref.set(info)
    except Exception:
        print(info)

# +
# Count how many distinct subscribed courses exist per department, and how
# many faculty/mentors each department has.
from collections import defaultdict

subscribe_course_set = set()
for elem in student_details.values():
    subscribe_course_set.update(elem.get('coursesCompleted'))
    subscribe_course_set.update(elem.get('coursesRemaining'))
course_count = defaultdict(int)
for elem in subscribe_course_set:
    course_count[elem[2:5]] += 1

faculty_count = defaultdict(int)
for i in range(user_df.shape[0]):
    # fix: same stale column names as above ('User Type'/'Registration Number').
    if user_df.iloc[i]['user-type'] in ['FACULTY', 'MENTOR']:
        faculty_count[user_df.iloc[i]['reg-number'][8:11]] += 1

# +
# Rebuild student documents keyed by uid (supersedes the roll-number keyed
# collection above) with empty course lists.
student_details = dict()
for rows in user_df.iterrows():
    if rows[1]['user-type'] == 'STUDENT':
        student_details[rows[1]['uid']] = {
            'rollNumber': rows[1]['reg-number'],
            'reviews': {},
            'questions': {},
            'coursesCompleted': [],
            'coursesRemaining': []
        }
student_details

# +
student_ref = db.collection(u'students')
for uid, info in student_details.items():
    doc_ref = student_ref.document(str(uid))
    try:
        doc_ref.set(info)
    except Exception:
        print(info)

# +
# Staff (everything that is not a student) documents, keyed by uid.
staff_details = dict()
for rows in user_df.iterrows():
    if rows[1]['user-type'] != 'STUDENT':
        staff_details[rows[1]['uid']] = {
            'regNumber': rows[1]['reg-number'],
            'questions': {},
            'previousCourses': {},
            'currentCourses': {}
        }
staff_details

# +
staff_ref = db.collection(u'staffs')
for uid, info in staff_details.items():
    doc_ref = staff_ref.document(str(uid))
    try:
        doc_ref.set(info)
    except Exception:
        print(info)
# -

# Document shape templates for the remaining collections.
reviews = {
    'reviewId': "",
    'courseCode': "",
    'uid': "",
    'rating': {},
    'reviewText': "",
}

questions = {
    'questionId': "",
    'courseCode': "",
    'uidAsked': "",
    'uidAnswered': "",
    'questionText': "",
    'answerText': ""
}

ratingQuestions = {
    "0": {
        'questionNumber': "0",
        'questionText': "Overall rating"
    }
}

# +
rate_ques_ref = db.collection(u'ratingQuestions')
for qid, info in ratingQuestions.items():
    doc_ref = rate_ques_ref.document(str(qid))
    try:
        doc_ref.set(info)
    except Exception:
        print(info)
crp-db-manager/.ipynb_checkpoints/db_populate-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + language="html"
# <style> table {float:left} </style>
# -

# !pip install torch tqdm lazyme nltk gensim

# !python -m nltk.downloader punkt

# +
import numpy as np
from tqdm import tqdm
import pandas as pd
from gensim.corpora import Dictionary

import torch
from torch import nn, optim, tensor, autograd
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
# -

# Prefer the standard NLTK tokenizers; if they fail (e.g. missing `punkt`
# data), fall back to a regex sentence splitter plus the data-free Toktok
# word tokenizer.
try:
    # Use the default NLTK tokenizer.
    from nltk import word_tokenize, sent_tokenize
    # Testing whether it works.
    # Sometimes it doesn't work on some machines because of setup issues.
    word_tokenize(sent_tokenize("This is a foobar sentence. Yes it is.")[0])
    print("OK")
except Exception:
    # Use a naive sentence tokenizer and toktok.
    import re
    from nltk.tokenize import ToktokTokenizer
    # See https://stackoverflow.com/a/25736515/610569
    sent_tokenize = lambda x: re.split(r'(?<=[^A-Z].[.?]) +(?=[A-Z])', x)
    # Use the toktok tokenizer that requires no dependencies.
    # fix: was the redundant double assignment `word_tokenize = word_tokenize = ...`.
    toktok = ToktokTokenizer()
    word_tokenize = toktok.tokenize

# # Classifying Toxic Comments
#
# Lets apply what we learnt in a realistic task and **fight cyber-abuse with NLP**!
#
# From https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/
#
# > *The threat of abuse and harassment online means that many people stop <br>*
# > *expressing themselves and give up on seeking different opinions.
<br>*
# > *Platforms struggle to effectively facilitate conversations, leading many <br>*
# > *communities to limit or completely shut down user comments.*
#
#
# The goal of the task is to build a model to detect different types of toxicity:
#
#  - toxic
#  - severe toxic
#  - threats
#  - obscenity
#  - insults
#  - identity-based hate
#
# In this part, you'll be munging the data as how I would be doing it at work.
#
# Your task is to train a feed-forward network on the toxic comments given the skills we have accomplished thus far.

# ## Digging into the data...
#
# If you're using linux/Mac you can use these bang commands in the notebook:
#
# ```
# # !pip3 install kaggle
# # !mkdir -p /content/.kaggle/
# # !echo '{"username":"natgillin","key":"<KEY>"}' > /content/.kaggle/kaggle.json
# # !chmod 600 /content/.kaggle/kaggle.json
# # !kaggle competitions download -c jigsaw-toxic-comment-classification-challenge
# # !unzip /content/.kaggle/competitions/jigsaw-toxic-comment-classification-challenge/*
# ```
#
# Otherwise, download the data from https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/

# Work from the directory that contains the extracted train.csv.
import os
os.chdir("D:/projects/tsundoku-master/data/toxic/")
os.getcwd()

# df_train = pd.read_csv("../input/train.csv")
df_train = pd.read_csv("train.csv")
df_train.head()

# Tokenize every comment once up front (slow). NOTE: the column name
# 'comment_text_tokenzied' is misspelled but load-bearing — later cells
# reference it by this exact spelling, so it is kept as-is.
df_train['comment_text_tokenzied'] = df_train['comment_text'].apply(word_tokenize)

df_train['comment_text_tokenzied'].head(5)

# +
# Just in case your Jupyter kernel dies, save the tokenized text =)
# To save your tokenized text you can do this:
import pickle
with open('train_tokenized_text.pkl', 'wb') as fout:
    pickle.dump(df_train['comment_text_tokenzied'], fout)
# -

# To load it back:
import pickle
with open('train_tokenized_text.pkl', 'rb') as fin:
    text_tokenzied = pickle.load(fin)
df_train['comment_text_tokenzied'] = text_tokenzied

text_tokenzied[:5]

# # How to get a one-hot?
#
# There are many variants of how to get your one-hot embeddings from the individual columns.
# # This is one way: label_column_names = "toxic severe_toxic obscene threat insult identity_hate".split() y_train = df_train[label_column_names].values ts_y_train = torch.tensor(y_train).float() # + # Convert one-hot to indices of the column. print(np.argmax(df_train[label_column_names].values, axis=1)) # - class ToxicDataset(Dataset): def __init__(self, tokenized_texts, labels): self.sents = tokenized_texts self.labels = labels self.vocab = Dictionary(tokenized_texts) special_tokens = {'<pad>': 0, '<unk>':1} self.vocab.patch_with_special_tokens(special_tokens) self.max_len = max([len(sent) for sent in tokenized_texts]) self.vocab_size = len(self.vocab) self._len = len(tokenized_texts) def __getitem__(self, sent_index): sent = self.sents[sent_index] vectorized_sent = self.vectorize(sent) sent_len = len(vectorized_sent) pad_len = self.max_len - len(vectorized_sent) pad_dim = (0, pad_len) padded_vectorized_sent = F.pad(vectorized_sent, pad_dim, 'constant') return {'x':padded_vectorized_sent, 'y':torch.tensor(self.labels[sent_index]), 'x_len':sent_len} def __len__(self): return self._len def vectorize(self, tokens): return torch.tensor(self.vocab.doc2idx(tokens)) def unvectorize(self, indices): return [self.vocab[i] for i in indices] label_column_names = "toxic severe_toxic obscene threat insult identity_hate".split() toxic_data = ToxicDataset(text_tokenzied, df_train[label_column_names].values) # + batch_size = 100 dataloader = DataLoader(toxic_data, batch_size=batch_size, shuffle=True) for data_dict in dataloader: # Sort indices of data in batch by lengths. 
sorted_indices = np.array(data_dict['x_len']).argsort()[::-1].tolist() data_batch = {name:_tensor[sorted_indices] for name, _tensor in data_dict.items()} print(data_batch) break # - class FFNet(nn.Module): def __init__(self, max_len, num_labels, vocab_size, embedding_size, hidden_dim, output_size): super(FFNet, self).__init__() self.embeddings = nn.Embedding(vocab_size, embedding_size, padding_idx=0) self.linear1 = nn.Linear(embedding_size*max_len, hidden_size) self.linear2 = nn.Linear(hidden_size, output_size) def forward(self, inputs): # We want to flatten the inputs so that we get the matrix of shape. # batch_size x no. of tokens in each input * embedding_size batch_size, max_len = inputs.shape embedded = self.embeddings(inputs).view(batch_size, -1) hid = F.relu(self.linear1(embedded)) out = self.linear2(hid) probs = F.sigmoid(out) return probs # + device = 'cuda' if torch.cuda.is_available() else 'cpu' print(device) embedding_size = 100 learning_rate = 0.003 hidden_size = 100 criterion = nn.BCELoss() # Hint: the CBOW model object you've created. model = FFNet(toxic_data.max_len, len(label_column_names), toxic_data.vocab_size, embedding_size=embedding_size, hidden_dim=hidden_size, output_size=6).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) #model = nn.DataParallel(model) losses = [] num_epochs = 10 for _e in range(num_epochs): epoch_loss = [] nbatch = 0 for batch in tqdm(dataloader): optimizer.zero_grad() x = batch['x'].to(device) x_len = batch['x_len'].to(device) y = batch['y'].to(device) output = model(x) loss = criterion(output, y.float()) loss.backward() optimizer.step() epoch_loss.append(loss.float().data) nbatch = nbatch + 1 if nbatch % 100 == 0: print(sum(epoch_loss)/len(epoch_loss)) losses.append(epoch_loss/nbatch) # - def predict(text): # Vectorize and Pad. 
vectorized_sent = toxic_data.vectorize(word_tokenize(text)) pad_dim = (0, toxic_data.max_len - len(vectorized_sent)) vectorized_sent = F.pad(vectorized_sent, pad_dim, 'constant') # Forward Propagation. # Unsqueeze because model is expecting `batch_size` x `sequence_len` shape. outputs = model(vectorized_sent.unsqueeze(0).to(device)) # To get the boolean output, we check if outputs are > 0.5 return [int(l > 0.5) for l in outputs.squeeze()] # What happens if you use torch.max instead? =) ##return label_column_names[int(torch.max(outputs, dim=1).indices)] text = "I will kill you." print(label_column_names) predict(text)
mywork/.ipynb_checkpoints/Textcat-Toxic-LIMING-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![Private Eyes are Watching You](art/private-eyes.jpg) # # # Fetching Summary Data # ## Introducing the Ontology2 Edition of Dbpedia # # In our [last episode](http://ontology2.com/notebooks/local/DBpedia_Schema_Queries.html), I did a number of queries against the DBpedia Ontology to map out the information available. In that notebook, I gave myself the restriction that I would only do queries against a copy of the DBpedia # Ontology that is stored with the notebook. # # Because the Ontology contains roughly 740 types and 2700 properties (more than 250 for Person alone) # this turned out to be a serious limitation -- unless we know how much information is available for these properties, I can't know which ones are important, and thus make a visualization that makes sense. # # Gastrodon is capable of querying the [DBpedia Public SPARQL endpoint](http://dbpedia.org/sparql), but the DBpedia Endpoint has some limitations, particularly, it returns at most 10,000 results for a query. Complex queries can also time out. Certainly I could write a series of smaller queries to compute statistics, but then I face a balancing act between too many small queries (which will take a long time to run) and queries that get too large (and sometimes time out.) # # Fortunately I have a product in the AWS Marketplace, the [Ontology2 Edition of DBpedia 2016-04](https://aws.amazon.com/marketplace/pp/B01HMUNH4Q/) which is a private SPARQL endpoint already loaded with data from DBpedia. By starting this product, and waiting about an hour for it to initialize, I can run as many SPARQL queries as I like of arbitrary complexity, and shut it down when I'm through. 
# # In this notebook, I use this private SPARQL endpoint to count the prevalence of types, properties, and datatypes. I use SPARQL Construct to save this information into an RDF graph that I'll later be able to combine with the DBpedia Ontology RDF graph to better explore the schema. # # I start with the usual preliminaries, importing Python modules and prefix definitions # %load_ext autotime import sys from os.path import expanduser from gastrodon import RemoteEndpoint,QName,ttl,URIRef,inline import pandas as pd import json pd.options.display.width=120 pd.options.display.max_colwidth=100 prefixes=inline(""" @prefix dbo: <http://dbpedia.org/ontology/> . @prefix summary: <http://rdf.ontology2.com/summary/> . """).graph # It wouldn't be safe for me to check database connection information into Git, so I store it in a file # in my home directory named `~/.dbpedia/config.json`, which looks like # # ``` # { # "url":"http://192.168.127.12:8890/sparql-auth", # "user":"dba", # "passwd":"<PASSWORD>", # "base_uri":"http://dbpedia.org/resource/" # } # ``` # # (Note that that is not my real IP address and passwd. If you want to reproduce this, put in the IP address and password for your own server and save it to `~/.dbpedia/config.json` connection_data=json.load(open(expanduser("~/.dbpedia/config.json"))) connection_data["prefixes"]=prefixes endpoint=RemoteEndpoint(**connection_data) # ## Counting Properties and Classes # # ### Finding the right graphs # # The [Ontology2 Edition of DBpedia 2016-04](https://aws.amazon.com/marketplace/pp/B01HMUNH4Q/) is # divided into a number of different named graphs, one for each dataset described [here](http://wiki.dbpedia.org/downloads-2016-04#datasets). # # It's important to pay attention to this for two reasons. # # One of them is that facts can appear in the output of a SPARQL query more than once than if the query covers multiple graphs and if facts are repeated in those graphs. This can throw off the accuracy of our counts. 
# # The other is that some queries seem to take a long time to run if they are run over all graphs; particularly this affects queries that involve filtering over a prefix in the predicate field (ex.) # # ``` # FILTER(STRSTARTS(STR(?p)),"http://dbpedia.org/ontology/") # ``` # # Considering both of these factors, it is wise to know which graphs the facts we want are stored in, thus I start exploring: endpoint.select(""" select ?g (COUNT(*) AS ?cnt) { GRAPH ?g { ?a <http://dbpedia.org/ontology/Person/height> ?b } . } GROUP BY ?g """) # Thus I find one motherload of properties right away: I save this in a variable so I can use it later. pgraph=URIRef("http://downloads.dbpedia.org/2016-04/core-i18n/en/specific_mappingbased_properties_en.ttl.bz2") # Looking up types, I find a number of graphs and choose the transitive types: endpoint.select(""" select ?g (COUNT(*) AS ?cnt) { GRAPH ?g { ?a a dbo:Person } . } GROUP BY ?g """) tgraph=URIRef("http://downloads.dbpedia.org/2016-04/core-i18n/en/instance_types_transitive_en.ttl.bz2") # ### Counting Classes # # It is now straightforward to pull up a list of types (classes), noting that these are not mutually exclusive. (You can be a `dbo:Actor` and a `dbo:Politician`) endpoint.select(""" SELECT ?type (COUNT(*) AS ?cnt) { GRAPH ?_tgraph { ?s a ?type . } FILTER(STRSTARTS(STR(?type),"http://dbpedia.org/ontology/")) } GROUP BY ?type """) # I can store these facts in an RDF graph (instead of a Pandas DataFrame) by using a `CONSTRUCT` query (instead of a `SELECT` query). To capture the results of a `GROUP BY` query, however, I have to use a subquery -- this is because SPARQL requires that I only use variables in the `CONSTRUCT` clause, thus I have to evaluate expressions (such as `COUNT(*)`) somewhere else. # # The resulting query is straightforward, even if it looks a little awkward with all the braces: roughly I cut and pasted the above SELECT query into a `CONSTRUCT` query that defines the facts that will be emitted. 
t_counts=endpoint.construct(""" CONSTRUCT { ?type summary:count ?cnt . } WHERE { { SELECT ?type (COUNT(*) AS ?cnt) { GRAPH ?_tgraph { ?s a ?type . } FILTER(STRSTARTS(STR(?type),"http://dbpedia.org/ontology/")) } GROUP BY ?type } } """) # I can count the facts in this resulting graph (same as the number of rows in the `SELECT` query) len(t_counts) # And here is a sample fact: next(t_counts.__iter__()) # Note that in the DBpedia Ontology there are a number of other facts about `dbo:Book`, so if add the above fact to my copy of the DBpedia Ontology, SPARQL queries will be able to pick up the count together with all the other facts. # ## Counting "Specific Properties" # # If I count properties in the "specific mappingbased properties" graph, I find that these are # all properties that have the Class name baked in endpoint.select(""" SELECT ?p (COUNT(*) AS ?cnt) { GRAPH ?_pgraph { ?s ?p ?o . } } GROUP BY ?p """) sp_count=endpoint.construct(""" CONSTRUCT { ?p summary:count ?cnt . } WHERE { { SELECT ?p (COUNT(*) AS ?cnt) { GRAPH ?_pgraph { ?s ?p ?o . } } GROUP BY ?p } } """) # ## Other Ontology properties # # That begs the question of in which graphs other properties are stored. Searching for `dbo:birthDate` I find the location of ordinary Literal properties. (Which could be a date, a number or a string) endpoint.select(""" select ?g (COUNT(*) AS ?cnt) { GRAPH ?g { ?a dbo:birthDate ?b } . } GROUP BY ?g """) # A search for `dbo:child` turns up object properties (which point to a URI reference) endpoint.select(""" select ?g (COUNT(*) AS ?cnt) { GRAPH ?g { ?a dbo:child ?b } . } GROUP BY ?g """) lgraph=URIRef("http://downloads.dbpedia.org/2016-04/core-i18n/en/mappingbased_literals_en.ttl.bz2") ograph=URIRef("http://downloads.dbpedia.org/2016-04/core-i18n/en/mappingbased_objects_en.ttl.bz2") # ### Counting All Properties # # By taking a `UNION` I can count the "specific", object, and literal properties. The DataFrame looks OK, so I decide to save these counts into a graph. 
# Union over the three property graphs: specific, object, and literal.
endpoint.select("""
    SELECT ?p (COUNT(*) AS ?cnt) {
        { GRAPH ?_pgraph { ?s ?p ?o . } }
        UNION
        { GRAPH ?_ograph { ?s ?p ?o . } }
        UNION
        { GRAPH ?_lgraph { ?s ?p ?o . } }
    } GROUP BY ?p
""")

p_counts=endpoint.construct("""
    CONSTRUCT { ?p summary:count ?cnt . }
    WHERE {
        {
            SELECT ?p (COUNT(*) AS ?cnt) {
                { GRAPH ?_pgraph { ?s ?p ?o . } }
                UNION
                { GRAPH ?_ograph { ?s ?p ?o . } }
                UNION
                { GRAPH ?_lgraph { ?s ?p ?o . } }
            } GROUP BY ?p
        }
    }
""")

len(p_counts)

# ## Counting datatypes
#
# In a RDF, a Class is a kind of type which represents a "Thing" in the world. Datatypes, on the other hand, are types that represent literal values. The most famous types in RDF come from the [XML Schema Datatypes](http://www.w3.org/TR/xmlschema-2/) and represent things such as integers, dates, and strings.
#
# RDF also allows us to define custom datatypes, which are specified with URIs, like most things in RDF.
#
# A `GROUP BY` query reveals the prevalence of various datatypes, which I then dump to a graph.
#
# There still are some big questions to research such as "does the same property turn up with different units?" For instance, it is very possible that a length could be represented in kilometers, centimeters, feet, or furlongs. You won't get the right answer, however, if you try to add multiple lengths in different units that are all represented as floats. Thus it may be necessary at some point to build a bridge to a package like [numericalunits](https://pypi.python.org/pypi/numericalunits) or alternately build something that canonicalizes them.

# DATATYPE(?o) classifies each literal; object properties are excluded here.
endpoint.select("""
    SELECT ?datatype (COUNT(*) AS ?cnt) {
        { GRAPH ?_pgraph { ?s ?p ?o . } }
        UNION
        { GRAPH ?_lgraph { ?s ?p ?o . } }
        BIND(DATATYPE(?o) AS ?datatype)
    } GROUP BY ?datatype
""")

dt_counts=endpoint.construct("""
    CONSTRUCT { ?datatype summary:count ?cnt . }
    WHERE {
        SELECT ?datatype (COUNT(*) AS ?cnt) {
            { GRAPH ?_pgraph { ?s ?p ?o . } }
            UNION
            { GRAPH ?_lgraph { ?s ?p ?o . } }
            BIND(DATATYPE(?o) AS ?datatype)
        } GROUP BY ?datatype
    }
""")

# ## Writing to disk
#
# RDFlib overloads the '+' operator so that we can easily merge the type, property and datatype counts into one (modestly sized) graph.

all_counts = t_counts + p_counts + dt_counts

# I add a few prefix declarations for (human) readability, then write the data to disk in Turtle format. I was tempted to write it to a relative path which would put this file in its final destination. (Underneath the `local` notebook directory, where it could be found by notebooks) but decided against it, since I don't want to take the chance of me (or you) trashing the project by mistake. Instead I'll have to copy the file into place later.

all_counts.bind("datatype","http://dbpedia.org/datatype/")
all_counts.bind("dbo","http://dbpedia.org/ontology/")
all_counts.bind("summary","http://rdf.ontology2.com/summary/")
all_counts.serialize("/data/schema_counts.ttl",format='ttl',encoding='utf-8')

# ## Bonus File: Human Dimensions
#
# While I had my copy of DBpedia running, I thought I'd gather a data set that would be worth making visualizations of. Quite a lot of data exists in DBpedia concerning people's body dimensions, so I decided to run a query and save the data for future use.

dimensions=endpoint.select("""
    select ?p ?height ?weight {
        GRAPH ?_pgraph {
            ?p <http://dbpedia.org/ontology/Person/weight> ?weight .
            ?p <http://dbpedia.org/ontology/Person/height> ?height .
        }
    }
""")

dimensions

# The data looks a bit messy. Most noticeably, I see quite a few facts which, instead of pointing to DBpedia concepts, point to synthetic URLs (such as `<Ron_Clarke__2>`) which are supposed to represent 'topics' such the time that a particular employee worked for a particular employer. (See [this notebook for some discussion of the phenomenon](http://ontology2.com/notebooks/remote/Querying%20DBpedia.html)).
#
# Filtering these out will not be hard, as these synthetic URLs all contain two consecutive underscores.
# # I also think it's suspicious that a few people have a height of `0.0`, which might be in the underlying data, or might be because Gastrodon is not properly handling a missing data value. # # It would be certainly possible to serialize these results into an RDF graph, but instead I write them into a CSV for simplicity. dimensions.to_csv("/data/people_weight.csv.gz",compression="gzip",encoding="utf-8") # ## Conclusion # # To continue the analysis I began [here](http://ontology2.com/notebooks/local/DBpedia_Schema_Queries.html), I needed a count of how often various classes, properties, and datatypes were used in DBpedia. API limits could make getting this data from the public SPARQL endpoint challenging, so I decided to run queries against my own # private SPARQL endpoint powered by the [Ontology2 Edition of DBpedia](https://aws.amazon.com/marketplace/pp/B01HMUNH4Q/). # # After setting up connection information, connecting to this private endpoint turned out to be as simple as connecting to a public endpoint and I was efficiently able to get the data I needed into an RDF graph, ready to merge with the DBpedia Ontology graph to make a more meaningful analysis of the data in DBpedia towards the goal of producing interesting and attractive visualizations.
notebooks/remote/Collecting_Counts_From_Ontology2_DBpedia.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Google stock-price prediction with a stacked LSTM.
# Trains on the first 80% of the opening-price series and evaluates on the rest.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Dropout
import os

# Load the raw sheet and keep only the opening price as a (t, 1) array.
df = pd.read_excel('C://Users//eorsswv//Reporting//Data//Google1.xls')
df.tail(2)

df = df['open'].values
df = df.reshape(-1, 1)
print(df.shape)
df[:5]

# 80/20 chronological split; the test slice starts 50 rows early so that the
# first test sample still has a full look-back window.
dataset_train = np.array(df[:int(df.shape[0] * 0.8)])
dataset_test = np.array(df[int(df.shape[0] * 0.8) - 50:])
print(dataset_train.shape)
print(dataset_test.shape)

# Scale into [0, 1]; the scaler is fit on the training slice only so no
# test-set statistics leak into training.
scaler = MinMaxScaler(feature_range=(0, 1))
dataset_train = scaler.fit_transform(dataset_train)
dataset_train[:5]

dataset_test = scaler.transform(dataset_test)
dataset_test[:5]


def create_dataset(df, window=50):
    """Slice a (t, 1) series into supervised look-back samples.

    Parameters
    ----------
    df : np.ndarray of shape (t, 1)
        The (scaled) price series.
    window : int, optional
        Number of past steps per sample. Defaults to 50, the original
        hard-coded value, so existing callers are unaffected.

    Returns
    -------
    (x, y) : tuple of np.ndarray
        x has shape (t - window, window); y has shape (t - window,) and
        holds the value immediately following each window.
    """
    x = []
    y = []
    for i in range(window, df.shape[0]):
        x.append(df[i - window:i, 0])
        y.append(df[i, 0])
    return np.array(x), np.array(y)


x_train, y_train = create_dataset(dataset_train)
x_train[:1]

x_test, y_test = create_dataset(dataset_test)
x_test[:1]

# Keras LSTMs expect (samples, timesteps, features).
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

# Four stacked 96-unit LSTM layers with dropout, then a single regression head.
model = Sequential()
model.add(LSTM(units=96, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=96, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=96, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=96))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(loss='mean_squared_error', optimizer='adam')

# Train only once; later runs reuse the weights saved on disk.
if not os.path.exists('stock_prediction.h5'):
    model.fit(x_train, y_train, epochs=50, batch_size=32)
    model.save('stock_prediction.h5')
model = load_model('stock_prediction.h5')

# One-step-ahead predictions on the test windows, mapped back to price scale.
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)

fig, ax = plt.subplots(figsize=(8, 4))
plt.plot(df, color='red', label="True Price")
ax.plot(range(len(y_train) + 50, len(y_train) + 50 + len(predictions)),
        predictions, color='blue', label='Predicted Testing Price')
plt.legend()

y_test_scaled = scaler.inverse_transform(y_test.reshape(-1, 1))
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(y_test_scaled, color='red', label='True Testing Price')
plt.plot(predictions, color='blue', label='Predicted Testing Price')
plt.legend()

# Recursive multi-step forecast: each prediction becomes the newest observation.
#
# BUG FIX: the original inverse-transformed the prediction to price scale and
# then vstacked that *unscaled* value into the scaled input window, so every
# step after the first fed the network values far outside the scaler's [0, 1]
# range. The window must stay in scaled space; only the value we *report* is
# inverse-transformed.
x = x_test[-1]
num_timesteps = 100
preds = []
for i in range(num_timesteps):
    data = np.expand_dims(x, axis=0)
    scaled_pred = model.predict(data)                       # still in [0, 1] space
    preds.append(scaler.inverse_transform(scaled_pred)[0][0])
    x = np.delete(x, 0, axis=0)                             # drop the oldest step
    x = np.vstack([x, scaled_pred])                         # append the newest (scaled) step
print(preds)
Google Stock Price Prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h2 align="center"> Multivariable Regression - Exercise </h2>
# ## Machine Learning - <NAME> ( Python Implementation)
#
# ## House Price Prediction

# ### Loading Data & Libraries

import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
# %matplotlib inline

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (12, 8)

# Load both splits, peel the target off the training set, then stack the two
# feature frames so that cleaning and encoding is applied to them identically.
data_train = pd.read_csv("train.csv")
data_test = pd.read_csv("test.csv")
y_train = data_train.iloc[:, -1].values
data_train.drop(["SalePrice"], axis=1, inplace=True)
data = pd.concat([data_train, data_test])

# ### Plotting Data

# Correlation heatmap of the training features.
corr = data_train.corr()
ax = sns.heatmap(
    corr,
    vmin=-1,
    vmax=1,
    center=0,
    cmap=sns.diverging_palette(20, 220, n=200),
    square=True,
)
ax.set_xticklabels(
    ax.get_xticklabels(),
    rotation=45,
    horizontalalignment='right',
);

# +
# sns.heatmap(data.corr(), annot = True, vmin=-1, vmax=1, center= 0, cmap= 'coolwarm', linewidths=3, linecolor='black')

# +
# sns.pairplot(data_train.dropna())
# -

data.head()
data.info()
data.describe()

# Drop any feature where more than half of the values are missing.
na_ratio = data.isnull().sum() / len(data)
sparse_cols = na_ratio.loc[na_ratio > 0.5].index
data.drop(sparse_cols, inplace=True, axis=1)
na_ratio.head()

data_train.corr()

# Drop features whose correlation coefficient with SalePrice is within +-0.3.
data.drop(
    ["Id", "YrSold", "MoSold", "MiscVal", "PoolArea", "ScreenPorch", "3SsnPorch",
     "EnclosedPorch", "KitchenAbvGr", "BedroomAbvGr", "HalfBath", "BsmtFullBath",
     "BsmtHalfBath", "LowQualFinSF", "BsmtUnfSF", "BsmtFinSF2", "MSSubClass",
     "OverallCond", "LotArea"],
    inplace=True,
    axis=1,
)

# +
# Missing basement categoricals mean "no basement" rather than unknown.
for col in ("BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2"):
    data[col].fillna("No", inplace=True)
# Fireplace and garage categoricals: NaN means the feature is absent.
data["FireplaceQu"].fillna("No", inplace=True)
for col in ('GarageType', 'GarageFinish', 'GarageQual', "GarageYrBlt", "GarageCond"):
    data[col].fillna('No', inplace=True)

# No garage implies zero cars and zero area.
for col in ('GarageCars', 'GarageArea'):
    data[col].fillna(0, inplace=True)

# Continuous features: impute with the column mean.
for col in ("LotFrontage", "MasVnrArea", "TotalBsmtSF", "BsmtFinSF1"):
    data[col].fillna(data[col].mean(), inplace=True)

# Remaining categoricals: impute with the most frequent value.
for col in ("MasVnrType", "MSZoning", "Utilities", "Exterior1st", "Exterior2nd",
            "Functional", "KitchenQual", "SaleType", "Electrical"):
    data[col].fillna(data[col].mode()[0], inplace=True)
# -

# One-hot encode the categorical columns. The same list serves as both the
# `columns` and `prefix` arguments (they were identical in the original call),
# and drop_first avoids the dummy-variable trap.
categorical_cols = [
    "MSZoning", "Street", "LotShape", "LandContour", "Utilities", "LotConfig",
    "LandSlope", "Neighborhood", "Condition1", "Condition2", "BldgType",
    "HouseStyle", "RoofStyle", "RoofMatl", "Exterior1st", "Exterior2nd",
    "MasVnrType", "ExterQual", "ExterCond", "Foundation", "BsmtQual",
    "BsmtCond", "BsmtExposure", "BsmtFinType1", "BsmtFinType2", "Heating",
    "HeatingQC", "CentralAir", "Electrical", "KitchenQual", "Functional",
    "FireplaceQu", "GarageType", "GarageYrBlt", "GarageFinish", "GarageQual",
    "GarageCond", "PavedDrive", "SaleType", "SaleCondition",
]
data = pd.get_dummies(data, columns=categorical_cols, prefix=categorical_cols,
                      drop_first=True)
data.head()

# +
# Convert the combined frame back into train (first 1460 rows) / test arrays.
X_train = data.iloc[:1460, :].values
X_test = data.iloc[1460:, :].values
# -

X_train
X_test

# Standardize the training features, then prepend the bias column of ones.
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)

m, n = X_train.shape[0], X_train.shape[1]
X_train = np.append(np.ones((m, 1)), X_train, axis=1)
# One parameter per feature plus the bias term.
theta = np.zeros((n + 1, 1))


def computeCost(X, y, theta):
    """
    Take in a numpy array X,y, theta and generate the cost function of
    using theta as parameter in a linear regression model
    """
    m = len(y)
    predictions = X.dot(theta)
    square_err = (predictions - y) ** 2

    return 1 / (2 * m) * np.sum(square_err)


def gradientDescent(X, y, theta, alpha, num_iters):
    """
    Take in numpy array X, y and theta and update theta by taking
    num_iters gradient steps with learning rate of alpha

    return theta and the list of the cost of theta during each iteration
    """
    m = len(y)
    J_history = []

    for i in range(num_iters):
        predictions = X.dot(theta)
        error = np.dot(X.transpose(), (predictions - y))
        descent = alpha * 1 / m * error
        theta -= descent
        J_history.append(computeCost(X, y, theta))

    return theta, J_history


# Run gradient descent
theta, J_history = gradientDescent(X_train, y_train.reshape(m, 1), theta, 0.01, 4000)

# visualize the cost function
plt.plot(J_history)
plt.xlabel("Iteration")
plt.ylabel(r"$J(\Theta)$")  # raw string: \T is not a valid escape sequence
plt.title("Cost function using Gradient Descent");


def predict(x, theta):
    """
    Takes in numpy array of x and theta and return the predicted value
    of y based on theta
    """
    return np.dot(x, theta)


# BUG FIX: the original called sc_X.fit_transform(X_test), which re-fits the
# scaler on the *test* data and therefore normalizes it with different
# statistics than the training features were scaled with, skewing every
# prediction. The scaler fitted on X_train must only .transform() the test set.
X_test = sc_X.transform(X_test)
X_test = np.append(np.ones((X_test.shape[0], 1)), X_test, axis=1)

# BUG FIX: the original wrote `predict = predict(X_test, theta)`, shadowing the
# predict() function with its own result; store the result under its own name.
predictions = predict(X_test, theta)

X_test
predictions

# export as csv
np.savetxt("submission.csv", predictions, fmt="%.1f")
Linear Regression Exercise - House Price Prediction/Linear Regression Exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/visiont3lab/machine-learning-fav/blob/master/notebook/google_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="oZjUvODbqhaC" # # Google Colaboratory # + [markdown] colab_type="text" id="O4xiNThbqxY4" # Uno Jupyter notebook è un insieme di celle che permettono sia di annotare informazioni (Celle di Testo) che di scrivere codice (Celle di Codice ). # + [markdown] colab_type="text" id="6QXcdFfbrhsK" # ## Text Cell # # # + [markdown] colab_type="text" id="eic-gYY7viEU" # Una cella di testo è una cella utilizzata per annotare appunti in modo semplice e veloce. Al fine di annotare velocemente informazioni si utilizza un astrazione del linguaggio html chiamato Markdown. Questo linguaggio permette velocemnte e in modo poco verbose di scrivere codice html. # + [markdown] colab_type="text" id="aiO2PUKjsQrZ" # ### Markdown # # + [markdown] colab_type="text" id="MKpimnpTvmOm" # Riassumiamo le principali caratteristiche del linguaggio Markdown che utilizzeremo per scrivere appunti nei nostri notebook. # # Maggiori informazioni sul linguaggio Markdown si posso trovare leggendo [Markdown Guide](https://colab.research.google.com/notebooks/markdown_guide.ipynb) e l'articolo [Mastering Markdown](https://guides.github.com/features/mastering-markdown/). 
# + [markdown] colab_type="text" id="RhLSUwhPtjzm" # #### Headers # # # # + [markdown] colab_type="text" id="xWWtTmM1vpx-" # > # # h1 # ## h2 # ### h3 # #### h4 # #### h5 # # + [markdown] colab_type="text" id="mOv-rVVfu_Q3" # #### Bold e Italic # # + [markdown] colab_type="text" id="NWs9RM85vsRG" # > **bold text** , *italicized text* ~~strikethrough~~ # > # **bold text** , *italicized text* , ~~strikethrough~~ # # # # # + [markdown] colab_type="text" id="UFaEWX_gvxZW" # #### Blockquote # # + [markdown] colab_type="text" id="-QyRQUKiwbDE" # # > 1 blockquote # >> 2 blockquote # # # > # > 1 blockquote # >> 2 blockquote # # + [markdown] colab_type="text" id="UHXClGV2whAN" # #### Liste # + [markdown] colab_type="text" id="jvAdkhsmwdwA" # Ordinate # 1. First item # 2. Second item # 3. Third item # # --- # # Non ordinate # > # - First item # - Second item # - Third item # # # - First item # - Second item # - Third item # # Lista di Task da fare # > # - [x] Write the press release # - [ ] Update the website # - [ ] Contact the media # # - [x] Write the press release # - [ ] Update the website # - [ ] Contact the media # # + [markdown] colab_type="text" id="gl2Wvji42X8V" # #### Latex Math formula # # + [markdown] colab_type="text" id="mSqBaT7v0DyP" # # > # 1. $\sqrt{3x-1}+(1+x)^2.$ # 2. $y=x^2$ # 3. $e^{i\pi} + 1 = 0$ # 4. $e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$ # 5. $\frac{n!}{k!(n-k)!} = {n \choose k}$ # 6. 
$A_{m,n} =\begin{pmatrix} # a_{1,1} & a_{1,2} & \cdots & a_{1,n} \\ # a_{2,1} & a_{2,2} & \cdots & a_{2,n} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m,1} & a_{m,2} & \cdots & a_{m,n} # \end{pmatrix}$ # # # > 1) $\sqrt{3x-1}+(1+x)^2.$ # # > 2) $y=x^2$ # # > 3) $e^{i\pi} + 1 = 0$ # # > 4) $e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$ # # > 5) $\frac{n!}{k!(n-k)!} = {n \choose k}$ # # > 6) $A_{m,n} = # \begin{pmatrix} # a_{1,1} & a_{1,2} & \cdots & a_{1,n} \\ # a_{2,1} & a_{2,2} & \cdots & a_{2,n} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m,1} & a_{m,2} & \cdots & a_{m,n} # \end{pmatrix}$ # # + [markdown] colab_type="text" id="A23Asms-wz6T" # #### Codice # # # + [markdown] colab_type="text" id="vd92w1JFxddU" # > # ``` # codice # ``` # # ```python # import numpy as np # a = np.array[1,2,3,4] # print("Vettore:", a) # s = np.sum() # print("Somma degli elementi del vettore: " + s) # ``` # + [markdown] colab_type="text" id="MX9wOmgKxyBw" # #### Link # # + [markdown] colab_type="text" id="wQZTXBkZyOzK" # > # [Github Repository](https://github.com/visiont3lab/tecnologie_data_science) # # [Github Repository](https://github.com/visiont3lab/tecnologie_data_science) # + [markdown] colab_type="text" id="THFS-ugLyKDz" # #### Image # + [markdown] colab_type="text" id="vr6NPQjGyqj7" # > # ![visionlab](https://raw.githubusercontent.com/visiont3lab/tecnologie_data_science/master/book/logo.png) # # ![visionlab](https://raw.githubusercontent.com/visiont3lab/tecnologie_data_science/master/book/logo.png) # + [markdown] colab_type="text" id="CdJECheqzBUc" # #### Table # # + [markdown] colab_type="text" id="UJxDXBqZzM19" # > # | Syntax | Description | # | ----------- | ----------- | # | Header | Title | # | Paragraph | Text | # # | Syntax | Description | # | ----------- | ----------- | # | Header | Title | # | Paragraph | Text | # + [markdown] colab_type="text" id="GTvSPLUE2k-A" # ## Code Cell # # + [markdown] colab_type="text" id="Fk2UDy_U36OM" # Una cella di codice serve per eseguire 
un programma scritto in linguaggio python. In particolare utilizzeremo sempre Python 3 Jupyter notebook. # # + [markdown] colab_type="text" id="xKoClhZ337Ft" # ### Shortcut per eseguire codice # # + [markdown] colab_type="text" id="NPlE2u6P47eQ" # * Cliccando l'icona di **Play** della cella il codice contenuto in essa viene eseguito. # * Cliccando **Cmd/Ctrl+Enter** viene eseguito il codice della cella evidenziata. # * Cliccando **Shift+Enter** viene eseguito il codice della cella evidenziata e il focus passa alla cella sucessiva. # * Cliccando **Alt+Enter** viene eseguito il codice della cella e viene inserita anche un nuovo blocco di codice. # # + [markdown] colab_type="text" id="HhqD7dvQ5epq" # ### Suggerimenti e auto-completamento # # + [markdown] colab_type="text" id="_K6xnHAT5jWM" # Google Colab ha un sistema di suggerimenti che viene triggerato schicciando Tab mentre si scrive codice. Esso è utile per scrivere velocemente codice ed evitare errori di sintassi. # + colab_type="code" id="mvgNkjkU33sE" colab={} # Importare la libreria numpy import numpy as np # + colab_type="code" id="jwNLfCxU7HND" colab={} np.random. # Se si prova a scrivere np.random. vedremo # apparire dei suggerimenti # Se premiamo Tab apparirà anche la documentazione # associata alla funzione # + colab_type="code" id="KslBi0ea53P5" colab={} # np.random? # Premendo Shift-Enter viene aperta la documentazione # Il punto interrogativo alla fine chiama # la documentazione quando il codice viene # eseguito # + [markdown] colab_type="text" id="oFEj2Tdc7m0o" # ### Esecuzione di comandi # # + [markdown] colab_type="text" id="uuduJjV58f54" # Google Colaboratory ci permette mediante uno Jupyter Notebook di eseguire il nostro codice Python e Mardown. Tuttavia è possibile che vi sia la necessità di installare programmi sul computer che ospita il nostro notebook. Per face ciò è necessario create una cella di codice e inserire il punto esclamativo **!** davanti all'istruzione che si vuole eseguire. 
Quest'ultima sarà un instruzione di sistema. Siccome la macchina che ospita il nostro notebook è una macchina linux è comune utilizzare il linguaggio **bash** per eseguire comando. # # Jupyter notebbok fornisce una lista di comandi che possono essere utilizzati per scrivere codice javascript (**%%javascript**), nascondere gli ouput del codice eseguito (**%%capture**) o creare un file utilizzando il notebook (**%%writefile**). Tali comandi vanno inseriti all'inizio del blocco di codice. La lista completa dei comandi si trova [Built-in-Magics Commands](https://ipython.readthedocs.io/en/stable/interactive/magics.html#cell-magics) # + colab_type="code" id="kKxrV6Ba-MX7" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="7c788d8a-06b3-40a1-ae6c-048a026358e8" # Percorso file a cui sono # !pwd # + colab_type="code" id="nzaid7yf9aY_" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="0ffd722f-9fc0-4a3e-b842-5c081c9c89c3" # Elencare i file contenuti in una cartella # !ls # + colab_type="code" id="bU9Od1Oi9a66" colab={"base_uri": "https://localhost:8080/", "height": 83} outputId="a2671dce-7b1c-46fe-a543-80cce022f985" # Elencare i file contenuti in una cartella # !cd sample_data && ls # + colab_type="code" id="DUQS50tz9oXb" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="51c3e0d1-fe80-4cf7-f2d7-8bf643022ac3" # Entrare in un cartella # %cd sample_data/ # + colab_type="code" id="3aNomQb-9xxF" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="f3827920-7da3-4a57-a880-a5ffd3644c2e" # Muovermi nella cartella superiore # %cd .. # + colab_type="code" id="f44jon7X-2fR" colab={} # Nascondere l'output di un comando. 
# I comandi di installazione generano output verbosi che si # possono nascondere aggiungendo alla cella il comando # %%capture # Installare un pacchetto python usando # pip package manager # # !pip list # elencare i pacchetti installati # !pip install names # !apt-get -qq install -y libfluidsynth1 # + colab_type="code" id="sWag_S9cAu_K" colab={"base_uri": "https://localhost:8080/", "height": 133} outputId="7121e532-15a7-4365-c253-4d160502d389" # Clonare un repositorio git # !git clone https://github.com/visiont3lab/tecnologie_data_science.git && ls # + colab_type="code" id="XBkfgDSX_BX9" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="568e0e40-4de4-4e17-dd5d-7316132a57cd" # Creazione di un file python myLib.py # %%writefile myLib.py import numpy as np import plotly.graph_objects as go from matplotlib import pyplot as plt def myplot(x,y): #fig = go.Figure() #fig.add_trace( go.Scatter(x=x,y=y, mode="lines+markers", name="Line") ) fig = plt.plot(x,y) return fig # + colab_type="code" id="HxnRidB4JWdh" colab={"base_uri": "https://localhost:8080/", "height": 264} outputId="85177256-2ea4-46ec-b15a-a1a8712d4bac" # Utilizzo del file mylib.py creato from importlib import reload import myLib myLib = reload(myLib) x = np.arange(0.0, 2.0, 0.01) y = 1 + np.sin(2 * np.pi * x) fig = myLib.myplot(x,y) # + colab_type="code" id="hkf_xNEwD7lH" colab={"base_uri": "https://localhost:8080/", "height": 183} outputId="40afdc4a-6756-426f-d7cc-fa4d34354313" magic_args="bash" language="script" # # È possibile scrivere codice bash # ls > name.txt; # cat name.txt # for i in 1 2 3; do # echo $i # done # + [markdown] colab_type="text" id="dtAVg2sDKS4O" # ## Montare Google Drive come hard disk esterno # + [markdown] colab_type="text" id="uk-pQLgBLT7O" # Una delle qualità di Google Colaboratory è il fatto che esso è connesso a Google Drive. Questo fa si che i notebook che vengono creati utilizzando Google Colab vengano automaticamente salvati nel drive dell'utente. 
Inoltre è possibile accedere al prorpio drive e lavorare con dei file contenuti in esso. Questo approccio è estramente interessante in quanto permette di combinare l'abilità di scrivere codice di Google Colab con le capacità di archiviazione file fornite da Google Drive. Di seguito `è riporatata la sequenza di comandi necessaria per montare il drive dell'utente all'interno di Google Colab. # # La sequenza di instruzioni sucessiva equivale al bottone chiamata **Mount Drive** che si a sinistra nel gruppo **Files**. # + colab_type="code" id="n4LgN1RMKaFv" colab={"base_uri": "https://localhost:8080/", "height": 120} outputId="fda156fb-b770-4fa6-a7e9-825a8bb7cdeb" from google.colab import drive drive.mount('/content/drive') # + colab_type="code" id="LBpIP587KvUD" colab={"base_uri": "https://localhost:8080/", "height": 66} outputId="418997a7-9bfb-4dc3-8fc9-8297b3f68347" # %cd /content/drive/My Drive/lessons/tecnologie_data_science # !ls
notebook/google_colab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/YuichiHirano/Japanese_Yomigana/blob/master/Japanese_Yomigana.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="_2UAUSKQRPAU" colab_type="text"
# Install MeCab, following (https://qiita.com/Naritoshi/items/8f55d7d5cce9ce414395).

# + id="XukOclEtRHjN" colab_type="code"
# !apt-get -q -y install sudo file mecab libmecab-dev mecab-ipadic-utf8 git curl python-mecab

# + id="QlKbsCF1Rg7u" colab_type="code"
# !git clone --depth 1 https://github.com/neologd/mecab-ipadic-neologd.git
# !echo yes | mecab-ipadic-neologd/bin/install-mecab-ipadic-neologd -n

# + id="eK1SuHKFR6ho" colab_type="code" language="bash"
# mecab
# メカブって神ってる

# + id="w-cOrb9ySTn5" colab_type="code"
# Rewrite the MeCab config so it points at the NEologd dictionary.
# !sed -e "s!/var/lib/mecab/dic/debian!/usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd!g" /etc/mecabrc > /etc/mecabrc.new

# + id="aKvNE0wgSboc" colab_type="code"
# !cat /etc/mecabrc.new

# + id="-dydLmwkT5un" colab_type="code"
# Back up the old config, then install the new one.
# !cp /etc/mecabrc /etc/mecabrc.org
# !cp /etc/mecabrc.new /etc/mecabrc

# + id="VbLZC-ktUZsF" colab_type="code"
# !cat /etc/mecabrc

# + id="jvnSkkTpSfjB" colab_type="code" language="bash"
# mecab
# メカブって神ってる

# + id="g04W-KQQSmap" colab_type="code"
# !apt-get -q -y install swig

# + id="raw4ZwrtS9kl" colab_type="code"
# !pip install mecab-python3

# + id="6-t4Abf6W0k4" colab_type="code"
# !pip install jaconv

# + [markdown] id="BkPnP39XTGdd" colab_type="text"
# Setup done.

# + id="l9IB5m9BTB2w" colab_type="code"
import MeCab
import re
import jaconv

# + id="BUY6FvUmTVC7" colab_type="code"
# Smoke test: make sure the tagger parses with the new dictionary.
mecab = MeCab.Tagger()
res = mecab.parse('メカブって神ってるaaa')
print(res)

# + [markdown] id="aQ616W3wVB9u" colab_type="text"
# The reading (yomigana) conversion below follows
# (https://qiita.com/yonedaco/items/27e1ad19132c9f1c9180); hiragana/katakana
# detection per (https://qiita.com/mocha_xx/items/00c5a968f7069d8e092c) and
# the regex notes in (https://qiita.com/fubarworld2/items/9da655df4d6d69750c06).

# + id="kthLIsc03TcF" colab_type="code"
# Full-match patterns for all-hiragana / all-(full-width-)katakana strings.
re_hiragana = re.compile(r'^[ぁ-ん]+$')
re_katakana = re.compile(r'([ァ-ン]|ー)+')


# + id="3wGQFcDDVK59" colab_type="code"
def sentence_to_yomi(sentence):
    """Return the full-width katakana reading of *sentence*.

    Each MeCab token contributes its dictionary reading (feature index 7)
    when available; all-hiragana tokens are converted to katakana directly.
    Tokens whose reading is not pure full-width katakana (latin words,
    digits, etc.) are dropped, as are punctuation and the BOS/EOS markers.
    """
    mecabTagger = MeCab.Tagger()
    node = mecabTagger.parseToNode(sentence)
    yomi = ""
    while node:
        word = node.surface
        features = node.feature.split(",")
        print(word, features)
        node = node.next
        word_yomi = ""
        # Sentence begin/end markers carry no text.
        if features[0] == 'BOS/EOS':
            continue
        # Punctuation has no reading.
        if features[0] == '記号':
            continue
        if len(features) == 9:
            # Index 7 is the reading; index 8 (pronunciation) would collapse
            # long vowels, so 7 is the one we want.
            word_yomi = features[7]
        elif re_hiragana.fullmatch(word):
            # BUG FIX: the original used jaconv.hira2hkata(), which produces
            # *half-width* katakana; that can never match the full-width
            # range in re_katakana below, so every all-hiragana word was
            # silently dropped from the reading. hira2kata() yields the
            # intended full-width katakana.
            word_yomi = jaconv.hira2kata(word)
        else:
            word_yomi = word
        # TODO: word_yomi mixing katakana and hiragana is still not handled.
        if re_katakana.fullmatch(word_yomi):
            yomi += word_yomi
    return yomi


# + id="HcSV9Dht3jpU" colab_type="code"
# Rough test sentence.
sentence_to_yomi("ちょー1111!!!!、マジウケルwwww。日本語正しく使うの大変ンゴ。とりあえずたなかたろうさんとlunchしてくるわー")

# + id="3nxsy9-x3mUY" colab_type="code"
# Rough test 2.
sentence_to_yomi("暴れまわるAIの暴走を止める人類最後の戦士Arthurの物語")

# + id="wp9aZVZo3pBV" colab_type="code"
# Long-vowel check.
sentence_to_yomi("パートナーシップ証明書を持つカップル")
Japanese_Yomigana.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # SCOPRIRE GLI OPENDATA DELLA SCUOLA ITALIANA # # ![open data miur](http://dati.istruzione.it/opendata/img/pagine-interne/copertina-open-data.jpg) # # # ![](http://dati.istruzione.it/opendata/img/home/home_icon_01_opendata.png) # # [I DATI DISPONIBILI](#opendatalist) # http://dati.istruzione.it/opendata/opendata/ # # --- # # SCUOLE # <img src="http://dati.istruzione.it/opendata/img/pagine-interne/CTSCUOLA.svg" width="100" align="right"> # ## [Informazioni anagrafiche scuole](#anagraficascuole) # http://dati.istruzione.it/opendata/opendata/catalogo/elements1/?area=Scuole # # sono presenti i dati con le informazioni riguardanti l'anagrafe delle scuole negli anni scolastici # * scuole statali # * scuole paritarie # * scuole statali delle province autonome di Trento, Bolzano e Aosta # * scuole paritarie delle province autonome di Trento, Bolzano e Aosta # # Gli anni scolastici presi in considerazione sono: **2015/2016, 2016,2017 e 2017/2018**. # # --- # # STUDENTI # <img src="http://dati.istruzione.it/opendata/img/pagine-interne/CTSTUDENTI.svg" width="100" align="right"> # ## [informazioni sugli studenti](#anagraficastudenti) # http://dati.istruzione.it/opendata/opendata/catalogo/elements1/?area=Studenti # ### [Studenti per anno di corso e fascia di età](#studentifasceeta). # informazioni riguardanti il numero degli studenti per anno di corso e fascia di eta'. # * scuole statali # * scuole paritarie # # Gli anni scolastici presi in considerazione sono: **2015/2016 e 2016/2017**. # # ### [Studenti per anno di corso, classe e genere](#studenticlassegenere) # informazioni riguardanti il numero degli studenti per anno di corso, classe e genere. 
# * scuole statali # * scuole paritarie # # Gli anni scolastici presi in considerazione sono: **2015/2016 e 2016/2017**. # # --- # # EDILIZIA # <img src="http://dati.istruzione.it/opendata/img/pagine-interne/CTEDILIZIA.svg" width="100" align="right"> # ## [Edilizia Scolastica](#edilizia) # http://dati.istruzione.it/opendata/opendata/catalogo/elements1/?area=Edilizia%20Scolastica # ### [Elenco degli edifici scolastici attivi e loro locazione](#edifici) # informazioni riguardanti l'anagrafica dell'edificio scolastico e la sua localizzazione # * scuole statali # # Sono presenti i dati per l'anno scolastico **2015/2016**. # # # ### [Presenza di macro ambiti funzionali degli edifici scolastici (palestra, mensa..)](#edificiextra) # informazioni riguardanti la presenza o meno di macro ambiti funzionali relativamente # * scuole statali # # Sono presenti i dati per l'anno scolastico **2015/2016**. # # # ### [Raggiungibilità degli edifici scolastici (mezzi pubblici, scuolabus..)](#raggiungibilita) # informazioni riguardanti la raggiungibilita' del plesso attraverso differenti modalità di trasporto relativamente alle scuole Statali di ogni ordine e grado. # * scuole statali # # Sono presenti i dati per l'anno scolastico **2015/2016**. # # --- # # VALUTAZIONE # <img src="http://dati.istruzione.it/opendata/img/pagine-interne/CTSISTEMA.svg" width="100" align="right"> # ## [SISTEMA NAZIONALE DI VALUTAZIONE](#valutazione) # http://dati.istruzione.it/opendata/opendata/catalogo/elements1/?area=Sistema%20Nazionale%20di%20Valutazione # ### [regole valutazione utilizzata dalla scuola per l'autovalutazione.](#regolevalutazione) # informazioni riguardanti la rubrica di valutazione di ciascun criterio di qualita' utilizzato per l'autovalutazione delle sezioni Esiti e Processi. Scuola statale e scuola paritaria. # * scuole statali # * scuole paritarie # # Sono presenti i dati per l'anno scolastico **2015/2016 e 2016/2017**. 
# # # ### [Autovalutazione della scuola sezione ESITI.](#autovalutazioneesiti) # informazioni riguardanti l'autovalutazione in termini di: risultati scolastici, risultati nelle prove standardizzate nazionali, competenze chiave e di cittadinanza, risultati a distanza. # * scuole statali # * scuole paritarie # # # Sono presenti i dati per l'anno scolastico **2015/2016 e 2016/2017**. # # # ### [Autovalutazione della scuola sezione PROCESSI)](#autovalutazioneprocessi) # informazioni riguardanti l'autovalutazione in termini di: curricolo, progettazione e valutazione, abiente di apprendimento, inclusione e differenziazione, continuità e orientamento, orientamento strategico e organizzazione della scuola, sviluppo e valorizzazione delle risorse umane, integrazione con il territorio e rapporti con le famiglie # * scuole statali # * scuole paritarie # # # Sono presenti i dati per l'anno scolastico **2015/2016 e 2016/2017**. # # --- # # PERSONALE SCUOLA # <img src="http://dati.istruzione.it/opendata/img/pagine-interne/CTPERSONALE.svg" width="100" align="right"> # ## [PERSONALE DOCENTE E NON DOCENTE](#personale) # http://dati.istruzione.it/opendata/opendata/catalogo/elements1/?area=Personale%20Scuola # ### [Docenti a tempo indeterminato](#docentiindeterminati) # informazioni riguardanti il numero del personale supplente docente per grado di istruzione, tipo posto, tipo supplenza, genere e fascia di età # * scuole statali # # Sono presenti i dati per l'anno scolastico **2015/2016 e 2016/2017**. # # # ### [ATA a tempo indeterminato](#ataindeterminati) # informazioni riguardanti il numero del personale titolare ATA per genere e fascia di eta'. # * scuole statali # # Sono presenti i dati per l'anno scolastico **2015/2016 e 2016/2017**. 
# # ### [Docenti a tempo determinato](#docentideterminati) # informazioni riguardanti il numero del personale supplente docente per grado di istruzione, tipo posto, tipo supplenza, genere e fascia di età # * scuole statali # # Sono presenti i dati per l'anno scolastico **2015/2016 e 2016/2017**. # # # ### [ATA a tempo determinato](#atadeterminati) # informazioni riguardanti il numero del personale titolare ATA per genere e fascia di eta'. # * scuole statali # # Sono presenti i dati per l'anno scolastico **2015/2016 e 2016/2017**. # # --- # # INIZIAMO AD INVESTIGARE I DATI # ![](https://upload.wikimedia.org/wikipedia/commons/thumb/a/a4/Magnifying_glass_with_focus_on_glass.png/640px-Magnifying_glass_with_focus_on_glass.png) # # ![](https://raw.githubusercontent.com/napo/opendatamiur/master/images/icons_opendata_miur.png) # # # <img src="https://image.flaticon.com/icons/svg/28/28842.svg" width="100" align="left" /> <h2>CSV = Comma-Separated Values</h2> # > Il **comma-separated values** (abbreviato in CSV) è un formato di file basato su **file di testo** utilizzato per l'importazione ed esportazione (ad esempio da fogli elettronici o database) di una **tabella di dati**. 
# > (fonte: Wikipedia)
#
# Esempio:
#
# **tabella di dati**
#
# NOME|COGNOME|PROFESSIONE|ANNONASCITA|ANNOMORTE
# :---|:------|:----------|----------:|--------:
# Giulio|Cesare|militare|101 ac|44 ac
# Dante|Aligheri|poeta|1265 dc|1321 dc
# Alessandro|Manzoni|scrittore|1785 dc|1873 dc
#
#
# **in CSV**
# ```
# NOME,COGNOME,PROFESSIONE,ANNONASCITA,ANNOMORTE
# Giulio,Cesare,militare,101 ac,44 ac
# Dante,Aligheri,poeta,1265 dc,1321 dc
# Alessandro,Manzoni,scrittore,1785 dc,1873 dc
# ```
#
# ---
# <center><H1>ED ORA TOCCA A PYTHON</H1></center>
# ![](https://upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Python3-powered_hello-world.svg/200px-Python3-powered_hello-world.svg.png)
#
# ---
# # RECUPERARE I DATI
# <center><h1>Diamo a python i poteri del panda</h3></center>
# ![Kung fu panda](https://i2.wp.com/de.straba.us/wp-content/uploads/2018/03/1gKYCyrcudAeE5e5KAbRhBQ-620x180-1.jpeg)

import pandas as panda

# indirizzo del csv con i dati delle scuole italiane del 2017/2018
OPENDATA = 'http://dati.istruzione.it/opendata/opendata/catalogo/elements1/'
url = OPENDATA + 'SCUANAGRAFESTAT20171820170901.csv'
scuole = panda.read_csv(url)

# ... una sbirciatina alle prime *tre* righe
scuole.head(3)

# e come è strutturata la tabella (la sua forma)
scuole.shape

# **51216** => primo valore rappresenta il numero di righe
#
# **20** => il secondo valore rappresenta il numero di colonne
# che possiamo conoscere così
scuole.columns

# l'elenco mostra le colonne seguendo l'ordine di come la tabella è strutturata.
#
# *nota*: nel linguaggio del panda in realtà abbiamo creato un DataFrame e non una tabella

# possiamo contarle usando questa funzione
scuole.columns.size

# Per conoscere qualche dettaglio su una colonna si può chiedere la descrizione.
#
# Es. per la colonna "**REGIONE**"
scuole.REGIONE.describe()

# count => numero di righe **51216**
#
# unique => i valori univoci **18**
#
# top => il valore più ricorrente **LOMBARDIA**
#
# freq => quante volte si ripete quel valore nella tabella **6494**
#
# *ATTENZIONE*: le regioni italiane sono *20* e non *18*!
#
# Mancano le Province Autonome (Aosta, Trento e Bolzano)
# ---

# andiamo ad aggiungere le scuole delle province autonome che si trovano a questo indirizzo
#
# http://dati.istruzione.it/opendata/opendata/catalogo/elements1/SCUANAAUTSTAT20171820170901.csv
url = OPENDATA + 'SCUANAAUTSTAT20171820170901.csv'
scuole_province_autonome = panda.read_csv(url)

# ... una sbirciatina
scuole_province_autonome.head(3)

# dimensioni
scuole_province_autonome.shape

# Accidenti!
#
# **C'è una colonna in meno!**
scuole.columns.size
scuole_province_autonome.columns.size

# verifica
scuole.columns.size - scuole_province_autonome.columns.size

# e ancora verifica
scuole.columns.equals(scuole_province_autonome.columns)

# In cosa cambiano?
scuole.columns.difference(scuole_province_autonome.columns)

# vediamo la descrizione di **SEDESCOLASTICA**
scuole.SEDESCOLASTICA.describe()

# Contiene solo due valori univoci (*unique*)
#
# Quali sono?
scuole.SEDESCOLASTICA.unique()

# .. è molto probabile che la tabella delle scuole delle province autonome da per scontato il valore di SEDESCOLASTICA
#
# Proviamo ad unire la tabella creandone una nuova.

# + slideshow={"slide_type": "fragment"}
scuole_italiane = panda.concat([scuole, scuole_province_autonome])
scuole_italiane.head(3)
# -

scuole_italiane.shape
scuole_italiane.REGIONE.describe()

# Vediamo come sono distribuite le scuole per ogni regione italiana
scuolexregione = scuole_italiane.groupby(scuole_italiane.REGIONE).size()
scuolexregione

# e con un grafico
scuolexregione.plot.bar(title="numero di scuole per regione", figsize=(15, 10))
# Aggiungiamo anche quelle paritarie
#
# Questo è l'indirizzo
#
# http://dati.istruzione.it/opendata/opendata/catalogo/elements1/SCUANAGRAFEPAR20171820170901.csv
#
# Anche questa volta carichiamo il CSV con panda.read_csv(url)
#

url = 'http://dati.istruzione.it/opendata/opendata/catalogo/elements1/SCUANAGRAFEPAR20171820170901.csv'
scuole_paritarie=panda.read_csv(url)

scuole_paritarie.shape

scuole_paritarie.columns

scuole_italiane.columns.difference(scuole_paritarie.columns)

# ... si tratta di attributi non necessari per le scuole paritarie perchè non strutturate dentro delle gerarchie

scuole_paritarie.REGIONE.describe()

# ... devo prendere anche quelle delle province autonome
#
# http://dati.istruzione.it/opendata/opendata/catalogo/elements1/SCUANAAUTPAR20171820170901.csv
#

url='http://dati.istruzione.it/opendata/opendata/catalogo/elements1/SCUANAAUTPAR20171820170901.csv'
scuole_paritarie_province_autonome = panda.read_csv(url)

scuole_paritarie_province_autonome.shape

scuole_paritarie_province_autonome.REGIONE.describe()

scuole_paritarie_province_autonome.columns

scuole_italiane = panda.concat([scuole_italiane,scuole_paritarie_province_autonome,scuole_paritarie])

scuole_italiane.CODICESCUOLA.size

scuole_italiane.groupby(scuole_italiane.REGIONE).size()

scuole_italiane.groupby(scuole_italiane.REGIONE).size().plot.bar(title="numero di scuole per regione",figsize=(15,10))

scuole_italiane.groupby(scuole_italiane.REGIONE).size().plot.bar(figsize=(15,5),title="numero di scuole per regione")

# # SISTEMA NAZIONALE DI VALUTAZIONE

url='http://dati.istruzione.it/opendata/opendata/catalogo/elements1/VALUTAZIONE_ESITI_STA20161720170831.csv'
valutazioni_esiti1617=panda.read_csv(url)

url='http://dati.istruzione.it/opendata/opendata/catalogo/elements1/VALUTAZIONE_PROCESSI_STA20161720170831.csv'
valutazioni_processi1617=panda.read_csv(url)

# ## criteri di valutazione

# criteri di valutazione
url = 'http://dati.istruzione.it/opendata/opendata/catalogo/elements1/RUBRICA_VAL20161720170831.csv'
criteri_valutazione1617 = panda.read_csv(url)

criteri_valutazione1617.shape

criteri_valutazione1617.columns

criteri_valutazione1617.sort_values("CODICECRITERIOQUALITA").head(3)

# ## CRITERI DI QUALITÀ

criteri_valutazione1617.groupby(["CODICECRITERIOQUALITA","CRITERIOQUALITA"]).size()

# # Punteggio di Riferimento
#
# Valore utilizzato dall'istituto per l'assegnazione del punteggio per i criteri di valutazione.
#
# I valori sono compresi tra *1* e *7* e vengono indicati *1*, *3*, *5* e *7*; i valori 2, 4 e 6 _non_ sono esplicitati e permettono di posizionare le scuole che riscontrano una corrispondenza tra la descrizione e la situazione effettiva solo in relazione ad alcuni aspetti.
#
# 1 = situazione molto critica;
#
# 3 = situazione con qualche criticità;
#
# 5 = situazione positiva;
#
# 7 = situazione eccellente

# ---
# # EDILIZIA SCOLASTICA

# edifici scolastici
url='http://dati.istruzione.it/opendata/EDIANAGRAFESTA20160831.csv'
ediliziascolastica=panda.read_csv(url)

ediliziascolastica.head(3)

# a quali anni scolastici fanno riferimento?
ediliziascolastica.ANNOSCOLASTICO.unique()

# .. solo anno scolastico **2015/2016**

# ## strutture annesse alla scuola

# spazi didattici, aula magna, mensa, palestra/piscina, spazi amministrativi
url='http://dati.istruzione.it/opendata/opendata/catalogo/elements1/EDIAMBFUNZSTA20160831.csv'
strutturescolastiche = panda.read_csv(url)

strutturescolastiche.head(3)

# a quali anni scolastici fanno riferimento?
strutturescolastiche.ANNOSCOLASTICO.unique()

# ... solo anno scolastico **2015/2016**

# ## raggiungibilità degli edifici

url='http://dati.istruzione.it/opendata/opendata/catalogo/elements1/EDICOLLEGAMENTISTA20160831.csv'
raggiungibilita= panda.read_csv(url)

raggiungibilita.head(3)

# ---
# # ANAGRAFICA STUDENTI

# ## Studenti per anno di corso e fascia di età. Scuola statale.
#studenti scuola statale 2016/2017 url='http://dati.istruzione.it/opendata/opendata/catalogo/elements1/ALUCORSOETASTA20161720170831.csv' anagrafica_studenti1516 = panda.read_csv(url) anagrafica_studenti1516.head(3) anagrafica_studenti1516.shape anagrafica_studenti1516.columns # ## Studenti per anno di corso, classe e genere. Scuola statale. url='http://dati.istruzione.it/opendata/opendata/catalogo/elements1/ALUCORSOINDCLASTA20161720170831.csv' anagrafica_studenti_genere_1516= panda.read_csv(url) anagrafica_studenti_genere_1516.head(3) anagrafica_studenti_genere_1516.shape anagrafica_studenti_genere_1516.columns # --- # # Scuole della Regione Emilia-Romagna # + active="" # ... e prendiamo quelle della Regione Emilia-Romagna # - scuole_RER=scuole_italiane[scuole_italiane.REGIONE == 'EMILIA ROMAGNA'] # altra sbirciatina :) scuole_RER.head(3) scuole_RER.shape # come sono distribuite le scuole dell'Emilia Romagna per provincia? # # - devo raggrupparle (groupby) # - e contarle (size) scuole_province_RER = scuole_RER.groupby(scuole_RER.PROVINCIA).size() # e a questo punto posso visualizzarle scuole_province_RER.plot.bar(figsize=(15,5),title="distribuzione scuole per province dell'Emilia Romagna") # Quante gradi di istruzione esistono? 
# # (*unique*) mi serve perchè il valore potrebbe ripetersi) tipi_scuole = scuole_RER.DESCRIZIONETIPOLOGIAGRADOISTRUZIONESCUOLA.unique() # vediamo quante sono (size) tipi_scuole.size # e quali sono tipi_scuole # Mi incuriosce questo "IST PROF PER I SERVIZI PUBBLICITARI" formazione_servizi_pubblicitari=scuole_RER[scuole_RER.DESCRIZIONETIPOLOGIAGRADOISTRUZIONESCUOLA =='IST PROF PER I SERVIZI PUBBLICITARI'] formazione_servizi_pubblicitari.shape codice_scuola = formazione_servizi_pubblicitari.CODICESCUOLA.values[0] nomescuola = formazione_servizi_pubblicitari.DENOMINAZIONESCUOLA.values[0] print(codice_scuola) print(nomescuola) tipi_scuole = scuole_RER.DESCRIZIONETIPOLOGIAGRADOISTRUZIONESCUOLA.unique() eta_studenti_rossellini= anagrafica_studenti1516[anagrafica_studenti1516.CODICESCUOLA == codice_scuola] eta_studenti_rossellini.sort_values(by=['ANNOCORSO']) tabella_studenti_eta=eta_studenti_rossellini.pivot_table(index='ANNOCORSO', columns='FASCIAETA', values='ALUNNI', aggfunc='sum', fill_value=0) tabella_studenti_eta tabella_studenti_eta.plot.barh(stacked=True,figsize=[10,5],title="distribuzione età studenti per anno di corso") anagrafica_genere_rossini=anagrafica_studenti_genere_1516[anagrafica_studenti_genere_1516.CODICESCUOLA == codice_scuola] anagrafica_genere_rossini classi_genere_rossini=anagrafica_genere_rossini[['ANNOCORSOCLASSE','ALUNNIFEMMINE','ALUNNIMASCHI']] table=classi_genere_rossini.pivot_table(classi_genere_rossini,index=["ANNOCORSOCLASSE"]).fillna(0) table.plot.bar(stacked=True,figsize=(15,5),colormap='Pastel2',title="numero di alunni per anno di corso e distinzione fra maschi e femmine") # per la color map guarda qui https://matplotlib.org/examples/color/colormaps_reference.html # --- # Cerchiamo informazioni su *Istituto Enrico Mattei* - http://www.istitutomattei.bo.it/ # # Il sito web dichiara che ospita 4 scuole: # 1. IST. TEC. ECONOMICO # 2. LICEO SCIENTIFICO # 3. LICEO delle SCIENZE UMANE # 4. 
LICEO ECONOMICO SOCIALE # # Sede: # Via delle rimembranze n°26 - 40068 San Lazzaro di Savena # # tel: 051/464510 # # fax: 051/452735 # # C.F. 920046 00372 # # --- # cerchiamo quindi nelle scuole della Regione Emilia-Romagna quelle che hanno nome che contiene "MATTEI" e che sono nel comune di San Lazzaro di Savena scuole_RER[(scuole_RER.DENOMINAZIONESCUOLA.str.contains('MATTEI', na=False) & (scuole_RER.DESCRIZIONECOMUNE=='SAN LAZZARO DI SAVENA'))] istituto_enrico_mattei = scuole_RER[scuole_RER.CODICEISTITUTORIFERIMENTO=='BOIS017008'] codiceistituto_enrico_mattei = istituto_enrico_mattei.CODICEISTITUTORIFERIMENTO.unique()[0] codiceistituto_enrico_mattei # quante scuole sono? istituto_enrico_mattei.shape[0] # andiamo a vederle istituto_enrico_mattei # Recuperiamo i codici delle scuole codici_scuole_mattei = istituto_enrico_mattei.CODICESCUOLA.values codici_scuole_mattei # In singoli valori si possono recuperare per indice da 0 in poi # # Esempi codici_scuole_mattei[0] codici_scuole_mattei[1] codici_scuole_mattei[2] # ... proviamo a visualizzarle una volta usando un "ciclo for" # # La sintassi è questa (tutti i comandi iniziano con dalla tabulazione) # ```python # for nome_variabile in lista_di_valori: # print(nome_variabile) # ``` # # Ecco l'implementazione # for codice in codici_scuole_mattei: scuola = istituto_enrico_mattei[istituto_enrico_mattei.CODICESCUOLA==codice] print(codice) print(scuola.DESCRIZIONETIPOLOGIAGRADOISTRUZIONESCUOLA.values[0]) print("--") # Quanti studenti sono all'ISTITUTO TECNICO COMMERCIALE (anno 2015/2016) # # Interroghiamo l'anagrafica per conoscere gli studenti (codice scuola = BOTD01701E) # anagrafica_studenti1516[anagrafica_studenti1516.CODICESCUOLA=='BOTD01701E'].head(3) # ... 
e facciamo la somma degli ALUNNI anagrafica_studenti1516[anagrafica_studenti1516.CODICESCUOLA=='BOTD01701E'].ALUNNI.sum() # guardiamo i valori scuola per scuola con un ciclo for e contiamo anche il totale degli studenti totale_studenti = 0 for codice in codici_scuole_mattei: scuola = istituto_enrico_mattei[istituto_enrico_mattei.CODICESCUOLA==codice] print(codice) totale = anagrafica_studenti1516[anagrafica_studenti1516.CODICESCUOLA==codice].ALUNNI.sum() print(totale) print("---") totale_studenti = totale_studenti + totale print("totale studenti ", totale_studenti) # estrazione dati anagrafica dell' istituto tecnico commerciale del mattei # ed # estrazione dati anagrafica del liceo del mattei anagrafica_itc_mattei=anagrafica_studenti1516[anagrafica_studenti1516.CODICESCUOLA=='BOTD01701E'] anagrafica_liceo_mattei=anagrafica_studenti1516[anagrafica_studenti1516.CODICESCUOLA=='BOPS01701P'] # ed anche quella per genere anagrafica_studenti_genere_itc_mattei=anagrafica_studenti_genere_1516[anagrafica_studenti_genere_1516.CODICESCUOLA=='BOTD01701E'] anagrafica_studenti_genere_liceo_mattei=anagrafica_studenti_genere_1516[anagrafica_studenti_genere_1516.CODICESCUOLA=='BOPS01701P'] # e vediamo le valutazioni 2016/2017 per il codice di istituto valutazioni_esiti1617[valutazioni_esiti1617.CODICEISTITUTO==codiceistituto_enrico_mattei] valutazioni_processi1617[valutazioni_processi1617.CODICEISTITUTO==codiceistituto_enrico_mattei] valutazioni_enrico_mattei = panda.concat([valutazioni_esiti1617,valutazioni_processi1617]) valutazioni_enrico_mattei.head(3) tabella_valutazioni = valutazioni_enrico_mattei[['PUNTEGGIOSCUOLA','CODICECRITERIO']] grafico_valutazioni = tabella_valutazioni.pivot_table(tabella_valutazioni,'CODICECRITERIO').fillna(0) valutazioni_enrico_mattei.columns[0] grafico_valutazioni grafico_valutazioni.plot.barh(figsize=(15,5),title='punteggio della scuola sui criteri di valutazione') ediliziascolastica.columns # ***attenzione ai campi che hanno nome che inizia con uno 
spazio!!!*** edificio_itc_mattei=ediliziascolastica[ediliziascolastica[' CODICESCUOLA'] == 'BOTD01701E'] edificio_itc_mattei edificio_liceo_mattei=ediliziascolastica[ediliziascolastica[' CODICESCUOLA'] == 'BOPS01701P'] edificio_liceo_mattei strutturescolastiche.columns struttura_itc_mattei=strutturescolastiche[strutturescolastiche[' CODICESCUOLA'] == 'BOTD01701E'] struttura_itc_mattei struttura_liceo_mattei=strutturescolastiche[strutturescolastiche[' CODICESCUOLA'] == 'BOPS01701P'] struttura_liceo_mattei raggiungibilita.columns raggiungibilita_liceo_mattei=raggiungibilita[raggiungibilita[' CODICESCUOLA'] == 'BOPS01701P'] raggiungibilita_liceo_mattei raggiungibilita_itc_mattei=raggiungibilita[raggiungibilita[' CODICESCUOLA'] == 'BOTD01701E'] raggiungibilita_itc_mattei mezzi_x_mattei=raggiungibilita_itc_mattei[[' SCUOLABUS',' TRASPORTIPUBBLICIURBANI',' TRASPORTIPUBBLICIINTERURBANI',' TRASPORTIFERROVIARI', ' MEZZIPRIVATI', ' TRASPORTODISABILI']] mezzi_x_mattei = mezzi_x_mattei.eq('SI').mul(1) mezzi_x_mattei # ## Voglio vedere dove si trova l'edificio su una mappa! 
#
# per farlo serve <a name="potenziarepython">potenziare python</a> con due moduli:
# - geocoder
# modulo che permette di geocodificare gli indirizzi ( = da toponimo a coordinate geografiche)
# - folium
# modulo che permette di creare mappe online
#
#
# Se si sta usando anaconda questo può essere fatto con queste istruzioni
# ```bash
# import sys
# # !conda install -c conda-forge --yes --prefix {sys.prefix} geocoder
# # !conda install -c conda-forge --yes --prefix {sys.prefix} folium
# ```
#
# altrimenti con pip
# ```bash
# import sys
# # !{sys.executable} -m pip install geocoder
# # !{sys.executable} -m pip install folium
# ```
#
# *si riportano i comandi per anaconda ai fini del tutorial*

import sys
# !conda install -c conda-forge --yes --prefix {sys.prefix} geocoder
# !conda install -c conda-forge --yes --prefix {sys.prefix} folium

# **NOTA**
#
# su github appaiono errori in quanto queste librerie non possono essere installate

# ---
#
# ed ora cominciamo con il ricavare i dati necessari al geocoder
#
# si tratta di creare una stringa formattata in questo modo
#
# *indirizzo della scuola, comune, italia*

cerca_indirizzo = scuola.INDIRIZZOSCUOLA.values[0]
cerca_indirizzo += ", " + scuola.DESCRIZIONECOMUNE.values[0]
cerca_indirizzo += ", Italia"
nome_scuola = scuola.DENOMINAZIONESCUOLA.values[0]
cerca_indirizzo

# ed ora serve un geocoder
# Possiamo sceglierne diversi (google, bing, openstreetmap ...)
# # Come scelta prendiamo **komoot** che è libero e basato su openstreetmap import geocoder coordinatescuola = geocoder.komoot(cerca_indirizzo) coordinatescuola.latlng # latitudine = 44.469136<br/> # longitudine = 11.4048108 # ed ora che ho le coordinate costruisco la mappa usando folium import folium mappa = folium.Map(location=[coordinatescuola.latlng[0], coordinatescuola.latlng[1]],zoom_start=17) folium.Marker([coordinatescuola.latlng[0], coordinatescuola.latlng[1]], popup=nome_scuola).add_to(mappa) mappa # !['la mappa intorno all'Enrico Mattei'](https://raw.githubusercontent.com/napo/opendatamiur/master/images/mappa_esempio.png) # ***... ed ora tocca a voi :)*** # # --- # # RIASSUNTO PYTHON # ## importare moduli # vanno caricati (*import*) e in alcuni [casi installati](#potenziarepython) # # questi i moduli usati # ```python # import requests # import io # import pandas as panda # import geocode # import folium # ``` # # ## metodi per interrogare un DataFrame (tabella) pandas # (nell'esempio *nometabella*) # # ### conoscere le prime 3 righe (ma il valore possiamo cambiarlo # ```python # nometabella.head(3) # ``` # ### la forma della tabella (numero di righe e numero di colonne) # ```python # nometabella.shape # ``` # #### e da lì il numero di righe # ```python # nometabella.shape[0] # ``` # #### e il numero di colonne # ```python # nometabella.shape[1] # ``` # #### i nomi delle colonne # ```python # nometabella.columns # ``` # #### e contare quante sono # ```python # nometabella.columns.size # ``` # #### avere una descrizione su come è fatta una colonna di un dataframe partendo dal suo nome # (es. *nomecolonna*)) # ```python # nometabella.nomecolonna.describe() # ``` # # ## unire più DataFrame # ```python # panda.concat([tabella1, tabella2]) # ``` # ## confrontare colonne fra tabelle diverse # (es. 
*tabella1* e *tabella2*) # ```python # tabella1.columns.equals(tabella2.columns) # ``` # ## vedere cosa cambia fra le colonne delle due tabelle # ```python # tabella1.columns.difference(tabella2.columns) # ``` # # ## individuare i valori univoci all'interno di una colonna # ```python # nometabella.nomecolonna.unique() # ``` # ## contare il numero di volte in cui compare ogni valore della colonna # ```python # nometabella.groupby(nometabella.nomecolonna).size() # ``` # ## ordinare una tabella per i valori di una colonna # ```python # nometabella.sort_values("nomecolonna") # ``` # ## filtrare una tabella per un valore di una colonna # ```python # nometabella[nometabella.nomeoclonna == 'valore da cercare'] # ``` # ## trasformare tutti i valori di una tabella con un altro # Esempio: trasformare tutti i "SI" in "1" # ```python # nometabella.eq('SI').mul(1) # ``` # # ## ribaltare righe con colonne (pivot) # ### scegliendo quale colonna usare come indice # ```python # nometabella.pivot_table(index='colonnacomeindice') # ``` # #### e se necessario assegnare il valore zero dove i valori mancano # ```python # nometabella.pivot_table(index='colonnasceltacomeindice').fillna(0) # ``` # #### ed anche più complesse dove sommare i valori presenti in una colonna # ```python # nometabella.pivot_table(index='colonnasceltacomeindice', columns='colonnescelte', values='colonnaconivalori', # aggfunc='sum', fill_value=0) # ``` # # # ## rappresentare i valori di un dataframe in grafico a barre verticali # e con dimensioni 10x10 in DPI [punti per pollice](https://it.wikipedia.org/wiki/Punti_per_pollice) e assegnare un titolo # ```python # nometabella.plot.bar(title='titolo',figsize[10,10]) # ``` # ### e a barre orizzontali # ```python # nometabella.plot.barh(title='titolo',figsize[10,10]) # ``` # ### e a barre orizzontali a segmenti # ```python # nometabella.plot.bar(stacked=True,figsize=(10,10)) # ``` # ### e a barre verticali a segmenti scegliendo i colori # *colormap* è una variabile che 
può essere in ogni grafico. # # l'elenco dei colori si trova qui https://matplotlib.org/examples/color/colormaps_reference.html # ```python # nometabella.plot.hbar(stacked=True,figsize=[10,10],colormap='Pastel2') # ``` # # ## usare un geocoder # con komoot # # ```python # geocoder.komoot("Via <NAME> Costa, 14, Bologna, Italia") # ``` # con google # # ```python # geocoder.google("<NAME>") # ``` # ### ed estrarre latitudine e longitudine # ```python # opificio_gollinelli = geocoder.komoot("<NAME>") # latitudine = opificio_gollinelli.latlng[0] # longitudine = opificio_gollinelli.latlng[1] # ``` # # # ## rappresentare un punto su una mappa # es. # latitudine = 44.5082397 # longitudine = 11.3066287 # # ```python # mappa = folium.Map(location=[44.5082397, 11.3066287]) # folium.Marker([44.5082397, 11.3066287]).add_to(mappa) # ``` #
MIUR_OpenData.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sys
sys.path.append('../../../GraphGallery/')
sys.path.append('../../../GraphAdv/')

import tensorflow as tf
import numpy as np
import networkx as nx
import scipy.sparse as sp

from graphgallery.nn.models import GCN
from graphgallery.nn.functions import softmax
from graphadv.attack.targeted import DICE

import matplotlib.pyplot as plt
plt.style.use(['no-latex', 'ieee'])

# +
from graphgallery.data import NPZDataset

# Citeseer citation graph: adjacency matrix, node features and labels.
data = NPZDataset('citeseer', root="~/GraphData/datasets/", verbose=False, standardize=True)
adj, x, labels = data.graph.unpack()
idx_train, idx_val, idx_test = data.split(random_state=15)

n_classes = labels.max() + 1
# -

target = 0
assert target in idx_test
print(f"Attack target {target} with class label {labels[target]}")

# DICE: direct structure attack on the target node (edges only, features untouched).
attacker = DICE(adj, labels=labels, seed=None)
attacker.reset()
attacker.attack(target, direct_attack=True, structure_attack=True, feature_attack=False)
# show logs
attacker.show_edge_flips(detail=True)

# # Before Attack

model = GCN(adj, x, labels, device='GPU', norm_x=None, seed=42)
model.build()
his = model.train(idx_train, idx_val, verbose=1, epochs=100)
softmax(model.predict(target).ravel())

# # After Attack

model = GCN(attacker.A, x, labels, device='GPU', norm_x=None, seed=42)
model.build()
his = model.train(idx_train, idx_val, verbose=1, epochs=100)
softmax(model.predict(target).ravel())

# # Visualization


def evaluate(adj, x, retrain_iters=5):
    """Retrain a GCN `retrain_iters` times on (adj, x) and collect the
    predicted class distribution of the global `target` node for each run.

    Returns an array of shape (retrain_iters, n_classes).
    """
    classification_margins = []
    class_distrs = []
    for _ in range(retrain_iters):
        print(f"... {_+1}/{retrain_iters} ")
        model = GCN(adj, x, labels, device='GPU', norm_x=None, seed=None)
        model.build()
        his = model.train(idx_train, idx_val, verbose=0, epochs=100)
        logit = softmax(model.predict(target).ravel())
        class_distrs.append(logit)
        # BUGFIX: the original computed `(logit - labels[target]).argmax()`,
        # which subtracts a *scalar* and therefore leaves argmax unchanged --
        # the "best second class" could be the true class itself, yielding a
        # spurious zero margin. Mask out the true class before taking argmax.
        masked = logit.copy()
        masked[labels[target]] = -np.inf
        best_second_class = masked.argmax()
        # Classification margin: true-class probability minus the highest
        # probability among all *other* classes.
        margin = logit[labels[target]] - logit[best_second_class]
        classification_margins.append(margin)
        # BUGFIX: `model.close` was a bare attribute access (a no-op);
        # the method must be called to actually release the model's resources.
        model.close()
        del model

    class_distrs = np.array(class_distrs)
    return class_distrs


retrain_iters = 5
print("Before Attack")
class_distrs_clean = evaluate(adj, x, retrain_iters=retrain_iters)
print(f"After {attacker.name} Attack")
class_distrs_retrain = evaluate(attacker.A, x, retrain_iters=retrain_iters)


# +
def make_xlabel(ix, correct):
    # Mark the true class in the x-axis tick labels.
    if ix == correct:
        return "Class {}\n(correct)".format(ix)
    return "Class {}".format(ix)


figure = plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
center_ixs_clean = []
for ix, block in enumerate(class_distrs_clean.T):
    x_ixs = np.arange(len(block)) + ix*(len(block)+2)
    center_ixs_clean.append(np.mean(x_ixs))
    color = '#555555'
    if ix == labels[target]:
        color = 'darkgreen'
    plt.bar(x_ixs, block, color=color)

ax = plt.gca()
plt.ylim((-.05, 1.05))
plt.ylabel("Predicted probability")
ax.set_xticks(center_ixs_clean)
ax.set_xticklabels([make_xlabel(k, labels[target]) for k in range(n_classes)])
ax.set_title(f"Predicted class probabilities for node {target} on clean data\n({retrain_iters} re-trainings)")

fig = plt.subplot(1, 2, 2)
center_ixs_retrain = []
for ix, block in enumerate(class_distrs_retrain.T):
    x_ixs = np.arange(len(block)) + ix*(len(block)+2)
    center_ixs_retrain.append(np.mean(x_ixs))
    color = '#555555'
    if ix == labels[target]:
        color = 'darkgreen'
    plt.bar(x_ixs, block, color=color)

ax = plt.gca()
plt.ylim((-.05, 1.05))
ax.set_xticks(center_ixs_retrain)
ax.set_xticklabels([make_xlabel(k, labels[target]) for k in range(n_classes)])
ax.set_title(f"Predicted class probabilities for node {target} after {attacker.n_perturbations} perturbations\n({retrain_iters} re-trainings)")
plt.tight_layout()
plt.show()
examples/Targeted Attack/test_DICE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.013058, "end_time": "2020-12-09T17:33:12.952096", "exception": false, "start_time": "2020-12-09T17:33:12.939038", "status": "completed"} tags=[] # **This notebook is an exercise in the [Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/data-leakage).** # # --- # # + [markdown] papermill={"duration": 0.011651, "end_time": "2020-12-09T17:33:12.978661", "exception": false, "start_time": "2020-12-09T17:33:12.967010", "status": "completed"} tags=[] # Most people find target leakage very tricky until they've thought about it for a long time. # # So, before trying to think about leakage in the housing price example, we'll go through a few examples in other applications. Things will feel more familiar once you come back to a question about house prices. # # # Setup # # The questions below will give you feedback on your answers. Run the following cell to set up the feedback system. # + papermill={"duration": 0.063988, "end_time": "2020-12-09T17:33:13.054927", "exception": false, "start_time": "2020-12-09T17:33:12.990939", "status": "completed"} tags=[] # Set up code checking from learntools.core import binder binder.bind(globals()) from learntools.ml_intermediate.ex7 import * print("Setup Complete") # + [markdown] papermill={"duration": 0.012134, "end_time": "2020-12-09T17:33:13.080570", "exception": false, "start_time": "2020-12-09T17:33:13.068436", "status": "completed"} tags=[] # # Step 1: The Data Science of Shoelaces # # Nike has hired you as a data science consultant to help them save money on shoe materials. 
Your first assignment is to review a model one of their employees built to predict how many shoelaces they'll need each month. The features going into the machine learning model include: # - The current month (January, February, etc) # - Advertising expenditures in the previous month # - Various macroeconomic features (like the unemployment rate) as of the beginning of the current month # - The amount of leather they ended up using in the current month # # The results show the model is almost perfectly accurate if you include the feature about how much leather they used. But it is only moderately accurate if you leave that feature out. You realize this is because the amount of leather they use is a perfect indicator of how many shoes they produce, which in turn tells you how many shoelaces they need. # # Do you think the _leather used_ feature constitutes a source of data leakage? If your answer is "it depends," what does it depend on? # # After you have thought about your answer, check it against the solution below. # + papermill={"duration": 0.027182, "end_time": "2020-12-09T17:33:13.120179", "exception": false, "start_time": "2020-12-09T17:33:13.092997", "status": "completed"} tags=[] # Check your answer (Run this code cell to receive credit!) q_1.check() # + [markdown] papermill={"duration": 0.013333, "end_time": "2020-12-09T17:33:13.147306", "exception": false, "start_time": "2020-12-09T17:33:13.133973", "status": "completed"} tags=[] # # Step 2: Return of the Shoelaces # # You have a new idea. You could use the amount of leather Nike ordered (rather than the amount they actually used) leading up to a given month as a predictor in your shoelace model. # # Does this change your answer about whether there is a leakage problem? If you answer "it depends," what does it depend on? 
# + papermill={"duration": 0.027504, "end_time": "2020-12-09T17:33:13.188552", "exception": false, "start_time": "2020-12-09T17:33:13.161048", "status": "completed"} tags=[] # Check your answer (Run this code cell to receive credit!) q_2.check() # + [markdown] papermill={"duration": 0.015251, "end_time": "2020-12-09T17:33:13.219738", "exception": false, "start_time": "2020-12-09T17:33:13.204487", "status": "completed"} tags=[] # # Step 3: Getting Rich With Cryptocurrencies? # # You saved Nike so much money that they gave you a bonus. Congratulations. # # Your friend, who is also a data scientist, says he has built a model that will let you turn your bonus into millions of dollars. Specifically, his model predicts the price of a new cryptocurrency (like Bitcoin, but a newer one) one day ahead of the moment of prediction. His plan is to purchase the cryptocurrency whenever the model says the price of the currency (in dollars) is about to go up. # # The most important features in his model are: # - Current price of the currency # - Amount of the currency sold in the last 24 hours # - Change in the currency price in the last 24 hours # - Change in the currency price in the last 1 hour # - Number of new tweets in the last 24 hours that mention the currency # # The value of the cryptocurrency in dollars has fluctuated up and down by over \$100 in the last year, and yet his model's average error is less than \$1. He says this is proof his model is accurate, and you should invest with him, buying the currency whenever the model says it is about to go up. # # Is he right? If there is a problem with his model, what is it? # + papermill={"duration": 0.029187, "end_time": "2020-12-09T17:33:13.267220", "exception": false, "start_time": "2020-12-09T17:33:13.238033", "status": "completed"} tags=[] # Check your answer (Run this code cell to receive credit!) 
q_3.check() # + [markdown] papermill={"duration": 0.017178, "end_time": "2020-12-09T17:33:13.303281", "exception": false, "start_time": "2020-12-09T17:33:13.286103", "status": "completed"} tags=[] # # Step 4: Preventing Infections # # An agency that provides healthcare wants to predict which patients from a rare surgery are at risk of infection, so it can alert the nurses to be especially careful when following up with those patients. # # You want to build a model. Each row in the modeling dataset will be a single patient who received the surgery, and the prediction target will be whether they got an infection. # # Some surgeons may do the procedure in a manner that raises or lowers the risk of infection. But how can you best incorporate the surgeon information into the model? # # You have a clever idea. # 1. Take all surgeries by each surgeon and calculate the infection rate among those surgeons. # 2. For each patient in the data, find out who the surgeon was and plug in that surgeon's average infection rate as a feature. # # Does this pose any target leakage issues? # Does it pose any train-test contamination issues? # + papermill={"duration": 0.031606, "end_time": "2020-12-09T17:33:13.351804", "exception": false, "start_time": "2020-12-09T17:33:13.320198", "status": "completed"} tags=[] # Check your answer (Run this code cell to receive credit!) q_4.check() # + [markdown] papermill={"duration": 0.018026, "end_time": "2020-12-09T17:33:13.388365", "exception": false, "start_time": "2020-12-09T17:33:13.370339", "status": "completed"} tags=[] # # Step 5: Housing Prices # # You will build a model to predict housing prices. The model will be deployed on an ongoing basis, to predict the price of a new house when a description is added to a website. Here are four features that could be used as predictors. # 1. Size of the house (in square meters) # 2. Average sales price of homes in the same neighborhood # 3. Latitude and longitude of the house # 4. 
Whether the house has a basement # # You have historic data to train and validate the model. # # Which of the features is most likely to be a source of leakage? # + papermill={"duration": 0.032198, "end_time": "2020-12-09T17:33:13.439412", "exception": false, "start_time": "2020-12-09T17:33:13.407214", "status": "completed"} tags=[] # Fill in the line below with one of 1, 2, 3 or 4. potential_leakage_feature = 2 # Check your answer q_5.check() # + papermill={"duration": 0.028902, "end_time": "2020-12-09T17:33:13.488287", "exception": false, "start_time": "2020-12-09T17:33:13.459385", "status": "completed"} tags=[] #q_5.hint() #q_5.solution() # + [markdown] papermill={"duration": 0.019938, "end_time": "2020-12-09T17:33:13.528582", "exception": false, "start_time": "2020-12-09T17:33:13.508644", "status": "completed"} tags=[] # # Conclusion # Leakage is a hard and subtle issue. You should be proud if you picked up on the issues in these examples. # # Now you have the tools to make highly accurate models, and pick up on the most difficult practical problems that arise with applying these models to solve real problems. # # There is still a lot of room to build knowledge and experience. Try out a [Competition](https://www.kaggle.com/competitions) or look through our [Datasets](https://kaggle.com/datasets) to practice your new skills. # # Again, Congratulations! # + [markdown] papermill={"duration": 0.019638, "end_time": "2020-12-09T17:33:13.568342", "exception": false, "start_time": "2020-12-09T17:33:13.548704", "status": "completed"} tags=[] # --- # # # # # *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161289) to chat with other Learners.*
both machine learning models/exercise-data-leakage.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # #### New to Plotly? # Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). # <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). # <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! # ### US map small multiples # + import plotly.plotly as py import pandas as pd df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/1962_2006_walmart_store_openings.csv') df.head() data = [] layout = dict( title = 'New Walmart Stores per year 1962-2006<br>\ Source: <a href="http://www.econ.umn.edu/~holmes/data/WalMart/index.html">\ University of Minnesota</a>', # showlegend = False, autosize = False, width = 1000, height = 900, hovermode = False, legend = dict( x=0.7, y=-0.1, bgcolor="rgba(255, 255, 255, 0)", font = dict( size=11 ), ) ) years = df['YEAR'].unique() for i in range(len(years)): geo_key = 'geo'+str(i+1) if i != 0 else 'geo' lons = list(df[ df['YEAR'] == years[i] ]['LON']) lats = list(df[ df['YEAR'] == years[i] ]['LAT']) # Walmart store data data.append( dict( type = 'scattergeo', showlegend=False, lon = lons, lat = lats, geo = geo_key, name = years[i], marker = dict( color = "rgb(0, 0, 255)", opacity = 0.5 ) ) ) # Year markers data.append( dict( type = 'scattergeo', showlegend = False, lon = [-78], lat = [47], geo = geo_key, text = 
[years[i]], mode = 'text', ) ) layout[geo_key] = dict( scope = 'usa', showland = True, landcolor = 'rgb(229, 229, 229)', showcountries = False, domain = dict( x = [], y = [] ), subunitcolor = "rgb(255, 255, 255)", ) def draw_sparkline( domain, lataxis, lonaxis ): ''' Returns a sparkline layout object for geo coordinates ''' return dict( showland = False, showframe = False, showcountries = False, showcoastlines = False, domain = domain, lataxis = lataxis, lonaxis = lonaxis, bgcolor = 'rgba(255,200,200,0.0)' ) # Stores per year sparkline layout['geo44'] = draw_sparkline({'x':[0.6,0.8], 'y':[0,0.15]}, \ {'range':[-5.0, 30.0]}, {'range':[0.0, 40.0]} ) data.append( dict( type = 'scattergeo', mode = 'lines', lat = list(df.groupby(by=['YEAR']).count()['storenum']/1e1), lon = range(len(df.groupby(by=['YEAR']).count()['storenum']/1e1)), line = dict( color = "rgb(0, 0, 255)" ), name = "New stores per year<br>Peak of 178 stores per year in 1990", geo = 'geo44', ) ) # Cumulative sum sparkline layout['geo45'] = draw_sparkline({'x':[0.8,1], 'y':[0,0.15]}, \ {'range':[-5.0, 50.0]}, {'range':[0.0, 50.0]} ) data.append( dict( type = 'scattergeo', mode = 'lines', lat = list(df.groupby(by=['YEAR']).count().cumsum()['storenum']/1e2), lon = range(len(df.groupby(by=['YEAR']).count()['storenum']/1e1)), line = dict( color = "rgb(214, 39, 40)" ), name ="Cumulative sum<br>3176 stores total in 2006", geo = 'geo45', ) ) z = 0 COLS = 5 ROWS = 9 for y in reversed(range(ROWS)): for x in range(COLS): geo_key = 'geo'+str(z+1) if z != 0 else 'geo' layout[geo_key]['domain']['x'] = [float(x)/float(COLS), float(x+1)/float(COLS)] layout[geo_key]['domain']['y'] = [float(y)/float(ROWS), float(y+1)/float(ROWS)] z=z+1 if z > 42: break fig = { 'data':data, 'layout':layout } py.iplot( fig, filename='US Walmart growth', height=900, width=1000 ) # - # #### Reference # See https://plot.ly/python/reference/#scattergeo for more information and chart attribute options! 
# +
from IPython.display import display, HTML

# Inject the Plotly documentation fonts and notebook CSS so the published
# page matches the rest of the docs site.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))

# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher

# Publish this notebook to the Plotly documentation site.
# FIX: the original call had no comma between ``page_type='example_index'``
# and ``display_as='multiple_axes'`` -- a SyntaxError that made the whole
# cell fail before publishing anything.
publisher.publish(
    'map-subplots.ipynb',
    ' python/map-subplots-and-small-multiples/',
    ' Python Map Subplots and Map Small Multiples| Plotly',
    'How to make map subplots and map small multiples in Python.',
    title='Python Map Subplots and Map Small Multiples | plotly',
    name='Map Subplots',
    has_thumbnail='true',
    thumbnail='thumbnail/map-subplots.jpg',
    language='python',
    page_type='example_index',  # missing comma restored here
    display_as='multiple_axes',
    order=5,
    ipynb='~notebook_demo/59')
# -
_posts/python-v3/subplots/map-subplots/map-subplots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- name = '2016-02-05-ipywidgets-interact' title = 'ipywidgets.interact() examples' tags = 'ipywidgets, iris' author = '<NAME>' # + from nb_tools import connect_notebook_to_post from IPython.core.display import HTML html = connect_notebook_to_post(name, title, tags, author) # - # Today's meeting opened the topic of building interactive figures in Python. This notebook will show an example of using ipywidgets module, more specifically `interact()` function. The full documentation can be found on the [ipywidgets website](http://ipywidgets.readthedocs.org/en/latest/), but beware: since the project is young and is evolving quickly, the documentation can be incomplete or sometimes outdated. # The examples below were bluntly taken from <NAME>'s [post](http://earthpy.org/pyncview_pm.html) on his awesome EarthPy blog. Note that since then `IPython.html.widgets` migrated into a separate package. # There are dozens of examples on the web on how to use ipywidgets in many cool ways. You'd better start from the project's [collection of notebooks on GitHub](https://github.com/ipython/ipywidgets/tree/master/examples). # <div class="alert alert-warning"> # In the static HTML version of this notebook the interactive mode is unavailable. # # <b>To play with figures you can switch to <a href=http://mybinder.org/repo/ueapy/interactive_notebooks>binder</a>.</b> # Or download the notebook using the link in the end and launch it on your machine. # # Hopefully, future releases of ipywidgets will include <a href=https://jakevdp.github.io/blog/2013/12/05/static-interactive-widgets/>static widgets</a>. 
# </div> import warnings warnings.filterwarnings('ignore') # ## In the beginning there was a sine wave # Imagine you have a plotting function with two arguments, for example, a line plot of a sine wave. # First, import the necessary modules and tell matplotlib to embed figures in the notebook. import matplotlib.pyplot as plt import numpy as np # %matplotlib inline # Next, define a 1d array to pass into `sin()` function. x = np.linspace(0,1,100) # Define a trivial function to plot a sine wave depending on frequency and amplitude inputs. def pltsin(freq, ampl): y = ampl*np.sin(2*np.pi*x*freq) plt.plot(x, y) plt.ylim(-10,10) # fix limits of the vertical axis # Test it with arbitrary arguments: pltsin(10, 3) # #### Changing arguments # Now, if you want to see how the arguments affect the result you would need to rerun the cell above over and over again. Luckily, the ipywidgets make it more fun. from ipywidgets import interact # Just pass the function name into `interact()` as a first argument. Then add its arguments and their respective range (start, stop, step): _ = interact(pltsin, freq=(1,10,0.1), ampl=(1,10,1)) # And voila, you can change frequency and amplitude interactively using the two independent sliders. # #### Another example # Of course, `interact()` can be used not only for plotting. For example, using code from [this](http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n/3035188#3035188) StackOverflow answer, we can print out a sequence of prime numbers smaller than a given number n. 
def primesfrom3to(n):
    """Return the primes p with 3 <= p < n as a space-separated string.

    Sieves over the odd numbers only (index i stands for 2*i + 1), so the
    boolean sieve uses ~n/2 entries.
    """
    # FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``bool`` is the equivalent, always-valid dtype.
    sieve = np.ones(n//2, dtype=bool)
    for i in range(3, int(n**0.5)+1, 2):
        if sieve[i//2]:
            # Cross out the odd multiples of i starting at i*i.
            sieve[i*i//2::i] = False
    # Map sieve indices back to odd numbers; [1::] drops the entry for 1.
    res = 2*np.nonzero(sieve)[0][1::]+1
    seq = ''
    for i in res:
        seq += ' {}'.format(i)
    return seq[1:]  # strip the leading space


# And then make the function interactive:

_ = interact(primesfrom3to, n=(3,100,1))  # _ used to suppress output

# ## ipywidgets + contourf + real data

# How can we apply `interact()` to real data analysis in Earth sciences? Well, one of the trivial application is to explore N-dimensional fields stored NetCDF files.

import netCDF4 as nc

# As a sample data file we will use the same `data.nc` file from previous examples.

fpath = '../data/data.nc'

# Here we create a function `ncfun()`, whose arguments are:
#
# * NetCDF file name
# * name of one of the variables stored in that file
# * assuming we have 4d-arrays, time and level indices (`=0` by default).
#
# In a nutshell, the function opens a file using netCDF4 module, reads the variable labelled `varname`, as well as longitude and latitude arrays, and then displays lon-lat horizontal cross-section.

def ncfun(filename, varname='', time=0, lev=0):
    """Plot a lon-lat filled-contour slice of a 4D NetCDF variable.

    Assumes the variable is ordered (time, level, lat, lon) -- see the
    markdown note above.
    """
    with nc.Dataset(filename) as da:
        arr = da.variables[varname][:]
        lon = da.variables['longitude'][:]
        lat = da.variables['latitude'][:]
        fig = plt.figure(figsize=(8,5))
        ax = fig.add_subplot(111)
        c = ax.contourf(lon, lat, arr[time, lev, ...], cmap='viridis')
        fig.colorbar(c, ax=ax, shrink=0.5)

# This function is easily wrapped by `interact()`:

_ = interact(ncfun, filename=fpath, varname=['u','v'], time=(0,1,1), lev=(0,3,1))

# ## ncview clone in Jupyter

# **Tools: iris, cartopy, ipywidgets**

# We can improve that function and effectively create a clone of the [ncview](http://meteora.ucsd.edu/~pierce/ncview_home_page.html) or [Panoply](http://www.giss.nasa.gov/tools/panoply/). We also will use the capabilities of iris and cartopy packages.
import iris import cartopy.crs as ccrs iris.FUTURE.netcdf_promote = True # see explanation in previous posts # For colour schemes we will use palettable package (brewer2mpl successor). It is available on PyPi (`pip install palettable`). import palettable def plot_cube(cube, time=0, lev=0, cmap='viridis'): """Display a cross-section of iris.cube.Cube on a map""" # Get cube data and extract a 2d lon-lat slice arr = cube.data[time, lev, ...] # Find longitudes and latitudes lon = cube.coords(axis='x')[0].points lat = cube.coords(axis='y')[0].points # Create a figure with the size 8x5 inches fig = plt.figure(figsize=(8,5)) # Create a geo-references Axes inside the figure ax = fig.add_subplot(111, projection=ccrs.PlateCarree()) # Plot coastlines ax.coastlines() # Plot the data as filled contour map c = ax.contourf(lon, lat, arr, cmap=cmap) # Attach a colorbar shrinked by 50% fig.colorbar(c, ax=ax, shrink=0.5) # The interesting part is below. We use another function that have only one argument - a file name. It opens the file and then allows us to choose a variable to plot (in the previous example we had to know variable names prior to executing the function). def iris_view(filename): """Interactively display NetCDF data""" # Load file as iris.cube.CubeList cubelist = iris.load(filename) # Create a dict of variable names and iris cubes vardict = {i.name(): cubelist.extract(i.name())[0] for i in cubelist} # Use sequential colorbrewer palettes for colormap keyword cmaps = [i for i in palettable.colorbrewer.COLOR_MAPS['Sequential']] interact(plot_cube, cube=vardict, time=(0,1,1), lev=(0,3,1), cmap=cmaps) iris_view(fpath) # This is by no means a finished ncview-killer app. If you played with it, you could have noticed that it's much slower than ncview, even though the NetCDF file size is a little less than 10 Mb. However, you are free to customize this function in any possible way and use the power of Python and Jupyter. 
# We can keep tweaking the code above forever: we can make it more generic, we can add a second subplot, we can substitute time and level indices by their actual values, we can update colormap depending on data range and so on. By the way, any comments, suggestions and PRs are very welcome. # Final remarks. The interactive data visualisation is a very hot topic, and beside 'out-of-the-box' ipywidgets there are several Python packages that make use of JavaScript to allow you to build animations and interactive figures in Python. At some point we will talk about such projects as # # * [bokeh](http://bokeh.pydata.org/en/latest/) # # * [mpl3d](http://mpld3.github.io/) # # * [holoviews](http://holoviews.org/) # # * [plotly](https://plot.ly/) HTML(html)
content/notebooks/2016-02-05-ipywidgets-interact.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chapter 14: Data Privacy # ### NOTE as of June 2020, this will only work with TensorFlow 1.x # !pip install tensorflow_privacy # + colab={} colab_type="code" id="4N10jj3R47Sj" import tensorflow as tf import tensorflow_hub as hub import pandas as pd import numpy as np # + from pathlib import Path repo_dir = Path.cwd().parents[1] # - # ## Feature engineering # + colab={} colab_type="code" id="5lDj1OJCWfTy" ONE_HOT_FEATURES = { "product": None, "sub_product": None, "company_response": None, "state": None, "issue": None } # feature name, bucket count BUCKET_FEATURES = { "zip_code": 10 } # feature name, value is unused TEXT_FEATURES = { "consumer_complaint_narrative": None } # + colab={} colab_type="code" id="3o-EurrDk7Fi" feature_names = ["product", "sub_product", "issue", "sub_issue", "state", "zip_code", "company", "company_response", "timely_response", "consumer_disputed", "consumer_complaint_narrative"] df = pd.read_csv(repo_dir/'data/consumer_complaints_with_narrative.csv', usecols=feature_names) # + colab={} colab_type="code" id="nH5SAgmOOL6K" def make_one_hot(df): one_hot_array = [] for feature_name in ONE_HOT_FEATURES.keys(): temp_array = pd.np.asarray(tf.keras.utils.to_categorical(df[feature_name].values)) ONE_HOT_FEATURES[feature_name] = temp_array.shape[1] one_hot_array.append(temp_array) return one_hot_array # + colab={"base_uri": "https://localhost:8080/", "height": 105} colab_type="code" id="jxbSJIw3lDOj" outputId="7d85ae8c-5410-446b-a3db-3a66449751c8" for feature in ONE_HOT_FEATURES.keys(): df[feature] = df[feature].astype("category").cat.codes one_hot_x = make_one_hot(df) embedding_x = [pd.np.asarray(df[feature_name].values).reshape(-1) for feature_name in TEXT_FEATURES.keys()] df['zip_code'] = df['zip_code'].str.replace('X', '0', 
regex=True) df['zip_code'] = df['zip_code'].str.replace(r'\[|\*|\+|\-|`|\.|\ |\$|\/|!|\(', '0', regex=True) df['zip_code'] = df['zip_code'].fillna(0) df['zip_code'] = df['zip_code'].astype('int32') # one bucket per 10k df['zip_code'] = df['zip_code'].apply(lambda x: x//10000) numeric_x = [df['zip_code'].values] X = one_hot_x + numeric_x + embedding_x y = np.asarray(df["consumer_disputed"], dtype=np.uint8).reshape(-1) # + [markdown] colab_type="text" id="t9Eo3zrCVRPm" # ## Adding DP # + colab={} colab_type="code" id="Yto8Cmn7VErQ" # DP parameters NOISE_MULTIPLIER = 1.1 NUM_MICROBATCHES = 32 LEARNING_RATE = 0.1 POPULATION_SIZE = 1000 L2_NORM_CLIP = 1.0 BATCH_SIZE = 32 EPOCHS = 1 # + colab={} colab_type="code" id="u0JJ_EnmVTk6" from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer optimizer = DPGradientDescentGaussianOptimizer( l2_norm_clip=L2_NORM_CLIP, noise_multiplier=NOISE_MULTIPLIER, num_microbatches=NUM_MICROBATCHES, learning_rate=LEARNING_RATE) loss = tf.keras.losses.BinaryCrossentropy( from_logits=True, reduction=tf.losses.Reduction.NONE) # + [markdown] colab_type="text" id="LoQHOGsh5Anr" # The model is unchanged, we just pass in the differentially private optimizer and loss. # + colab={} colab_type="code" id="JZ7Z1LHd4-kb" def transformed_name(key): return key + '_xf' def get_model(dp_optimizer, dp_loss, show_summary=True): """ This function defines a Keras model and returns the model as a Keras object. 
""" # one-hot categorical features input_features = [] for key, dim in ONE_HOT_FEATURES.items(): input_features.append(tf.keras.Input(shape=(dim), name=transformed_name(key))) # adding bucketized features for key, dim in BUCKET_FEATURES.items(): input_features.append(tf.keras.Input(1, name=transformed_name(key))) # adding text input features input_texts = [] for key in TEXT_FEATURES.keys(): input_texts.append(tf.keras.Input(shape=(1,), name=transformed_name(key), dtype=tf.string)) # embed text features MODULE_URL = "https://tfhub.dev/google/universal-sentence-encoder/4" embed = hub.KerasLayer(MODULE_URL) reshaped_narrative = tf.reshape(input_texts[0], [-1]) embed_narrative = embed(reshaped_narrative) deep_ff = tf.keras.layers.Reshape((512, ), input_shape=(1, 512))(embed_narrative) deep = tf.keras.layers.Dense(256, activation='relu')(deep_ff) deep = tf.keras.layers.Dense(64, activation='relu')(deep) deep = tf.keras.layers.Dense(16, activation='relu')(deep) wide_ff = tf.keras.layers.concatenate(input_features) wide = tf.keras.layers.Dense(16, activation='relu')(wide_ff) both = tf.keras.layers.concatenate([deep, wide]) output = tf.keras.layers.Dense(1, activation='sigmoid')(both) inputs = input_features + input_texts keras_model = tf.keras.models.Model(inputs, output) keras_model.compile(optimizer=dp_optimizer, loss=dp_loss, metrics=[ tf.keras.metrics.BinaryAccuracy(), tf.keras.metrics.TruePositives() ]) if show_summary: keras_model.summary() return keras_model # + colab={} colab_type="code" id="Y4TTGI9glD_M" model = get_model(show_summary=False, dp_optimizer=optimizer, dp_loss=loss) # + colab={"base_uri": "https://localhost:8080/", "height": 769} colab_type="code" id="yAmaGolZl4cX" outputId="64396fc5-a6c0-4f95-a094-89bad543347d" model.fit(x=X, y=y, batch_size=32, validation_split=0.1, epochs=EPOCHS) # + [markdown] colab_type="text" id="P1gtS5tFfZau" # ### Calculate Epsilon # + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" 
id="q6u5MIUkMrpS" outputId="098d1d3f-5e10-4590-b7bd-1a723e1a9ad7" from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy compute_dp_sgd_privacy.compute_dp_sgd_privacy(n=POPULATION_SIZE, batch_size=BATCH_SIZE, noise_multiplier=NOISE_MULTIPLIER, epochs=EPOCHS, delta=1e-3) # + colab={} colab_type="code" id="gBzK9bK1gBab"
chapters/data_privacy/differential_privacy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:stats_env] # language: python # name: conda-env-stats_env-py # --- # # Day 6 # ! cat README.md with open('input.txt') as f: l = f.read().splitlines() sets = list(map(lambda x: set(x) - {'_'}, '_'.join(l).split('__'))) sum(list(map(len, sets))) # + active="" # --- Part Two --- # As you finish the last group's customs declaration, you notice that you misread one word in the instructions: # # You don't need to identify the questions to which anyone answered "yes"; you need to identify the questions to which everyone answered "yes"! # # Using the same example as above: # # abc # # a # b # c # # ab # ac # # a # a # a # a # # b # This list represents answers from five groups: # # In the first group, everyone (all 1 person) answered "yes" to 3 questions: a, b, and c. # In the second group, there is no question to which everyone answered "yes". # In the third group, everyone answered yes to only 1 question, a. Since some people did not answer "yes" to b or c, they don't count. # In the fourth group, everyone answered yes to only 1 question, a. # In the fifth group, everyone (all 1 person) answered "yes" to 1 question, b. # In this example, the sum of these counts is 3 + 0 + 1 + 1 + 1 = 6. # # For each group, count the number of questions to which everyone answered "yes". What is the sum of those counts? # - sets = list(map(lambda x: set.intersection(*list(map(set, x.split('_')))), '_'.join(l).split('__'))) sum(list(map(len, sets)))
2020/ferran/06/06.ipynb
# # 8.4. Learning from text — Naive Bayes for Natural Language Processing import numpy as np import pandas as pd import sklearn import sklearn.model_selection as ms import sklearn.feature_extraction.text as text import sklearn.naive_bayes as nb import matplotlib.pyplot as plt # %matplotlib inline df = pd.read_csv('https://github.com/ipython-books/' 'cookbook-2nd-data/blob/master/' 'troll.csv?raw=true') # + podoc={"output_text": "Output"} df[['Insult', 'Comment']].tail() # - y = df['Insult'] tf = text.TfidfVectorizer() X = tf.fit_transform(df['Comment']) print(X.shape) p = 100 * X.nnz / float(X.shape[0] * X.shape[1]) print(f"Each sample has ~{p:.2f}% non-zero features.") (X_train, X_test, y_train, y_test) = \ ms.train_test_split(X, y, test_size=.2) bnb = ms.GridSearchCV( nb.BernoulliNB(), param_grid={'alpha': np.logspace(-2., 2., 50)}) bnb.fit(X_train, y_train) bnb.score(X_test, y_test) # We first get the words corresponding to each feature names = np.asarray(tf.get_feature_names()) # Next, we display the 50 words with the largest # coefficients. print(','.join(names[np.argsort( bnb.best_estimator_.coef_[0, :])[::-1][:50]])) print(bnb.predict(tf.transform([ "I totally agree with you.", "You are so stupid." ])))
chapter08_ml/04_text.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import glob import numpy as np import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img # %matplotlib inline # - # ## Run Notebook (Datasets Builder.ipynb) first which will create training_data and validation_data folder IMG_DIM = (150, 150) ## For windows separator is "\\" and for OSX the saparator is "/" separator = "/" # + train_files = glob.glob('training_data/*') train_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in train_files] train_imgs = np.array(train_imgs) train_labels = [fn.split(separator)[1].split('.')[0].strip() for fn in train_files] print('Train dataset shape:', train_imgs.shape) # - ## Test code for split separator train_files[0].split('/')[1].split('.')[0] # + validation_files = glob.glob('validation_data/*') validation_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in validation_files] validation_imgs = np.array(validation_imgs) validation_labels = [fn.split(separator)[1].split('.')[0].strip() for fn in validation_files] print('Validation dataset shape:', validation_imgs.shape) # - print('Train dataset shape:', train_imgs.shape, '\tValidation dataset shape:', validation_imgs.shape) train_labels validation_labels train_imgs_scaled = train_imgs.astype('float32') validation_imgs_scaled = validation_imgs.astype('float32') train_imgs_scaled /= 255 validation_imgs_scaled /= 255 print(train_imgs[1].shape) array_to_img(train_imgs[2]) # + batch_size = 30 num_classes = 2 epochs = 30 input_shape = (150, 150, 3) # encode text category labels from sklearn.preprocessing import LabelEncoder le = LabelEncoder() le.fit(train_labels) train_labels_enc = le.transform(train_labels) validation_labels_enc = le.transform(validation_labels) 
print(train_labels[1495:1505], train_labels_enc[1495:1505]) # + from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout from keras.models import Sequential from keras import optimizers model = Sequential() model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(), metrics=['accuracy']) model.summary() # + from IPython.display import SVG from keras.utils.vis_utils import model_to_dot SVG(model_to_dot(model, show_shapes=True, show_layer_names=True, rankdir='TB').create(prog='dot', format='svg')) # - history = model.fit(x=train_imgs_scaled, y=train_labels_enc, validation_data=(validation_imgs_scaled, validation_labels_enc), batch_size=batch_size, epochs=epochs, verbose=1) history.history # + f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) t = f.suptitle('Basic CNN Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) epoch_list = list(range(1,31)) ax1.plot(epoch_list, history.history['accuracy'], label='Train Accuracy') ax1.plot(epoch_list, history.history['val_accuracy'], label='Validation Accuracy') ax1.set_xticks(np.arange(0, 31, 5)) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') ax1.set_title('Accuracy') l1 = ax1.legend(loc="best") ax2.plot(epoch_list, history.history['loss'], label='Train Loss') ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(np.arange(0, 31, 5)) ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') l2 = ax2.legend(loc="best") # + model = Sequential() model.add(Conv2D(16, kernel_size=(3, 3), 
activation='relu', input_shape=input_shape)) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(), metrics=['accuracy']) model.summary() # - history = model.fit(x=train_imgs_scaled, y=train_labels_enc, validation_data=(validation_imgs_scaled, validation_labels_enc), batch_size=batch_size, epochs=epochs, verbose=1) # + f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) t = f.suptitle('Basic CNN Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) epoch_list = list(range(1,31)) ax1.plot(epoch_list, history.history['accuracy'], label='Train Accuracy') ax1.plot(epoch_list, history.history['val_accuracy'], label='Validation Accuracy') ax1.set_xticks(np.arange(0, 31, 5)) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') ax1.set_title('Accuracy') l1 = ax1.legend(loc="best") ax2.plot(epoch_list, history.history['loss'], label='Train Loss') ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(np.arange(0, 31, 5)) ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') l2 = ax2.legend(loc="best") # - model.save('cats_dogs_basic_cnn.h5')
notebooks/Ch05 - Unleash the Power of Transfer Learning/Basic CNN Model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# 2.3.1 Preprocessing a corpus with Python
text = 'You say goodbye and I say hello.'
text = text.lower()
text = text.replace('.',' .')
text
# -

words = text.split(' ')
words

# +
# Build word <-> id lookup tables, assigning ids in first-seen order.
word_to_id = {}
id_to_word = {}

for word in words:
    if word not in word_to_id:
        new_id = len(word_to_id)
        word_to_id[word] = new_id
        id_to_word[new_id] = word

id_to_word
# -

word_to_id

id_to_word[1]

word_to_id['hello']

import numpy as np
corpus = [word_to_id[w] for w in words]
# FIX: the original read ``np.array(corpuspus)`` -- ``corpuspus`` is an
# undefined name (a typo for ``corpus``) and raised a NameError.
corpus = np.array(corpus)
corpus


# +
def preprocess(text):
    """Lower-case *text*, split it into words and encode it as word ids.

    :param text: input sentence (words separated by spaces, '.' allowed)
    :return: (corpus, word_to_id, id_to_word) where ``corpus`` is a NumPy
        array of word ids and the two dicts map between words and ids.
    """
    text = text.lower()
    text = text.replace('.', ' .')
    words = text.split(' ')

    word_to_id = {}
    id_to_word = {}
    for word in words:
        if word not in word_to_id:
            new_id = len(word_to_id)
            word_to_id[word] = new_id
            id_to_word[new_id] = word

    corpus = np.array([word_to_id[w] for w in words])

    return corpus, word_to_id, id_to_word


text = 'You say goodbye and I say hello.'
corpus, word_to_id, id_to_word = preprocess(text) # + # 2.3.4 동시발생 행렬 print(corpus) print(id_to_word) # + C = np.array([ [0,1,0,0,0,0,0], [1,0,1,0,1,1,0], [0,1,0,1,0,0,0], [0,0,1,0,1,0,0], [0,1,0,1,0,0,0], [0,1,0,0,0,0,1], [0,0,0,0,0,1,0], ], dtype=np.int32) print(C[0]) print(C[4]) print(C[word_to_id['goodbye']]) # - def create_co_matrix(corpus, vocab_size, window_size=1): '''동시발생 행렬 생성 :param corpus: 말뭉치(단어 ID 목록) :param vocab_size: 어휘 수 :param window_size: 윈도우 크기(윈도우 크기가 1이면 타깃 단어 좌우 한 단어씩이 맥락에 포함) :return: 동시발생 행렬 ''' corpus_size = len(corpus) co_matrix = np.zeros((vocab_size, vocab_size), dtype=np.int32) for idx, word_id in enumerate(corpus): for i in range(1, window_size + 1): left_idx = idx - i right_idx = idx + i if left_idx >= 0: left_word_id = corpus[left_idx] co_matrix[word_id, left_word_id] += 1 if right_idx < corpus_size: right_word_id = corpus[right_idx] co_matrix[word_id, right_word_id] += 1 return co_matrix # + # 2.3.5 벡터 간 유사도 def cos_similarity(x, y, eps=1e-8): '''코사인 유사도 산출 :param x: 벡터 :param y: 벡터 :param eps: '0으로 나누기'를 방지하기 위한 작은 값 :return: ''' nx = x / (np.sqrt(np.sum(x ** 2)) + eps) ny = y / (np.sqrt(np.sum(y ** 2)) + eps) return np.dot(nx, ny) # -
ch02/2-3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys print('python', sys.version) import numpy as np print('numpy', np.__version__) import pandas as pd print('pandas', pd.__version__) import matplotlib as mpl print('matplotlib', mpl.__version__) import matplotlib.pyplot as plt plt.rc("font", family="Malgun Gothic") # 한글표시 (window) plt.rc("axes", unicode_minus=False) # x,y축 (-)부호 표시 # 레티나 디스플레이로 폰트가 선명하게 표시되도록 합니다. from IPython.display import set_matplotlib_formats set_matplotlib_formats("retina") import seaborn as sns print('seaborn', sns.__version__) # 결과 확인을 용이하게 하기 위한 코드 from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = 'all' # 모든 컬럼이 표시되도록 max_columns 의 수를 지정합니다. pd.options.display.max_columns = 39 # - # # data read lab = pd.read_csv('/project/data/폐렴환자lab.csv') patient = pd.read_csv('/project/data/폐렴환자.csv') # # # 전처리 # ## patient 전처리 # ### 환자 7799 맞춰주기 all = set(patient['SUBJECT_ID']).intersection(set(lab['SUBJECT_ID'])) set(patient['SUBJECT_ID'].unique())-all patient = patient[patient['SUBJECT_ID'].isin(all)] patient # ### 시간 부분 제거, 날짜만 남기기 import datetime as dt patient patient['DOD'] = pd.to_datetime(patient['DOD']) patient patient['DOD'] = patient['DOD'].dt.date patient # ### columns 선택 , 인덱스 정리 , 정렬 patient_sorted = patient[['SUBJECT_ID', 'EXPIRE_FLAG']].set_index('SUBJECT_ID').sort_values(['SUBJECT_ID'])#, inplace = True) patient_sorted # ## lab 전처리 # ### lab columns 선택, 행 중복제거, 시간 부분 제거, 날짜만 남기기, 열 정렬 # + lab lab_sorted_ = lab[['SUBJECT_ID', 'CHARTTIME', 'ITEMID', 'FLAG']].drop_duplicates() lab_sorted_ import datetime as dt lab_sorted_['CHARTTIME'] = pd.to_datetime(lab_sorted_['CHARTTIME']) lab_sorted_['CHARTTIME'] = lab_sorted_['CHARTTIME'].dt.date lab_sorted_ lab_sorted = 
lab_sorted_.set_index('SUBJECT_ID').sort_values(['SUBJECT_ID', 'CHARTTIME']) # - # ### abnormal -> 1 # ### NaN -> 0 # + lab_sorted['FLAG'].replace('abnormal', 1, inplace = True) #abnormal을 1로 바꿔주는 코드 lab_sorted[lab_sorted.index==9]['CHARTTIME'].max() #lab기록의 마지막 날짜 DOD랑 같은지 확인하려고 친 코드 # - lab_sorted.fillna(0, inplace = True) lab_sorted['CHARTTIME'].unique()[-10:] lab_sorted.index.unique() # ### lab과 patient merge #expire_flag는 y(label)값. 함께 보려고 lab과 patient 쓰는 것만 붙여줌. merge_lab_pat = pd.merge(left=lab_sorted, right= patient_sorted, how='inner', left_index=True,right_index=True) merge_lab_pat # key: sub_id #value: charttime (array length max=10), value가 리스트 안의 array 형태가 아닌 그냥 array 형태로 들어있음. dic = {} for i in lab_sorted.index.unique(): dic[i] = lab_sorted[lab_sorted.index==i]['CHARTTIME'].unique()[-10:] dic # # 교수님 피드백 # ``` # dic_sub2idx : subject_id 정렬해서 idx 매김 # dic_item2idx : itemid 정렬해서 idx 매김 # dic_sub_charttime2idx : subject_id랑 charttime 정렬해서 idx 매김 # ``` # next level... df = merge_lab_pat.copy() df dic_sub2idx = {} for i,j in enumerate(df.index.unique()): dic_sub2idx[j] = i dic_sub2idx dic_item2idx = {} for i,j in enumerate(df['ITEMID'].sort_values().unique()): dic_item2idx[j] = i dic_item2idx # + # 이 경우 튜플리스트 부터 만들고 idx는 나중에 붙여. # 튜플리스트 만들기 t = [] for i in df.index.unique(): for j in range(len(dic[i])): t.append((i, dic[i][j])) # idx dic 만들기 dic_sub_charttime2idx = {} c = 0 for i in range(len(t)): k = len(dic[t[i][0]]) - 1 if i == 0 or t[i][0] != t[i-1][0]: c = 0 dic_sub_charttime2idx[t[i]] = c c += 1 elif t[i][0] == t[i-1][0]: dic_sub_charttime2idx[t[i]] = c c += 1 # - dic_sub_charttime2idx dic[252] # + # 0~9까지 다시 만들기 # 이 경우 튜플리스트 부터 만들고 idx는 나중에 붙여. 
# 튜플리스트 만들기 t_ = [] for i in df.index.unique(): i for j in range(10): if len(dic[i]) != 9: t_.append((i, 0)) elif len(dic[i]) == 9: # j # dic[i][j] t_.append((i, dic[i][j])) t_ # # idx dic 만들기 # dic_sub_charttime2idx = {} # c = 0 # for i in range(len(t)): # k = len(dic[t[i][0]]) - 1 # if i == 0 or t[i][0] != t[i-1][0]: # c = 0 # dic_sub_charttime2idx[t[i]] = c # c += 1 # elif t[i][0] == t[i-1][0]: # dic_sub_charttime2idx[t[i]] = c # c += 1 # - # + # 체크 셀 df_ = df.reset_index() len(df_) df_['FLAG'].value_counts() a=df_[df_['FLAG']=='delta'] # df_.drop(df_[df_['FLAG']=='delta']) df_=df_.drop(index=a.index,axis=0) df_['FLAG'].value_counts() # - # # 최종 3차원 array 생성 # + x = np.zeros((7799,690,10)) #690행, 10열, 레이어: 7799 # x df_ = df.reset_index() for i in range(len(df_)): # df_.iloc[i,3] # if i == 2: # break if df_.iloc[i,3] == 'delta': pass else: sub, item, charttime = int(df_.iloc[i,0]), int(df_.iloc[i,2]), df_.iloc[i]['CHARTTIME'] sub_idx = dic_sub2idx[sub] item_idx = dic_item2idx[item] if (sub, charttime) not in dic_sub_charttime2idx: continue else: time_idx = dic_sub_charttime2idx[(sub, charttime)] # print(sub, charttime, time_idx) #idx가 계속 0이 나올 수가 있나? index인데..?#-> 각각과 세트인 itemid가 다 달라서 그런건가? ==> ㅇㅇ abnormal = df_.iloc[i,3] x[sub_idx, item_idx, time_idx] = abnormal # - x.shape (x==1).sum() # + # 수정중인 cell x = np.zeros((7799,690,10)) #690행, 10열, 레이어: 7799 x df_ = df.reset_index() df_ for i in range(len(df_)): # print('#############',i) # # print(df_.iloc[i]) if df_.iloc[i,3] == 'delta': pass else: sub, item, charttime = int(df_.iloc[i,0]), int(df_.iloc[i,2]), df_.iloc[i]['CHARTTIME'] # # print("$~~~~~~~~~~~~~~~$") # # print(sub, item, charttime) sub_idx = dic_sub2idx[sub] item_idx = dic_item2idx[item] # # print(sub_idx, item_idx) # itemid 정렬된 순서대로 idx 매김. 
if (sub, charttime) not in dic_sub_charttime2idx: # 마지막 기록으로부터 10일 이내가 아니면 continue # 지나가 else: time_idx = dic_sub_charttime2idx[(sub, charttime)] # print(df_.iloc[i]) # print("$~~~~~~~~~~~~~~~$") # print(sub, item, charttime) # print(sub_idx, item_idx) # itemid 정렬된 순서대로 idx 매김. # print("$~~~~~~~~~~~~~~~$") # print(sub, charttime, time_idx) #idx가 계속 0이 나올 수가 있나? index인데..? #-> 각각과 세트인 itemid가 다 달라서 그런건가? ==> ㅇㅇ abnormal = df_.iloc[i,3] x[sub_idx, item_idx, time_idx] = abnormal # print("") # print('********************') # print(sub_idx, item_idx, time_idx) # print(abnormal) # print("") # print("") # - x[0, 76, 0] # # lstm 돌리기 # + from sklearn.preprocessing import MinMaxScaler import warnings warnings.filterwarnings(action='ignore') import math from keras.models import Sequential from keras.layers import Dense, LSTM, Dropout from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error import pickle import gzip # - # ## 최종 arr(x) 생성 시간 check # + import time start = time.time() # 시작 시간 저장 print(start) x = np.zeros((7799,690,10)) #690행, 10열, 레이어: 7799 df_ = df.reset_index() for i in range(len(df_)): if df_.iloc[i,3] == 'delta': pass else: sub, item, charttime = int(df_.iloc[i,0]), int(df_.iloc[i,2]), df_.iloc[i]['CHARTTIME'] sub_idx = dic_sub2idx[sub] item_idx = dic_item2idx[item] if (sub, charttime) not in dic_sub_charttime2idx: continue else: time_idx = dic_sub_charttime2idx[(sub, charttime)] abnormal = df_.iloc[i,3] x[sub_idx, item_idx, time_idx] = abnormal print("time :", time.time() - start) x.shape # - # ## 3dim array 파일로 저장 type(x) # ### 방법 1 - np.save / np.load - 용량 큼 (약 410MB) # save np.save("3d_array_k.npy", x) # load train = np.load("3d_array_k.npy") train.shape # ### 방법 2 - pickle, gzip - 용량 작음 (약 1MB) # + import pickle import gzip # save and compress. with gzip.open('3d_array_k.pickle', 'wb') as f: pickle.dump(x, f) # - # load and uncompress. 
# Load and decompress the pickled 3-D array to verify the round trip.
with gzip.open('3d_array_k.pickle', 'rb') as f:
    x_data = pickle.load(f)
x_data.shape

# # Label 생성 (label creation)

len(patient['EXPIRE_FLAG'])

# BUG FIX: a pandas Series has no .reshape() method, and ``reshape(,1)`` is a
# SyntaxError.  Convert to a NumPy array first and let -1 infer the row count,
# producing the (n_patients, 1) column vector of 0/1 expire flags.
label = patient['EXPIRE_FLAG'].to_numpy().reshape(-1, 1)
label

# Labels are tiny, so plain np.save / np.load is sufficient here
# (no need for the pickle+gzip compression used for the feature array).
# save
np.save("3d_array_label_k.npy", label)

# load
yyy = np.load("3d_array_label_k.npy")
yyy
0727.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin

# # Preprocessing

dir_name = str("./ks-projects-201801.csv")
data = pd.read_csv(dir_name)

data.info()

data.head()

data.describe()

cols = data.columns
cols

# see columns with a nan
# data[data.isnull().any(axis=1)]
data.isna().any()


# +
# Drop NA
class DataSampleDropper(BaseEstimator, TransformerMixin):
    """Pipeline step: drop every row containing at least one NaN."""

    def __init__(self):
        pass

    def fit(self, x, y=None):
        return self

    def transform(self, X):
        return X.dropna(how='any')


# Drop Live Campaign
class DropLive(BaseEstimator, TransformerMixin):
    """Pipeline step: drop campaigns still 'live' (their outcome is unknown)."""

    def __init__(self):
        pass

    def fit(self, x, y=None):
        return self

    def transform(self, X):
        return X[X['state'] != 'live']
# -

pipe1 = Pipeline([
    # Drop NA rows
    ("dropna", DataSampleDropper()),
    # Drop rows that are live (ie we don't know how to evaluate them)
    ("droplive", DropLive())
    # Add a nice string to column encoder here
])

preprocessed_data = pipe1.fit_transform(data)
preprocessed_data.head()

X = preprocessed_data.drop(columns=['usd pledged', 'usd_pledged_real',
                                    'usd_goal_real', 'pledged', 'state'])
X.head()

# Get our labels
le = LabelEncoder()
y = le.fit_transform(preprocessed_data['state'])
le.classes_

# Only looking for successful projects.
# BUG FIX / robustness: the original used ``[l == 2 for l in y]``, hard-coding
# the position of 'successful' inside ``le.classes_``.  That index silently
# shifts if the set of state values in the data changes.  Compare against the
# class label itself instead — same result, no magic constant.
y = [state == 'successful' for state in preprocessed_data['state']]
y[:6]

# # Partioning Data

# NOTE(review): positional 70/30 split with no shuffling — only valid if row
# order carries no signal; consider sklearn's train_test_split.  TODO confirm.
cutoff = int(np.shape(X)[0] * .7)
cutoff

# +
x_labels = X.columns
x_train = X[:cutoff]
x_test = X[cutoff:]
y_train = y[:cutoff]
y_test = y[cutoff:]
# -
project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: "Python Cient\xEDfico" # language: python # name: pysci # --- # # Trabajando con `NumPy` # ### Inicialización import numpy as np import matplotlib.pyplot as plt # ## Ajuste de funciones polinomiales usando `np.polyfit` # Si simplemente necesitamos hacer ajustes básicos de polinomios, lo podemos hacer fácilmente sólo con `numpy`: # ### Ajuste polinomial de mínimos cuadrados. # Ajuste un polinomio `p (x) = p [0] * x ** deg + ... + p [deg]` de grados [deg] a los puntos $(x, y)$. Devuelve un vector de coeficientes `p` que minimiza el error al cuadrado en el orden $deg$, $deg-1$, $\dots$, $0$. # - Crearemos unos datos para trabajar con ellos n = 50 # haremos 50 datos con ruido aleatorio jitter_amp = 4.0 # esto es la amplitud del ruido x = np.linspace(0,10,n) # generamos la variable x jitter = jitter_amp*(np.random.random(n)-0.5) # calculamos el ruido para cada dato y = x + jitter # y calculamos el dato ruidoso # Ahora graficamos los dato ruidosos plt.plot(x,y,'ok') plt.grid() # ### Ajuste con polyfit # ahora estamos en condiciones de realizar el ajuste, utlizando la libreria `numpy` y de allí, la función de ajuste `polyfit`, la cual ajusta polinomios de grado $n$, `np.polyfit(x,y,n)`. 
a = np.polyfit(x,y,1) # el número 1 significa que usaremos un polin de 1er grado a # ## Observemos los Resultados plt.plot(x,y,'ok') t = np.linspace(0,10,n) # t es la variable independiente entre (0,12) y_t = a[0]*t + a[1] # y_t sería y(t), y depende de t plt.plot(t, y_t, 'r') # 'r' es de red, 'b' de blue, etc plt.grid() # ### Veamos otros ejemplo # + # Datos experimentales x = np.array([ 0., 1., 2., 3., 4.]) y = np.array([ 10.2 , 12.1, 15.5 , 18.3, 20.6 ]) # Ajuste a una recta (polinomio de grado 1) p = np.polyfit(x, y, 1) print(p) # imprime [ 2.7 9.94] # - # en este ejemplo `np.polyfit()` devuelve la lista de parámetros p de la recta, por lo que el modelo lineal $f(x)=ax+b$ de nuestros datos será: # + # Valores de y calculados del ajuste y_ajuste = p[0]*x + p[1] # Dibujamos los datos experimentales p_datos, = plt.plot(x, y, 'b.') # Dibujamos la recta de ajuste p_ajuste, = plt.plot(x, y_ajuste, 'r-') plt.title('Ajuste lineal por minimos cuadrados') plt.xlabel('Eje X') plt.ylabel('Eje Y') plt.legend(('Datos experimentales', 'Ajuste lineal'), loc="upper left") plt.show() # - # Como se ve en este ejemplo, la salida por defecto de `np.polyfit()}` es un array con los parámetros del ajuste. Sin embargo, si se pide una salida detalla con el parámetro `full=True` (por defecto `full=False`), el resultado es una tupla con el array de parámetros, el residuo, el rango, los valores singulares y la condición relativa. 
Nos interesa especialmente el residuo del ajuste, que es la suma cuadrática de todos los resíduos $$\sum_{i=1}^{n}|y_i−f(x_i)|^2$$ # Para el ejemplo anterior tendríamos lo siguiente: # Ajuste a una recta, con salida de datos # print(x, y) resultado = np.polyfit(x, y, 1, full=True) """ Imprime tupla (array([ 2.7 , 9.94]), Parámetros del ajuste array([ 0.472]), Suma de residuos 2, Rango de la matriz del sistema array([1.34777468, 0.42837299]), Valores singulares 1.1102230246251565e-15) rcond """ # los parámetros, el residuo, el rango, los valores singulares y la condición relativa print(resultado) # `rcond` El valor de `rcond` no se trata realmente de la calidad del ajuste, sino describe el proceso mediante el cual se obtuvo el ajuste, es decir, una solución de mínimos cuadrados de un sistema lineal. Este valor se devuelve al usuario para su información. # # `rcond` se utiliza para el truncamiento en matrices mal condicionadas. # Corte para pequeños valores singulares. Los valores singulares menores o iguales a `rcond * greatest_singular_value` se establecen en cero. # # `rcond` *opcional* # # Número de condición relativa del ajuste. Se ignorarán los valores singulares menores que este en relación con el valor singular más grande. El valor predeterminado es len (x) * eps, donde eps es la precisión relativa del tipo flotante, aproximadamente 2e-16 en la mayoría de los casos. 
# +
# Datos experimentales (roughly quartic growth — see z_real below)
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.array([2.95, 17.27, 86.07, 268.55, 603.72,
              1233.62, 2393.62 - 0.04, 4229.55, 6689.21, 9550.65])

# Ajuste a una recta (polinomio de grado n)
n = 1
p = np.polyfit(x, y, n)
print(p)  # fitted coefficients, highest degree first
# -

# +
# Valores de y calculados del ajuste.
# IMPROVEMENT: np.polyval evaluates the fitted polynomial for ANY degree,
# replacing the original hard-coded if/elif chain that only handled n = 1..4.
y_ajuste = np.polyval(p, x)

z_real = x**4 + 2  # "true" underlying curve, for visual comparison

# Dibujamos los datos experimentales
p_datos, = plt.plot(x, y, 'bo')
# Dibujamos la recta de ajuste
p_ajuste, = plt.plot(x, y_ajuste, 'r-')
p_reales, = plt.plot(x, z_real, 'gx', ms=8)
plt.title('Ajuste lineal por minimos cuadrados')
plt.xlabel('Eje X')
plt.ylabel('Eje Y')
plt.legend(('Datos experimentales', 'Ajuste lineal', 'Datos Reales'),
           loc="upper left")
# plt.figure(figsize=(18, 3))
plt.grid()
# BUG FIX: matplotlib renamed grid's ``b`` parameter to ``visible``
# (deprecated in 3.5, removed in 3.8); ``b=True`` raises TypeError on
# current releases.
plt.grid(visible=True, color='g')
plt.show()
# -
docs/source/notas/09.1-Ajuste_con_SciPy-CEF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import math
# The operator module exports a set of efficient functions
# corresponding to the intrinsic operators of Python
import operator

data = pd.read_csv("IRIS.csv")
data.head()
# TO do add header manually

# +
# create and shuffle datset
from sklearn.utils import shuffle

dataset = np.array(data)
dataset = shuffle(dataset)
# BUG FIX: the original sliced ``dataset[:, 0:8]`` for the features, which on
# the 5-column iris file also captures the label column.  Take every column
# except the last (the label) — correct for any number of feature columns.
dataset_X, dataset_y = dataset[:, :-1], dataset[:, -1]
dataset_X.shape, dataset_y.shape

x_train, y_train, x_val, y_val = (dataset_X[0:90], dataset_y[0:90],
                                  dataset_X[90:], dataset_y[90:])
# -


def euclideanDistance(data1, data2):
    """Euclidean distance between two equal-length feature vectors."""
    features = len(data1)  # data1.shape : (4,)
    distance = 0
    for i in range(features):
        distance += np.square(data1[i] - data2[i])
    return np.sqrt(distance)


euclideanDistance(x_train[0], x_val[0])


def kNN(trainingSet, testInstance, k, verbose=False):
    """
    kNN Model
    @params{trainingSet : 90% of IRIS.csv,
            testInstance : single data from remaining set,
            k : number of neighbours for prediction}
    Returns (predicted_label, list_of_neighbour_row_indices).
    """
    distance = {}
    # Calculating euclidean distance between each row
    # of training data and test data
    for x in range(len(trainingSet)):
        dist = euclideanDistance(testInstance, trainingSet[x])
        distance[x] = dist

    # Sorting them on the basis of distance
    sorted_dist = sorted(distance.items(), key=operator.itemgetter(1))
    if verbose:
        print("Sorted Distance from ALL traning Dataset :", sorted_dist)

    # Extracting top k neighbors
    neighbours = []
    for i in range(k):
        neighbours.append(sorted_dist[i][0])

    # Calculating the most freq class in the neighbors
    classVotes = {}
    for i in range(len(neighbours)):
        response = y_train[neighbours[i]]
        if response in classVotes:
            classVotes[response] += 1
        else:
            classVotes[response] = 1

    sortedVotes = sorted(classVotes.items(), key=operator.itemgetter(1),
                         reverse=True)
    if verbose:
        print("All votes of neighbours : ", sortedVotes)
    return (sortedVotes[0][0], neighbours)


result, neigh = kNN(x_train[0:10], x_val[0], 3)


# +
def checkAcc(validate_y, k, verbose=False):
    """
    @params{y_val : 10% of IRIS.csv labels,
            k : number of neighbours for prediction}
    Returns (accuracy, predicted_labels).
    """
    results = []
    neighbours = []
    for i in range(len(validate_y)):
        # Running KNN model
        result, neigh = kNN(x_train, x_val[i], k, verbose)
        results.append(result)
        neighbours.append(neigh)

    accuracy = np.sum(results == validate_y) / len(validate_y)
    if verbose:
        # Show only the misclassified samples and the neighbours that voted.
        for i in range(len(validate_y)):
            if results[i] != validate_y[i]:
                print("Predicted Labels : ", results[i])
                print("Actual Labels : ", validate_y[i])
                print("Neighbours used is : ",
                      [y_train[name] for name in neighbours[i]])
    return accuracy, results
# -

acc, _ = checkAcc(y_val, 5)

# Plotting Accuracy vs learning rate
import matplotlib.pyplot as plt


def plot_acc(k):
    """Plot validation accuracy for every neighbour count in [1, k)."""
    accuracy = []
    for i in range(1, k):
        acc, _ = checkAcc(y_val, i)
        accuracy.append(acc)
    k_val = [i for i in range(1, k)]
    plt.plot(k_val, accuracy, 'ro')
    plt.axis([0, k, 0, 2])
    plt.show()


# +
"""
Comparing our model with scikit-learn
"""
from sklearn.neighbors import KNeighborsClassifier

k = 5
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(x_train, y_train)
pred = knn.predict(x_val)
neigh = knn.kneighbors(x_val)[1][:, 0:k]
print(neigh)
acc = np.sum(pred == y_val) / len(y_val)
print(acc)
kNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + pycharm={"name": "#%%\n"}
import pandas as pd
import numpy as np

sales = pd.read_csv("https://raw.githubusercontent.com/Adarsh09052000/demo-csv/main/raw_data.csv")
# -

sales

# Drop columns not used by the monthly model.
sales = sales.drop(['tran_type', 'tran_month', 'd_date', 'year', 'day_type',
                    'day', 'quarter', 'week'], axis='columns')
sales.shape

# Aggregate quantity per branch / item / month.
sales = pd.DataFrame(
    sales.groupby([sales['brcode'], sales["item_code"], sales["month"]])['qty'].sum())
sales = sales.reset_index()  # reset the index
sales

# Min-max scale the target BEFORE extracting X / y.
# BUG FIX: the original extracted ``y = sales['qty']`` first and only then
# overwrote the column, so whether the model trained on scaled or raw targets
# depended on pandas copy/view semantics; it also "scaled" a second time using
# the already-scaled min (0) and max (1), which was a no-op.  Scale exactly
# once, then split, so ``predict_qty``'s inverse transform is consistent.
scalable_variable = 'qty'
mini_train, maxi_train = sales[scalable_variable].min(), sales[scalable_variable].max()
sales.loc[:, scalable_variable] = (sales[scalable_variable] - mini_train) / (
    maxi_train - mini_train)

X = sales.drop(['qty'], axis='columns')
y = sales['qty']
X
y

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.75, test_size=0.25, random_state=None)

from xgboost import XGBRegressor
lr = XGBRegressor(max_depth=5)
lr.fit(X_train, y_train)

y_pred = lr.predict(X_test)
y_pred

XGboost = round(lr.score(X_test, y_test), 2)
XGboost

import numpy as np
from sklearn import metrics
from sklearn.metrics import mean_squared_error, r2_score, median_absolute_error

rmse = np.sqrt(mean_squared_error(y_test, y_pred))
mse_lr = round(metrics.mean_squared_error(y_test, y_pred), 4)
print("Monthly Forecasting")
print("RMSE: %f" % rmse)
r2 = r2_score(y_test, y_pred)
print("R2: %f" % r2)
mse_lr

mae_lr = round(metrics.mean_absolute_error(y_test, y_pred), 4)
mae_lr


def predict_qty(brcode, item_code, month):
    """Predict the un-scaled monthly quantity for one branch/item/month.

    The model was trained on min-max scaled ``qty``; invert that scaling
    using the min/max captured before training.
    """
    dict_data = {'brcode': [brcode], 'item_code': [item_code], 'month': [month]}
    data = pd.DataFrame.from_dict(dict_data)
    a = lr.predict(data)
    a = a.tolist()[0]
    y = a * (maxi_train - mini_train) + mini_train
    return round(y)


X.columns

a = predict_qty(3, 201975, 4)
a

import pickle
with open("gui.pickle", 'wb') as f:
    pickle.dump(lr, f)

import json
columns = {
    'data_columns': [col.lower() for col in X]
}
with open("salescolumn.json", "w") as f:
    f.write(json.dumps(columns))
Main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] tags=[]
# # Przykładowy notebook używający Pythona
# -

# #### Model fermentacji okresowej z równaniem typu Monoda do opisu wzrostu biomasy (X), konsumpcji substratu (S) i wytwarzania produktu (P). Węcej informacji:https://github.com/sbednarz/modeling str. 123

# ## Import bibliotek

# +
# %matplotlib inline

# File 47-1.py
# Example 47. Fermentation - Monod model
# Copyright (C) 2016, <NAME>
# Released under the GNU General Public License

# FIX: matplotlib.pyplot was imported twice in the original cell;
# one import is sufficient.
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
# -

# ## Model procesu

# +
# Fermentation (Monod growth kinetics)
def model(y, t):
    """ODE right-hand side for batch fermentation with Monod kinetics.

    y = [X, S, P]: biomass, substrate and product concentrations (g/L).
    t is unused (the system is autonomous) but required by odeint.
    Returns [dX/dt, dS/dt, dP/dt].
    """
    X = y[0]
    S = y[1]
    P = y[2]
    mi = mi_max * S / (KM + S)   # Monod specific growth rate
    dXdt = X * mi                # biomass growth
    dSdt = -1 / Y_XS * X * mi    # substrate consumption
    dPdt = Y_PX * X * mi         # product formation
    return [dXdt, dSdt, dPdt]
# -

# ## Paramery modelu

# +
KM = 0.4     # g/L
Y_XS = 0.5
Y_PX = 0.1
mi_max = 1   # 1/h

# Experiment with different values:
X0 = 0.1  # g/L
S0 = 10   # g/L
P0 = 0    # g/L
# -

# ## Obliczenia i raportowanie wyników (wykres)

t = np.linspace(0, 5)  # 0-5h

results = odeint(model, [X0, S0, P0], t)
X = results[:, 0]
S = results[:, 1]
P = results[:, 2]

plt.plot(t, X, label='X')
plt.plot(t, S, label='S')
plt.plot(t, P, label='P')
plt.xlabel('Time, h')
plt.ylabel('Conc., g/L')
plt.legend()
index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Slicing in nodes' names within networks

# +
import pandas as pd

# read tables
df_host = pd.read_table("COV19_hosts_more_0.7.txt", header=None)
df_human = pd.read_table("humanPPI_score_more_0.7.txt", header=None)

# slice strings: keep only the substring at positions [14:20] of each node name
# (presumably the 6-character protein identifier — TODO confirm input format)
for i, row in df_host.iterrows():
    df_host.at[i, 0] = row[0][14:20]
    df_host.at[i, 1] = row[1][14:20]

for i, row in df_human.iterrows():
    df_human.at[i, 0] = row[0][14:20]
    df_human.at[i, 1] = row[1][14:20]

# set data types
# BUG FIX: the original called ``astype(np.int)`` — but numpy was only
# imported in the NEXT cell (NameError on a clean top-to-bottom run), and
# ``np.int`` itself was removed in NumPy 1.24.  The builtin ``int`` is the
# documented replacement.
df_host = df_host.astype(int)
df_human = df_human.astype(int)

# +
import numpy as np

# export txts
np.savetxt(r'COV19_hosts_more_0.7_slice.txt', df_host.values, fmt='%d')
np.savetxt(r'humanPPI_score_more_0.7_slice.txt', df_human.values, fmt='%d')
# -

# # Analyse the diffusion result for nCoV19 host proteins vs human PPI(both confidence > 0.7)

# +
import pandas as pd
import numpy as np

# prepare data
df_DK_score = pd.read_csv(
    'Cytoscape/humanPPI_vs_Host_DKernel_scores.csv',
    dtype={'name': object, 'SCORES': np.float32})[['name', 'SCORES']]

# set a list to store nodes in intervals with different scores
l_score = []
num_set = 10
for i in range(0, num_set):
    l_score.append(set())

# split max-min into intervals
interval = df_DK_score['SCORES'].max() - df_DK_score['SCORES'].min()
itv_list = [i * interval / num_set for i in range(1, num_set + 1)]
# -

import math

# iterate scores and feed the corresponding names into sets
for (index, data) in df_DK_score.iterrows():
    val = math.floor(data['SCORES'] / interval * num_set)
    # max value should be considered individually
    if val == num_set:
        val = num_set - 1
    l_score[val].add(str(data['name']))

# # Visualize

# +
# histogram for visualizing l_score
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

fig, ax = plt.subplots(figsize=(10, 4))
total = sum([len(l) for l in l_score])


# formatter
def percent(x, pos):
    """Format a raw node count as a percentage of all scored nodes."""
    return '%1.1f%%' % (x / total * 100)


formatter = FuncFormatter(percent)
ax.yaxis.set_major_formatter(formatter)

rect = ax.bar([i * interval for i in range(1, num_set + 1)],
              [len(l) for l in l_score])


def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.annotate('{}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 0.6),  # 1 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')


autolabel(rect)
plt.title('Diffusion result of nCoV19 host proteins on HumanPPI', pad=20.0)
plt.ylabel('Percentage')
plt.xlabel('Score')
plt.savefig('img/Diffusion result of nCoV19 host proteins on HumanPPI.png', dpi=200)
plt.show()
# -

# # Extract the target vicinity
# To extract the target proteins, there're serveral premises.
#
# 1. nCoV19 host proteins(should have high scores in the diffusion algorithm as they're treated as the source)
# => need verification
# 2. a criteria to distinguish possible drug targets from the humanPPI
# => a score range
src/test/java/resources/cov19/.ipynb_checkpoints/network_analysis-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/marcelo-cueto/Blockchain_python/blob/main/Cryptomoneda_Blockchain.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="T4CInbaQr15Z" outputId="c495e8ac-d169-4f63-d6e4-5a25b9dbdf67" pip install flask-ngrok # + id="NNhGfBhi-CwW" import datetime import hashlib import json import requests from uuid import uuid4 from flask import Flask, jsonify, request from urllib.parse import urlparse from flask_ngrok import run_with_ngrok # + id="uB8zcyQH-bUK" class Blockchain: def __init__(self) : """Constructor del Blockchain""" self.chain = [] self.transactions = [] self.create_block(proof=1, previous_hash= '0') self.nodes = set() def create_block(self, proof, previous_hash): """Cracion de un nuevo Bloque: Arguments: -proof:Nounce del Bloque actual -previous_hash: Hash del bloque previo Returns: -Block: Devolucion Nuevo bloque creado. """ block={ 'index' : len(self.chain)+1, 'Timestamp' : str(datetime.datetime.now()), 'proof' : proof, 'previous_hash' : previous_hash, 'transactions' : self.transactions } self.transactions = [] self.chain.append(block) return block def get_previous_block(self): """Obtencion del bloque anterior Return: -Devolucion del ultimo bloque de la blockchain. 
""" return self.chain[-1] def proof_of_work(self, previous_proof): """Protocolo de concenco (PoW) Arguments: -previous_proof: Hash del bloque previo Return: -new_proof: Devolucion del nuevo hash obtenido del PoW """ new_proof=1 chech_proof=False while chech_proof == False: hash_operation = hashlib.SHA256(str(new_proof**2-previous_proof**2).encode()).hexidiges() if hash_operation[:4] == '0000': chech_proof == True else: new_proof += 1 return new_proof def hash(self, block): """Calculo del Hash de un bloque Arguments: -block : Identifica a un bloque de Blockchain Returns: -hash_block: Devuelve el hash del bloque """ encoded_block = json.dumps(block, sort_keys=True).encode() hash_block = hashlib.SHA256(encoded_block).hexidiges() return hash_block def is_chain_valid(self, chain): """Verifica si la Blockchain es valida Arguments: -chain : cadena de bloques que contiene toda la informacion de las transacciones Returns: -True/False: Devuelve un booleano en funcion de la validez de la blockchain (True: valido , False:Invalido) """ previous_block = chain[0] block_index=1 while block_index < len(chain): block = chain[block_index] if block['previous_hash'] != self.hash(previous_block): return False previous_proof = previous_block['proof'] proof= block['proof'] hash_operation = hashlib.SHA256(str(proof**2-previous_proof**2).encode()).hexidiges() if hash_operation[:4] != '0000': return False previous_block = block block_index += 1 return True def add_transaction(self, sender, receiver, amount): """ Realizacion de transaccion Arguments: - sender: Persona que hace la Transaccion - receiver : Persona que resive la Transaccion -amount : Monto de la Transaccion Returns: -Devolucion del indice superior al ultimo bloque """ self.trnsactions.append({'sender' :sender, 'receiver' : receiver, 'amount' :amount}) previous_block = self.get_previous_block() return previous_block['index']+1 def add_node(self, address): """ Nuevo nodo en la Blockchain Arguments: -address: Direccion del nuevo 
nodo """ parsed_url=urlparse(address) self.nodes.add(parsed_url.netloc) def replace_chain(self): """Reemplazo de la cadena por la cadena mas larga, siempre y cuando sea valida""" network= self.nodes longest_chain = None max_length = len(self.chain) for node in network: response=requests.get(f'http://{node}/get_chain') if response.status_code == 200: length = response.json()['length'] chain = response.json()['chain'] if length > max_length and self.is_chain_valid(chain): max_length = length longest_chain = chain if longest_chain: self.chain = longest_chain return True return False # + id="V2tZxy97ySkf" #Crear una app web #Ejecucion de la app con Flask app = Flask(__name__) run_with_ngrok(app) #Si se obtiene un error 500, actualizar flask y ejecutar la siguiente linea app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False # Crear la direccion del nodo en el puerto 500 node_address = str(uuid4()).replace('-','') #Creacion de la Blockchain blockchain = Blockchain() @app.route('/mine_block', methods =['GET']) def mine_block(): """Minado de un bloque""" previous_block = blockchain.get_previous_block() previous_proof = previous_block['proof'] proof = blockchain.proof_of_work(previous_proof) previous_hash = blockchain.hash(previous_block) blockchain.add_transaction(sender = node_address, receiver= 'JuanAmengual' , amount =10) block = blockchain.create_block(proof, previous_hash) response = {'message' :'Good work, a block was mined!', 'index' : block['index'], 'Timestamp' : block['Timestamp'], 'proof' : block['proof'], 'previous_hash' : block['previous_hash'], 'transactions' : block['transactions'], } return jsonify(response), 200 @app.route('/get_chain', methods = ['GET']) def get_chain(): """ Obtencion de Blockchain """ response = {'(chain' : blockchain.chain, 'length' : len(blockchain.chain) } return jsonify(response), 200 @app.route('/is_valid', methods = ['GET']) def is_valid(): """ Comprobasion si la Blockchain es valida """ is_valid = 
blockchain.is_chain_valid(blockchain.chain) if is_valid: response = {'message' : 'The Blockchain es valida'} else: response = {'message' : 'The Blockchain NO es valida'} return jsonify(response), 200 @app.route('/add_transaction', method = ['POST']) def add_transaction(): json=request.get_json() transaction_keys=['sender', 'receiver', 'amount'] if not all(key in json for key in transaction_keys): return 'Faltan algunos elementos de la transaccion' index = blockchain.add_transaction(json['sender'],json['receiver'],json['amount']) response = {'message' : f'La transaccion sera añadida al bloque {index}'} return jsonify(response), 201 # Descentalizacion de la CAdena de Bloques # Conectar nuevos nodos @app.route('/conect_node', method=['POST']) def connect_node(): json = request.get_json() nodes = json.get('nodes') if nodes is None: return 'No hay nodos para añadir', 400 for node in nodes: blockchain.add_node(node) response = {'message' : 'Todos los nodos han sido conectados. La Blockchain contiene ahora los nodos siguientes: ', 'total_nodes' : list(blockchain.nodes)} return jsonify(response), 201 @app.route('/replace_chain', methods = ['GET']) def replace_chain(): """Reemplaza la cadena por la mas larga (si es necesario)""" is_chain_replaced = blockchain.replace_chain() if is_chain_replaced: response = {'message' : 'Los nodos tenian diferentes cadenas, se ha reemplazado por la Blockchain mas larga.', 'new_chain' : blockchain.chain} else: response = {'message' : 'Todo Correcto. La Blockchain ya es la mas larga en todos los nodos', 'actual_chain' : blockchain.chain} return jsonify(response), 201 # + id="_-0Ge8E67TAx" app.run()
Cryptomoneda_Blockchain.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .scala
//       format_name: light
//       format_version: '1.5'
//       jupytext_version: 1.14.4
//   kernelspec:
//     display_name: spylon-kernel
//     language: scala
//     name: spylon-kernel
// ---

// TPC-DS power test with the gazelle_plugin (OAP) columnar engine on Dataproc.
// The %%init_spark cell below configures the Spark launcher before the session
// starts: it registers the spark-sql-perf benchmark jar plus the OAP columnar
// jars, and sets the columnar/Arrow related Spark properties.
// FIX: removed duplicated settings ("spark.sql.extensions" and
// "spark.dynamicAllocation.executorIdleTimeout" were each set twice with the
// same value) and fixed comment/message typos.

// %%init_spark
launcher.jars = ["file:///opt/benchmark-tools/spark-sql-perf/target/scala-2.12/spark-sql-perf_2.12-0.5.1-SNAPSHOT.jar", "/opt/benchmark-tools/oap/oap_jars/spark-columnar-core-1.2.0-jar-with-dependencies.jar","/opt/benchmark-tools/oap/oap_jars/spark-arrow-datasource-standard-1.2.0-jar-with-dependencies.jar"]
launcher.conf.set("spark.driver.extraClassPath", "/opt/benchmark-tools/oap/oap_jars/spark-columnar-core-1.2.0-jar-with-dependencies.jar:/opt/benchmark-tools/oap/oap_jars/spark-arrow-datasource-standard-1.2.0-jar-with-dependencies.jar:/opt/benchmark-tools/spark-sql-perf/target/scala-2.12/spark-sql-perf_2.12-0.5.1-SNAPSHOT.jar")
launcher.conf.set("spark.executor.extraClassPath", "/opt/benchmark-tools/oap/oap_jars/spark-columnar-core-1.2.0-jar-with-dependencies.jar:/opt/benchmark-tools/oap/oap_jars/spark-arrow-datasource-standard-1.2.0-jar-with-dependencies.jar:/opt/benchmark-tools/spark-sql-perf/target/scala-2.12/spark-sql-perf_2.12-0.5.1-SNAPSHOT.jar")
launcher.conf.set("spark.sql.warehouse.dir", "hdfs:///user/livy")
launcher.conf.set("spark.executorEnv.LD_LIBRARY_PATH", "/opt/benchmark-tools/oap/lib")
launcher.conf.set("spark.executor.extraLibraryPath", "/opt/benchmark-tools/oap/lib")
launcher.conf.set("spark.driver.extraLibraryPath", "/opt/benchmark-tools/oap/lib")
launcher.conf.set("spark.executorEnv.LIBARROW_DIR", "/opt/benchmark-tools/oap")
launcher.conf.set("spark.executorEnv.CC", "/opt/benchmark-tools/oap/bin/gcc")
launcher.conf.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
launcher.conf.set("spark.oap.sql.columnar.preferColumnar", "true")
launcher.conf.set("spark.sql.join.preferSortMergeJoin", "false")
launcher.conf.set("spark.oap.sql.columnar.joinOptimizationLevel", "12")
launcher.conf.set("spark.sql.broadcastTimeout", "3600")
launcher.conf.set("spark.executor.memoryOverhead", "2989")
launcher.conf.set("spark.dynamicAllocation.executorIdleTimeout", "3600s")
launcher.conf.set("spark.sql.autoBroadcastJoinThreshold", "31457280")
launcher.conf.set("spark.kryoserializer.buffer.max", "256m")
launcher.conf.set("spark.network.timeout", "3600s")
launcher.conf.set("spark.memory.offHeap.enabled", "false")
launcher.conf.set("spark.sql.inMemoryColumnarStorage.batchSize", "20480")
launcher.conf.set("spark.sql.sources.useV1SourceList", "avro")
launcher.conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
launcher.conf.set("spark.sql.execution.arrow.maxRecordsPerBatch", "20480")
launcher.conf.set("spark.kryoserializer.buffer", "64m")
launcher.conf.set("spark.sql.shuffle.partitions", "72")
launcher.conf.set("spark.sql.parquet.columnarReaderBatchSize", "20480")
launcher.conf.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
launcher.conf.set("spark.sql.columnar.codegen.hashAggregate", "false")
launcher.conf.set("spark.memory.offHeap.size", "3g")

!hadoop fs -mkdir /user/livy

// +
// Benchmark parameters.
val scaleFactor = "1" // data scale 1GB
val iterations = 1 // how many times to run the whole set of queries.
val format = "parquet" // supported: parquet or orc
val storage = "hdfs" // support HDFS
val bucket_name = "/user/livy" // scala notebook only has the write permission of "hdfs:///user/livy" directory
val partitionTables = true // create partition tables
val query_filter = Seq() // Seq() == all queries
//val query_filter = Seq("q1-v2.4", "q2-v2.4") // run subset of queries
val randomizeQueries = false // run queries in a random order. Recommended for parallel runs.

// detailed results will be written as JSON to this location.
var resultLocation = s"${storage}://${bucket_name}/results/tpcds_${format}/${scaleFactor}/"
var databaseName = s"tpcds_${format}_scale_${scaleFactor}_db"
val use_arrow = true // when you want to use gazelle_plugin to run TPC-DS, you need to set it true.

// When Arrow is enabled, register the TPC-DS tables through the "arrow" data
// source instead of using the plain parquet database.
if (use_arrow) {
  val data_path = s"${storage}://${bucket_name}/datagen/tpcds_${format}/${scaleFactor}"
  resultLocation = s"${storage}://${bucket_name}/results/tpcds_arrow/${scaleFactor}/"
  databaseName = s"tpcds_arrow_scale_${scaleFactor}_db"
  val tables = Seq("call_center", "catalog_page", "catalog_returns", "catalog_sales",
    "customer", "customer_address", "customer_demographics", "date_dim", "household_demographics",
    "income_band", "inventory", "item", "promotion", "reason", "ship_mode", "store",
    "store_returns", "store_sales", "time_dim", "warehouse", "web_page", "web_returns",
    "web_sales", "web_site")
  if (spark.catalog.databaseExists(s"$databaseName")) {
    println(s"$databaseName already exists!")
  } else {
    spark.sql(s"create database if not exists $databaseName").show
    spark.sql(s"use $databaseName").show
    for (table <- tables) {
      if (spark.catalog.tableExists(s"$table")) {
        println(s"$table already exists!")
      } else {
        spark.catalog.createTable(s"$table", s"$data_path/$table", "arrow")
      }
    }
    if (partitionTables) {
      // Partition metadata is not created by createTable; recover it per table.
      for (table <- tables) {
        try {
          spark.sql(s"ALTER TABLE $table RECOVER PARTITIONS").show
        } catch {
          case e: Exception => println(e)
        }
      }
    }
  }
}

val timeout = 60 // timeout in hours

// COMMAND ----------

// Spark configuration
spark.conf.set("spark.sql.broadcastTimeout", "10000") // good idea for Q14, Q88.
// ... + any other configuration tuning

// COMMAND ----------

sql(s"use $databaseName")

import com.databricks.spark.sql.perf.tpcds.TPCDS
val tpcds = new TPCDS (sqlContext = spark.sqlContext)

// Select the query set: all TPC-DS v2.4 queries, or the filtered subset,
// optionally shuffled for parallel runs.
def queries = {
  val filtered_queries = query_filter match {
    case Seq() => tpcds.tpcds2_4Queries
    case _ => tpcds.tpcds2_4Queries.filter(q => query_filter.contains(q.name))
  }
  if (randomizeQueries) scala.util.Random.shuffle(filtered_queries) else filtered_queries
}

// Run the experiment and block until it finishes (or the timeout elapses).
val experiment = tpcds.runExperiment(
  queries,
  iterations = iterations,
  resultLocation = resultLocation,
  tags = Map("runtype" -> "benchmark", "database" -> databaseName, "scale_factor" -> scaleFactor))
println(experiment.toString)
experiment.waitForFinish(timeout*60*60)
integrations/oap/dataproc/notebooks/tpcds_power_test_with_gazelle_plugin_Dataproc.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Indicator Exploration
#
# *<NAME>*

# ### *Executive Summary*
#

# ### *Objectives*
#

# ### Setup

# Import libraries
import os
import math
import pprint

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm

from ipywidgets import *
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeClassifier, plot_tree
from scipy.cluster.hierarchy import dendrogram

# Declare global variables
DATA_DIR = os.path.join('../data/processed')
DATA_FILE = os.path.join(DATA_DIR, 'processed_features.csv')

plt.style.use('ggplot')


# +
# Useful functions

def null_counter(df):
    """Return a DataFrame listing each column of ``df`` with its null count and % null."""
    record_nulls = []
    for col in df.columns:
        nulls = df[col].isnull().sum()
        percent_null = round((nulls / df.shape[0]) * 100, 2)
        record_nulls.append([col, nulls, percent_null])
    output = pd.DataFrame(record_nulls, columns=['Attribute', 'Null Count', '% Null'])
    return output


def get_year_range(df):
    """Return the sorted unique values of the ``year`` column."""
    year_range = list(df['year'].unique())
    year_range.sort()
    return year_range


def subset_by_states_only(df):
    """Keep only rows whose FIPS code belongs to a US state (codes <= 56)."""
    df = df[df['fips'] <= 56]
    return df


def sound_off(df):
    """Print a quick profile of ``df``: shape, year span, and per-column null counts.

    BUG FIX: this function previously ignored its ``df`` argument and always
    profiled the module-level ``cluster_df``, so calling it on any other frame
    silently reported the wrong data. It now uses the argument throughout.
    """
    nRow, nCol = df.shape
    print(f'There are {nRow} rows and {nCol} columns.')
    print('')
    YEAR_RANGE = get_year_range(df)
    print(f'Data spans the years {YEAR_RANGE[0]} to {YEAR_RANGE[-1]}.')
    print('')
    print('Available columns include:')
    display(null_counter(df))


def filter_out_factor(df, column_name):
    """Drop rows where ``column_name`` is null; write the dropped rows to CSV for review."""
    ## Identify records with null values in column
    bad_records = df[df[column_name].isnull()]
    bad_records.to_csv(f'missing_{column_name}.csv')
    ## Drop records with null values in column
    df = df[df[column_name].notnull()]
    return df


# +
# Load and preview data

## Isolate by specific columns
cluster_df = pd.read_csv(DATA_FILE)
sound_off(cluster_df)
# -

# Remove the columns that won't be used as features
cluster_prepared_df = cluster_df.drop(['leaid', 'year', 'lea_name', 'fips'], axis=1)

# ### Regression

# +
# Normalize data
features = cluster_prepared_df.drop(['academic_performance',
                                     'read_test_pct_prof_midpt',
                                     'math_test_pct_prof_midpt'], axis=1)
labels = cluster_prepared_df['academic_performance']
normed_features = preprocessing.StandardScaler().fit_transform(features)
normed_labels = preprocessing.StandardScaler().fit_transform(labels.to_numpy().reshape(-1,1))

# Setup a Linear Regression
lr_model = LinearRegression()

# Fit the regression with the available data
lr_model.fit(normed_features, normed_labels)
preds = lr_model.predict(normed_features)

# Visualize the resulting model
r_2 = round(lr_model.score(normed_features, normed_labels), 3)
print(f'R^2 Value: {r_2}')
print()
mse = round(mean_squared_error(normed_labels, preds), 3)
print(f'MSE: {mse}')
print()
coef_vals = [round(x,3) for x in lr_model.coef_[0]]
coef_map = list(zip(features.columns, coef_vals))
print(f'Coefficients:')
pprint.pprint(coef_map)
print()
print(f'Intercept: {lr_model.intercept_}')

# Use statsmodels to get more details
display(sm.OLS(normed_labels, normed_features).fit().summary())
# -

# ### Modeling

# +
# Build an elbow chart to find a good cluster range
def build_elbow_chart(df, min_clusters=2, max_clusters=10, random_seed=777):
    """Plot K-Means inertia against k for k in [min_clusters, max_clusters]."""
    ## Calculate sum of squared distances for each cluster
    ssd = []
    for k in range(min_clusters, max_clusters+1):
        kmeans_learner = KMeans(n_clusters=k, random_state=random_seed)
        kmeans_learner.fit(df)
        ssd.append(kmeans_learner.inertia_)
    ## Plot sum of squared distances
    plt.plot(range(min_clusters, max_clusters+1), ssd)
    plt.title('Elbow Chart')
    plt.xlabel('K')
    plt.ylabel('Sum of Squared Distances')


# Build a good cluster and view the resulting data
def build_cluster(df, k=6, random_seed=777):
    """Fit K-Means with ``k`` clusters and return the per-row cluster labels."""
    kmeans_learner = KMeans(n_clusters=k, random_state=random_seed)
    results = kmeans_learner.fit_predict(df)
    return results


# View the characteristics of each labeled dataset
def view_cluster(df, results, k=6, verbose=True):
    """Attach ``results`` as a ``labels`` column (mutates ``df`` in place) and
    optionally display summary statistics for each cluster."""
    df['labels'] = results
    for i in range(0, k):
        subset = df[df['labels'] == i]
        # subset.to_csv(f'labeled_{i}.csv')
        if verbose:
            print(i)
            display(subset.describe())
            print()
# -

# +
# Place data into four categories, to prevent clustering from emphasizing size
single_df = cluster_prepared_df[cluster_prepared_df['number_of_schools'] == 1]
small_df = cluster_prepared_df[(cluster_prepared_df['number_of_schools'] > 1) &
                               (cluster_prepared_df['number_of_schools'] <= 3)]
medium_df = cluster_prepared_df[(cluster_prepared_df['number_of_schools'] > 3) &
                                (cluster_prepared_df['number_of_schools'] <= 10)]
large_df = cluster_prepared_df[(cluster_prepared_df['number_of_schools'] > 10)]

df_list = [single_df, small_df, medium_df, large_df]
counts = [x['number_of_schools'].count() for x in df_list]
print(counts)
# -

# Normalize within clusters to detect patterns besides size
normed_df_list = []
for df in df_list:
    normed_df_list.append(preprocessing.StandardScaler().fit_transform(df))

build_elbow_chart(normed_df_list[0])

build_elbow_chart(normed_df_list[1])

build_elbow_chart(normed_df_list[2])

build_elbow_chart(normed_df_list[3])

results = []
for df in normed_df_list:
    results.append(build_cluster(df, k=4))

# ---
# Analysis (Category)
# ---

# +
# Box Plot
# Interactive Box Plot for Indicators
# %matplotlib notebook

metrics = list(cluster_prepared_df.columns)
category = [0, 1, 2, 3]
category_mapping = {0: "Single", 1: "Small", 2: "Medium", 3: "Large"}

@interact(metric=metrics)
def cat_box_metric_explorer(metric):
    """Boxplot of ``metric`` across the four size categories."""
    # Clear any old figures
    plt.close()
    # Make a plot to match states to the chosen metric
    plt.figure(figsize=(6, 4), num='Boxplot Explorer Tool')
    plot_data = [category_df[metric] for category_df in df_list]
    plt.boxplot(plot_data)
    plt.title(f'Category: {metric}')

interactive_plot = interactive(cat_box_metric_explorer, metric=metrics[0])
# -

# ---
# Analysis (Sub-Category)
# ---

# +
# Box Plot
# Interactive Box Plot for Indicators
# %matplotlib notebook

metrics = list(cluster_prepared_df.columns)
category = [0, 1, 2, 3]
category_mapping = {0: "Single", 1: "Small", 2: "Medium", 3: "Large"}

@interact(metric=metrics, cat=category)
def box_metric_explorer(metric, cat):
    """Boxplot of ``metric`` across the K-Means sub-categories of one size category."""
    # Clear any old figures
    plt.close()
    # Make a plot to match states to the chosen metric
    plt.figure(figsize=(6, 4), num='Boxplot Explorer Tool')
    # Select category data
    cluster_count = len(np.unique(results[cat]))
    category_df = df_list[cat]
    view_cluster(category_df, results[cat], k=cluster_count)
    # Subset sub-categories
    sub_categories = [category_df[category_df['labels'] == i] for i in range(0,cluster_count)]
    # Subset metric
    plot_data = [sub_df[metric] for sub_df in sub_categories]
    plt.boxplot(plot_data)
    plt.title(f'{category_mapping[cat]} Category: {metric}')

interactive_plot = interactive(box_metric_explorer, metric=metrics[0], cat=category[0])
# -

# ### Sub-Category Regressions
#

# +
# For a given category,
# Show the regressions for each sub-category contained within,
# Using non-normalized data
single_df

# +
# Regressions
# Interactive Regressions for Indicators
# %matplotlib notebook

metrics = list(cluster_prepared_df.columns)
category = [0, 1, 2, 3]
category_mapping = {0: "Single", 1: "Small", 2: "Medium", 3: "Large"}

@interact(metric=metrics, cat=category)
def multi_regression_metric_explorer(metric, cat):
    """OLS regression of ``metric`` on all other features, per sub-category."""
    # Clear any old figures
    plt.close()
    # Select category data
    cluster_count = len(np.unique(results[cat]))
    category_df = df_list[cat]
    view_cluster(category_df, results[cat], k=cluster_count, verbose=False)
    # Subset sub-categories
    sub_categories = [category_df[category_df['labels'] == i] for i in range(0,cluster_count)]
    # Subset metric
    # plot_data = [sub_df for sub_df in sub_categories]
    # Normalize data
    # normed_features = preprocessing.StandardScaler().fit_transform(features)
    # normed_labels = preprocessing.StandardScaler().fit_transform(labels.to_numpy().reshape(-1,1))
    # Display regression results for each subcategory
    print(f'{category_mapping[cat]}')
    for i in range(0, cluster_count):
        features = sub_categories[i].drop([metric, 'labels'], axis=1)
        labels = sub_categories[i][metric]
        # Setup a Linear Regression
        lr_model = LinearRegression()
        # Fit the regression with the available data
        lr_model.fit(features, labels)
        preds = lr_model.predict(features)
        # Visualize the resulting model
        display(sm.OLS(labels, features).fit().summary())

interactive_plot = interactive(multi_regression_metric_explorer, metric=metrics[0], cat=category[0])
# -

# Regenerate the input dataset, but with labels
def reconstitute_data(df_list, results_list):
    """Merge the per-category frames back into one dataset with a global
    sub-category label.

    NOTE(review): mutates the frames in ``df_list`` in place by adding a
    ``results`` column; labels are offset by ``4*i + 1`` so each size
    category occupies a distinct label range.
    """
    # Map results to dataframe
    for i in range(0, len(df_list)):
        offset = (4 * i) + 1
        df_list[i]['results'] = results_list[i] + offset
    # Merge dataframes
    output = pd.concat(df_list)
    return output

x = reconstitute_data(df_list, results)

x.describe()

# +
# Check post-processed dataset
# DATA_DIR = os.path.join('../data/processed')
# DATA_FILE = os.path.join(DATA_DIR, 'processed_features_labeled.csv')
# cluster_df = pd.read_csv(DATA_FILE)
# sound_off(cluster_df)
# -
notebooks/1.0-jrg-full-prototype.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import pandas as pd import geopandas as gpd from lets_plot import * LetsPlot.setup_html() # - states = pd.read_csv("../data/USA_States.csv") states.head() LetsPlot.set(maptiles_zxy(url='https://a.tile.openstreetmap.org/{z}/{x}/{y}.png')) ggplot(data=states) + geom_livemap(aes("Longitude", "Latitude")) # + dept = pd.read_csv("../data/usgs_dept_by_state_2019.csv") dept.head() # + dept_gg = dept \ .drop([" ", "Unnamed: 2", "Unnamed: 4", "State and Local Debt", "Unnamed: 6", "Unnamed: 8", "Real State Growth %", "Unnamed: 10", "Population (million)"], axis=1) \ .set_index("State") \ .stack() \ .reset_index() \ .rename(columns={"level_1": "Item", 0: "$B"}) dept_gg # - ggplot(dept_gg) \ + geom_bar(aes("State", "$B", fill="Item"), stat="identity", position="dodge", sampling="none") \ + scale_fill_manual(values=["#FE0968", "#FF75A6", "#007BCD"]) \ + ggsize(1000, 500) states_gdf = gpd.GeoDataFrame(states, geometry=gpd.points_from_xy(states.Longitude, states.Latitude)) states_gdf.head() # + ggplot(dept_gg) + geom_livemap(aes(sym_x="Item", sym_y="$B", fill="Item"), symbol='bar', size=30, map=states_gdf, map_join=["State", "State"])
docs/dev/notebooks/map_proportional_symbols_new.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # execute with %run "/path/to/pydata-references/template_cv.ipynb" import pandas as pd import numpy as np import scipy as sp # %precision 4 np.set_printoptions(suppress=True) pd.set_option('display.max_columns', 100) pd.set_option('display.max_colwidth', 100) import seaborn as sns import matplotlib.pyplot as plt import matplotlib as mpl # %matplotlib inline # %config InlineBackend.figure_format = 'retina' #import cv2 import torch # %reload_ext autoreload # %autoreload 2 # - import requests # + img_data = requests.get("https://upload.wikimedia.org/wikipedia/commons/d/d3/Elliot%27s_handwriting.png").content with open('handwriting.png', 'wb') as f: f.write(img_data) from PIL import Image img = Image.open('handwriting.png') img # + from mpldatacursor import datacursor arr = np.asarray(img)[15:25, 20:35] img_small = Image.fromarray(arr) w,h = img_small.width, img_small.height fig, ax = plt.subplots(figsize=(15,5)) r = ax.imshow(img_small) datacursor(r) plt.show() # + methods = {'nearest': Image.NEAREST, 'box': Image.BOX, 'bilinear': Image.BILINEAR, 'hamming': Image.HAMMING, 'bicubic': Image.BICUBIC, 'lanczos': Image.LANCZOS } fig, axs = plt.subplots(len(methods), 1, figsize=(10,10), sharey=True) for i, (name, m) in enumerate(methods.items()): img_res = img_small.resize((w*2, h*2), resample=m) axs[i].axis('off') axs[i].set_title(name) axs[i].imshow(img_res, cmap="gray") # - # + methods=[("area", cv2.INTER_AREA), #("nearest", cv2.INTER_NEAREST), ("linear", cv2.INTER_LINEAR), ("linear_exact", cv2.INTER_LINEAR_EXACT), ("cubic", cv2.INTER_CUBIC), ("lanczos4", cv2.INTER_LANCZOS4)] for i, m in enumerate(methods): ann_img = p.annotate_img(inkml_img_path) axs[i].axis('off') axs[i].set_title(name) axs[i].imshow(ann_img.img[340:440, 120:350], cmap="gray")
interpolation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:larval_gonad] # language: python # name: conda-env-larval_gonad-py # --- # + import os import sys import re from pathlib import Path from IPython.display import display, HTML, Markdown import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns # Project level imports from larval_gonad.notebook import Nb # - # Setup notebook nbconfig = Nb.setup_notebook(seurat_dir='/data/fearjm/local_data_store/larval_gonad/output/scrnaseq-wf/scrnaseq_combine_force') # + mapper = { 'chrX': 'X', 'chr2L': 'A', 'chr2R': 'A', 'chr3L': 'A', 'chr3R': 'A', 'chrY': 'Y', 'chr4': '4', } fbgn2chrom = pd.read_csv('/data/fearjm/local_data_store/larval_gonad/output/fbgn2chrom.tsv', sep='\t', index_col=0) fbgn2chrom = fbgn2chrom.chrom.map(mapper) # - chrom_gene_number = fbgn2chrom.value_counts() chrom_gene_number clusters = pd.read_csv('/data/fearjm/local_data_store/larval_gonad/output/scrnaseq-wf/scrnaseq_rep2_force/metadata.tsv', sep='\t', usecols=['res.0.6']).iloc[:, 0] clusters.index.name = 'cell_id' clusters.name = 'cluster' clusters = clusters[clusters < 9].map(nbconfig.short_cluster_annot) raw = pd.read_csv('/data/fearjm/local_data_store/larval_gonad/output/scrnaseq-wf/scrnaseq_rep2_force/raw.tsv', sep='\t') raw.index.name = 'FBgn' raw.reset_index(inplace=True) melted = raw.melt(id_vars='FBgn', var_name='cell_id', value_name='UMI') df = melted.join(fbgn2chrom, on='FBgn').join(clusters, on='cell_id').set_index(['cluster', 'cell_id', 'chrom', 'FBgn']) df.sort_index(inplace=True) df.head() num_missing = (df == 0).groupby(['cluster', 'cell_id', 'chrom']).sum() num_missing.div(chrom_gene_number.T, axis='rows', level='chrom')
notebook/2019-01-22_missing_plots_for_friday_meeting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tutorial: advection-diffusion kernels in Parcels

# Adding diffusion to simulations is insightful when investigating tracers such as heat, salt or nutrients. The spread of tracers is affected by subgrid-scale processes and unresolved physics ([Van Sebille et al. 2018](https://doi.org/10.1016/j.ocemod.2017.11.008)). Also when individual water parcels are considered, diffusion can be included in ensemble particle simulations to trace the evolution of a probability distribution. This is especially important when coarse velocity fields that are not eddy-resolving are considered, since the absence of eddies can still be parameterized through eddy diffusive fluxes ([Shah et al., 2017](https://doi.org/10.1175/JPO-D-16-0098.1)).
#
# ## Stochastic Differential Equations (SDEs) for Particle Trajectories in the Presence of Diffusivity
# The stochastic differential equation for a particle trajectory is given by
# $$
# \begin{aligned}
# d\mathbf{X}(t) &\overset{\text{Itô}}{=} (\mathbf{u} + \nabla \cdot \mathbf{K}) dt + \mathbf{V}(t, \mathbf{X})\cdot d\mathbf{W}(t), \\
# \mathbf{X}(t_0) &= \mathbf{x}_0,
# \end{aligned}
# $$
# where $\mathbf{X}$ is the particle position vector ($\mathbf{x}_0$ being the initial position vector), $\mathbf{u}$ the velocity vector, $\mathbf{K} = \frac{1}{2} \mathbf{V} \cdot \mathbf{V}^T$ the diffusivity tensor, and $d\mathbf{W}(t)$ a Wiener increment (normally distributed with zero mean and variance $dt$). This equation is obtained through writing the advection-diffusion equation as a Fokker-Planck equation.
Particle distributions obtained by solving above equation therefore satisfy the advection-diffusion equation ([<NAME> et al., 2018](https://doi.org/10.1016/j.ocemod.2017.11.008); [Shah et al., 2013](https://doi.org/10.1016/j.ocemod.2013.04.001)). # # Generally, in three-dimensional ocean models, $\mathbf{K}$ (and therefore $\mathbf{V}$) contains off-diagonal components to account for diffusion along non-flat isopycnal surfaces. Instead, here we will focus on the simpler case of diffusion in a horizontal plane, where diffusivity is specified only in the zonal and meridional direction, i.e. # $$\mathbf{K}(x,y)=\begin{bmatrix} # K_x(x,y) & 0\\ # 0 & K_y(x,y) # \end{bmatrix}.$$ # # The above stochastic differential equation then becomes # # $$ # \begin{align} # dX(t) &= a_x dt + b_x dW_x(t), \quad &X(t_0) = x_0,\\ # dY(t) &= a_y dt + b_y dW_y(t), \quad &Y(t_0) = y_0, # \end{align} # $$ # where $a_i = v_i + \partial_i K_i(x, y)$ is the deterministic drift term and $b_i = \sqrt{2K_i(x, y)}$ a stochastic noise term ($\partial_i$ denotes the partial derivative with respect to $i$). # # ## Numerical Approximations of SDEs # The simplest numerical approximation of the above SDEs is obtained by replacing $dt$ by a finite time discrete step $\Delta t$ and $dW$ by a discrete increment $\Delta W$, yielding the **Euler-Maruyama (EM) scheme** ([Maruyama, 1955](https://link.springer.com/article/10.1007/BF02846028)): # $$ # \begin{equation} # X_{n+1} = X_n + a_x \Delta t + b_x \Delta W_{n, x}, # \end{equation} # $$ # with a similar expression for $Y$. 
# # The next higher-order scheme is found by including extra terms from a Taylor expansion on our SDE, yielding the **Milstein scheme of order 1 (M1)**: # $$ # \begin{equation} # X_{n+1} = X_n + a_x \Delta t + b_x \Delta W_x + \frac{1}{2}b_x \partial_x b_x(\Delta W_{n, x}^2 - \Delta t), # \end{equation} # $$ # which can be rewritten by explicitly writing $b_x\partial_x b_x$ as $\partial_x K_x(z)$: # $$ # \begin{equation} # X_{n+1} = X_n + v_x \Delta t + \frac{1}{2}\partial_x K_x(\Delta W_{n, x}^2 + \Delta t) + b\Delta W_n. # \end{equation} # $$ # The extra term in the M1 scheme provides extra accuracy at negligible computational cost. # # The spatial derivatives in the EM and M1 schemes can be approximated by a central difference. Higher order numerical schemes (see [Gräwe et al., 2012](https://doi.org/10.1007/s10236-012-0523-y)) include higher order derivatives. Since Parcels uses bilinear interpolation, these higher order derivatives cannot be computed, meaning that higher order numerical schemes cannot be used. # # An overview of numerical approximations for SDEs in a particle tracking setting can be found in [Gräwe (2011)](https://doi.org/10.1016/j.ocemod.2010.10.002). # # ## The Wiener Increment # The Wiener increment should be a normally distributed random variable with zero mean and a standard deviation of $\sqrt{dt}$. While such random numbers can be generated easily by Parcels (i.e. through `ParcelsRandom.normalvariate(0, particle.dt)`), it is more efficient to draw random numbers from a uniform distribution with the same first and second moments. The central limit theorem ensures that the contribution from consecutive Wiener increments drawn from a uniform distribution quickly converges to a normal distribution, making a substitution with a uniform distribution valid for ensemble distributions. The discrete Wiener increment used by Parcels therefore reads $\Delta W = \sqrt{3\Delta t } R[-1, 1]$. Wiener increments are drawn individually for each direction. 
#
#
# ## Using Advection-Diffusion Kernels in Parcels
# The EM and M1 advection-diffusion approximations are available as `AdvectionDiffusionEM` and `AdvectionDiffusionM1`, respectively. The `AdvectionDiffusionM1` kernel should be the default choice.
#
# The advection component of these kernels is similar to that of the Explicit Euler advection kernel (`AdvectionEE`). In case the Euler approximation is numerically unstable for the advection part, there are two advection-diffusion kernels available where the advection is approximated through a fourth-order Runge-Kutta algorithm, similar to in `AdvectionRK4`. These are available as `AdvectionRK4DiffusionM1` and `AdvectionRK4DiffusionEM`. Since the diffusion part of these kernels is only accurate to first-order, these kernels are only beneficial in case of numerical instability, or if the simulation contains regimes where diffusion vanishes.
#
# In the special case where diffusivity is constant over the entire domain, the diffusion-only kernel `DiffusionUniformKh` can be used in combination with an advection kernel of choice. Since the diffusivity here is space-independent, gradients are not calculated, increasing efficiency. The diffusion-step can in this case be computed after or before advection, thus allowing you to chain kernels using the `+` operator.
#
# To make a central difference approximation for computing the gradient in diffusivity, a resolution for this approximation `dres` is needed. This constant is used by the advection-diffusion kernels and must be chosen and added to the FieldSet by the user (e.g. `fieldset.add_constant('dres', 0.01)`). The size of `dres` should be much smaller than the spatial resolution of the data, but within reasonable limits of machine precision to avoid numerical errors.
#
# ## Example: Impermeable Diffusivity Profile
#
# Let's see the `AdvectionDiffusionM1` in action and see why it's preferable over the `AdvectionDiffusionEM` kernel.
# To do so, we create an idealized profile with diffusivities $K_\text{zonal}$ uniform everywhere ($K_\text{zonal} = \bar{K} = 0.25$) and $K_\text{meridional}$ constant in the zonal direction, while having the following profile in the meridional direction:
#
# $$ K_\text{meridional}(y) = \bar{K}\frac{2(1+\alpha)(1+2\alpha)}{\alpha^2L^{1+1/\alpha}} \begin{cases}
# y(L-2y)^{1/\alpha},\quad 0 \leq y \leq L/2,\\
# (L-y)(2y-L)^{1/\alpha},\quad L/2 \leq y \leq L,
# \end{cases}$$
# with $L$ being the basin length scale, $\alpha$ as a parameter determining the steepness in the gradient in the profile. This profile is similar to that used by [Gräwe (2011)](https://doi.org/10.1016/j.ocemod.2010.10.002), now used in the meridional direction for illustrative purposes.
#
# Let's plot $K_\text{meridional}(y)$:

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
from datetime import timedelta
from parcels import ParcelsRandom
from parcels import (FieldSet, ParticleSet, JITParticle, DiffusionUniformKh,
                     AdvectionDiffusionM1, AdvectionDiffusionEM)

# +
K_bar = 0.25  # Average diffusivity
alpha = 1.    # Profile steepness
L = 1.
# Basin scale Ny = 103 # Number of grid cells in y_direction (101 +2, one level above and one below, where fields are set to zero) dy = 1.03/Ny # Spatial resolution y = np.linspace(-0.01, 1.01, 103) # y-coordinates for grid y_K = np.linspace(0., 1., 101) # y-coordinates used for setting diffusivity beta = np.zeros(y_K.shape) # Placeholder for fraction term in K(y) formula for yi in range(len(y_K)): if y_K[yi] < L/2: beta[yi] = y_K[yi]*np.power(L - 2*y_K[yi], 1/alpha) elif y_K[yi] >= L/2: beta[yi] = (L - y_K[yi])*np.power(2*y_K[yi] - L, 1/alpha) Kh_meridional = 0.1*(2*(1+alpha)*(1+2*alpha))/(alpha**2*np.power(L, 1+1/alpha))*beta Kh_meridional = np.concatenate((np.array([0]), Kh_meridional, np.array([0]))) # - plt.plot(Kh_meridional, y) plt.ylabel("y") plt.xlabel(r"$K_{meridional}$") plt.show() # In this profile, diffusivity drops to 0 at $y=0.5$ and at $y=0$ and $y=1$. In the absence of advection, particles starting out in one half of the domain should remain confined to that half as they are unable to cross the points where the diffusivity drops to 0. The line $y=0.5$ should therefore provide an impermeable barrier. # # Now we can put this idealized profile into a flat fieldset: xdim, ydim = (1, Ny) data = {'U': np.zeros(ydim), 'V': np.zeros(ydim), 'Kh_zonal': K_bar*np.ones(ydim), 'Kh_meridional': Kh_meridional} dims = {'lon': 1, 'lat': np.linspace(-0.01, 1.01, ydim, dtype=np.float32)} fieldset = FieldSet.from_data(data, dims, mesh='flat', allow_time_extrapolation=True) fieldset.add_constant('dres', 0.00005) # We release 100 particles at (0, 0.75). def get_test_particles(): return ParticleSet.from_list(fieldset, pclass=JITParticle, lon=np.zeros(100), lat=np.ones(100)*0.75, time=np.zeros(100), lonlatdepth_dtype=np.float64) # Now we will simulate the advection and diffusion of the particles using the `AdvectionDiffusionM1` kernel. We run the simulation for 0.3 seconds, with a numerical timestep $\Delta t = 0.0001$s. 
We also write away particle locations at each timestep for plotting. Note that this will hinder a runtime comparison between kernels, since it will cause most time to be spent on I/O. dt = 0.0001 testParticles = get_test_particles() output_file = testParticles.ParticleFile(name="M1_out.nc", outputdt=timedelta(seconds=dt)) ParcelsRandom.seed(1636) # Random seed for reproducibility testParticles.execute(AdvectionDiffusionM1, runtime=timedelta(seconds=0.3), dt=timedelta(seconds=dt), output_file=output_file, verbose_progress=True) output_file.close() # to write the output to a netCDF file, since `output_file` does not close automatically when using notebooks M1_out = xr.open_dataset("M1_out.nc") # We can plot the individual coordinates of the particle trajectories against time ($x$ against $t$ and $y$ against $t$) to investigate how diffusion works along each axis. # + fig, ax = plt.subplots(1, 2) fig.set_figwidth(12) for data, ai, dim, ystart, ylim in zip([M1_out.lat, M1_out.lon], ax, ('y', 'x'), (0.75, 0), [(0, 1), (-1, 1)]): ai.plot(np.arange(0, 0.3002, 0.0001), data.T, alpha=0.2) ai.scatter(0, ystart, s=20, c='r', zorder=3) ai.set_xlabel("t") ai.set_ylabel(dim) ai.set_xlim(0, 0.3) ai.set_ylim(ylim) fig.suptitle("`AdvectionDiffusionM1` Simulation: Particle trajectories in the x- and y-directions against time") plt.show() # - # We see that the along the meridional direction, particles remain confined to the ‘upper’ part of the domain, not crossing the impermeable barrier where the diffusivity drops to zero. In the zonal, particles follow random walks, since all terms involving gradients of the diffusivity are zero. # # Now let's execute the simulation with the `AdvectionDiffusionEM` kernel instead. 
dt = 0.0001 testParticles = get_test_particles() output_file = testParticles.ParticleFile(name="EM_out.nc", outputdt=timedelta(seconds=dt)) ParcelsRandom.seed(1636) # Random seed for reproducibility testParticles.execute(AdvectionDiffusionEM, runtime=timedelta(seconds=0.3), dt=timedelta(seconds=dt), output_file=output_file, verbose_progress=True) output_file.close() # to write the output to a netCDF file, since `output_file` does not close automatically when using notebooks EM_out = xr.open_dataset("EM_out.nc") # + fig, ax = plt.subplots(1, 2) fig.set_figwidth(12) for data, ai, dim, ystart, ylim in zip([EM_out.lat, EM_out.lon], ax, ('y', 'x'), (0.75, 0), [(0, 1), (-1, 1)]): ai.plot(np.arange(0, 0.3002, 0.0001), data.T, alpha=0.2) ai.scatter(0, ystart, s=20, c='r', zorder=3) ai.set_xlabel("t") ai.set_ylabel(dim) ai.set_xlim(0, 0.3) ai.set_ylim(ylim) fig.suptitle("`AdvectionDiffusionEM` Simulation: Particle trajectories in the x- and y-directions against time") plt.show() # - # The Wiener increments for both simulations are equal, as they are fixed through a random seed. As we can see, the Euler-Maruyama algorithm performs worse than the Milstein algorithm, letting particles cross the impermeable barrier at $y=0.5$. In contrast, along the zonal direction, particles follow the same random walk as in the Milstein algorithm, which is expected since the extra terms in the Milstein algorithm are zero in this case.. # # Lastly, we perform the same simulation with the `DiffusionUniformKh` kernel. 
dt = 0.0001 testParticles = get_test_particles() output_file = testParticles.ParticleFile(name="Uniform_out.nc", outputdt=timedelta(seconds=dt)) ParcelsRandom.seed(1636) # Random seed for reproducibility testParticles.execute(DiffusionUniformKh, runtime=timedelta(seconds=0.3), dt=timedelta(seconds=dt), output_file=output_file, verbose_progress=True) output_file.close() # to write the output to a netCDF file, since `output_file` does not close automatically when using notebooks Uniform_out = xr.open_dataset("Uniform_out.nc") # + fig, ax = plt.subplots(1, 2) fig.set_figwidth(12) for data, ai, dim, ystart, ylim in zip([Uniform_out.lat, Uniform_out.lon], ax, ('y', 'x'), (0.75, 0), [(0, 1), (-1, 1)]): ai.plot(np.arange(0, 0.3002, 0.0001), data.T, alpha=0.2) ai.scatter(0, ystart, s=20, c='r', zorder=3) ai.set_xlabel("t") ai.set_ylabel(dim) ai.set_xlim(0, 0.3) ai.set_ylim(ylim) fig.suptitle("`DiffusionUniformKh` Simulation: Particle trajectories in the x- and y-directions against time") plt.show() # - # We clearly see that in the meridional direction, particles are not able to leave the areas with a low diffusivity, accumulating on the impermeable boundaries. In the zonal direction, results are equal to those of the other two kernels. When fields have a uniform diffusivity, this kernel is the most efficient since it does not execute the interpolation steps needed for computing gradients in the diffusivity field. # # ## References # <NAME>. (2011). “Implementation of high-order particle-tracking schemes in a water column model.” *Ocean Modelling*, 36(1), 80–89. https://doi.org/10.1016/j.ocemod.2010.10.002 # # Gräwe, Deleersnijder, Shah & Heemink (2012). “Why the Euler scheme in particle tracking is not enough: The shallow-sea pycnocline test case.” *Ocean Dynamics*, 62(4), 501–514. https://doi.org/10.1007/s10236-012-0523-y # # <NAME>. (1955). “Continuous Markov processes and stochastic equations.” *Rendiconti del Circolo Matematico di Palermo*, 4(1), 48. # # <NAME> et al. 
(2018). “Lagrangian ocean analysis: Fundamentals and practices.” *Ocean Modelling*, 121, 49–75. https://doi.org/10.1016/j.ocemod.2017.11.008 # # <NAME>., <NAME>., <NAME>., & <NAME>. (2013). “Adaptive time stepping algorithm for Lagrangian transport models: Theory and idealised test cases.” *Ocean Modelling*, 68, 9–21. https://doi.org/10.1016/j.ocemod.2013.04.001 # # <NAME>, Deleersnijder & Heemink (2017). “Tracing the Ventilation Pathways of the Deep North Pacific Ocean Using Lagrangian Particles and Eulerian Tracers.” *Journal of Physical Oceanography*, 47(6), 1261–1280. https://doi.org/10.1175/JPO-D-16-0098.1
parcels/examples/tutorial_diffusion.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/paulfrancis24/OOP-1-2/blob/main/OOP_Concepts_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="iypLaQU9Y41I"
# Classes with Multiple Objects

# + id="waFLk9FrY5PP"
class Birds:
    """A bird identified by its name; demonstrates multiple objects of one class."""

    def __init__(self, bird_name):
        self.bird_name = bird_name

    def flying_birds(self):
        # Prints a one-line description using the instance's name.
        print(f"{self.bird_name} flies above the sky")

    def non_flying_birds(self):
        print(f"{self.bird_name} is the national bird of the Philippines")


# Three independent instances sharing the same class.
vulture = Birds("Griffon Vulture")
crane = Birds("Common Crane")
emu = Birds("Emu")

vulture.flying_birds()
crane.flying_birds()
emu.non_flying_birds()

# + [markdown] id="oFhZSvMVXYhs"
# Encapsulation using mangling with double underscores

# + colab={"base_uri": "https://localhost:8080/"} id="UjLOnhkjXX1Y" outputId="633a61a4-b398-4956-f7b3-e79a18d2c8b5"
class foo:
    """Sums two values held in name-mangled (``__``-prefixed) attributes."""

    def __init__(self, a, b):
        self.__a = a
        self.__b = b

    def add(self):
        return self.__a + self.__b


number = foo(3, 4)
number.add()

# Assigning ``number.a`` creates a NEW public attribute; the mangled
# ``_foo__a`` used by add() is untouched, so add() still returns 7.
number.a = 7
number.add()

# + [markdown] id="nec_kNsBY4AY"
# Encapsulation with Private Attributes

# + colab={"base_uri": "https://localhost:8080/"} id="uEFnxkwCXos8" outputId="338a22e2-7ad3-4260-8f92-97725ec54eba"
class Counter:
    """A counter with a public ``current`` attribute (no encapsulation)."""

    def __init__(self):
        self.current = 0

    def increment(self):
        self.current += 1

    def value(self):
        return self.current

    def reset(self):
        self.current = 0


num = Counter()
num.increment()
num.increment()
num.increment()
num.value()


# + colab={"base_uri": "https://localhost:8080/"} id="9o4lHS7wam14" outputId="e5631310-5df6-4f70-b222-a9357c091da4"
class Counter:
    """Same counter, but the count is private via name mangling.

    NOTE: this redefinition shadows the public ``Counter`` above for the
    rest of the module.
    """

    def __init__(self):
        self.__current = 0

    def increment(self):
        self.__current += 1

    def value(self):
        return self.__current

    def reset(self):
        self.__current = 0


num = Counter()
num.increment()
num.increment()
num.increment()
# Creates an unrelated attribute; the private ``__current`` is unaffected.
num.Counter = 1
num.increment()
num.value()

# + [markdown] id="jUbpVyZwdVsH"
# Inheritance

# + colab={"base_uri": "https://localhost:8080/"} id="bSOl-cN-auqH" outputId="46951cb1-e4ff-4f08-b02b-d80a6724a474"
class Person:
    """Base class holding a first name and surname."""

    def __init__(self, firstname, surname):
        self.firstname = firstname
        self.surname = surname

    def printname(self):
        print(self.firstname, self.surname)


person = Person("Ana", "Santos")
person.printname()


class Teacher(Person):
    # Inherits everything from Person unchanged.
    pass


person2 = Teacher("Maria", "Sayo")
person2.printname()


class Student(Person):
    pass


person3 = Student("Paul", "Masangcay")
person3.printname()

# + [markdown] id="DwOmGuQ1fIga"
# Polymorphism

# + colab={"base_uri": "https://localhost:8080/"} id="2J1o9xL0fJPG" outputId="42589196-7983-4f53-d178-63f27add7ffa"
class RegularPolygon:
    """A regular polygon described only by its side length."""

    def __init__(self, side):
        self.side = side


class Square(RegularPolygon):
    def area(self):
        return self.side * self.side


class EquilateralTriangle(RegularPolygon):
    def area(self):
        # 0.433 approximates sqrt(3)/4, the equilateral-triangle area factor.
        return self.side * self.side * 0.433


obj = Square(4)
print(obj.area())
obj2 = EquilateralTriangle(3)
print(obj2.area())

# + [markdown] id="096JghYYiYf3"
# Application 1

# + [markdown] id="-iYarrf2kSLB"
# 1. Create a Python program that displays the name of three students (Student 1, Student 2 and Student 3)
# 2. Create a class name Person and attributes - std1, std2, std3, pre, mid, fin
# 3. Compute the average of each term grade using Grade() method
# 4.
# Information about student's grades must be hidden from others

# + id="0b81XwlDiaKF" colab={"base_uri": "https://localhost:8080/"} outputId="01260a26-c4cf-432e-b235-a736324b5cd9"
class Person:
    """A student whose name and term grades are private (name-mangled) attributes.

    Grades are hidden from outside access per the exercise requirement;
    only Grade() exposes the computed average.
    """

    def __init__(self, name, pre, mid, fin):
        self.__name = name
        self.__pre = pre
        self.__mid = mid
        self.__fin = fin

    def Grade(self):
        # Average of the three term grades, printed to two decimal places.
        # (A dead "Ave = 0" initializer was removed; the value was always
        # overwritten on the next line.)
        Ave = (self.__pre + self.__mid + self.__fin) / 3
        print("Your Final Grade: ", round(Ave, 2), "\n")


# NOTE(review): each ``class stdN(Person): pass`` is immediately shadowed by
# the instance assigned to the same name below it — the subclasses are never
# used. Kept as-is to preserve the notebook's observable behavior.
class std1(Person):
    pass


std1 = Person(str(input("Name: ")), float(input("Prelims: ")), float(input("Midterms: ")), float(input("Finals: ")))
std1.Grade()


class std2(Person):
    pass


std2 = Person(str(input("Name: ")), float(input("Prelims: ")), float(input("Midterms: ")), float(input("Finals: ")))
std2.Grade()


class std3(Person):
    pass


std3 = Person(str(input("Name: ")), float(input("Prelims: ")), float(input("Midterms: ")), float(input("Finals: ")))
std3.Grade()
OOP_Concepts_2(Practice).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Transfer Learning for Object Detection # # This notebook is adapted from a [PyTorch tutorial](https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html) and demonstrates transfer learning with [Intel Extension for PyTorch (IPEX)](https://github.com/intel/intel-extension-for-pytorch) for an object detection task. It uses object detection models from [torchvision](https://pytorch.org/vision/stable/index.html) that were originally trained using [COCO](https://cocodataset.org/) and does transfer learning with the [PennFudan dataset](https://www.cis.upenn.edu/~jshi/ped_html/), consisting of 170 images with 345 labeled pedestrians. # # The notebook performs the following steps: # 1. [Import dependencies and setup parameters](#1.-Import-dependencies-and-setup-parameters) # 2. [Prepare the dataset](#2.-Prepare-the-dataset) # 3. [Predict using the original model](#3.-Predict-using-the-original-model) # 4. [Transfer learning](#4.-Transfer-Learning) # 5. [Visualize the model output](#5.-Visualize-the-model-output) # 6. [Export the saved model](#6.-Export-the-saved-model) # ## 1. 
# Import dependencies and setup parameters

# +
import os
from collections import Counter

import numpy as np
import torch
import torchvision
import intel_extension_for_pytorch as ipex
from PIL import Image
from pydoc import locate
import warnings
import torchvision.models.detection as detection
from torchvision.utils import make_grid, draw_bounding_boxes
from torchvision.transforms.functional import convert_image_dtype
import torchvision.transforms.functional as F
import matplotlib.pyplot as plt

# Project-local helpers (not part of torchvision).
from model_utils import torchvision_model_map, get_retrainable_model
from dataset_utils import PennFudanDataset, COCO_LABELS

warnings.filterwarnings("ignore")

print('Supported models:')
print('\n'.join(torchvision_model_map.keys()))

# +
# Specify a model from the list above
model_name = "fasterrcnn_resnet50_fpn"

# Specify the location for the dataset to be downloaded
dataset_directory = os.environ["DATASET_DIR"] if "DATASET_DIR" in os.environ else \
    os.path.join(os.environ["HOME"], "dataset")

# Specify a directory for output
output_directory = os.environ["OUTPUT_DIR"] if "OUTPUT_DIR" in os.environ else \
    os.path.join(os.environ["HOME"], "output")

# Batch size
batch_size = 2

# +
if model_name not in torchvision_model_map.keys():
    raise ValueError("The specified model_name ({}) is invalid. Please select from: {}".
                     format(model_name, torchvision_model_map.keys()))

# Get the info for the specified model from the map
model_map_values = torchvision_model_map[model_name]
predictor_handle = torchvision_model_map[model_name]["predictor_model"]

print("Pretrained Object Detection Model:", model_name)
print("Bounding Box Predictor/Classifier:", predictor_handle)

# +
# Get reference scripts from the torchvision repo that are not in the package
if not os.path.exists("vision"):
    # !git clone --depth 1 --branch v0.11.3 https://github.com/pytorch/vision.git
    pass  # pass added: the jupytext-commented magic left the if-body empty (SyntaxError)

import sys
sys.path.append("vision/references/detection")
import utils
import transforms as T


# Define transform function for image inputs
def get_transform(train):
    """Return a torchvision transform pipeline; flips are added only for training."""
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
# -

# ## 2. Prepare the dataset

# Download and extract the [PennFudan dataset](https://www.cis.upenn.edu/~jshi/ped_html/).
# If the dataset is not found in the dataset directory it is downloaded.
# Subsequent runs will reuse the already downloaded dataset.

num_classes = 2
dataset_path = os.path.join(dataset_directory, "PennFudanPed")

if not os.path.exists(dataset_path):
    # NOTE(review): the trailing " ." makes wget treat "." as a second URL —
    # harmless (download still lands in cwd) but worth confirming/cleaning.
    # !wget https://www.cis.upenn.edu/~jshi/ped_html/PennFudanPed.zip .
    # !unzip PennFudanPed.zip -d $dataset_directory
    # !rm PennFudanPed.zip
    pass  # pass added: commented magics left the if-body empty (SyntaxError)

# For data performance tuning, see the PyTorch
# [DataLoader](https://pytorch.org/docs/stable/data.html#multi-process-data-loading)
# documentation. Setting num_workers optimally will depend on hardware and
# batch size, but 2, 4, or 8 workers will probably work well.

dataset = PennFudanDataset(dataset_path, get_transform(train=True))
dataset_test = PennFudanDataset(dataset_path, get_transform(train=False))

data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4,
                                          collate_fn=utils.collate_fn)

# ## 3. Predict using the original model
#
# Use the pretrained model that was trained using COCO to do predictions from
# the new dataset and view the results for a single batch.

# +
# Load the detection model pre-trained on COCO
pretrained_model_class = locate('torchvision.models.detection.{}'.format(model_name))
predictor_class = locate('torchvision.models.detection.{}'.format(predictor_handle))
model = pretrained_model_class(pretrained=True)

# Get a batch of data
images, targets = next(iter(data_loader))
images = list(image for image in images)

model.eval()
predictions = model(images)

# +
# Visualization functions
plt.rcParams["savefig.bbox"] = 'tight'


def show_image(img, objects_detected):
    """Render one image (or a list of images) with the detection summary as title."""
    if not isinstance(img, list):
        img = [img]
    fix, axs = plt.subplots(ncols=len(img), squeeze=False)
    for i, im in enumerate(img):
        im = im.detach()
        im = F.to_pil_image(im)
        axs[0, i].imshow(np.asarray(im))
        axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
    plt.title(objects_detected)


def show_image_results(images, predictions, label_map, score_threshold=0.8):
    """Draw boxes for predictions above score_threshold and show per-label counts."""
    for i in range(len(images)):
        if 'scores' in predictions[i]:
            indices_over_threshold = predictions[i]['scores'] > score_threshold
        else:
            # If there are no scores, show them all
            indices_over_threshold = [k for k in range(len(predictions[i]['labels']))]
        result = draw_bounding_boxes(convert_image_dtype(images[i], dtype=torch.uint8),
                                     predictions[i]['boxes'][indices_over_threshold],
                                     width=5)
        c = Counter(predictions[i]['labels'][indices_over_threshold].tolist())
        d = ["{}: {}".format(label_map[a], c[a]) for a in c.keys()]
        show_image(result, '\n'.join(d))
# -

show_image_results(images, predictions, COCO_LABELS)

# ## 4. Transfer Learning
#
# Replace the pretrained head of the network with a new layer based on the
# number of classes in our dataset. Train and evaluate the model using the
# new dataset for the specified number of epochs.

# Number of training epochs
training_epochs = 1

# +
from importlib import reload
from engine import train_one_epoch, evaluate


def main(num_classes, dataset, dataset_test):
    """Fine-tune the detection model on the PennFudan dataset and return it."""
    # Train on the CPU
    device = torch.device('cpu')

    # Split the dataset into train and test subsets (last 50 samples for test)
    indices = torch.randperm(len(dataset)).tolist()
    dataset = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])

    # Define training and validation data loaders
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True, num_workers=4,
        collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, shuffle=False, num_workers=4,
        collate_fn=utils.collate_fn)

    # Get the model using helper function
    model = get_retrainable_model(model_name, num_classes, pretrained_model_class, predictor_class)

    # Move model to the right device
    model.to(device)

    # Construct optimizer over trainable parameters only
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)

    # Construct learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)

    # Apply the IPEX optimize function
    model, optimizer = ipex.optimize(model, optimizer=optimizer)

    for epoch in range(training_epochs):
        # Train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)

        # Update the learning rate
        lr_scheduler.step()

        # Evaluate on the test dataset
        evaluate(model, data_loader_test, device=device)

    return model
# -

# Train the model
model = main(num_classes, dataset, dataset_test)

# ## 5. Visualize the model output
#
# After the training completes, test the model's predictions on the original batch.

# Show object detections from fine-tuned model
model.eval()
predictions = model(images)

# Show the predicted results for the fine-tuned model
show_image_results(images, predictions, COCO_LABELS)

# ## 6. Export the saved model

if not os.path.exists(output_directory):
    # !mkdir -p $output_directory
    pass  # pass added: commented magic left the if-body empty (SyntaxError)

file_path = "{}/object_detection.pt".format(output_directory)
torch.save(model.state_dict(), file_path)
print("Saved to {}".format(file_path))

# ## Dataset citation
# ```
# @InProceedings{10.1007/978-3-540-76386-4_17,
# author="<NAME>
# and <NAME>
# and <NAME>
# and <NAME>",
# editor="<NAME>
# and <NAME>
# and <NAME>
# and <NAME>",
# title="Object Detection Combining Recognition and Segmentation",
# booktitle="Computer Vision -- ACCV 2007",
# year="2007",
# publisher="Springer Berlin Heidelberg",
# address="Berlin, Heidelberg",
# pages="189--199",
# abstract="We develop an object detection method combining top-down recognition with bottom-up image segmentation. There are two main steps in this method: a hypothesis generation step and a verification step. In the top-down hypothesis generation step, we design an improved Shape Context feature, which is more robust to object deformation and background clutter. The improved Shape Context is used to generate a set of hypotheses of object locations and figure-ground masks, which have high recall and low precision rate. In the verification step, we first compute a set of feasible segmentations that are consistent with top-down object hypotheses, then we propose a False Positive Pruning(FPP) procedure to prune out false positives. We exploit the fact that false positive regions typically do not align with any feasible image segmentation. Experiments show that this simple framework is capable of achieving both high recall and high precision with only a few positive training examples and that this method can be generalized to many object classes.",
# isbn="978-3-540-76386-4"
# }
# ```
docs/notebooks/transfer_learning/pytorch_object_detection/PyTorch_Object_Detection_Transfer_Learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # NLP From Scratch: Classifying Names with a Character-Level RNN # ************************************************************** # **Author**: `<NAME> <https://github.com/spro/practical-pytorch>`_ # # We will be building and training a basic character-level RNN to classify # words. This tutorial, along with the following two, show how to do # preprocess data for NLP modeling "from scratch", in particular not using # many of the convenience functions of `torchtext`, so you can see how # preprocessing for NLP modeling works at a low level. # # A character-level RNN reads words as a series of characters - # outputting a prediction and "hidden state" at each step, feeding its # previous hidden state into each next step. We take the final prediction # to be the output, i.e. which class the word belongs to. 
# # Specifically, we'll train on a few thousand surnames from 18 languages # of origin, and predict which language a name is from based on the # spelling: # # :: # # $ python predict.py Hinton # (-0.47) Scottish # (-1.52) English # (-3.57) Irish # # $ python predict.py Schmidhuber # (-0.19) German # (-2.48) Czech # (-2.68) Dutch # # # **Recommended Reading:** # # I assume you have at least installed PyTorch, know Python, and # understand Tensors: # # - https://pytorch.org/ For installation instructions # - :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general # - :doc:`/beginner/pytorch_with_examples` for a wide and deep overview # - :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user # # It would also be useful to know about RNNs and how they work: # # - `The Unreasonable Effectiveness of Recurrent Neural # Networks <https://karpathy.github.io/2015/05/21/rnn-effectiveness/>`__ # shows a bunch of real life examples # - `Understanding LSTM # Networks <https://colah.github.io/posts/2015-08-Understanding-LSTMs/>`__ # is about LSTMs specifically but also informative about RNNs in # general # # Preparing the Data # ================== # # .. Note:: # Download the data from # `here <https://download.pytorch.org/tutorial/data.zip>`_ # and extract it to the current directory. # # Included in the ``data/names`` directory are 18 text files named as # "[Language].txt". Each file contains a bunch of names, one name per # line, mostly romanized (but we still need to convert from Unicode to # ASCII). # # We'll end up with a dictionary of lists of names per language, # ``{language: [names ...]}``. The generic variables "category" and "line" # (for language and name in our case) are used for later extensibility. 
# NOTE(review): jupytext "light" export of the PyTorch char-RNN classification
# tutorial. The source was collapsed onto single physical lines; the formatting
# below restores the original cell structure without changing any code token.
# Cells chain module-level globals (all_letters, n_letters, category_lines,
# all_categories, rnn, criterion, output) defined in earlier cells, and the
# data cells require data/names/*.txt to exist — presumably downloaded per the
# markdown note above; confirm before running.
#
#

# +
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os


def findFiles(path): return glob.glob(path)


print(findFiles('data/names/*.txt'))

import unicodedata
import string

# Alphabet for the one-hot encoding: ASCII letters plus a few punctuation marks.
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)


# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    # NFD-decompose, then drop combining marks ('Mn') and any char outside all_letters.
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
        and c in all_letters
    )


print(unicodeToAscii('Ślusàrski'))

# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []


# Read a file and split into lines
def readLines(filename):
    lines = open(filename, encoding='utf-8').read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]


# One category per file; the file stem (e.g. "Italian") is the category name.
for filename in findFiles('data/names/*.txt'):
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    lines = readLines(filename)
    category_lines[category] = lines

n_categories = len(all_categories)
# -

# Now we have ``category_lines``, a dictionary mapping each category
# (language) to a list of lines (names). We also kept track of
# ``all_categories`` (just a list of languages) and ``n_categories`` for
# later reference.
#
#

print(category_lines['Italian'][:5])

# Turning Names into Tensors
# --------------------------
#
# Now that we have all the names organized, we need to turn them into
# Tensors to make any use of them.
#
# To represent a single letter, we use a "one-hot vector" of size
# ``<1 x n_letters>``. A one-hot vector is filled with 0s except for a 1
# at index of the current letter, e.g. ``"b" = <0 1 0 0 0 ...>``.
#
# To make a word we join a bunch of those into a 2D matrix
# ``<line_length x 1 x n_letters>``.
#
# That extra 1 dimension is because PyTorch assumes everything is in
# batches - we're just using a batch size of 1 here.
#
#

# +
import torch


# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
    return all_letters.find(letter)


# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
    tensor = torch.zeros(1, n_letters)
    tensor[0][letterToIndex(letter)] = 1
    return tensor


# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
    tensor = torch.zeros(len(line), 1, n_letters)
    for li, letter in enumerate(line):
        tensor[li][0][letterToIndex(letter)] = 1
    return tensor


print(letterToTensor('J'))

print(lineToTensor('Jones').size())
# -

# Creating the Network
# ====================
#
# Before autograd, creating a recurrent neural network in Torch involved
# cloning the parameters of a layer over several timesteps. The layers
# held hidden state and gradients which are now entirely handled by the
# graph itself. This means you can implement a RNN in a very "pure" way,
# as regular feed-forward layers.
#
# This RNN module (mostly copied from `the PyTorch for Torch users
# tutorial <https://pytorch.org/tutorials/beginner/former_torchies/
# nn_tutorial.html#example-2-recurrent-net>`__)
# is just 2 linear layers which operate on an input and hidden state, with
# a LogSoftmax layer after the output.
#
# .. figure:: https://i.imgur.com/Z2xbySO.png
#    :alt:
#
#
#

# +
import torch.nn as nn


class RNN(nn.Module):
    """Minimal character-level RNN: two linear layers over [input, hidden]."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Both layers consume the concatenated input + previous hidden state.
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)


n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
# -

# To run a step of this network we need to pass an input (in our case, the
# Tensor for the current letter) and a previous hidden state (which we
# initialize as zeros at first). We'll get back the output (probability of
# each language) and a next hidden state (which we keep for the next
# step).
#
#

# +
input = letterToTensor('A')
hidden = torch.zeros(1, n_hidden)

output, next_hidden = rnn(input, hidden)
# -

# For the sake of efficiency we don't want to be creating a new Tensor for
# every step, so we will use ``lineToTensor`` instead of
# ``letterToTensor`` and use slices. This could be further optimized by
# pre-computing batches of Tensors.
#
#

# +
input = lineToTensor('Albert')
hidden = torch.zeros(1, n_hidden)

output, next_hidden = rnn(input[0], hidden)
print(output)
# -

# As you can see the output is a ``<1 x n_categories>`` Tensor, where
# every item is the likelihood of that category (higher is more likely).
#
#
#
# Training
# ========
# Preparing for Training
# ----------------------
#
# Before going into training we should make a few helper functions. The
# first is to interpret the output of the network, which we know to be a
# likelihood of each category. We can use ``Tensor.topk`` to get the index
# of the greatest value:
#
#


# +
def categoryFromOutput(output):
    top_n, top_i = output.topk(1)
    category_i = top_i[0].item()
    return all_categories[category_i], category_i


print(categoryFromOutput(output))
# -

# We will also want a quick way to get a training example (a name and its
# language):
#
#

# +
import random


def randomChoice(l):
    return l[random.randint(0, len(l) - 1)]


def randomTrainingExample():
    # Pick a random (language, name) pair and build its training tensors.
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)
    line_tensor = lineToTensor(line)
    return category, line, category_tensor, line_tensor


for i in range(10):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    print('category =', category, '/ line =', line)
# -

# Training the Network
# --------------------
#
# Now all it takes to train this network is show it a bunch of examples,
# have it make guesses, and tell it if it's wrong.
#
# For the loss function ``nn.NLLLoss`` is appropriate, since the last
# layer of the RNN is ``nn.LogSoftmax``.
#
#

criterion = nn.NLLLoss()

# Each loop of training will:
#
# -  Create input and target tensors
# -  Create a zeroed initial hidden state
# -  Read each letter in and
#
#    -  Keep hidden state for next letter
#
# -  Compare final output to target
# -  Back-propagate
# -  Return the output and loss
#
#

# +
learning_rate = 0.005  # If you set this too high, it might explode. If too low, it might not learn


def train(category_tensor, line_tensor):
    hidden = rnn.initHidden()

    rnn.zero_grad()

    # Feed the name one character at a time, carrying the hidden state.
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)

    loss = criterion(output, category_tensor)
    loss.backward()

    # Add parameters' gradients to their values, multiplied by learning rate
    # (manual SGD step instead of torch.optim).
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)

    return output, loss.item()
# -

# Now we just have to run that with a bunch of examples. Since the
# ``train`` function returns both the output and loss we can print its
# guesses and also keep track of loss for plotting. Since there are 1000s
# of examples we print only every ``print_every`` examples, and take an
# average of the loss.
#
#

# +
import time
import math

n_iters = 100000
print_every = 5000
plot_every = 1000

# Keep track of losses for plotting
current_loss = 0
all_losses = []


def timeSince(since):
    now = time.time()
    s = now - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


start = time.time()

# NOTE(review): the loop variable ``iter`` shadows the ``iter`` builtin for
# the rest of the module (harmless here, as written in the original tutorial).
for iter in range(1, n_iters + 1):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    output, loss = train(category_tensor, line_tensor)
    current_loss += loss

    # Print iter number, loss, name and guess
    if iter % print_every == 0:
        guess, guess_i = categoryFromOutput(output)
        correct = '✓' if guess == category else '✗ (%s)' % category
        print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))

    # Add current loss avg to list of losses
    if iter % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
# -

# Plotting the Results
# --------------------
#
# Plotting the historical loss from ``all_losses`` shows the network
# learning:
#
#

# +
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

plt.figure()
plt.plot(all_losses)
# -

# Evaluating the Results
# ======================
#
# To see how well the network performs on different categories, we will
# create a confusion matrix, indicating for every actual language (rows)
# which language the network guesses (columns). To calculate the confusion
# matrix a bunch of samples are run through the network with
# ``evaluate()``, which is the same as ``train()`` minus the backprop.
#
#

# +
# Keep track of correct guesses in a confusion matrix
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000


# Just return an output given a line
def evaluate(line_tensor):
    hidden = rnn.initHidden()

    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)

    return output


# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    output = evaluate(line_tensor)
    guess, guess_i = categoryFromOutput(output)
    category_i = all_categories.index(category)
    confusion[category_i][guess_i] += 1

# Normalize by dividing every row by its sum
for i in range(n_categories):
    confusion[i] = confusion[i] / confusion[i].sum()

# Set up plot
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)

# Set up axes
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)

# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

# sphinx_gallery_thumbnail_number = 2
plt.show()
# -

# You can pick out bright spots off the main axis that show which
# languages it guesses incorrectly, e.g. Chinese for Korean, and Spanish
# for Italian. It seems to do very well with Greek, and very poorly with
# English (perhaps because of overlap with other languages).
#
#
#
# Running on User Input
# ---------------------
#
#


# +
def predict(input_line, n_predictions=3):
    print('\n> %s' % input_line)
    with torch.no_grad():
        output = evaluate(lineToTensor(input_line))

        # Get top N categories
        topv, topi = output.topk(n_predictions, 1, True)
        predictions = []

        for i in range(n_predictions):
            value = topv[0][i].item()
            category_index = topi[0][i].item()
            print('(%.2f) %s' % (value, all_categories[category_index]))
            predictions.append([value, all_categories[category_index]])


predict('Dovesky')
predict('Jackson')
predict('Satoshi')
# -

# The final versions of the scripts `in the Practical PyTorch
# repo <https://github.com/spro/practical-pytorch/tree/master/char-rnn-classification>`__
# split the above code into a few files:
#
# -  ``data.py`` (loads files)
# -  ``model.py`` (defines the RNN)
# -  ``train.py`` (runs training)
# -  ``predict.py`` (runs ``predict()`` with command line arguments)
# -  ``server.py`` (serve prediction as a JSON API with bottle.py)
#
# Run ``train.py`` to train and save the network.
#
# Run ``predict.py`` with a name to view predictions:
#
# ::
#
#    $ python predict.py Hazaki
#    (-0.42) Japanese
#    (-1.39) Polish
#    (-3.51) Czech
#
# Run ``server.py`` and visit http://localhost:5533/Yourname to get JSON
# output of predictions.
#
#
#
# Exercises
# =========
#
# -  Try with a different dataset of line -> category, for example:
#
#    -  Any word -> language
#    -  First name -> gender
#    -  Character name -> writer
#    -  Page title -> blog or subreddit
#
# -  Get better results with a bigger and/or better shaped network
#
#    -  Add more linear layers
#    -  Try the ``nn.LSTM`` and ``nn.GRU`` layers
#    -  Combine multiple of these RNNs as a higher level network
#
#
PyTorch/Visual-Audio/Torchscript/char_rnn_classification_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# NOTE(review): Colab notebook (jupytext "light" export) that downloads a
# torrent into a mounted Google Drive. The source was collapsed onto single
# physical lines; formatting restored below without changing any code token.

# + [markdown] id="5-nEl7bmbKhM" colab_type="text"
# # Torrent to Google Drive Downloader Using Python3-libtorrent library and Google Drive File Stream
#

# + [markdown] id="bSCady43z8ym" colab_type="text"
# **Important Note:** To get more disk space:
# Go to **Runtime** -> **Change runtime type** and give GPU as the Hardware Accelerator. You will get around 350-400GB of Torrent Download Capability.

# + [markdown] id="owbPKrx_bVIt" colab_type="text"
# ### Mount Google Drive
# To stream files mounting **Google Drive** is needed. The Google Drive File Stream is maintained by Google and The Data Privacy is intact same as in accordance with Google Privacy Policies.

# + id="QRzj7rLHbS_W" colab_type="code" colab={}
from google.colab import drive
drive.mount('/content/drive')

# + [markdown] id="Gws9m9r5b8M_" colab_type="text"
# ### Install Dependency python3-libtorrent

# + id="uzJtOLOlcJxu" colab_type="code" colab={}
# !apt install python3-libtorrent

# + [markdown] id="ASYhMgxTii_G" colab_type="text"
# ### Add Torrent and Start Download

# + id="wkMkEndgirM0" colab_type="code" colab={}
import libtorrent as torrentlib
import time
import datetime

ses = torrentlib.session()
ses.listen_on(6881, 6891)

# Torrent parameters: files are saved directly into the mounted Drive folder.
params = {
    'save_path': '/content/drive/My Drive/Torrent Download/',
    'storage_mode': torrentlib.storage_mode_t(2),
    'paused': False,
    'auto_managed': True,
    'duplicate_is_error': True}

link = input("Enter Torrent File Link/Magnet Link:")
# NOTE(review): add_magnet_uri is deprecated in newer libtorrent releases
# (ses.add_torrent is the current API) — confirm the targeted version.
handle = torrentlib.add_magnet_uri(ses, link, params)
ses.start_dht()

begin = time.time()
print(datetime.datetime.now())

# Block until the magnet link's metadata has been fetched.
print ('Downloading Metadata....')
while (not handle.has_metadata()):
    time.sleep(1)
print ('Metadata Downloaded, Starting Torrent Download....')

print("Starting Torrent Download>>>>", handle.name())
# Poll the torrent status every 5 s until it reaches the seeding state.
while (handle.status().state != torrentlib.torrent_status.seeding):
    s = handle.status()
    state_str = ['queued', 'checking', 'downloading metadata', \
        'downloading', 'finished', 'seeding', 'allocating']
    # NOTE(review): download_rate is divided by 1048576 (MiB/s) but
    # upload_rate by 8000 and labelled "kb/s" — the units look inconsistent;
    # confirm intended scaling.
    print ('%.2f%% complete (down: %.1f mb/s up: %.1f kb/s peers: %d) %s ' % \
        (s.progress * 100, s.download_rate / 1048576, s.upload_rate / 8000, \
        s.num_peers, state_str[s.state]))
    time.sleep(5)

end = time.time()
print(handle.name(), "TORRENT DOWNLOAD COMPLETED")
print("Elapsed Time: ", int((end-begin)//60), "min :", int((end-begin)%60), "sec")
print(datetime.datetime.now())
torrent_to_drive_downloader.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Animated graph # This notebook shows how to use the matplotlib animate tool for turning a simple xy-graph into an animation. The last step saving the animation requires the ffmpeg package and takes some time - so be patient. # %matplotlib inline import numpy as np import matplotlib.pyplot as plt from matplotlib.lines import Line2D import os # + # %matplotlib inline # #%config InlineBackend.figure_format = 'pdf' from mpl_toolkits.axes_grid1.inset_locator import inset_axes from IPython.display import set_matplotlib_formats set_matplotlib_formats('pdf', 'png') plt.rcParams['savefig.dpi'] = 300 plt.rcParams['figure.autolayout'] = False plt.rcParams['figure.figsize'] = 10, 8 plt.rcParams['axes.labelsize'] = 24 plt.rcParams['axes.titlesize'] = 30 plt.rcParams['font.size'] = 24 plt.rcParams['lines.linewidth'] = 2.0 plt.rcParams['lines.markersize'] = 8 plt.rcParams['legend.fontsize'] = 24 plt.rcParams['axes.titlepad'] = 20 # - def running_mean(x, N): cumsum = np.cumsum(np.insert(x, 0, 0)) return (cumsum[N:] - cumsum[:-N]) / float(N) FILEs=["data1.dat",] # + N = 5 figname = "data1.png" Legends = ["data1.png",] data = np.loadtxt("data1.dat", comments=["@","#"]) plt.plot(running_mean(data[:,0],N) ,running_mean(data[:,1],N), label="data1") #plt.title(figname) plt.xlim(0,1000) plt.ylim(0,360) #plt.legend(frameon=False, loc=0) plt.xlabel(r'Time [$\mu$s]') plt.ylabel("Orientation angle (deg)") plt.savefig(figname, dpi=300) plt.show() # - # Next, we'll calculate a running average of the data (using how many frames?) and create a movie where the graph grows over time. Note that this take some time, so be patient. If you have the hour glass icon in your tab at the top of the screen, Python is working, and you should wait. 
# If you think things have failed, you can interrupt the Kernel from the Notebook menu (Kernel -> interrupt).

# +
N = 5  # same running-average window as the static plot above

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

def update_line(num, data, line):
    # Animation callback: on frame `num`, reveal the first `num` points of
    # `data` (stacked as [x-values, y-values]) so the curve grows over time.
    line.set_data(data[..., :num])
    return line,

fig1 = plt.figure()

# `data` is reused from the plotting cell above (loaded from data1.dat).
list1 = [running_mean(data[:,0],N),]
list2 = [running_mean(data[:,1],N),]
list1_ar = np.array(list1)
list2_ar = np.array(list2)
print(list1_ar.size)
# NOTE(review): stacking two (1, M) arrays gives data1 shape (2, 1, M);
# Line2D.set_data appears to tolerate the extra axis here, but a plain
# (2, M) stack would be cleaner — confirm before refactoring.
data1 = np.stack((list1_ar, list2_ar))
print(data1)

l, = plt.plot([], [], 'r-')  # empty line; filled in frame by frame
plt.xlim(0, 1000)
plt.ylim(0, 360)
plt.xlabel("Time [ns]")
plt.ylabel("Orientation angle [deg]")
# 1000 frames, 7 ms apart; blitting redraws only the changed artist.
line_ani = animation.FuncAnimation(fig1, update_line, 1000, fargs=(data1, l), interval=7, blit=True)
line_ani.save('data1.mp4')  # requires ffmpeg
plt.show()
# -

# The script exported the movie as data1.mp4. If you want to take a look, you can change to the main Notebook tab, navigate to the movie subfolder, download the file and play it with some movie app.
movie/Animated_graph.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from pathlib import Path
import numpy as np

# ## Defining dataset

# +
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
import math


class PySourceDataset(Dataset):
    """Character-level LM dataset over all .py files under `main_folder`.

    The files are concatenated into one corpus; item `idx` is the pair
    (one-hot source sequence of length `seq_len` starting at `idx`,
     integer target sequence = the same window shifted right by one char).
    """

    def __init__(self, main_folder, seq_len=80):
        self.main_folder = main_folder
        self.seq_len = seq_len

        # Read all python source files
        files = list()
        for path in Path(main_folder).rglob('*.py'):
            with open(path, encoding="utf8") as fp:
                files.append(fp.read())
        print("Number of .py files:", len(files))

        # Concatenate them with a page separator
        # corpus = "\n\nNEW FILE\n\n".join(files)
        self.corpus = "\n".join(files)
        self.corpus_len = len(self.corpus)
        print("Chars in corpus:", self.corpus_len)

        # Define useful mappings: char -> index (c2i) and index -> char (i2c).
        # BUG FIX: this loop previously iterated over the undefined bare name
        # `corpus` (only `self.corpus` exists), which raised a NameError.
        idx = 0
        self.c2i = dict()
        for c in self.corpus:
            if c not in self.c2i:
                self.c2i[c] = idx
                idx += 1
        self.i2c = {v: k for k, v in self.c2i.items()}
        self.n_chars = len(self.c2i)
        print("Number of distinct chars:", self.n_chars)

    def _get_onehot(self, c):
        # One-hot row vector of shape (1, n_chars) for character c.
        t = torch.zeros(1, self.n_chars)
        t[0][self.c2i[c]] = 1
        return t

    def __len__(self):
        # Number of full seq_len windows with a one-char-shifted target.
        return self.corpus_len - self.seq_len

    def __getitem__(self, idx):
        assert idx < len(self)
        # raw text sequences
        source_seq = self.corpus[idx : idx + self.seq_len]
        target_seq = self.corpus[idx + 1 : idx + self.seq_len + 1]
        # one-hot source (seq_len, n_chars); targets are class indices
        # as expected by nll_loss.
        source_seq_t = torch.stack(
            [self._get_onehot(c) for c in source_seq]
        ).squeeze(dim=1)
        # BUG FIX: `c2i` was referenced as an undefined bare name here;
        # the mapping lives on the instance as `self.c2i`.
        target_seq_t = torch.Tensor([self.c2i[c] for c in target_seq]).long()
        return (source_seq_t, target_seq_t)
# -

pyd = PySourceDataset('./scikit-learn-master')

# +
import pytorch_lightning as pl
import torch.nn.functional as F
import IPython


class CharLM(pl.LightningModule):
    """Stacked-LSTM character language model trained with NLL loss."""

    def __init__(
        self, input_size, hidden_size, output_size, num_layers=2, dropout=0.5, learning_rate=1e-4
    ):
        super().__init__()
        self.save_hyperparameters()
        self.lstm = torch.nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout,
            batch_first=True,
        )
        self.Who = torch.nn.Linear(hidden_size, output_size)  # hidden -> vocab logits
        self.softmax = torch.nn.LogSoftmax(dim=2)

    def forward(self, x):
        # x: (batch, seq, n_chars) one-hot; returns per-step log-probs
        # over the vocabulary plus the final LSTM hidden state.
        out, hidden_state = self.lstm(x)
        out = self.Who(out)
        logprob = self.softmax(out)
        return logprob, hidden_state

    def training_step(self, batch, batch_idx):
        source, target = batch
        y_hat, hidden_state = self(source)
        # print(y_hat.shape, target.shape)
        # IPython.embed()
        # Flatten (batch, seq, vocab) -> (batch*seq, vocab) so every time
        # step contributes to the loss.
        loss = F.nll_loss(y_hat.view(-1, y_hat.shape[2]), target.view(-1))
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
# -

# +
from aim.pytorch_lightning import AimLogger

input_size = output_size = pyd.n_chars
hidden_size = 512
lr = 1e-4

pl.seed_everything(42)

# NOTE(review): this re-scans the whole source tree a second time; `pyd`
# above could be reused. Kept to preserve the notebook's original behavior.
py_dataset = PySourceDataset('./scikit-learn-master')
train_loader = DataLoader(py_dataset, batch_size=3)

model = CharLM(input_size, hidden_size, output_size, learning_rate=lr)

aim_logger = AimLogger(
    experiment="pt_lightning_exp",
    train_metric_prefix="train_",
)
trainer = pl.Trainer(logger=aim_logger)
trainer.fit(model, train_loader)
main.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.3.1
#     language: julia
#     name: julia-1.3
# ---

using STMO
using Plots, DataStructures

# # Dijkstra's shortest path algorithm
#
# Dijkstra's algorithm is a popular algorithm to find the shortest path between the nodes of a graph. The algorithm can be used in two ways:
#
# - when both a source and a sink node are provided, the algorithm gives the list of nodes of the shortest path, together with the length (distance of the path).
# - when only a source is given, the shortest distance between the source and all (accessible) nodes is returned together with a dictionary representing the tree of the shortest paths between the source and other nodes.
#
# The pseudocode can be found below.
#
# ## Pseudocode of Dijkstra's shortest path algorithm
# Source: Wikipedia.org
# ```
# 1  function Dijkstra(Graph, source, sink (optional)):
# 2      distance[source] := 0   // initialize a dictionary with distance
# 3                              // to source
# 4      for each vertex V in Graph:
# 5          if V ≠ source
# 6              distance[V] := infinity
# 7      previous := empty dict  // stores the previous node in path
# 8
# 10     make empty priority queue Q for vertices to check
# 11     Q.add_with_priority(source, 0)  # first check the source
# 12
# 13
# 14     while Q is not empty:   // The main loop
# 15         get node U with smallest distance from Q
# 16         if U is the sink (if provided)
# 17             break the loop
# 17         for every neighbor V of U:
# 18             alternative := distance[U] + dist(U, V)
# 18             if alternative < distance[V]
# 19                 distance[V] := alternative
# 20                 previous[V] := U
# 21                 Q.add_with_priority(V, alternative)
# 22
# 23     if sink is given
# 24         reconstruct shortest path
# 25         return shortest path and length of path
# 26     else
# 27         return distance, previous
# ```
#
# ## Pseudocode of path reconstruction algorithm
# ```
# 1  function reconstruct_path(previous, source, sink)
# 2      if the source is not in previous
# 3          return empty path
# 4      V := sink        \\ path is reconstructed backwards
# 5      path := [V]      \\ path is a list of nodes
# 6      while V is not source
# 7          V := previous node in path
# 8          add V in beginning of the path
# 9      return the path
# ```
#

# from shortestpaths import dijkstra, reconstruct_path

"""
    dijkstra(graph::AdjList{R,T}, source::T, sink::T) where {R<:Real,T}

Dijkstra's shortest path algorithm.

Inputs:
    - `graph` : adjacency list representing a weighted directed graph
    - `source`
    - `sink`

Outputs:
    - the shortest path
    - the cost of this shortest path
"""
function dijkstra(graph::AdjList{R,T}, source::T, sink) where {R<:Real,T}
    # initialize the tentative distances
    distances = Dict(v => Inf for v in keys(graph))
    # NOTE: the `...` placeholders below are assignment scaffolding to be
    # completed by the student (see Assignment 1).
    distances[source] = ...
    ...
end

"""
    reconstruct_path(previous::Dict{T,T}, source::T, sink::T) where {T}

Reconstruct the path from the output of the Dijkstra algorithm.

Inputs:
    - previous : a Dict with the previous node in the path
    - source : the source node
    - sink : the sink node

Output:
    - the shortest path from source to sink
"""
function reconstruct_path(previous::Dict{T,T}, source::T, sink::T) where {T}
    path = T[sink]
    # `...` placeholders: to be completed by the student (Assignment 1).
    ...
    return ...
end

# **Assignment 1: Dijkstra**
#
# 1. Complete the implementation for Dijkstra's algorithm and the path reconstruction function
# 2. Find the shortest path from node A to I in the example graph below.
# 3. Find the shortest path from Portland to Nashville on the Ticket to Ride graph.
# 4. (optional, hard) Modify `dijkstra` such that the sink is optional. When no sink is given, the dictionary `previous` (where `previous[u] = v` indicates that the shortest path from `v` is via `u`) and `distances`, a dictionary containing the minimal distance from every node to the sink.
#
# ![Example network](Figures/example_graph.png)

# the above graph as an adjacency list (edges are (weight, neighbor) pairs)
graph = Dict('A' => [(2, 'B'), (3, 'D')],
            'B' => [(1, 'C'), (5, 'E')],
            'C' => [(2, 'D'), (1, 'E')],
            'D' => [(3, 'E')],
            'E' => [(2, 'B'), (3, 'F')],
            'F' => [(5, 'G'), (8, 'I')],
            'G' => [(2, 'H'), (5, 'I')],
            'H' => [(3, 'I')],
            'I' => [(7, 'G')]);

dijkstra(graph, 'A', 'I')

using STMO.TicketToRide

tickettoride_graph

# # A* shortest path algorithm
#
# It is hard to explain the algorithm into more depth than [here](http://theory.stanford.edu/~amitp/GameProgramming/) and [here](http://www.redblobgames.com/pathfinding/a-star/introduction.html).
#
# The A\* algorithm is exact (like Dijkstra's algorithm), but it can also use a **heuristic** to speed up the search. In each iteration, the next neighbor $v$ of the current vertex that the algorithm considers is chosen based on the heuristic
#
# $$
# f(v) = g(v) + h(v)
# $$
#
# with $g(v)$ the cost of the path so far and $h(v)$ a heuristic that estimates the cost of the shortest path from $v$ to the goal. The heuristic $h(v)$ should satisfy two properties:
# 1. To guarantee that a shortest path is found, the heuristic should be **admissible**, i.e. it should never overestimate the true distance to goal.
# 2. Evaluating the heuristic should be cheap (it is either pre-computed or is a function).
#
# So the A\* algorithm is basically the same as Dijkstra's algorithm, but with the main difference that the former chooses nodes to explore based on the distance from the starting node, while the latter chooses nodes **based on an estimate of distance to the goal**.
#
# When using A\* to find the shortest path between two physical locations, each vertex corresponds to a point in space. A good heuristic is the (Euclidian) distance between the vertices, as this will always be a lower bound for the actual distance to travel to the goal.
#
# Consider the following example of finding the path from vertex $a$ to vertex $h$.
#
# ![Example how A\* uses the heuristics](Figures/astarexample.png)
#
# The shortest path so far is $[a, e]$ and the algorithm needs to choose to explore vertex $d$ or vertex $g$. Since the path of $ed$ is shorter than the path $eg$, Dijkstra will choose this vertex, even though it is farther removed from the goal $h$ (Dijkstra chooses a neighbor $n$ only based on the current path length from the starting vertex $g(n)$. The A\* algorithm will choose vertex $g$ to explore, because the estimated path length $f(e) = g(e) + h(e) > f(d)$ and hence will approach the goal.
#
# ## Pseudocode of A\* shortest path algorithm
# Source from Wikipedia (modified)
# ```
# 1  function Astar(Graph, source, sink, heuristic):
# 2      make empty priority queue Q for vertices to check
# 3
# 4      add source to Q with priority f(source)
# 5      // use the estimated path length for priority
# 6
# 7      distance[V] := 0       // initialize a dictionary with distance
# 8                             // to source
# 9      previous := empty dict // stores the previous node in path
# 10
# 11     while Q is not empty:  // The main loop
# 12         pop node U with lowest estimated path length to sink
# 13         if U is the sink
# 14             break the loop
# 15         for every neighbor V of U:
# 16             distance_source_U := distance[U] + dist(U, V)
# 17             if V not in distance or distance_source_U < distance[V]
# 18                 distance[V] := distance_source_U
# 19                 previous[V] := U
# 20                 heuristic_V_sink := distance[V] + heuristic(V)
# 21                 Q.add_with_priority(V, heuristic_V_sink)
# 22
# 23     reconstruct shortest path
# 24     return shortest path and length of path
# ```
#
#
# **Assignment 2: A\***
#
# 1. Complete the implementation for the A\* algorithm.
# 2. Compute the shortest path between Portland to Nashville. The function `tickettoride_dist` returns the shortest distance (as the bird flies) between two cities.
# 3. Compare running time of the two shortest path algorithms using `@time`.

"""
    a_star(graph::AdjList{R,T}, source::T, sink::T, heuristic) where {R<:Real,T}

A* shortest path algorithm.

Inputs:
    - `graph` : adjacency list representing a weighted directed graph
    - `source`
    - `sink`
    - `heuristic` : a function that inputs a node and returns a lower bound for the distance to the sink. Note that a distance can be turned into a heuristic using `n -> d(n, sink)`

Outputs:
    - the shortest path
    - the cost of this shortest path
"""
function a_star(graph::AdjList{R,T}, source::T, sink::T, heuristic) where {R<:Real,T}
    # initialize the tentative distances
    distances = Dict(v => Inf for v in keys(graph))
    # `...` placeholders: to be completed by the student (Assignment 2).
    ...
    while ...
    ...
end

# # Word problem
#
# Let us use shortest path for some word problems? What is the shortest chain from one word to another if you can only change 5 letters at a time? We will explore this on all words of length 12.

# +
using STMO.Words

words12 = getwords(12)
# -

# We have the function `hamming` to compute the hamming distance between two strings.

w1, w2 = rand(words12, 2)
hamming(w1, w2)

# We can compute the weighted graph where two words are connected if they have a Hamming distance of at most 5.

words_edges = getwordedges(words12, cutoff=5);
words_graph = edges2adjlist(words_edges)

# **Assignment 3: word salad**
#
# 1. Use Dijkstra and A* to find the distance between two randomly chosen words.
# 2. Can you find the longest word chain?
chapters/08.ShortestPath/shortestpath.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # `poser` steals a bunch of methods from the standard lib and has HUGE api. This document tests some features. from poser import * import builtins assert λ.isinstance(int)(2) ^ λ.isinstance(int)(2.1) λ.range().list()(2, 10, 3) import re (λ * λ.compile()('^a').match + list)(('abc', '123')) import fnmatch assert λ.fnmatch('abc*')('abc123') import types class F(): def x(self): raise NotImplemented() def x(self): return self f = F() f.x = λ.MethodType(x)(f) assert (λ**F+bool)(f.x()) import pathlib assert λ.Path('readme.md').read_text().contains('`poser`')() assert not λ.issubclass(int)(2) and λ.issubclass(int)(int) import statistics λ.range(1, 3)[λ.mean(), λ.stdev(), λ.min(), λ.max()]() import math λ[λ.floordiv(2), λ.truediv(2)](10.5) import inspect assert λ.isfunction()(lambda x:x) import json λ.loads()("""{"a": 1}"""), λ.loads().dumps()("""{"a": 1}""") import toolz λ.valfilter(bool).get('a')(dict(a=2, b=None)) import random λ.randint(1,1)() import glob λ.glob('../poser/*.py').sorted()() import string assert (λ**string.Template)(λ.Template('get $paid')()) # + tags=[] import io with λ.StringIO() + ... as s: λ.do(s.write)("❤️") λ + s.getvalue + print + ... # - import operator, dataclasses @λ.dataclass() class Class: a: dict λ.attrgetter('a').itemgetter('foo')(Class(dict(foo="🐼")))
tests/test_poser_api.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: base # language: python # name: base # --- # + # %load_ext autoreload # %autoreload 2 from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) # + import sys from itertools import groupby import numpy as np import cv2 from tqdm.notebook import tqdm tqdm.pandas() import pandas as pd import os from glob import glob from multiprocessing import Pool import matplotlib.pyplot as plt # import cupy as cp import ast from pathlib import Path import pickle5 as pickle import torch import shutil import sys sys.path.append("../src/") sys.path.append("../yolov5/") import util from joblib import Parallel, delayed from IPython.display import display, HTML from sahi.model import Yolov5DetectionModel from sahi.utils.cv import read_image from sahi.utils.file import download_from_url from sahi.predict import get_prediction, get_sliced_prediction, predict from ensemble_boxes import weighted_boxes_fusion import albumentations as A from IPython.display import Image from matplotlib import animation, rc rc('animation', html='jshtml') # + from norfair import Detection, Tracker # Helper to convert bbox in format [x_min, y_min, x_max, y_max, score] to norfair.Detection class def to_norfair(detects, frame_id): result = [] for x_min, y_min, x_max, y_max, score in detects: xc, yc = (x_min + x_max) / 2, (y_min + y_max) / 2 w, h = x_max - x_min, y_max - y_min result.append(Detection(points=np.array([xc, yc]), scores=np.array([score]), data=np.array([w, h, frame_id]))) return result def euclidean_distance(detection, tracked_object): return np.linalg.norm(detection.points - tracked_object.estimate) def tracking_function(tracker, frame_id, bboxes, scores, best_conf, num_prev_bbox = None): detects = [] predictions = [] if len(scores)>0: for i in range(len(bboxes)): # remember 
to check if scores[i] <= best_conf: continue box = bboxes[i] score = scores[i] x_min = int(box[0]) y_min = int(box[1]) bbox_width = int(box[2]) bbox_height = int(box[3]) detects.append([x_min, y_min, x_min+bbox_width, y_min+bbox_height, score]) predictions.append('{:.2f} {} {} {} {}'.format(score, x_min, y_min, bbox_width, bbox_height)) # Update tracks using detects from current frame tracked_objects = tracker.update(detections=to_norfair(detects, frame_id)) to_add_preds = [] for tobj in tracked_objects: bbox_width, bbox_height, last_detected_frame_id = tobj.last_detection.data if last_detected_frame_id == frame_id: # Skip objects that were detected on current frame continue xc, yc = tobj.estimate[0] x_min, y_min = int(round(xc - bbox_width / 2)), int(round(yc - bbox_height / 2)) #exclude those in the edge if (x_min + bbox_width >= 1279) or (y_min + bbox_height) >= 719 or (x_min <= 1) or (y_min <= 1): continue score = tobj.last_detection.scores[0] to_add_preds.append('{:.2f} {} {} {} {}'.format(score, x_min, y_min, bbox_width, bbox_height)) #predictions.append('{:.2f} {} {} {} {}'.format(score, x_min, y_min, bbox_width, bbox_height)) if (num_prev_bbox is None or (num_prev_bbox - len(predictions) + 1) >= len(to_add_preds)): predictions.extend(to_add_preds) return predictions # - from ensemble_boxes import weighted_boxes_fusion def run_wbf(bboxes, confs, iou_thr=0.5, skip_box_thr=0.00000001): if len(bboxes) == 1: return bboxes[0], confs[0], [] else: for i in range(len(bboxes)): sub_bboxes = bboxes[i] if len(sub_bboxes) > 0: sub_bboxes[:,2:] = sub_bboxes[:,2:] + sub_bboxes[:,:2] sub_bboxes[:,0] = sub_bboxes[:,0] / 1279. sub_bboxes[:,1] = sub_bboxes[:,1] / 719. sub_bboxes[:,2] = sub_bboxes[:,2] / 1279. sub_bboxes[:,3] = sub_bboxes[:,3] / 719. 
bboxes[i] = sub_bboxes labels = [np.ones(len(conf)) for conf in confs] boxes, scores, labels = weighted_boxes_fusion(bboxes, confs, labels, iou_thr=iou_thr, skip_box_thr=0.001, allows_overflow=True, conf_type='avg') if len(boxes) > 0: boxes[:,0] = boxes[:,0] * 1279. boxes[:,1] = boxes[:,1] * 719. boxes[:,2] = boxes[:,2] * 1279. boxes[:,3] = boxes[:,3] * 719. boxes[:,2:] = boxes[:,2:] - boxes[:,:2] boxes = [box for i,box in enumerate(boxes) if scores[i] >= skip_box_thr] scores = [conf for conf in scores if conf >= skip_box_thr] return boxes, scores, labels # + from IPython.display import Image from PIL import Image as Img import subprocess #This code I found in: https://www.kaggle.com/bamps53/create-annotated-video Thank you for sharing. out_dir = "/home/vincent/Kaggle/data/tensorflow-great-barrier-reef/video_check/" if not os.path.exists(out_dir): os.makedirs(out_dir) def load_image(image_dir): assert os.path.exists(image_dir), f'{image_dir} does not exist.' img = cv2.imread(str(image_dir)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) return img def load_image_with_annotations(image_dir, annotations): img = load_image(image_dir) if len(annotations) > 0: for ann in annotations: cv2.rectangle(img, (int(ann['x']), int(ann['y'])), (int(ann['x'] + ann['width']), int(ann['y'] + ann['height'])), (0, 255, 0), thickness=3) return img def show_prediction(img, bboxes, scores, show = True): colors = [(0, 0, 255)] obj_names = ["s"] for box, score in zip(bboxes, scores): cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[0] + box[2]), int(box[1] + box[3])), (255,0,0), 2) cv2.putText(img, f'{score:.2f}', (int(box[0]), int(box[1])-3), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2, cv2.LINE_AA) if show: img = Img.fromarray(img).resize((1280, 720)) return img def save_bulk_images(df, video_id, sequence_id, pred_col, out_dir, best_conf, name=None, s_f=None, e_f=None): width = 1280 height = 720 if name is None: name = "check_pics" save_dir = f'{out_dir}/{name}/' if not 
os.path.exists(save_dir): os.makedirs(save_dir) # I just generate ony part of video if video_id is not None and sequence_id is not None: query = 'video_id == {} and sequence == {}'.format(video_id, sequence_id) if s_f is not None: query = query + 'and video_frame >= {} and video_frame <= {}'.format(s_f, e_f) print(query) video_df = df.query(query) else: video_df = df print(video_df.shape) for _, row in tqdm(video_df.iterrows(), total=len(video_df)): video_id = row.video_id video_frame = row.video_frame annotations = row.annotations img_file = row.image_path img = load_image_with_annotations(img_file, annotations) preds = row[pred_col] best_conf = 0 bboxes = [preds[i][1:] for i in range(len(preds)) if preds[i][0] >= best_conf] confis = [preds[i][0] for i in range(len(preds)) if preds[i][0] >= best_conf] img = show_prediction(img, bboxes, confis, show=False) cv2.putText(img, f'{video_id}-{video_frame}', (10,70), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 1, cv2.LINE_AA) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) cv2.imwrite(save_dir + "{}.jpg".format(video_frame), img) def make_video(df, video_id, sequence_id, pred_col, out_dir, best_conf, name=None, s_f=None, e_f=None): fps = 15 width = 1280 height = 720 if name is None: name = "video" save_path = f'{out_dir}/{name}.mp4' tmp_path = f'{out_dir}/tmp-{name}.mp4' output_video = cv2.VideoWriter(tmp_path, cv2.VideoWriter_fourcc(*"MP4V"), fps, (width, height),) # I just generate ony part of video if video_id is not None and sequence_id is not None: query = 'video_id == {} and sequence == {}'.format(video_id, sequence_id) if s_f is not None: query = query + 'and video_frame >= {} and video_frame <= {}'.format(s_f, e_f) print(query) video_df = df.query(query) else: video_df = df print(video_df.shape) for _, row in tqdm(video_df.iterrows(), total=len(video_df)): video_id = row.video_id video_frame = row.video_frame annotations = row.annotations img_file = row.image_path img = load_image_with_annotations(img_file, annotations) 
preds = row[pred_col] best_conf = 0 bboxes = [preds[i][1:] for i in range(len(preds)) if preds[i][0] >= best_conf] confis = [preds[i][0] for i in range(len(preds)) if preds[i][0] >= best_conf] img = show_prediction(img, bboxes, confis, show=False) cv2.putText(img, f'{video_id}-{video_frame}', (10,70), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 1, cv2.LINE_AA) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) output_video.write(img) output_video.release() if os.path.exists(save_path): os.remove(save_path) subprocess.run( ["ffmpeg", "-i", tmp_path, "-crf", "18", "-preset", "veryfast", "-vcodec", "libx264", save_path] ) os.remove(tmp_path) # - from base64 import b64encode def play(filename): html = '' video = open(filename,'rb').read() src = 'data:video/mp4;base64,' + b64encode(video).decode() html += '<video width=1280 controls autoplay loop><source src="%s" type="video/mp4"></video>' % src return HTML(html) # # Read Data INPUT_DIR = Path("../../data/tensorflow-great-barrier-reef/") df_origin = pd.read_csv(INPUT_DIR / "train.csv") # + df = pd.read_csv(INPUT_DIR / "train.csv") folds = util.load_pickle("../input/fold_test_2.pkl") df["fold"] = df["sequence"].apply(lambda x: folds[x]) highFP_df = pd.read_csv('../input/df_highFPNoBB.csv') df = pd.merge(df, highFP_df[['video_id',"video_frame","highFBNoBB"]], on=["video_id","video_frame"], how='left') df["highFBNoBB"].fillna(False, inplace=True) df.shape # - data_param = {'root_dir':INPUT_DIR,'label_dir':INPUT_DIR / "labels"} df = df.progress_apply(lambda x: util.get_path(x, data_param, infer=True), axis=1) df['annotations'] = df['annotations'].progress_apply(lambda x: ast.literal_eval(x)) df["real_bbox"] = df["annotations"].apply(lambda annots: [list(annot.values()) for annot in annots]) df['num_bbox'] = df['annotations'].progress_apply(lambda x: len(x)) data = (df.num_bbox>0).value_counts(normalize=True)*100 print(f"No BBox: {data[0]:0.2f}% | With BBox: {data[1]:0.2f}%") df['train'] = False df.loc[df.query("fold == 0 and (num_bbox 
> 0 or highFBNoBB)").index, 'train'] = True df['train'].value_counts() # COTS per frame df.groupby("fold").apply(lambda df: df["num_bbox"].sum() / df.shape[0]) df['fold'].value_counts(normalize=True) df.head() # # Add OOF prediction df_oof = util.load_pickle("../input/wbf_notrack_pred.pkl") df_oof.head() df['wbf_pred'] = None df.loc[df['fold']==1, 'wbf_pred'] = df_oof['notrack_pred'] df_pred1 = util.load_pickle("whole_pred_0204_yolov5s6_B.pkl") df_pred2 = util.load_pickle("whole_pred_0205_yolov5m6_B_LS02.pkl") df['pred_0204_yolov5s6_B'] = df_pred1['pred_0204_yolov5s6_B'] df['pred_0205_yolov5m6_B_LS02'] = df_pred2['pred_0205_yolov5m6_B_LS02'] best_conf_dict = { '0204_yolov5s6_B':0.2, '0205_yolov5m6_B_LS02':0.3, } iou_thr = 0.4 skip_box_thr = 0.22 wbf_models =["0204_yolov5s6_B", '0205_yolov5m6_B_LS02'] df["wbf_noTrack_pred"] = None frame_id = 0 tracker = Tracker( distance_function=euclidean_distance, distance_threshold=30, hit_inertia_min=3, hit_inertia_max=6, initialization_delay=2, ) num_prev_bbox = None for i, idx in enumerate(df.query("fold!=1").index): row = df.loc[idx] bboxes_l = [] confs_l = [] for model_version in wbf_models: BEST_CONF = best_conf_dict[model_version] pred_col = "pred_" + model_version prd_bboxs = row[pred_col] prd_bboxs = [p for p in prd_bboxs if p[0] >= BEST_CONF] bboxes, confis = [p[1:] for p in prd_bboxs], [p[0] for p in prd_bboxs] bboxes = np.array(bboxes,dtype=np.float64) bboxes_l.append(bboxes) confs_l.append(confis) bboxes_l = np.array(bboxes_l) wbf_boxes, wbf_confs, _ = run_wbf(bboxes_l, confs_l, iou_thr, skip_box_thr) df.at[idx, "wbf_noTrack_pred"] = [[wbf_confs[i]] + wbf_boxes[i].tolist() for i in range(len(wbf_boxes))] predictions = tracking_function(tracker, frame_id, wbf_boxes, wbf_confs, 0, num_prev_bbox) prd_bboxs_tracking = [[float(p) for p in pred.split(" ")] for pred in predictions] num_prev_bbox = len(wbf_boxes) df.at[idx, "wbf_pred"] = prd_bboxs_tracking frame_id += 1 df.loc[df.query("fold==1").index, "wbf_noTrack_pred"] = 
df.query("fold==1")['wbf_pred'] # ## check score all_gt = [np.array(x) for x in df['real_bbox']] all_gt_m = all_gt.copy() all_pred = [np.array(x) if x is not None else np.array([]) for x in df['wbf_pred']] all_pred_notrack = [np.array(x) if x is not None else np.array([]) for x in df['wbf_noTrack_pred']] # + def check_score_gt(df, q): selected_gt = [] selected_pred = [] for idx in df.query(q).index: selected_gt.append(all_gt[idx]) selected_pred.append(all_pred[idx]) f2_dict = util.calc_f2_score(selected_gt, selected_pred, verbose=False) return f2_dict['f2'] def check_score_gt_notrack(df, q): selected_gt = [] selected_pred = [] for idx in df.query(q).index: selected_gt.append(all_gt[idx]) selected_pred.append(all_pred_notrack[idx]) f2_dict = util.calc_f2_score(selected_gt, selected_pred, verbose=False) return f2_dict['f2'] # - check_score_gt(df, "fold==1") check_score_gt(df, "train") check_score_gt(df, "not train") check_score_gt_notrack(df, "train") # ## save video # + # for video_id, sequence in df.query("fold!=1").groupby(["video_id","sequence"]).size().index: # name = f'check-{video_id}-{sequence}' # make_video(df, video_id, sequence, 'wbf_pred', out_dir, 0, name=name) # + video_id=2 sequence=22643 s_f = 5661 e_f = 5691 name = f'checkFix2-{video_id}-{sequence}_{s_f}_{e_f}' make_video(df, video_id, sequence, 'wbf_pred', out_dir, 0, name=name,s_f=s_f, e_f=e_f) # - # # modify GT # ## Modify fold==1 GT # 1-15827-2148-2205 video_id = 1 sequence = 15827 s_f = 2148 e_f = 2205 q = f"video_id == {video_id} and sequence == {sequence} and video_frame >= {s_f} and video_frame <= {e_f}" df_check = df.query(q).copy() for idx in df_check.index: pred = all_pred[idx] if len(all_gt_m[idx]) == 0: if len(pred) > 0: all_gt_m[idx] = pred[:,1:] # 2-26651-4639-4748 video_id = 2 sequence = 26651 s_f = 4639 e_f = 4748 q = f"video_id == {video_id} and sequence == {sequence} and video_frame >= {s_f} and video_frame <= {e_f}" df_check = df.query(q).copy() for idx in df_check.index: pred = 
all_pred[idx] if len(all_gt_m[idx]) == 0: if len(pred) > 0: all_gt_m[idx] = pred[:,1:] # modified f2 selected_gt = [] selected_pred = [] for idx in df.query("fold==1").index: selected_gt.append(all_gt_m[idx]) selected_pred.append(all_pred[idx]) f2_dict = util.calc_f2_score(selected_gt, selected_pred, verbose=False) f2_dict['f2'] df.query("video_frame == 6845") df.loc[[11893]] # ## Modify fold != 1 GT # + video_id = 1 sequence = 18048 s_f = 6709 e_f = 6780 name = f'InterestingFP_wbf-{video_id}-{sequence}-{s_f}-{e_f}' make_video(df, video_id, sequence, 'wbf_pred', out_dir, 0, name=name,s_f=s_f, e_f=e_f) # + video_id = 1 sequence = 8503 s_f =3891 e_f =3904 name = f'Error_TP_wbf-{video_id}-{sequence}-{s_f}-{e_f}' make_video(df, video_id, sequence, 'wbf_pred', out_dir, 0, name=name,s_f=s_f, e_f=e_f) # - # # Add GT video_dir = out_dir check_videos = glob(video_dir + "*.mp4") gt_candidates = [] for name in check_videos: name = name.split("/")[-1] if name in ['UnkownFP_wbf-2-26651-4154-4200.mp4', 'commonFP_wbf-0-996-11971-11994.mp4']: continue if "fp" in name.lower() and "error" not in name.lower(): print(name) _, video_id, sequence, s_f, e_f_mp4 = name.split("-") e_f = e_f_mp4[:-4] gt_candidates.append([int(video_id), int(sequence), int(s_f), int(e_f)]) gt_candidates gt_candidates_dfs = [] cols = ["video_id","sequence","video_frame"] for video_id, sequence, s_f, e_f in gt_candidates: l = e_f - s_f + 1 df_ = pd.DataFrame([f for f in range(s_f, e_f+1)], columns=['video_frame']) df_['video_id'] = video_id df_['sequence'] = sequence gt_candidates_dfs.append(df_) gt_candidates_df = pd.concat(gt_candidates_dfs)[cols] df_new = pd.merge(gt_candidates_df, df[cols + ['annotations','image_path',"real_bbox", "wbf_noTrack_pred"]], on=cols) df_new = df_new.drop_duplicates(subset=['video_id',"sequence","video_frame"]).sort_values(["video_id","video_frame"]) df_new["added_pred"] = [list([]) for _ in range(df_new.shape[0])] df_new["new_real_bbox"] = [list([]) for _ in 
range(df_new.shape[0])] for idx, row in df_new.iterrows(): if len(row['wbf_noTrack_pred']) == 0: continue elif len(row['real_bbox']) == 0: df_new.at[idx, 'added_pred'] = df_new.loc[idx,'wbf_noTrack_pred'] else: wbf_pred = np.array(row['wbf_noTrack_pred']) real_gt = np.array(row['real_bbox']) added_pred = [] ious = util.calc_iou(np.array(wbf_pred)[:,1:], np.array(real_gt)) ious_max = ious.max(axis=1) added_pred = wbf_pred[ious_max < 0.3].copy().tolist() df_new.at[idx,'added_pred'] = added_pred df_new['added_pred'].apply(len).sum() df_new['new_real_bbox'] = df_new.apply(lambda x: x['real_bbox'] + [p[1:] for p in x['added_pred']], axis=1) df_new['wbf_pred'] = df_new['new_real_bbox'].apply(lambda pp: [[1] + p for p in pp]) # + #make_video(df_new, None, None, 'wbf_pred', out_dir, 0, name="new_GT_check",s_f=None, e_f=None) # - df_new["new_real_bbox"] = [list([]) for _ in range(df_new.shape[0])] df_new["wbf_pred"] = [list([]) for _ in range(df_new.shape[0])] # + # remove those unnecessary pairs = [ [0, 40], [0, 69], [0, 101], [0, 103], [0, 107], [0, 140], [0, 153], [0, 153], ] pairs = np.concatenate(pairs).tolist() pairs.extend( [ 0,4596,0,9648,0,11860, ]) for i in range(11857, 11870): pairs.extend([0, i]) for i in range(4590, 4597): pairs.extend([0, i]) for i in range(4751, 4759): pairs.extend([0, i]) pairs.extend([ 0, 11900, 0, 12171, 1, 4177, 1, 4178, 1, 4184, 1, 4261, 1, 4262, 1, 4255, 1, 4400, 1, 5269, 1, 5335, 1, 5344, 1, 5348, 1, 5436, 1, 5439, 1, 5882, 1, 6737, 2, 4712, 2, 4717, 2, 4721, 2,5704, 2,5741, 2, 5907, 0, 4525, ]) for i in range(5516, 5594): pairs.extend([1, i]) for i in range(5675, 5684): pairs.extend([1, i]) len(pairs) # - appeared = set([]) removed_FP = 0 manual_process = [] for i in range(len(pairs)//2): video_id = pairs[i * 2] video_frame = pairs[i * 2 + 1] if (video_id, video_frame) in appeared: continue else: appeared.add((video_id, video_frame)) idx = df_new.query("video_id == @video_id and video_frame == @video_frame").index[0] if 
len(df_new.loc[idx, 'added_pred']) <= 1: removed_FP += len(df_new.loc[idx,'added_pred']) df_new.at[idx, 'added_pred'] = [] else: manual_process.extend([video_id, video_frame]) print(removed_FP) len(manual_process) df_new.index tmp = [] mannual_process_df = None for i in range(len(manual_process)//2): video_id = manual_process[i * 2] video_frame = manual_process[i * 2 + 1] tmp.append(df_new.query("video_id == @video_id and video_frame == @video_frame").copy()) mannual_process_df = pd.concat(tmp) mannual_process_df mannual_process_df['added_pred'].apply(lambda x: [p[0] for p in x]) for idx, row in mannual_process_df.iterrows(): if row['sequence'] == 996: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'added_pred'] if p[2] >= 360] removed_FP += 1 elif row['sequence'] == 8503 and row['video_frame'] <=4255: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'added_pred'] if p[2] <= 360] removed_FP += 1 elif row['sequence'] == 8503: mannual_process_df.at[idx, 'added_pred'] = [] removed_FP += 2 else: origin_len = len(mannual_process_df.at[idx, 'added_pred']) mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'added_pred'] if p[0] >= 0.5] removed_FP += origin_len - len(mannual_process_df.at[idx, 'added_pred']) removed_FP df_new.loc[mannual_process_df.index, 'added_pred'] = mannual_process_df['added_pred'] df_new["added_pred"].apply(len).sum() df_new['new_real_bbox'] = df_new.apply(lambda x: x['real_bbox'] + [p[1:] for p in x['added_pred']], axis=1) df_new['wbf_pred'] = df_new['new_real_bbox'].apply(lambda pp: [[1] + p for p in pp]) # + #make_video(df_new, None, None, 'wbf_pred', out_dir, 0, name="new_GT_check_clean",s_f=None, e_f=None) # - # ## combine with old data if 'mark' in df.columns: del df['mark'] df_new['mark'] = True df = pd.merge(df, df_new[cols + ['mark']], on=cols, how='left') df['mark'].fillna(False, inplace=True) df["new_real_bbox"] = df['real_bbox'] 
df['added_pred'] = [list([]) for i in range(df.shape[0])] df.loc[df.query('mark').index, "new_real_bbox"] = df_new['new_real_bbox'].values df.loc[df.query('mark').index, "added_pred"] = df_new['added_pred'].values df['num_bbox'].sum() 1120/11898 df.groupby("fold").apply(lambda df: np.sum(((df['added_pred'].apply(len)>0).astype(int) + (df['num_bbox'] == 0)).astype(int) > 1 )) 353 / (df['num_bbox'] > 0).sum() df.groupby("fold").apply(lambda df: np.sum(df['added_pred'].apply(len))) # save it def from_bbox_to_str(bbox): if len(bbox) == 0: return '[]' else: d = [] for p in bbox: d.append({"x":round(p[0],1), "y":round(p[1],1), "width":round(p[2],1), "height":round(p[3],1)}) return str(d) def from_pred_to_str(bbox): if len(bbox) == 0: return '[]' else: d = [] for p in bbox: d.append({'conf':round(p[0],3), "x":round(p[1],2), "y":round(p[2],3), "width":round(p[3],4), "height":round(p[4],5)}) return str(d) df['new_annotations'] = df['new_real_bbox'].apply(from_bbox_to_str) df['new_added_pred'] = df['added_pred'].apply(from_pred_to_str) ast.literal_eval(df.iloc[9316]["new_annotations"]) ast.literal_eval(df.iloc[9316]["new_added_pred"]) ast.literal_eval(df_origin.query("video_id == 1 and video_frame==4164").iloc[0]['annotations']) df_origin = pd.merge(df_origin, df[['video_id','video_frame','new_annotations','new_added_pred']], on=['video_id','video_frame'], how='left') df_origin.fillna("[]", inplace=True) df_origin # + #df_origin.to_csv("../../data/tensorflow-great-barrier-reef/train_with_added_GT.csv",index=False) # - # # checking FP from best LB df_origin.head() LB_pred = pd.read_csv("../input/train_oof_v79.csv") LB_pred['annotations'] = df['new_annotations'].apply(ast.literal_eval) LB_pred['image_path'] = df['image_path'] LB_pred['real_bbox'] = df['new_real_bbox'] LB_pred['wbf_pred'] = LB_pred['pred'].apply(lambda x: [list(p.values()) for p in ast.literal_eval(x)]) LB_pred["added_pred"] = [list([]) for _ in range(LB_pred.shape[0])] LB_pred["new_real_bbox"] = [list([]) for 
_ in range(LB_pred.shape[0])] for idx, row in LB_pred.iterrows(): if len(row['wbf_pred']) == 0 or len(row['real_bbox']) == 0: continue else: wbf_pred = np.array(row['wbf_pred']) real_gt = np.array(row['real_bbox']) added_pred = [] ious = util.calc_iou(np.array(wbf_pred)[:,1:], np.array(real_gt)) ious_max = ious.max(axis=1) added_pred = wbf_pred[ious_max < 0.3].copy().tolist() LB_pred.at[idx,'wbf_pred'] = added_pred # + # for video_id, sequence in LB_pred.groupby(["video_id","sequence"]).size().index: # make_video(LB_pred, video_id, sequence, 'wbf_pred', out_dir[:-1] + "_2/", 0, name=f"check-{video_id}-{sequence}",s_f=None, e_f=None) # + # video_id = 0 # sequence = 8399 # s_f = 4722 # e_f = 4821 # make_video(LB_pred, video_id, sequence, 'wbf_pred', out_dir[:-1] + "_2/", 0, name=f"checkBlackHole-{video_id}-{sequence}-s{s_f}-e{e_f}",s_f=s_f, e_f=e_f) # + # pic_out_dir = '/home/vincent/Kaggle/data/tensorflow-great-barrier-reef/pic_check/' # for video_id, sequence in LB_pred.groupby(["video_id","sequence"]).size().index: # save_bulk_images(LB_pred, video_id, sequence, 'wbf_pred', pic_out_dir, 0, name=f"v{video_id}-s{sequence}",s_f=None, e_f=None) # - # # Add GT from best LB # + # remove those unnecessary pairs = [] pairs.extend( [ 0, 8900,1, 8946, 0, 5762, 0, 5781,2 ,4691,2,4695,2,4699, 0,9633, ]) for i in range(85, 133): pairs.extend([0, i]) for i in range(85, 133): pairs.extend([0, i]) for i in range(220, 243): pairs.extend([0, i]) for i in range(11843, 11890): pairs.extend([0, i]) for i in range(12187, 12241): pairs.extend([0, i]) for i in range(1877, 1945): pairs.extend([0, i]) for i in range(2032, 2039): pairs.extend([0, i]) for i in range(2230, 2272): pairs.extend([0, i]) for i in range(1496, 1543): pairs.extend([0, i]) for i in range(4448, 4488): pairs.extend([0, i]) for i in range(4523, 4548): pairs.extend([0, i]) for i in range(9376, 9460): # remove those fish in the ocean pairs.extend([0, i]) for i in range(9523, 9544): pairs.extend([0, i]) for i in 
range(9599, 9630 ): pairs.extend([0, i]) for i in range(9041, 9212): pairs.extend([1, i]) for i in range(9242, 9275): pairs.extend([1, i]) for i in range(9344, 9372): pairs.extend([1, i]) for i in range(4056, 4088): pairs.extend([1, i]) for i in range(4197, 4265): pairs.extend([1, i]) for i in range(5828, 5916): pairs.extend([1, i]) for i in range(4658, 4685): pairs.extend([2, i]) for i in range(4705, 4719): pairs.extend([2, i]) for i in range(4705, 4719): pairs.extend([2, i]) for i in range(5975, 6058): pairs.extend([0, i]) len(pairs) # - added_GT = 0 appeared = set([]) removed_FP = 0 manual_process = [] for i in range(len(pairs)//2): video_id = pairs[i * 2] video_frame = pairs[i * 2 + 1] if (video_id, video_frame) in appeared: continue else: appeared.add((video_id, video_frame)) idx = LB_pred.query("video_id == @video_id and video_frame == @video_frame").index[0] if len(LB_pred.loc[idx, 'wbf_pred']) <= 1: added_GT += len(LB_pred.loc[idx,'wbf_pred']) LB_pred.at[idx, 'added_pred'] = LB_pred.at[idx, "wbf_pred"] else: manual_process.extend([video_id, video_frame]) added_GT, len(manual_process) LB_pred[LB_pred['added_pred'].apply(len) > 0].query("sequence == 45015") remove = [6050, 6053, 6054, 6057] for video_frame in remove: LB_pred.at[LB_pred.query('sequence == 45015 and video_frame == @video_frame').index[0], "added_pred"] = [] added_GT -= 1 added_GT tmp = [] mannual_process_df = None for i in range(len(manual_process)//2): video_id = manual_process[i * 2] video_frame = manual_process[i * 2 + 1] tmp.append(LB_pred.query("video_id == @video_id and video_frame == @video_frame").copy()) mannual_process_df = pd.concat(tmp) mannual_process_df.sort_values(["sequence","video_frame"])[["sequence","video_frame","wbf_pred"]] for idx, row in mannual_process_df.iterrows(): if row['sequence'] == 996: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] >= 640] elif row['sequence'] == 8503: mannual_process_df.at[idx, 'added_pred'] 
= [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] <= 200] elif row['sequence'] == 26651: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] <= 640] elif row['sequence'] == 45015: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] >= 920] elif row['sequence'] == 45518: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] <= 320 and p[2] <= 400 ] elif row['sequence'] == 53708: if row['video_frame'] in [9451, 9455]: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] <= 640] elif row['video_frame'] in [9633]: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] == 115] else: mannual_process_df.at[idx, 'added_pred'] = mannual_process_df.at[idx, 'wbf_pred'] elif row["sequence"] == 59337: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] >= 500] elif row['sequence'] == 60754: if row['video_frame'] in [9056]: mannual_process_df.at[idx, 'added_pred'] = [] elif row['video_frame'] in [9064]: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[1] <= 590] elif row['video_frame'] in [9081]: mannual_process_df.at[idx, 'added_pred'] = [p for p in mannual_process_df.at[idx, 'wbf_pred'] if p[0] >= 0.45] else: mannual_process_df.at[idx, 'added_pred'] = mannual_process_df.at[idx, 'wbf_pred'] added_GT += len(mannual_process_df.at[idx, 'added_pred']) added_GT LB_pred.loc[mannual_process_df.index, 'added_pred'] = mannual_process_df['added_pred'] LB_pred['new_real_bbox'] = LB_pred.apply(lambda x: x['real_bbox'] + [p[1:] for p in x['added_pred']], axis=1) LB_pred['combined_bbox'] = LB_pred['new_real_bbox'].apply(lambda pp: [[1] + p for p in pp]) # + # make_video(LB_pred[LB_pred['added_pred'].apply(len)>0], None, None, 'combined_bbox', 
out_dir[:-1] + "_2/", 0, name="added_LB_part",s_f=None, e_f=None) # - cols if 'mark' in df.columns: del df['mark'] LB_pred['mark'] = True df = pd.merge(df, LB_pred[cols + ['mark']], on=cols, how='left') df['mark'].fillna(False, inplace=True) df["new_real_bbox_LB"] = df['new_real_bbox'] df['added_pred_LB'] = [list([]) for i in range(df.shape[0])] df.loc[df.query('mark').index, "new_real_bbox_LB"] = LB_pred['new_real_bbox'].values df.loc[df.query('mark').index, "added_pred_LB"] = LB_pred['added_pred'].values df['new_annotations_LB'] = df['new_real_bbox_LB'].apply(from_bbox_to_str) df['new_added_pred_LB'] = df['added_pred_LB'].apply(from_pred_to_str) df_origin.to_csv("../../data/tensorflow-great-barrier-reef/train_with_added_GT.csv",index=False) # + # sanity check # - df_origin = pd.merge(df_origin, df[['video_id','video_frame','new_annotations_LB','new_added_pred_LB']], on=['video_id','video_frame'], how='left') df_origin.fillna("[]", inplace=True) df_origin.head(2) # number of bbox df_origin_check = df_origin.copy() annot_cols = ['annotations', "new_annotations", "new_annotations_LB"] for col in annot_cols: df_origin_check[col] = df_origin_check[col].apply(ast.literal_eval) print(col, df_origin_check[col].apply(len).sum()) (13181 / 11898 - 1) # number of image with bbox for col in annot_cols: print(col, (df_origin_check[col].apply(len) > 0).sum()) (5302 / 4919 - 1) # # check LB prediction size LB_pred['LB_pred'] = LB_pred['pred'].apply(lambda x: [list(p.values()) for p in ast.literal_eval(x)]) LB_pred.LB_pred.apply(len).sum() LB_pred.columns df.columns LB_pred['fold'] = df['fold'] LB_pred['highFBNoBB'] = df['highFBNoBB'] LB_pred['train_bbox'] = df['new_real_bbox'] LB_pred_train = LB_pred[((LB_pred['train_bbox'].apply(len) > 0) | LB_pred['highFBNoBB'])].copy() LB_pred_OOF = LB_pred[~((LB_pred['train_bbox'].apply(len) > 0) | LB_pred['highFBNoBB'])].copy() LB_pred_OOF.shape LB_pred_train.LB_pred.apply(len).sum() LB_pred_OOF.LB_pred.apply(len).sum() 
df[~((df['new_real_bbox'].apply(len) > 0) | df['highFBNoBB'])]['wbf_pred'].apply(len).sum() LB_pred.query("video_id==0 and video_frame==9633")["added_pred"].iloc[0] LB_pred 44 * 35 1280 * 720 * 0.001429 LB_pred_OOF['pred'].iloc[100] # # Improve Tracking # + # tracking needs to be improved! especially when the camera moves fast # - ??Tracker iou_thr = 0.4 skip_box_thr = 0.22 wbf_models =["0204_yolov5s6_B", '0205_yolov5m6_B_LS02'] df["wbf_noTrack_pred"] = None frame_id = 0 tracker = Tracker( distance_function=euclidean_distance, distance_threshold=5, hit_inertia_min=3, hit_inertia_max=6, initialization_delay=2, ) for i, idx in enumerate(df.query("sequence == 18048").index): row = df.loc[idx] bboxes_l = [] confs_l = [] for model_version in wbf_models: BEST_CONF = best_conf_dict[model_version] pred_col = "pred_" + model_version prd_bboxs = row[pred_col] prd_bboxs = [p for p in prd_bboxs if p[0] >= BEST_CONF] bboxes, confis = [p[1:] for p in prd_bboxs], [p[0] for p in prd_bboxs] bboxes = np.array(bboxes,dtype=np.float64) bboxes_l.append(bboxes) confs_l.append(confis) bboxes_l = np.array(bboxes_l) wbf_boxes, wbf_confs, _ = run_wbf(bboxes_l, confs_l, iou_thr, skip_box_thr) df.at[idx, "wbf_noTrack_pred"] = [[wbf_confs[i]] + wbf_boxes[i].tolist() for i in range(len(wbf_boxes))] predictions = tracking_function(tracker, frame_id, wbf_boxes, wbf_confs, 0) prd_bboxs_tracking = [[float(p) for p in pred.split(" ")] for pred in predictions] df.at[idx, "wbf_pred"] = prd_bboxs_tracking frame_id += 1 seq_check = 18048 df_check = df.query("sequence == @seq_check").copy() df_check["pred_track_num"] = df_check["wbf_pred"].apply(len) df_check["pred_notrack_num"] = df_check["wbf_noTrack_pred"].apply(len) df_check[["video_frame","num_bbox","pred_track_num","pred_notrack_num"]].iloc[20:30] ## 11851 is where the things go wrong # # Similarity import image_similarity_measures from image_similarity_measures.quality_metrics import metric_functions metric_functions 
df.groupby("sequence").size().sort_values() # + max_p = 255 seq_check = [18048,17665,44160,29424] df_check = df.query("sequence in @seq_check").copy() prev_image = cv2.imread(str(df.iloc[-1]['image_path'])) for idx, row in tqdm(df_check.iterrows()): curr_image = cv2.imread(str(row['image_path'])) for name, func in metric_functions.items(): if name in ['rmse']: sim = func(curr_image, prev_image, max_p) else: continue df_check.loc[idx, name] = sim prev_image = curr_image # - fig, axes = plt.subplots(1,2,figsize=(18,9)) axes[0].plot(df_check['rmse'].values) axes[0].twinx().plot(np.diff(df_check['sequence']) != 0, color='red' ) axes[1].plot(np.abs(np.diff(df_check['rmse'].values))) #axes[1].twinx().plot(np.diff(df_check['sequence']) != 0, color='red' ) plt.show() prev_image = cv2.imread(str(df.iloc[-1]['image_path'])) for idx, row in tqdm(df.iterrows()): curr_image = cv2.imread(str(row['image_path'])) for name, func in metric_functions.items(): if name in ['rmse']: sim = func(curr_image, prev_image, max_p) else: continue df.loc[idx, name] = sim prev_image = curr_image fig, axes = plt.subplots(1,2,figsize=(18,9)) axes[0].plot(df['rmse'].values) axes[0].twinx().plot(np.diff(df['sequence']) != 0, color='red' ) axes[1].plot(np.abs(np.diff(df['rmse'].values))) #axes[1].twinx().plot(np.diff(df_check['sequence']) != 0, color='red' ) plt.show() df['rmse_diff_abs'] = df['rmse'].diff().abs() df_seq_place = df.loc[np.diff(df['sequence'], prepend=0) != 0].copy() df_seq_place[['sequence','rmse_diff_abs']]#.min() prev_img
notebook/AddGT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- import pandas as pd import os download_path = '/Users/akhiljalan/Downloads/' download_files = os.listdir(download_path) datafile_paths = [] for name in download_files: if '_datasets_full.csv' in name: datafile_paths.append(download_path + name) datafile_paths[0] test_df = pd.read_csv(datafile_paths[0]) update_times = list(test_df['4/30/18 - time rows updated(days)']) update_times import numpy as np np.histogram(update_times) pd.cut(update_times, bins=20, retbins=True, include_lowest=True) test_df['4/30/18 - time rows updated(days)'] = test_df['4/30/18 - time rows updated(days)'].sort_values() update_days = sorted(test_df['4/30/18 - time rows updated(days)']) test_df['days_since_updated'] = update_days out1, out2 = pd.cut(test_df['days_since_updated'], bins=20, retbins=True, include_lowest=True) out1[3] out1[23] len(out1) out1[2] == out1[1] cur_interval = out1[0] upper_bounds = [cur_interval.right] out_counts = [1] for i in range(1, len(out1)): if out1[i] == cur_interval: out_counts[-1] += 1 else: cur_interval = out1[i] upper_bounds.append(cur_interval.right) out_counts.append(1) datafile_counts = [np.cumsum(out_counts)] out_counts datafile_counts int_upper_bounds = [1 + int(x) for x in upper_bounds] out_file = pd.DataFrame(columns=['Upper Bounds', 'Berkeley']) out_file['Upper Bounds'] = int_upper_bounds [5] * 5 len(datafile_counts[0]) len(int_upper_bounds) # + # out_file['Berkeley'] = datafile_counts[0] # - out_file int_upper_bounds.append(int(1e8)) int_upper_bounds # # All Other Cites for i in range(1, len(datafile_paths)): # current_df = pd.read_csv(datafile_paths[i]) if i in [1, 4, 5]: update_times = list(pd.read_csv(datafile_paths[i])['4/30/18 - time rows updated(days)']) elif i in [2, 3]: update_times = 
list(pd.read_csv(datafile_paths[i])['4/30/18 - time updated(days)']) # else: # print(pd.read_csv(datafile_paths[i]).columns.values) # = list(current_df['4/30/18 - time rows updated(days)']) sorted_update_times = sorted(update_times) out_counts = [] cur_upper_bound_index = 0 cur_upper_bound = int_upper_bounds[cur_upper_bound_index] #establish current upper bound for least value while sorted_update_times[0] > cur_upper_bound: out_counts.append(0) cur_upper_bound_index += 1 cur_upper_bound = int_upper_bounds[cur_upper_bound_index] out_counts.append(1) #loop through all other values for i in range(1, len(sorted_update_times)): if sorted_update_times[i] < cur_upper_bound: out_counts[-1] += 1 else: cur_upper_bound_index += 1 cur_upper_bound = int_upper_bounds[cur_upper_bound_index] out_counts.append(1) if len(out_counts) < len(int_upper_bounds): num_missing = len(int_upper_bounds) - len(out_counts) out_counts += [sum(out_counts)] * num_missing datafile_counts.append(out_counts) datafile_counts datafile_counts[0] = list(datafile_counts[0]) + [24] datafile_counts [len(x) for x in datafile_counts] # # Write to File max_update_time = 0 for i in range(len(datafile_paths)): # current_df = pd.read_csv(datafile_paths[i]) if i in [1, 4, 5]: update_times = list(pd.read_csv(datafile_paths[i])['4/30/18 - time rows updated(days)']) elif i in [2, 3]: update_times = list(pd.read_csv(datafile_paths[i])['4/30/18 - time updated(days)']) if max(update_times) > max_update_time: max_update_time = max(update_times) max_update_time int_upper_bounds[-1] = max_update_time + 1 int_upper_bounds out_file['Upper Bounds'] = int_upper_bounds out_file for i in range(len(datafile_paths)): filename = datafile_paths[i].split('/Users/akhiljalan/Downloads/')[-1] city_name = filename.split('_')[0] print(city_name) datafile_counts out_file['Berkeley'] = datafile_counts[0] out_file['<NAME>'] = datafile_counts[1] out_file out_file['Oakland'] = datafile_counts[2] out_file['Richmond'] = datafile_counts[3] 
out_file['<NAME>'] = datafile_counts[4] out_file['<NAME>'] = datafile_counts[5] out_file np.cumsum(datafile_counts[3]) col_names = out_file.columns.values for name in col_names[1:]: cum_sum = np.cumsum(out_file[name]) out_file[name] = cum_sum for name in col_names[1:]: percentage_vals = out_file[name] / max(out_file[name]) out_file[name] = percentage_vals out_file out_file.to_csv('') out_counts pd.read_csv(datafile_paths[2]) ['4/30/18 - time rows updated(days)']) out2[1:] out1[0].right out1 out_counts sum(out_counts) out1, out2 = pd.cut(test_df['4/30/18 - time rows updated(days)'], bins=20, retbins=True, include_lowest=True) out1 out2 colname = "test1" colvals = ["test2", "test3", "test4"] try: temp_df = pd.read_csv('test.csv') temp_df[colname] = colvals except: temp_df = pd.DataFrame( { colname:colvals } ) temp_df.to_csv('test.csv') temp_df temp_df = pd.read_csv('test.csv') temp_df
CTSP/code/date-utils.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd from pandasql import sqldf pysqldf = lambda q: sqldf(q, globals()) # - subjects = pd.read_csv("subject.csv") subjects measurements = pd.read_csv("measurement.csv") measurements combined_df = pysqldf("SELECT * FROM subjects JOIN measurements \ ON subjects.subject_id = measurements.subject_id \ WHERE gender = 'M' AND data > 105") combined_df # to join by pandas rather than sql subjects.merge(measurements, on=['subject_id'])
JoinDataFrame/JoinSubjectMeasurement.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # MNIST - Handwriting Recognition from com.yahoo.ml.caffe.DisplayUtils import * from com.yahoo.ml.caffe.CaffeOnSpark import * from com.yahoo.ml.caffe.Config import * from com.yahoo.ml.caffe.DataSource import * # ## Training Data df = sqlCtx.read.parquet('/Users/afeng/dev/ml/CaffeOnSpark/data/mnist_test_dataframe') show_df(df,5) # # Training cos=CaffeOnSpark(sc,sqlContext) args={} args['conf']='/Users/afeng/dev/ml/CaffeOnSpark/data/lenet_dataframe_solver.prototxt' args['model']='file:///tmp/lenet.model' args['devices']='1' args['clusterSize']='1' cfg=Config(sc,args) dl_train_source = DataSource(sc).getSource(cfg,True) cos.train(dl_train_source) # # Test dl_test_source = DataSource(sc).getSource(cfg,False) test_result=cos.test(dl_test_source) test_result # # Feature Extraction args['features']='accuracy,ip1,ip2' args['label']='label' cfg=Config(sc,args) dl_feature_source = DataSource(sc).getSource(cfg,False) f=cos.features(dl_feature_source) f.show(5) def maxScoreAndIndex(array_of_scores): return max(enumerate(array_of_scores), key=lambda x: x[1]) g = sqlContext.createDataFrame(f.map(lambda row: ( row.SampleID, row.accuracy[0], row.ip2, maxScoreAndIndex(row.ip2)[1], maxScoreAndIndex(row.ip2)[0], int(row.label[0]))), ["SampleID", "Accuracy", "Scores", "MaxScore", "Prediction", "Label"]) g.toPandas()[:5] # ### Logistic Regression using MLlib from pyspark.mllib.linalg import Vectors from pyspark.mllib.regression import LabeledPoint from pyspark.mllib.classification import LogisticRegressionWithLBFGS data = f.map(lambda row: LabeledPoint(row.label[0], Vectors.dense(row.ip1))) lr = LogisticRegressionWithLBFGS.train(data, numClasses=10, iterations=10) predictions = lr.predict(data.map(lambda pt : pt.features)) predictions.take(5)
caffe-grid/src/main/python/examples/DLDemo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} tags=[] # # Reducing Failure-Inducing Inputs # # By construction, fuzzers create inputs that may be hard to read. This causes issues during _debugging_, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that _automatically reduce and simplify failure-inducing inputs to a minimum_ in order to ease debugging. # + slideshow={"slide_type": "skip"} from bookutils import YouTubeVideo YouTubeVideo('noJUPjSJVh0') # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # **Prerequisites** # # * The simple "delta debugging" technique for reduction has no specific prerequisites. # * As reduction is typically used together with fuzzing, reading the [chapter on basic fuzzing](Fuzzer.ipynb) is a good idea. # * The later grammar-based techniques require knowledge on [derivation trees](GrammarFuzzer.ipynb) and [parsing](Parser.ipynb). # + [markdown] slideshow={"slide_type": "skip"} # ## Synopsis # <!-- Automatically generated. Do not edit. --> # # To [use the code provided in this chapter](Importing.ipynb), write # # ```python # >>> from fuzzingbook.Reducer import <identifier> # ``` # # and then make use of the following features. # # # A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. 
# # Here is a simple example: An arithmetic expression causes an error in the Python interpreter: # # ```python # >>> !python -c 'x = 1 + 2 * 3 / 0' # Traceback (most recent call last): # File "<string>", line 1, in <module> # ZeroDivisionError: division by zero # # ``` # Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs. # # ```python # >>> from Fuzzer import ProgramRunner # >>> import subprocess # >>> class ZeroDivisionRunner(ProgramRunner): # >>> """Make outcome 'FAIL' if ZeroDivisionError occurs""" # >>> # >>> def run(self, inp: str = "") -> Tuple[subprocess.CompletedProcess, Outcome]: # >>> process, outcome = super().run(inp) # >>> if process.stderr.find('ZeroDivisionError') >= 0: # >>> outcome = 'FAIL' # >>> return process, outcome # ``` # If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed. # # ```python # >>> python_input = "x = 1 + 2 * 3 / 0" # >>> python_runner = ZeroDivisionRunner("python") # >>> process, outcome = python_runner.run(python_input) # >>> outcome # 'FAIL' # ``` # Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail: # # ```python # >>> dd = DeltaDebuggingReducer(python_runner) # >>> dd.reduce(python_input) # '3/0' # ``` # The input is reduced to the minimum: We get the essence of the division by zero. # # ![](PICS/Reducer-synopsis-1.svg) # # # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Why Reducing? # # At this point, we have seen a number of test generation techniques that all in some form produce inputs in order to trigger failures. 
If they are successful – that is, the program actually fails – we must find out why the failure occurred and how to fix it. # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "fragment"} # Here's an example of such a situation. We have a class `MysteryRunner` with a `run()` method that – given its code – can occasionally fail. But under which circumstances does this actually happen? We have deliberately obscured the exact condition in order to make this non-obvious. # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} import bookutils # + slideshow={"slide_type": "skip"} from bookutils import quiz # + slideshow={"slide_type": "skip"} from typing import Tuple, List, Sequence, Any, Optional # + slideshow={"slide_type": "skip"} from ExpectError import ExpectError # + slideshow={"slide_type": "skip"} tags=[] from Fuzzer import RandomFuzzer, Runner, Outcome # + slideshow={"slide_type": "skip"} import re # + slideshow={"slide_type": "subslide"} class MysteryRunner(Runner): def run(self, inp: str) -> Tuple[str, Outcome]: x = inp.find(chr(0o17 + 0o31)) y = inp.find(chr(0o27 + 0o22)) if x >= 0 and y >= 0 and x < y: return (inp, Runner.FAIL) else: return (inp, Runner.PASS) # + [markdown] slideshow={"slide_type": "fragment"} # Let us fuzz the function until we find a failing input. # + slideshow={"slide_type": "subslide"} mystery = MysteryRunner() random_fuzzer = RandomFuzzer() while True: inp = random_fuzzer.fuzz() result, outcome = mystery.run(inp) if outcome == mystery.FAIL: break # + slideshow={"slide_type": "fragment"} failing_input = result failing_input # + [markdown] slideshow={"slide_type": "fragment"} # Something in this input causes `MysteryRunner` to fail. But what is it? 
# + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Manual Input Reduction # # One important step in the debugging process is _reduction_ – that is, to identify those circumstances of a failure that are relevant for the failure to occur, and to _omit_ (if possible) those parts that are not. As Kernighan and Pike \cite{Kernighan1999} put it: # # > For every circumstance of the problem, check whether it is relevant for the problem to occur. If it is not, remove it from the problem report or the test case in question. # + [markdown] slideshow={"slide_type": "subslide"} # Specifically for inputs, they suggest a _divide and conquer_ process: # # > Proceed by binary search. Throw away half the input and see if the output is still wrong; if not, go back to the previous state and discard the other half of the input. # # This is something we can easily try out, using our last generated input: # + slideshow={"slide_type": "fragment"} failing_input # + [markdown] slideshow={"slide_type": "fragment"} # For instance, we can see whether the error still occurs if we only feed in the first half: # + slideshow={"slide_type": "subslide"} half_length = len(failing_input) // 2 # // is integer division first_half = failing_input[:half_length] mystery.run(first_half) # + [markdown] slideshow={"slide_type": "fragment"} # Nope – the first half alone does not suffice. Maybe the second half? # + slideshow={"slide_type": "fragment"} second_half = failing_input[half_length:] mystery.run(second_half) # + [markdown] slideshow={"slide_type": "fragment"} # This did not go so well either. We may still proceed by cutting away _smaller chunks_ – say, one character after another. If our test is deterministic and easily repeated, it is clear that this process eventually will yield a reduced input. But still, it is a rather inefficient process, especially for long inputs. 
What we need is a _strategy_ that effectively minimizes a failure-inducing input – a strategy that can be automated. # + [markdown] slideshow={"slide_type": "slide"} # ## Delta Debugging # + [markdown] slideshow={"slide_type": "fragment"} # One strategy to effectively reduce failure-inducing inputs is _delta debugging_ \cite{Zeller2002}. Delta Debugging implements the "binary search" strategy, as listed above, but with a twist: If neither half fails (also as above), it keeps on cutting away smaller and smaller chunks from the input, until it eliminates individual characters. Thus, after cutting away the first half, we cut away # the first quarter, the second quarter, and so on. # + [markdown] slideshow={"slide_type": "fragment"} # Let us illustrate this on our example, and see what happens if we cut away the first quarter. # + slideshow={"slide_type": "subslide"} quarter_length = len(failing_input) // 4 input_without_first_quarter = failing_input[quarter_length:] mystery.run(input_without_first_quarter) # + [markdown] slideshow={"slide_type": "fragment"} # Ah! This has failed, and reduced our failing input by 25%. Let's remove another quarter. # + slideshow={"slide_type": "fragment"} input_without_first_and_second_quarter = failing_input[quarter_length * 2:] mystery.run(input_without_first_and_second_quarter) # + [markdown] slideshow={"slide_type": "fragment"} # This is not too surprising, as we had that one before: # + slideshow={"slide_type": "fragment"} second_half # + slideshow={"slide_type": "fragment"} input_without_first_and_second_quarter # + [markdown] slideshow={"slide_type": "subslide"} # How about removing the third quarter, then? # + slideshow={"slide_type": "fragment"} input_without_first_and_third_quarter = failing_input[quarter_length: quarter_length * 2] + failing_input[quarter_length * 3:] mystery.run(input_without_first_and_third_quarter) # + [markdown] slideshow={"slide_type": "fragment"} # Ok. Let us remove the fourth quarter. 
# + slideshow={"slide_type": "fragment"} input_without_first_and_fourth_quarter = failing_input[quarter_length:quarter_length * 3] mystery.run(input_without_first_and_fourth_quarter) # + [markdown] slideshow={"slide_type": "fragment"} # Yes! This has succeeded. Our input is now 50% smaller. # + [markdown] slideshow={"slide_type": "fragment"} # We have now tried to remove pieces that make up $\frac{1}{2}$ and $\frac{1}{4}$ of the original failing string. In the next iteration, we would go and remove even smaller pieces – $\frac{1}{8}$, $\frac{1}{16}$ and so on. We continue until we are down to $\frac{1}{97}$ – that is, individual characters. # + [markdown] slideshow={"slide_type": "subslide"} # However, this is something we happily let a computer do for us. We first introduce a `Reducer` class as an abstract superclass for all kinds of reducers. The `test()` method runs a single test (with logging, if wanted); the `reduce()` method will eventually reduce an input to the minimum. # + slideshow={"slide_type": "subslide"} class Reducer: """Base class for reducers.""" def __init__(self, runner: Runner, log_test: bool = False) -> None: """Attach reducer to the given `runner`""" self.runner = runner self.log_test = log_test self.reset() def reset(self) -> None: """Reset the test counter to zero. To be extended in subclasses.""" self.tests = 0 def test(self, inp: str) -> Outcome: """Test with input `inp`. Return outcome. To be extended in subclasses.""" result, outcome = self.runner.run(inp) self.tests += 1 if self.log_test: print("Test #%d" % self.tests, repr(inp), repr(len(inp)), outcome) return outcome def reduce(self, inp: str) -> str: """Reduce input `inp`. Return reduced input. 
To be defined in subclasses.""" self.reset() # Default: Don't reduce return inp # + [markdown] slideshow={"slide_type": "subslide"} # The `CachingReducer` variant saves test results, such that we don't have to run the same tests again and again: # + slideshow={"slide_type": "subslide"} class CachingReducer(Reducer): """A reducer that also caches test outcomes""" def reset(self): super().reset() self.cache = {} def test(self, inp): if inp in self.cache: return self.cache[inp] outcome = super().test(inp) self.cache[inp] = outcome return outcome # + [markdown] slideshow={"slide_type": "subslide"} # Here comes the _Delta Debugging_ reducer. Delta Debugging implements the strategy sketched above: It first removes larger chunks of size $\frac{1}{2}$; if this does not fail, then we proceed to chunks of size $\frac{1}{4}$, then $\frac{1}{8}$ and so on. # + [markdown] slideshow={"slide_type": "fragment"} # Our implementation uses almost the same Python code as Zeller in \cite{Zeller2002}; the only difference is that it has been adapted to work on Python 3 and our `Runner` framework. The variable `n` (initially 2) indicates the granularity – in each step, chunks of size $\frac{1}{n}$ are cut away. If none of the test fails (`some_complement_is_failing` is False), then `n` is doubled – until it reaches the length of the input. # + slideshow={"slide_type": "subslide"} class DeltaDebuggingReducer(CachingReducer): """Reduce inputs using delta debugging.""" def reduce(self, inp: str) -> str: """Reduce input `inp` using delta debugging. 
Return reduced input.""" self.reset() assert self.test(inp) != Runner.PASS n = 2 # Initial granularity while len(inp) >= 2: start = 0.0 subset_length = len(inp) / n some_complement_is_failing = False while start < len(inp): complement = inp[:int(start)] + \ inp[int(start + subset_length):] if self.test(complement) == Runner.FAIL: inp = complement n = max(n - 1, 2) some_complement_is_failing = True break start += subset_length if not some_complement_is_failing: if n == len(inp): break n = min(n * 2, len(inp)) return inp # + [markdown] slideshow={"slide_type": "subslide"} # To see how the `DeltaDebuggingReducer` works, let us run it on our failing input. With each step, we see how the remaining input gets smaller and smaller, until only two characters remain: # + slideshow={"slide_type": "subslide"} dd_reducer = DeltaDebuggingReducer(mystery, log_test=True) dd_reducer.reduce(failing_input) # + [markdown] slideshow={"slide_type": "subslide"} # Now we know why `MysteryRunner` fails – it suffices that the input contains two matching parentheses. Delta Debugging determines this in 29 steps. Its result is _1-minimal_, meaning that every character contained is required to produce the error; removing any (as seen in tests `#27` and `#29`, above) no longer makes the test fail. This property is guaranteed by the delta debugging algorithm, which in its last stage always tries to delete characters one by one. # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "subslide"} # A reduced test case such as the one above has many advantages: # # * A reduced test case __reduces the _cognitive load_ of the programmer__. The test case is shorter and focused, and thus does not burden the programmer with irrelevant details. A reduced input typically leads to shorter executions and smaller program states, both of which reduce the search space as it comes to understanding the bug. 
In our case, we have eliminated lots of irrelevant input – only the two characters the reduced input contains are relevant.

* A reduced test case __is easier to communicate__. All one needs here is the summary: `MysteryRunner fails on "()"`, which is much better than `MysteryRunner fails on a 4100-character input (attached)`.

* A reduced test case helps in __identifying duplicates__. If similar bugs have been reported already, and all of them have been reduced to the same cause (namely that the input contains matching parentheses), then it becomes obvious that all these bugs are different symptoms of the same underlying cause – and would all be resolved at once with one code fix.

# + [markdown] slideshow={"slide_type": "subslide"}
How effective is delta debugging? In the best case (when the left half or the right half fails), the number of tests is logarithmically proportional to the length $n$ of an input (i.e., $O(\log_2 n)$); this is the same complexity as binary search. In the worst case, though, delta debugging can require a number of tests proportional to $n^2$ (i.e., $O(n^2)$) – this happens in the case when we are down to character granularity, and we repeatedly try to delete all characters, only to find that deleting the last character results in a failure \cite{Zeller2002}. (This is a pretty pathological situation, though.)

# + [markdown] slideshow={"slide_type": "fragment"}
In general, delta debugging is a robust algorithm that is easy to implement, easy to deploy, and easy to use – provided that the underlying test case is deterministic and runs quickly enough to warrant a number of experiments. As these are the same prerequisites that make fuzzing effective, delta debugging makes an excellent companion to fuzzing.
# + slideshow={"slide_type": "subslide"} quiz("What happens if the function under test does not fail?", [ "Delta debugging searches for the minimal input" " that produces the same result", "Delta debugging starts a fuzzer to find a failure", "Delta debugging raises an AssertionError", "Delta debugging runs forever in a loop", ], '0 ** 0 + 1 ** 0 + 0 ** 1 + 1 ** 1') # + [markdown] slideshow={"slide_type": "fragment"} # Indeed, `DeltaDebugger` checks if its assumptions hold. If not, an assertion fails. # + slideshow={"slide_type": "subslide"} with ExpectError(): dd_reducer.reduce("I am a passing input") # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} toc-hr-collapsed=false # ## Grammar-Based Input Reduction # # If the input language is syntactically complex, delta debugging may take several attempts at reduction, and may not be able to reduce inputs at all. In the second half of this chapter, we thus introduce an algorithm named _Grammar-Based Reduction_ (or GRABR for short) that makes use of _grammars_ to reduce syntactically complex inputs. # + [markdown] slideshow={"slide_type": "subslide"} # ### Lexical Reduction vs. Syntactic Rules # # Despite its general robustness, there are situations in which delta debugging might be inefficient or outright fail. As an example, consider some _expression input_ such as `1 + (2 * 3)`. Delta debugging requires a number of tests to simplify the failure-inducing input, but it eventually returns a minimal input # + slideshow={"slide_type": "subslide"} expr_input = "1 + (2 * 3)" dd_reducer = DeltaDebuggingReducer(mystery, log_test=True) dd_reducer.reduce(expr_input) # + [markdown] slideshow={"slide_type": "subslide"} # Looking at the tests, above, though, only few of them actually represent syntactically valid arithmetic expressions. 
In a practical setting, we may want to test a program which actually _parses_ such expressions, and which would _reject_ all invalid inputs. We define a class `EvalMysteryRunner` which first _parses_ the given input (according to the rules of our expression grammar), and _only_ if it fits would it be passed to our original `MysteryRunner`. This simulates a setting in which we test an expression interpreter, and in which only valid inputs can trigger the bug.

# + slideshow={"slide_type": "skip"}
from Grammars import EXPR_GRAMMAR

# + slideshow={"slide_type": "skip"}
from Parser import EarleyParser, Parser  # minor dependency

# + slideshow={"slide_type": "subslide"}
class EvalMysteryRunner(MysteryRunner):
    """A `MysteryRunner` that only accepts syntactically valid expression inputs.

    Inputs that do not parse according to `EXPR_GRAMMAR` yield the
    `UNRESOLVED` outcome (neither PASS nor FAIL), so a reducer cannot
    treat them as progress.
    """

    def __init__(self) -> None:
        # NOTE(review): does not invoke super().__init__() — presumably the
        # Runner base class needs no initialization; confirm against Runner.
        self.parser = EarleyParser(EXPR_GRAMMAR)

    def run(self, inp: str) -> Tuple[str, Outcome]:
        """Parse `inp` first; if parsing fails, return UNRESOLVED.
        Otherwise, defer to `MysteryRunner.run()`."""
        try:
            tree, *_ = self.parser.parse(inp)
        except SyntaxError:
            # Invalid input: report neither PASS nor FAIL,
            # so reduction attempts that break syntax are rejected
            return (inp, Runner.UNRESOLVED)

        return super().run(inp)

# + slideshow={"slide_type": "fragment"}
eval_mystery = EvalMysteryRunner()

# + [markdown] slideshow={"slide_type": "fragment"}
Under these circumstances, it turns out that delta debugging utterly fails. None of the reductions it applies yield a syntactically valid input, so the input as a whole remains as complex as it was before.

# + slideshow={"slide_type": "subslide"}
dd_reducer = DeltaDebuggingReducer(eval_mystery, log_test=True)
dd_reducer.reduce(expr_input)

# + [markdown] slideshow={"slide_type": "subslide"}
This behavior is possible if the program under test has several constraints regarding input validity. Delta debugging is not aware of these constraints (nor of the input structure in general), so it might violate these constraints again and again.
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # ### A Grammmar-Based Reduction Approach # # To reduce inputs with high syntactical complexity, we use another approach: Rather than reducing the input string, we reduce the _tree_ representing its structure. The general idea is to start with a _derivation tree_ coming from parsing the input, and then _substitute subtrees by smaller subtrees of the same type_. These alternate subtrees can either come # # 1. From the tree itself, or # 2. By applying an alternate grammar expansion using elements from the tree. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # Let us show these two strategies using an example. We start with a derivation tree from an arithmetic expression: # + slideshow={"slide_type": "skip"} from Grammars import Grammar from GrammarFuzzer import all_terminals, expansion_to_children, display_tree # + slideshow={"slide_type": "subslide"} derivation_tree, *_ = EarleyParser(EXPR_GRAMMAR).parse(expr_input) display_tree(derivation_tree) # + [markdown] slideshow={"slide_type": "subslide"} # ### Simplifying by Replacing Subtrees # + [markdown] slideshow={"slide_type": "fragment"} # To simplify this tree, we could replace any `<expr>` symbol up in the tree with some `<expr>` subtree down in the tree. 
For instance, we could replace the uppermost `<expr>` with its right `<expr>` subtree, yielding the string `(2 + 3)`: # + slideshow={"slide_type": "skip"} import copy # + slideshow={"slide_type": "fragment"} new_derivation_tree = copy.deepcopy(derivation_tree) # We really should have some query language sub_expr_tree = new_derivation_tree[1][0][1][2] display_tree(sub_expr_tree) # + slideshow={"slide_type": "fragment"} new_derivation_tree[1][0] = sub_expr_tree display_tree(new_derivation_tree) # + slideshow={"slide_type": "fragment"} all_terminals(new_derivation_tree) # + [markdown] slideshow={"slide_type": "subslide"} # Replacing one subtree by another only works as long as individual elements such as `<expr>` occur multiple times in our tree. In the reduced `new_derivation_tree`, above, we could replace further `<expr>` trees only once more. # + [markdown] slideshow={"slide_type": "subslide"} # ### Simplifying by Alternative Expansions # + [markdown] slideshow={"slide_type": "fragment"} # A second means to simplify this tree is to apply _alternative expansions_. That is, for a symbol, we check whether there is an alternative expansion with a smaller number of children. Then, we replace the symbol with the alternative expansion, filling in needed symbols from the tree. # + [markdown] slideshow={"slide_type": "subslide"} # As an example, consider the `new_derivation_tree` above. 
The applied expansion for `<term>` has been # # <term> ::= <term> * <factor> # # Let us replace this with the alternative expansion: # # <term> ::= <factor> # + slideshow={"slide_type": "fragment"} term_tree = new_derivation_tree[1][0][1][0][1][0][1][1][1][0] display_tree(term_tree) # + slideshow={"slide_type": "subslide"} shorter_term_tree = term_tree[1][2] display_tree(shorter_term_tree) # + slideshow={"slide_type": "fragment"} new_derivation_tree[1][0][1][0][1][0][1][1][1][0] = shorter_term_tree display_tree(new_derivation_tree) # + slideshow={"slide_type": "fragment"} all_terminals(new_derivation_tree) # + [markdown] slideshow={"slide_type": "fragment"} # If we replace derivation subtrees by (smaller) subtrees, and if we search for alternate expansions that again yield smaller subtrees, we can systematically simplify the input. This could be much faster than delta debugging, as our inputs would always be syntactically valid. However, we need a strategy for when to apply which simplification rule. This is what we develop in the remainder of this section. # + [markdown] slideshow={"slide_type": "subslide"} # ### Excursion: A Class for Reducing with Grammars # + [markdown] slideshow={"slide_type": "fragment"} # We introduce the `GrammarReducer` class, which is again a `Reducer`. Note that we derive from `CachingReducer`, as the strategy will produce several duplicates. # + slideshow={"slide_type": "subslide"} class GrammarReducer(CachingReducer): """Reduce inputs using grammars""" def __init__(self, runner: Runner, parser: Parser, *, log_test: bool = False, log_reduce: bool = False): """Constructor. `runner` is the runner to be used. `parser` is the parser to be used. `log_test` - if set, show tests and results. `log_reduce` - if set, show reduction steps. 
""" super().__init__(runner, log_test=log_test) self.parser = parser self.grammar = parser.grammar() self.start_symbol = parser.start_symbol() self.log_reduce = log_reduce self.try_all_combinations = False # + [markdown] slideshow={"slide_type": "subslide"} # #### A Few Helpers # # We define a number of helper functions, which we will need for our strategy. `tree_list_to_string()` does what the name suggest, creating a string from a list of derivation trees: # + slideshow={"slide_type": "skip"} from GrammarFuzzer import DerivationTree # + slideshow={"slide_type": "fragment"} def tree_list_to_string(q: List[DerivationTree]) -> str: return "[" + ", ".join([all_terminals(tree) for tree in q]) + "]" # + slideshow={"slide_type": "fragment"} tree_list_to_string([derivation_tree, derivation_tree]) # + [markdown] slideshow={"slide_type": "fragment"} # The function `possible_combinations()` takes a list of lists $[[x_1, x_2], [y_1, y_2], \dots]$ and creates a list of combinations $[[x_1, y_1], [x_1, y_2], [x_2, y_1], [x_2, y_2], \dots]$. # + slideshow={"slide_type": "subslide"} def possible_combinations(list_of_lists: List[List[Any]]) -> List[List[Any]]: if len(list_of_lists) == 0: return [] ret = [] for e in list_of_lists[0]: if len(list_of_lists) == 1: ret.append([e]) else: for c in possible_combinations(list_of_lists[1:]): new_combo = [e] + c ret.append(new_combo) return ret # + slideshow={"slide_type": "subslide"} possible_combinations([[1, 2], ['a', 'b']]) # + [markdown] slideshow={"slide_type": "fragment"} # The functions `number_of_nodes()` and `max_height()` return the number of nodes and the maximum height of the given tree, respectively. 
# + slideshow={"slide_type": "fragment"} def number_of_nodes(tree: DerivationTree) -> int: (symbol, children) = tree if children is None: return 1 return 1 + sum([number_of_nodes(c) for c in children]) # + slideshow={"slide_type": "fragment"} number_of_nodes(derivation_tree) # + slideshow={"slide_type": "subslide"} def max_height(tree: DerivationTree) -> int: (symbol, children) = tree if children is None or len(children) == 0: return 1 return 1 + max([max_height(c) for c in children]) # + slideshow={"slide_type": "fragment"} max_height(derivation_tree) # + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=false # #### Simplification Strategies # # Let us now implement our two simplification strategies – replacing subtrees and alternate expansions. # + [markdown] slideshow={"slide_type": "subslide"} # ##### Finding Subtrees # # The method `subtrees_with_symbol()` returns all subtrees in the given tree which's root is equal to the given symbol. If `ignore_root` is set (default), then the root node of `tree` is not compared against. (The `depth` parameter will be discussed below.) # + slideshow={"slide_type": "subslide"} class GrammarReducer(GrammarReducer): def subtrees_with_symbol(self, tree: DerivationTree, symbol: str, depth: int = -1, ignore_root: bool = True) -> List[DerivationTree]: """Find all subtrees in `tree` whose root is `symbol`. If `ignore_root` is true, ignore the root note of `tree`.""" ret = [] (child_symbol, children) = tree if depth <= 0 and not ignore_root and child_symbol == symbol: ret.append(tree) # Search across all children if depth != 0 and children is not None: for c in children: ret += self.subtrees_with_symbol(c, symbol, depth=depth - 1, ignore_root=False) return ret # + [markdown] slideshow={"slide_type": "subslide"} # Here's an example: These are all subtrees with `<term>` in our derivation tree `derivation_tree`. 
# + slideshow={"slide_type": "fragment"} grammar_reducer = GrammarReducer( mystery, EarleyParser(EXPR_GRAMMAR), log_reduce=True) # + slideshow={"slide_type": "fragment"} all_terminals(derivation_tree) # + slideshow={"slide_type": "fragment"} [all_terminals(t) for t in grammar_reducer.subtrees_with_symbol( derivation_tree, "<term>")] # + [markdown] slideshow={"slide_type": "fragment"} # If we want to replace `<term>` subtrees to simplify the tree, these are the subtrees we could replace them with. # + [markdown] slideshow={"slide_type": "subslide"} # ##### Alternate Expansions # + [markdown] slideshow={"slide_type": "fragment"} # Our second strategy, simplifying by alternate expansions, is a bit more complex. We first fetch the possible expansions for the given symbol (starting with the ones with the fewest children). For each expansion, we fill in values for the symbols from the subtree (using `subtrees_with_symbols()`, above). We then pick the first possible combination (or _all_ combinations, if the attribute `try_all_combinations` is set). 
# + slideshow={"slide_type": "subslide"} class GrammarReducer(GrammarReducer): def alternate_reductions(self, tree: DerivationTree, symbol: str, depth: int = -1): reductions = [] expansions = self.grammar.get(symbol, []) expansions.sort( key=lambda expansion: len( expansion_to_children(expansion))) for expansion in expansions: expansion_children = expansion_to_children(expansion) match = True new_children_reductions = [] for (alt_symbol, _) in expansion_children: child_reductions = self.subtrees_with_symbol( tree, alt_symbol, depth=depth) if len(child_reductions) == 0: match = False # Child not found; cannot apply rule break new_children_reductions.append(child_reductions) if not match: continue # Try next alternative # Use the first suitable combination for new_children in possible_combinations(new_children_reductions): new_tree = (symbol, new_children) if number_of_nodes(new_tree) < number_of_nodes(tree): reductions.append(new_tree) if not self.try_all_combinations: break # Sort by number of nodes reductions.sort(key=number_of_nodes) return reductions # + slideshow={"slide_type": "subslide"} grammar_reducer = GrammarReducer( mystery, EarleyParser(EXPR_GRAMMAR), log_reduce=True) # + slideshow={"slide_type": "fragment"} all_terminals(derivation_tree) # + [markdown] slideshow={"slide_type": "fragment"} # Here are _all_ combinations for `<term>`: # + slideshow={"slide_type": "fragment"} grammar_reducer.try_all_combinations = True print([all_terminals(t) for t in grammar_reducer.alternate_reductions(derivation_tree, "<term>")]) # + [markdown] slideshow={"slide_type": "fragment"} # The default, though, is simply to return the first of these: # + slideshow={"slide_type": "subslide"} grammar_reducer.try_all_combinations = False [all_terminals(t) for t in grammar_reducer.alternate_reductions( derivation_tree, "<term>")] # + [markdown] slideshow={"slide_type": "subslide"} # ##### Both Strategies Together # + [markdown] slideshow={"slide_type": "fragment"} # Let us now 
merge both strategies. To replace a subtree with a given symbol, we first search for already existing subtrees (using `subtrees_with_symbol()`); then we go for alternate expansions (using `alternate_reductions()`).

# + slideshow={"slide_type": "subslide"}
class GrammarReducer(GrammarReducer):
    def symbol_reductions(self, tree: DerivationTree, symbol: str,
                          depth: int = -1):
        """Find all alternative trees for the given symbol:
        first existing subtrees, then alternate grammar expansions."""
        reductions = (
            self.subtrees_with_symbol(tree, symbol, depth=depth)
            + self.alternate_reductions(tree, symbol, depth=depth))

        # Filter duplicates, preserving order. Trees contain lists and thus
        # are unhashable, so a set cannot be used here.
        unique_reductions = []
        for r in reductions:
            if r not in unique_reductions:
                unique_reductions.append(r)

        return unique_reductions

# + slideshow={"slide_type": "subslide"}
grammar_reducer = GrammarReducer(
    mystery,
    EarleyParser(EXPR_GRAMMAR),
    log_reduce=True)

# + slideshow={"slide_type": "fragment"}
all_terminals(derivation_tree)

# + [markdown] slideshow={"slide_type": "fragment"}
These are the possible reductions for `<expr>` nodes. Note how we first return subtrees (`1 + (2 * 3)`, `(2 * 3)`, `2 * 3`) before going for alternate expansions of `<expr>` (`1`).

# + slideshow={"slide_type": "fragment"}
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<expr>")
tree_list_to_string([r for r in reductions])

# + [markdown] slideshow={"slide_type": "fragment"}
These are the possible reductions for `<term>` nodes. Again, we first have subtrees of the derivation tree, followed by the alternate expansion `1 * 1`.

# + slideshow={"slide_type": "subslide"}
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<term>")
tree_list_to_string([r for r in reductions])

# + [markdown] slideshow={"slide_type": "subslide"}
#### The Reduction Strategy

We are now able to return a number of alternatives for each symbol in the tree. This is what we apply in the core function of our reduction strategy, `reduce_subtree()`. Starting with `subtree`, for every child, we find possible reductions.
For every reduction, we replace the child with the reduction and test the resulting (full) tree. If it fails, our reduction was successful; otherwise, we put the child back into place and try out the next reduction. Eventually, we apply `reduce_subtree()` on all children, reducing these as well. # + slideshow={"slide_type": "subslide"} class GrammarReducer(GrammarReducer): def reduce_subtree(self, tree: DerivationTree, subtree: DerivationTree, depth: int = -1): symbol, children = subtree if children is None or len(children) == 0: return False if self.log_reduce: print("Reducing", all_terminals(subtree), "with depth", depth) reduced = False while True: reduced_child = False for i, child in enumerate(children): if child is None: continue (child_symbol, _) = child for reduction in self.symbol_reductions( child, child_symbol, depth): if number_of_nodes(reduction) >= number_of_nodes(child): continue # Try this reduction if self.log_reduce: print( "Replacing", all_terminals( children[i]), "by", all_terminals(reduction)) children[i] = reduction if self.test(all_terminals(tree)) == Runner.FAIL: # Success if self.log_reduce: print("New tree:", all_terminals(tree)) reduced = reduced_child = True break else: # Didn't work out - restore children[i] = child if not reduced_child: if self.log_reduce: print("Tried all alternatives for", all_terminals(subtree)) break # Run recursively for c in children: if self.reduce_subtree(tree, c, depth): reduced = True return reduced # + [markdown] slideshow={"slide_type": "subslide"} # All we now need is a few drivers. 
The method `reduce_tree()` is the main entry point into `reduce_subtree()`:

# + slideshow={"slide_type": "fragment"}
class GrammarReducer(GrammarReducer):
    def reduce_tree(self, tree):
        """Reduce `tree` in place; return True iff a reduction took place."""
        return self.reduce_subtree(tree, tree)

# + [markdown] slideshow={"slide_type": "fragment"}
The custom method `parse()` turns a given input into a derivation tree:

# + slideshow={"slide_type": "fragment"}
class GrammarReducer(GrammarReducer):
    def parse(self, inp):
        """Parse `inp` into a derivation tree, using the attached parser."""
        tree, *_ = self.parser.parse(inp)
        if self.log_reduce:
            print(all_terminals(tree))
        return tree

# + [markdown] slideshow={"slide_type": "fragment"}
The method `reduce()` is the one single entry point, parsing the input and then reducing it.

# + slideshow={"slide_type": "subslide"}
class GrammarReducer(GrammarReducer):
    def reduce(self, inp):
        """Reduce the string `inp`; return the reduced string.
        `inp` must be parseable with the attached parser."""
        tree = self.parse(inp)
        # `reduce_tree()` mutates `tree`; the reduced string is read back
        # from the tree's terminal symbols
        self.reduce_tree(tree)
        return all_terminals(tree)

# + [markdown] slideshow={"slide_type": "subslide"}
### End of Excursion

# + [markdown] slideshow={"slide_type": "fragment"}
Let us try out our `GrammarReducer` class in practice on our input `expr_input` and the `mystery()` function. How quickly can we reduce it?

# + slideshow={"slide_type": "fragment"}
expr_input

# + slideshow={"slide_type": "subslide"}
grammar_reducer = GrammarReducer(
    eval_mystery,
    EarleyParser(EXPR_GRAMMAR),
    log_test=True)
grammar_reducer.reduce(expr_input)

# + [markdown] slideshow={"slide_type": "subslide"}
Success! In only five steps, our `GrammarReducer` reduces the input to the minimum that causes the failure. Note how all tests are syntactically valid by construction, avoiding the `UNRESOLVED` outcomes that cause delta debugging to stall.

# + [markdown] slideshow={"slide_type": "subslide"}
### A Depth-Oriented Strategy

# + [markdown] slideshow={"slide_type": "fragment"}
Even if five steps are already good, we can still do better.
If we look at the log above, we see that after test `#2`, where the input (tree) is reduced to `2 * 3`, our `GrammarReducer` first tries to replace the tree with `2` and `3`, which are the alternate `<term>` subtrees. This may work, of course; but if there are many possible subtrees, our strategy will spend quite some time trying one after the other. # + [markdown] slideshow={"slide_type": "subslide"} # Delta debugging, as introduced above, follows the idea of trying to cut inputs approximately in half, and thus quickly proceeds towards a minimal input. By replacing a tree with much smaller subtrees, we _could_ possibly reduce a tree significantly, but may need several attempts to do so. A better strategy is to only consider _large_ subtrees first – both for the subtree replacement as well as for alternate expansions. To find such _large_ subtrees, we limit the _depth_ by which we search for possible replacements in the subtree – first, by looking at the direct descendants, later at lower descendants. # + [markdown] slideshow={"slide_type": "fragment"} # This is the role of the `depth` parameter used in `subtrees_with_symbol()` and passed through the invoking functions. If set, _only_ symbols at the given depth are returned. 
Here's an example, starting again with our derivation tree `derivation_tree`: # + slideshow={"slide_type": "subslide"} grammar_reducer = GrammarReducer( mystery, EarleyParser(EXPR_GRAMMAR), log_reduce=True) # + slideshow={"slide_type": "fragment"} all_terminals(derivation_tree) # + slideshow={"slide_type": "fragment"} display_tree(derivation_tree) # + [markdown] slideshow={"slide_type": "fragment"} # At a depth of 1, there is no `<term>` symbol: # + slideshow={"slide_type": "fragment"} [all_terminals(t) for t in grammar_reducer.subtrees_with_symbol( derivation_tree, "<term>", depth=1)] # + [markdown] slideshow={"slide_type": "fragment"} # At a depth of 2, we have the `<term>` subtree on the left hand side: # + slideshow={"slide_type": "subslide"} [all_terminals(t) for t in grammar_reducer.subtrees_with_symbol( derivation_tree, "<term>", depth=2)] # + [markdown] slideshow={"slide_type": "fragment"} # At a depth of 3, we have the `<term>` subtree on the right hand side: # + slideshow={"slide_type": "fragment"} [all_terminals(t) for t in grammar_reducer.subtrees_with_symbol( derivation_tree, "<term>", depth=3)] # + [markdown] slideshow={"slide_type": "fragment"} # The idea is now to start with a depth of 0, subsequently increasing it as we proceed: # + slideshow={"slide_type": "subslide"} class GrammarReducer(GrammarReducer): def reduce_tree(self, tree): depth = 0 while depth < max_height(tree): reduced = self.reduce_subtree(tree, tree, depth) if reduced: depth = 0 # Start with new tree else: depth += 1 # Extend search for subtrees return tree # + slideshow={"slide_type": "subslide"} grammar_reducer = GrammarReducer( mystery, EarleyParser(EXPR_GRAMMAR), log_test=True) grammar_reducer.reduce(expr_input) # + [markdown] slideshow={"slide_type": "fragment"} # We see that a depth-oriented strategy needs even fewer steps in our setting. 
# + [markdown] slideshow={"slide_type": "subslide"} # ### Comparing Strategies # # We close by demonstrating the difference between text-based delta debugging and our grammar-based reduction. We build a very long expression: # + slideshow={"slide_type": "skip"} from GrammarFuzzer import GrammarFuzzer # + slideshow={"slide_type": "fragment"} long_expr_input = GrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=100).fuzz() long_expr_input # + [markdown] slideshow={"slide_type": "fragment"} # With grammars, we need only a handful of tests to find the failure-inducing input: # + slideshow={"slide_type": "skip"} from Timer import Timer # + slideshow={"slide_type": "fragment"} grammar_reducer = GrammarReducer(eval_mystery, EarleyParser(EXPR_GRAMMAR)) with Timer() as grammar_time: print(grammar_reducer.reduce(long_expr_input)) # + slideshow={"slide_type": "subslide"} grammar_reducer.tests # + slideshow={"slide_type": "fragment"} grammar_time.elapsed_time() # + [markdown] slideshow={"slide_type": "fragment"} # Delta debugging, in contrast, requires orders of magnitude more tests (and consequently, time). Again, the reduction is not closely as perfect as it is with the grammar-based reducer. # + slideshow={"slide_type": "fragment"} dd_reducer = DeltaDebuggingReducer(eval_mystery) with Timer() as dd_time: print(dd_reducer.reduce(long_expr_input)) # + slideshow={"slide_type": "fragment"} dd_reducer.tests # + slideshow={"slide_type": "subslide"} dd_time.elapsed_time() # + [markdown] slideshow={"slide_type": "fragment"} # We see that if an input is syntactically complex, using a grammar to reduce inputs is the best way to go. # + [markdown] slideshow={"slide_type": "slide"} # ## Synopsis # # A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. 
# + [markdown] slideshow={"slide_type": "fragment"} # Here is a simple example: An arithmetic expression causes an error in the Python interpreter: # + slideshow={"slide_type": "fragment"} # !python -c 'x = 1 + 2 * 3 / 0' # + [markdown] slideshow={"slide_type": "subslide"} # Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs. # + slideshow={"slide_type": "skip"} from Fuzzer import ProgramRunner import subprocess # + slideshow={"slide_type": "fragment"} class ZeroDivisionRunner(ProgramRunner): """Make outcome 'FAIL' if ZeroDivisionError occurs""" def run(self, inp: str = "") -> Tuple[subprocess.CompletedProcess, Outcome]: process, outcome = super().run(inp) if process.stderr.find('ZeroDivisionError') >= 0: outcome = 'FAIL' return process, outcome # + [markdown] slideshow={"slide_type": "fragment"} # If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed. # + slideshow={"slide_type": "subslide"} python_input = "x = 1 + 2 * 3 / 0" python_runner = ZeroDivisionRunner("python") process, outcome = python_runner.run(python_input) outcome # + [markdown] slideshow={"slide_type": "fragment"} # Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail: # + slideshow={"slide_type": "fragment"} dd = DeltaDebuggingReducer(python_runner) dd.reduce(python_input) # + [markdown] slideshow={"slide_type": "fragment"} # The input is reduced to the minimum: We get the essence of the division by zero. 
# + slideshow={"slide_type": "fragment"} # ignore from ClassDiagram import display_class_hierarchy # + slideshow={"slide_type": "subslide"} # ignore display_class_hierarchy([DeltaDebuggingReducer, GrammarReducer], public_methods=[ Reducer.__init__, Reducer.reset, Reducer.reduce, DeltaDebuggingReducer.reduce, GrammarReducer.__init__, GrammarReducer.reduce, ], types={ 'DerivationTree': DerivationTree, 'Grammar': Grammar, 'Outcome': Outcome, }, project='fuzzingbook') # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Lessons Learned # # * Reducing failure-inducing inputs to a minimum is helpful for testing and debugging. # * _Delta debugging_ is a simple and robust algorithm to easily reduce test cases. # * For syntactically complex inputs, _grammar-based reduction_ is much faster and yields better results. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Next Steps # # Our next chapter focuses on [Web GUI Fuzzing](WebFuzzer.ipynb), another domain where generating and reducing test cases is central. # + [markdown] slideshow={"slide_type": "slide"} # ## Background # # The "lexical" delta debugging algorithm discussed here stems from \cite{Zeller2002}; actually, this is the exact Python implementation as used by Zeller in 2002. The idea of systematically reducing inputs has been discovered a number of times, although not as automatic and generic as delta debugging. \cite{Slutz1998}, for instance, discusses systematic reduction of SQL statements for SQL databases; the general process as manual work is well described by \cite{Kernighan1999}. # + [markdown] slideshow={"slide_type": "subslide"} # The deficits of delta debugging as it comes to syntactically complex inputs were first discussed in *compiler testing*, and _reducing tree inputs_ rather than string inputs was quickly discovered as an alternative. 
*Hierarchical Delta Debugging* (*HDD*) \cite{Misherghi2006} applies delta debugging on subtrees of a parse tree, systematically reducing a parse tree to a minimum. _Generalized Tree Reduction_ \cite{Herfert2017} generalizes this idea to apply arbitrary _patterns_ such as replacing a term by a compatible term in a subtree, as `subtrees_with_symbol()` does. Using _grammars_ to reduce inputs was first implemented in the _Perses_ tool \cite{Sun2018}; our algorithm implements very similar strategies. Searching for alternate expansions (as `alternate_reductions()`) is a contribution of the present chapter. # + [markdown] slideshow={"slide_type": "subslide"} # While `GrammarReducer` is a generic approach that can be parameterized with an arbitrary grammar, _language-specific_ approaches can do a much better job for the language at hand. *C-Reduce* \cite{Regehr2012} is a reducer specifically targeting the reduction of programming languages. Besides reductions in the style of delta debugging or tree transformations, C-Reduce comes with more than 30 source-to-source transformations that replace aggregates by scalars, remove function parameters at a definition and all call sites, change functions to return `void` and deleting all `return` statements, and many more. While specifically instantiated for the C language (and used for testing C compilers), these principles extend to arbitrary programming languages following an ALGOL-like syntax. When testing a compiler, C-Reduce is the tool to go for. # + [markdown] slideshow={"slide_type": "subslide"} # The chapter on ["Reducing Failure-Inducing Inputs" in the Debugging Book](https://www.debuggingbook.org/html/DeltaDebugger.html) has an alternate implementation `DeltaDebugger` of delta debugging that is even easier to deploy; here, one simply writes # # ```python # with DeltaDebugger() as dd: # fun(args...) # dd # ``` # # to reduce the input in `args` for a failing (exception-throwing) function `fun()`. 
The chapter also discusses further usage examples, including reducing _code_ to a minimum. # + [markdown] slideshow={"slide_type": "subslide"} # This [blog post](https://www.drmaciver.com/2019/01/notes-on-test-case-reduction/) by <NAME> contains lots of insights on how to apply reduction in practice, in particular multiple runs with different abstraction levels. # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Exercises # # How to best reduce inputs is still an underdeveloped field of research, with lots of opportunities. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # ### Exercise 1: Mutation-Based Fuzzing with Reduction # # When fuzzing with a population, it can be useful to occasionally _reduce_ the length of each element, such that future descendants are shorter, too, which typically speeds up their testing. # # Consider the `MutationFuzzer` class from [the chapter on mutation-based fuzzing](MutationFuzzer.ipynb). # Extend it such that whenever a new input is added to the population, it is first reduced using delta debugging. # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** Left to the reader. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # ### Exercise 2: Reduction by Production # # Grammar-based input reduction, as sketched above, might be a good algorithm, but is by no means the only alternative. One interesting question is whether "reduction" should only be limited to elements already present, or whether one would be allowed to also create _new_ elements. These would not be present in the original input, yet still allow to produce a much smaller input that would still reproduce the original failure. 
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # As an example, consider the following grammar: # # ``` # <number> ::= <float> | <integer> | <not-a-number> # <float> ::= <digits>.<digits> # <integer> ::= <digits> # <not-a-number> ::= NaN # <digits> ::= [0-9]+ # ``` # # Assume the input `100.99` fails. We might be able to reduce it to a minimum of, say, `1.9`. However, we cannot reduce it to an `<integer>` or to `<not-a-number>`, as these symbols do not occur in the original input. By allowing to _create_ alternatives for these symbols, we could also tests inputs such as `1` or `NaN` and further generalize the class of inputs for which the program fails. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # Create a class `GenerativeGrammarReducer` as subclass of `GrammarReducer`; extend the method `reduce_subtree()` accordingly. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden" # **Solution.** Left to the reader. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # ### Exercise 3: The Big Reduction Shoot-Out # # Create a _benchmark_ for the grammars already defined earlier, consisting of: # # 1. A set of _inputs_, produced from these very grammars using `GrammarFuzzer` and derivatives; # 2. 
A set of _tests_ which check for the occurrence of individual symbols as well as pairs and triples of these symbols: # * Tests should be _unresolved_ if the input is not syntactically valid; # * Tests should _fail_ if the symbols (or pairs or triples thereof) occur; # * Tests should _pass_ in all other cases. # # Compare delta debugging and grammar-based debugging on the benchmark. Implement HDD \cite{Misherghi2006} and _Generalized Tree Reduction_ \cite{Herfert2017} and add them to your comparison. Which approach performs best, and under which circumstances? # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden" # **Solution.** Left to the reader.
docs/notebooks/Reducer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Computer vision # + hide_input=true from fastai.gen_doc.nbdoc import * # - # The [`vision`](/vision.html#vision) module of the fastai library contains all the necessary functions to define a Dataset and train a model for computer vision tasks. It contains four different submodules to reach that goal: # - [`vision.image`](/vision.image.html#vision.image) contains the basic definition of an [`Image`](/vision.image.html#Image) object and all the functions that are used behind the scenes to apply transformations to such an object. # - [`vision.transform`](/vision.transform.html#vision.transform) contains all the transforms we can use for data augmentation. # - [`vision.data`](/vision.data.html#vision.data) contains the definition of [`ImageDataBunch`](/vision.data.html#ImageDataBunch) as well as the utility function to easily build a [`DataBunch`](/basic_data.html#DataBunch) for Computer Vision problems. # - [`vision.learner`](/vision.learner.html#vision.learner) lets you build and fine-tune models with a pretrained CNN backbone or train a randomly initialized model from scratch. # # Each of the four module links above includes a quick overview and examples of the functionality of that module, as well as complete API documentation. Below, we'll provide a walk-thru of end to end computer vision model training with the most commonly used functionality. # ## Minimal training example # First, import everything you need from the fastai library. 
from fastai.vision import * # First, create a data folder containing a MNIST subset in `data/mnist_sample` using this little helper that will download it for you: path = untar_data(URLs.MNIST_SAMPLE) path # Since this contains standard [`train`](/train.html#train) and `valid` folders, and each contains one folder per class, you can create a [`DataBunch`](/basic_data.html#DataBunch) in a single line: data = ImageDataBunch.from_folder(path) # You load a pretrained model (from [`vision.models`](/vision.models.html#vision.models)) ready for fine tuning: learn = cnn_learner(data, models.resnet18, metrics=accuracy) # And now you're ready to train! learn.fit(1) # Let's look briefly at each of the [`vision`](/vision.html#vision) submodules. # ## Getting the data # The most important piece of [`vision.data`](/vision.data.html#vision.data) for classification is the [`ImageDataBunch`](/vision.data.html#ImageDataBunch). If you've got labels as subfolders, then you can just say: data = ImageDataBunch.from_folder(path) # It will grab the data in a train and validation sets from subfolders of classes. You can then access that training and validation set by grabbing the corresponding attribute in [`data`](/vision.data.html#vision.data). ds = data.train_ds # ## Images # That brings us to [`vision.image`](/vision.image.html#vision.image), which defines the [`Image`](/vision.image.html#Image) class. Our dataset will return [`Image`](/vision.image.html#Image) objects when we index it. Images automatically display in notebooks: img,label = ds[0] img # You can change the way they're displayed: img.show(figsize=(2,2), title='MNIST digit') # And you can transform them in various ways: img.rotate(35) # ## Data augmentation # [`vision.transform`](/vision.transform.html#vision.transform) lets us do data augmentation. 
Simplest is to choose from a standard set of transforms, where the defaults are designed for photos: help(get_transforms) # ...or create the exact list you want: tfms = [rotate(degrees=(-20,20)), symmetric_warp(magnitude=(-0.3,0.3))] # You can apply these transforms to your images by using their `apply_tfms` method. fig,axes = plt.subplots(1,4,figsize=(8,2)) for ax in axes: ds[0][0].apply_tfms(tfms).show(ax=ax) # You can create a [`DataBunch`](/basic_data.html#DataBunch) with your transformed training and validation data loaders in a single step, passing in a tuple of *(train_tfms, valid_tfms)*: data = ImageDataBunch.from_folder(path, ds_tfms=(tfms, [])) # ## Training and interpretation # Now you're ready to train a model. To create a model, simply pass your [`DataBunch`](/basic_data.html#DataBunch) and a model creation function (such as one provided by [`vision.models`](/vision.models.html#vision.models) or [<code>torchvision.models</code>](https://pytorch.org/docs/stable/torchvision/models.html#torchvision-models)) to [`cnn_learner`](/vision.learner.html#cnn_learner), and call [`fit`](/basic_train.html#fit): learn = cnn_learner(data, models.resnet18, metrics=accuracy) learn.fit(1) # Now we can take a look at the most incorrect images, and also the classification matrix. interp = ClassificationInterpretation.from_learner(learn) interp.plot_top_losses(9, figsize=(6,6)) interp.plot_confusion_matrix() # To simply predict the result of a new image (of type [`Image`](/vision.image.html#Image), so opened with [`open_image`](/vision.image.html#open_image) for instance), just use `learn.predict`. It returns the class, its index and the probabilities of each class. img = learn.data.train_ds[0][0] learn.predict(img)
docs_src/vision.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn.decomposition import LatentDirichletAllocation from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer # + spam_header = "spam\t" no_spam_header = "ham\t" documents = [] with open ('./dataset/SMSSpamCollection') as file_handle: for line in file_handle: if line.startswith(spam_header): documents.append(line[len(spam_header):]) elif line.startswith(no_spam_header): documents.append(line[len(no_spam_header):]) vectorizer = CountVectorizer(stop_words='english', max_features=2000) term_counts = vectorizer.fit_transform(documents) vocabulary = vectorizer.get_feature_names() # + topic_model = LatentDirichletAllocation(n_components=10) topic_model.fit(term_counts) topics = topic_model.components_ for topic_id, weights in enumerate(topics): print("topic {}".format(topic_id), end=": ") pairs = [] for term_id, value in enumerate(weights): pairs.append((abs(value), vocabulary[term_id])) pairs.sort(key=lambda x: x[0], reverse=True) for pair in pairs[:10]: print(pair[1], end=",") print() # -
ML/first_ML_11_02_TopicModeling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Word2Vec algorithm (CBOW)
# - Code that walks step by step through how the internal weights are updated
# Training sentence: "You say goodbye and I say hello."

# +
import numpy as np

# Inputs are one-hot vectors over the 7-word vocabulary.
input1 = np.array([[1, 0, 0, 0, 0, 0, 0]])  # You
input2 = np.array([[0, 0, 1, 0, 0, 0, 0]])  # goodbye
# -

# (vocabulary size x embedding dimension) - the dimension is chosen by the user
## The initial weights are chosen at random
W_in = np.random.randn(7, 3)

h_1 = np.matmul(input1, W_in)  # hidden-layer value for context word 1
h_2 = np.matmul(input2, W_in)  # hidden-layer value for context word 2
h = (h_1 + h_2) / 2  # CBOW averages the context projections
print(h)

# ![image.png](attachment:image.png)

W_out = np.random.randn(3, 7)
score = np.matmul(h, W_out)
print(np.round(score, 4))


def softmax(x):
    """Turn a vector of scores into a probability distribution."""
    exp_x = np.exp(x)
    sum_exp_x = np.sum(exp_x)
    y = exp_x / sum_exp_x
    return y


pred = softmax(score)
print(np.round(pred, 4))


# Cross-entropy loss
def cross_entropy_error(y, t):
    '''
    y : prediction (probability distribution, 2-D with batch on axis 0)
    t : target (one-hot)
    '''
    delta = 1e-7  # keeps the argument of log away from 0
    # dividing by y.shape[0] accounts for the batch size
    return -np.sum(t * np.log(y + delta)) / y.shape[0]


cross_entropy_error(pred, [0, 1, 0, 0, 0, 0, 0])

# Update the weights from the loss.
# Gradient of softmax + cross-entropy: prediction minus target.
ans = [0, 1, 0, 0, 0, 0, 0]
ds = np.round(pred - ans, 4)
print(ds)

# ds (delta for W_out)
dW_out = np.outer(h, ds)
print(np.round(dW_out, 4))

da = np.dot(ds, W_out.T)
print(np.round(da, 4))

# Each of the two context words receives half of the hidden-layer gradient.
dw_1 = np.round(np.outer(np.array([[1, 0, 0, 0, 0, 0, 0]]), (da / 2)), 4)
print(dw_1)

dw_2 = np.round(np.outer(np.array([[0, 0, 1, 0, 0, 0, 0]]), (da / 2)), 4)
print(dw_2)

learning_rate = 1
W_in_new = W_in - learning_rate * dw_1
W_in_new = W_in_new - learning_rate * dw_2
print(np.round(W_in_new, 4))
print(np.round(W_in, 4))

learning_rate = 1
W_out_new = W_out - learning_rate * dW_out
print(np.round(W_out_new, 4))
print(np.round(dW_out, 4))

# ## Word2Vec algorithm (Skip-Gram)

# +
# The input is the one-hot vector of the center word; the targets are
# its two surrounding context words.
# (renamed from `input` to avoid shadowing the builtin)
input_vec = np.array([[0, 1, 0, 0, 0, 0, 0]])  # say
output1 = np.array([[1, 0, 0, 0, 0, 0, 0]])  # you
output2 = np.array([[0, 0, 1, 0, 0, 0, 0]])  # goodbye
# -

# (vocabulary size x embedding dimension) - the dimension is chosen by the user
## The initial weights are chosen at random
W_in = np.random.randn(7, 3)

h = np.matmul(input_vec, W_in)  # hidden-layer value
print(h)

W_out = np.random.randn(3, 7)
score = np.matmul(h, W_out)
print(np.round(score, 4))

pred = softmax(score)
print(np.round(pred, 4))

# One loss term per context word.
loss1 = cross_entropy_error(pred, [1, 0, 0, 0, 0, 0, 0])
loss2 = cross_entropy_error(pred, [0, 0, 1, 0, 0, 0, 0])
print(np.round(loss1, 4), np.round(loss2, 4))

# The output-side gradients of the two context words simply add up.
ds1 = np.round(pred - output1, 4)
ds2 = np.round(pred - output2, 4)
ds = ds1 + ds2
print(ds)

dw_out = np.round(np.outer(h, ds), 4)
print(dw_out)

da = np.dot(ds, W_out.T)
print(np.round(da, 4))

dw_in = np.outer(np.array([[0, 1, 0, 0, 0, 0, 0]]), da)
print(np.round(dw_in, 4))

learning_rate = 1
W_in_new = W_in - learning_rate * dw_in
print(np.round(W_in_new, 4))
print(np.round(W_in, 4))

learning_rate = 1
W_out_new = W_out - learning_rate * dw_out
print(np.round(W_out_new, 4))
print(np.round(W_out, 4))
02. 컨텐츠 기반 추천시스템/02. 컨텐츠 기반 추천시스템 - Word2Vec을 이용한 추천시스템/02. 컨텐츠 기반 추천시스템 - Word2vec을 이용한 추천시스템 (numpy 버전).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import numpy as np import os import matplotlib.pyplot as plt import matplotlib.ticker as ticker # %matplotlib inline from astropy import table from astropy.table import Table from astropy.io import ascii from tabulate import tabulate import pyspherematch as sm from astropy import units as u from astropy.coordinates import SkyCoord, FK4, FK5 SAGA_DIR = os.environ['SAGA_DIR'] SAGA_DROPBOX= os.environ['SAGA_DROPBOX'] # - from palettable.colorbrewer.qualitative import Dark2_8 if 'plt' in locals() and hasattr(plt, 'rcParams'): plt.rcParams['lines.linewidth'] = 2.5 plt.rcParams['font.size'] = 17.0 plt.rcParams['font.weight'] = 'medium' plt.rcParams['font.family'] = 'serif' plt.rcParams['axes.prop_cycle'] = plt.cycler(color=Dark2_8.mpl_colors) plt.rcParams['legend.fontsize'] = 'medium' plt.rcParams['legend.frameon'] = False plt.rcParams['figure.dpi'] = 100 plt.rcParams['figure.figsize'] = 7, 6 plt.rcParams['xtick.major.size'] = 6 plt.rcParams['xtick.minor.size'] = 4 plt.rcParams['ytick.major.size'] = 6 plt.rcParams['ytick.minor.size'] = 4 # + # READ SUBMASTER LIST master = Table.read(SAGA_DROPBOX+'hosts/submaster.ecsv.gz', format='ascii.ecsv') gr_master = master['M_g'] - master['M_r'] print master.columns # CALC NUMBER OF BRIGHT THINGS for Sect 2.1 m1=master['M_K'] < -19.6 m2=master['vhelio'] < 3000 m3=master['distance'] < 40. m4 = master['dist_2MASS_NSA'] < 20. 
print np.sum(m1&m2&m3&m4) #print np.max(master['vhelio'][m3&m4])/3e5 plt.plot(master['RA'],master['Dec'],'.') plt.plot(master['RA'][~m4],master['Dec'][~m4],'r.',ms=1) # + # MATCH COMPLETE HOSTS AND PROPOSED HOSTS IN SUBMASTER LIST complete_hosts =[147100,166313,165536,61945,149781,33446,150887,132339] partial_hosts = [85746,140594,161174,13927,137625,129237] hosts = np.in1d(master['NSAID'], complete_hosts) part = np.in1d(master['NSAID'], partial_hosts) # + # FIND FLAG0 GALAXIES file = SAGA_DROPBOX + 'hosts/host_catalog_flag0.csv' all_flag0 = ascii.read(file, format='csv') flag0 = np.in1d(master['NSAID'][m4],all_flag0['NSAID']) print np.sum(flag0) # + # MILKY WAY PROPERTIES MK_MW2 = -24.0 # MK_MW_err2 = 0.3 MK_MW1 = -24.2 # MK_MW_err1 = 0.2 Mr_MW = -21. + 5.*np.log10(0.7) Mr_MW_err = 0.38 gr_MW = 0.682 # DIRECTLY FROM LICQUIA gr_MW_err = 0.066 ################ Mr_MW_vdk = -20.7 + 5.*np.log10(0.7) Mr_MW_err_vdk = 0.3 print Mr_MW_vdk gr_MW_vdk = 0.75 gr_MW_err_vdk = 0.19 # - # M31 PROPERTIES MK_M31 = -24.5-0.188 # HAMMER 2001 Mr_M31 = -22.0 gr_M31 = 0.701 # + # PLOT MK VS. 
RI fig, (ax1, ax2) = plt.subplots(1, 2,sharey=True) plt.rcParams['figure.figsize'] = 10, 5 ax1.plot(master['M_K'],gr_master, 'k.',ms=2,label='_nolegend_') ax1.plot(master['M_K'][flag0],gr_master[flag0], 'ko',color = '0.75',label='_nolegend_',ms=8) ax1.plot(master['M_K'][part],gr_master[part], 'bo',label='_nolegend_',ms=10) ax1.plot(master['M_K'][hosts],gr_master[hosts], 'ro',label='_nolegend_',ms=10) #MW #ax1.errorbar([MK_MW1],[gr_MW_vdk],xerr=[Mr_MW_err_vdk],yerr=[gr_MW_err_vdk],ecolor='#ff6347',capthick=2,linewidth=2,zorder=3) ax1.errorbar([MK_MW2],[gr_MW_vdk],xerr=[MK_MW_err2],yerr=[gr_MW_err_vdk],color='#ff8c00',capthick=2,linewidth=2,zorder=3) #ax1.plot([MK_MW1],[gr_MW_vdk],'y*',markersize=16,color='#ff6347',label='MW - Just et al (2015)',zorder=4) ax1.plot([MK_MW2],[gr_MW_vdk],'y*',markersize=16,color='#ff8c00',label='MW - Drimmel & Spergel (2001)',zorder=4) ax1.plot([MK_M31],[gr_M31],'y*',markersize=16,color='#ba55d3',label='M31 - Hammer et al (2007)',zorder=4) ax1.errorbar([MK_M31],[gr_M31],xerr=[0.1],yerr=[0.1],color='#ba55d3',capthick=2,linewidth=2,zorder=4) ax1.set_ylim(0.38, 1.01) ax1.set_xlim(-25.1,-22.3) ax1.set_ylabel('$(g-r)_o$') ax1.set_xlabel('$M_{K,o}$') ax1.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax1.yaxis.set_major_locator(ticker.MultipleLocator(0.2)) ax1.legend(fontsize=11,loc=3,numpoints = 1) ########################################## # PLOT Mr Vs gr ax2.plot(master['M_r'],gr_master, 'k.',ms=2,label='_nolegend_') ax2.plot(master['M_r'][hosts],gr_master[hosts], 'ro',label='Complete Hosts',ms=10,zorder=4) ax2.plot(master['M_r'][part],gr_master[part], 'bo',label='Partial Hosts',ms=10,zorder=3) ax2.plot(master['M_r'][flag0],gr_master[flag0], 'ko',color = '0.75',label='All SAGA hosts',ms=8) ax2.legend(fontsize=11,loc=2,numpoints = 1) #MW - vdK ax2.errorbar([Mr_MW_vdk],[gr_MW_vdk],xerr=[Mr_MW_err_vdk],yerr=[gr_MW_err_vdk],ecolor='#ff8c00',capthick=2,linewidth=2,zorder=3) 
ax2.plot([Mr_MW_vdk],[gr_MW_vdk],'y*',markersize=16,color='#ff8c00',label='MW - van der Kruit (1986)',zorder=4) ax2.plot([Mr_M31],[gr_M31],'y*',markersize=16,color='#ba55d3',label='M31 - ',zorder=4) ax2.errorbar([Mr_M31],[gr_M31],xerr=[0.1],yerr=[0.1],color='#ba55d3',capthick=2,linewidth=2,zorder=4) # BY HAND LEGEND ax2.plot([-22.6],[0.45],'y*',markersize=16,color='#ff8c00') ax2.plot([-22.6],[0.41],'y*',markersize=16,color='#ba55d3') ax2.text(-22.45,0.445,'MW - van der Kruit (1986)',fontsize=11) ax2.text(-22.45,0.405,'M31 - Walterbos (1987)',fontsize=11) ax2.set_ylim(0.38, 1.01) ax2.set_xlim(-22.8,-19.8) ax2.set_xlabel('$M_{r,o}$') ax2.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax2.yaxis.set_major_locator(ticker.MultipleLocator(0.2)) #ax2.legend(fontsize=10,loc=3,numpoints = 1) plt.tight_layout(w_pad=0) plt.savefig('fig_hosts.pdf') # - nsa = Table.read (SAGA_DIR+'/cats/nsa_v0_1_3.fits') # + # READ IRAS FAINT SOURCE CATALOG # DOWNLOADED FROM # http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-scan?mission=irsa&submit=Select&projshort=IRAS irasp = Table.read('/Users/marlageha/Projects/SAGA/data/iras_fsc.tbl',format='ipac') ra = 15.*(irasp['rah'] + irasp['ram']/60. + irasp['ras']/3600.) dec = irasp['decd'] + irasp['decm']/60. + irasp['decs']/3600. msk = irasp['decsign'] == '-' dec[msk] = -1.*dec[msk] # CONVERT IRAS COORDINATES FROM 1950 -> 2000! 
c1 = SkyCoord(ra * u.deg, dec * u.deg, frame=FK4) iras = c1.transform_to(FK5(equinox='J2000')) # + plt.plot(master['RA'][flag0],master['Dec'][flag0],'ro',ms=8) plt.plot(iras.ra,iras.dec,'ro',ms=1) plt.xlim(150,250) plt.ylim(-10,70) # + # MATCH TO NSA m = (nsa['ZDIST']*(3e5) < 6000) nsad = nsa[m] pid1,pid2,d = sm.spherematch(nsad['RA'], nsad['DEC'],\ iras.ra, iras.dec,\ 60./3600,nnearest=1) nmatch = np.size((d > 0.0).nonzero()) print nmatch F_IRAS1 = 2.58*irasp['fnu_60'] + irasp['fnu_100'] F_IRAS = 1.26e-14 *(F_IRAS1) # EQN 2, Kewley 2002 dist = nsad['ZDIST'][pid1]*(3e5/70.)*3.086e22 L_IRAS = 4*np.pi * dist * F_IRAS[pid2] *1e7*dist SFR_IRAS = 7.9e-44 * L_IRAS pSFR_IRAS = np.log10(SFR_IRAS) pra = nsad['RA'][pid1] pdec=nsad['DEC'][pid1] pmass = np.log10(nsad['MASS'][pid1]/0.7**2) # + # HOSTS iras = Table.read('/Users/marlageha/Projects/SAGA/data/saga_iras.dat',format='ascii') m = np.in1d(nsa['NSAID'],iras['nsaid']) dist = nsa['ZDIST'][m]*(3e5/70.)*3.086e22 # CALCULATE SFR F_IRAS = 1.26e-14 *(2.58*iras['60F'] + iras['100F'] ) # EQN 2, Kewley 2002 L_IRAS = 4*np.pi * dist * F_IRAS *1e7*dist SFR_IRAS = 7.9e-44 * L_IRAS hSFR_IRAS = np.log10(SFR_IRAS) hmass = np.log10(nsa['MASS'][m]/0.7**2) n=nsa['NSAID'][m] for (d,h,s) in zip(n,hmass,hSFR_IRAS): print d,h,s # + # HOSTS - partial iras = Table.read('/Users/marlageha/Projects/SAGA/data/saga_IRAS2.dat',format='ascii') m = np.in1d(nsa['NSAID'],iras['nsaid']) dist = nsa['ZDIST'][m]*(3e5/70.)*3.086e22 print nsa['NSAID'][m] # CALCULATE SFR F_IRAS = 1.26e-14 *(2.58*iras['60F'] + iras['100F'] ) # EQN 2, Kewley 2002 L_IRAS = 4*np.pi * dist * F_IRAS *1e7*dist SFR_IRAS = 7.9e-44 * L_IRAS cSFR_IRAS = np.log10(SFR_IRAS) cmass = np.log10(nsa['MASS'][m]/0.7**2) for n,s,m in zip(iras['nsaid'],cSFR_IRAS,nsa['NSAID'][m]): print n,s,m # + # PLOT SFR VS M_STELLAR #mass = np.log10(nsa['MASS']/0.7**2) #id1,id2,d = sm.spherematch(nsa['RA'], nsa['DEC'],\ # pra, pdec,\ # 70./3600,nnearest=1) #nmatch = np.size((d > 0.0).nonzero()) #print nmatch 
plt.plot(pmass,pSFR_IRAS,'k.',ms=2,label='_nolegend_') m = np.in1d(nsa['NSAID'],all_flag0['NSAID']) flag0_nsa = nsa[m] id1,id2,d = sm.spherematch(flag0_nsa['RA'], flag0_nsa['DEC'],\ pra, pdec,\ 120./3600,nnearest=1) nmatch = np.size((d > 0.0).nonzero()) print nmatch flag0_mass = np.log10(flag0_nsa['MASS']/0.7**2) plt.errorbar([np.log10(6.08e10)],[np.log10(1.65)],xerr=[0.1],yerr=[0.1],ecolor='#ffa500',capthick=2,linewidth=2) plt.plot(flag0_mass[id1],pSFR_IRAS[id2],'ko',ms=8,color='0.75',label='_nolegend_') #--------- mpart = np.in1d(nsa['NSAID'], partial_hosts) host_nsa = nsa[mpart] id1,id2,d = sm.spherematch(host_nsa['RA'], host_nsa['DEC'],\ pra, pdec,\ 120./3600,nnearest=1) nmatch = np.size((d > 0.0).nonzero()) print nmatch host_mass = np.log10(host_nsa['MASS']/0.7**2) plt.plot(host_mass[id1],pSFR_IRAS[id2],'bo',ms=10,label='_nolegend_') #------------HOSTS mhosts = np.in1d(nsa['NSAID'], complete_hosts) host_nsa = nsa[mhosts] print host_nsa['NSAID'] id1,id2,d = sm.spherematch(host_nsa['RA'], host_nsa['DEC'],\ pra, pdec,\ 120./3600,nnearest=1) nmatch = np.size((d > 0.0).nonzero()) print nmatch host_mass = np.log10(host_nsa['MASS']/0.7**2) host_nsaid = host_nsa['NSAID'] plt.plot(host_mass[id1],pSFR_IRAS[id2],'ro',ms=10,label='_nolegend_') plt.plot(cmass,cSFR_IRAS,'bo',ms=10,label='_nolegend_') plt.plot(hmass,hSFR_IRAS,'ro',ms=10,label='_nolegend_') for n,obj in zip(host_nsa['NSAID'][id1],pSFR_IRAS[id2]): print n,obj plt.plot([10.57],[-0.1],'ro',ms=10,label='_nolegend_') # 6.08 +/- 1.14 x 10^10 # 1.65 +/- 0.19 msun/year #plt.plot([np.log10(5.1e10)],[np.log10(2)],'y*',markersize=16,color='#ffa500') plt.plot([np.log10(6.08e10)],[np.log10(1.65)],'y*',markersize=17,color='#ffa500',label='MW - Licquia et al (2015)',zorder=4) plt.errorbar([np.log10(6.08e10)],[np.log10(1.65)],xerr=[0.1],yerr=[0.1],ecolor='#ffa500',capthick=2,linewidth=2,zorder=3) # M31 plt.plot([np.log10(10.3e10)],[np.log10(0.7)],'y*',markersize=16,color='#ba55d3',label='M31 - Lewis et al (2015)',zorder=4) 
plt.errorbar([np.log10(10.3e10)],[np.log10(0.7)],xerr=[0.1],yerr=[0.1],ecolor='#ba55d3',capthick=2,linewidth=2,zorder=3) #plt.plot([np.log10(5.7e10)],[np.log10(1.65)],'y*',markersize=16,color='#ffa500') plt.ylim(-1.45,1.4) plt.xlim(11.5,9.5) plt.xlabel('log$_{10}[M_{\mathrm{stellar}} (M_{\odot}) ]$') plt.ylabel('log$_{10}$[SFR (M$_{\mathrm{\odot}}$/yr)]') plt.rcParams['figure.figsize'] = 5.5, 5 plt.legend(fontsize=11,loc=3,numpoints = 1) plt.tight_layout(w_pad=0) plt.savefig('fig_hosts_Mstar.pdf') # -
plot_Fig1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Now You Code 2: Course Grade # # In a previous homework you wrote a program to calculate the final grade letter grade in IST256 when given the total points out of 600. # # The code appears below. Feel free to execute this code a few times in order to get a feel for how it works. # + print("IST256 Grade Calculator") points = int(input("Enter total points out of 600:")) if points >= 570: grade = "A" elif points >= 540: grade = "A-" elif points >= 510: grade = "B+" elif points >= 480: grade = "B" elif points >= 450: grade = "B-" elif points >= 420: grade = "C+" elif points >= 390: grade = "C" elif points >= 360: grade = "C-" elif points >= 300: grade = "D" else: grade = "F" print("Grade:", grade) # - # # Write - Refactor - Test - Rewrite Approach # # The best way to get good at writing functions, a skill you will need to master to become a respectable programmer, is to use the **Write - Refactor - Test - Rewrite** approach. In this approch we: # # 1. Write our code # 2. Refactor it into a function # 3. Test the function to make sure it works as expected # 4. Rewrite our code to call our new function # # ## Step 1: Write code # # The program has been written for you above. Your goal is to refactor it into a function, test the function then re-write the code. # # ## Step 2: Refactor Into a function # # One reason to re-factor code into a function is to simplify our code. It's easier to understand `GetGrade()` as opposed to all of those `if ..elif` statements which make up `GetGrade` itself. # # Now re-factor the code into a user-defined Python function. The name of the function should be `GetGrade()`. The function should take a number of points as input and return the appropriate letter grade based on those points. 
# # **NOTE:** There should not be any `print()` or `input()` statements in your function!
#

# +
## Step 2: Todo Write function definition
## Function: GetGrade
## Arguments: points (eg. 540)
## Returns: grade (A, B+ etc...)

def GetGrade(points):
    """Return the IST256 letter grade for a point total out of 600.

    points -- numeric score (int or float); >= 570 earns an A, each lower
    band spans 30 points down to D at 300, and anything below 300 is an F.
    No printing or input happens here, so the function is easy to test.
    """
    if points >= 570:
        grade = "A"
    elif points >= 540:
        grade = "A-"
    elif points >= 510:
        grade = "B+"
    elif points >= 480:
        grade = "B"
    elif points >= 450:
        grade = "B-"
    elif points >= 420:
        grade = "C+"
    elif points >= 390:
        grade = "C"
    elif points >= 360:
        grade = "C-"
    elif points >= 300:
        grade = "D"
    else:
        grade = "F"
    return grade
# -

# ## Step 3: Test our function
#
# With the function complete, we need to test our function. The simplest way to do that is call the function with inputs we expect and verify the output. For example:
#
# ```
# WHEN point='570' We EXPECT GetGrade(points) to return A
# WHEN points='540' We EXPECT GetGrade(points) to return A-
# ```
#
# The first two are written for you but you will need to write the remaining tests to test all the cases. As a general rule, there should be one test for each kind of output (in this case A through F). You only need to test at the boundaries. No need to test every single value between 0 and 600.

# +
# Step 3: Write tests. The first two tests were written for you. You must write the rest
print("WHEN point='570' We EXPECT GetGrade(points) to return A ACTUAL:", GetGrade(570))
print("WHEN point='540' We EXPECT GetGrade(points) to return A- ACTUAL:", GetGrade(540))
# Remaining boundary tests, one per letter grade (completes the original #todo)
print("WHEN point='510' We EXPECT GetGrade(points) to return B+ ACTUAL:", GetGrade(510))
print("WHEN point='480' We EXPECT GetGrade(points) to return B ACTUAL:", GetGrade(480))
print("WHEN point='450' We EXPECT GetGrade(points) to return B- ACTUAL:", GetGrade(450))
print("WHEN point='420' We EXPECT GetGrade(points) to return C+ ACTUAL:", GetGrade(420))
print("WHEN point='390' We EXPECT GetGrade(points) to return C ACTUAL:", GetGrade(390))
print("WHEN point='360' We EXPECT GetGrade(points) to return C- ACTUAL:", GetGrade(360))
print("WHEN point='300' We EXPECT GetGrade(points) to return D ACTUAL:", GetGrade(300))
print("WHEN point='299' We EXPECT GetGrade(points) to return F ACTUAL:", GetGrade(299))
# -

# ## Step 4: rewrite the program to use the function
#
# Finally re-write the original program, with a twist. Follow this algorithm
#
# ```
# loop
#     input a grade or type 'quit', save in variable text
#     if text equals 'quit'
#         break from the loop
#     convert the text to an integer, store in the variable number
#     call the GetGrade function with the number as input, store the output in letter grade
#     print the letter grade
# ```

# +
## Step 4: Write program here from the algorithm above

def GetGrade(points):
    """Return the IST256 letter grade for a point total (same as Step 2;
    repeated so this cell is self-contained)."""
    if points >= 570:
        grade = "A"
    elif points >= 540:
        grade = "A-"
    elif points >= 510:
        grade = "B+"
    elif points >= 480:
        grade = "B"
    elif points >= 450:
        grade = "B-"
    elif points >= 420:
        grade = "C+"
    elif points >= 390:
        grade = "C"
    elif points >= 360:
        grade = "C-"
    elif points >= 300:
        grade = "D"
    else:
        grade = "F"
    return grade

print("-" * 40)
print("IST256 Grade Calculator")
print("-" * 40)
print("Enter a numerical points earned and get letter grade.")
print("-" * 40)
while True:
    points = input("Input points or type 'quit' to exit: ")
    if points == 'quit':
        print("-" * 40)
        print("Thank you for using Grade Calculator. Goodbye.")
        print("-" * 40)
        break
    else:
        try:
            points = float(points)
            # BUG FIX: the original tested `points in range (0, 601)`, which is
            # False for any non-integer score (e.g. 450.5); such inputs then
            # fell through every branch and produced no output at all.
            # A numeric comparison accepts the whole [0, 600] interval.
            if 0 <= points <= 600:
                final_grade = GetGrade(points)
                print("-" * 40)
                print(points, "points will results in a letter grade of", final_grade, end=".\n")
                print("-" * 40)
            elif points < 0:
                print("-" * 40)
                print("Error 10003: Negative Number Entered.")
                print("-" * 40)
            elif points > 600:
                print("-" * 40)
                print("Error 10002: Bonus Point Alert. A student can not earn more than 600 points.")
                print("-" * 40)
        except ValueError:
            print("-" * 40)
            print("Error 10001: Invalid Entry. You did not enter a number. Please ensure you are entering a numerical value or 'quit' to end the program.")
            print("-" * 40)
# -

# ## Step 6: Questions
#
# 1. Provide 3 advantages for programming a user function in your code like `GetGrade`
# 2. If we want to guarantee our `GetGrade` function is 100% accurate, what is the minimum number of test cases must we provide in step 3?
# 3. How many times is the `GetGrade` function called in the sample run provided? How many times was it called in step 3?
#
# ## Reminder of Evaluation Criteria
#
# 1. Was the problem attempted (analysis, code, and answered questions)?
# 2. Was the problem analysis thought out? (does the program match the plan?)
# 3. Does the code execute without syntax error?
# 4. Does the code solve the intended problem?
# 5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
#
content/lessons/06/Now-You-Code/NYC2-Course-Grade.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# finding branching ratio between nitrate and NO2 + alkoxy under atm conditions. Numbers come from atkinson 1997

import math
import numpy as np
import pandas as pd


def get_nitrate_yield(carbon_number, pressure, temperature, carbon_type, other_functional_groups=None):
    """
    Returns the expected fraction of ROO + NO reaction that forms RONO2
    (instead of RO + NO2), using the Atkinson 1997 parameterization.

    carbon_number: number of carbon atoms in the compound (must be >= 2)
    pressure: pressure in pascals
    temperature: temperature in K
    carbon_type: a string, one of 'primary', 'secondary' or 'tertiary'
    other_functional_groups: optional list of other functional groups known
        to adjust the rate.  Currently accepted: hydroxy group ('hydroxy'
        or 'OH'), which halves the yield, from Carter and Atkinson 1987.
    """
    # derived inputs
    concentration = pressure / 8.314 / temperature /10**6 *6.022e23 #molecule/cm3
    # given constants (Atkinson 1997 fit parameters)
    y_inf = 0.826
    alpha = 1.94e-22 #cm3/mol-1
    beta = 0.97
    m0 = 0.0
    m_inf = 8.1
    F = 0.411
    if carbon_number < 2:
        raise AttributeError('get_nitrate_ratio is only valid for 2 or more carbons. '\
                             'See Atkinson 1997 for more information')
    # calculations (falloff-style interpolation between low- and high-pressure limits)
    y0 = alpha * math.exp(beta * carbon_number)
    numerator = y0*concentration * (temperature/300.)**(-m0)
    denominator = y_inf * (temperature/300.)**-m_inf
    zeta = 1 / (1+ (np.log10(numerator / denominator ))**2)
    # final answer
    rate_ratio = (numerator / (1+ numerator/denominator)) * F**zeta
    rate_yield = 1 / (1 / rate_ratio + 1) # go from ka/kb to ka/(ka+kb)
    # scale for the type of carbon carrying the peroxy group
    if carbon_type == 'primary':
        rate_yield *= 0.4
    elif carbon_type == 'tertiary':
        rate_yield *= 0.3
    elif carbon_type != 'secondary':
        raise ValueError('Only primary, secondary and tertiary accepted for carbon_type')
    if other_functional_groups is not None:
        for fg in other_functional_groups:
            if (fg == 'hydroxy') or (fg == 'OH'):
                rate_yield /= 2 #from atkinson 1997 for hydroxy groups formed from alkenes
    return rate_yield


# +
# test get nitrate ratio
# using data of carter and atkinson 1989
# tuples: (carbon_number, carbon_type, temperature [K], number density [molecule/cm3], expected yield)
data = [(3, 'secondary', 299, 2.37e19, 0.047),
        (4, 'secondary', 299, 2.37e19, 0.083),
        (5, 'secondary', 284, 2.52e19, 0.158),
        (6, 'secondary', 299, 2.37e19, 0.193),
        (6, 'secondary', 281, 1.20e19, 0.179),
        (3, 'primary', 299, 2.37e19, 0.019),
        (5, 'primary', 282, 2.51e19, 0.065),
        (6, 'tertiary', 298, 2.38e19, 0.059)]
for carbon_number, carbon_type, temperature, density, expected_output in data:
    # convert number density back to pressure (inverse of the conversion inside the function)
    pressure = density * 8.314 * temperature * 10**6 / 6.022e23
    nitrate_yield = get_nitrate_yield(carbon_number, pressure, temperature, carbon_type)
    if abs(nitrate_yield - expected_output) > 0.001:
        print('Model result different than Carter and Atkinson for data: '+str((carbon_number, carbon_type, temperature, density, expected_output)))
        print('Expected {}, got {}'.format(expected_output, nitrate_yield))

# + [markdown] tags=[]
# # use RMG to get plog of data
# -

from rmgpy.kinetics.arrhenius import Arrhenius, PDepArrhenius
from rmgpy.chemkin import write_kinetics_entry
from rmgpy.reaction import Reaction
from rmgpy.species import Species

# inputs for the n-butyl peroxy + NO system (with a hydroxy correction applied)
number_carbons = 4
carbon_type = 'primary'
peroxy_name = 'PC4H9O2'
nitrate_name = 'PC4H9ONO2'
alkoxy_name = 'PC4H9O'
peroxy_smiles = 'CCCCO[O]'
nitrate_smiles = 'CCCCO[N+](=O)[O-]'
alkoxy_smiles = 'CCCC[O]'
use_total_rate = True
other_functional_groups = ['OH']

# get the alkoxy rates
if use_total_rate:
    #atkinson 1997 for C2+
    total_kinetics = Arrhenius(A=(2.7e-12*6.022e23,'cm^3/(mol*s)'),n=0,Ea=(-360*8.314,('J/mol')))
else:
    #anderlohr 2009
    alkoxy_kinetics = Arrhenius(A=(4.7e12,'cm^3/(mol*s)'),n=0,Ea=(-358,('cal/mol')))

temperatures = np.linspace(250,1250,20)
pressures = np.logspace(3,7,5)

# build a (T, P) grid of channel-specific rate coefficients
data = np.ndarray((len(temperatures),len(pressures)))
data_alkoxy = np.ndarray((len(temperatures),len(pressures)))
for i1, t in enumerate(temperatures):
    for i2, p in enumerate(pressures):
        nitrate_yield = get_nitrate_yield(number_carbons,p,t,carbon_type,other_functional_groups=other_functional_groups)
        if use_total_rate:
            # split the total ROO + NO rate into nitrate and alkoxy channels
            total_rate = total_kinetics.get_rate_coefficient(t)
            data[i1,i2] = total_rate * nitrate_yield
            data_alkoxy[i1,i2] = total_rate * (1-nitrate_yield)
        else:
            # scale the known alkoxy-channel rate by the branching ratio
            nitrate_to_NO2_ratio = 1 / (1 / nitrate_yield - 1)
            data[i1,i2] = alkoxy_kinetics.get_rate_coefficient(t) * nitrate_to_NO2_ratio
nitrate_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data, 'm^3/(mol*s)')
if use_total_rate:
    alkoxy_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data_alkoxy, 'm^3/(mol*s)')

# +
no = Species().from_smiles('[N]=O')
peroxy = Species(label = peroxy_name).from_smiles(peroxy_smiles)
nitrate = Species(label = nitrate_name).from_smiles(nitrate_smiles)
rxn = Reaction(reactants = [no,peroxy], products = [nitrate], kinetics = nitrate_rate)
print(write_kinetics_entry(rxn, [no,peroxy,nitrate]))
# this is in units of kcal/mol and mol and may need to have the activation energy modified
# -

if use_total_rate:
    alkoxy = Species(label = alkoxy_name).from_smiles(alkoxy_smiles)
    no2 = Species().from_smiles('N(=O)[O]')
    rxn2 = Reaction(reactants = [no,peroxy], products = [alkoxy, no2], kinetics = alkoxy_rate)
    print(write_kinetics_entry(rxn2, [no,peroxy,alkoxy,no2]))
    # this is in units of kcal/mol and mol and may need to have the activation energy modified

#nitrate
print(repr(rxn.kinetics))
print(repr(rxn2.kinetics))

# ensure the rates add up (only populated when use_total_rate is True;
# otherwise `errors` is left uninitialized by this loop)
errors = np.ndarray((len(temperatures),len(pressures)))
for i1, temperature in enumerate(temperatures):
    for i2, pressure in enumerate(pressures):
        if use_total_rate:
            r1 = rxn.get_rate_coefficient(temperature, pressure)
            r2 = rxn2.get_rate_coefficient(temperature, pressure)
            total = total_kinetics.get_rate_coefficient(temperature)
            error = abs(r1 + r2 - total) / total
            errors[i1, i2] = error

# + [markdown] tags=[]
# # compare atkinson to quantum
# Here I output atkinson rates of RO2 + NO at 300, 800, and 1500 and compare it to the RRCM results by http://dx.doi.org/10.1016/j.comptc.2017.04.015
# -

import numpy as np
atkinson_rxn = lambda t: 2.7e-12*np.exp(360./t)
temps = [300,800,1500]
atkinson_rates = np.array([atkinson_rxn(t) for t in temps])
# these were digitized with Engauge from figure 7 in the paper at low pressure limits.
ng_rates = np.array([2.08e-11,2.47e-11,6.58e-11])
rate_diff = ng_rates/ atkinson_rates
rate_diff  # bare expression: displayed by the notebook

# It seems like a different rate coefficient is predicted by the quantum and atkinson, with quantum overestimating the rate, which is typical. The rate overestimate is 2 times at atm temperature, 6 times at 800K and 20x at 1500K.

# + [markdown] tags=[]
# # Compare Atkinson with Jenkins
#
# This is to compare the results by Atkinson with those by the more recent publication of Jenkins (doi: 10.5194/acp-19-7691-2019).
#
#
# -

def get_nitrate_yield_jenkins(carbon_number, pressure, temperature, carbon_type, other_functional_groups=None):
    """
    Returns the expected fraction of ROO + NO reaction that forms RONO2
    (instead of RO + NO2), using the Jenkins et al. 2019 parameterization
    (doi: 10.5194/acp-19-7691-2019).

    carbon_number: number of carbon atoms in the compound (must be >= 2)
    pressure: pressure in pascals
    temperature: temperature in K
    carbon_type: a string, one of 'primary', 'secondary' or 'tertiary'
    other_functional_groups: optional list of other functional groups known
        to adjust the rate.  Currently accepted: hydroxy group ('hydroxy'
        or 'OH'), which scales the yield by 0.65 (the code applies 0.65,
        not the factor-of-2 reduction used in the Atkinson version).
    """
    # derived inputs
    concentration = pressure / 8.314 / temperature /10**6 *6.022e23 #molecule/cm3
    # given constants (Jenkins et al. 2019 fit parameters)
    y_inf = .43
    alpha = 2e-22 #cm3/mol-1
    beta = 1
    m0 = 0.0
    m_inf = 8
    F = 0.41
    if carbon_number < 2:
        raise AttributeError('get_nitrate_ratio is only valid for 2 or more carbons. '\
                             'See Atkinson 1997 for more information')
    # calculations (same functional form as get_nitrate_yield)
    y0 = alpha * math.exp(beta * carbon_number)
    numerator = y0*concentration * (temperature/300.)**(-m0)
    denominator = y_inf * (temperature/300.)**-m_inf
    zeta = 1 / (1+ (np.log10(numerator / denominator ))**2)
    # final answer
    rate_ratio = (numerator / (1+ numerator/denominator)) * F**zeta
    rate_yield = 1 / (1 / rate_ratio + 1) # go from ka/kb to ka/(ka+kb)
    # scale for the type of carbon carrying the peroxy group
    if carbon_type == 'primary':
        rate_yield *= 0.65
    elif carbon_type == 'tertiary':
        rate_yield *= 1
    elif carbon_type != 'secondary':
        raise ValueError('Only primary, secondary and tertiary accepted for carbon_type')
    if other_functional_groups is not None:
        for fg in other_functional_groups:
            if (fg == 'hydroxy') or (fg == 'OH'):
                rate_yield *= 0.65 #for the equivalent group of atkinson 1997 for hydroxy groups formed from alkenes
    return rate_yield


# same (T, P) grid as above, but with the Jenkins parameterization,
# reusing number_carbons / carbon_type / flags from the n-butyl section
data_jenkins = np.ndarray((len(temperatures),len(pressures)))
data_alkoxy_jenkins = np.ndarray((len(temperatures),len(pressures)))
for i1, t in enumerate(temperatures):
    for i2, p in enumerate(pressures):
        nitrate_yield = get_nitrate_yield_jenkins(number_carbons,p,t,carbon_type,other_functional_groups=other_functional_groups)
        if use_total_rate:
            total_rate = total_kinetics.get_rate_coefficient(t)
            data_jenkins[i1,i2] = total_rate * nitrate_yield
            data_alkoxy_jenkins[i1,i2] = total_rate * (1-nitrate_yield)
        else:
            nitrate_to_NO2_ratio = 1 / (1 / nitrate_yield - 1)
            data_jenkins[i1,i2] = alkoxy_kinetics.get_rate_coefficient(t) * nitrate_to_NO2_ratio

# + [markdown] tags=[]
# # use RMG to get plog of data
# -

# narrower temperature range for the per-species fits below
temperatures = np.linspace(250,600,20)
pressures = np.logspace(3,7,5)

from rmgpy.kinetics.arrhenius import Arrhenius, PDepArrhenius
from rmgpy.chemkin import write_kinetics_entry
from rmgpy.reaction import Reaction
from rmgpy.species import Species

# ## n-propane peroxy radical

number_carbons = 3
carbon_type = 'primary'
peroxy_name = 'npropyloo'
nitrate_name = 'npropylONO2'
alkoxy_name = 'npropyloxy'
peroxy_smiles = 'CCCO[O]'
nitrate_smiles = 'CCCO[N+](=O)[O-]'
alkoxy_smiles = 'CCC[O]'
use_total_rate = False
other_functional_groups = []

# get the alkoxy rates
if use_total_rate:
    #atkinson 1997 for C2+
    total_kinetics = Arrhenius(A=(2.7e-12*6.022e23,'cm^3/(mol*s)'),n=0,Ea=(-360*8.314,('J/mol')))
else:
    #anderlohr 2009
    alkoxy_kinetics = Arrhenius(A=(4.7e12,'cm^3/(mol*s)'),n=0,Ea=(-358,('cal/mol')))

data = np.ndarray((len(temperatures),len(pressures)))
data_alkoxy = np.ndarray((len(temperatures),len(pressures)))
for i1, t in enumerate(temperatures):
    for i2, p in enumerate(pressures):
        nitrate_yield = get_nitrate_yield_jenkins(number_carbons,p,t,carbon_type,other_functional_groups=other_functional_groups)
        if use_total_rate:
            total_rate = total_kinetics.get_rate_coefficient(t)
            data[i1,i2] = total_rate * nitrate_yield
            data_alkoxy[i1,i2] = total_rate * (1-nitrate_yield)
        else:
            nitrate_to_NO2_ratio = 1 / (1 / nitrate_yield - 1)
            data[i1,i2] = alkoxy_kinetics.get_rate_coefficient(t) * nitrate_to_NO2_ratio
nitrate_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data, 'm^3/(mol*s)')
if use_total_rate:
    alkoxy_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data_alkoxy, 'm^3/(mol*s)')

# +
no = Species().from_smiles('[N]=O')
peroxy = Species(label = peroxy_name).from_smiles(peroxy_smiles)
nitrate = Species(label = nitrate_name).from_smiles(nitrate_smiles)
rxn = Reaction(reactants = [no,peroxy], products = [nitrate], kinetics = nitrate_rate)
print(write_kinetics_entry(rxn, [no,peroxy,nitrate]))
# this is in units of kcal/mol and mol and may need to have the activation energy modified
# -

if use_total_rate:
    alkoxy = Species(label = alkoxy_name).from_smiles(alkoxy_smiles)
    no2 = Species().from_smiles('N(=O)[O]')
    rxn2 = Reaction(reactants = [no,peroxy], products = [alkoxy, no2], kinetics = alkoxy_rate)
    print(write_kinetics_entry(rxn2, [no,peroxy,alkoxy,no2]))
    # this is in units of kcal/mol and mol and may need to have the activation energy modified

# ## i-propane peroxy radical

number_carbons = 3
carbon_type = 'secondary'
peroxy_name = 'ipropyloo'
nitrate_name = 'ipropylONO2'
alkoxy_name = 'ipropyloxy'
peroxy_smiles = 'CC(C)O[O]'
nitrate_smiles = 'CC(C)O[N+](=O)[O-]'
alkoxy_smiles = 'CC(C)[O]'
use_total_rate = False
other_functional_groups = []

# get the alkoxy rates
if use_total_rate:
    #atkinson 1997 for C2+
    total_kinetics = Arrhenius(A=(2.7e-12*6.022e23,'cm^3/(mol*s)'),n=0,Ea=(-360*8.314,('J/mol')))
else:
    #anderlohr 2009
    alkoxy_kinetics = Arrhenius(A=(4.7e12,'cm^3/(mol*s)'),n=0,Ea=(-358,('cal/mol')))

data = np.ndarray((len(temperatures),len(pressures)))
data_alkoxy = np.ndarray((len(temperatures),len(pressures)))
for i1, t in enumerate(temperatures):
    for i2, p in enumerate(pressures):
        nitrate_yield = get_nitrate_yield_jenkins(number_carbons,p,t,carbon_type,other_functional_groups=other_functional_groups)
        if use_total_rate:
            total_rate = total_kinetics.get_rate_coefficient(t)
            data[i1,i2] = total_rate * nitrate_yield
            data_alkoxy[i1,i2] = total_rate * (1-nitrate_yield)
        else:
            nitrate_to_NO2_ratio = 1 / (1 / nitrate_yield - 1)
            data[i1,i2] = alkoxy_kinetics.get_rate_coefficient(t) * nitrate_to_NO2_ratio
nitrate_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data, 'm^3/(mol*s)')
if use_total_rate:
    alkoxy_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data_alkoxy, 'm^3/(mol*s)')

# +
no = Species().from_smiles('[N]=O')
peroxy = Species(label = peroxy_name).from_smiles(peroxy_smiles)
nitrate = Species(label = nitrate_name).from_smiles(nitrate_smiles)
rxn = Reaction(reactants = [no,peroxy], products = [nitrate], kinetics = nitrate_rate)
print(write_kinetics_entry(rxn, [no,peroxy,nitrate]))
# this is in units of kcal/mol and mol and may need to have the activation energy modified
# -

if use_total_rate:
    alkoxy = Species(label = alkoxy_name).from_smiles(alkoxy_smiles)
    no2 = Species().from_smiles('N(=O)[O]')
    rxn2 = Reaction(reactants = [no,peroxy], products = [alkoxy, no2], kinetics = alkoxy_rate)
    print(write_kinetics_entry(rxn2, [no,peroxy,alkoxy,no2]))
    # this is in units of kcal/mol and mol and may need to have the activation energy modified

# ## butane peroxy radical

number_carbons = 4
carbon_type = 'primary'
peroxy_name = 'PC4H9O2'
nitrate_name = 'PC4H9ONO2'
alkoxy_name = 'PC4H9O'
peroxy_smiles = 'CCCCO[O]'
nitrate_smiles = 'CCCCO[N+](=O)[O-]'
alkoxy_smiles = 'CCCC[O]'
use_total_rate = True
other_functional_groups = []

# get the alkoxy rates
if use_total_rate:
    #atkinson 1997 for C2+
    total_kinetics = Arrhenius(A=(2.7e-12*6.022e23,'cm^3/(mol*s)'),n=0,Ea=(-360*8.314,('J/mol')))
else:
    #anderlohr 2009
    alkoxy_kinetics = Arrhenius(A=(4.7e12,'cm^3/(mol*s)'),n=0,Ea=(-358,('cal/mol')))

data = np.ndarray((len(temperatures),len(pressures)))
data_alkoxy = np.ndarray((len(temperatures),len(pressures)))
for i1, t in enumerate(temperatures):
    for i2, p in enumerate(pressures):
        nitrate_yield = get_nitrate_yield_jenkins(number_carbons,p,t,carbon_type,other_functional_groups=other_functional_groups)
        if use_total_rate:
            total_rate = total_kinetics.get_rate_coefficient(t)
            data[i1,i2] = total_rate * nitrate_yield
            data_alkoxy[i1,i2] = total_rate * (1-nitrate_yield)
        else:
            nitrate_to_NO2_ratio = 1 / (1 / nitrate_yield - 1)
            data[i1,i2] = alkoxy_kinetics.get_rate_coefficient(t) * nitrate_to_NO2_ratio
nitrate_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data, 'm^3/(mol*s)')
if use_total_rate:
    alkoxy_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data_alkoxy, 'm^3/(mol*s)')

# +
no = Species().from_smiles('[N]=O')
peroxy = Species(label = peroxy_name).from_smiles(peroxy_smiles)
nitrate = Species(label = nitrate_name).from_smiles(nitrate_smiles)
rxn = Reaction(reactants = [no,peroxy], products = [nitrate], kinetics = nitrate_rate)
print(write_kinetics_entry(rxn, [no,peroxy,nitrate]))
# this is in units of kcal/mol and mol and may need to have the activation energy modified
# -

if use_total_rate:
    alkoxy = Species(label = alkoxy_name).from_smiles(alkoxy_smiles)
    no2 = Species().from_smiles('N(=O)[O]')
    rxn2 = Reaction(reactants = [no,peroxy], products = [alkoxy, no2], kinetics = alkoxy_rate)
    print(write_kinetics_entry(rxn2, [no,peroxy,alkoxy,no2]))
    # this is in units of kcal/mol and mol and may need to have the activation energy modified

# ## gamma-isobutanol peroxy radical

number_carbons = 5
carbon_type = 'primary'
peroxy_name = 'gRO2'
nitrate_name = 'NO2OCC(CO)C'
alkoxy_name = '[O]CC(CO)C'
peroxy_smiles = 'CC(CO)CO[O]'
nitrate_smiles = 'CC(CO)CO[N+](=O)[O-]'
# NOTE(review): this alkoxy SMILES lacks the hydroxy group present in
# peroxy_smiles ('CC(CO)CO[O]' -> expected alkoxy 'CC(CO)C[O]') -- possible
# typo, verify before using the rxn2 output.
alkoxy_smiles = 'CC(C)C[O]'
use_total_rate = True
other_functional_groups = ['OH']

# get the alkoxy rates
if use_total_rate:
    #atkinson 1997 for C2+
    total_kinetics = Arrhenius(A=(2.7e-12*6.022e23,'cm^3/(mol*s)'),n=0,Ea=(-360*8.314,('J/mol')))
else:
    #anderlohr 2009
    alkoxy_kinetics = Arrhenius(A=(4.7e12,'cm^3/(mol*s)'),n=0,Ea=(-358,('cal/mol')))

data = np.ndarray((len(temperatures),len(pressures)))
data_alkoxy = np.ndarray((len(temperatures),len(pressures)))
for i1, t in enumerate(temperatures):
    for i2, p in enumerate(pressures):
        nitrate_yield = get_nitrate_yield_jenkins(number_carbons,p,t,carbon_type,other_functional_groups=other_functional_groups)
        if use_total_rate:
            total_rate = total_kinetics.get_rate_coefficient(t)
            data[i1,i2] = total_rate * nitrate_yield
            data_alkoxy[i1,i2] = total_rate * (1-nitrate_yield)
        else:
            nitrate_to_NO2_ratio = 1 / (1 / nitrate_yield - 1)
            data[i1,i2] = alkoxy_kinetics.get_rate_coefficient(t) * nitrate_to_NO2_ratio
nitrate_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data, 'm^3/(mol*s)')
if use_total_rate:
    alkoxy_rate = PDepArrhenius().fit_to_data(temperatures,pressures, data_alkoxy, 'm^3/(mol*s)')

# +
no = Species().from_smiles('[N]=O')
peroxy = Species(label = peroxy_name).from_smiles(peroxy_smiles)
nitrate = Species(label = nitrate_name).from_smiles(nitrate_smiles)
rxn = Reaction(reactants = [no,peroxy], products = [nitrate], kinetics = nitrate_rate)
print(write_kinetics_entry(rxn, [no,peroxy,nitrate]))
# this is in units of kcal/mol and mol and may need to have the activation energy modified
# -

if use_total_rate:
    alkoxy = Species(label = alkoxy_name).from_smiles(alkoxy_smiles)
    no2 = Species().from_smiles('N(=O)[O]')
    rxn2 = Reaction(reactants = [no,peroxy], products = [alkoxy, no2], kinetics = alkoxy_rate)
    print(write_kinetics_entry(rxn2, [no,peroxy,alkoxy,no2]))
    # this is in units of kcal/mol and mol and may need to have the activation energy modified
code/carter_atkinson_NO_branching.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.2 64-bit (''.py39mlenv'': venv)'
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import talib
import ccxt
import joblib
import lightgbm as lgb
import matplotlib.pyplot as plt
import japanize_matplotlib
import numba
import numpy as np
from scipy.stats import ttest_1samp
import seaborn as sns
from datetime import datetime
import time
from pprint import pprint
import json


# +
# Build an OHLC DataFrame from a JSON candlestick dump.
def make_df(file_path, min):
    """Read a JSON file of candlesticks and return a DataFrame.

    file_path: path to a JSON file shaped like
        {'result': {'<min>': [[close_time, open, high, low, close, ...], ...]}}
    min: candle period in minutes; used as the string key into data['result'].
        (The name shadows the builtin `min`, kept for caller compatibility.)

    Returns a DataFrame with close_time, a formatted close_time_dt string and
    the four price columns, or None when no data is present for that period.
    Rows with a falsy (0/None) open/high/low/close value are skipped.
    """
    price = []
    # use a context manager so the file handle is always closed
    # (the original left it open)
    with open(file_path, 'r', encoding='utf-8') as file:
        data = json.load(file)
    if data['result'][str(min)]:
        for i in data['result'][str(min)]:
            if i[1] and i[2] and i[3] and i[4]:
                price.append({
                    'close_time': i[0],
                    'close_time_dt': datetime.fromtimestamp(i[0]).strftime('%Y/%m/%d %H:%M'),
                    'open_price': i[1],
                    'high_price': i[2],
                    'low_price': i[3],
                    'close_price': i[4],
                })
        df = pd.json_normalize(price)
        return df
    else:
        print('データがありません')
        # BUG FIX: the original ended with `return def`, a syntax error that
        # made the whole file unimportable; return None explicitly instead.
        return None


# -

min = 60
df = make_df('./data_sample.json', 60)
df

df_ = pd.json_normalize(df)
df_

df
my_model/first_try.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to scientific computing with Python # <NAME> (<EMAIL> at <EMAIL>) # # The latest version of this [IPython notebook](http://ipython.org/notebook.html) lecture is available at [http://github.com/jrjohansson/scientific-python-lectures](http://github.com/jrjohansson/scientific-python-lectures). # # The other notebooks in this lecture series are indexed at [http://jrjohansson.github.io](http://jrjohansson.github.io). # ## The role of computing in science # Science has traditionally been divided into experimental and theoretical disciplines, but during the last several decades computing has emerged as a very important part of science. Scientific computing is often closely related to theory, but it also has many characteristics in common with experimental work. It is therefore often viewed as a new third branch of science. In most fields of science, computational work is an important complement to both experiments and theory, and nowadays a vast majority of both experimental and theoretical papers involve some numerical calculations, simulations or computer modeling. # <center> # <img src="images/theory-experiment-computation.png" width="300"> # </center> # # In experimental and theoretical sciences there are well established codes of conducts for how results and methods are published and made available to other scientists. For example, in theoretical sciences, derivations, proofs and other results are published in full detail, or made available upon request. Likewise, in experimental sciences, the methods used and the results are published, and all experimental data should be available upon request. 
It is considered unscientific to withhold crucial details in a theoretical proof or experimental method, that would hinder other scientists from replicating and reproducing the results. # # In computational sciences there are not yet any well established guidelines for how source code and generated data should be handled. For example, it is relatively rare that source code used in simulations for published papers are provided to readers, in contrast to the open nature of experimental and theoretical work. And it is not uncommon that source code for simulation software is withheld and considered a competitive advantage (or unnecessary to publish). # # However, this issue has recently started to attract increasing attention, and a number of editorials in high-profile journals have called for increased openness in computational sciences. Some prestigious journals, including Science, have even started to demand of authors to provide the source code for simulation software used in publications to readers upon request. # # Discussions are also ongoing on how to facilitate distribution of scientific software, for example as supplementary materials to scientific papers. # ### References # * [Reproducible Research in Computational Science](http://dx.doi.org/10.1126/science.1213847), <NAME>, Science 334, 1226 (2011). # # * [Shining Light into Black Boxes](http://dx.doi.org/10.1126/science.1218263), A. Morin et al., Science 336, 159-160 (2012). # # * [The case for open computer programs](http://dx.doi.org/doi:10.1038/nature10836), <NAME>, Nature 482, 485 (2012). # ## Requirements on scientific computing # **Replication** and **reproducibility** are two of the cornerstones in the scientific method. With respect to numerical work, complying with these concepts have the following practical implications: # # * Replication: An author of a scientific paper that involves numerical calculations should be able to rerun the simulations and replicate the results upon request. 
Other scientists should also be able to perform the same calculations and obtain the same results, given the information about the methods used in a publication. # # * Reproducibility: The results obtained from numerical simulations should be reproducible with an independent implementation of the method, or using a different method altogether. # # # In summary: A sound scientific result should be reproducible, and a sound scientific study should be replicable. # # # To achieve these goals, we need to: # # * Keep and take note of *exactly* which source code and version that was used to produce data and figures in published papers. # # * Record information of which version of external software that was used. Keep access to the environment that was used. # # * Make sure that old codes and notes are backed up and kept for future reference. # # * Be ready to give additional information about the methods used, and perhaps also the simulation codes, to an interested reader who requests it (even years after the paper was published!). # # * Ideally codes should be published online, to make it easier for other scientists interested in the codes to access it. # ### Tools for managing source code # Ensuring replicability and reproducibility of scientific simulations is a *complicated problem*, but there are good tools to help with this: # # * Revision Control System (RCS) software. # * Good choices include: # * git - http://git-scm.com # * mercurial - http://mercurial.selenic.com. Also known as `hg`. # * subversion - http://subversion.apache.org. Also known as `svn`. # # * Online repositories for source code. Available as both private and public repositories. # * Some good alternatives are # * Github - http://www.github.com # * Bitbucket - http://www.bitbucket.com # * Privately hosted repositories on the university's or department's servers. # # #### Note # # Repositories are also excellent for version controlling manuscripts, figures, thesis files, data files, lab logs, etc. 
Basically for any digital content that must be preserved and is frequently updated. Again, both public and private repositories are readily available. They are also excellent collaboration tools! # ## What is Python? # [Python](http://www.python.org/) is a modern, general-purpose, object-oriented, high-level programming language. # # General characteristics of Python: # # * **clean and simple language:** Easy-to-read and intuitive code, easy-to-learn minimalistic syntax, maintainability scales well with size of projects. # * **expressive language:** Fewer lines of code, fewer bugs, easier to maintain. # # Technical details: # # * **dynamically typed:** No need to define the type of variables, function arguments or return types. # * **automatic memory management:** No need to explicitly allocate and deallocate memory for variables and data arrays. No memory leak bugs. # * **interpreted:** No need to compile the code. The Python interpreter reads and executes the python code directly. # # Advantages: # # * The main advantage is ease of programming, minimizing the time required to develop, debug and maintain the code. # * Well designed language that encourage many good programming practices: # * Modular and object-oriented programming, good system for packaging and re-use of code. This often results in more transparent, maintainable and bug-free code. # * Documentation tightly integrated with the code. # * A large standard library, and a large collection of add-on packages. # # Disadvantages: # # * Since Python is an interpreted and dynamically typed programming language, the execution of python code can be slow compared to compiled statically typed programming languages, such as C and Fortran. # * Somewhat decentralized, with different environment, packages and documentation spread out at different places. Can make it harder to get started. # ## What makes python suitable for scientific computing? 
# <img src="images/optimizing-what.png" width="600"> # # * Python has a strong position in scientific computing: # * Large community of users, easy to find help and documentation. # # * Extensive ecosystem of scientific libraries and environments # * numpy: http://numpy.scipy.org - Numerical Python # * scipy: http://www.scipy.org - Scientific Python # * matplotlib: http://www.matplotlib.org - graphics library # # * Great performance due to close integration with time-tested and highly optimized codes written in C and Fortran: # * blas, atlas blas, lapack, arpack, Intel MKL, ... # # * Good support for # * Parallel processing with processes and threads # * Interprocess communication (MPI) # * GPU computing (OpenCL and CUDA) # # * Readily available and suitable for use on high-performance computing clusters. # # * No license costs, no unnecessary use of research budget. # # ### The scientific python software stack # <!-- <img src="files/images/scientific-python-stack.svg" width="300"> --> # <img src="images/scientific-python-stack.png" width="300"> # ### Python environments # Python is not only a programming language, but often also refers to the standard implementation of the interpreter (technically referred to as [CPython](http://en.wikipedia.org/wiki/CPython)) that actually runs the python code on a computer. # # There are also many different environments through which the python interpreter can be used. Each environment has different advantages and is suitable for different workflows. One strength of python is that it is versatile and can be used in complementary ways, but it can be confusing for beginners so we will start with a brief survey of python environments that are useful for scientific computing. # ### Python interpreter # The standard way to use the Python programming language is to use the Python interpreter to run python code. The python interpreter is a program that reads and execute the python code in files passed to it as arguments. 
At the command prompt, the command ``python`` is used to invoke the Python interpreter. # # For example, to run a file ``my-program.py`` that contains python code from the command prompt, use:: # # $ python my-program.py # # We can also start the interpreter by simply typing ``python`` at the command line, and interactively type python code into the interpreter. # # <!-- <img src="files/images/python-screenshot.jpg" width="600"> --> # <img src="images/python-screenshot.jpg" width="600"> # # # This is often how we want to work when developing scientific applications, or when doing small calculations. But the standard python interpreter is not very convenient for this kind of work, due to a number of limitations. # ### IPython # IPython is an interactive shell that addresses the limitation of the standard python interpreter, and it is a work-horse for scientific use of python. It provides an interactive prompt to the python interpreter with a greatly improved user-friendliness. # # <!-- <img src="files/images/ipython-screenshot.jpg" width="600"> --> # <img src="images/ipython-screenshot.jpg" width="600"> # # Some of the many useful features of IPython includes: # # * Command history, which can be browsed with the up and down arrows on the keyboard. # * Tab auto-completion. # * In-line editing of code. # * Object introspection, and automatic extract of documentation strings from python objects like classes and functions. # * Good interaction with operating system shell. # * Support for multiple parallel back-end processes, that can run on computing clusters or cloud services like Amazon EC2. # # ### IPython notebook # [IPython notebook](http://ipython.org/notebook.html) is an HTML-based notebook environment for Python, similar to Mathematica or Maple. It is based on the IPython shell, but provides a cell-based environment with great interactivity, where calculations can be organized and documented in a structured way. 
#
# <!-- <img src="files/images/ipython-notebook-screenshot.jpg" width="800"> -->
# <img src="images/ipython-notebook-screenshot.jpg" width="800">
#
# Although using a web browser as graphical interface, IPython notebooks are usually run locally, from the same computer that runs the browser. To start a new IPython notebook session, run the following command:
#
#     $ ipython notebook
#
# from a directory where you want the notebooks to be stored. This will open a new browser window (or a new tab in an existing window) with an index page where existing notebooks are shown and from which new notebooks can be created.

# ### Spyder

# [Spyder](http://code.google.com/p/spyderlib/) is a MATLAB-like IDE for scientific computing with python. It has the many advantages of a traditional IDE environment, for example that everything from code editing, execution and debugging is carried out in a single environment, and work on different calculations can be organized as projects in the IDE environment.
#
# <!-- <img src="files/images/spyder-screenshot.jpg" width="800"> -->
# <img src="images/spyder-screenshot.jpg" width="800">
#
# Some advantages of Spyder:
#
# * Powerful code editor, with syntax high-lighting, dynamic code introspection and integration with the python debugger.
# * Variable explorer, IPython command prompt.
# * Integrated documentation and help.

# ## Versions of Python

# There are currently two versions of python: Python 2 and Python 3. Python 3 will eventually supersede Python 2, but it is not backward-compatible with Python 2. A lot of existing python code and packages have been written for Python 2, and it is still the most wide-spread version. For these lectures either version will be fine, but it is probably easier to stick with Python 2 for now, because it is more readily available via prebuilt packages and binary installers.
#
# To see which version of Python you have, run
#
#     $ python --version
#     Python 2.7.3
#     $ python3.2 --version
#     Python 3.2.3
#
# Several versions of Python can be installed in parallel, as shown above.
#

# ## Installation

# ### Conda

# The best way to set up a scientific Python environment is to use the cross-platform package manager `conda` from Continuum Analytics. First download and install miniconda http://conda.pydata.org/miniconda.html or Anaconda (see below). Next, to install the required libraries for these notebooks, simply run:
#
#     $ conda install ipython ipython-notebook spyder numpy scipy sympy matplotlib cython
#
# This should be sufficient to get a working environment on any platform supported by `conda`.

# ### Linux

# In Ubuntu Linux, to install python and all the requirements run:
#
#     $ sudo apt-get install python ipython ipython-notebook
#     $ sudo apt-get install python-numpy python-scipy python-matplotlib python-sympy
#     $ sudo apt-get install spyder

# ### MacOS X

# *Macports*
#
# Python is included by default in Mac OS X, but for our purposes it will be useful to install a new python environment using [Macports](http://www.macports.org/), because it makes it much easier to install all the required additional packages. Using Macports, we can install what we need with:
#
#     $ sudo port install py27-ipython +pyside+notebook+parallel+scientific
#     $ sudo port install py27-scipy py27-matplotlib py27-sympy
#     $ sudo port install py27-spyder
#
# To associate the commands `python` and `ipython` with the versions installed via macports (instead of the ones that are shipped with Mac OS X), run the following commands:
#
#     $ sudo port select python python27
#     $ sudo port select ipython ipython27
#
# *Fink*
#
# Or, alternatively, you can use the [Fink](http://www.finkproject.org/) package manager.
After installing Fink, use the following command to install python and the packages that we need: # # $ sudo fink install python27 ipython-py27 numpy-py27 matplotlib-py27 scipy-py27 sympy-py27 # $ sudo fink install spyder-mac-py27 # ### Windows # Windows lacks a good packaging system, so the easiest way to setup a Python environment is to install a pre-packaged distribution. Some good alternatives are: # # * [Enthought Python Distribution](http://www.enthought.com/products/epd.php). EPD is a commercial product but is available free for academic use. # * [Anaconda](http://continuum.io/downloads.html). The Anaconda Python distribution comes with many scientific computing and data science packages and is free, including for commercial use and redistribution. It also has add-on products such as Accelerate, IOPro, and MKL Optimizations, which have free trials and are free for academic use. # * [Python(x,y)](http://code.google.com/p/pythonxy/). Fully open source. # # # # #### Note # # EPD and Anaconda are also available for Linux and Max OS X. # ## Further reading # * [Python](http://www.python.org). The official Python web site. # * [Python tutorials](http://docs.python.org/2/tutorial). The official Python tutorials. # * [Think Python](http://www.greenteapress.com/thinkpython). A free book on Python. # ## Python and module versions # Since there are several different versions of Python and each Python package has its own release cycle and version number (for example scipy, numpy, matplotlib, etc., which we installed above and will discuss in detail in the following lectures), it is important for the reproducibility of an IPython notebook to record the versions of all these different software packages. If this is done properly it will be easy to reproduce the environment that was used to run a notebook, but if not it can be hard to know what was used to produce the results in a notebook. 
# # To encourage the practice of recording Python and module versions in notebooks, I've created a simple IPython extension that produces a table with versions numbers of selected software components. I believe that it is a good practice to include this kind of table in every notebook you create. # # To install this IPython extension, use `pip install version_information`: # + jupyter={"outputs_hidden": false} # you only need to do this once # !pip3 install --upgrade version_information # - # or alternatively run (deprecated method): # + active="" # # you only need to do this once # %install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py # - # Now, to load the extension and produce the version table # + jupyter={"outputs_hidden": false} # %load_ext version_information # %version_information numpy, scipy, matplotlib, sympy, version_information # -
001-Jupyter/001-Tutorials/004-Scientific-Python-Lectures/Lecture-0-Scientific-Computing-with-Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # As usual we start loading the packages that we will use in our notebook # + import tensorflow as tf import numpy as np import pandas as pd from sklearn import model_selection from sklearn.preprocessing import LabelEncoder # - #PRINT VERSION!! tf.__version__ train_df = pd.read_csv("train_languages.csv")#here we have the dataset we extracted train_df.head() len(train_df) #we print the length, not a big one but sufficient # A key step is to label encode the target variable from text to number # # + Y = train_df['language'] encoder = LabelEncoder() encoder.fit(Y) Y = encoder.transform(Y) Y = tf.keras.utils.to_categorical( Y, num_classes=4 #equals to the number of languages ) # - # As we mentioned in the slides we will perform the previous text processing steps except for stopword removal. 
# Lower-case the sentences, strip punctuation, and fill missing values so the
# tokenizer receives clean, non-null text.
train_df['sentence_lower'] = train_df["sentence"].str.lower()
# BUG FIX: regex=True is required for the pattern to be treated as a regular
# expression — since pandas 2.0, str.replace defaults to literal matching, which
# would silently leave all punctuation in place. A raw string avoids the
# invalid-escape warning for \w and \s.
train_df['sentence_no_punctuation'] = train_df['sentence_lower'].str.replace(r'[^\w\s]', '', regex=True)
train_df['sentence_no_punctuation'] = train_df["sentence_no_punctuation"].fillna("fillna")

max_features = 5000  # we set maximum number of words to 5000
maxlen = 400  # we set maximum sequence length to 400

tok = tf.keras.preprocessing.text.Tokenizer(num_words=max_features)  # again tokenizer step
tok.fit_on_texts(list(train_df['sentence_no_punctuation']))  # fit to cleaned text
print(len(tok.word_index))

vocab_size = len(tok.word_index) + 1  # this represents the number of words that we tokenize, different from max_features but necessary for
# the definition of the dimension of the embedding space

train_df = tok.texts_to_sequences(list(train_df['sentence_no_punctuation']))  # this is how we create sequences

train_df = tf.keras.preprocessing.sequence.pad_sequences(train_df, maxlen=maxlen)  # let's execute pad step

# +
from sklearn.model_selection import train_test_split  # divide into train and test set
# -

X_train, X_test, y_train, y_test = train_test_split(train_df, Y, test_size=0.1, random_state=42)

embedding_dim = 50  # this is the final dimension of the embedding space.
# Let's write down the model # + model = tf.keras.models.Sequential([ tf.keras.layers.Embedding(input_dim=vocab_size, #embedding input output_dim=embedding_dim,#embedding output input_length=maxlen), #maximum length of an input sequence tf.keras.layers.Flatten(), #flatten layer tf.keras.layers.Dense(4, activation=tf.nn.softmax) #ouput layer a Dense layer with 4 probabilities #we also define our final activation function which is the softmax function typical for multiclass #classifiction problems ]) # + model.compile(optimizer='adam', loss='categorical_crossentropy', #we recommend this loss function you metrics=['accuracy']) # - model.summary() #here we show the architecture model.fit(np.array(X_train), np.array(y_train), epochs=3) #let's fit the model # Remember the train_test_split? now we use the test to evaluate our model model.evaluate(np.array(X_test), np.array(y_test)) # LOOKS LIKE WE HAVE A PERFECT MODEL!! # LET'S TAKE A LOOK AT THE CONFUSION MATRIX OF OUR EVALUATION SET!! from sklearn.metrics import confusion_matrix #we import this package from sklearn and output it predictions = model.predict(X_test) #here we make predictions cm = confusion_matrix(predictions.argmax(axis=1), y_test.argmax(axis=1))#we generate the confusion matrix cm #well this is really perfect! 
# Let's try brand new text #these are the codes for each language in order to evaluate properly print('english', encoder.transform(['english'])) print('french', encoder.transform(['french'])) print('italian', encoder.transform(['italian'])) print('spanish', encoder.transform(['spanish'])) # In this experiment we will predict the language of the same sentence in the different languages #new_text = ["tensorflow is a great tool you can find a lot of tutorials from packt"] #new_text = ["tensorflow est un excellent outil vous pouvez trouver beaucoup de tutoriels de packt"] #new_text = ["tensorflow è un ottimo strumento puoi trovare molti tutorial di packt"] new_text = ["tensorflow es una gran herramienta puedes encontrar muchos tutoriales de packt"] test_text = tok.texts_to_sequences(new_text) #this is how we create sequences test_text = tf.keras.preprocessing.sequence.pad_sequences(test_text, maxlen=maxlen) #let's execute pad step np.set_printoptions(suppress=True) predictions = model.predict(test_text) print(predictions.argmax()) print(predictions) #spanish you can get confused with italian which makes sense since they are more similar languages import wikipedia # Let's build a brand new data set with only spanish and let's see if we recognize it ... 
# language codes
# english: en
# italian: it
# french: fr
# spanish: es

# Build a small out-of-sample evaluation set from the summaries of five random
# Spanish Wikipedia pages.
new_wiki_text = []
wikipedia.set_lang('es')
for i in range(0, 5):
    print(i)
    # BUG FIX: the original except block fetched a replacement title but never
    # retried the append, so a DisambiguationError silently produced fewer than
    # five samples. Retry with a fresh random title until one succeeds.
    while True:
        title = wikipedia.random(1)
        try:
            new_wiki_text.append([wikipedia.page(title).summary])
            break
        except wikipedia.exceptions.DisambiguationError:
            continue  # ambiguous title: draw another random page

new_wiki_text = pd.DataFrame(new_wiki_text)
new_wiki_text.columns = ['sentence']
new_wiki_text

# Apply exactly the same preprocessing that was used on the training data.
new_wiki_text['sentence_lower'] = new_wiki_text["sentence"].str.lower()
# regex=True keeps the pattern a regular expression under pandas >= 2.0
new_wiki_text['sentence_no_punctuation'] = new_wiki_text['sentence_lower'].str.replace(r'[^\w\s]', '', regex=True)
new_wiki_text['sentence_no_punctuation'] = new_wiki_text["sentence_no_punctuation"].fillna("fillna")

np.set_printoptions(suppress=True)
test_wiki_text = tok.texts_to_sequences(list(new_wiki_text['sentence_no_punctuation']))  # this is how we create sequences
test_wiki_text = tf.keras.preprocessing.sequence.pad_sequences(test_wiki_text, maxlen=maxlen)  # let's execute pad step

predictions = model.predict(test_wiki_text)
print(predictions)

print('english', encoder.transform(['english']))
print('french', encoder.transform(['french']))
print('italian', encoder.transform(['italian']))
print('spanish', encoder.transform(['spanish']))

# WE DID A GOOD JOB!!
classifier_language_recognition-tensor-flow-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2.7.16 64-bit ('MacOS') # language: python # name: python3 # --- # # Introduction to Programming # # Topics for today will include: # - Demo Pygame # - Pep8 # - Extensions # - Improving Problem Solving # ## Demo Pygame # --- # A big part of being a computer scientist is understanding documentation and being able to navigate and use new tools and tooling as it comes about. At the end of last class we did that a little ourselves. We're going to go through one of the pygame tutorials and see how they go about using the library. # # I'm going to go through and demonstrate some of the things that are needed to get up and running. Here are the links that I'll be going through. They are pretty good guides but they do demonstrate the level of technicality that these docs are written to. It's a great example of the attention to detail that is required at times to learn new technologies and implement new tools. # # - https://www.pygame.org/wiki/GettingStarted # - https://www.pygame.org/wiki/tutorials # - https://coderslegacy.com/python/pygame-platformer-game-development/ # ## [Pep8](https://www.python.org/dev/peps/pep-0008/) # --- # # Pep8 is a style standard for the Python language. Style is important because good style can make code easier to understand and debug when things go wrong. It also makes it so that when we start to collaborate on code and write code in others modules that it's uniform. If two people write their code in a similar manner that makes it so that the code can be written in about half the time. # # Some of the important pieces that PEP8 tries to regulate are as follows: # # - Variable, Class, Module, File Name, and Function naming conventions # - Tabs vs Spaces # - Maximum Line Length # - Importing Modules # - Commentting and Documenting Code # - And a lot more. 
# # For the rest of the semester we'll be trying to adhere to some of the things that PEP8 requires of programmers. # # # # ## Extensions # --- # Something that hinges on the previous topic that we haven't gone over in great detail are exploiting Extensions. This is for a good reason. It's often thought that you should learn how to do things from scratch before you learn the shortcuts. This way you gain an appreciation for the way things are and the tools that you're using. # # For this reason up till now I haven't recommended any extensions. I personally don't typically use a ton of extensions but below are four that make a significant difference in my experience for different reasons. # # First we have SonarLint. Before we talk about SonarLint we need to talk about what a code linter is. A code linter runs through your code and looks for problems in it. The reason that this is useful is that it does it before the code even runs. So that mean syntatical errors, cyclomatic complexity (I'll explain in class), runtime errors, naming errors, etc... Can be sniffed and snuffed out ahead of time saving you the hastle of having to debug things. # # This is probably the most useful of the components shown. As well as the one I'd say I require you to install. # # - https://www.sonarlint.org # - https://marketplace.visualstudio.com/items?itemName=oderwat.indent-rainbow # - https://marketplace.visualstudio.com/items?itemName=mhutchie.git-graph # - https://marketplace.visualstudio.com/items?itemName=eamodio.gitlens # # # ## Improving Problem Solving [(Example for class)](https://www.hackerrank.com/challenges/list-comprehensions/problem) # --- # This section is something that typically isn't taught in a CS course and is more of a personal thought to include. That being said this isn't what has to be done and may not work in all scenarios. 
This is more a note to the piece of CS that isn't typically talked about but has a place in every conversation regarding computing, tech, and the world in general. We need a method to solve problems. We all don't attack problems the same but the following is how I typically approach issues.
#
# ### I have a problem and I've read through it
# ---
# At this point I either understand the question fully, I have some idea of what we're doing, I'm clueless, or I'm reading the question again because I understood nothing the first time and it's because of my inability to read and not because I can't solve the problem.
#
#
# ### When we get a problem we need to understand what we're doing.
# So I have a conundrum, I don't understand the problem. Do I understand parts of the problem? Am I close to understanding it and am just missing a couple pieces? Have I done something like this before that I can equate this to? How can I get myself to the point where I understand what this is trying to tell me?
#
# ### Break the problem down.
# Let's start by finding the seams in the problem. That way we can break the problem down into smaller problems. Often we'll find that complex things are made up of a bunch of smaller complex things. All this to say it's hard to see an elephant for what it is standing 3 inches away from the elephant. We need to step back and see the elephant, or in the case of this bad metaphor, our problem for what it is: a bunch of smaller problems.
#
# ### We get more manageable pieces, then figure out what each part needs to do.
# Now we have more manageable pieces. We can start figuring out how to tend to all of the smaller pieces. The cycle may restart here if one of the pieces is still complex, but you're now closer to solving the issue that you're having. You have this smaller hurdle to jump over than you did before.
#
# We're gonna continue as if our pieces are all small enough though. We've got manageable pieces. We know what they're each responsible for.
Now it's time to start building these things out.
#
# ### Get it into terms we can understand
# Ok so we have our "manageable" piece. Sometimes you're doing something that you're weak in or just can't seem to understand a topic. Sometimes you just need to get it related to something that you do understand.
#
# Both an artist and a mathematician can draw a square. They care about different things but generally they both know what a square is. Most of the time, to start, you just need to generally understand what you need to do. So if you're the mathematician or the artist trying to understand something in the other realm, the fine details can come later as long as you can get a general understanding.
#
# ### Scale down
# This part is probably my favorite and most comforting step. My problem seems insurmountable. I don't know how I could ever do it.
#
# Let's scale it down. I need to find out if 47,055,833,459 is a prime number. *cough cough* ~This is your assignment (sorta)~ That number is ridiculous. Let's start with a number I know is prime like 2 or 5 or 7. I'm going to find out if these smaller numbers are prime and ignore the big number for now.
#
# With this we can learn a valuable lesson. We're reading between the lines. We need something that discovers prime numbers. Not large prime numbers. Just prime numbers. I say this to demonstrate that we need to look at the simple case for this. The big number is there to scare you. You need to focus on the process and the solution. If your process is sound the size of the problem becomes less of an issue.
#
# ![Car example](https://hackernoon.com/hn-images/1*1L6_1aPP-_kFDAfbt9DkUQ.jpeg)
#
# ### Find a solution
# Ok I've scaled my issue down. I'm going to find out if numbers from 1-10 are prime or composite. Now I can start implementing a solution. This part is super important to think about. How is this done now? Has anyone done this or something like it?
# # Now we don't want to take answers but we know how to determine if a number is prime or not. Now we need to abstract that solution so that I can give it any number. Meaning that my solution has to be dynamic. I can't have hard coded numbers in there. We also need to handle edge cases. Is 1 a prime number? (No.) # # ### Scale up # Now that we have our solution and we've tested it on the small scale level, it's time to attack our big scary number. # # 47,055,833,459 # # We may be successful here. We may have to scale back down and tweak our solution with a bigger test range. What's important is that we've made a lot of progress. Time to tinker till we get it right. # # ### Fix edge cases (Sometimes) # Even when our code is right and working sometimes we have edge cases that we didn't account for. # # Did you test negative numbers? # # Did you test 0? # # Edge/Fringe cases will always exist so don't feel too bad if you don't get them all on the first pass.
JupyterNotebooks/Lessons/Lesson 7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Part3. Predictive-Modeling # ref : https://www.kaggle.com/ash316/eda-to-prediction-dietanic # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import seaborn as sns plt.style.use('fivethirtyeight') import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # + from sklearn.model_selection import train_test_split from sklearn import svm from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn import metrics # - # ## Load & Split preprocessed dataset # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" data = pd.read_csv('/kaggle/input/tatanic-train-preprocessed/train_preprocessed.csv') data.head(2) # + X_train, X_val, y_train, y_val = train_test_split(data.iloc[:, 1:], data['Survived'], stratify=data['Survived'], test_size=0.3, random_state=0) # Check the balance of labels display(y_train.value_counts(normalize=True)) display(y_val.value_counts(normalize=True)) display(X_train.head(2)) # - # ## model 1. 
Radial Support Vector Machines(rbf-SVM)

# +
# Fit each candidate model on the training split and report hold-out accuracy.
svm_clf = svm.SVC(C=1.0, kernel='rbf', gamma=0.1)
svm_clf.fit(X_train, y_train)
prediction = svm_clf.predict(X_val)
score = metrics.accuracy_score(y_val, prediction)
print('accuracy of rbf-SVM:', score)
# -

# ## model 2. Linear Support Vector Machine(linear-SVM)

# +
svm_clf = svm.SVC(C=0.1, kernel='linear', gamma=0.1)
svm_clf.fit(X_train, y_train)
prediction = svm_clf.predict(X_val)
score = metrics.accuracy_score(y_val, prediction)
print('accuracy of linear-SVM:', score)
# -

# ## model 3. Logistic Regression

# +
logistic_clf = LogisticRegression(max_iter=1000)
logistic_clf.fit(X_train, y_train)
prediction = logistic_clf.predict(X_val)
score = metrics.accuracy_score(y_val, prediction)
print('accuracy of logistic regression:', score)
# -

# ## model 4. Decision Tree

# +
dt_clf = DecisionTreeClassifier()
dt_clf.fit(X_train, y_train)
prediction = dt_clf.predict(X_val)
score = metrics.accuracy_score(y_val, prediction)
print('accuracy of Decision Tree:', score)
# -

# ## model 5. K-Nearest Neighbors(KNN)

# +
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_train)
prediction = knn_clf.predict(X_val)
score = metrics.accuracy_score(y_val, prediction)
print('accuracy of KNN:', score)
# -

# The accuracy of KNN model changes as the **n_neighbors** parameter changes. The default value of n_neighbors is 5. We can compare the accuracy of KNN models with different n_neighbors parameters.

# +
# Sweep n_neighbors over 1..10 and plot the validation accuracy of each setting.
acc_list = []
for tmp_param in range(1, 11):
    tmp_knn_clf = KNeighborsClassifier(n_neighbors = tmp_param)
    tmp_knn_clf.fit(X_train, y_train)
    tmp_acc = metrics.accuracy_score(y_val, tmp_knn_clf.predict(X_val))
    acc_list.append(tmp_acc)

plt.plot(range(1,11), acc_list)
plt.xticks(range(1,11))
plt.show()
print(f'>>> Accuracy list: {acc_list}')
print(f'>>> Maximum accuracy: {max(acc_list)}')
# -

# ## model 6.
Gaussian Naive Bayes

# +
nb_clf = GaussianNB()
nb_clf.fit(X_train, y_train)
prediction = nb_clf.predict(X_val)
score = metrics.accuracy_score(y_val, prediction)
print('accuracy of Naive Bayes:', score)
# -

# ## model 7. Random Forest

# +
rf_clf = RandomForestClassifier(n_estimators=100)
rf_clf.fit(X_train, y_train)
prediction = rf_clf.predict(X_val)
score = metrics.accuracy_score(y_val, prediction)
print('accuracy of Random Forest:', score)
# -

# The validation accuracy of models is not the only metric to decide best model. The robustness is also important. The accuracy of a model changes as new data is input. This is known as **model variance**.<br><br>
# To overcome this and get a generalized(robust) model, we use **Cross Validation**, more specifically **K-fold Cross Validation**.

# ## Cross Validation

from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score

# +
# 10-fold stratified cross-validation over all seven candidate models, fitted
# on the FULL dataset; collect per-fold accuracies plus their mean and std.
kfold = StratifiedKFold(n_splits=10, random_state=22, shuffle=True)

model_names = ['rbf-SVM', 'linear-SVM', 'logistic regression', 'Decision Tree', 'KNN', 'Naive Bayes', 'Random Forest']
models = [svm.SVC(C=1.0, kernel='rbf', gamma=0.1), svm.SVC(C=0.1, kernel='linear', gamma=0.1),
          LogisticRegression(max_iter=1000), DecisionTreeClassifier(), KNeighborsClassifier(n_neighbors=3),
          GaussianNB(), RandomForestClassifier(n_estimators=100)]

acc_list = []
acc_mean_list = []
acc_std_list = []
for tmp_model in models:
    cv_result = cross_val_score(tmp_model, data.iloc[:, 1:], data['Survived'], cv=kfold, scoring='accuracy')
    acc_list.append(cv_result)
    acc_mean_list.append(cv_result.mean())
    acc_std_list.append(cv_result.std())

pd.DataFrame({'CV_mean':acc_mean_list, 'CV_std':acc_std_list}, index=model_names).sort_values('CV_mean')
# -

# +
plt.figure(figsize=(15, 7))

# Draw boxplot of each model
# A CV_mean column is appended temporarily so the boxplots can be sorted by it.
tmp_df = pd.concat([pd.DataFrame(acc_list, index=model_names), pd.DataFrame({'CV_mean':acc_mean_list}, index=model_names)], axis=1)
tmp_df.sort_values('CV_mean').drop(columns='CV_mean').T.boxplot() # Annotate CV_mean on each boxplot acc_mean_sorted = sorted(acc_mean_list) plt.scatter(range(1,8), acc_mean_sorted, color='red') for tmp_x, tmp_y in zip(np.arange(1,8), acc_mean_sorted): plt.text(tmp_x+0.1, tmp_y, s=f'CV_mean\n{tmp_y:.2f}') plt.show() # - # - Top-4 models in regard to accuracy are **Random Forest, Naive Bayes, linear-SVM, logistic regression**. Random Forest seems most robust among them. # - However, only using classification accuracy is sometimes misleading due to data imbalance. We can get a better version of summarized result with the help of **confusion matrix**, which shows how many mistakes the model made in each case. # ## Confusion Matrix # + fig, ax = plt.subplots(3, 3, figsize=(12, 10)) for tmp_ax, tmp_model, tmp_name in zip(ax.ravel(), models, model_names): y_pred = cross_val_predict(tmp_model, data.iloc[:, 1:], data['Survived'], cv=kfold) sns.heatmap(metrics.confusion_matrix(data['Survived'], y_pred), ax=tmp_ax, annot=True, fmt='2.0f') tmp_ax.set_title(f'Matrix for {tmp_name}', size=15) plt.subplots_adjust(hspace=0.3,wspace=0.3) plt.show() # - # - The left diagonal of confusion matrix shows the number of correct predictions while the right one shows wrong predictions. # - Looking all the matrices, **linear-SVM** is good at predicting deceased passengers and **Naive Bayes** is good at predicting survived passengers. # ## Hyper-parameter Tuning 1. Naive Bayes # I will tune the hyper-parameters for the 4 best models i.e. Naive Bayes, Random Forest, SVM and Logistic Regression # + from sklearn.model_selection import GridSearchCV param={'var_smoothing':[1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10]} gd = GridSearchCV(estimator=GaussianNB(), param_grid=param, verbose=True, n_jobs=-1) gd.fit(data.iloc[:, 1:], data['Survived']) print('>>> best score :', gd.best_score_) print('>>> best param set :', gd.best_estimator_) # - # ## Hyper-parameter Tuning 2. 
Random Forest # + n_estimators = range(100, 1000, 100) min_samples_split = [1, 2, 3, 5, 7, 10] max_features = ['sqrt', 'log2'] bootstrap = [True, False] param = {'n_estimators':n_estimators, 'min_samples_split':min_samples_split, 'max_features':max_features, 'bootstrap':bootstrap} gd = GridSearchCV(estimator=RandomForestClassifier(random_state=0), param_grid=param, verbose=True, n_jobs=-1) gd.fit(data.iloc[:, 1:], data['Survived']) print('>>> best score :', gd.best_score_) print('>>> best param set :', gd.best_estimator_) # - # ## Hyper-parameter Tuning 3. SVM # + C = [(i+1)/10 for i in range(10)] + [0.05, 0.25] gamma = [(i+1)/10 for i in range(10)] kernel = ['rbf', 'linear'] param = {'C':C, 'gamma':gamma, 'kernel':kernel} gd = GridSearchCV(estimator=svm.SVC(random_state=0), param_grid=param, verbose=True, n_jobs=-1) gd.fit(data.iloc[:, 1:], data['Survived']) print('>>> best score :', gd.best_score_) print('>>> best param set :', gd.best_estimator_) # - # ## Hyper-parameter Tuning 4. Logistic Regression # + penalty = ['l1', 'l2', 'elasticnet'] C = [0.25, 0.5, 0.75, 1.0, 1.25, 1.5] solver = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'] max_iter = [1000, 1500, 2000] param={'penalty':penalty, 'C':C, 'solver':solver, 'max_iter':max_iter} gd = GridSearchCV(estimator=LogisticRegression(random_state=0), param_grid=param, verbose=True, n_jobs=-1) gd.fit(data.iloc[:, 1:], data['Survived']) print('>>> best score :', gd.best_score_) print('>>> best param set :', gd.best_estimator_) # - # - The best score of Naive Bayes is **81.37%** with **var_smoothing=1e-05** # - The best score of Random Forest is **84.06%** with **n_estimators=900, min_samples_split=10, max_features='sqrt' and bootstrap=True** # - The best score of SVM is **82.94%** with **C=0.5, gamma=0.1 and kernel='linear'** # - The best score of Logistic Regression is **82.38%** with **penalty='l2', C=0.75, solver='lbfgs' and max_iter=1000** # ## Ensembling # - Ensembling is a good way to increase the accuracy, 
stability or performance of a model. In simple words, it is the combination of various simple models to create a single powerful model. # - Ensembling can be done in ways like: **Voting Classifier, Bagging, Boosting**. # ## Ensembling 1. Voting Classifier # - It is the simplest way of combining predictions from many different simple machine learning models. It gives an average prediction result based on the prediction of all the submodels. The submodels or the basemodels are all of different types. # + from sklearn.model_selection import cross_val_score from sklearn.model_selection import StratifiedKFold from sklearn.ensemble import VotingClassifier ensemble_clf = VotingClassifier(estimators=[('SVM', svm.SVC(C=0.5, gamma=0.1, kernel='linear', probability=True)), ('LR', LogisticRegression(random_state=0, penalty='l2', C=0.75, solver='lbfgs', max_iter=1000)), ('DT', DecisionTreeClassifier()), ('KNN', KNeighborsClassifier(n_neighbors=3)), ('NB', GaussianNB(var_smoothing=1e-05)), ('RanFor', RandomForestClassifier(n_estimators=900, min_samples_split=10, max_features='sqrt', bootstrap=True))], voting='soft', n_jobs=-1) kfold = StratifiedKFold(n_splits=10, random_state=22, shuffle=True) cv_score = cross_val_score(ensemble_clf, X=data.iloc[:, 1:], y=data['Survived'], cv=kfold, scoring='accuracy', n_jobs=-1) print('>>> The cross validation accuracy:', cv_score.mean()) ensemble_clf.fit(X_train, y_train) print('>>> The validation accuracy:', ensemble_clf.score(X_val, y_val)) # - # ## Ensembling 2. Bagging # - Bagging works by applying similar classifiers on small partitions of the dataset and then taking the average of all the predictions. Due to the averaging, there is reduction in variance. Unlike Voting Classifier, Bagging makes use of same classifiers. # - Bagging works best with models with high variance. An example for this can be Decision Tree or Random Forest. 
# #### 1) Bagged KNN # + from sklearn.ensemble import BaggingClassifier ensemble_clf = BaggingClassifier(base_estimator=KNeighborsClassifier(n_neighbors=3), n_estimators=700, max_samples=1.0, max_features=1.0, random_state=0, n_jobs=-1) kfold = StratifiedKFold(n_splits=10, random_state=22, shuffle=True) cv_score = cross_val_score(ensemble_clf, X=data.iloc[:, 1:], y=data['Survived'], cv=kfold, scoring='accuracy') print('>>> The cross validation accuracy of Bagged KNN:', cv_score.mean()) ensemble_clf.fit(X_train, y_train) prediction = ensemble_clf.predict(X_val) print('>>> The validation accuracy of Bagged KNN:', metrics.accuracy_score(y_val, prediction)) # - # #### 2) Bagged Decision Tree # + ensemble_clf = BaggingClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, max_samples=1.0, max_features=1.0, random_state=0, n_jobs=-1) kfold = StratifiedKFold(n_splits=10, random_state=22, shuffle=True) cv_score = cross_val_score(ensemble_clf, X=data.iloc[:, 1:], y=data['Survived'], cv=kfold, scoring='accuracy') print('>>> The cross validation accuracy of Bagged Decision Tree:', cv_score.mean()) ensemble_clf.fit(X_train, y_train) prediction = ensemble_clf.predict(X_val) print('>>> The validation accuracy of Bagged Decision Tree:', metrics.accuracy_score(y_val, prediction)) # - # ## Ensembling 3. Boosting # - Boosting is an ensembling technique which uses sequential learning of classifiers. It is a step-by-step enhancement of a weak model. # - Boosting works as follows: A model is first trained on the complete dataset. Then the model will get some instances right while some wrong. Now in the next iteration, the learner will focus more on the wrongly predicted instances or give more weight to them. Thus, it will try to predict the wrong instance correctly. Now this iterative process continues, and the model will keep developing until the limit is reached on the accuracy. 
# #### 1) AdaBoost (Adaptive Boosting) # - The weak learner or estimator in this case is a Decision Tree. But we can change the default base_estimator to any algorithm of our choice. # + from sklearn.model_selection import cross_val_score from sklearn.model_selection import StratifiedKFold from sklearn.ensemble import AdaBoostClassifier ada = AdaBoostClassifier(n_estimators=200, learning_rate=0.1, random_state=0) kfold = StratifiedKFold(n_splits=10, random_state=22, shuffle=True) cv_score = cross_val_score(ada, data.iloc[:, 1:], data['Survived'], scoring='accuracy', cv=kfold, n_jobs=-1) print('>>> The cross validation accuracy of AdaBoost(DT):', cv_score.mean()) # - # #### 2) Stochastic Gradient Boosting # - Here, the weak learner is also a Decision Tree. # + from sklearn.ensemble import GradientBoostingClassifier grad = GradientBoostingClassifier(n_estimators=500, learning_rate=0.1, random_state=0) kfold = StratifiedKFold(n_splits=10, random_state=22, shuffle=True) cv_score = cross_val_score(grad, data.iloc[:, 1:], data['Survived'], scoring='accuracy', cv=kfold, n_jobs=-1) print('>>> The cross validation accuracy of Stochastic Gradient Boosting(DT):', cv_score.mean()) # - # #### 3) XGBoost # + import xgboost as xgb xgb_clf = xgb.XGBClassifier(learning_rate=0.1, n_jobs=-1, random_state=0, n_estimators=900) kfold = StratifiedKFold(n_splits=10, random_state=22, shuffle=True) cv_score = cross_val_score(xgb_clf, data.iloc[:, 1:], data['Survived'], scoring='accuracy', cv=kfold, n_jobs=-1) print('>>> The cross validation accuracy of XGBoost:', cv_score.mean()) # - # ## Hyper-parameter Tuning for Stochastic Gradient Boosting # - The Stochastic Gradient Boosting model showed best accuracy(0.8339). So I'll try to increase the accuracy of it by Hyper-parameter Tuning. 
# + from sklearn.model_selection import GridSearchCV param = { 'learning_rate':[0.05, 0.1, 0.4, 0.7, 1.0], 'n_estimators':list(range(200, 1100, 200)), 'subsample':[0.4, 0.7, 1.0], 'min_samples_split':[2, 6, 10], 'max_depth':[2, 3, 5] } kfold5 = StratifiedKFold(n_splits=5, shuffle=True, random_state=22) gd = GridSearchCV(estimator=GradientBoostingClassifier(random_state=0), param_grid=param, scoring='f1', n_jobs=-1, cv=kfold5, verbose=True) gd.fit(data.iloc[:,1:], data.Survived) print('>>> best score:', gd.best_score_) print('>>> best estimator:', gd.best_estimator_) # - # - The maximum score(f1 score) with Stochastic Gradient Boosting is 0.759 (f1 score)<br> # (Hyper-parameters : **learning_rate=0.05, n_estimators=800, subsample=1.0, min_samples_split=2, max_depth=3**) # #### Confusion Matrix for the best model # + from sklearn.model_selection import cross_val_predict from sklearn.metrics import confusion_matrix grad = GradientBoostingClassifier(learning_rate=0.05, n_estimators=800, subsample=1.0, min_samples_split=2, max_depth=3) prediction = cross_val_predict(grad, data.iloc[:, 1:], data.Survived, cv=kfold5) sns.heatmap(confusion_matrix(data.Survived, prediction), cmap='winter', annot=True, fmt='2.0f') plt.show() # - # ## Feature Importance # + grad.fit(data.iloc[:,1:], data.Survived) plt.figure(figsize=(8, 6)) pd.Series(data=grad.feature_importances_, index=data.iloc[:,1:].columns).\ sort_values(ascending=True).plot.barh(width=0.8) plt.title('Feature Importance of Stochastic Gradient Boosting') plt.show() # - # - Most important features are Initial_Mr, Fare, Pclass and Age # - The Sex feature doesn't seem to give high importance, which is surprising because we observed that the Sex feature combined with some other features like Pclass was giving a good differentiating factor. However, we have already seen the meaningful correlation between Sex and Initial, so they both refer to the gender.
kaggle_titanic/200519_Titanic_study-03_Predictive-Modeling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from pyspark import SparkConf, SparkContext #conf = SparkConf().setMaster("yarn") sc = SparkContext() sc text = sc.textFile("hdfs://192.168.1.100:9000/csv/student.csv") first = text.first() first data = text.filter(lambda x: x != first) data.first() data = data.map(lambda x: x.split(",")) data.take(1) data.map(lambda x: x[0:3] + x[3:5] + x[19:]).take(3) first def to_int(x): for i, j in enumerate(x[3:-2]): x[i + 3] = int(j) return x def student_filter(school=None, div=None, level=None, grade=None, gender=None, loc=None, year=None): filter_data = data if school: filter_data = filter_data.filter(lambda x: x[0] == school) if div: filter_data = filter_data.filter(lambda x: x[1] == div) if level: filter_data = filter_data.filter(lambda x: x[2] == level) if loc: filter_data = filter_data.filter(lambda x: x[19] == loc) if year: filter_data = filter_data.filter(lambda x: x[20] == year) if grade: if grade == "一年級": filter_data = filter_data.map(lambda x: x[0:5] + x[19:]) elif grade == "二年級": filter_data = filter_data.map(lambda x: x[0:3] + x[5:7] + x[19:]) elif grade == "三年級": filter_data = filter_data.map(lambda x: x[0:3] + x[7:9] + x[19:]) elif grade == "四年級": filter_data = filter_data.map(lambda x: x[0:3] + x[9:11] + x[19:]) elif grade == "五年級": filter_data = filter_data.map(lambda x: x[0:3] + x[11:13] + x[19:]) elif grade == "六年級": filter_data = filter_data.map(lambda x: x[0:3] + x[13:15] + x[19:]) elif grade == "七年級": filter_data = filter_data.map(lambda x: x[0:3] + x[15:17] + x[19:]) elif grade == "延修生": filter_data = filter_data.map(lambda x: x[0:3] + x[17:]) if gender == "男生": filter_data = filter_data.map(lambda x: x[0:4] + x[5:]) elif gender == "女生": filter_data = filter_data.map(lambda x: x[0:3] + x[4:]) if grade == None and gender: if gender == 
"男生": filter_data = filter_data.map(lambda x: x[0:3] + x[3:18:2] + x[19:]) elif gender == "女生": filter_data = filter_data.map(lambda x: x[0:3] + x[4:19:2] + x[19:]) filter_data = filter_data.map(to_int) return filter_data.map(lambda x: x[0:3] + [sum(x[3:-2])] + x[-2:]).collect() def initializaion(): textRDD = sc.textFile("hdfs://name:9000/csv/student.csv") text1 = textRDD.collect() textList = [] for data1 in text1: textList.append(data1.split(',')) tableRDD = sc.parallelize(textList) return tableRDD student_filter(school='國立臺灣大學', gender='男生') student_filter(school='國立臺灣大學', grade='一年級', gender='男生')
notebook/spark.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="mD5BJNj3tbL7" # # Retraining a pretrained network # # Here we'll take an in-depth look at how we can use dtoolAI to help with retraining a pre-trained image recognition network on new types of images. Using a pre-trained network makes training much faster. # # We're going to load a network trained on the ImageNet <http://www.image-net.org/> dataset, a large collection of images with 1000 different labels. We'll then retrain our network on new data. # # + [markdown] colab_type="text" id="FrV77IZjtp0d" # ## Loading and examining data # # To provide a simple example, we've created a small DataSet containing just two categories of images from the CalTech 101 objects <http://www.vision.caltech.edu/Image_Datasets/Caltech101/> dataset. If you'd like to do this using your own data, the second half of the dtoolAI documentation on retraining: <https://dtoolai.readthedocs.io/en/latest/retraining.html#part-2-with-raw-data> explains how to prepare data. 
# # Let's load the dataset: # + colab={} colab_type="code" id="r-_ev0VMtbL9" from dtoolai.data import ImageDataSet, scaled_float_array_to_pil_image # + colab={} colab_type="code" id="5_e4_r9btbME" train_ds = ImageDataSet("http://bit.ly/3aRvimq") # + [markdown] colab_type="text" id="V5gD39-buvfF" # We can look at the metadata associated with this training DataSet: # + colab={} colab_type="code" id="ZBZTc1B9ulXv" print(train_ds.dataset.get_readme_content()) # + [markdown] colab_type="text" id="JilHlilLtbML" # Now we can extract a single image and label to look at: # + colab={} colab_type="code" id="Ub_IDDIctbMM" imarray, label = train_ds[0] # + colab={} colab_type="code" id="F4o6WUXltbMQ" scaled_float_array_to_pil_image(imarray) # + [markdown] colab_type="text" id="UxrhBiD_tbMW" # We can check this images label, both numerically: # + colab={} colab_type="code" id="J1jc8xxjtbMX" label # + [markdown] colab_type="text" id="1wQzsOeBtbMb" # and, by looking up the categorical encoding, work out what this means: # + colab={} colab_type="code" id="bQteuocytbMc" train_ds.cat_encoding # + [markdown] colab_type="text" id="uZIvdylmtbMh" # or we can look at another example: # + colab={} colab_type="code" id="583Ff68CtbMi" imarray, label = train_ds[3] scaled_float_array_to_pil_image(imarray) # + [markdown] colab_type="text" id="1P9e74MEtbMm" # ## Setting parameters # # Before training, we'll need to set some parameters. We do this using dtoolAI's ``Parameters`` class, which provides support for recording these parameters automatically during model training. # + colab={} colab_type="code" id="XKCvdJtxtbMm" from dtoolai.parameters import Parameters # + [markdown] colab_type="text" id="Ush-BDQDtbMp" # We need to make sure that we tell the model we'll create how many categories # it will need to classify. This corresponds to the size of the category encoding # in our input dataset. 
# + colab={} colab_type="code" id="arJGnSFwtbMr" init_params = { 'n_outputs': len(train_ds.cat_encoding) } params = Parameters( batch_size=4, learning_rate=0.001, n_epochs=1, init_params=init_params ) # + [markdown] colab_type="text" id="7mO8T2t1whQG" # ## Loading a pretrained model # + [markdown] colab_type="text" id="9pWWPCPNtbMu" # Then we load our pretrained model. We're using ResNet <https://arxiv.org/abs/1512.03385>, with a new classifier added at the end. # + colab={} colab_type="code" id="tOTEYdgatbMv" from dtoolai.models import ResNet18Pretrained model = ResNet18Pretrained(**init_params) # + [markdown] colab_type="text" id="6BP1ZS_ftbM3" # Now we need to set a loss function and an optimiser: # + colab={} colab_type="code" id="CaC4LHXStbM4" import torch loss_fn = torch.nn.CrossEntropyLoss() optim = torch.optim.SGD(model.parameters(), lr=params.learning_rate) # + [markdown] colab_type="text" id="gdvLFuOsvYje" # ## Retraining the model # # Now we're ready to retrain the model on our new data. # # First we'll import the functions dtoolAI provides to support training/retraining: # # # + colab={} colab_type="code" id="ErtlAfTftbM7" from dtoolai.training import train_model_with_metadata_capture from dtoolcore import DerivedDataSetCreator # + [markdown] colab_type="text" id="IGngFa_5vfN8" # We'll need to create a directory to which we can write our trained model: # + colab={} colab_type="code" id="mCQd36sptbM-" import os os.mkdir("../scratch") # + [markdown] colab_type="text" id="YlGr6mIfvlz5" # Now we're ready to train our model. 
This might take a few minutes!: # + colab={} colab_type="code" id="k5QGFXMStbNC" with DerivedDataSetCreator('twocat.image.model', '../scratch', train_ds) as output_ds: train_model_with_metadata_capture(model, train_ds, optim, loss_fn, params, output_ds) # + [markdown] colab_type="text" id="qGNOJV-YtbNE" # ## Evaluating the retrained model # + [markdown] colab_type="text" id="DR4ggWyLtsNN" # To evaluate the model, we can take advantage of the way the two category DataSet has been created. Some of the images are marked as training data, and some as test data. When we loaded the data earlier, we got the training set, now we can load the test set: # + colab={} colab_type="code" id="7nSqhEwGweDg" test_ds = ImageDataSet("http://bit.ly/3aRvimq", usetype="test") # + [markdown] colab_type="text" id="GjXhgrsjxm22" # We can check that the train and test DataSets have different sizes: # + colab={} colab_type="code" id="QtmlNKN3xFlU" print(f"Training dataset has {len(train_ds)} items, test dataset has {len(test_ds)}") # + [markdown] colab_type="text" id="HFGfJPmCy8Wk" # Now we can use a helper function to evaluate our model: # + colab={} colab_type="code" id="TRnS7LhaxQTt" from dtoolai.utils import evaluate_model from torch.utils.data import DataLoader # + [markdown] colab_type="text" id="RKjYPmGBzKvi" # Then we run the evaluation: # + colab={} colab_type="code" id="HaryWFSRx9-_" test_dl = DataLoader(test_ds) correct = evaluate_model(model, test_dl) print(f"Model correct on {correct} out of {len(test_ds)} items") # + [markdown] colab_type="text" id="o92Dig4_yURm" # ## Improving the model # # The model only trained for a single epoch. Let's see if we can improve its performance by training for longer. 
# # First we'll change our parameters to train for 5 epochs, rather than one, then create a new retrained model: # + colab={} colab_type="code" id="kn6Ze6c-ybHw" params = Parameters( batch_size=4, learning_rate=0.001, n_epochs=5, init_params=init_params ) with DerivedDataSetCreator('twocat.image.model.5', '../scratch', train_ds) as output_ds: train_model_with_metadata_capture(model, train_ds, optim, loss_fn, params, output_ds) # + [markdown] colab_type="text" id="CAzmCkZay5z8" # Now we can evaluate our new model: # + colab={} colab_type="code" id="1gPoDh5hztBf" correct = evaluate_model(model, test_dl) print(f"Model correct on {correct} out of {len(test_ds)} items") # + [markdown] colab_type="text" id="8htGUA-6zw2E" # Much better! # + [markdown] colab_type="text" id="MA4rddI7z6H9" # ## Applying the model to a new image # # Let's try applying our model to a new image. You can use the example image below, an image of a hedgehog from wikipedia, or find your own. If it's not a hedgehog or a llama, it might confuse the model though! 
# # First we'll need some libraries to load the image: # + colab={} colab_type="code" id="N3QaZioj0jL0" from imageio import imread from PIL import Image # + [markdown] colab_type="text" id="qnX8oiDC73BX" # Now we can load the image from a URL: # + colab={} colab_type="code" id="pq45mGQK0ddN" imarray = imread("https://upload.wikimedia.org/wikipedia/commons/7/72/Igel.JPG") image = Image.fromarray(imarray) # + [markdown] colab_type="text" id="DGnPIcgb8Hso" # now we can look at it: # + colab={} colab_type="code" id="bLMQ2DCP8Leo" image # + [markdown] colab_type="text" id="hWkMIFNA8RYQ" # Now let's load the model that we trained: # + colab={} colab_type="code" id="Vy1GZewJ0uU-" from dtoolai.trained import TrainedTorchModel # + colab={} colab_type="code" id="jIH-0CL43aOR" model = TrainedTorchModel("../scratch/twocat.image.model") # + [markdown] colab_type="text" id="v7NFYQt_8U9V" # We need to do some work to convert the image format into that which the model expects. dtoolAI has recorded the image dimensions used by the model in the model's metadata, so we can retrieve these to use for the conversion: # + colab={} colab_type="code" id="Mj16T_wB3wDD" dim = model.model_params['input_dim'] channels = model.model_params['input_channels'] input_format = [channels, dim, dim] # + [markdown] colab_type="text" id="REzul5Rd8jxt" # Now we'll load helper functions to convert the image: # + colab={} colab_type="code" id="mo4h066L6WB7" from dtoolai.imageutils import coerce_to_target_dim from torchvision.transforms.functional import to_tensor resized_converted = coerce_to_target_dim(image, input_format) as_tensor = to_tensor(resized_converted) # + [markdown] colab_type="text" id="jLsoRB4J8qMM" # Then we can use the model to categorise the image: # + colab={} colab_type="code" id="k48zx8pC6vlu" result = model.predict(as_tensor[None]) # + [markdown] colab_type="text" id="qgeAom2B8u-I" # and check the classification: # + colab={} colab_type="code" id="M5p9tFSf7Q5F" print(f"Classified 
image as {result}")
notebooks/RetrainingExplained.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # By encoding **categorical variables**, you'll obtain your best results thus far! # # # Setup # # The questions below will give you feedback on your work. Run the following cell to set up the feedback system. # Set up code checking from learntools.core import binder binder.bind(globals()) from learntools.ml_intermediate.ex3 import * print("Setup Complete") # In this exercise, you will work with data from the [Housing Prices Competition for Kaggle Learn Users](https://www.kaggle.com/c/home-data-for-ml-course). # # ![Ames Housing dataset image](https://i.imgur.com/lTJVG4e.png) # # Run the next code cell without changes to load the training and validation sets in `X_train`, `X_valid`, `y_train`, and `y_valid`. The test set is loaded in `X_test`. # + import pandas as pd from sklearn.model_selection import train_test_split # Read the data X = pd.read_csv('../input/train.csv', index_col='Id') X_test = pd.read_csv('../input/test.csv', index_col='Id') # Remove rows with missing target, separate target from predictors X.dropna(axis=0, subset=['SalePrice'], inplace=True) y = X.SalePrice X.drop(['SalePrice'], axis=1, inplace=True) # To keep things simple, we'll drop columns with missing values cols_with_missing = [col for col in X.columns if X[col].isnull().any()] X.drop(cols_with_missing, axis=1, inplace=True) X_test.drop(cols_with_missing, axis=1, inplace=True) # Break off validation set from training data X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) # - # Use the next code cell to print the first five rows of the data. X_train.head() # Notice that the dataset contains both numerical and categorical variables. You'll need to encode the categorical data before training a model. 
# # To compare different models, you'll use the same `score_dataset()` function from the tutorial. This function reports the [mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) (MAE) from a random forest model. # + from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error # function for comparing different approaches def score_dataset(X_train, X_valid, y_train, y_valid): model = RandomForestRegressor(n_estimators=100, random_state=0) model.fit(X_train, y_train) preds = model.predict(X_valid) return mean_absolute_error(y_valid, preds) # - # # Step 1: Drop columns with categorical data # # You'll get started with the most straightforward approach. Use the code cell below to preprocess the data in `X_train` and `X_valid` to remove columns with categorical data. Set the preprocessed DataFrames to `drop_X_train` and `drop_X_valid`, respectively. # + # Fill in the lines below: drop columns in training and validation data drop_X_train = ____ drop_X_valid = ____ # Check your answers step_1.check() # - # #%%RM_IF(PROD)%% drop_X_train = X_train.select_dtypes(exclude=['object']) drop_X_valid = X_valid.select_dtypes(exclude=['object']) step_1.assert_check_passed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_1.hint() #_COMMENT_IF(PROD)_ step_1.solution() # Run the next code cell to get the MAE for this approach. print("MAE from Approach 1 (Drop categorical variables):") print(score_dataset(drop_X_train, drop_X_valid, y_train, y_valid)) # # Step 2: Label encoding # # Before jumping into label encoding, we'll investigate the dataset. Specifically, we'll look at the `'Condition2'` column. The code cell below prints the unique entries in both the training and validation sets. 
print("Unique values in 'Condition2' column in training data:", X_train['Condition2'].unique()) print("\nUnique values in 'Condition2' column in validation data:", X_valid['Condition2'].unique()) # If you now write code to: # - fit a label encoder to the training data, and then # - use it to transform both the training and validation data, # # you'll get an error. Can you see why this is the case? (_You'll need to use the above output to answer this question._) #_COMMENT_IF(PROD)_ step_2.a.hint() #_COMMENT_IF(PROD)_ step_2.a.solution() # This is a common problem that you'll encounter with real-world data, and there are many approaches to fixing this issue. For instance, you can write a custom label encoder to deal with new categories. The simplest approach, however, is to drop the problematic categorical columns. # # Run the code cell below to save the problematic columns to a Python list `bad_label_cols`. Likewise, columns that can be safely label encoded are stored in `good_label_cols`. # + # All categorical columns object_cols = [col for col in X_train.columns if X_train[col].dtype == "object"] # Columns that can be safely label encoded good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])] # Problematic columns that will be dropped from the dataset bad_label_cols = list(set(object_cols)-set(good_label_cols)) print('Categorical columns that will be label encoded:', good_label_cols) print('\nCategorical columns that will be dropped from the dataset:', bad_label_cols) # - # Use the next code cell to label encode the data in `X_train` and `X_valid`. Set the preprocessed DataFrames to `label_X_train` and `label_X_valid`, respectively. # - We have provided code below to drop the categorical columns in `bad_label_cols` from the dataset. # - You should label encode the categorical columns in `good_label_cols`. 
# + from sklearn.preprocessing import LabelEncoder # Drop categorical columns that will not be encoded label_X_train = X_train.drop(bad_label_cols, axis=1) label_X_valid = X_valid.drop(bad_label_cols, axis=1) # Apply label encoder ____ # Your code here # Check your answer step_2.b.check() # + # #%%RM_IF(PROD)%% # Drop categorical columns that will not be encoded label_X_train = X_train.drop(bad_label_cols, axis=1) label_X_valid = X_valid.drop(bad_label_cols, axis=1) # Apply label encoder label_encoder = LabelEncoder() for col in set(good_label_cols): label_X_train[col] = label_encoder.fit_transform(X_train[col]) label_X_valid[col] = label_encoder.transform(X_valid[col]) step_2.b.assert_check_passed() # - # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_2.b.hint() #_COMMENT_IF(PROD)_ step_2.b.solution() # Run the next code cell to get the MAE for this approach. print("MAE from Approach 2 (Label Encoding):") print(score_dataset(label_X_train, label_X_valid, y_train, y_valid)) # # Step 3: Investigating cardinality # # So far, you've tried two different approaches to dealing with categorical variables. And, you've seen that encoding categorical data yields better results than removing columns from the dataset. # # Soon, you'll try one-hot encoding. Before then, there's one additional topic we need to cover. Begin by running the next code cell without changes. # + # Get number of unique entries in each column with categorical data object_nunique = list(map(lambda col: X_train[col].nunique(), object_cols)) d = dict(zip(object_cols, object_nunique)) # Print number of unique entries by column, in ascending order sorted(d.items(), key=lambda x: x[1]) # - # The output above shows, for each column with categorical data, the number of unique values in the column. For instance, the `'Street'` column in the training data has two unique values: `'Grvl'` and `'Pave'`, corresponding to a gravel road and a paved road, respectively. 
# # We refer to the number of unique entries of a categorical variable as the **cardinality** of that categorical variable. For instance, the `'Street'` variable has cardinality 2. # # Use the output above to answer the questions below. # + # Fill in the line below: How many categorical variables in the training data # have cardinality greater than 10? high_cardinality_numcols = ____ # Fill in the line below: How many columns are needed to one-hot encode the # 'Neighborhood' variable in the training data? num_cols_neighborhood = ____ # Check your answers step_3.a.check() # + # #%%RM_IF(PROD)%% high_cardinality_numcols = 3 num_cols_neighborhood = 25 step_3.a.assert_check_passed() # - # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_3.a.hint() #_COMMENT_IF(PROD)_ step_3.a.solution() # For large datasets with many rows, one-hot encoding can greatly expand the size of the dataset. For this reason, we typically will only one-hot encode columns with relatively low cardinality. Then, high cardinality columns can either be dropped from the dataset, or we can use label encoding. # # As an example, consider a dataset with 10,000 rows, and containing one categorical column with 100 unique entries. # - If this column is replaced with the corresponding one-hot encoding, how many entries are added to the dataset? # - If we instead replace the column with the label encoding, how many entries are added? # # Use your answers to fill in the lines below. # + # Fill in the line below: How many entries are added to the dataset by # replacing the column with a one-hot encoding? OH_entries_added = ____ # Fill in the line below: How many entries are added to the dataset by # replacing the column with a label encoding? 
label_entries_added = ____ # Check your answers step_3.b.check() # + # #%%RM_IF(PROD)%% OH_entries_added = 990000 label_entries_added = 0 step_3.b.assert_check_passed() # - # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_3.b.hint() #_COMMENT_IF(PROD)_ step_3.b.solution() # # Step 4: One-hot encoding # # In this step, you'll experiment with one-hot encoding. But, instead of encoding all of the categorical variables in the dataset, you'll only create a one-hot encoding for columns with cardinality less than 10. # # Run the code cell below without changes to set `low_cardinality_cols` to a Python list containing the columns that will be one-hot encoded. Likewise, `high_cardinality_cols` contains a list of categorical columns that will be dropped from the dataset. # + # Columns that will be one-hot encoded low_cardinality_cols = [col for col in object_cols if X_train[col].nunique() < 10] # Columns that will be dropped from the dataset high_cardinality_cols = list(set(object_cols)-set(low_cardinality_cols)) print('Categorical columns that will be one-hot encoded:', low_cardinality_cols) print('\nCategorical columns that will be dropped from the dataset:', high_cardinality_cols) # - # Use the next code cell to one-hot encode the data in `X_train` and `X_valid`. Set the preprocessed DataFrames to `OH_X_train` and `OH_X_valid`, respectively. # - The full list of categorical columns in the dataset can be found in the Python list `object_cols`. # - You should only one-hot encode the categorical columns in `low_cardinality_cols`. All other categorical columns should be dropped from the dataset. # + from sklearn.preprocessing import OneHotEncoder # Use as many lines of code as you need! 
OH_X_train = ____ # Your code here OH_X_valid = ____ # Your code here # Check your answer step_4.check() # + # #%%RM_IF(PROD)%% # Apply one-hot encoder to each column with categorical data OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cardinality_cols])) OH_cols_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cardinality_cols])) # One-hot encoding removed index; put it back OH_cols_train.index = X_train.index OH_cols_valid.index = X_valid.index # Remove categorical columns (will replace with one-hot encoding) num_X_train = X_train.drop(object_cols, axis=1) num_X_valid = X_valid.drop(object_cols, axis=1) # Add one-hot encoded columns to numerical features OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1) OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1) step_4.assert_check_passed() # - # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_4.hint() #_COMMENT_IF(PROD)_ step_4.solution() # Run the next code cell to get the MAE for this approach. print("MAE from Approach 3 (One-Hot Encoding):") print(score_dataset(OH_X_train, OH_X_valid, y_train, y_valid)) # # Step 5: Generate test predictions and submit your results # # After you complete Step 4, if you'd like to use what you've learned to submit your results to the leaderboard, you'll need to preprocess the test data before generating predictions. # # **This step is completely optional, and you do not need to submit results to the leaderboard to successfully complete the exercise.** # # Check out the previous exercise if you need help with remembering how to save your results to CSV. Once you have generated a file with your results, follow the instructions below: # - Begin by clicking on the blue **COMMIT** button in the top right corner. This will generate a pop-up window. 
# - After your code has finished running, click on the blue **Open Version** button in the top right of the pop-up window. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions. # - Click on the **Output** tab on the left of the screen. Then, click on the **Submit to Competition** button to submit your results to the leaderboard. # - If you want to keep working to improve your performance, select the blue **Edit** button in the top right of the screen. Then you can change your model and repeat the process. # + # (Optional) Your code here # - # # Keep going # # Continue to learn how to use **[pipelines](#$NEXT_NOTEBOOK_URL$)** to preprocess datasets with a mixture of categorical variables and missing values.
notebooks/ml_intermediate/raw/ex3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # agent
# This script defines the actions that an agent (individual) can take to improve his/her networth.

class Agent:
    '''An agent (individual) holding current wealth figures, from which
    projected wealth can be evaluated given investment options.'''

    def __init__(self, income, expenses, assets, liabilities, horizon=5, discount_rate=.03):
        '''Keyword arguments:
        income -- monthly income
        expenses -- monthly expenses
        assets -- total assets
        liabilities -- total liabilities
        horizon -- time horizon for optimized plan to consider (default 5)
        discount_rate -- discount rate applied to future cash flows (default 0.03)
        '''
        self.income = income
        self.expenses = expenses
        self.assets = assets
        self.liabilities = liabilities
        self.horizon = horizon
        self.discount_rate = discount_rate
        # NOTE: the original ended __init__ with `return None` and a stray
        # class-level `pass`; both were no-ops and have been removed.

chickens = Agent(income=5000, expenses = 2300, assets = 100000, liabilities = 34000, horizon = 5)
net-worth-optimizer/agent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os os.environ['KERAS_BACKEND']='tensorflow' import datetime import keras from keras.models import Sequential from keras.layers import Dense, Dropout import keras.optimizers import numpy as np import lib.data_loader as dl # - # # Multilayer Perceptron Exercise # The goal of this exercise is to build and train a Multilayer perceptron NN to recognize hand-written digits, i.e. from 0 to 9. num_classes=10 # ## The dataset # The dataset we will be using for this session is called MNIST (http://yann.lecun.com/exdb/mnist/). This is a well known example: # # >The MNIST database was constructed from NIST's Special Database 3 and Special Database 1 which contain binary images of handwritten digits. NIST originally designated SD-3 as their training set and SD-1 as their test set. However, SD-3 is much cleaner and easier to recognize than SD-1. The reason for this can be found on the fact that SD-3 was collected among Census Bureau employees, while SD-1 was collected among high-school students. Drawing sensible conclusions from learning experiments requires that the result be independent of the choice of training set and test among the complete set of samples. Therefore it was necessary to build a new database by mixing NIST's datasets. # # The MNIST training set is composed of 30,000 patterns from SD-3 and 30,000 patterns from SD-1. Our test set was composed of 5,000 patterns from SD-3 and 5,000 patterns from SD-1. The 60,000 pattern training set contained examples from approximately 250 writers. # It is important noting that the digits have been size-normalized and centered in a fixed-size image. # Let's read our dataset and draw a sample from it. 
(x_train, y_train), (x_test, y_test) = dl.load_mnist_data_for_mlp() # load the dataset print("The shape of the X training set: {}".format(x_train.shape)) print("The shape of the Y training set: {}".format(y_train.shape)) print("The shape of the X test set: {}".format(x_test.shape)) print("The shape of the Y test set: {}".format(y_test.shape)) # We will use an auxiliary function to plot it. random_indexes = np.random.randint(len(x_train), size=5) print("The selected random indexes: {}".format(random_indexes)) for index in random_indexes: label = y_train[index] digit = x_train[index] dl.plot_digit(digit, label) x_train = x_train/255 x_test = x_test/255 # ## Specifying the NN # We will start with a very simple NN with a single fullly connected hidden layer. Our model is sequential, as we don't have feedback in it. First we instatiate the model: model = Sequential() # Then we add the layers to it. For the first layer we specify that it is a fully connected layer by instantiating a `Dense` object, then need to specify the `input_shape` parameter, that we get from our dataset. After that, we specify a Rectified Linear Unit as its activation function. model.add(Dense(128, activation='relu', input_shape=(784,))) model.add(Dropout(0.25)) model.add(Dense(128, activation='relu', input_shape=(784,))) # We then specify an output layer with a `softmax` activation function. This function maps an array into a probability distribution with 1 as the sum. model.add(Dense(10, activation='softmax')) # We can check our model using the `summary()` method. model.summary() # After dat we need to compile our model, by setting which optimizer it will use and what are the metrics that we will use for fitting the model. # A list of optimizers can be found <a href="https://keras.io/optimizers/">here</a>. # A list of loss functions can be found <a href="https://keras.io/losses/">here</a>. # A list of metrics can be found <a href="https://keras.io/metrics/">here</a>. 
model.compile(loss='categorical_crossentropy', optimizer="rmsprop", metrics=['accuracy']) # Now we can train our model. We have to specify a few parameters first. # We have to specify the number of `epochs`, i.e., the number of forward/backpropagation cycles; the `batch_size`, i.e., the number of samples used to train at a time - this parameter optimizes the memory/cache usage. We can also specify a `verbose` parameter to have some feedback while we are training our neural network. batch_size = 128 epochs = 20 # Now it is training time: t = datetime.datetime.now() history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1) print('Training time: %s' % (datetime.datetime.now() - t)) # Now we can evaluate our model. score = model.evaluate(x_test, y_test, verbose=0) print("Available metrics are: \n{}".format(model.metrics_names)) print(score) # Let's again draw a random sample from our test dataset and see how good our predictor is: random_draw = np.random.randint(len(x_test), size=5) print("Randomly selected test observations index: {}".format(random_draw)) for i in random_draw: predicted_value = model.predict_classes(x=x_test[[i]], verbose=False) print("Value Predicted: {}".format(predicted_value)) print("Plot: ") dl.plot_digit(x_test[i], y_test[i]) # # The tasks # Now it is time to get our hands dirty. # ## 1. Build a dummy classifier. # The accuracy we are getting so far is really bad. How about building one of the dummest classifiers there is: throwing a 10 face die? # ## 2. Improving the dataset. # Neural Networks are known to behave better on normalized datasets. By knowing that there only 256 (from 0 to 255) possible values for the gray scale. How about normalize the dataset? # ## 3. Changing the NN topology # Try tweaking the NN topology a bit. Make some small changes in the model: # 1. Increase the number of neurons at the hidden layer to `128`. # 1. Add a new 128-neuron `Dense` layer. # 1. 
Increase the number of epochs to 10, then 20. # 1. Add new `Dropout` layers in between the hidden layers (20% is a good start for a dropout). # 1. Try to use the `categorical_crossentropy` loss function. # 1. Try to use the `RMSProp` optimizer.
MNIST MLP-Exercise-Answers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook demonstrates the most basic capabilities of the pyvw python->VW interface. The inferface (unlike the rest of VW :P) is extensively documented, so if you are confused, look at the python docs! # # Any pyvw application needs to begin by importing `pyvw`. from __future__ import print_function from vowpalwabbit import pyvw # One we've imported pyvw, we can initialize VW either by passing a command line string (e.g., "--quiet -q ab --l2 0.01") or, in a more python-friendly manner, providing those as named arguments. Here we do the latter. vw = pyvw.vw(quiet=True, q='ab', l2=0.01) # VW objects can do a lot, but the most important thing the can do is create examples and train/predict on those examples. # # One way to create an example is to pass a string. This is the equivalent of a string in a VW file. For instance: ex = vw.example('1 |a two features |b more features here') # As promised there is documentation; for instance: help(ex.learn) # Let's run that learn function and get a prediction: ex.learn() print('current prediction =', ex.get_updated_prediction()) # Here, `get_updated_prediction` retrieves the prediction made internally during learning. The "updated" aspect means "if I were to make a prediction on this example *after* this call to `learn`, what would that prediction be?" # # Okay, so the prediction isn't quite where we want it yet. Let's learn a few more times and then print the prediction. ex.learn() ; ex.learn() ; ex.learn() ; ex.learn() print('current prediction =', ex.get_updated_prediction()) # This is now quite a bit closer to what is desired. # # Now let's create a new example using the other form of example creation: python dictionaries. 
Here, you must provide a dictionary that maps namespaces (eg, 'a' and 'b') to lists of features. Features can either be strings (eg `"foo"`), or pairs of string/floats (eg `("foo", 0.5)`). We'll create an example that's similar, but not identical to, the previous example to see how well VW has generalized. # # Note that in this setup there is no label provided, which means that this will be considered a test example. ex2 = vw.example({ 'a': ['features'], 'b': ['more', 'features', 'there'] }) # Given this example, we execute `learn`. But since it's a test example (no label), this will only make a prediction! ex2.learn() print('current prediction =', ex2.get_simplelabel_prediction()) # Because this is a test example, we can get the raw prediction with `get_simplelabel_prediction()`. This is `simplelabel` because it's a regression problem. If we were doing, for instance, One-Against-All multiclass prediction, we would use `get_multiclass_prediction`, etc. # # This prediction is only about half of what we want, but we're also missing a number of features. # # Let's now give this example a label and train on it a few times: ex2.set_label_string('-2.0') ex2.learn() ; ex2.learn() ; ex2.learn() ; ex2.learn() ; ex2.learn() print('current prediction =', ex2.get_simplelabel_prediction()) # Now we can go back and see how this has affected the prediction behavior on the original example `ex`. We do this first by removing the label and then calling `learn` to make a prediction. ex.set_label_string('') ex.learn() print('current prediction =', ex.get_simplelabel_prediction()) # Clearly this has had an impact on the prediction for the first example. 
Let's put the label back and then iterate between learning on `ex` and `ex2`: ex.set_label_string('1') for i in range(10): ex.learn() ex2.learn() print('ex prediction =', ex.get_updated_prediction()) print('ex2 prediction =', ex2.get_updated_prediction()) # After a handful of updates, we can see that the prediction for `ex` is going back toward `1.0` and for `ex2` back toward `-2.0`. # # Now that we're done, it's safest to tell VW that we're done with these examples and that it can garbage collect them. (This should happen by default when they pass out of scope per Python's build in garbage collector, but that may not run soon enough if you're manipulating large numbers of examples at once!) vw.finish_example(ex) vw.finish_example(ex2) # Finally, when we're done with VW entirely, or perhaps want to start up a new VW instance, it's good behavior to close out any old ones. This is especially important if we wanted to save a model to disk: calling `vw.finish()` tells it to write the file. You can add `f='mymodel'` to the initialization line of the `vw` object if you want to play around with this! vw.finish() # This is the end of the intro. For more, look at `test.py` in the `python` directory of the VW distribution; this contains some more examples. For even more, look at the python docs in `pyvw.py`, for instance `help(pyvw.vw)` and so on! # # Happy VW-Pythoning!
python/examples/VW_in_Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Full text classification project # # Sample project for classifying movie reviews as positive or negative # ! ls ../UPDATED_NLP_COURSE/TextFiles # + import numpy as np import pandas as pd df = pd.read_csv('../UPDATED_NLP_COURSE/TextFiles/moviereviews.tsv', sep='\t') df.head() # - df.shape print(df['review'][0]) # Check if there is some missing data df.isnull().sum() # It seems we are missing some reviews, let's get rid of them since they are not much of use. df.dropna(inplace=True) df.isnull().sum() df.shape # However, we might have reviews which are just blanks, these aren't very useful neither, so let's get rid of them. df = df[df.review.apply(lambda t: not t.isspace())] df.head() df.shape # Now let's train-test split # + from sklearn.model_selection import train_test_split X = df.review y = df.label X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123) # + from sklearn.pipeline import Pipeline from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.svm import LinearSVC clf_pipeline = Pipeline([ ('tfidf', TfidfVectorizer()), ('clf', LinearSVC()) ]) clf_pipeline.fit(X_train, y_train) # + from sklearn.metrics import confusion_matrix, classification_report predictions = clf_pipeline.predict(X_test) print("Confusion Matrix: \n") print(confusion_matrix(y_test, predictions), "\n") print("Classification Report: \n") print(classification_report(y_test, predictions)) # -
notebooks/text-classification/Full Text Classification Project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Digit recognition # This notebook will explore the dataset from Kaggle's competition on the digit recognition and will use a number of Neural Networks to demonstrate their usage and utility. # + # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory from subprocess import check_output print(check_output(["ls", "."]).decode("utf8")) # Any results you write to the current directory are saved as output. # - myTrainDf = pd.read_csv('./train.csv') myTestDf = pd.read_csv('./test.csv') # + # Display some digits to understand how the data is structured def formatDigit (aLine): return np.array(aLine).reshape((28,28)) NUM_EXAMPLES = 3 for i in (myTrainDf.shape[0]*np.random.rand(NUM_EXAMPLES)).astype(int): myImage = formatDigit(myTrainDf.iloc[i][1:]) plt.matshow(myImage, cmap='gray') plt.colorbar() plt.title('Example for digit ' + str(myTrainDf.iloc[i][0])) plt.show() # - # Now that we see how the data is organized, let's use a MLP, with an architecture as the one taught in "Machine Learning" from Stanford in coursera.org to recognize the digits. # # The MLP will have 3 layers. # # - The input layer will have 784 units # - The hidden layer will have 25 units # - The output layer will have, obviously, 10 units. It will output 1 or 0 depending on the classification. 
# # Note that the sizes of the input and output layers are given by the X and Y datasets. # + from sklearn.neural_network import MLPClassifier myX = myTrainDf[myTrainDf.columns[1:]] myY = myTrainDf[myTrainDf.columns[0]] # Use 'adam' solver for large datasets, alpha is the regularization term. # Display the optimization by showing the cost. myClf = MLPClassifier(hidden_layer_sizes=25, activation='logistic', solver='adam', alpha=1e-5, verbose=True) myClf.fit(myX, myY) # - # Get the training error myPredY = myClf.predict(myX) # + from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score # Generic function to assess performance in two datasets def showPerformance (aY, aYpred): # Ensure np.array print ('*** Performance Statistics ***') print ('Accuracy: ', accuracy_score(aY, aYpred)) print ('Precision: ', precision_score(aY, aYpred, average='micro')) print ('Recall: ', recall_score(aY, aYpred, average='micro')) print ('F1: ', f1_score(aY, aYpred, average='micro')) showPerformance(myY, myPredY) # - # The results are quite good, as expected, now let's make a prediction for the test set. myYtestPred = myClf.predict(myTestDf) myOutDf = pd.DataFrame(index=myTestDf.index+1, data=myYtestPred) myOutDf.reset_index().to_csv('submission.csv', header=['ImageId', 'Label'],index=False) # # Cross-validation set and regularization parameter # The following code will split the test set into a training set and cross-validation set. 
# + REG_ARRAY = [100, 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 0] def splitDataset (aDf, aFrac): aTrainDf = aDf.sample(frac=aFrac) aXvalDf = aDf.iloc[[x for x in aDf.index if x not in aTrainDf.index]] return aTrainDf, aXvalDf mySampleTrainDf, mySampleXvalDf = splitDataset(myTrainDf, 0.8) myAccuracyDf = pd.DataFrame(index=REG_ARRAY, columns=['Accuracy']) for myAlpha in REG_ARRAY: print ('Training with regularization param ', str(myAlpha)) myClf = MLPClassifier(hidden_layer_sizes=25, activation='logistic', solver='adam', alpha=myAlpha, verbose=False) myClf.fit(mySampleTrainDf[mySampleTrainDf.columns[1:]], mySampleTrainDf['label']) myYpred = myClf.predict(mySampleXvalDf[mySampleXvalDf.columns[1:]]) myAccuracyDf.loc[myAlpha, 'Accuracy'] = accuracy_score(mySampleXvalDf['label'], myYpred) # - myAccuracyDf # From here one can tell that the default regularization parameter (around 1e-5) # ## Multiple layers REG_ARRAY = [100, 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 0] myAccuracyDf = pd.DataFrame(index=REG_ARRAY, columns=['Accuracy']) for myAlpha in REG_ARRAY: print ('Training with regularization param ', str(myAlpha)) myClf = MLPClassifier(hidden_layer_sizes=[400, 400, 100, 25], activation='logistic', solver='adam', alpha=myAlpha, verbose=False) myClf.fit(mySampleTrainDf[mySampleTrainDf.columns[1:]], mySampleTrainDf['label']) myYpred = myClf.predict(mySampleXvalDf[mySampleXvalDf.columns[1:]]) myAccuracyDf.loc[myAlpha, 'Accuracy'] = accuracy_score(mySampleXvalDf['label'], myYpred) myAccuracyDf # Let's produce a new output file with no regularization and a complex MLP of 784x400x400x100x25x10 myClf = MLPClassifier(hidden_layer_sizes=[400, 400, 100, 25], activation='logistic', solver='adam', alpha=0, verbose=True) myClf.fit(myTrainDf[myTrainDf.columns[1:]], myTrainDf['label']) myYtestPred = myClf.predict(myTestDf) myOutDf = pd.DataFrame(index=myTestDf.index+1, data=myYtestPred) myOutDf.reset_index().to_csv('submission2.csv', header=['ImageId', 'Label'],index=False)
digit_recognition/notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Week0: IBM Quantum Challenge - Learning the Basics # The purpose of this exercise is to help beginners learn the basics of quantum computation. If you already understand this content, you can skip this material. (This document is based on the chapter ["The Atoms of Computation"](https://qiskit.org/textbook/ch-states/atoms-computation.html) from the Qiskit Textbook) # ## What is Quantum Computation? Comparing with Classical Computation # # Programming a quantum computer is now something that anyone can do from the comfort of their own home. # # But what to create? What is a quantum program anyway? In fact, what is a quantum computer? # # These questions can be answered by making comparisons to standard digital computers. In this article, we will look at the basic principles behind these devices. To help us transition over to quantum computing later, we will use the same tools here as we will be using for calculations on quantum computers. # ## Splitting information into bits # # The first thing we need to know about is the idea of bits. These are designed to be the world’s simplest alphabet. With only two characters, 0 and 1, we can represent any piece of information. # # One example is decimal notation numbers. You are probably used to representing a number through a string of ten digits: 0, 1, 2, 3, 4, 5, 6, 7, 8, and 9. In this string of digits, each digit represents how many times the number contains a certain power of ten. 
For example, when we write 9213, we mean # # # # $$ 9000 + 200 + 10 + 3 $$ # # # # or, expressed in a way that emphasizes the powers of ten # # # # $$ (9\times10^3) + (2\times10^2) + (1\times10^1) + (3\times10^0) $$ # # # # Though we usually use this system based on the number 10, we can just as easily use one based on any other number. The binary number system, for example, is based on the number two. This means using the two characters 0 and 1 to express numbers as multiples of powers of two. For example, 9213 becomes 10001111111101, since # # # # $$ 9213 = (1 \times 2^{13}) + (0 \times 2^{12}) + (0 \times 2^{11})+ (0 \times 2^{10}) +(1 \times 2^9) + (1 \times 2^8) + (1 \times 2^7) \\\\ \,\,\, + (1 \times 2^6) + (1 \times 2^5) + (1 \times 2^4) + (1 \times 2^3) + (1 \times 2^2) + (0 \times 2^1) + (1 \times 2^0) $$ # These strings of bits, known as binary strings, can be used to represent more than just numbers. For example, there is a way to represent any character using bits. For any letter, number, or punctuation mark that you want to use, you can find a corresponding string of at most eight bits using [this table](https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.networkcomm/conversion_table.htm). Though these are quite arbitrary, this is a widely agreed-upon standard. In fact, this is what was used to transmit this article to you through the internet. # # This is how all information is represented in computers. Whether numbers, letters, images, or sounds, it all exists in the form of binary strings. # # Like our standard digital computers, quantum computers are also based on this same basic idea. The main difference is that they use *qubits*, an extension of the bit to quantum mechanics. # ## Building an 8-bit computer with IBM Quantum Experience # IBM Quantum Experience is a tool for creating and executing quantum circuits. It is also known as Composer because its GUI resembles a staff notation on a musical score. 
Let's make an 8-bit computer using this Circuit Composer. # 1. Create an account at [IBM Quantum Experience](https://quantum-computing.ibm.com/) (if you haven't done it already). # 2. Click on "Start a new circuit" in the center or on the Circuit Composer icon from the bar on the left to launch the Circuit Composer. You can also click on [this link](https://quantum-computing.ibm.com/composer/new-experiment) for quick access. # 3. You can build a circuit by directly dragging and dropping the quantum gate. # 4. You can also directly edit the Code editor on the right to build the circuit. from IPython.display import Image, display Image('composer01.jpg') # Now, we prepare 8 quantum registers and 8 classical registers (i.e. qreg [8], creg [8]), and place the measurement operators (gray gate with meter figure) in order on the line extending from each qubit. Let's place them. (If you are having trouble creating the circuit, you can get it from [here](https://quantum-computing.ibm.com/composer/new-experiment?initial=N4IgdghgtgpiBcICqYAuBLVAbGATABAMboBOhArpiADQgCOEAzlAiAPIAKAogHICKAQQDKAWXwAmAHQAGANwAdMOjCEs5XDHzz6MLOgBGARknLC2hWEV0SMAOb46AbQAcAXQuEb9wi-eLFsEzkNg6O0q74ALQAfERhfmCBjMGaToYRMXHpFkkpoeIZsT4FOTBBIU4AzIVx1aXlqY4ALDU%2BLfXJFY4ArK09CbldAGx9Ix15TgDsfdOyNCAajJ7oAA4YAPZgrCAAvkA)) Image('composer02.jpg') # What is shown in this figure is what is called a "circuit". A circuit manipulates the state of a qubit in a time-evolving manner from left to right of a line, but the logic circuit of a classical computer that we are familiar with can be made in the same way using a quantum circuit. # There is nothing special in this example. Eight qubits are prepared, each assigned a number from 0 to 7. A "measurement" operation is applied to each qubit, and this "measurement" reads a value of '0' or '1'. # Qubits are always initialized to give the output ```0```. 
Since we don't do anything to our qubits in the circuit above other than performing a "measurement" operation, this is exactly the result we will get when we measure them. # You can select 'Measurement Probabilities' from the dropdown menu on the tab below Composer. This will display the following histogram and you can see how all the qubits are returning the state '0' (i.e. '00000000'). Image('histogram01.jpg',width="300", height="200") # We use NOT gates to encode bits other than '0'. The NOT gate, which is the most basic operation in computer arithmetic, inverts '0' to '1' and '1' to '0'. To apply this to the Circuit, select the dark blue icon labeled $\oplus$ from the displayed set of quantum gates and drag and drop it onto the composer. (We would like you to do it by yourself, but you can also get the same circuit from [here](https://quantum-computing.ibm.com/composer/new-experiment?initial=N4IgdghgtgpiBcICqYAuBLVAbGATABAMboBOhArpiADQgCOEAzlAiAPIAKAogHICKAQQDKAWXwAmAHQAGANwAdMOjCEs5XDHzz6MLOgBGARknLC2hWEV0SMAOb46AbQAcAXQuEb9wi-eLFAB4OjgDsfmCwTOQ2wdKu%2BAC0AHxEjnEWkYzRmk6G8cmpeRkwUTFO4vkpPhXFpTmOAMyVqU21WWWOACzNPt1t2cEArD2Ow-0dAGwjU%2BP1YYlVoe40IBqMnugADhgA9mCsIAC%2BQA).) Image('composer03.jpg') # You can see from the histogram that now our computer outputs the string ```10000000``` instead. This circuit is the same as an 8-bit classical computer. Image('histogram02.jpg',width="300", height="200") # The bit we flipped, which comes from qubit 7, lives on the far left of the string. This is because Qiskit notation numbers the bits in a string from right to left. Some prefer to number their bits the other way around, but Qiskit's system certainly has its advantages in binary representation of integer values with qubits. Specifically, it means that qubit 7 is telling us about how many $2^7$s we have in our number ($2^7$ = 128). So by flipping this bit to '1', we have now written the number 128 in our simple 8-bit computer. 
# ## Let's encode any number with the same circuit # Now try out writing another integer number for yourself (less than 128). You could do your age, for example. Let's use a search engine to find out what the number looks like in binary in advance (if it includes a ‘0b’, just ignore/truncate it and then add 0s to the left side to make the whole 8 digits).<br/> # The following is the circuit when encoding '34' as an input value. Qubit 5 and qubit 1 are inverted.(i.e. 34 = 1 x 2<sup>5</sup> + 1 x 2<sup>1</sup>) # (We would like you to try it by yourself, but you can get the same circuit also from [here](https://quantum-computing.ibm.com/composer/new-experiment?initial=N4IgdghgtgpiBcICqYAuBLVAbGATABAMboBOhArpiADQgCOEAzlAiAPIAKAogHICKAQQDKAWXwAmAHQAGANwAdMOjCEs5XDHzz6MLOgBGARknLC2hWEV0SMAOb46AbQAcAXQuEb9wi-eLFAB4OjoZ%2BYEFOAKxhsEzkNsHSrvgAtAB8RI5JFrGM8ZpOoakZPqE5MHEJTuLJ6Zk15ZUFjgDMtSWtMRV5VY4ALO2ZA409zdHFmdEj%2BcEAbIM%2B89O9AOwLjmuyNCAajJ7oAA4YAPZgrCAAvkA).) Image('composer04.jpg',width="600", height="400") # ## For Week1 Exercises # # Now we know how to encode information in a quantum computer. You can also code the same circuit using Qiskit (please refer to the chapter [The Atoms of Computation](https://qiskit.org/textbook/ch-states/atoms-computation.html) in the Qiskit Textbook). In the next step (Week1 exercises) we will look at how to perform actual operations (combining inputs and returning desired outputs). Specifically, you will learn how to make an adder in a quantum circuit such as 1+1. # # <!-- # Continue to [Week1-A](../week-1/ex_1a_en.ipynb). # -->
challenges/ibm-quantum/iqc-fall-2020/week-0/ex_0_en.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: .shiing
#     language: python
#     name: .shiing
# ---

# +
import sys
sys.path.append('..')

import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.python.keras import backend as K
import time
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam, SGD

# +
# Build a binary (7 vs 9) classification problem out of MNIST.
np.random.seed(0)

num_classes = 2
# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets; train and test are pooled
# here because the split is redone downstream by the inference procedure.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
X = np.vstack((x_train, x_test))
y = np.hstack((y_train, y_test))

# keep only digits 7 and 9 (use `|` for element-wise boolean OR rather
# than `+`, which relies on deprecated bool-array arithmetic)
ind = (y == 9) | (y == 7)
X, y = X[ind], y[ind]
X = X.astype('float32')
# add small positive noise; sized from X itself instead of the hard-coded
# count 14251 so the code survives any change in the selected digits
X += .01*abs(np.random.randn(*X.shape))
# relabel: 7 -> class 0, 9 -> class 1
y[y==7], y[y==9] = 0, 1

# reshape to the channel layout expected by the active Keras backend
if K.image_data_format() == 'channels_first':
    # fixed: original read `x.shape[0]` (undefined lowercase `x`), which
    # raised NameError on channels-first backends
    X = X.reshape(X.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X = X.reshape(X.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# scale pixel values into [0, ~1]
X /= 255.
# convert class vectors to binary class matrices y = keras.utils.to_categorical(y, num_classes) # + ## define the learning models def cnn(): model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss=keras.losses.binary_crossentropy, optimizer=keras.optimizers.Adam(0.0005), metrics=['accuracy']) return model model, model_mask = cnn(), cnn() # + ## fitting param from keras.callbacks import EarlyStopping es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=0, patience=15, restore_best_weights=True) fit_params = {'callbacks': [es], 'epochs': 5, 'batch_size': 32, 'validation_split': .2, 'verbose': 0} split_params = {'split': 'one-split', 'perturb': None, 'num_perm': 100, 'ratio_grid': [.2, .4, .6, .8], 'perturb_grid': [.001, .005, .01, .05, .1], 'min_inf': 100, 'min_est': 1000, 'ratio_method': 'fuse', 'cv_num': 1, 'cp': 'min', 'verbose': 1} # + ## Inference based on dnn_inference from dnn_inference import DnnT inf_feats = [[np.arange(19,28), np.arange(13,20)], [np.arange(21,28), np.arange(4, 13)],[np.arange(7,16), np.arange(9,16)]] shiing = DnnT(inf_cov=inf_feats, model=model, model_mask=model_mask, change='mask', eva_metric='zero-one') p_value_tmp = shiing.testing(X, y, cv_num=3, cp='hommel', fit_params=fit_params, split_params=split_params) shiing.visual(X,y) print('P-values: %s' %p_value_tmp)
tests/.ipynb_checkpoints/MNIST_demo-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp; **JLUFE** &emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&emsp;&ensp;**Fall 2021(Sep-Jan)** # # # **<div align="center">Homework Assignment Report</div>** # <br> # # **<div align="center">JILIN UNIVERSITY OF FINANCE AND ECONOMICS</div>** # <br> # # **<div align="center"><span style="color:blue">College of Managment Science and Information Engineering</span></div>** # # **<div align="center">BSc in <span style="color:blue">Data Science and Big Data Technology</span></div>** # # **<div align="center">(2021)</div>** # # <br> # # **<div align="center">MODULE: Intelligent Technology</div>** # # **<div align="center">Homework Assignment: 05</div>** # # **<div align="center">Functions</div>** # # **<div align="center">04/11/2021</div>** # # <br> # # **<div align="center">Submitted by:</div>** # # **<div align="center"><span style="color:blue">Milan(米兰) 0318021907632 (2005)</span></div>** # **<div align="center">QQ: <span style="color:blue">3086215265</span> | Github ID: <span style="color:blue">milaan9</span></div>** # # Instructions: # # # 1. I have added tips and required learning resources for each question, which helps you to solve the problems. # # 2. Finish the assignment on your **OWN**. **<span style="color:red">Any student find copying/sharing from classmates or internet will get '0' points!!!</span>** # # 3. After <span class='label label-default'>Accepting this assignment</span> from ➞ **[GitHub Clasroom link](https://classroom.github.com/a/I_ScEHXx)**, Github will create private repository of the assignment in your GitHub Classroom account. # # 4. 
In your repository <span class='label label-default'>Clone</span> ➞ <span class='label label-default'>Download ZIP</span> in your computer. # # 5. Change your ➞ **College**, **Major**, **Name**, **Student number**, **Class number**, **QQ number** and **GitHub ID** # # 6. Once you finish the Assignment **[convert your .ipynb file into PDF](https://github.com/milaan9/91_Python_Mini_Projects/tree/main/001_Convert_IPython_to_PDF)** (both **.ipynb** and **.pdf** file will be required!) # # 7. To submit your assignment, go to GitHub Classroom repository and <span class='label label-default'>Add file</span> ➞ <span class='label label-default'>Upload files</span> ➞ <span class='label label-default'>Commit changes</span> # 1. Replace the question (**.ipynb**) file with your solution (**.ipynb**) file. # 2. Also, upload (**.pdf**) converted file of your solution (**.ipynb**) file. # # Python Assignment 05 # # Functions ➞ <span class='label label-default'>Level 1, 2 & 3</span> # # >**Note:** Please create new cell for each question # ### Part A ➞ <span class='label label-default'>Level 1</span> # >**Note:** Please create new cell for each question # # 1. Area of a circle is calculated as follows: **area = $πr^2$** and **perimeter = $2πr$**. Write a function that calculates **`area_of_circle`** and **`perimeter_of_circle`** by taking user input for value of **`r`**. # 2. Write a function called **`add_all_nums`** which takes arbitrary number of arguments and sums all the arguments. Check if all the list items are number data types. If not do give a reasonable feedback. # 3. Temperature in **°C** can be converted to **°F** using this formula: **°F = $(°C * 9/5) + 32$**. Write a function which converts **°C to °F**, **`convert_celsius_2_fahrenheit`**. # 4. Write a function called **`check_season`**, it takes a month parameter and returns the season: **`Autumn`**, **`Winter`**, **`Spring`** or **`Summer`**. # 5. 
Write a function called **`calculate_slope`** which return the slope of a linear equation # 6. Quadratic equation is calculated as follows: **$ax² + bx + c = 0$**. Write a function which calculates solution set of a quadratic equation, **`solve_quadratic_eqn`**. # 7. Declare a function named **`print_list`**. It takes a list as a parameter and it prints out each element of the list. # 8. Declare a function named **`reverse_list`**. It takes an array as a parameter and it returns the reverse of the array (use loops). # # - ```py # print(reverse_list([1, 2, 3, 4, 5])) # #[5, 4, 3, 2, 1] # print(reverse_list1(["A", "B", "C"])) # #["C", "B", "A"] # ``` # # 9. Declare a function named **`capitalize_list_items`**. It takes a list as a parameter and it returns a capitalized list of items # 10. Declare a function named **`add_item`**. It takes a list and an item parameters. It returns a list with the item added at the end. # # - ```py # food_staff = ['Potato', 'Tomato', 'Mango', 'Milk'] # print(add_item(food_staff, 'Fungi')) #['Potato', 'Tomato', 'Mango', 'Milk', 'Fungi'] # numbers = [2, 3, 7, 9] # print(add_item(numbers, 5)) #[2, 3, 7, 9, 5] # ``` # # 11. Declare a function named **`remove_item`**. It takes a list and an item parameters. It returns a list with the item removed from it. # # - ```py # food_staff = ['Potato', 'Tomato', 'Mango', 'Milk'] # print(remove_item(food_staff, 'Mango')) # ['Potato', 'Tomato', 'Milk'] # numbers = [2, 3, 7, 9] # print(remove_item(numbers, 3)) # [2, 7, 9] # ``` # # 12. Declare a function named **`sum_of_numbers`**. It takes a number parameter and it adds all the numbers in that range. # # - ```sh # print(sum_of_numbers(5)) # 15 # print(sum_all_numbers(10)) # 55 # print(sum_all_numbers(100)) # 5050 # ``` # # 13. Declare a function named **`sum_of_odds`**. It takes a number parameter and it adds all the odd numbers in that range. # 14. Declare a function named **`sum_of_even`**. 
# It takes a number parameter and it adds all the even numbers in that range.

# +
# Solution:
def area_of_circle(r):
    """Print and return the area of a circle of radius r (pi approximated as 3.14)."""
    pi = 3.14
    area = pi*r**2
    print(area)
    return area

def perimeter_of_circle(r):
    """Print and return the perimeter (circumference) of a circle of radius r."""
    pi = 3.14
    perimeter = 2*pi*r
    print(perimeter)
    return perimeter
# -

def add_all_nums(*args):
    """Print and return the sum of an arbitrary number of numeric arguments.

    Non-numeric arguments are skipped, with feedback printed for each one.
    FIX: the original body did not parse (``from a in list:``) and iterated
    names (``n``, ``list``) that were never defined.
    """
    total = 0
    for a in args:
        # bool is a subclass of int, so exclude it explicitly
        if isinstance(a, (int, float)) and not isinstance(a, bool):
            total += a
        else:
            print(a, "is not a number")
    print(total)
    return total

def check_season(month):
    """Print the season for the given English month name.

    Feb/Mar/Apr -> Spring, May/Jun/Jul -> Summer,
    Aug/Sep/Oct -> Autumn, Nov/Dec/Jan -> Winter.
    """
    if month == "February" or month == "March" or month == "April":
        print("Spring")
    if month == "May" or month == "June" or month == "July":
        print("Summer")
    if month == "August" or month == "September" or month == "October":
        print("Autumn")
    if month == "November" or month == "December" or month == "January":
        print("Winter")

a=check_season("November")

def solve_quadratic_eqn(a,b,c):
    """Print and return the real solution set of a*x**2 + b*x + c = 0.

    Returns None when there is no real root, a single float for a double
    root, and an (x1, x2) tuple of the two roots otherwise.
    """
    m= b**2 - 4*a*c  # discriminant
    if m<0:
        print("此方程无解!")  # no real solution
        return None
    elif m==0:
        x=b/(-2*a)
        print("x1=x2=",x)
        return x
    else:
        # Algebraically equivalent to the usual (-b ± sqrt(m)) / (2a).
        x1=(b+m**0.5)/(-2*a)
        x2 =(b-m**0.5)/(-2*a)
        print("x1=",x1)
        print("x2=",x2)
        return x1, x2

def print_list(list):
    """Print every element of the given list, one per line."""
    # NOTE(review): the parameter shadows the builtin `list`; name kept to
    # preserve the original interface.
    for a in list:
        print(a)

def reverse_list(list):
    """Return a new list with the elements in reverse order (loop-based, per the exercise)."""
    reverselist = []
    for a in range(len(list)):
        reverselist.append(list[len(list)-1-a])
    return reverselist

print(reverse_list([1, 2, 3, 4, 5])) #[5, 4, 3, 2, 1]
print(reverse_list(["A", "B", "C"])) #["C", "B", "A"]

def capitalize_list_items(item):
    """Return a new list with every item capitalized.

    FIX: the original called an undefined ``upper()`` and returned None.
    """
    return [str(i).capitalize() for i in item]

# ### Part B ➞ <span class='label label-default'>Level 2</span>
# >**Note:** Please create new cell for each question
#
# 1. Declare a function named **`evens_and_odds`**. It takes a positive integer as parameter and it counts number of evens and odds in the number.
#
#    - ```py
#      print(evens_and_odds(100))
#      #The number of odds are 50.
#      #The number of evens are 51.
#      ```
#
# 2. Call your function **`factorial`**, it takes a whole number as a parameter and it return a factorial of the number
# 3. Call your function **`is_empty`**, it takes a parameter and it checks if it is empty or not
# 4. Write different functions which take lists.
They should **`calculate_mean`**, **`calculate_median`**, **`calculate_mode`**, **`calculate_range`**, **`calculate_variance`**, **`calculate_std`** (standard deviation). # ### Part C ➞ <span class='label label-default'>Level 3</span> # >**Note:** Please create new cell for each question # # 1. Write a function called **`is_prime`**, which checks if a number is prime and prints all prime numbers in that range. # 2. Write a functions which checks if all items are *unique* in the list. # 3. Write a function which checks if all the items of the list are of the *same data type*. # 4. Write a function which check if provided variable is a valid *python variable* # 5. Go to the data folder and access the **[countries-data.py](https://github.com/milaan9/03_Python_Flow_Control/blob/main/countries_details_data.py)** file. # # - Create a function called the **`most_spoken_languages`** in the world. It should return 10 or 20 most spoken languages in the world in descending order # - Create a function called the **`most_populated_countries`**. It should return 10 or 20 most populated countries in descending order. # + # Solution:
005_Python_HW_Assignment_05.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.3 64-bit
#     language: python
#     name: python38364bit537b7389689349c297126cea58c73bfb
# ---

# + [markdown] id="y_3QacO3kHRY" colab_type="text"
# # Poster Notebook: Histogram of associations between Protein/Gene Expression and Clinical Features
# ```
# Created: 09-20-2020
# URL: https://github.com/isb-cgc/Community-Notebooks/blob/master/FeaturedNotebooks/ACM_BCB_2020_POSTER_Histogram_Associations_ProteinGeneExpression_vs_ClinicalFeatures.ipynb
# Notes: This notebook supports the POSTER : "Multi-omics Data Integration in the Cloud: Analysis
#        of Statistically Significant Associations Between Clinical and Molecular Features in Breast Cancer"
#        by <NAME>, <NAME>, and <NAME> , presented in the ACM Conference on Bioinformatics,
#        Computational Biology, and Health Informatics, 2020.
# ```
# ***
#
# This Notebook computes statistically significant associations between Protein/Gene expression and clinical features of Breast cancer, using data available in TCGA BigQuery tables.
#
# The associations were computed using the Kruskal Wallis (KW) test, implemented as a user defined function in BigQuery. Details of the KW test and its implementation can be found in: https://github.com/jrossthomson/bigquery-utils/tree/master/udfs/statslib
#
# A histogram of the number of cases with significant associations is generated after the Kruskal Wallis test.

# + [markdown] id="nqdesxBuxkcG" colab_type="text"
# # Setup

# + id="TppLwn_uF4Y1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="7a2e71d2-1a64-42bb-ba1c-feb625df3e88"
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import pandas_gbq

# #! {sys.executable} -m pip install matplotlib seaborn
# #! {sys.executable} -m pip install google-cloud
# #! {sys.executable} -m pip install google-cloud
# #! {sys.executable} -m pip install google-auth

# FIX: was `print({sys.executable})`, which built and printed a one-element
# set (e.g. {'/usr/bin/python3'}) instead of the interpreter path itself.
print(sys.executable)

from platform import python_version
print(python_version())

# + [markdown] id="_OeROlcGWi5-" colab_type="text"
# # Authentication
# The first step is to authorize access to BigQuery and the Google Cloud. For more information see ['Quick Start Guide to ISB-CGC'](https://isb-cancer-genomics-cloud.readthedocs.io/en/latest/sections/HowToGetStartedonISB-CGC.html) and alternative authentication methods can be found [here](https://googleapis.github.io/google-cloud-python/latest/core/auth.html).

# + id="b-debebxHIWw" colab_type="code" colab={}
from google.colab import auth
import google.auth
auth.authenticate_user()
my_project_id = "" # write your project id here
#credentials, your_project_id = google.auth.default()

# + [markdown] id="uPBtiq2QXAu6" colab_type="text"
# # Number of proteins with significant associations with Clinical features

# + id="-0m6TjJtF4Y9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="e1cb58c5-9583-413f-f136-aac419849a53"
cancer_type = 'TCGA-BRCA' # https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/tcga-study-abbreviations
significance_level = '0.001'

# Kruskal-Wallis test of protein expression against each clinical feature;
# keeps (feature, gene) pairs whose p-value clears the significance level,
# then counts significant genes per clinical feature.
sql = '''
with the_goods as (
SELECT p.project_short_name as study, gene_name as g, c.feature.key as c,
`isb-cgc-bq.functions.kruskal_wallis_current`(array_agg((c.feature.value,protein_expression))) as reso
FROM `isb-cgc.TCGA_hg19_data_v0.Protein_Expression` p
JOIN `isb-cgc-bq.supplementary_tables.Abdilleh_etal_ACM_BCB_2020_TCGA_bioclin_v0_Clinical_UNPIVOT` c
ON c.case_barcode = substr(p.sample_barcode,0,12)
WHERE 1=1
AND c.feature.value != "null"
AND p.project_short_name = "{0}"
GROUP BY study, g, c
HAVING reso.DoF >2 and reso.DoF < 10 and reso.p <= {1}
ORDER BY study, reso.p, c
) # the_goods
select c as Clinical_feature, COUNT(g) as Protein from the_goods group by c order by Protein DESC
'''.format( cancer_type , significance_level )
df1 = pandas_gbq.read_gbq(sql,project_id=my_project_id )
df1

# + [markdown] id="mC3oX5xbBnBU" colab_type="text"
# # Number of Genes with expressions that are significantly associated with Clinical features
#

# + id="Mcq77dM7BsXx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="5833a024-a493-4803-a560-a8d3a7d69258"
# Same Kruskal-Wallis analysis as above, but on RNAseq gene expression,
# restricted to genes that also appear in the protein-expression table so
# the two result sets are comparable.
sql = '''
with the_goods as (
SELECT p.project_short_name as study, p.HGNC_gene_symbol as g, c.feature.key as c,
`isb-cgc-bq.functions.kruskal_wallis_current`(array_agg((c.feature.value,normalized_count))) as reso
FROM `isb-cgc.TCGA_hg19_data_v0.RNAseq_Gene_Expression_UNC_RSEM` p
JOIN `isb-cgc-bq.supplementary_tables.Abdilleh_etal_ACM_BCB_2020_TCGA_bioclin_v0_Clinical_UNPIVOT` c
ON c.case_barcode = substr(p.sample_barcode,0,12)
WHERE 1=1
AND c.feature.value != "null"
AND p.project_short_name = "{0}"
and HGNC_gene_symbol in (
SELECT gene_name
FROM `isb-cgc.TCGA_hg19_data_v0.Protein_Expression`
GROUP BY 1
)
GROUP BY study, g, c
HAVING reso.DoF >2 and reso.DoF < 10 and reso.p <= {1}
ORDER BY study, reso.p, c
) # the_goods
select c as Clinical_feature, COUNT(g) as Gene_expression from the_goods group by c order by Gene_expression DESC
'''.format( cancer_type , significance_level )
df2 = pandas_gbq.read_gbq(sql, project_id=my_project_id)
df2

# + [markdown] id="RawwNakAHlQ5" colab_type="text"
# # Plot histogram

# + id="49nQ4olTizDo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="4ac97e50-78c1-4923-fe92-c1b4e9d0733e"
# join tables (inner join on the clinical feature, so only features that are
# significant in both analyses are plotted)
df = df1.set_index('Clinical_feature').join(df2.set_index('Clinical_feature'), how='inner')

# Generate plot
ax = df.plot.bar(rot=45)
plt.ylabel('Count', fontweight='bold')
plt.xlabel('Clinical feature', fontweight='bold')
FeaturedNotebooks/ACM_BCB_2020_POSTER_Histogram_Associations_ProteinGeneExpression_vs_ClinicalFeatures.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!DOCTYPE html> # <html> # <body> # <div align="center"> # <h3>Prepared by <NAME></h3> # # <h1>Pandas - Series & Dataframes</h1> # # # </div> # </body> # </html> import numpy as np import pandas as pd import matplotlib.pyplot as plt import glob import re import math import warnings warnings.filterwarnings("ignore") # # Series # ## Create Series # Create series from Nump Array v = np.array([1,2,3,4,5,6,7]) s1 = pd.Series(v) s1 #Datatype of Series s1.dtype # number of bytes allocated to each item s1.itemsize # Number of bytes consumed by Series s1.nbytes # Shape of the Series s1.shape # number of dimensions s1.ndim # Length of Series len(s1) s1.count() s1.size # Create series from List s0 = pd.Series([1,2,3],index = ['a','b','c']) s0 # Modifying index in Series s1.index = ['a' , 'b' , 'c' , 'd' , 'e' , 'f' , 'g'] s1 # Create Series using Random and Range function v2 = np.random.random(10) ind2 = np.arange(0,10) s = pd.Series(v2,ind2) v2 , ind2 , s # Creating Series from Dictionary dict1 = {'a1' :10 , 'a2' :20 , 'a3':30 , 'a4':40} s3 = pd.Series(dict1) s3 pd.Series(99, index=[0, 1, 2, 3, 4, 5]) # ## Slicing Series s # Return all elements of the series s[:] # First three element of the Series s[0:3] # Last element of the Series s[-1:] # Fetch first 4 elements in a series s[:4] # Return all elements of the series except last two elements. s[:-2] # Return all elements of the series except last element. 
s[:-1] # Return last two elements of the series s[-2:] # # Return last element of the series s[-1:] s[-3:-1] # ## Append Series s2 = s1.copy() s2 s3 # Append S2 & S3 Series s4 = s2.append(s3) s4 # When "inplace=False" it will return a new copy of data with the operation performed s4.drop('a4' , inplace=False) s4 # When we use "inplace=True" it will affect the dataframe s4.drop('a4', inplace=True) s4 s4 = s4.append(pd.Series({'a4': 7})) s4 # ## Operation on Series v1 = np.array([10,20,30]) v2 = np.array([1,2,3]) s1 = pd.Series(v1) s2 = pd.Series(v2) s1 , s2 # Addition of two series s1.add(s2) # Subtraction of two series s1.sub(s2) # Subtraction of two series s1.subtract(s2) # Increment all numbers in a series by 9 s1.add(9) # Multiplication of two series s1.mul(s2) # Multiplication of two series s1.multiply(s2) # Multiply each element by 1000 s1.multiply(1000) # Division s1.divide(s2) # Division s1.div(s2) # MAX number in a series s1.max() # Min number in a series s1.min() # Average s1.mean() # Median s1.median() # Standard Deviation s1.std() # Series comparison s1.equals(s2) s4 =s1 # Series comparison s1.equals(s4) s5 = pd.Series([1,1,2,2,3,3], index=[0, 1, 2, 3, 4, 5]) s5 s5.value_counts() # # DataFrame # ## Create DataFrame df = pd.DataFrame() df # Create Dataframe using List lang = ['Java' , 'Python' , 'C' , 'C++'] df = pd.DataFrame(lang) df # Add column in the Dataframe rating = [1,2,3,4] df[1] = rating df df.columns = ['Language','Rating'] df # + # Create Dataframe from Dictionary data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}] df2 = pd.DataFrame(data) df3 = pd.DataFrame(data, index=['row1', 'row2'], columns=['a', 'b']) df4 = pd.DataFrame(data, index=['row1', 'row2'], columns=['a', 'b' ,'c']) df5 = pd.DataFrame(data, index=['row1', 'row2'], columns=['a', 'b' ,'c' , 'd']) # - df2 df3 df4 df5 # Create Dataframe from Dictionary df0 = pd.DataFrame({'ID' :[1,2,3,4] , 'Name' :['Asif' , 'Basit' , 'Ross' , 'John']}) df0 # + # Create a DataFrame from Dictionary of 
Series dict = {'A' : pd.Series([1, 2, 3], index=['a', 'b', 'c']), 'B' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])} df1 = pd.DataFrame(dict) df1 # - # #### Dataframe of Random Numbers with Date Indices dates = pd.date_range(start='2020-01-20', end='2020-01-26') dates dates = pd.date_range('today',periods= 7) dates dates = pd.date_range(start='2020-01-20', periods=7) dates M = np.random.random((7,7)) M dframe = pd.DataFrame(M , index=dates) dframe #Changing Column Names dframe.columns = ['C1' , 'C2' , 'C3', 'C4', 'C5', 'C6', 'C7'] dframe # List Index dframe.index # List Column Names dframe.columns # Datatype of each column dframe.dtypes # Sort Dataframe by Column 'C1' in Ascending Order dframe.sort_values(by='C1') # Sort Dataframe by Column 'C1' in Descending Order dframe.sort_values(by='C1' , ascending=False) # ## Delete Column in DataFrame df1 # Delete Column using "del" function del df1['B'] df1 df5 # Delete Column using pop() df5.pop('c') df5 # + dict = {'A' : pd.Series([1, 2, 3,11], index=['a', 'b', 'c','d']), 'B' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])} df12 = pd.DataFrame(dict) df12 # - df12.drop(['A'], axis=1,inplace=True) df12 # ## Delete Rows in DataFrame col1 = np.linspace(10, 100, 30) col2 = np.random.randint(10,100,30) df10 = pd.DataFrame({"C1" : col1 , "C2" :col2}) df10 # Delete rows with index values 17,18,19 df10 = df10.drop([17,18,19], axis=0) df10 # Delete rows with index values 16 without using assignment operation df10.drop([16], axis=0,inplace=True) df10 df10.drop(df10.index[5] , inplace=True) df10 #Delete first three rows df10 = df10.iloc[3:,] df10 #Delete last four rows df10 = df10.iloc[:-4,] df10 #Keep top 10 rows df10 = df10.iloc[:10,] df10 df10 df10.index[df10['C2'] == 56].tolist() # Delete row based on Column value df10.drop(df10.index[df10['C2'] == 56].tolist() , axis=0,inplace=True) df10 # Delete row based on Column value df10 = df10.drop(df10[df10["C2"]==79].index) df10 # Delete all rows with column C2 value 14 
df10 = df10[df10.C2 != 44]
df10

# Delete all rows where column C2 has value 21 or 48, using the isin operator
# (original comment said "88 & 55", which did not match the code)
df10 = df10[~(df10.C2.isin ([21,48]))]
df10

# Keep only the rows where column C2 has value 42 or 76, using the isin operator
# (original comment said "10,89,31 & 64", which did not match the code)
df10 = df10[df10.C2.isin ([42,76])]
df10

# +
# Build a fresh demo DataFrame from a dict of Series with a labelled index a-d
dict = {'A' : pd.Series([1, 2, 3,11], index=['a', 'b', 'c','d']),
   'B' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df11 = pd.DataFrame(dict)
df11
# -

#Delete all rows with label "d"
df11.drop("d", axis=0,inplace=True)
df11

df13 = pd.DataFrame({ 'ID' :[1,2,3,4] , 'Name' :['Asif' , 'Basit' , 'Ross' , 'John'] , 'location' : ['India' , 'Australia','UK' , 'US'] })
df13

# Find the index of the row matching all three column values, then drop it
ind = df13[((df13.Name == 'Ross') &(df13.ID == 3) & (df13.location == 'UK'))].index
df13.drop(ind,inplace=True)
df13

# ## Data Selection in Dataframe
df

# Replace the default integer index with explicit labels 1-4
df.index = [1,2,3,4]
df

# Data selection using row label
df.loc[1]

# Data selection using position (Integer Index based)
df.iloc[1]

# NOTE: .loc slicing is inclusive of the end label; .iloc excludes the end position
df.loc[1:2]
df.iloc[1:2]

# Data selection based on Condition
df.loc[df.Rating > 2]

df1

# Row & Column label based selection
df1.loc['a']
df1.iloc['a'] # This will throw error because iloc will not work on labels

dframe

# Data selection using Row Label
dframe['2020-01-20' : '2020-01-22' ]

# Selecting all rows & selected columns
dframe.loc[:,['C1' , 'C7']]

#row & column label based selection
dframe.loc['2020-01-20' : '2020-01-22',['C1' , 'C7']]

# Data selection based on Condition
dframe[dframe['C1'] > 0.5]

# Data selection based on Condition
dframe[(dframe['C1'] > 0.5) & (dframe['C4'] > 0.5)]

# Data selection using position (Integer Index based)
dframe.iloc[0][0]

# Select all rows & first three columns
dframe.iloc[:,0:3]

# Chained assignment — pandas may warn and it can silently fail to update
dframe.iloc[0][0] = 10

# Display all rows where C1 has value of 10 or 20
dframe[dframe['C1'].isin([10,20])]

# ## Set Value
# Set value of 888 for all elements in column 'C1'
dframe['C1'] = 888
dframe

# Set value of 777 for first three rows in Column 'C6'
dframe.at[0:3,'C6'] = 777
dframe

# Set value of 333 in first row and third column
dframe.iat[0,2] = 333 dframe dframe.iloc[0,2] = 555 dframe # Create Copy of the calling objects data along with indices. # Modifications to the data or indices of the copy will not be reflected in the original object dframe1 = dframe.copy(deep=True) dframe1[(dframe1['C1'] > 0.5) & (dframe1['C4'] > 0.5)] = 0 dframe1[dframe1['C1'] == 0] # Replace zeros in Column C1 with 99 dframe1[dframe1['C1'].isin([0])] = 99 dframe1 dframe # Display all rows where value of C1 is 99 dframe1[dframe1['C1'] == 99] # ## Dealing with NULL Values dframe.at[0:8 , 'C7'] = np.NaN dframe.at[0:2 , 'C6'] = np.NaN dframe.at[5:6 , 'C5'] = np.NaN dframe # Detect Non-Missing Values # It will return True for NOT-NULL values and False for NULL values dframe.notna() # Detect Missing or NULL Values # It will return True for NULL values and False for NOT-NULL values dframe.isna() # Fill all NULL values with 1020 dframe = dframe.fillna(1020) dframe dframe.at[0:5 , 'C7'] = np.NaN dframe.at[0:2 , 'C6'] = np.NaN dframe.at[5:6 , 'C5'] = np.NaN dframe # Replace Null values in Column 'C5' with number 123 # Replace Null values in Column 'C6' with number 789 dframe.fillna(value={'C5' : 123 , 'C6' : 789}) #Replace first NULL value in Column C7 with 789 dframe.fillna(value={'C7' : 789} , limit=1) # Drop Rows with NULL values dframe.dropna() # Drop Columns with NULL values dframe.dropna(axis='columns') dframe # Drop Rows with NULL values present in C5 or C6 dframe.dropna(subset=['C5' ,'C6']) # ## Descriptive Statistics # Fill NULL values with 55 dframe.fillna(55 , inplace=True) dframe # Mean of all Columns dframe.mean() # Max value per column dframe.max() # Min value per column dframe.min() # Median dframe.median() dframe.std() #Standard Deviation dframe.var() #Variance #Lower Quartile / First Quartile dframe.quantile(0.25) #Second Quartile / Median dframe.quantile(0.50) # Upper Quartile dframe.quantile(0.75) #IQR (Interquartile Range) dframe.quantile(0.75) - dframe.quantile(0.25) # SUM of column values 
dframe.sum() # GENERATES DESCRIPTIVE STATS dframe.describe() #Return unbiased skew # https://www.youtube.com/watch?v=HnMGKsupF8Q dframe.skew() # Return unbiased kurtosis using Fisher’s definition of kurtosis # https://www.youtube.com/watch?v=HnMGKsupF8Q dframe.kurt() #Correlation # https://www.youtube.com/watch?v=qtaqvPAeEJY&list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9&index=10 # https://www.youtube.com/watch?v=xZ_z8KWkhXE&list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9&index=11 dframe.corr() #Covariance # https://www.youtube.com/watch?v=qtaqvPAeEJY&list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9&index=10 # https://www.youtube.com/watch?v=xZ_z8KWkhXE&list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9&index=11 dframe.cov() import statistics as st dframe.at[3:6,'C1'] = 22 dframe # Average st.mean(dframe['C1']) # Hormonic Mean st.harmonic_mean(dframe['C1']) #Returns average of the two middle numbers when length is EVEN arr = np.array([1,2,3,4,5,6,7,8]) st.median(arr) # low median of the data with EVEN length st.median_low(arr) # High median of the data with EVEN length st.median_high(arr) # Mode of Dataset st.mode(dframe['C7']) # Sample Variance st.variance(dframe['C1']) #Population Variance st.pvariance(dframe['C1']) #Sample Standard Deviation st.stdev(dframe['C1']) #Population Standard Deviation st.pstdev(dframe['C1']) # ## Apply function on Dataframe dframe # Finding MAX value in Columns dframe.apply(max) # Finding minimum value in Columns dframe.apply(min) #Sum of Column Values dframe.apply(sum) #Sum of Column Values dframe.apply(np.sum) # Sum of rows dframe.apply(np.sum ,axis=1) # Square root of all values in a DataFrame dframe.applymap(np.sqrt) # Square root of all values in a DataFrame dframe.applymap(math.sqrt) dframe.applymap(float) # Using Lambda function in Dataframes dframe.apply(lambda x: min(x)) # Using Lambda function in Dataframes dframe.apply(lambda x: x*x) # # Merge Dataframes daf1 = pd.DataFrame ({'id': ['1', '2', '3', '4', '5'], 'Name': ['Asif', 'Basit', 'Bran', 'John', 
'David']}) daf1 daf2 = pd.DataFrame ({'id': ['1', '2', '6', '7', '8'], 'Score': [40 , 60 , 80 , 90 , 70]}) daf2 # Inner Join pd.merge(daf1, daf2, on='id', how='inner') # Full Outer Join pd.merge(daf1, daf2, on='id', how='outer') # Left Outer Join pd.merge(daf1, daf2, on='id', how='left') #Right Outer Join pd.merge(daf1, daf2, on='id', how='right') # # Importing multiple CSV files in DataFrame # Append all CSV files path =r'C:\Users\DELL\Documents\GitHub\Public\COVID-19\COVID-19\csse_covid_19_data\csse_covid_19_daily_reports' filenames = glob.glob(path + "/*.csv") covid = pd.DataFrame() for f in filenames: df = pd.read_csv(f) covid = covid.append(df,ignore_index=True,sort=True) # Top 10 rows of the Dataframe covid.head(10) # Bottom 10 rows of the Dataframe covid.tail(10) # Unique values in Country column covid['Country/Region'].unique() # Number of Unique values in Country column covid['Country/Region'].nunique() #Dataframe information covid.info() # Reading columns covid['Country/Region'].head(10) # Reading columns df1 = covid[['Country/Region' ,'Province/State','Confirmed' , 'Last Update']] df1.head(10) #Read specific rows df1.iloc[1:4] #Filter data df1.loc[df1['Country/Region']== 'India'] #Sort Data Frame display('Sorted Data Frame', df1.sort_values(['Country/Region'], ascending=True).head(5)) #Sort Data Frame display('Sorted Data Frame', df1.sort_values(['Country/Region'], ascending=False).head(5)) #Sort Data Frame - Ascending on "Country" & descending on "Last update" display('Sorted Data Frame', df1.sort_values(['Country/Region', 'Last Update'], ascending=[1,0]).head(5)) #Iterating through the dataset for index , row in df1.iterrows(): if (row['Country/Region'] == 'Indonesia' ): display(row[['Country/Region' ,'Confirmed']]) #Unique Values covid['Country/Region'].drop_duplicates(keep='first').head(10) # Countries impacted with Coronavirus countries = covid['Country/Region'].unique() type(countries) , countries df2 = pd.read_csv('Pokemon.csv') df2.head(5) # Sum 
of Columns df2['Total'] = df2['HP'] + df2['Attack'] df2.head(5) # Sum of Columns df2['Total'] = df2.iloc[:,4:10].sum(axis=1) df2.head(5) # + #Shifting "Total" column cols = list(df2.columns) df2 = df2[cols[0:10] + [cols[-1]] + cols[10:12]] df2.head(5) # + #Shifting "Legendary" column - Index location -1 or 12 cols = list(df2.columns) df2 = df2[cols[0:10] + [cols[-1]] + cols[10:12]] df2.head(5) # + #Shifting "Generation" column - Index location -1 or 12 cols = list(df2.columns) df2 = df2[cols[0:10] + [cols[12]] + cols[10:12]] df2.head(5) # + #Save to CSV file df2.to_csv('poke_updated.csv') # + #Save to CSV file without index column df2.to_csv('poke_updated1.csv', index=False) # - df2.head(10) # Save Dataframe as text file df2.to_csv('poke.txt' , sep='\t' , index=False) # Save Dataframe as xlsx file df2.to_excel('poke.xlsx') # Save Dataframe as xlsx file without row names df2.to_excel('poke.xlsx', index=0) # + #Filtering using loc df2.loc[df2['Type 2'] == 'Dragon'] # - #Filtering using loc df3 = df2.loc[(df2['Type 2'] == 'Dragon') & (df2['Type 1'] == 'Dark')] df3 # + #Reset index for Dataframe df3 keeping old index column df4 = df3.reset_index() df4 # + #Reset index for Dataframe df3 removing old index column df3.reset_index(drop=True , inplace=True) df3 # - df2.head(10) # # LIKE OPERATION IN PANDAS df2.Name.str.contains("rill").head(10) # Display all rows containing Name "rill" df2.loc[df2.Name.str.contains("rill")] # Exclude all rows containing "rill" df2.loc[~df2.Name.str.contains("rill")].head(10) # + #Display all rows with Type-1 as "Grass" and Type-2 as "Poison" df2.loc[df2['Type 1'].str.contains("Grass") & df2['Type 2'].str.contains("Poison")] # - df2.loc[df2['Type 1'].str.contains('Grass|Water',regex = True)].head(10) # + # Due to Case-sensitive it will not return any data df2.loc[df2['Type 1'].str.contains('grass|water',regex = True)].head(10) # + # To ignore case we can use "case = False" df2.loc[df2['Type 1'].str.contains('grass|water', case = False ,regex 
= True)].head(10) # + # To ignore case we can use "Flags = re.I" df2.loc[df2['Type 1'].str.contains('grass|water',flags = re.I ,regex = True)].head(10) # - # # Regex in Pandas dataframe # + #Get all rows with name starting with "wa" df2.loc[df2.Name.str.contains('^Wa',flags = re.I ,regex = True)].head(10) # + #Get all rows with name starting with "wa" followed by any letter between a-l df2.loc[df2.Name.str.contains('^Wa[a-l]+',flags = re.I ,regex = True)].head(10) # + #Get all rows with name starting with x , y, z df2.loc[df2.Name.str.contains('^[x-z]',flags = re.I ,regex = True)] # - # Extracting first 3 characters from "Name" column df2['Name2'] = df2.Name.str.extract(r'(^\w{3})') df2.head(5) # Return all rows with "Name" starting with character 'B or b' df2.loc[df2.Name.str.match(r'(^[B|b].*)')].head(5) # # Replace values in dataframe df2.head(10) df2['Type 1'] = df2['Type 1'].replace({"Grass" : "Meadow" , "Fire" :"Blaze"}) df2.head(10) df2['Type 2'] = df2['Type 2'].replace({"Poison" : "Venom"}) df2.head(5) df2['Type 2'] = df2['Type 2'].replace(['Venom' , 'Dragon'] , 'DANGER') df2.head(10) df2.loc[df2['Type 2'] == 'DANGER' , 'Name2'] = np.NaN df2.head(10) df2.loc[df2['Total'] > 400 , ['Name2' , 'Legendary']] = 'ALERT' df2.head(10) df2.loc[df2['Total'] > 400 , ['Legendary' , 'Name2']] = ['ALERT-1' , 'ALERT-2'] df2.head(10) # # Group By df = pd.read_csv('poke_updated1.csv') df.head(5) df.groupby(['Type 1']).mean().head(10) df.groupby(['Type 1']).mean().sort_values('Attack' , ascending = False).head(10) df.groupby(['Type 1']).mean().sort_values('Defense' , ascending = False).head(10) df.groupby(['Type 1']).mean().sort_values('Speed' , ascending = False).head(10) df.sum() df.groupby(['Type 2']).sum().head(5) df.count() df['count1'] = 0 df.groupby(['Type 2']).count()['count1'] df['count1'] = 0 df.groupby(['Type 1']).count()['count1'] df['count1'] = 0 df.groupby(['Type 1' , 'Type 2' , 'Legendary']).count()['count1'] # # Loading Data in Chunks for df in 
pd.read_csv('poke_updated1.csv', chunksize=10): print(df) df df1 = pd.DataFrame() for df in pd.read_csv('poke_updated1.csv', chunksize=10): df1 = pd.concat([df1 ,df]) df1.head(15) # # Stack & unstack in Pandas # + col = pd.MultiIndex.from_product([['2010','2015'],['Literacy' , 'GDP']]) data =([[80,7,88,6],[90,8,92,7],[89,7,91,8],[87,6,93,8]]) df6 = pd.DataFrame(data, index=['India','USA' , 'Russia' , 'China'], columns=col) df6 # - # Stack() Function stacks the columns to rows. st_df = df6.stack() st_df #Unstacks the row to columns unst_df = st_df.unstack() unst_df unst_df = unst_df.unstack() unst_df unst_df = unst_df.unstack() unst_df # # PIVOT Tables # + data = { 'Country':['India','USA' , 'Russia' , 'China','India','USA' , 'Russia' , 'China','India','USA' , 'Russia' , 'China','India','USA' , 'Russia' , 'China'], 'Year':['2010','2010','2010','2010' , '2010','2010','2010','2010','2015','2015','2015','2015','2015','2015','2015','2015'], 'Literacy/GDP':['GDP' , 'GDP' , 'GDP' , 'GDP','Literacy' , 'Literacy', 'Literacy' , 'Literacy','GDP' , 'GDP','GDP' , 'GDP','Literacy' , 'Literacy','Literacy' , 'Literacy'], 'Value':[7,8,7,6,80,90,89,87,6,7,8, 8, 88 , 92 , 91 ,93]} df7 = pd.DataFrame(data,columns=['Country','Year','Literacy/GDP','Value']) df7 # - # Pivot table with SUM aggregation pd.pivot_table(df7 , index= ['Year' , 'Literacy/GDP'] , aggfunc='sum') # Pivot table with MEAN aggregation pd.pivot_table(df7 , index= ['Year' , 'Literacy/GDP'] , aggfunc='mean') # # Hierarchical indexing df7 df8=df7.set_index(['Year', 'Literacy/GDP']) df8 df8.index df8.loc['2010'] df8.loc[['2010']] df8.loc['2015','Literacy'] df8.loc['2015','Literacy'] df8=df7.set_index(['Year', 'Literacy/GDP' , 'Country']) df8 # ### SWAP Columns in Hierarchical indexing df7 df8=df7.set_index(['Year', 'Literacy/GDP']) df8 # Swaping the columns in Hierarchical index df9 = df8.swaplevel('Year', 'Literacy/GDP') df9 # Swaping the columns in Hierarchical index df9 = df9.swaplevel('Year', 'Literacy/GDP') df9 # # 
Crosstab in Pandas df7 pd.crosstab(df7['Literacy/GDP'] , df7.Value , margins=True) # 2 way cross table pd.crosstab(df7.Year , df7['Literacy/GDP'] , margins=True) # 3 way cross table pd.crosstab([df7.Year , df7['Literacy/GDP']] , df7.Country, margins=True) # # Row & Column Bind # ### Row Bind df8 = pd.DataFrame({'ID' :[1,2,3,4] , 'Name' :['Asif' , 'Basit' , 'Ross' , 'John'] , 'Score' :[99 , 66 , 44 , 33]}) df8 df9 = pd.DataFrame({'ID' :[5,6,7,8] , 'Name' :['Michelle' , 'Ramiro' , 'Vignesh' , 'Damon'] , 'Score' :[78 , 54 , 77 , 87]}) df9 # Row Bind with concat() function pd.concat([df8 , df9]) # Row Bind with append() function df8.append(df9) # ### Column Bind df10 = pd.DataFrame({'ID' :[1,2,3,4] , 'Name' :['Asif' , 'Basit' , 'Ross' , 'John']}) df10 df11 = pd.DataFrame({'Age' :[20,30,35,40] , 'Score' :[99 , 66 , 44 , 33]}) df11 pd.concat([df10,df11] , axis = 1)
pandas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from enum import Enum


class Event(list):
    """A list of callbacks; calling the event invokes every subscriber in order."""

    def __call__(self, *args, **kwargs):
        for handler in self:
            handler(*args, **kwargs)


class Game:
    """Mediator: broadcasts rat status changes to every subscribed handler."""

    def __init__(self):
        self.events = Event()

    def rat_status(self, args):
        """Fan a single status notification (an EntersInfo) out to all subscribers."""
        self.events(args)


class StatusRat(Enum):
    LIVE = 1
    DEAD = 2


class EntersInfo:
    """Notification payload: which rat changed status, and to what."""

    def __init__(self, rat, status):
        self.rat = rat
        self.status = status


class Rat:
    """A rat whose attack value equals the number of live rats in the game.

    Each rat subscribes to the game's event and announces its own arrival;
    the handlers keep every rat's ``attack`` consistent.
    """

    def __init__(self, game):
        self.game = game
        self.attack = 1
        self.status = StatusRat.LIVE
        self.game.events.append(self.rat_modifier)
        # Announce our arrival so the existing rats (and we) can adjust.
        self.game.rat_status(EntersInfo(self, self.status))

    def rat_modifier(self, args):
        """React to another rat entering (LIVE) or dying (DEAD).

        BUG FIXES vs. the original:
        * The original tested ``args.status.LIVE == StatusRat.LIVE`` — attribute
          access on an enum member, which is always true, so the DEAD branch
          could never run.  Compare the member itself instead.
        * The original re-broadcast ``rat_status`` from inside this handler,
          which made every notification trigger another notification and blew
          the recursion limit as soon as a second rat was created.  Instead,
          bump both sides once, with no re-broadcast.
        """
        if self is args.rat:
            return  # ignore our own announcements
        if args.status == StatusRat.LIVE:
            self.attack += 1        # one more live rat in the game
            args.rat.attack += 1    # the newcomer counts us as well
        elif args.status == StatusRat.DEAD:
            self.attack -= 1        # one fewer live rat

    def __exit__(self):
        # Kept for interface compatibility with the original notebook; note this
        # is NOT a valid context-manager __exit__ (wrong signature, no __enter__).
        self.rat_died()

    def rat_died(self):
        """Mark this rat dead, unsubscribe it, and notify the survivors.

        The original called ``rat_status(self, self.status)`` — two positional
        arguments to a one-argument method — which raised TypeError.  Wrap the
        payload in EntersInfo like every other broadcast.
        """
        self.status = StatusRat.DEAD
        # Stop listening first so a dead rat no longer reacts to future events.
        if self.rat_modifier in self.game.events:
            self.game.events.remove(self.rat_modifier)
        self.game.rat_status(EntersInfo(self, self.status))
# -

game = Game()
rat = Rat(game)
rat2 = Rat(game)
rat2.attack
rat.attack
Observer/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.4 64-bit # name: python3 # --- # + [markdown] id="dzNng6vCL9eP" # #CS231n Python Tutorial With Google Colab # + [markdown] id="0vJLt3JRL9eR" # This tutorial was originally written by [<NAME>](https://web.eecs.umich.edu/~justincj/) for cs231n. It was adapted as a Jupyter notebook for cs228 by [<NAME>](http://web.stanford.edu/~kuleshov/) and [<NAME>](https://symsys.stanford.edu/viewing/symsysaffiliate/21335). # # This version has been adapted for Colab by <NAME> for the Spring 2020 edition of [cs231n](https://cs231n.github.io/). It runs Python3 by default. # + [markdown] id="qVrTo-LhL9eS" # ##Introduction # + [markdown] id="9t1gKp9PL9eV" # Python is a great general-purpose programming language on its own, but with the help of a few popular libraries (numpy, scipy, matplotlib) it becomes a powerful environment for scientific computing. # # We expect that many of you will have some experience with Python and numpy; for the rest of you, this section will serve as a quick crash course both on the Python programming language and on the use of Python for scientific computing. # # Some of you may have previous knowledge in Matlab, in which case we also recommend the numpy for Matlab users page (https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html). # + [markdown] id="U1PvreR9L9eW" # In this tutorial, we will cover: # # * Basic Python: Basic data types (Containers, Lists, Dictionaries, Sets, Tuples), Functions, Classes # * Numpy: Arrays, Array indexing, Datatypes, Array math, Broadcasting # * Matplotlib: Plotting, Subplots, Images # * IPython: Creating notebooks, Typical workflows # + [markdown] id="nxvEkGXPM3Xh" # ## A Brief Note on Python Versions # # As of Janurary 1, 2020, Python has [officially dropped support](https://www.python.org/doc/sunset-python-2/) for `python2`. 
# We'll be using Python 3.7 for this iteration of the course. You can check
# your Python version at the command line by running `python --version`.
# In Colab, we can enforce the Python version by clicking
# `Runtime -> Change Runtime Type` and selecting `python3`. Note that as of
# April 2020, Colab uses Python 3.6.9 which should run everything without
# any errors.

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="1L4Am0QATgOc" outputId="bb5ee3ac-8683-44ab-e599-a2077510f327"
# !python --version

# + [markdown] id="JAFKYgrpL9eY"
# ##Basics of Python

# + [markdown] id="RbFS6tdgL9ea"
# Python is a high-level, dynamically typed multiparadigm programming
# language. Python code is often said to be almost like pseudocode, since it
# allows you to express very powerful ideas in very few lines of code while
# being very readable. As an example, here is an implementation of the
# classic quicksort algorithm in Python:

# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="cYb0pjh1L9eb" outputId="9a8e37de-1dc1-4092-faee-06ad4ff2d73a"
def quicksort(arr):
    """Return a sorted copy of *arr* via recursive three-way partitioning.

    The input is never mutated; each call partitions around the middle
    element and recurses on the strictly-smaller and strictly-larger parts.
    """
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    below, equal, above = [], [], []
    # Single pass over the data instead of three separate comprehensions;
    # relative order within each partition is preserved either way.
    for value in arr:
        if value < pivot:
            below.append(value)
        elif value > pivot:
            above.append(value)
        else:
            equal.append(value)
    return quicksort(below) + equal + quicksort(above)

print(quicksort([3,6,8,10,1,2,1]))

# + [markdown] id="NwS_hu4xL9eo"
# ###Basic data types

# + [markdown] id="DL5sMSZ9L9eq"
# ####Numbers

# + [markdown] id="MGS0XEWoL9er"
# Integers and floats work as you would expect from other languages:

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="KheDr_zDL9es" outputId="1db9f4d3-2e0d-4008-f78a-161ed52c4359"
x = 3
print(x, type(x))

# + colab={"base_uri": "https://localhost:8080/", "height": 86} id="sk_8DFcuL9ey" outputId="dd60a271-3457-465d-e16a-41acf12a56ab"
print(x + 1)   # Addition
print(x - 1)   # Subtraction
print(x * 2)   # Multiplication
print(x ** 2)  # Exponentiation

# + colab={"base_uri": "https://localhost:8080/", "height": 52}
id="U4Jl8K0tL9e4" outputId="07e3db14-3781-42b7-8ba6-042b3f9f72ba" x += 1 print(x) x *= 2 print(x) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="w-nZ0Sg_L9e9" outputId="3aa579f8-9540-46ef-935e-be887781ecb4" y = 2.5 print(type(y)) print(y, y + 1, y * 2, y ** 2) # + [markdown] id="r2A9ApyaL9fB" # Note that unlike many languages, Python does not have unary increment (x++) or decrement (x--) operators. # # Python also has built-in types for long integers and complex numbers; you can find all of the details in the [documentation](https://docs.python.org/3.7/library/stdtypes.html#numeric-types-int-float-long-complex). # + [markdown] id="EqRS7qhBL9fC" # ####Booleans # + [markdown] id="Nv_LIVOJL9fD" # Python implements all of the usual operators for Boolean logic, but uses English words rather than symbols (`&&`, `||`, etc.): # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="RvoImwgGL9fE" outputId="1517077b-edca-463f-857b-6a8c386cd387" t, f = True, False print(type(t)) # + [markdown] id="YQgmQfOgL9fI" # Now we let's look at the operations: # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="6zYm7WzCL9fK" outputId="f3cebe76-5af4-473a-8127-88a1fd60560f" print(t and f) # Logical AND; print(t or f) # Logical OR; print(not t) # Logical NOT; print(t != f) # Logical XOR; # + [markdown] id="UQnQWFEyL9fP" # ####Strings # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="AijEDtPFL9fP" outputId="2a6b0cd7-58f1-43cf-e6b7-bf940d532549" hello = 'hello' # String literals can use single quotes world = "world" # or double quotes; it does not matter print(hello, len(hello)) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="saDeaA7hL9fT" outputId="2837d0ab-9ae5-4053-d087-bfa0af81c344" hw = hello + ' ' + world # String concatenation print(hw) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Nji1_UjYL9fY" outputId="0149b0ca-425a-4a34-8e24-8dff7080922e" hw12 = '{} {} {}'.format(hello, 
world, 12) # string formatting print(hw12) # + [markdown] id="bUpl35bIL9fc" # String objects have a bunch of useful methods; for example: # + colab={"base_uri": "https://localhost:8080/", "height": 121} id="VOxGatlsL9fd" outputId="ab009df3-8643-4d3e-f85f-a813b70db9cb" s = "hello" print(s.capitalize()) # Capitalize a string print(s.upper()) # Convert a string to uppercase; prints "HELLO" print(s.rjust(7)) # Right-justify a string, padding with spaces print(s.center(7)) # Center a string, padding with spaces print(s.replace('l', '(ell)')) # Replace all instances of one substring with another print(' world '.strip()) # Strip leading and trailing whitespace # + [markdown] id="06cayXLtL9fi" # You can find a list of all string methods in the [documentation](https://docs.python.org/3.7/library/stdtypes.html#string-methods). # + [markdown] id="p-6hClFjL9fk" # ###Containers # + [markdown] id="FD9H18eQL9fk" # Python includes several built-in container types: lists, dictionaries, sets, and tuples. 
# + [markdown] id="UsIWOe0LL9fn" # ####Lists # + [markdown] id="wzxX7rgWL9fn" # A list is the Python equivalent of an array, but is resizeable and can contain elements of different types: # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="hk3A8pPcL9fp" outputId="b545939a-580c-4356-db95-7ad3670b46e4" xs = [3, 1, 2] # Create a list print(xs, xs[2]) print(xs[-1]) # Negative indices count from the end of the list; prints "2" # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="YCjCy_0_L9ft" outputId="417c54ff-170b-4372-9099-0f756f8e48af" xs[2] = 'foo' # Lists can contain elements of different types print(xs) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="vJ0x5cF-L9fx" outputId="a97731a3-70e1-4553-d9e0-2aea227cac80" xs.append('bar') # Add a new element to the end of the list print(xs) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="cxVCNRTNL9f1" outputId="508fbe59-20aa-48b5-a1b2-f90363e7a104" x = xs.pop() # Remove and return the last element of the list print(x, xs) # + [markdown] id="ilyoyO34L9f4" # As usual, you can find all the gory details about lists in the [documentation](https://docs.python.org/3.7/tutorial/datastructures.html#more-on-lists). 
# + [markdown] id="ovahhxd_L9f5" # ####Slicing # + [markdown] id="YeSYKhv9L9f6" # In addition to accessing list elements one at a time, Python provides concise syntax to access sublists; this is known as slicing: # + colab={"base_uri": "https://localhost:8080/", "height": 139} id="ninq666bL9f6" outputId="c3c2ed92-7358-4fdb-bbc0-e90f82e7e941" nums = list(range(5)) # range is a built-in function that creates a list of integers print(nums) # Prints "[0, 1, 2, 3, 4]" print(nums[2:4]) # Get a slice from index 2 to 4 (exclusive); prints "[2, 3]" print(nums[2:]) # Get a slice from index 2 to the end; prints "[2, 3, 4]" print(nums[:2]) # Get a slice from the start to index 2 (exclusive); prints "[0, 1]" print(nums[:]) # Get a slice of the whole list; prints ["0, 1, 2, 3, 4]" print(nums[:-1]) # Slice indices can be negative; prints ["0, 1, 2, 3]" nums[2:4] = [8, 9] # Assign a new sublist to a slice print(nums) # Prints "[0, 1, 8, 9, 4]" # + [markdown] id="UONpMhF4L9f_" # ####Loops # + [markdown] id="_DYz1j6QL9f_" # You can loop over the elements of a list like this: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="4cCOysfWL9gA" outputId="560e46c7-279c-409a-838c-64bea8d321c4" animals = ['cat', 'dog', 'monkey'] for animal in animals: print(animal) # + [markdown] id="KxIaQs7pL9gE" # If you want access to the index of each element within the body of a loop, use the built-in `enumerate` function: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="JjGnDluWL9gF" outputId="81421905-17ea-4c5a-bcc0-176de19fd9bd" animals = ['cat', 'dog', 'monkey'] for idx, animal in enumerate(animals): print('#{}: {}'.format(idx + 1, animal)) # + [markdown] id="arrLCcMyL9gK" # ####List comprehensions: # + [markdown] id="5Qn2jU_pL9gL" # When programming, frequently we want to transform one type of data into another. 
As a simple example, consider the following code that computes square numbers: # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="IVNEwoMXL9gL" outputId="d571445b-055d-45f0-f800-24fd76ceec5a" nums = [0, 1, 2, 3, 4] squares = [] for x in nums: squares.append(x ** 2) print(squares) # + [markdown] id="7DmKVUFaL9gQ" # You can make this code simpler using a list comprehension: # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="kZxsUfV6L9gR" outputId="4254a7d4-58ba-4f70-a963-20c46b485b72" nums = [0, 1, 2, 3, 4] squares = [x ** 2 for x in nums] print(squares) # + [markdown] id="-D8ARK7tL9gV" # List comprehensions can also contain conditions: # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="yUtgOyyYL9gV" outputId="1ae7ab58-8119-44dc-8e57-fda09197d026" nums = [0, 1, 2, 3, 4] even_squares = [x ** 2 for x in nums if x % 2 == 0] print(even_squares) # + [markdown] id="H8xsUEFpL9gZ" # ####Dictionaries # + [markdown] id="kkjAGMAJL9ga" # A dictionary stores (key, value) pairs, similar to a `Map` in Java or an object in Javascript. 
You can use it like this: # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="XBYI1MrYL9gb" outputId="8e24c1da-0fc0-4b4c-a3e6-6f758a53b7da" d = {'cat': 'cute', 'dog': 'furry'} # Create a new dictionary with some data print(d['cat']) # Get an entry from a dictionary; prints "cute" print('cat' in d) # Check if a dictionary has a given key; prints "True" # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="pS7e-G-HL9gf" outputId="feb4bf18-c0a3-42a2-eaf5-3fc390f36dcf" d['fish'] = 'wet' # Set an entry in a dictionary print(d['fish']) # Prints "wet" # + colab={"base_uri": "https://localhost:8080/", "height": 165} id="tFY065ItL9gi" outputId="7e42a5f0-1856-4608-a927-0930ab37a66c" print(d['monkey']) # KeyError: 'monkey' not a key of d # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="8TjbEWqML9gl" outputId="ef14d05e-401d-4d23-ed1a-0fe6b4c77d6f" print(d.get('monkey', 'N/A')) # Get an element with a default; prints "N/A" print(d.get('fish', 'N/A')) # Get an element with a default; prints "wet" # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="0EItdNBJL9go" outputId="652a950f-b0c2-4623-98bd-0191b300cd57" del d['fish'] # Remove an element from a dictionary print(d.get('fish', 'N/A')) # "fish" is no longer a key; prints "N/A" # + [markdown] id="wqm4dRZNL9gr" # You can find all you need to know about dictionaries in the [documentation](https://docs.python.org/2/library/stdtypes.html#dict). # + [markdown] id="IxwEqHlGL9gr" # It is easy to iterate over the keys in a dictionary: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="rYfz7ZKNL9gs" outputId="155bdb17-3179-4292-c832-8166e955e942" d = {'person': 2, 'cat': 4, 'spider': 8} for animal, legs in d.items(): print('A {} has {} legs'.format(animal, legs)) # + [markdown] id="17sxiOpzL9gz" # Dictionary comprehensions: These are similar to list comprehensions, but allow you to easily construct dictionaries. 
For example: # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="8PB07imLL9gz" outputId="e9ddf886-39ed-4f35-dd80-64a19d2eec9b" nums = [0, 1, 2, 3, 4] even_num_to_square = {x: x ** 2 for x in nums if x % 2 == 0} print(even_num_to_square) # + [markdown] id="V9MHfUdvL9g2" # ####Sets # + [markdown] id="Rpm4UtNpL9g2" # A set is an unordered collection of distinct elements. As a simple example, consider the following: # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="MmyaniLsL9g2" outputId="8f152d48-0a07-432a-cf98-8de4fd57ddbb" animals = {'cat', 'dog'} print('cat' in animals) # Check if an element is in a set; prints "True" print('fish' in animals) # prints "False" # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="ElJEyK86L9g6" outputId="b9d7dab9-5a98-41cd-efbc-786d0c4377f7" animals.add('fish') # Add an element to a set print('fish' in animals) print(len(animals)) # Number of elements in a set; # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="5uGmrxdPL9g9" outputId="e644d24c-26c6-4b43-ab15-8aa81fe884d4" animals.add('cat') # Adding an element that is already in the set does nothing print(len(animals)) animals.remove('cat') # Remove an element from a set print(len(animals)) # + [markdown] id="zk2DbvLKL9g_" # _Loops_: Iterating over a set has the same syntax as iterating over a list; however since sets are unordered, you cannot make assumptions about the order in which you visit the elements of the set: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="K47KYNGyL9hA" outputId="4477f897-4355-4816-b39b-b93ffbac4bf0" animals = {'cat', 'dog', 'fish'} for idx, animal in enumerate(animals): print('#{}: {}'.format(idx + 1, animal)) # + [markdown] id="puq4S8buL9hC" # Set comprehensions: Like lists and dictionaries, we can easily construct sets using set comprehensions: # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="iw7k90k3L9hC" 
outputId="72d6b824-6d31-47b2-f929-4cf434590ee5" from math import sqrt print({int(sqrt(x)) for x in range(30)}) # + [markdown] id="qPsHSKB1L9hF" # ####Tuples # + [markdown] id="kucc0LKVL9hG" # A tuple is an (immutable) ordered list of values. A tuple is in many ways similar to a list; one of the most important differences is that tuples can be used as keys in dictionaries and as elements of sets, while lists cannot. Here is a trivial example: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="9wHUyTKxL9hH" outputId="cdc5f620-04fe-4b0b-df7a-55b061d23d88" d = {(x, x + 1): x for x in range(10)} # Create a dictionary with tuple keys t = (5, 6) # Create a tuple print(type(t)) print(d[t]) print(d[(1, 2)]) # + colab={"base_uri": "https://localhost:8080/", "height": 165} id="HoO8zYKzL9hJ" outputId="28862bfc-0298-40d7-f8c4-168e109d2d93" t[0] = 1 # + [markdown] id="AXA4jrEOL9hM" # ###Functions # + [markdown] id="WaRms-QfL9hN" # Python functions are defined using the `def` keyword. 
# For example:

# + colab={"base_uri": "https://localhost:8080/", "height": 69} id="kiMDUr58L9hN" outputId="9f53bf9a-7b2a-4c51-9def-398e4677cd6c"
def sign(x):
    """Classify *x* as 'positive', 'negative' or 'zero'."""
    if x > 0:
        return 'positive'
    if x < 0:
        return 'negative'
    return 'zero'

for x in [-1, 0, 1]:
    print(sign(x))

# + [markdown] id="U-QJFt8TL9hR"
# We will often define functions to take optional keyword arguments, like this:

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="PfsZ3DazL9hR" outputId="6e6af832-67d8-4d8c-949b-335927684ae3"
def hello(name, loud=False):
    """Print a greeting for *name*; shout it in upper case when *loud* is true."""
    greeting = 'HELLO, {}'.format(name.upper()) if loud else 'Hello, {}!'.format(name)
    print(greeting)

hello('Bob')
hello('Fred', loud=True)

# + [markdown] id="ObA9PRtQL9hT"
# ###Classes

# + [markdown] id="hAzL_lTkL9hU"
# The syntax for defining classes in Python is straightforward:

# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="RWdbaGigL9hU" outputId="4f6615c5-75a7-4ce4-8ea1-1e7f5e4e9fc3"
class Greeter:
    """Holds a name and prints greetings for it on demand."""

    # Constructor
    def __init__(self, name):
        self.name = name  # Create an instance variable

    # Instance method
    def greet(self, loud=False):
        """Print a greeting; upper-cased and shouted when *loud* is true."""
        text = 'HELLO, {}'.format(self.name.upper()) if loud else 'Hello, {}!'.format(self.name)
        print(text)

g = Greeter('Fred')   # Construct an instance of the Greeter class
g.greet()             # Call an instance method; prints "Hello, Fred"
g.greet(loud=True)    # Call an instance method; prints "HELLO, FRED!"

# + [markdown] id="3cfrOV4dL9hW"
# ##Numpy

# + [markdown] id="fY12nHhyL9hX"
# Numpy is the core library for scientific computing in Python. It provides
# a high-performance multidimensional array object, and tools for working
# with these arrays. If you are already familiar with MATLAB, you might find
# this [tutorial](http://wiki.scipy.org/NumPy_for_Matlab_Users) useful to
# get started with Numpy.
# + [markdown] id="lZMyAdqhL9hY" # To use Numpy, we first need to import the `numpy` package: # + id="58QdX8BLL9hZ" import numpy as np # + [markdown] id="DDx6v1EdL9hb" # ###Arrays # + [markdown] id="f-Zv3f7LL9hc" # A numpy array is a grid of values, all of the same type, and is indexed by a tuple of nonnegative integers. The number of dimensions is the rank of the array; the shape of an array is a tuple of integers giving the size of the array along each dimension. # + [markdown] id="_eMTRnZRL9hc" # We can initialize numpy arrays from nested Python lists, and access elements using square brackets: # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="-l3JrGxCL9hc" outputId="8d9dad18-c734-4a8a-ca8c-44060a40fb79" a = np.array([1, 2, 3]) # Create a rank 1 array print(type(a), a.shape, a[0], a[1], a[2]) a[0] = 5 # Change an element of the array print(a) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="ma6mk-kdL9hh" outputId="0b54ff2f-e7f1-4b30-c653-9bf81cb8fbb0" b = np.array([[1,2,3],[4,5,6]]) # Create a rank 2 array print(b) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="ymfSHAwtL9hj" outputId="5bd292d8-c751-43b9-d480-f357dde52342" print(b.shape) print(b[0, 0], b[0, 1], b[1, 0]) # + [markdown] id="F2qwdyvuL9hn" # Numpy also provides many functions to create arrays: # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="mVTN_EBqL9hn" outputId="d267c65f-ba90-4043-cedb-f468ab1bcc5d" a = np.zeros((2,2)) # Create an array of all zeros print(a) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="skiKlNmlL9h5" outputId="7d1ec1b5-a1fe-4f44-cbe3-cdeacad425f1" b = np.ones((1,2)) # Create an array of all ones print(b) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="HtFsr03bL9h7" outputId="2688b157-2fad-4fc6-f20b-8633207f0326" c = np.full((2,2), 7) # Create a constant array print(c) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="-QcALHvkL9h9" 
outputId="5035d6fe-cb7e-4222-c972-55fe23c9d4c0" d = np.eye(2) # Create a 2x2 identity matrix print(d) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="RCpaYg9qL9iA" outputId="25f0b387-39cf-42f3-8701-de860cc75e2e" e = np.random.random((2,2)) # Create an array filled with random values print(e) # + [markdown] id="jI5qcSDfL9iC" # ###Array indexing # + [markdown] id="M-E4MUeVL9iC" # Numpy offers several ways to index into arrays. # + [markdown] id="QYv4JyIEL9iD" # Slicing: Similar to Python lists, numpy arrays can be sliced. Since arrays may be multidimensional, you must specify a slice for each dimension of the array: # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="wLWA0udwL9iD" outputId="99f08618-c513-4982-8982-b146fc72dab3" import numpy as np # Create the following rank 2 array with shape (3, 4) # [[ 1 2 3 4] # [ 5 6 7 8] # [ 9 10 11 12]] a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) # Use slicing to pull out the subarray consisting of the first 2 rows # and columns 1 and 2; b is the following array of shape (2, 2): # [[2 3] # [6 7]] b = a[:2, 1:3] print(b) # + [markdown] id="KahhtZKYL9iF" # A slice of an array is a view into the same data, so modifying it will modify the original array. # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="1kmtaFHuL9iG" outputId="ee3ab60c-4064-4a9e-b04c-453d3955f1d1" print(a[0, 1]) b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1] print(a[0, 1]) # + [markdown] id="_Zcf3zi-L9iI" # You can also mix integer indexing with slice indexing. However, doing so will yield an array of lower rank than the original array. 
Note that this is quite different from the way that MATLAB handles array slicing: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="G6lfbPuxL9iJ" outputId="a225fe9d-2a29-4e14-a243-2b7d583bd4bc" # Create the following rank 2 array with shape (3, 4) a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) print(a) # + [markdown] id="NCye3NXhL9iL" # Two ways of accessing the data in the middle row of the array. # Mixing integer indexing with slices yields an array of lower rank, # while using only slices yields an array of the same rank as the # original array: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="EOiEMsmNL9iL" outputId="ab2ebe48-9002-45a8-9462-fd490b467f40" row_r1 = a[1, :] # Rank 1 view of the second row of a row_r2 = a[1:2, :] # Rank 2 view of the second row of a row_r3 = a[[1], :] # Rank 2 view of the second row of a print(row_r1, row_r1.shape) print(row_r2, row_r2.shape) print(row_r3, row_r3.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 104} id="JXu73pfDL9iN" outputId="6c589b85-e9b0-4c13-a39d-4cd9fb2f41ac" # We can make the same distinction when accessing columns of an array: col_r1 = a[:, 1] col_r2 = a[:, 1:2] print(col_r1, col_r1.shape) print() print(col_r2, col_r2.shape) # + [markdown] id="VP3916bOL9iP" # Integer array indexing: When you index into numpy arrays using slicing, the resulting array view will always be a subarray of the original array. In contrast, integer array indexing allows you to construct arbitrary arrays using the data from another array. Here is an example: # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="TBnWonIDL9iP" outputId="c29fa2cd-234e-4765-c70a-6889acc63573" a = np.array([[1,2], [3, 4], [5, 6]]) # An example of integer array indexing. 
# The returned array will have shape (3,) and print(a[[0, 1, 2], [0, 1, 0]]) # The above example of integer array indexing is equivalent to this: print(np.array([a[0, 0], a[1, 1], a[2, 0]])) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="n7vuati-L9iR" outputId="c3e9ba14-f66e-4202-999e-2e1aed5bd631" # When using integer array indexing, you can reuse the same # element from the source array: print(a[[0, 0], [1, 1]]) # Equivalent to the previous integer array indexing example print(np.array([a[0, 1], a[0, 1]])) # + [markdown] id="kaipSLafL9iU" # One useful trick with integer array indexing is selecting or mutating one element from each row of a matrix: # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="ehqsV7TXL9iU" outputId="de509c40-4ee4-4b7c-e75d-1a936a3350e7" # Create a new array from which we will select elements a = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) print(a) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="pAPOoqy5L9iV" outputId="f812e29b-9218-4767-d3a8-e9854e754e68" # Create an array of indices b = np.array([0, 2, 0, 1]) # Select one element from each row of a using the indices in b print(a[np.arange(4), b]) # Prints "[ 1 6 7 11]" # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="6v1PdI1DL9ib" outputId="89f50f82-de1b-4417-e55c-edbc0ee07584" # Mutate one element from each row of a using the indices in b a[np.arange(4), b] += 10 print(a) # + [markdown] id="kaE8dBGgL9id" # Boolean array indexing: Boolean array indexing lets you pick out arbitrary elements of an array. Frequently this type of indexing is used to select the elements of an array that satisfy some condition. 
Here is an example: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="32PusjtKL9id" outputId="8782e8ec-b78d-44d7-8141-23e39750b854" import numpy as np a = np.array([[1,2], [3, 4], [5, 6]]) bool_idx = (a > 2) # Find the elements of a that are bigger than 2; # this returns a numpy array of Booleans of the same # shape as a, where each slot of bool_idx tells # whether that element of a is > 2. print(bool_idx) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="cb2IRMXaL9if" outputId="5983f208-3738-472d-d6ab-11fe85b36c95" # We use boolean array indexing to construct a rank 1 array # consisting of the elements of a corresponding to the True values # of bool_idx print(a[bool_idx]) # We can do all of the above in a single concise statement: print(a[a > 2]) # + [markdown] id="CdofMonAL9ih" # For brevity we have left out a lot of details about numpy array indexing; if you want to know more you should read the documentation. # + [markdown] id="jTctwqdQL9ih" # ###Datatypes # + [markdown] id="kSZQ1WkIL9ih" # Every numpy array is a grid of elements of the same type. Numpy provides a large set of numeric datatypes that you can use to construct arrays. Numpy tries to guess a datatype when you create an array, but functions that construct arrays usually also include an optional argument to explicitly specify the datatype. Here is an example: # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="4za4O0m5L9ih" outputId="2ea4fb80-a4df-43f9-c162-5665895c13ae" x = np.array([1, 2]) # Let numpy choose the datatype y = np.array([1.0, 2.0]) # Let numpy choose the datatype z = np.array([1, 2], dtype=np.int64) # Force a particular datatype print(x.dtype, y.dtype, z.dtype) # + [markdown] id="RLVIsZQpL9ik" # You can read all about numpy datatypes in the [documentation](http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html). 
# + [markdown] id="TuB-fdhIL9ik" # ###Array math # + [markdown] id="18e8V8elL9ik" # Basic mathematical functions operate elementwise on arrays, and are available both as operator overloads and as functions in the numpy module: # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="gHKvBrSKL9il" outputId="a8a924b1-9d60-4b68-8fd3-e4657ae3f08b" x = np.array([[1,2],[3,4]], dtype=np.float64) y = np.array([[5,6],[7,8]], dtype=np.float64) # Elementwise sum; both produce the array print(x + y) print(np.add(x, y)) # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="1fZtIAMxL9in" outputId="122f1380-6144-4d6c-9d31-f62d839889a2" # Elementwise difference; both produce the array print(x - y) print(np.subtract(x, y)) # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="nil4AScML9io" outputId="038c8bb2-122b-4e59-c0a8-a091014fe68e" # Elementwise product; both produce the array print(x * y) print(np.multiply(x, y)) # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="0JoA4lH6L9ip" outputId="12351a74-7871-4bc2-97ce-a508bf4810da" # Elementwise division; both produce the array # [[ 0.2 0.33333333] # [ 0.42857143 0.5 ]] print(x / y) print(np.divide(x, y)) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="g0iZuA6bL9ir" outputId="29927dda-4167-4aa8-fbda-9008b09e4356" # Elementwise square root; produces the array # [[ 1. 1.41421356] # [ 1.73205081 2. ]] print(np.sqrt(x)) # + [markdown] id="a5d_uujuL9it" # Note that unlike MATLAB, `*` is elementwise multiplication, not matrix multiplication. We instead use the dot function to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. 
dot is available both as a function in the numpy module and as an instance method of array objects: # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="I3FnmoSeL9iu" outputId="46f4575a-2e5e-4347-a34e-0cc5bd280110" x = np.array([[1,2],[3,4]]) y = np.array([[5,6],[7,8]]) v = np.array([9,10]) w = np.array([11, 12]) # Inner product of vectors; both produce 219 print(v.dot(w)) print(np.dot(v, w)) # + [markdown] id="vmxPbrHASVeA" # You can also use the `@` operator which is equivalent to numpy's `dot` operator. # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="vyrWA-mXSdtt" outputId="a9aae545-2c93-4649-b220-b097655955f6" print(v @ w) # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="zvUODeTxL9iw" outputId="4093fc76-094f-4453-a421-a212b5226968" # Matrix / vector product; both produce the rank 1 array [29 67] print(x.dot(v)) print(np.dot(x, v)) print(x @ v) # + colab={"base_uri": "https://localhost:8080/", "height": 121} id="3V_3NzNEL9iy" outputId="af2a89f9-af5d-47a6-9ad2-06a84b521b94" # Matrix / matrix product; both produce the rank 2 array # [[19 22] # [43 50]] print(x.dot(y)) print(np.dot(x, y)) print(x @ y) # + [markdown] id="FbE-1If_L9i0" # Numpy provides many useful functions for performing computations on arrays; one of the most useful is `sum`: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="DZUdZvPrL9i0" outputId="99cad470-d692-4b25-91c9-a57aa25f4c6e" x = np.array([[1,2],[3,4]]) print(np.sum(x)) # Compute sum of all elements; prints "10" print(np.sum(x, axis=0)) # Compute sum of each column; prints "[4 6]" print(np.sum(x, axis=1)) # Compute sum of each row; prints "[3 7]" # + [markdown] id="ahdVW4iUL9i3" # You can find the full list of mathematical functions provided by numpy in the [documentation](http://docs.scipy.org/doc/numpy/reference/routines.math.html). # # Apart from computing mathematical functions using arrays, we frequently need to reshape or otherwise manipulate data in arrays. 
The simplest example of this type of operation is transposing a matrix; to transpose a matrix, simply use the T attribute of an array object: # + colab={"base_uri": "https://localhost:8080/", "height": 104} id="63Yl1f3oL9i3" outputId="c75ac7ba-4351-42f8-a09c-a4e0d966ab50" print(x) print("transpose\n", x.T) # + colab={"base_uri": "https://localhost:8080/", "height": 104} id="mkk03eNIL9i4" outputId="499eec5a-55b7-473a-d4aa-9d023d63885a" v = np.array([[1,2,3]]) print(v ) print("transpose\n", v.T) # + [markdown] id="REfLrUTcL9i7" # ###Broadcasting # + [markdown] id="EygGAMWqL9i7" # Broadcasting is a powerful mechanism that allows numpy to work with arrays of different shapes when performing arithmetic operations. Frequently we have a smaller array and a larger array, and we want to use the smaller array multiple times to perform some operation on the larger array. # # For example, suppose that we want to add a constant vector to each row of a matrix. We could do it like this: # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="WEEvkV1ZL9i7" outputId="3896d03c-3ece-4aa8-f675-aef3a220574d" # We will add the vector v to each row of the matrix x, # storing the result in the matrix y x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) v = np.array([1, 0, 1]) y = np.empty_like(x) # Create an empty matrix with the same shape as x # Add the vector v to each row of the matrix x with an explicit loop for i in range(4): y[i, :] = x[i, :] + v print(y) # + [markdown] id="2OlXXupEL9i-" # This works; however when the matrix `x` is very large, computing an explicit loop in Python could be slow. Note that adding the vector v to each row of the matrix `x` is equivalent to forming a matrix `vv` by stacking multiple copies of `v` vertically, then performing elementwise summation of `x` and `vv`. 
We could implement this approach like this: # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="vS7UwAQQL9i-" outputId="8621e502-c25d-4a18-c973-886dbfd1df36" vv = np.tile(v, (4, 1)) # Stack 4 copies of v on top of each other print(vv) # Prints "[[1 0 1] # [1 0 1] # [1 0 1] # [1 0 1]]" # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="N0hJphSIL9jA" outputId="def6a757-170c-43bf-8728-732dfb133273" y = x + vv # Add x and vv elementwise print(y) # + [markdown] id="zHos6RJnL9jB" # Numpy broadcasting allows us to perform this computation without actually creating multiple copies of v. Consider this version, using broadcasting: # + colab={"base_uri": "https://localhost:8080/", "height": 86} id="vnYFb-gYL9jC" outputId="df3bea8a-ad72-4a83-90bb-306b55c6fb93" import numpy as np # We will add the vector v to each row of the matrix x, # storing the result in the matrix y x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]]) v = np.array([1, 0, 1]) y = x + v # Add v to each row of x using broadcasting print(y) # + [markdown] id="08YyIURKL9jH" # The line `y = x + v` works even though `x` has shape `(4, 3)` and `v` has shape `(3,)` due to broadcasting; this line works as if v actually had shape `(4, 3)`, where each row was a copy of `v`, and the sum was performed elementwise. # # Broadcasting two arrays together follows these rules: # # 1. If the arrays do not have the same rank, prepend the shape of the lower rank array with 1s until both shapes have the same length. # 2. The two arrays are said to be compatible in a dimension if they have the same size in the dimension, or if one of the arrays has size 1 in that dimension. # 3. The arrays can be broadcast together if they are compatible in all dimensions. # 4. After broadcasting, each array behaves as if it had shape equal to the elementwise maximum of shapes of the two input arrays. # 5. 
In any dimension where one array had size 1 and the other array had size greater than 1, the first array behaves as if it were copied along that dimension # # If this explanation does not make sense, try reading the explanation from the [documentation](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) or this [explanation](http://wiki.scipy.org/EricsBroadcastingDoc). # # Functions that support broadcasting are known as universal functions. You can find the list of all universal functions in the [documentation](http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs). # # Here are some applications of broadcasting: # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="EmQnwoM9L9jH" outputId="f59e181e-e2d4-416c-d094-c4d003ce8509" # Compute outer product of vectors v = np.array([1,2,3]) # v has shape (3,) w = np.array([4,5]) # w has shape (2,) # To compute an outer product, we first reshape v to be a column # vector of shape (3, 1); we can then broadcast it against w to yield # an output of shape (3, 2), which is the outer product of v and w: print(np.reshape(v, (3, 1)) * w) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="PgotmpcnL9jK" outputId="567763d3-073a-4e3c-9ebe-6c7d2b6d3446" # Add a vector to each row of a matrix x = np.array([[1,2,3], [4,5,6]]) # x has shape (2, 3) and v has shape (3,) so they broadcast to (2, 3), # giving the following matrix: print(x + v) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="T5hKS1QaL9jK" outputId="5f14ac5c-7a21-4216-e91d-cfce5720a804" # Add a vector to each column of a matrix # x has shape (2, 3) and w has shape (2,). # If we transpose x then it has shape (3, 2) and can be broadcast # against w to yield a result of shape (3, 2); transposing this result # yields the final result of shape (2, 3) which is the matrix x with # the vector w added to each column. 
Gives the following matrix: print((x.T + w).T) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="JDUrZUl6L9jN" outputId="53e99a89-c599-406d-9fe3-7aa35ae5fb90" # Another solution is to reshape w to be a row vector of shape (2, 1); # we can then broadcast it directly against x to produce the same # output. print(x + np.reshape(w, (2, 1))) # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="VzrEo4KGL9jP" outputId="53c9d4cc-32d5-46b0-d090-53c7db57fb32" # Multiply a matrix by a constant: # x has shape (2, 3). Numpy treats scalars as arrays of shape (); # these can be broadcast together to shape (2, 3), producing the # following array: print(x * 2) # + [markdown] id="89e2FXxFL9jQ" # Broadcasting typically makes your code more concise and faster, so you should strive to use it where possible. # + [markdown] id="iF3ZtwVNL9jQ" # This brief overview has touched on many of the important things that you need to know about numpy, but is far from complete. Check out the [numpy reference](http://docs.scipy.org/doc/numpy/reference/) to find out much more about numpy. # + [markdown] id="tEINf4bEL9jR" # ##Matplotlib # + [markdown] id="0hgVWLaXL9jR" # Matplotlib is a plotting library. In this section give a brief introduction to the `matplotlib.pyplot` module, which provides a plotting system similar to that of MATLAB. # + id="cmh_7c6KL9jR" import matplotlib.pyplot as plt # + [markdown] id="jOsaA5hGL9jS" # By running this special iPython command, we will be displaying plots inline: # + id="ijpsmwGnL9jT" # %matplotlib inline # + [markdown] id="U5Z_oMoLL9jV" # ###Plotting # + [markdown] id="6QyFJ7dhL9jV" # The most important function in `matplotlib` is plot, which allows you to plot 2D data. 
Here is a simple example: # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="pua52BGeL9jW" outputId="9ac3ee0f-7ff7-463b-b901-c33d21a2b10c" # Compute the x and y coordinates for points on a sine curve x = np.arange(0, 3 * np.pi, 0.1) y = np.sin(x) # Plot the points using matplotlib plt.plot(x, y) # + [markdown] id="9W2VAcLiL9jX" # With just a little bit of extra work we can easily plot multiple lines at once, and add a title, legend, and axis labels: # + colab={"base_uri": "https://localhost:8080/", "height": 312} id="TfCQHJ5AL9jY" outputId="fdb9c033-0f06-4041-a69d-a0f3a54c7206" y_sin = np.sin(x) y_cos = np.cos(x) # Plot the points using matplotlib plt.plot(x, y_sin) plt.plot(x, y_cos) plt.xlabel('x axis label') plt.ylabel('y axis label') plt.title('Sine and Cosine') plt.legend(['Sine', 'Cosine']) # + [markdown] id="R5IeAY03L9ja" # ###Subplots # + [markdown] id="CfUzwJg0L9ja" # You can plot different things in the same figure using the subplot function. Here is an example: # + colab={"base_uri": "https://localhost:8080/", "height": 281} id="dM23yGH9L9ja" outputId="14dfa5ea-f453-4da5-a2ee-fea0de8f72d9" # Compute the x and y coordinates for points on sine and cosine curves x = np.arange(0, 3 * np.pi, 0.1) y_sin = np.sin(x) y_cos = np.cos(x) # Set up a subplot grid that has height 2 and width 1, # and set the first such subplot as active. plt.subplot(2, 1, 1) # Make the first plot plt.plot(x, y_sin) plt.title('Sine') # Set the second subplot as active, and make the second plot. plt.subplot(2, 1, 2) plt.plot(x, y_cos) plt.title('Cosine') # Show the figure. plt.show() # + [markdown] id="gLtsST5SL9jc" # You can read much more about the `subplot` function in the [documentation](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.subplot). # + id="eJXA5AWSL9jc"
OOP/Practice Sessions/colab_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo of `geetools.tools.image.renamePattern`: rename the bands of an
# Earth Engine image according to a pattern, optionally restricted to a
# subset of bands.

import ee

ee.Initialize()

from geetools import tools
from ipygee import *

# Build a 4-band test image and show its original band names.
image = ee.Image([0, 1, 2, 3]).rename('a', 'b', 'c', 'd')
eprint(image.bandNames())

# Apply a pattern containing the `{band}` placeholder to every band.
full_pattern = 'new pattern {band} '
all_renamed = tools.image.renamePattern(image, full_pattern)
eprint(all_renamed.bandNames())

# Restrict the renaming to bands 'a' and 'b' only.
subset_renamed = tools.image.renamePattern(image, full_pattern, bands=['a', 'b'])
eprint(subset_renamed.bandNames())

# A pattern with no text after the placeholder.
compact_pattern = 'no{band}'
compact_renamed = tools.image.renamePattern(image, compact_pattern, bands=['a', 'b'])
eprint(compact_renamed.bandNames())

# A pattern without the placeholder replaces the band name outright.
constant_pattern = 'nothing'
constant_renamed = tools.image.renamePattern(image, constant_pattern, bands=['a', 'b'])
eprint(constant_renamed.bandNames())
notebooks/image/renamePattern.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Modeling and Simulation in Python # # Chapter 10: Vectors # # Copyright 2017 <NAME> # # License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) # # + # If you want the figures to appear in the notebook, # and you want to interact with them, use # # %matplotlib notebook # If you want the figures to appear in the notebook, # and you don't want to interact with them, use # # %matplotlib inline # If you want the figures to appear in separate windows, use # # %matplotlib qt5 # tempo switch from one to another, you have to select Kernel->Restart # %matplotlib notebook from modsim import * # - # ### Vectors # A `Vector` object is like a combination of a NumPy array and a Pint Quantity. # # I'll start by grabbing the units we'll need. m = UNITS.meter s = UNITS.second kg = UNITS.kilogram # Here's a two dimensional `Vector` in meters. A = Vector(3, 4) * m # We can access the elements by name. A.x A.y # The magnitude is the length of the vector. A.mag # The angle is the number of radians between the vector and the positive x axis. A.angle # If we make another `Vector` with the same units, B = Vector(1, 2) * m # We can add `Vector` objects like this A + B # And substract like this: A - B # We can compute the Euclidean distance between two Vectors. A.dist(B) # And the difference in angle A.diff_angle(B) # If we are given the magnitude and angle of a vector, what we have is the representation of the vector in polar coordinates. mag = A.mag angle = A.angle # We can use `pol2cart` to convert from polar to Cartesian coordinates, and then use the Cartesian coordinates to make a `Vector` object. # # In this example, the `Vector` we get should have the same components as `A`. 
x, y = pol2cart(angle, mag)
Vector(x, y)

# **Exercise:** Create a `Vector` named `a_grav` that represents acceleration
# due to gravity, with x component 0 and y component $-9.8$ meters / second$^2$.

# Gravity points straight down, so the x component is zero.
a_grav = Vector(0,-9.8) * (m / (s*s) )

# ### Degrees and radians
#
# Pint provides units to represent degree and radians.

degree = UNITS.degree
radian = UNITS.radian

# If you have an angle in degrees,

angle = 45 * degree
angle

# You can convert to radians.

angle_rad = angle.to(radian)
angle_rad

# If it's already in radians, `to` does the right thing.

angle_rad.to(radian)

# You can also convert from radians to degrees.

angle_rad.to(degree)

# As an alternative, you can use `np.deg2rad`, which works with Pint
# quantities, but it also works with simple numbers and NumPy arrays:

np.deg2rad(angle)

# **Exercise:** Create a `Vector` named `a_force` that represents acceleration
# due to a force of 0.5 Newton applied to an object with mass 0.3 kilograms,
# in a direction 45 degrees up from the positive x-axis.
#
# Add `a_force` to `a_grav` from the previous exercise. If that addition
# succeeds, that means that the units are compatible. Confirm that the total
# acceleration seems to make sense.

# 0.5 N / 0.3 kg ~= 1.667 m/s**2, applied at 45 degrees.
mag = 1.667 *(m/(s*s))
angle = 45 * degree
angle_rad = angle.to(radian)
x, y = pol2cart(angle_rad, mag)
a_force = Vector(x, y)
a_force + a_grav

# ### Baseball

# Here's a `Condition` object that contains the parameters for the Manny Ramirez problem.

condition = Condition(x = 0 * m,
                      y = 1 * m,
                      g = 9.8 * m/s**2,
                      mass = 145e-3 * kg,
                      diameter = 73e-3 * m,
                      rho = 1.2 * kg/m**3,
                      C_d = 0.3,
                      angle = 30 * degree,
                      velocity = 40 * m / s,
                      duration = 5.1 * s)

# And here's the function that uses the `Condition` object to make a `System` object.

def make_system(condition):
    """Make a system object.

    condition: Condition object with angle, velocity, x, y,
               diameter, duration, g, mass, rho, and C_d

    returns: System object
    """
    # `unpack` makes the fields of `condition` available as local
    # variables (modsim convenience).
    unpack(condition)

    # convert the launch angle from degrees to radians
    theta = np.deg2rad(angle)

    # compute x and y components of velocity
    vx, vy = pol2cart(theta, velocity)

    # make the initial state
    init = State(x=x, y=y, vx=vx, vy=vy)

    # compute cross-sectional area from diameter
    area = np.pi * (diameter/2)**2

    # compute timestamps
    ts = linspace(0, duration, 101)

    return System(init=init, g=g, mass=mass,
                  area=area, rho=rho, C_d=C_d, ts=ts)

# Here's how we use it:

system = make_system(condition)
system

# Here's the slope function that computes acceleration due to gravity and drag.

def slope_func(state, t, system):
    """Computes derivatives of the state variables.

    state: State (x, y, x velocity, y velocity)
    t: time
    system: System object with g, rho, C_d, area, mass

    returns: sequence (vx, vy, ax, ay)
    """
    x, y, vx, vy = state
    unpack(system)

    # gravitational acceleration points straight down
    a_grav = Vector(0, -g)

    # drag force opposes the velocity vector;
    # magnitude is rho * |v|^2 * C_d * area / 2
    v = Vector(vx, vy)
    f_drag = -rho * v.mag * v * C_d * area / 2
    a_drag = f_drag / mass

    a = a_grav + a_drag

    return vx, vy, a.x, a.y

# Always test the slope function with the initial conditions.

slope_func(system.init, 0, system)

# Now we can run `odeint`

run_odeint(system, slope_func)

# Here are the first few time steps.

system.results.head()

# And the last few. The last value of `y` is negative, indicating that the
# ball hit the ground before the end of the simulation.

system.results.tail()

# ### Visualizing the results
#
# We can extract the x and y components as `Series` objects.

xs = system.results.x
ys = system.results.y

# The simplest way to visualize the results is to plot x and y as functions of time.

# +
newfig()
plot(xs, label='x')
plot(ys, label='y')

decorate(xlabel='Time (s)',
         ylabel='Position (m)')

savefig('chap10-fig01.pdf')
# -

# We can plot the velocities the same way.

vxs = system.results.vx
vys = system.results.vy

# The x velocity slows down due to drag. The y velocity drops quickly while
# drag and gravity are in the same direction, then more slowly after the
# ball starts to fall.

# +
newfig()
plot(vxs, label='vx')
plot(vys, label='vy')

decorate(xlabel='Time (s)',
         ylabel='Velocity (m/s)')
# -

# Another way to visualize the results is to plot y versus x. The result is
# the trajectory of the ball through its plane of motion.

# +
newfig()
plot(xs, ys, label='trajectory')

decorate(xlabel='x position (m)',
         ylabel='y position (m)')

savefig('chap10-fig02.pdf')
# -

# We can also animate the flight of the ball. If there's an error in the
# simulation, we can sometimes spot it by looking at animations.

# +
newfig()
decorate(xlabel='x position (m)',
         ylabel='y position (m)',
         xlim=[0, 105],
         ylim=[-5, 35],
         legend=False)

for x, y in zip(xs, ys):
    plot(x, y, 'bo', update=True)
    sleep(0.01)
# -

# Here's a function that encapsulates that code and runs the animation in
# (approximately) real time.

def animate2d(xs, ys, speedup=1):
    """Animate the results of a projectile simulation.

    xs: x position as a function of time
    ys: y position as a function of time
    speedup: how much to divide `dt` by
    """
    # get the time intervals between elements
    ts = xs.index
    dts = np.diff(ts)
    # pad with a zero so dts lines up with the positions
    dts = np.append(dts, 0)

    # decorate the plot
    newfig()
    decorate(xlabel='x position (m)',
             ylabel='y position (m)',
             xlim=[xs.min(), xs.max()],
             ylim=[ys.min(), ys.max()],
             legend=False)

    # loop through the values, pausing between frames
    for x, y, dt in zip(xs, ys, dts):
        plot(x, y, 'bo', update=True)
        sleep(dt / speedup)

animate2d(system.results.x, system.results.y)

# **Exercise:** Run the simulation for a few different launch angles and
# visualize the results. Are they consistent with your expectations? Yes

# ### Finding the range

# Next we'll find the time and distance when the ball hits the ground.

# extend the duration so the simulation runs past the landing time
condition.set(duration=7*s)
system = make_system(condition)
run_odeint(system, slope_func)

# We have to interpolate y to find the landing time, then interpolate x to
# find the range.
def interpolate_range(results):
    """Computes the range of the ball when it lands.

    results: TimeFrame with x and y

    returns: distance in meters
    """
    xs = results.x
    ys = results.y
    t_end = ys.index[-1]

    # if y never goes negative, the ball is still in the air at the end
    if ys[t_end] > 0:
        msg = """The final value of y is still positive;
        looks like the simulation didn't run long enough."""
        raise ValueError(msg)

    # NOTE(review): this relies on `argmax` returning the index *label* of
    # the peak (older pandas behavior, equivalent to `idxmax`); on current
    # pandas `argmax` returns a position — verify against the pandas version
    # in use.
    t_peak = ys.argmax()
    descent = ys.loc[t_peak:]

    # invert y(t) on the descending branch to find the landing time,
    # then evaluate x at that time
    T = interp_inverse(descent, kind='cubic')
    t_land = T(0)

    X = interpolate(xs, kind='cubic')
    return X(t_land)

# Here's the result.

interpolate_range(system.results)

# **Exercise:** The baseball stadium in Denver, Colorado is 1,580 meters
# above sea level, where the density of air is about 1.0 kg / meter$^3$.
# How much farther would a ball hit with the same velocity and launch angle
# travel?

# +
# Hint: rather than modify `condition`, make a copy
condition2 = Condition(condition)
condition2.set(rho = 1 * kg/m**3)
system = make_system(condition2)
run_odeint(system, slope_func)

# +
interpolate_range(system.results)

# difference between the Denver range and the sea-level range, in meters
102.22512880611951 - 97.09774794768241
# -

# ### Optimal launch angle
#
# To find the launch angle that maximizes range, we need a function that
# takes launch angle and returns range.

def range_func(angle, condition):
    """Computes range for a given launch angle.

    angle: launch angle in degrees
    condition: Condition object

    returns: distance in meters
    """
    # NOTE: modifies `condition` in place, so the caller's object keeps
    # the last angle tried.
    condition.set(angle=angle)
    system = make_system(condition)
    run_odeint(system, slope_func)
    x_range = interpolate_range(system.results)
    # print the angle so we can see how often the optimizer calls us
    print(angle)
    return x_range

# Let's test `range_func`.

# %time range_func(45, condition)

# And sweep through a range of angles.

# +
angles = linspace(30, 60, 11)
sweep = SweepSeries()

for angle in angles:
    x_range = range_func(angle, condition)
    print(angle, x_range)
    sweep[angle] = x_range
# -

# Plotting the `Sweep` object, it looks like the peak is between 40 and 45 degrees.
# +
newfig()
plot(sweep)
decorate(xlabel='Launch angle (degree)',
         ylabel='Range (m)',
         legend=False)
savefig('chap10-fig03.pdf')
# -

# We can use `max_bounded` to search for the peak efficiently.

# %time res = max_bounded(range_func, [0, 90], condition)

# The result is an `OptimizeResult` object.

type(res)

# With the following variables.

res

# So the optimal angle is about 41 degrees, and the resulting range is 103 meters.

# **Exercise:** Add a print statement to `range_func` that prints `angle`.
# Then run `max_bounded` again so you can see how many times it calls
# `range_func` and what the arguments are.

# ### Turning off units
#
# Each time `range_func` runs, it calls `odeint`, which runs `slope_func`
# many times. And each time `slope_func` runs, it checks the units for all
# computations, which takes some time. We can speed up the whole process by
# removing the units from the computation (now that we are satisfied that
# they are correct).
#
# Because of the way we organized the code, all units are in the `Condition`
# object, so we can "turn off units" by defining a new `Condition` object
# with no units:

condition = Condition(g = 9.8,
                      mass = 145e-3,
                      diameter = 73e-3,
                      rho = 1.2,
                      C_d = 0.3,
                      angle = 45,
                      velocity = 40,
                      duration = 7)

# Now `range_func` and `max_bounded` are substantially faster.

# %time range_func(45, condition)

# %time res = max_bounded(range_func, [0, 90], condition)

# ### The Manny Ramirez problem
#
# Finally, let's solve the Manny Ramirez problem:
#
# *What is the minimum effort required to hit a home run in Fenway Park?*
#
# Fenway Park is a baseball stadium in Boston, Massachusetts. One of its
# most famous features is the "Green Monster", which is a wall in left field
# that is unusually close to home plate, only 310 feet along the left field
# line. To compensate for the short distance, the wall is unusually high,
# at 37 feet.
#
# Although the problem asks for a minimum, it is not an optimization
# problem. Rather, we want to solve for the initial velocity that just
# barely gets the ball to the top of the wall, given that it launches at
# the optimal angle.
#
# And we have to be careful about what we mean by "optimal". For this
# problem, we don't want the longest range, we want the maximum height at
# the point where it reaches the wall.
#
# If you are ready to solve the problem on your own, go ahead. Otherwise I
# will walk you through the process with an outline and some starter code.
#
# As a first step, write a function called `height_func` that takes a launch
# angle and a condition as parameters, simulates the flights of a baseball,
# and returns the height of the baseball when it reaches a point 94.5 meters
# (310 feet) from home plate.

def height_func(angle, condition):
    """Computes the height of the ball when it reaches the wall.

    angle: launch angle in degrees
    condition: Condition object (modified in place)

    returns: height in meters at x = 94.5 m (310 ft)
    """
    condition.set(angle=angle)
    system = make_system(condition)
    run_odeint(system, slope_func)

    # invert x(t) to find the time the ball reaches the wall,
    # then evaluate y at that time
    # NOTE(review): assumes the simulated trajectory actually reaches
    # x = 94.5 m — confirm the duration is long enough.
    T = interp_inverse(system.results.x)
    wall = T(94.5)
    Y = interpolate(system.results.y)
    return Y(wall)

# Test your function with a launch angle of 45 degrees:

height_func(45, condition)

# Now use `max_bounded` to find the optimal angle. Is it higher or lower
# than the angle that maximizes range?

# BUG FIX: this search is meant to find the angle that maximizes the height
# at the wall, so it must optimize `height_func`, not `range_func`.
# %time res = max_bounded(height_func, [0, 90], condition)

# The following lines compute the height of the ball at the wall, given
# that it's launched at the optimal angle.

angle = res.x
height = height_func(angle, condition)
height

# Next, we need to find the height of the ball at the wall, for a given
# velocity, given that it's launched at the optimal angle.
#
# Write a function called `best_height` that takes velocity and a
# `Condition` object as parameters. It should use `max_bounded` to find the
# optimal launch angle, then compute the highest possible height of the
# ball at the wall, for the given velocity.
def best_height(velocity, condition):
    """Computes the highest height at the wall for a given launch velocity.

    velocity: launch velocity in m/s
    condition: Condition object (modified in place)

    returns: height in meters at the wall, launched at the optimal angle
    """
    condition.set(velocity=velocity)

    # BUG FIX: optimize the height at the wall, not the range; the angle
    # that maximizes range is not the angle that best clears the wall.
    res = max_bounded(height_func, [0, 90], condition)
    angle = res.x
    height = height_func(angle, condition)
    return height

# Use this code to test `best_height`

best_height(40, condition)

# Finally, we want to use `fsolve` to find the initial velocity that makes
# the height of the ball exactly 11 meters when it reaches the wall.
#
# To use `fsolve`, we need an error function that returns 0 when we have
# the right velocity. Write a function called `error_func` that takes a
# velocity and a `Condition` object, uses `best_height` to find the height
# of the ball at the wall, and returns the difference between the result
# and the target value (11 meters).

# +
def error_func(velocity, condition):
    """Error function for `fsolve`: zero when the ball just clears the wall.

    velocity: launch velocity in m/s
    condition: Condition object

    returns: best height at the wall minus the 11 m target
    """
    best = best_height(velocity, condition)
    return best - 11
# -

# Test your error function like this:

error_func(40, condition)

# Then use `fsolve` to find the answer to the problem, the minimum velocity
# that gets the ball out of the park.

min_velocity = fsolve(error_func, 40, condition)

# +
# Solution goes here
# -

# And just to check, run `best_height` with the value you found. The result
# should be 11 meters.

best_height(min_velocity, condition)
code/chap10-mine.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JoshuaDiazAtencia/OOP-1-1/blob/main/Prelim_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="2lv5Xbeb30_J"
# PROBLEM 1

# + colab={"base_uri": "https://localhost:8080/"} id="RqWFC9Xr2_QV" outputId="030e7196-ce0a-4ed8-853a-9dc2096b8ff6"
# Sum and average of the integers up to 20.
# The running total is kept in `total` rather than `sum` so the `sum`
# builtin is not shadowed; including 0 in the range does not change the sum.
total_numbers = 20
total = sum(range(total_numbers + 1))
print("sum =", total)

average = total / total_numbers
print("Average = ", average)

# + [markdown] id="HLbo9B8T33eq"
# PROBLEM 2
# 1. Write a Python to display your full name, student number, age, and course
# 2. Create a class named Student with attributes: Name, Student_No, Age, School, and Course
# 3. Create an object name Myself and assign an instance for each attribute.
# 4. Create a method Info() using an instantiation of a class.

# + colab={"base_uri": "https://localhost:8080/"} id="Z--lJZhW35-X" outputId="a77cb081-18f3-443c-a257-564825a019fd"
class Student:
    """A student record with basic identifying information."""

    def __init__(self, Name, Student_No, Age, School, Course):
        self.Name = Name            # full name
        self.Student_No = Student_No  # student number
        self.Age = Age              # age in years
        self.School = School        # school attended
        self.Course = Course        # degree program

    def Info(self):
        """Print a short self-introduction built from the attributes."""
        print("Hi! my name is", self.Name, self.Age, "years old,",
              "with student number", self.Student_No)
        print("Currently taking", self.Course, "at", self.School)


Myself = Student("<NAME>", 202101624, 18,
                 "Cavite State University Main Campus",
                 "Bachelor of Science in Computer Engineering")
Myself.Info()
Prelim_Exam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # GSD: Calculating %G+C for the nuclear and mitochondrial genomes of S288C reference sequence at SGD and the PacBio set # # What is exact %G+C for nuclear and mitochondrial genomes of the *S. crevisiae* S288C reference sequence at SGD and a dozen-or-so PacBio-sequenced *cerevisiae* and *paradoxus* genomes from [Yue et al., 2017](https://www.ncbi.nlm.nih.gov/pubmed/28416820)? # # # References for the sequence data: # - [Contrasting evolutionary genome dynamics between domesticated and wild yeasts. # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Nat Genet. 2017 Jun;49(6):913-924. doi: 10.1038/ng.3847. Epub 2017 Apr 17. PMID: 28416820](https://www.ncbi.nlm.nih.gov/pubmed/28416820) # # # - [Life with 6000 genes. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Science. 1996 Oct 25;274(5287):546, 563-7. PMID: 8849441](https://www.ncbi.nlm.nih.gov/pubmed/8849441) # # Links to the specific data are included in the code. # # Uses code developed for `GSD Assessing_ambiguous_nts_in_complete_PB_genomes.ipynb`, `GSD Assessing_ambiguous_nts_in_nuclear_PB_genomes.ipynb`, `GSD Assessing_ambiguous_nts_in_1011_collection_genomes.ipynb`, `GSD Calculating_GC_for_nuclear_and_mitochondrial_genomes_of_SGD_reference.ipynb`. # # ----- # ## Preparation # # Get the packages and sequence data necessary. # # !pip install pyfaidx # Get the S228C genomes data by running these commands. 
# + # Cannot simply use `S288C_reference_sequence_R64-2-1_20150113.fsa` at http://sgd-archive.yeastgenome.org/sequence/S288C_reference/genome_releases/S288C_reference_genome_Current_Release.tgz # because want nuclear & mito sequences here to be separate # NUCLEAR def add_strain_id_to_description_line(file,strain_id): ''' Takes a file and edits every description line to add strain_id after the caret. Saves the fixed file ''' import sys output_file_name = "temp.txt" # prepare output file for saving so it will be open and ready with open(output_file_name, 'w') as output_file: # read in the input file with open(file, 'r') as input_handler: # prepare to give feeback later or allow skipping to certain start lines_processed = 0 for line in input_handler: lines_processed += 1 if line.startswith(">"): rest_o_line = line.split(">") new_line = ">"+strain_id + rest_o_line[1] else: new_line = line # Send text to output output_file.write(new_line) # replace the original file with edited # !mv temp.txt {file} # Feedback sys.stderr.write("\n{} chromosome identifiers tagged.".format(file)) chr_nums = ["01","02","03","04","05","06","07","08","09","10","11", "12","13","14","15","16"] g_fns = [] for chr_num in chr_nums: fn = "chr"+chr_num+".fsa" # !curl -OL http://sgd-archive.yeastgenome.org/sequence/S288C_reference/chromosomes/fasta/{fn} g_fns.append(fn) # edit or replace description lines to work similar to PacBio tag = "SGD_REFchr"+chr_num+" " add_strain_id_to_description_line(fn,tag) # zip all nuclear chromosomes together as one file as they are from https://yjx1217.github.io/Yeast_PacBio_2016/data/ # !cat {" ".join(g_fns)} > SGD_REF_nuc.genome.fa # !rm chr*.fsa #MITO # !curl -OL http://sgd-archive.yeastgenome.org/sequence/S288C_reference/chromosomes/fasta/chrmt.fsa # Edit the description line to be clear and concise for later % rename to match nuclear # !sed -i '1s/.*/>SGD_REFmito/' chrmt.fsa # #!head chrmt.fsa # !mv chrmt.fsa SGD_REF_mito.genome.fa # IF WANTED ALL SGD reference 
The excluding was only necessary because I had run some BLAST queries preliminarily in development.
def percent_GCcalc(items):
    '''
    Takes a sequence of three values -- C count, G count, and total
    nucleotide count -- and returns the G+C fraction,
    (C + G) / total.

    The result is a fraction between 0 and 1; it is rendered as a
    percentage later via the pandas style format '{:.2%}'.

    Returns 0.0 when the total is zero so an empty sequence does not
    raise ZeroDivisionError.
    '''
    if not items[2]:
        return 0.0
    return (items[0] + items[1])/items[2]
# make the species_dict work for the nuclear- and mito-distinguished versions
(I need to see how the number of classified GC elements and ori sequences relates to that pattern.)
notebooks/GSD/GSD Calculating_GC_for_nuclear_and_mitochondrial_genomes_of_SGD_reference_and_PB_set.ipynb
The catalog of choice is the NGDC geoportal (http://www.ngdc.noaa.gov/geoportal/csw)
The goal is to assess the water temperature for the [Boston Light Swim](http://bostonlightswim.org/) event.
Meanwhile we have to filter out all observations from the DAP with the `is_station` function.
Until then we will have to make do with the heuristic function `is_model` from the `utilities` module.
# + from utilities import collector2table, get_ndbc_longname ndbc = collector2table(collector=collector_ndbc) names = [] for s in ndbc['station']: try: name = get_ndbc_longname(s) except ValueError: name = s names.append(name) ndbc['name'] = names ndbc.set_index('name', inplace=True) ndbc.head() # - # That makes more sense. # Two buoys were found in the bounding box, # and the name of at least one of them makes sense. # # Now the same thing for `CoopsSos`. # + from pyoos.collectors.coops.coops_sos import CoopsSos collector_coops = CoopsSos() collector_coops.set_bbox(bbox) collector_coops.end_time = stop collector_coops.start_time = start collector_coops.variables = [sos_name] ofrs = collector_coops.server.offerings title = collector_coops.server.identification.title print(fmt(' Collector offerings ')) print('{}: {} offerings'.format(title, len(ofrs))) # + from utilities import get_coops_metadata coops = collector2table(collector=collector_coops) names = [] for s in coops['station']: try: name = get_coops_metadata(s)[0] except ValueError: name = s names.append(name) coops['name'] = names coops.set_index('name', inplace=True) coops.head() # - # We found one more. # Now we can merge both into one table and start downloading the data. # + from pandas import concat all_obs = concat([coops, ndbc]) all_obs.head() # + from pandas import DataFrame from owslib.ows import ExceptionReport from utilities import pyoos2df, save_timeseries iris.FUTURE.netcdf_promote = True data = dict() col = 'sea_water_temperature (C)' for station in all_obs.index: try: idx = all_obs['station'][station] df = pyoos2df(collector_ndbc, idx, df_name=station) if df.empty: df = pyoos2df(collector_coops, idx, df_name=station) data.update({idx: df[col]}) except ExceptionReport as e: print("[{}] {}:\n{}".format(idx, station, e)) # - # The cell below reduces or interpolates, # depending on the original frequency of the data, # to 1 hour frequency time-series. 
# + from pandas import date_range index = date_range(start=start, end=stop, freq='1H') for k, v in data.iteritems(): data[k] = v.reindex(index=index, limit=1, method='nearest') obs_data = DataFrame.from_dict(data) obs_data.head() # - # And now the same for the models. Note that now we use the `is_model` to filter out non-model endpotins. # + import warnings from iris.exceptions import (CoordinateNotFoundError, ConstraintMismatchError, MergeError) from utilities import (quick_load_cubes, proc_cube, is_model, get_model_name, get_surface) cubes = dict() for k, url in enumerate(dap_urls): print('\n[Reading url {}/{}]: {}'.format(k+1, len(dap_urls), url)) try: cube = quick_load_cubes(url, name_list, callback=None, strict=True) if is_model(cube): cube = proc_cube(cube, bbox=bbox, time=(start, stop), units=units) else: print("[Not model data]: {}".format(url)) continue cube = get_surface(cube) mod_name, model_full_name = get_model_name(cube, url) cubes.update({mod_name: cube}) except (RuntimeError, ValueError, ConstraintMismatchError, CoordinateNotFoundError, IndexError) as e: print('Cannot get cube for: {}\n{}'.format(url, e)) # - # And now we can use the `iris` cube objects we collected to download model data near the buoys we found above. # We will need `get_nearest_water` to search the 10 nearest model # points at least 0.08 degrees away from each buys. # # (This step is still a little bit clunky and need some improvements!) # + from iris.pandas import as_series from utilities import (make_tree, get_nearest_water, add_station, ensure_timeseries, remove_ssh) model_data = dict() for mod_name, cube in cubes.items(): print(fmt(mod_name)) try: tree, lon, lat = make_tree(cube) except CoordinateNotFoundError as e: print('Cannot make KDTree for: {}'.format(mod_name)) continue # Get model series at observed locations. 
raw_series = dict() for station, obs in all_obs.iterrows(): try: kw = dict(k=10, max_dist=0.08, min_var=0.01) args = cube, tree, obs.lon, obs.lat series, dist, idx = get_nearest_water(*args, **kw) except ValueError as e: status = "No Data" print('[{}] {}'.format(status, obs.name)) continue if not series: status = "Land " else: series = as_series(series) raw_series.update({obs['station']: series}) status = "Water " print('[{}] {}'.format(status, obs.name)) if raw_series: # Save that model series. model_data.update({mod_name: raw_series}) del cube # - # To end this post let's plot the 3 buoys we found together with the nearest model grid point. # + import matplotlib.pyplot as plt buoy = '44013' fig , ax = plt.subplots(figsize=(11, 2.75)) obs_data[buoy].plot(ax=ax, label='Buoy') for model in model_data.keys(): try: model_data[model][buoy].plot(ax=ax, label=model) except KeyError: pass # Could not find a model at this location. leg = ax.legend() # + buoy = '44029' fig , ax = plt.subplots(figsize=(11, 2.75)) obs_data[buoy].plot(ax=ax, label='Buoy') for model in model_data.keys(): try: model_data[model][buoy].plot(ax=ax, label=model) except KeyError: pass # Could not find a model at this location. leg = ax.legend() # + buoy = '8443970' fig , ax = plt.subplots(figsize=(11, 2.75)) obs_data[buoy].plot(ax=ax, label='Buoy') for model in model_data.keys(): try: model_data[model][buoy].plot(ax=ax, label=model) except KeyError: pass # Could not find a model at this location. leg = ax.legend() # - # That is it! # We fetched data based only on a bounding box, time-range, and variable name. # The workflow is not as smooth as we would like. # We had to mix `OWSLib` catalog searches with to different `pyoos` collector to download the observed and modeled data. # Another hiccup are all the workarounds used to go from iris cubes to pandas series/dataframes. # There is a clear need to a better way to represent CF feature types in a single Python object. 
# # To end this post check out the full version of the [Boston Light Swim](http://mybinder.org/repo/ocefpaf/boston_light_swim) notebook. (Specially the interactive map at the end.) HTML(html)
content/downloads/notebooks/2015-10-12-fetching_data.ipynb
The API is mostly the same as for regular PyTorch.
class Network(SNNNetwork):
    """Three-layer spiking MLP (input -> hidden -> output) built from
    PySNN primitives.

    As stated in the notebook text above, every PySNN module returns a
    (spike, trace) pair, and each Connection+Neuron pair must be
    registered with ``add_layer`` so that learning rules (FedeSTDP in
    the training cell) can locate the connections via
    ``layer_state_dict()``.
    """

    def __init__(self):
        super(Network, self).__init__()

        # Input encoding layer; the *_dynamics tuples and size constants
        # come from the module-level parameter cell above.
        self.input = Input(
            (batch_size, 1, n_in), *i_dynamics, update_type="exponential"
        )

        # Layer 1: fully connected n_in -> n_hidden with Fede neuron dynamics.
        # NOTE: the string name passed to add_layer ("fc1") is what the
        # learning rule sees, not the attribute name.
        self.mlp1_c = Linear(n_in, n_hidden, *c_dynamics)
        self.neuron1 = FedeNeuron((batch_size, 1, n_hidden), *n_dynamics)
        self.add_layer("fc1", self.mlp1_c, self.neuron1)

        # Layer 2: fully connected n_hidden -> n_out.
        self.mlp2_c = Linear(n_hidden, n_out, *c_dynamics)
        self.neuron2 = FedeNeuron((batch_size, 1, n_out), *n_dynamics)
        self.add_layer("fc2", self.mlp2_c, self.neuron2)

    def forward(self, input):
        """Run one simulation timestep.

        `x` holds the spike tensor and `t` the trace tensor at each
        stage; only the final spike tensor is returned (traces stay
        internal to the network state).
        """
        x, t = self.input(input)

        # Layer 1
        x, t = self.mlp1_c(x, t)
        x, t = self.neuron1(x, t)

        # Layer out
        x, t = self.mlp2_c(x, t)
        x, t = self.neuron2(x, t)

        return x
examples/.ipynb_checkpoints/mlp_example-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from matplotlib import style style.use('fivethirtyeight') import matplotlib.pyplot as plt import numpy as np import pandas as pd import datetime as dt # # Reflect Tables into SQLAlchemy ORM # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func engine = create_engine("sqlite:///Resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # We can view all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement Station = Base.classes.station # Create our session (link) from Python to the DB session = Session(engine) # + #class Station(Base): #__tablename__ = 'station' #station = Column(String(255)) #name = Column(String(255)) #latitude = Column(Float) #longitude = Column(Float) #elevation = Column(Float) #class Measurement(Base): #__tablename__ = 'measurement' #station = Column(String(255)) #date = Column(Date) #prcp = Column(Float) #tobs = Column(Integer) enddate = engine.execute('SELECT MAX (date) FROM measurement').fetchall() # - # # Exploratory Climate Analysis # + # Design a query to retrieve the last 12 months of precipitation data and plot the results # Calculate the date 1 year ago from the last data point in the database year = dt.datetime.strptime(enddate[0][0], "%Y-%m-%d").year month = dt.datetime.strptime(enddate[0][0], "%Y-%m-%d").month day = dt.datetime.strptime(enddate[0][0], "%Y-%m-%d").day start_year = year - 1 start_day = day + 1 # Perform a query to retrieve the data and precipitation scores result = 
Use Pandas to calculate the summary statistics for the precipitation data
def calc_temps(start_date, end_date):
    """TMIN, TAVG, and TMAX for a list of dates.
    
    Args:
      start_date (string): A date string in the format %Y-%m-%d
      end_date (string): A date string in the format %Y-%m-%d
        
    Returns:
        A list containing a single (TMIN, TAVG, TMAX) tuple aggregated
        over all stations for the inclusive range start_date..end_date.
        NOTE(review): the filters compare dates as strings; this is
        correct assuming dates are stored in ISO %Y-%m-%d form (as the
        strptime parsing earlier in the notebook suggests) -- confirm
        against the sqlite schema.
    """
    
    return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
# Use "Trip Avg Temp" as your Title # Use the average temperature for the y value # Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr) # + # Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates. # Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation # - # ## Optional Challenge Assignment
climate_starter.ipynb
### 1.1.4 Calculate n (this is simply the length of the data, a.k.a. the number of observations)
# Confidence interval
#
# $$\bar{x} \pm t\frac{s}{\sqrt{n}}$$
#
# - `PS3 corresponding problem: 1.a`
#

# ### 1.2.1 Find t-score

# Two-sided 95% CI -> 0.975 quantile of the t distribution with N-1 df.
# FIX: the column is named `educ`; `df$edu` only worked through R's partial
# `$` matching, which breaks if another column starting with "edu" is added.
tscore = qt(0.975, df=length(df$educ)-1) # t score
tscore

# ### 1.2.2 Confidence intervals

# Standard error computed in section 1.1 (constant column, take the first entry).
std_error = df_ed$standard_error[1]
std_error

confidence_int = tscore*std_error

x_mean = mean(df$educ)
x_mean

x_mean-confidence_int
x_mean+confidence_int

# # 2. T-test: means
# - `PS3 corresponding problem: 1.b & 1.c`
#

mtcars = mtcars
mtcars %>% head(3)

# One-sample t-test of mpg against mu = 15.
t.test(mtcars$mpg, mu=15)

# # 3. T-test: 2-sided
# - `PS3 corresponding problem: 1.d`
#
# ## 3.1 Compare mean weight of cars with automatic and manual

# ### 3.1.1 filter weight values for automatic cars
# NOTE(review): in mtcars, am = 0 is automatic and am = 1 is manual, so the
# variable names below are swapped relative to the data's documented coding.
# Kept as-is because the problem-set answers depend on these groupings —
# confirm the intended labels before reusing.

wt_automatic = mtcars %>% filter(am==1) %>% select(wt)
wt_automatic %>% head(2)

# ### 3.1.2 filter weight values for manual cars

wt_manual = mtcars %>% filter(am==0) %>% select(wt)
wt_manual %>% head(2)

# ### 3.1.3 Use "t.test()" function

# Welch two-sample t-test comparing the two group means.
test_solution = t.test(wt_automatic, wt_manual)
test_solution

# ### 3.1.4 Extract p-value from solution

pvalue = test_solution$p.value
pvalue

if (pvalue <.05){
    print('Reject the null')
    print('Evidence that the means are statisticaly different')
}

if(pvalue >.05){
    print('Fail to reject null')
    print('No evidence that means are statisticaly different')
}

# # 4. T-test 1-sided
# - `PS3 corresponding problem: 1.f`
#

# One-sided alternatives: first group's mean less / greater than the second's.
t.test(wt_automatic, wt_manual, alternative='less')

t.test(wt_automatic, wt_manual, alternative='greater')

# # 5.
# Functions

# ## 5.1 Basic function

# Adds two numbers.
first_function = function(a,b){
    answer = a+b
    return (answer)
}

first_function(2,3)

# ## 5.2 Intermediate function

# Sums a whole column (vector).
sum_column = function(column){
    answer = sum(column)
    return (answer)
}

sum_column(df$age)

# ### 5.2.1 Intermediate function continued

# Prints sum, mean and standard deviation of a column (returns nothing).
summary_stats = function(column){
    sum = sum(column)
    mean = mean(column)
    std = sd(column)
    print(paste('Sum:', sum))
    print(paste('Mean:', mean))
    print(paste('STD:', std))
}

summary_stats(df$age)

# ## 5.3 Standard error function
#
# $$\hat{\sigma}=\sqrt{\frac{1}{N-1}\sum_{i=1}^{N} (X_{i} -\bar{X})^2}$$
#

# +
# Standard error of the mean: SE = s / sqrt(N).
# FIX: the original divided the sample sd by sqrt(N-1) instead of sqrt(N),
# which disagreed with the check in 5.3.2 (sd(x)/sqrt(length(x))).
std_error = function(column){
    n = length(column)                        # number of observations N
    sum_diff = sum((column-mean(column))^2)   # sum of squared deviations
    sd = sqrt(sum_diff/(n-1))                 # sample standard deviation (divides by N-1)
    standard_error = sd/sqrt(n)               # SE = s / sqrt(N)
    return (standard_error)
}
# -

# ### 5.3.1 Plug in data to function

std_error(df$educ)

# ### 5.3.2 Check answer

sd(df$educ)/sqrt(length(df$educ))

# ## 5.4 Confidence interval function

# Prints the mean +- margin and the CI bounds for the requested confidence level.
# FIX: uses the two-sided quantile qt(1-(1-level)/2, N-1), so a .95 input now
# yields qt(0.975, ...) exactly as in section 1.2.1 (the original passed .95
# straight to qt, producing a 90% interval), and the SE divides by sqrt(N).
confidence_interval = function(column, confidence_int){
    n = length(column)                        # number of observations N
    sum_diff = sum((column-mean(column))^2)   # sum of squared deviations
    sd = sqrt(sum_diff/(n-1))                 # sample standard deviation
    standard_error = sd/sqrt(n)               # SE = s / sqrt(N)
    ############################################################################################
    t = qt(1-(1-confidence_int)/2, n-1)       # two-sided t quantile
    solu = t*standard_error
    x_mean = round(mean(column),3)            # rounded mean
    conf_lower = round(x_mean-solu,6)
    conf_upper = round(x_mean+solu,6)
    print(paste('Mean:', x_mean,'+-', round(solu,3), sep=''))
    print(paste('Upper bound: ', conf_upper, ' Lower bound: ', conf_lower, sep=''))
}

confidence_interval(df$educ, .95)
R_basics/Disc7_PS4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Imbana/ClasificacionLimonesCNN/blob/editar/Codigo_proyecto_final.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="YypcA2Olmc6p" # --- # # Clasificacion de limones Tahiti con redes neuronales convolucionales (CCN) # --- # se recomienda configurar el enterno de ejecucion con TPU(Una unidad de procesamiento tensorial) sirve como acelerador de IA desarrollado por Google para el aprendizaje automático con redes neuronales artificiales y más específicamente optimizado para usar TensorFlow. entorno de ejecucion>>cambiar tipo de entorno de ejecucion>>TPU # # + [markdown] id="9d4cZA1Um6AH" # CONFIGURACION DEL ENTORNO Y LIBRERIAS # + id="Bkje8ZvOmSTL" outputId="59a92cd3-01a5-4c97-8347-bef34e30dac4" colab={"base_uri": "https://localhost:8080/", "height": 35} #se conecta con google drive donde se encuentran las carpetas con las imagenes de los limones from google.colab import drive drive.mount('/content/drive/') # + id="5K6x_cOrnVtG" #librerias import matplotlib.pylab as plt import tensorflow as tf import numpy as np import os import cv2 import PIL import glob import shutil from PIL import Image import keras_preprocessing from keras_preprocessing import image from keras_preprocessing.image import ImageDataGenerator from tensorflow import keras #from tensorflow.keras import models from tensorflow import lite # + id="zV4ap3ntnmkz" # + [markdown] id="St8sQILitICN" # --- # #Generar el dataset # --- # Se copia la ruta de la carpeta donde estan las imagenes y se realiza una generacion de dataset con las caracteristicas necesarias como tamaño de imagen, escala, rotacion, tamaño de batch etc. 
# + [markdown] id="uEbacb_KBQpw"
# IMAGE PRE-PROCESSING
#
# This step needs one folder of images per lemon category (here three folders:
# lemons with mites ("Acaros"), good ones ("Buenos") and spotted ones
# ("Manchas")).
#
# Before building the arrays that feed the neural network, the data is split
# into three sets — training, validation (test) and a final check set
# ("prueba") — taking the images in random order and giving every class the
# same number of images so no class is over-represented. The dataset could
# additionally be augmented with rotations, colour changes, etc.
#

# + id="3JU42Xqev6uU"


# + id="YrbsW0MqQqvU"


# + id="wQRmjDFuBLrH"
# Create the folder tree (Train/Test/Prueba, one subfolder per class) and copy
# a balanced, shuffled 80% / 15% / 5% split of the raw images into it.
import os
import glob
import shutil
import random

CLASSES = ['Acaros', 'Buenos', 'Manchas']

# Root directory where the split dataset is written.
new_folder_path = os.path.join('/DatosPrueba')
Data_train = os.path.join(new_folder_path, 'Datos_Train')
Data_test = os.path.join(new_folder_path, 'Datos_Test')
Data_prueba = os.path.join(new_folder_path, 'Datos_Prueba')

if not os.path.exists(new_folder_path):  # only build the tree once
    os.mkdir(new_folder_path)
    for split_dir in (Data_train, Data_test, Data_prueba):
        os.mkdir(split_dir)
        for class_name in CLASSES:
            os.mkdir(os.path.join(split_dir, class_name))

# Folder holding the original (raw) images, one subfolder per class.
parent_dir = '/content/drive/My Drive/Proyecto_grado/Fotos/Raw-Data'

################################################################################
# List each class's images and shuffle them so the split is random.
files_by_class = {}
for class_name in CLASSES:
    files = glob.glob(os.path.join(parent_dir, class_name, '*.jpg'))
    random.shuffle(files)
    files_by_class[class_name] = files

##################################################
# The smallest class bounds how many images every class contributes,
# keeping the dataset balanced.
valores = [len(files_by_class[c]) for c in CLASSES]
print(min(valores))
for class_name in ['Buenos', 'Acaros', 'Manchas']:
    print('{}: {}'.format(class_name, len(files_by_class[class_name])))

################################################################################
# Split sizes. NOTE: the original names (limite70/limite20/limite10) did not
# match the actual ratios — the split really is 80% / 15% / 5%.
limit = min(valores)
limit_train = int(limit * 0.8)
limit_test = int(limit * 0.15)
limit_prueba = int(limit * 0.05)


def _copy_split(index_range, dest_dir, prefix):
    # For every class, copy the images whose shuffled index falls in
    # `index_range` to dest_dir/<class>/<prefix><i>.jpg.
    for i, f in enumerate(index_range):
        name = prefix + str(i) + '.jpg'
        for class_name in CLASSES:
            shutil.copy(files_by_class[class_name][f],
                        os.path.join(dest_dir, class_name, name))


_copy_split(range(limit_train), Data_train, 'train')
_copy_split(range(limit_train, limit_train + limit_test), Data_test, 'test')
_copy_split(range(limit_train + limit_test,
                  limit_train + limit_test + limit_prueba),
            Data_prueba, 'prueba')

# + id="AVMZmGHfZP3Q" outputId="2ca5d165-b8f1-4b27-f22b-f83493ed51d5" colab={"base_uri": "https://localhost:8080/", "height": 70}
# Report how many raw images each class folder contains.
parent_dir = '/content/drive/My Drive/Proyecto_grado/Fotos/Raw-Data'
for class_name in ['Buenos', 'Acaros', 'Manchas']:
    count = len(glob.glob(os.path.join(parent_dir, class_name, '*.jpg')))
    print('{}: {}'.format(class_name, count))

# + id="n4RvKMVfcrpT"
# Pack the split dataset as a zip archive (other formats also possible).
import shutil
archivo_zip = shutil.make_archive("/content/drive/My Drive/Dataset/Dataset3Clases", "zip", "/DatosPrueba")

# + id="JEv-rETXpOAA"
# %ls
# %cd /..
# take the working directory to the parent folder
# %ls

# + id="1sCAQ-k9qIxa"
# File to copy / destination path (copy only)
# %cp -av /DatosPrueba /content/drive/My\ Drive/Dataset/Datos3Clases

# + id="JUhC2mCxplQ9"
# Training generator with on-the-fly augmentation (rescale to [0,1], random
# rotations, shifts, shear and horizontal flips).
TRAINING_DIR = "/DatosPrueba/Datos_Train" #Data_train
training_datagen = ImageDataGenerator(
    rescale = 1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

# Validation generator: rescaling only, no augmentation.
VALIDATION_DIR = "/DatosPrueba/Datos_Test" #Data_test
validation_datagen = ImageDataGenerator(rescale = 1./255)

train_generator = training_datagen.flow_from_directory(
    TRAINING_DIR,
    target_size=(224,224),
    class_mode='categorical',
    shuffle=True,
    batch_size=32
)

validation_generator = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    target_size=(224,224),
    class_mode='categorical',
    shuffle=True,
    batch_size=32
)

# Show one batch's shapes and the class -> index mapping.
image_batch_train, label_batch_train = next(iter(train_generator))
print("composicion de un batch: ", image_batch_train.shape)
print("composicion de la etiqueta: ", label_batch_train.shape)
print(train_generator.class_indices)

# + id="VjjgDp_Ww5-b"
# NOTE(review): this pickles the DirectoryIterator object itself, not image
# arrays — probably not what was intended; verify before relying on xtrain.npy.
np.save('xtrain.npy',validation_generator)

# + id="5ITlwXO_-VlC"
print(train_generator)
print(validation_generator.n)
print(train_generator.batch_size)
print(train_generator.samples)
print(validation_generator.batch_size)

# + [markdown] id="DsL3aYv4s1Ta"
# ---
# # Neural-network model configuration
# ---
# A base model is taken from the keras library, then its final layers are
# replaced so it classifies 3 classes.
#

# + id="n_NEb1vX7MNi"
# A model built from scratch (kept for reference, not used)
"""model = tf.keras.models.Sequential([
    # Note the input shape is the desired size of the image 150x150 with 3 bytes color
    # This is the first convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # The second convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The third convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The fourth convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # Flatten the results to feed into a DNN
    # tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')
])"""

# + id="jHhQS5JhqB35"
# A pretrained base model is loaded with its ImageNet weights
# (weights='imagenet') and without its final layers (include_top=False), then
# new classification layers are appended.
def modelo1():
    """Frozen MobileNetV2 backbone + small conv head ending in a 3-way softmax."""
    base_model = tf.keras.applications.MobileNetV2(input_shape=(224,224,3), include_top=False, weights='imagenet')
    base_model.trainable = False
    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.Conv2D(32, 3, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(3, activation='softmax')
    ])
    return model

# + id="XGZJUhERrzgs"
# A second model variant with fewer final layers
## mobile = keras.applications.mobilenet.MobileNet()
def modelo2():
    """Frozen MobileNetV2 backbone + global average pooling + 3-way softmax."""
    modeloV2 = tf.keras.applications.MobileNetV2(input_shape=(224,224,3),include_top=False, weights='imagenet')
    modeloV2.trainable = False
    model = tf.keras.Sequential([modeloV2,
                                 keras.layers.GlobalAveragePooling2D(),
                                 keras.layers.Dense(3, activation='softmax')])
    return model

# + id="_GAOSI4TzPN_"
#mobile=modelo1()
#mobile.summary()
# Inspect the (MobileNet v1) backbone architecture.
base_model = tf.keras.applications.MobileNet(input_shape=(224,224,3), include_top=False, weights='imagenet')
base_model.summary()

# + [markdown] id="_y_f1c979vJJ"
# ---
# ## Model training
# ---
# The training configuration is specified here: gradient optimiser type, cost
# function, number of epochs, etc.
# + id="3m2FustH8G2Z"
# Build the model
model=modelo1()

# Configure training: Adam optimiser + categorical cross-entropy.
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])

# Steps per epoch derived from dataset size and batch size.
steps_per_epoch = np.ceil(train_generator.samples/train_generator.batch_size)
val_steps_per_epoch = np.ceil(validation_generator.samples/validation_generator.batch_size)

# Train the network.
# NOTE(review): steps_per_epoch is hard-coded to 18 instead of the computed
# value above — confirm this matches the dataset size.
history=model.fit(
    train_generator,
    steps_per_epoch=18,
    epochs=25,
    verbose = 1,
    validation_data = validation_generator,
    validation_steps=val_steps_per_epoch
)

# + [markdown] id="hBK_ms-jhyys"
# ---
# Tests of the trained model
#
# ---
#

# + id="vTC5ytsi9uUK"
# Evaluate the model on the validation generator.
final_loss, final_accuracy = model.evaluate(validation_generator, steps =5)
print("Final loss: {:.2f}".format(final_loss))
print("Final accuracy: {:.2f}%".format(final_accuracy * 100))
# MobileNetV2 first

# + id="TIM3JRPplpOD"
#####
# Plot of the training and validation accuracy curves
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
params = {"text.color" : "red",
          "xtick.color" : "white",
          "ytick.color" : "white"}
plt.rcParams.update(params)
plt.savefig("/content/Resultado.jpg")
plt.show()

# + id="73jtGM8Ti7GM"
# Prediction for one specific image.
Ruta=('/DatosPrueba/Datos_Prueba/Manchas/prueba15.jpg')
img=plt.imread(Ruta)
plt.imshow(img)
plt.show()

# Helper that prepares an image so it can be passed through the trained model.
def prepare_image(file):
    """Load an image, resize it to 224x224 and apply MobileNet preprocessing.

    Returns a batch of one preprocessed image (shape (1, 224, 224, 3)).
    """
    img_path = ''
    img = image.load_img(img_path + file, target_size=(224, 224))
    img_array = image.img_to_array(img)  # convert to array
    img_array_expanded_dims = np.expand_dims(img_array, axis=0)  # add the batch axis
    return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims)

# Prediction
preprocessed_image = prepare_image(Ruta)
predictions = model.predict(preprocessed_image)
print(predictions)
print(train_generator.class_indices)

# + id="HDCOJpkJV-bG"
# Class names ordered by their generator index (Title-cased).
dataset_labels = sorted(train_generator.class_indices.items(), key=lambda pair:pair[1])
dataset_labels = np.array([key.title() for key, value in dataset_labels])
print(dataset_labels)

# + id="5xJCfxEdVGI3"
import pandas as pd
# Predict one validation batch and tabulate the per-class probabilities.
val_image_batch, val_label_batch = next(iter(validation_generator))
true_label_ids = np.argmax(val_label_batch, axis=-1)
print("Validation batch shape:", val_image_batch.shape)

tf_model_predictions = model(val_image_batch)
print("Prediction results shape:", tf_model_predictions.shape)

tf_pred_dataframe = pd.DataFrame(tf_model_predictions.numpy())
tf_pred_dataframe.columns = dataset_labels

print("Prediction results for the first elements")
tf_pred_dataframe.head()

# + id="834XXZ_cWRey"
predicted_ids = np.argmax(tf_model_predictions, axis=-1)
predicted_labels = dataset_labels[predicted_ids]

# Print images batch and labels predictions
plt.figure(figsize=(10,15))
plt.subplots_adjust(hspace=0.4)
for n in range(30):
    plt.subplot(6,5,n+1)
    plt.imshow(val_image_batch[n])
    # Green title when the prediction matches the true label, red otherwise.
    color = "green" if predicted_ids[n] == true_label_ids[n] else "red"
    plt.title(predicted_labels[n].title(), color=color)
    plt.axis('off')
_ = plt.suptitle("Model predictions (green: correct, red: incorrect)")
plt.savefig("/content/ImagenelimonesPrueba.jpg")

# + [markdown] id="wMN_KWlNi7g7"
#
# ---
#
# # EXPORT YOUR MODEL TO TFLITE
#
# ---
#
# The model trained above is a TensorFlow model; here it is converted to
# TensorFlow Lite so it can run on the Raspberry Pi.
#
# Both the model and the output labels must be created, to know at which
# position each lemon category sits (ripe, spots, mites, good).

# + id="xcXph5gBjKm2"
# Convert the Keras model to a .tflite file.
converter = lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
open("modelofinal.tflite", 'wb').write(tflite_model)

# Write one class label per line, in generator-index order.
etiquetas=train_generator.class_indices
file = open("/content/prueba.txt", "w")
for llaves in etiquetas.keys():
    file.write(llaves + os.linesep)
file.close()

# + id="MwGEAq7QqiU8"
print("Number of layers in the base model: ", len(model.layers))
print('Number of trainable variables = {}'.format(len(model.trainable_variables)))

# + [markdown] id="iw-dCeQ1cH56"
# ---
# # AXELERATE
# ---
#

# + id="MXRpsVZTcT3f" outputId="7df3e67c-f073-4b08-977d-d26e66949a9d" colab={"base_uri": "https://localhost:8080/", "height": 796}
# #%tensorflow_version 1.x
#we need imgaug 0.4 for image augmentations to work properly, see https://stackoverflow.com/questions/62580797/in-colab-doing-image-data-augmentation-with-imgaug-is-not-working-as-intended
# !pip uninstall -y imgaug && pip uninstall -y albumentations && pip install imgaug==0.4
# !git clone https://github.com/AIWintermuteAI/aXeleRate.git
import sys
sys.path.append('/content/aXeleRate')
from axelerate import setup_training,setup_inference

# + id="XYRpOjcPcXTS"
# aXeleRate training configuration (MobileNet classifier, 25 epochs,
# converted to k210 and tflite formats).
TRAINING_DIR=("/DatosPrueba1/Datos_Train")
VALIDATION_DIR=("/DatosPrueba1/Datos_Test")

config = {
    "model" : {
        "type": "Classifier",
        "architecture": "MobileNet1_0",
        "input_size": 224,
        "fully-connected": [],
        "labels": [],
        "dropout" : 0.2
    },
    "weights" : {
        "full": "",
        "backend": "imagenet",
        "save_bottleneck": False
    },
    "train" : {
        "actual_epoch": 25,
        "train_image_folder": TRAINING_DIR,
        "train_times": 4,
        "valid_image_folder": VALIDATION_DIR,
        "valid_times": 4,
        "valid_metric": "val_accuracy",
        "batch_size": 32,
        "learning_rate": 1e-3,
        "saved_folder": F"/content/drive/My Drive/Proyecto_grado/Fotos",
        "first_trainable_layer": "",
        "augumentation": True
    },
    "converter" : {
        "type": ["k210","tflite"]
    }
}

# + id="ZJqJRlBdcmkY"
# Reset the TF session before launching aXeleRate's own training loop.
from keras import backend as K
K.clear_session()
model_path = setup_training(config_dict=config)
Codigo_proyecto_final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Training Logistic Regression via Stochastic Gradient Ascent # # The goal of this notebook is to implement a logistic regression classifier using stochastic gradient ascent. You will: # # * Extract features from Amazon product reviews. # * Write a function to compute the derivative of log likelihood function with respect to a single coefficient. # * Implement stochastic gradient ascent. # * Compare convergence of stochastic gradient ascent with that of batch gradient ascent. import numpy as np import pandas as pd import json import string import matplotlib.pyplot as plt # %matplotlib inline # ## Load and process review dataset # For this assignment, we will use the same subset of the Amazon product review dataset that we used in Module 3 assignment. The subset was chosen to contain similar numbers of positive and negative reviews, as the original dataset consisted of mostly positive reviews. products = pd.read_csv('../data/amazon_baby_subset.csv') with open('../data/important_words.json') as important_words_file: important_words = json.load(important_words_file) print (important_words[:3]) # Just like we did previously, we will work with a hand-curated list of important words extracted from the review data. We will also perform 2 simple data transformations: # # 1. Remove punctuation using [Python's built-in](https://docs.python.org/2/library/string.html) string manipulation functionality. # 2. Compute word counts (only for the important_words) # # Refer to Module 3 assignment for more details. 
# + products = products.fillna({'review':''}) # fill in N/A's in the review column translator = str.maketrans('', '', string.punctuation) def remove_punctuation(text): return text.translate(translator) products['review_clean'] = products['review'].apply(remove_punctuation) products.head(3) # - for word in important_words: products[word] = products['review_clean'].apply(lambda s : s.split().count(word)) # The dataframe **products** now contains one column for each of the 193 **important_words**. products['perfect'][:3] products.head(2) # ### Split data into training and validation sets # # We will now split the data into a 90-10 split where 90% is in the training set and 10% is in the validation set. We use `seed=1` so that everyone gets the same result. # + with open('../data/module-10-assignment-train-idx.json') as train_data_file: train_data_idx = json.load(train_data_file) with open('../data/module-10-assignment-validation-idx.json') as validation_data_file: validation_data_idx = json.load(validation_data_file) print (train_data_idx[:3]) print (validation_data_idx[:3]) # - train_data = products.iloc[train_data_idx] train_data.head(2) validation_data = products.iloc[validation_data_idx] validation_data.head(2) print ('Training set : %d data points' % len(train_data)) print ('Validation set: %d data points' % len(validation_data)) def get_numpy_data(dataframe, features, label): dataframe['constant'] = 1 features = ['constant'] + features features_frame = dataframe[features] feature_matrix = features_frame.values label_sarray = dataframe[label] label_array = label_sarray.values return(feature_matrix, label_array) feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment') feature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment') print (feature_matrix_train.shape) print (feature_matrix_valid.shape) # ##### Quiz Question 1 #####: # In Module 3 assignment, there were 194 features (an 
# intercept + one feature for each of the 193 important words). In this assignment, we will use
# stochastic gradient ascent to train the classifier using logistic regression. How does the changing
# the solver to stochastic gradient ascent affect the number of features?
#
# ##### Answer: Stay the same

# ## Building on logistic regression
#
# Let us now build on Module 3 assignment. Recall from lecture that the link function for logistic
# regression can be defined as:
#
# $$
# P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))},
# $$
#
# where the feature vector $h(\mathbf{x}_i)$ is given by the word counts of **important_words** in the review $\mathbf{x}_i$.
#
# We will use the **same code** as in Module 3 assignment to make probability predictions, since this
# part is not affected by using stochastic gradient ascent as a solver. Only the way in which the
# coefficients are learned is affected by using stochastic gradient ascent as a solver.

# +
def predict_probability(feature_matrix, coefficients):
    """Probabilistic estimate of P(y_i = +1 | x_i, w) for every data point.

    feature_matrix: N * D (intercept term included)
    coefficients:   D * 1
    returns:        N * 1 array of probabilities, each strictly between 0 and 1
    """
    # Score w^T h(x_i) for every row, then squash through the sigmoid link.
    scores = feature_matrix.dot(coefficients)     # N * 1
    return 1.0 / (1.0 + np.exp(-scores))
# -

# ## Derivative of log likelihood with respect to a single coefficient
#
# Let us now work on making minor changes to how the derivative computation is performed for logistic regression.
#
# Recall from the lectures and Module 3 assignment that for logistic regression, **the derivative of
# log likelihood with respect to a single coefficient** is as follows:
#
# $$
# \frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right)
# $$
#
# In Module 3 assignment, we wrote a function to compute the derivative of log likelihood with respect
# to a single coefficient $w_j$. The function accepts the following two parameters:
# * `errors` vector containing $(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w}))$ for all $i$
# * `feature` vector containing $h_j(\mathbf{x}_i)$ for all $i$
#
# Complete the following code block:

def feature_derivative(errors, feature):
    """Partial derivative of the log likelihood with respect to one coefficient.

    errors:  N * 1 vector of (indicator - predicted probability) terms
    feature: N * 1 vector of feature values h_j(x_i)
    returns: their dot product (a scalar for 1-D inputs)
    """
    derivative = np.transpose(errors).dot(feature)
    return derivative

# **Note**. We are not using regularization in this assignment, but, as discussed in the optional
# video, stochastic gradient can also be used for regularized logistic regression.

# To verify the correctness of the gradient computation, we provide a function for computing average
# log likelihood (which we recall from the last assignment was a topic detailed in an advanced optional
# video, and used here for its numerical stability).
#
# To track the performance of stochastic gradient ascent, we provide a function for computing
# **average log likelihood**.
#
# $$\ell\ell_A(\mathbf{w}) = \color{red}{\frac{1}{N}} \sum_{i=1}^N \Big( (\mathbf{1}[y_i = +1] - 1)\mathbf{w}^T h(\mathbf{x}_i) - \ln\left(1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))\right) \Big) $$
#
# **Note** that we made one tiny modification to the log likelihood function (called
# **compute_log_likelihood**) in our earlier assignments. We added a $\color{red}{1/N}$ term which
# averages the log likelihood across all data points.
# The $\color{red}{1/N}$ term makes it easier for us to compare stochastic gradient ascent with batch
# gradient ascent. We will use this function to generate plots that are similar to those you saw in
# the lecture.

def compute_avg_log_likelihood(feature_matrix, sentiment, coefficients):
    # Average log likelihood ll_A(w) over all N data points.
    indicator = (sentiment==+1)
    scores = np.dot(feature_matrix, coefficients)
    logexp = np.log(1. + np.exp(-scores))
    # scores.shape (53072L, 1L)
    # indicator.shape (53072L,)

    # Simple check to prevent overflow: where exp(-scores) overflowed to inf,
    # log(1 + exp(-s)) is numerically indistinguishable from -s.
    mask = np.isinf(logexp)
    logexp[mask] = -scores[mask]

    # Sum of (1[y=+1]-1)*score - log(1+exp(-score)), averaged over N rows.
    lp = np.sum( ( indicator.reshape(scores.shape)-1 )*scores - logexp )/len(feature_matrix)
    return lp

# ##### Quiz Question 2#####:
# Recall from the lecture and the earlier assignment, the log likelihood (without the averaging term) is given by
#
# $$\ell\ell(\mathbf{w}) = \sum_{i=1}^N \Big( (\mathbf{1}[y_i = +1] - 1)\mathbf{w}^T h(\mathbf{x}_i) - \ln\left(1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))\right) \Big) $$
#
# How are the functions $\ell\ell(\mathbf{w})$ and $\ell\ell_A(\mathbf{w})$ related?
#
# ##### Answer#####: llA(w) = 1/N *ll(w)

# ## Modifying the derivative for stochastic gradient ascent
#
# Recall from the lecture that the gradient for a single data point $\color{red}{\mathbf{x}_i}$ can be
# computed using the following formula:
#
# $$
# \frac{\partial\ell_{\color{red}{i}}(\mathbf{w})}{\partial w_j} = h_j(\color{red}{\mathbf{x}_i})\left(\mathbf{1}[y_\color{red}{i} = +1] - P(y_\color{red}{i} = +1 | \color{red}{\mathbf{x}_i}, \mathbf{w})\right)
# $$
#
#
# ** Computing the gradient for a single data point**
#
# Do we really need to re-write all our code to modify $\partial\ell(\mathbf{w})/\partial w_j$ to
# $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$?
#
# Thankfully **No!**. Using NumPy, we access $\mathbf{x}_i$ in the training data using
# `feature_matrix_train[i:i+1,:]` and $y_i$ in the training data using `sentiment_train[i:i+1]`.
# We can compute $\partial\ell_{\color{red}{i}}(\mathbf{w})/\partial w_j$ by re-using **all the code**
# written in **feature_derivative** and **predict_probability**.
#
# We compute $\partial\ell_{\color{red}{i}}(\mathbf{w})/\partial w_j$ using the following steps:
# * First, compute $P(y_i = +1 | \mathbf{x}_i, \mathbf{w})$ using the **predict_probability** function with `feature_matrix_train[i:i+1,:]` as the first parameter.
# * Next, compute $\mathbf{1}[y_i = +1]$ using `sentiment_train[i:i+1]`.
# * Finally, call the **feature_derivative** function with `feature_matrix_train[i:i+1, j]` as one of the parameters.
#
# Let us follow these steps for `j = 1` and `i = 10`:

# +
j = 1                        # Feature number
i = 10                       # Data point number
coefficients = np.zeros(194) # A point w at which we are computing the gradient.

# Single-row slices keep 2-D shape, so the batch code works unchanged.
predictions = predict_probability(feature_matrix_train[i:i+1,:], coefficients)
indicator = (sentiment_train[i:i+1]==+1)

errors = indicator - predictions
gradient_single_data_point = feature_derivative(errors, feature_matrix_train[i:i+1,j])
print ("Gradient single data point: %s" % gradient_single_data_point)
print ("           --> Should print 0.0")
# -

# ##### Quiz Question 3:#####
# The code block above computed $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$ for `j = 1`
# and `i = 10`. Is $\partial\ell_{\color{red}{i}}(\mathbf{w})/{\partial w_j}$ a scalar or a
# 194-dimensional vector?

# ##### Answer:#####
# scalar

# ## Modifying the derivative for using a batch of data points
#
# Stochastic gradient estimates the ascent direction using 1 data point, while gradient uses $N$ data
# points to decide how to update the the parameters. In an optional video, we discussed the details of
# a simple change that allows us to use a **mini-batch** of $B \leq N$ data points to estimate the
# ascent direction. This simple approach is faster than regular gradient but less noisy than
# stochastic gradient that uses only 1 data point.
Although we encorage you to watch the optional video on the topic to better understand why mini-batches help stochastic gradient, in this assignment, we will simply use this technique, since the approach is very simple and will improve your results. # # Given a mini-batch (or a set of data points) $\mathbf{x}_{i}, \mathbf{x}_{i+1} \ldots \mathbf{x}_{i+B}$, the gradient function for this mini-batch of data points is given by: # $$ # \color{red}{\sum_{s = i}^{i+B}} \frac{\partial\ell_{s}}{\partial w_j} = \color{red}{\sum_{s = i}^{i + B}} h_j(\mathbf{x}_s)\left(\mathbf{1}[y_s = +1] - P(y_s = +1 | \mathbf{x}_s, \mathbf{w})\right) # $$ # # # ** Computing the gradient for a "mini-batch" of data points** # # Using NumPy, we access the points $\mathbf{x}_i, \mathbf{x}_{i+1} \ldots \mathbf{x}_{i+B}$ in the training data using `feature_matrix_train[i:i+B,:]` # and $y_i$ in the training data using `sentiment_train[i:i+B]`. # # We can compute $\color{red}{\sum_{s = i}^{i+B}} \partial\ell_{s}/\partial w_j$ easily as follows: # + j = 1 # Feature number i = 10 # Data point start B = 10 # Mini-batch size coefficients = np.zeros(194) # A point w at which we are computing the gradient. predictions = predict_probability(feature_matrix_train[i:i+B,:], coefficients) indicator = (sentiment_train[i:i+B]==+1) errors = indicator - predictions gradient_mini_batch = feature_derivative(errors, feature_matrix_train[i:i+B,j]) print ("Gradient mini-batch data points: %s" % gradient_mini_batch) print (" --> Should print 1.0") # - # ##### Quiz Question 4: ##### # The code block above computed # $\color{red}{\sum_{s = i}^{i+B}}\partial\ell_{s}(\mathbf{w})/{\partial w_j}$ # for `j = 10`, `i = 10`, and `B = 10`. Is this a scalar or a 194-dimensional vector? # ##### Answer: # Scalar # # ##### Quiz Question 5: # ** For what value of `B` is the term # $\color{red}{\sum_{s = 1}^{B}}\partial\ell_{s}(\mathbf{w})/\partial w_j$ # the same as the full gradient # $\partial\ell(\mathbf{w})/{\partial w_j}$? 
Hint: consider the training set we are using now. # # ##### Answer: # 47780 # ### Averaging the gradient across a batch # # It is a common practice to normalize the gradient update rule by the batch size B: # # $$ # \frac{\partial\ell_{\color{red}{A}}(\mathbf{w})}{\partial w_j} \approx \color{red}{\frac{1}{B}} {\sum_{s = i}^{i + B}} h_j(\mathbf{x}_s)\left(\mathbf{1}[y_s = +1] - P(y_s = +1 | \mathbf{x}_s, \mathbf{w})\right) # $$ # In other words, we update the coefficients using the **average gradient over data points** (instead of using a summation). By using the average gradient, we ensure that the magnitude of the gradient is approximately the same for all batch sizes. This way, we can more easily compare various batch sizes of stochastic gradient ascent (including a batch size of **all the data points**), and study the effect of batch size on the algorithm as well as the choice of step size. # # # ## Implementing stochastic gradient ascent # # Now we are ready to implement our own logistic regression with stochastic gradient ascent. 
# Complete the following function to fit a logistic regression model using gradient ascent:

from math import sqrt

def logistic_regression_SG(feature_matrix, sentiment, initial_coefficients, step_size, batch_size, max_iter):
    """Fit logistic regression with (mini-batch) stochastic gradient ascent.

    Parameters
    ----------
    feature_matrix : np.ndarray, shape (N, D)
        Feature matrix, one row per data point.
    sentiment : np.ndarray, shape (N,)
        Labels, +1 or -1.
    initial_coefficients : array-like, shape (D,)
        Starting point w for the ascent.
    step_size : float
        Learning rate applied to the *averaged* batch gradient.
    batch_size : int
        Number of data points per gradient estimate; batch_size == N makes
        this behave as standard batch gradient ascent.
    max_iter : int
        Number of gradient updates (iterations), not passes over the data.

    Returns
    -------
    (coefficients, log_likelihood_all)
        Final coefficient vector and the average log likelihood of the
        *current batch* recorded at every iteration (for plotting).
    """
    log_likelihood_all = []

    # make sure it's a numpy array
    coefficients = np.array(initial_coefficients)
    # set seed=1 to produce consistent results
    np.random.seed(seed=1)
    # Shuffle the data before starting
    permutation = np.random.permutation(len(feature_matrix))
    feature_matrix = feature_matrix[permutation,:]
    sentiment = sentiment[permutation]

    i = 0 # index of current batch
    # Do a linear scan over data
    for itr in range(max_iter):
        # Predict P(y_i = +1|x_i,w) using your predict_probability() function
        # Make sure to slice the i-th row of feature_matrix with [i:i+batch_size,:]
        ### YOUR CODE HERE
        predictions = predict_probability(feature_matrix[i:(i+batch_size),:], coefficients)

        # Compute indicator value for (y_i = +1)
        # Make sure to slice the i-th entry with [i:i+batch_size]
        ### YOUR CODE HERE
        indicator = (sentiment[i:i+batch_size]==+1)

        # Compute the errors as indicator - predictions
        errors = indicator - predictions
        for j in range(len(coefficients)): # loop over each coefficient
            # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j]
            # Compute the derivative for coefficients[j] and save it to derivative.
            # Make sure to slice the i-th row of feature_matrix with [i:i+batch_size,j]
            ### YOUR CODE HERE
            derivative = feature_derivative(errors, feature_matrix[i:i+batch_size,j])

            # Compute the product of the step size, the derivative, and
            # the **normalization constant** (1./batch_size)
            ### YOUR CODE HERE
            coefficients[j] += step_size * derivative * 1. / batch_size

        # Checking whether log likelihood is increasing
        # Print the log likelihood over the *current batch*
        lp = compute_avg_log_likelihood(feature_matrix[i:i+batch_size,:], sentiment[i:i+batch_size], coefficients)
        log_likelihood_all.append(lp)
        # Print progress on a schedule that thins out as itr grows, plus the last iteration.
        if itr <= 15 or (itr <= 1000 and itr % 100 == 0) or (itr <= 10000 and itr % 1000 == 0) \
         or itr % 10000 == 0 or itr == max_iter-1:
            data_size = len(feature_matrix)
            print ('Iteration %*d: Average log likelihood (of data points [%0*d:%0*d]) = %.8f' % \
                (int(np.ceil(np.log10(max_iter))), itr, \
                 int(np.ceil(np.log10(data_size))), i, \
                 int(np.ceil(np.log10(data_size))), i+batch_size, lp))

        # if we made a complete pass over data, shuffle and restart
        i += batch_size
        if i+batch_size > len(feature_matrix):
            permutation = np.random.permutation(len(feature_matrix))
            feature_matrix = feature_matrix[permutation,:]
            sentiment = sentiment[permutation]
            i = 0

    # We return the list of log likelihoods for plotting purposes.
    return coefficients, log_likelihood_all

# **Note**. In practice, the final set of coefficients is rarely used; it is better to use the average of the last K sets of coefficients instead, where K should be adjusted depending on how fast the log likelihood oscillates around the optimum.

# ### Checkpoint
#
#
# The following cell tests your stochastic gradient ascent function using a toy dataset consisting of two data points. If the test does not pass, make sure you are normalizing the gradient update rule correctly.
# + sample_feature_matrix = np.array([[1.,2.,-1.], [1.,0.,1.]]) sample_sentiment = np.array([+1, -1]) coefficients, log_likelihood = logistic_regression_SG(sample_feature_matrix, sample_sentiment, np.zeros(3), step_size=1., batch_size=2, max_iter=2) print ('-------------------------------------------------------------------------------------') print ('Coefficients learned :', coefficients) print ('Average log likelihood per-iteration :', log_likelihood) if np.allclose(coefficients, np.array([-0.09755757, 0.68242552, -0.7799831]), atol=1e-3)\ and np.allclose(log_likelihood, np.array([-0.33774513108142956, -0.2345530939410341])): # pass if elements match within 1e-3 print ('-------------------------------------------------------------------------------------') print ('Test passed!') else: print ('-------------------------------------------------------------------------------------') print ('Test failed') # - # ## Compare convergence behavior of stochastic gradient ascent # # For the remainder of the assignment, we will compare stochastic gradient ascent against batch gradient ascent. For this, we need a reference implementation of batch gradient ascent. But do we need to implement this from scratch? # # **Quiz Question:** For what value of batch size `B` above is the stochastic gradient ascent function **logistic_regression_SG** act as a standard gradient ascent algorithm? Hint: consider the training set we are using now. # ## Running gradient ascent using the stochastic gradient ascent implementation # Instead of implementing batch gradient ascent separately, we save time by re-using the stochastic gradient ascent function we just wrote &mdash; **to perform gradient ascent**, it suffices to set **`batch_size`** to the number of data points in the training data. Yes, we did answer above the quiz question for you, but that is an important point to remember in the future :) # # **Small Caveat**. 
The batch gradient ascent implementation here is slightly different than the one in the earlier assignments, as we now normalize the gradient update rule. # # We now **run stochastic gradient ascent** over the **feature_matrix_train** for 10 iterations using: # * `initial_coefficients = np.zeros(194)` # * `step_size = 5e-1` # * `batch_size = 1` # * `max_iter = 10` coefficients, log_likelihood = logistic_regression_SG(feature_matrix_train, sentiment_train,\ initial_coefficients=np.zeros(194),\ step_size=5e-1, batch_size=1, max_iter=10) plt.plot(log_likelihood) plt.show() # **Quiz Question 7**. When you set `batch_size = 1`, as each iteration passes, how does the average log likelihood in the batch change? # * Increases # * Decreases # * Fluctuates # # **Answer:** Fluctuates . # Now run **batch gradient ascent** over the **feature_matrix_train** for 200 iterations using: # * `initial_coefficients = np.zeros(194)` # * `step_size = 5e-1` # * `batch_size = len(feature_matrix_train)` # * `max_iter = 200` # YOUR CODE HERE coefficients_batch, log_likelihood_batch = logistic_regression_SG(feature_matrix_train, sentiment_train,\ initial_coefficients=np.zeros(194),\ step_size=5e-1, batch_size=len(feature_matrix_train), max_iter=200) plt.plot(log_likelihood_batch) plt.show() # **Quiz Question 8**. When you set `batch_size = len(feature_matrix_train)`, as each iteration passes, how does the average log likelihood in the batch change? # * Increases # * Decreases # * Fluctuates # # **Answer:** Increase # ## Make "passes" over the dataset # To make a fair comparison betweeen stochastic gradient ascent and batch gradient ascent, we measure the average log likelihood as a function of the number of passes (defined as follows): # $$ # [\text{# of passes}] = \frac{[\text{# of data points touched so far}]}{[\text{size of dataset}]} # $$ # **Quiz Question 9** Suppose that we run stochastic gradient ascent with a batch size of 100. 
# How many gradient updates are performed at the end of two passes over a dataset consisting of 50000 data points?

# ###### Answer:
# 1000

# ## Log likelihood plots for stochastic gradient ascent

# With the terminology in mind, let us run stochastic gradient ascent for 10 passes. We will use
# * `step_size=1e-1`
# * `batch_size=100`
# * `initial_coefficients` to all zeros.

# +
step_size = 1e-1
batch_size = 100
num_passes = 10
num_iterations = num_passes * int(len(feature_matrix_train)/batch_size)

coefficients_sgd, log_likelihood_sgd = logistic_regression_SG(feature_matrix_train, sentiment_train,
                                        initial_coefficients=np.zeros(194),
                                        step_size=1e-1, batch_size=100, max_iter=num_iterations)
# -

# Plot the per-iteration average log likelihood of the stochastic run just computed.
# (Bug fix: this previously plotted `log_likelihood_batch`, the stale result of the
# earlier batch gradient ascent run, instead of `log_likelihood_sgd` produced above.)
plt.plot(log_likelihood_sgd)
plt.show()

# We provide you with a utility function to plot the average log likelihood as a function of the number of passes.

def make_plot(log_likelihood_all, len_data, batch_size, smoothing_window=1, label=''):
    """Plot average log likelihood against the number of passes over the data.

    Parameters
    ----------
    log_likelihood_all : sequence of float
        Per-iteration average log likelihood values.
    len_data : int
        Number of data points in the training set.
    batch_size : int
        Batch size used during training; with len_data it converts the
        iteration index into "# of passes" on the x-axis.
    smoothing_window : int, optional
        Width of the moving-average window applied before plotting.
    label : str, optional
        Legend label for this curve.
    """
    plt.rcParams.update({'figure.figsize': (9,5)})
    # Moving average over the last `smoothing_window` iterations ('valid' mode
    # drops the first smoothing_window-1 partially-covered positions).
    log_likelihood_all_ma = np.convolve(np.array(log_likelihood_all), \
                                        np.ones((smoothing_window,))/smoothing_window, mode='valid')

    plt.plot(np.array(range(smoothing_window-1, len(log_likelihood_all)))*float(batch_size)/len_data,
             log_likelihood_all_ma, linewidth=4.0, label=label)
    plt.rcParams.update({'font.size': 16})
    plt.tight_layout()
    plt.xlabel('# of passes over data')
    plt.ylabel('Average log likelihood per data point')
    plt.legend(loc='lower right', prop={'size':14})

make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100,
          label='stochastic gradient, step_size=1e-1')

# ## Smoothing the stochastic gradient ascent curve
#
# The plotted line oscillates so much that it is hard to see whether the log likelihood is improving. In our plot, we apply a simple smoothing operation using the parameter `smoothing_window`.
The smoothing is simply a [moving average](https://en.wikipedia.org/wiki/Moving_average) of log likelihood over the last `smoothing_window` "iterations" of stochastic gradient ascent. make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100, smoothing_window=30, label='stochastic gradient, step_size=1e-1') # **Checkpoint**: The above plot should look smoother than the previous plot. Play around with `smoothing_window`. As you increase it, you should see a smoother plot. # ## Stochastic gradient ascent vs batch gradient ascent # # To compare convergence rates for stochastic gradient ascent with batch gradient ascent, we call `make_plot()` multiple times in the same cell. # # We are comparing: # * **stochastic gradient ascent**: `step_size = 0.1`, `batch_size=100` # * **batch gradient ascent**: `step_size = 0.5`, `batch_size=len(feature_matrix_train)` # # Write code to run stochastic gradient ascent for 200 passes using: # * `step_size=1e-1` # * `batch_size=100` # * `initial_coefficients` to all zeros. # + step_size = 1e-1 batch_size = 100 num_passes = 200 num_iterations = num_passes * int(len(feature_matrix_train)/batch_size) ## YOUR CODE HERE coefficients_sgd, log_likelihood_sgd = logistic_regression_SG(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=1e-1, batch_size=100, max_iter=num_iterations) # - # YOUR CODE HERE coefficients_batch, log_likelihood_batch = logistic_regression_SG(feature_matrix_train, sentiment_train,\ initial_coefficients=np.zeros(194),\ step_size=5e-1, batch_size=len(feature_matrix_train), max_iter=200) # We compare the convergence of stochastic gradient ascent and batch gradient ascent in the following cell. Note that we apply smoothing with `smoothing_window=30`. 
make_plot(log_likelihood_sgd, len_data=len(feature_matrix_train), batch_size=100, smoothing_window=30, label='stochastic, step_size=1e-1') make_plot(log_likelihood_batch, len_data=len(feature_matrix_train), batch_size=len(feature_matrix_train), smoothing_window=1, label='batch, step_size=5e-1') # **Quiz Question 10**: In the figure above, how many passes does batch gradient ascent need to achieve a similar log likelihood as stochastic gradient ascent? # # 1. It's always better # 2. 10 passes # 3. 20 passes # 4. 150 passes or more (This is the answer) # ## Explore the effects of step sizes on stochastic gradient ascent # In previous sections, we chose step sizes for you. In practice, it helps to know how to choose good step sizes yourself. # # To start, we explore a wide range of step sizes that are equally spaced in the log space. Run stochastic gradient ascent with `step_size` set to 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, and 1e2. Use the following set of parameters: # * `initial_coefficients=np.zeros(194)` # * `batch_size=100` # * `max_iter` initialized so as to run 10 passes over the data. # + batch_size = 100 num_passes = 10 num_iterations = num_passes * int(len(feature_matrix_train)/batch_size) coefficients_sgd = {} log_likelihood_sgd = {} for step_size in np.logspace(-4, 2, num=7): coefficients_sgd[step_size], log_likelihood_sgd[step_size] = logistic_regression_SG(feature_matrix_train, sentiment_train,\ initial_coefficients=np.zeros(194),\ step_size=step_size, batch_size=batch_size, max_iter=num_iterations) # - # ### Plotting the log likelihood as a function of passes for each step size # # Now, we will plot the change in log likelihood using the `make_plot` for each of the following values of `step_size`: # # * `step_size = 1e-4` # * `step_size = 1e-3` # * `step_size = 1e-2` # * `step_size = 1e-1` # * `step_size = 1e0` # * `step_size = 1e1` # * `step_size = 1e2` # For consistency, we again apply `smoothing_window=30`. 
for step_size in np.logspace(-4, 2, num=7): make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100, smoothing_window=30, label='step_size=%.1e'%step_size) # Now, let us remove the step size `step_size = 1e2` and plot the rest of the curves. for step_size in np.logspace(-4, 2, num=7)[0:6]: make_plot(log_likelihood_sgd[step_size], len_data=len(train_data), batch_size=100, smoothing_window=30, label='step_size=%.1e'%step_size) # **Quiz Question 11**: Which of the following is the worst step size? Pick the step size that results in the lowest log likelihood in the end. # 1. 1e-2 # 2. 1e-1 # 3. 1e0 # 4. 1e1 # 5. 1e2 # **Quiz Question**: Which of the following is the best step size? Pick the step size that results in the highest log likelihood in the end. # 1. 1e-4 # 2. 1e-2 # 3. 1e0 (Answer) # 4. 1e1 # 5. 1e2
week7/Training-Logistic-Regression-via-Stochastic-Gradient-Ascent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:env_multilingual_class] # language: python # name: conda-env-env_multilingual_class-py # --- # + [markdown] Collapsed="false" # # The Stanford Sentiment Treebank # The Stanford Sentiment Treebank consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence. We use the two-way (positive/negative) class split, and use only sentence-level labels. # + Collapsed="false" from IPython.display import display, Markdown with open('../../doc/env_variables_setup.md', 'r') as fh: content = fh.read() display(Markdown(content)) # + [markdown] Collapsed="false" # ## Import Packages # + Collapsed="false" import tensorflow as tf import tensorflow_datasets from tensorflow.keras.utils import to_categorical from transformers import ( BertConfig, BertTokenizer, XLMRobertaTokenizer, TFBertModel, TFXLMRobertaModel, TFBertForSequenceClassification, glue_convert_examples_to_features, glue_processors ) from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.metrics import classification_report import matplotlib.pyplot as plt from google.cloud import storage import math import numpy as np import os import glob import time from datetime import timedelta import shutil from datetime import datetime import pickle import re import codecs import json from google.api_core.client_options import ClientOptions from googleapiclient import discovery from googleapiclient import errors # + [markdown] Collapsed="false" # ## Check configuration # + Collapsed="false" print(tf.version.GIT_VERSION, tf.version.VERSION) # + Collapsed="false" print(tf.keras.__version__) # + Collapsed="false" gpus = tf.config.list_physical_devices('GPU') if len(gpus)>0: for gpu in gpus: print('Name:', gpu.name, ' Type:', gpu.device_type) else: 
print('No GPU available !!!!') # + [markdown] Collapsed="false" # ## Define Paths # + Collapsed="false" try: data_dir=os.environ['PATH_DATASETS'] except KeyError: print('missing PATH_DATASETS') try: tensorboard_dir=os.environ['PATH_TENSORBOARD'] except KeyError: print('missing PATH_TENSORBOARD') try: savemodel_dir=os.environ['PATH_SAVE_MODEL'] except KeyError: print('missing PATH_SAVE_MODEL') # + [markdown] Collapsed="false" # ## Import local packages # + Collapsed="false" import preprocessing.preprocessing as pp import utils.model_metrics as mm import utils.model_utils as mu # + Collapsed="false" import importlib importlib.reload(pp); importlib.reload(mm); importlib.reload(mu); # + [markdown] Collapsed="false" # ## Check the census model stored on GCP # + [markdown] Collapsed="false" # The **variables** directory contains a standard training checkpoint (see the guide to training checkpoints). # The **assets** directory contains files used by the TensorFlow graph, for example text files used to initialize vocabulary tables. # The **saved_model.pb** file stores the actual TensorFlow program, or model, and a set of named signatures, each identifying a function that accepts tensor inputs and produces tensor outputs. # + Collapsed="false" # use model trainied with CPU os.environ['MODEL_GCP']='gs://'+os.environ['BUCKET_NAME']+'/census_20200624_101711/keras-job-dir/4/keras_export' # + Collapsed="false" language="bash" # saved_model_cli show --dir $MODEL_GCP --tag_set serve --signature_def serving_default # + [markdown] Collapsed="false" # ## Model serving setup # + Collapsed="false" # Normal VM has a model size of 500 MB # For more you need to use a specific n1-standard-2 VM (2 GB) for online prediction. It is only available in us-central1. 
region_model = 'us-central1' #region_model = 'europe-west4' #region_model = 'europe-west1' #region_model = 'europe-west6' # + Collapsed="false" regional_endpoint=False if region_model=='europe-west4': regional_endpoint=True elif region_model=='us-central1': regional_endpoint=True print(' Region: {} is a regional endpoint: {}'.format(region_model, regional_endpoint)) # + Collapsed="false" batch_pred=True if batch_pred: regional_endpoint=False # + Collapsed="false" project_name = os.environ['PROJECT_ID'] project_id = 'projects/{}'.format(project_name) if not regional_endpoint: ai_platform_serving = discovery.build('ml', 'v1') else: endpoint = 'https://'+region_model+'-ml.googleapis.com' client_options = ClientOptions(api_endpoint=endpoint) ai_platform_serving = discovery.build('ml', 'v1', client_options=client_options) # to list all model ai_platform_serving_global = discovery.build('ml', 'v1') # + [markdown] Collapsed="false" # ### Check models already deployed # + Collapsed="false" request = ai_platform_serving_global.projects().models().list(parent=project_id) # + Collapsed="false" # Make the call. try: response = request.execute() print('List of model:') if 'models' in response.keys(): for i in response['models']: print(' Model \'s name: {}:'.format(i['name'].split('/')[-1])) print(' descrition: {}'.format(i['description'])) print(' regions: {}'.format(i['regions'])) except errors.HttpError as err: # Something went wrong, print out some information. print('There was an error creating the model. Check the details:') print(err._get_reason()) # + [markdown] Collapsed="false" # ### Create a new model # + Collapsed="false" # defining the name of the model for online prediction if batch_pred: name_model = 'tf_gcp_census_test_batch_'+region_model.replace('-','_') else: name_model = 'tf_gcp_census_test_'+region_model.replace('-','_') description_model = 'this is a model for test using census gcp code' # Create a dictionary with the fields from the request body. 
request_dict = {'name': name_model, 'regions': [region_model], 'description': description_model, 'labels': {'region': region_model} } # Create a request to call projects.models.create. request = ai_platform_serving.projects().models().create(parent=project_id, body=request_dict) # + Collapsed="false" request_dict # + Collapsed="false" # Make the call. try: response = request.execute() print('Name of the model: {}:'.format(response['name'].split('/')[-1])) print(' descrition: {}'.format(response['description'])) print(' regions: {}'.format(response['regions'])) except errors.HttpError as err: # Something went wrong, print out soFinme information. print('There was an error creating the model. Check the details:') print(err._get_reason()) # + [markdown] Collapsed="false" # ### Defined all parameters and upload our models # + Collapsed="false" # defining the name of the model for online prediction parentId = 'projects/{}/models/{}'.format(project_name, name_model) # Normal VM has a model size of 500 MB for more you need to use a specific n1-standard-2 VM (2 GB) for online prediction. It is only available in us-central1. #region_model = 'us-central1' model_binaries = os.environ['MODEL_GCP'] machine_type='mls1-c1-m2' version = 'V1' # Create a dictionary with the fields from the request body. request_dict = {'machineType': machine_type, 'runtimeVersion': '2.1', 'pythonVersion': '3.7', 'framework': 'TENSORFLOW', 'description': description_model, 'deploymentUri': model_binaries, 'name': version } # Create a request to call projects.models.create. request = ai_platform_serving.projects().models().versions().create(parent=parentId, body=request_dict) # + Collapsed="false" #request_dict # + Collapsed="false" # Make the call. 
try: response = request.execute() print('Name of the model: {}:'.format(response['name'].split('/')[-1])) print(' descrition: {}'.format(response['metadata']['version']['description'])) print(' runtimeVersion: {}'.format(response['metadata']['version']['runtimeVersion'])) print(' framework: {}'.format(response['metadata']['version']['framework'])) print(' machineType: {}'.format(response['metadata']['version']['machineType'])) print(' pythonVersion: {}'.format(response['metadata']['version']['pythonVersion'])) except errors.HttpError as err: # Something went wrong, print out soFinme information. print('There was an error creating the model. Check the details:') print(err._get_reason()) # + [markdown] Collapsed="false" # ### Check that the new modelal was deployed # + Collapsed="false" request = ai_platform_serving.projects().models().list(parent=project_id) # + Collapsed="false" # Make the call. try: response = request.execute() print('List of model:') for i in response['models']: print(' Model \'s name: {}:'.format(i['name'].split('/')[-1])) print(' descrition: {}'.format(i['description'])) print(' regions: {}'.format(i['regions'])) except errors.HttpError as err: # Something went wrong, print out some information. print('There was an error creating the model. Check the details:') print(err._get_reason()) # + [markdown] Collapsed="false" # ## Model serving inference # + [markdown] Collapsed="false" # ### Prepare data for online prediction for BERT # + [markdown] Collapsed="false" # example of format: # # ``` # {'instances': # [ # {'input_ids': [101, 143, 18267, 15470, 90395, ...], # 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, .....], # 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, .....] # }, # {'input_ids': [101, 17664, 143, 30728, .........], # 'attention_mask': [1, 1, 1, 1, 1, 1, 1, .......], # 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, ....] 
# } # ] # } # # ``` # + [markdown] Collapsed="false" # ### Prepare data for online prediction for Census # + Collapsed="false" # for census data_prediction=[{'dense_input': [25.0, 0, 7, 0, 0, 0, 0, 0, 0, 40, 0]}] # + Collapsed="false" data_prediction[0] # + [markdown] Collapsed="false" # ### Online prediction # + Collapsed="false" # model version_name='V1' # use the one above or define it below #name_model='...' # use the one above or define it below, be careful with regional endpoint #ai_platform_serving='...' parent = 'projects/{}/models/{}/versions/{}'.format(project_name, name_model, version_name) # data for prediction request_data = {"instances": data_prediction} # Create a request to call projects.models.create. request = ai_platform_serving.projects().predict(body=request_data, name=parent) # + Collapsed="false" #parent # + Collapsed="false" #request_data # + Collapsed="false" # Make the call. try: response = request.execute() print('predictions:') for i in response['predictions']: print(' {}'.format(i)) except errors.HttpError as err: # Something went wrong, print out soFinme information. print('There was an error making prediction. Check the details:') print(err._get_reason()) # + [markdown] Collapsed="false" # ### Batch predictions # + Collapsed="false" # Regional endpoint only support online prediction and AI explanations #ai_platform_serving = discovery.build('ml', 'v1') #name_model='...' 
version_name=None input_paths='gs://'+os.environ['BUCKET_NAME']+'/serving/sst2/input_predict_gcloud_census.json' output_path='gs://'+os.environ['BUCKET_NAME']+'/batch_prediction_census_'+datetime.now().strftime("%Y_%m_%d_%H%M%S") data_format='TEXT' max_worker_count=20 runtime_version=None not_deployed=False if not_deployed: runtime_version='2.1' uri='gs://'+os.environ['BUCKET_NAME']+'/census_20200706_194610/keras-job-dir/4/keras_export' signatureName='serving_default' # + Collapsed="false" model_id = '{}/models/{}'.format(project_id, name_model) if version_name is not None: version_id = '{}/versions/{}'.format(model_id, version_name) # Make a jobName of the format "model_name_batch_predict_YYYYMMDD_HHMMSS" timestamp = time.strftime('%Y%m%d_%H%M%S', time.gmtime()) job_id = '{}_{}'.format(name_model,timestamp) # Start building the request dictionary with required information. body = {'jobId': job_id, 'predictionInput': { 'dataFormat': data_format, 'inputPaths': [input_paths], 'outputPath': output_path, 'region': region_model} } # Use the version if present, the model (its default version) if not. if not_deployed: body['predictionInput']['uri'] = uri body['predictionInput']['signatureName'] = signatureName else: if version_name is not None: body['predictionInput']['versionName'] = version_id else: body['predictionInput']['modelName'] = model_id # Only include a maximum number of workers or a runtime version if specified. # Otherwise let the service use its defaults. #if max_worker_count: # body['predictionInput']['maxWorkerCount'] = max_worker_count if runtime_version: body['predictionInput']['runtimeVersion'] = runtime_version # Create a request to call projects.models.create. request = ai_platform_serving.projects().jobs().create(parent=project_id, body=body) # + Collapsed="false" body # + Collapsed="false" # Make the call. try: response = request.execute() print('job requested.') # The state returned will almost always be QUEUED. 
print('state : {}'.format(response['state'])) except errors.HttpError as err: # Something went wrong, print out soFinme information. print('There was an error making prediction. Check the details:') print(err._get_reason()) # + [markdown] Collapsed="false" # ## Test with gcloud # + Collapsed="false" # gcloud command test: OK # #!gcloud ai-platform jobs submit prediction 'tf_bert_classification_test_batch_us_central1_20200825_test_v1' \ # # --model 'tf_gcp_census_test_batch_us_central1' \ # # --input-paths 'gs://multilingual_text_classification/serving/sst2/input_predict_gcloud_census.json' \ # # --output-path 'gs://multilingual_text_classification/test_v1' \ # # --region 'us-central1' \ # # --data-format 'TEXT' # + Collapsed="false" # #!gcloud ai-platform jobs describe job_name... # + Collapsed="false" # #!gcloud ai-platform jobs stream-logs job_name...
notebook/05-Serving/03_Census_model_serving_python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import torch
import random
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from math import prod
from tqdm import tqdm

# Set random seed for reproducibility
manualSeed = 999
#manualSeed = random.randint(1, 10000) # use if you want new results
print("Random Seed: ", manualSeed)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
# -

# Decide which device we want to run on
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# <div class="alert alert-info">
# <h3>Part 2.3 : OTGAN </h3>
# </div>

# Implementing the generator and the discriminator class as described in the OTGAN paper


class OTGenerator(nn.Module):
    """Generator with gated (GLU) activations, as in the OTGAN paper.

    Maps a latent vector of size ``lat_dim`` to a 1x32x32 image in [-1, 1]
    (final tanh).  After the linear layer and each of the first three convs,
    the channels are split in two halves and one half gates the other through
    a sigmoid (Gated Linear Unit), halving the channel count each time.
    """

    def __init__(self, lat_dim):
        super(OTGenerator, self).__init__()
        # 32768 = 2048 * 4 * 4; after the GLU split this becomes 1024x4x4
        self.lin = nn.Linear(lat_dim, 32768)
        self.conv_1 = nn.Conv2d(1024, 1024, 5, 1, 2)
        self.conv_2 = nn.Conv2d(512, 512, 5, 1, 2)
        self.conv_3 = nn.Conv2d(256, 256, 5, 1, 2)
        self.conv_4 = nn.Conv2d(128, 1, 5, 1, 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.act = nn.Tanh()
        self.act_2 = nn.Sigmoid()

    def forward(self, inp_gen):
        x = self.lin(inp_gen)
        # GLU: first half of the features gated by sigmoid of the second half
        x, y = x.chunk(chunks=2, dim=1)
        x = x * self.act_2(y)
        x = x.view(x.size(0), 1024, 4, 4)
        x = self.upsample(x)          # 4x4 -> 8x8
        x = self.conv_1(x)
        x, y = x.chunk(chunks=2, dim=1)
        x = x * self.act_2(y)
        x = self.upsample(x)          # 8x8 -> 16x16
        x = self.conv_2(x)
        x, y = x.chunk(chunks=2, dim=1)
        x = x * self.act_2(y)
        x = self.upsample(x)          # 16x16 -> 32x32
        x = self.conv_3(x)
        x, y = x.chunk(chunks=2, dim=1)
        x = x * self.act_2(y)
        x = self.act(self.conv_4(x))  # tanh output in [-1, 1]
        return x


class OTDiscriminator(nn.Module):
    """Critic mapping a 1x32x32 image to a unit-norm feature vector.

    Uses CReLU activations (concatenation of relu(x) and relu(-x), which
    doubles the channel count) and L2-normalises the flattened 32768-dim
    output, so dot products between outputs are cosine similarities.
    """

    def __init__(self):
        super(OTDiscriminator, self).__init__()
        self.conv_1 = nn.Conv2d(1, 128, 5, 1, 2)
        self.conv_2 = nn.Conv2d(256, 256, 5, 2, 2)
        self.conv_3 = nn.Conv2d(512, 512, 5, 2, 2)
        self.conv_4 = nn.Conv2d(1024, 1024, 5, 2, 2)

    def forward(self, x):
        x = self.conv_1(x)
        # CReLU: keeps both the positive and negative parts of the activation
        x = torch.cat((nn.functional.relu(x), nn.functional.relu(-x)), 1)
        x = self.conv_2(x)
        x = torch.cat((nn.functional.relu(x), nn.functional.relu(-x)), 1)
        x = self.conv_3(x)
        x = torch.cat((nn.functional.relu(x), nn.functional.relu(-x)), 1)
        x = self.conv_4(x)
        x = torch.cat((nn.functional.relu(x), nn.functional.relu(-x)), 1)
        x = torch.reshape(x, (x.size(0), 32768))
        # Unit-norm rows: feature-space distances become cosine distances
        x = nn.functional.normalize(x, p=2, dim=1)
        return x


# Let's code the OTGAN class


class OTGAN():
    """OTGAN implementation as described in Salimans et al (2018)."""

    def __init__(self, lat_dim, train_ratio, device):
        self.lat_dim = lat_dim
        # every `train_ratio`-th step trains the critic, the others the generator
        self.train_ratio = train_ratio
        self.generator = OTGenerator(lat_dim).to(device)
        self.discriminator = OTDiscriminator().to(device)
        self.device = device

    def compute_cost(self, batch, batch_1):
        """Pairwise transport-cost matrix between two feature batches.

        NOTE(review): the norms here are Frobenius norms of the whole
        matrices, not per-row norms; since the discriminator already
        L2-normalises its rows, batch @ batch_1.T is the row-wise cosine
        similarity up to this global scaling — confirm this is intended.
        """
        return 1 - (batch @ batch_1.t()) / (torch.norm(batch, p=2) * torch.norm(batch_1, p=2))

    def sinkhorn(self, a, b, C, epsilon=0.5, max_iters=100):
        """Run Sinkhorn's algorithm and return the transport plan.

        The scaling iterations run under ``no_grad``: gradients reach the
        networks only through the cost matrix in the loss, not through the
        matching itself.
        """
        u = torch.ones_like(a, device=self.device)
        with torch.no_grad():
            K = torch.exp(-C / epsilon)
            for i in range(max_iters):
                v = b / torch.matmul(K, u)
                u = a / torch.matmul(K.T, v)
        return torch.matmul(torch.diag(u), torch.matmul(K, torch.diag(v)))

    def train(self, dataloader, nbr_epochs, lr_rate, beta):
        """Train generator and critic; return (snapshot images, per-epoch losses)."""
        opt_d = optim.Adam(self.discriminator.parameters(), lr=lr_rate, betas=(beta, 0.999))
        opt_g = optim.Adam(self.generator.parameters(), lr=lr_rate, betas=(beta, 0.999))
        # Fixed latent batch, uniform in [-1, 1], to monitor generator progress
        fixed_noise = 2 * torch.rand(dataloader.batch_size, self.lat_dim, device=self.device) - 1
        iters = 0
        loss_l, img_list = [], []
        for epoch in range(nbr_epochs):
            for idx, (batch, _) in enumerate(tqdm(dataloader)):
                # Two independent real half-batches (and two fake batches below),
                # as required by the minibatch energy distance
                batch, batch_1 = batch.chunk(chunks=2, dim=0)
                batch = batch.to(self.device)
                batch_1 = batch_1.to(self.device)
                self.discriminator.zero_grad()
                self.generator.zero_grad()
                # Generating fake samples
                fk_smpl_1 = self.generator(2 * torch.rand(batch.size(0), self.lat_dim, device=self.device) - 1)
                fk_smpl_2 = self.generator(2 * torch.rand(batch.size(0), self.lat_dim, device=self.device) - 1)
                # prediction on real and fake batches
                pred = self.discriminator(batch)
                pred_2 = self.discriminator(batch_1)
                fk_pred, fk_pred_2 = self.discriminator(fk_smpl_1), self.discriminator(fk_smpl_2)
                # Computing costs between every needed pair of batches
                cost_pred, cost_pred_fk = self.compute_cost(pred, pred_2), self.compute_cost(pred, fk_pred)
                cost_pred_2_fk, cost_pred_2_fk_2 = self.compute_cost(pred_2, fk_pred), self.compute_cost(pred_2, fk_pred_2)
                cost_pred_fk_2, cost_fk = self.compute_cost(pred, fk_pred_2), self.compute_cost(fk_pred, fk_pred_2)
                # using the sinkhorn algorithm to compute optimal matchings
                # (uniform marginals over the half-batch)
                a = (torch.ones(batch.size(0)) / batch.size(0)).to(self.device)
                b = (torch.ones(batch.size(0)) / batch.size(0)).to(self.device)
                match_pred, match_pred_fk = self.sinkhorn(a, b, cost_pred), self.sinkhorn(a, b, cost_pred_fk)
                # BUGFIX: match_pred_2_fk was computed from cost_pred_2_fk_2,
                # pairing this matching with the wrong cost matrix.
                match_pred_fk_2, match_pred_2_fk = self.sinkhorn(a, b, cost_pred_fk_2), self.sinkhorn(a, b, cost_pred_2_fk)
                match_pred_2_fk_2, match_fk = self.sinkhorn(a, b, cost_pred_2_fk_2), self.sinkhorn(a, b, cost_fk)
                # Compute the Sinkhorn distances <plan, cost>
                loss_pred = torch.sum(match_pred * cost_pred)
                loss_pred_fk = torch.sum(match_pred_fk * cost_pred_fk)
                loss_pred_fk_2 = torch.sum(match_pred_fk_2 * cost_pred_fk_2)
                loss_pred_2_fk = torch.sum(match_pred_2_fk * cost_pred_2_fk)
                loss_pred_2_fk_2 = torch.sum(match_pred_2_fk_2 * cost_pred_2_fk_2)
                loss_fk = torch.sum(match_fk * cost_fk)
                # Minibatch energy distance: the four real/fake cross terms
                # minus the within-real and within-fake terms.
                # BUGFIX: the subtracted term previously reused loss_pred_fk_2
                # instead of loss_pred (which was computed but never used).
                loss = (loss_pred_fk + loss_pred_fk_2 + loss_pred_2_fk + loss_pred_2_fk_2
                        - 2 * (loss_pred + loss_fk))
                loss.backward()
                # BUGFIX: `idx+1 % self.train_ratio` parsed as `idx + (1 % ratio)`,
                # so the condition was never true and the critic never stepped.
                # NOTE(review): the paper's critic *maximises* this distance;
                # stepping opt_d on the minimisation gradient reproduces the
                # original code's apparent intent — confirm the sign.
                if (idx + 1) % self.train_ratio == 0:
                    opt_d.step()
                else:
                    opt_g.step()
                # Check how the generator is doing by saving G's output on fixed_noise
                if (iters % 500 == 0):
                    with torch.no_grad():
                        fk_img = self.generator(fixed_noise).detach().cpu()
                    img_list.append(vutils.make_grid(fk_img, padding=2, normalize=True))
                iters += 1
            print("Epoch [{}/{}], loss: {:.4f}".format(epoch + 1, nbr_epochs, loss.item()))
            # Save losses for plotting later.  BUGFIX: store plain floats; the
            # original appended live grad-tracking tensors, which kept the whole
            # graph alive and made the later np.abs(loss_ot) call fail.
            loss_l.append(loss.item())
        return img_list, loss_l


# Hyperparameters for training

# +
batch_size = 64
image_size = 32
# Number of workers for dataloader
workers = 1

train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./', train=True, download=True,
                               transform=transforms.Compose([
                                   transforms.Resize(image_size),
                                   transforms.CenterCrop(image_size),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5,), (0.5,))])),
    batch_size=batch_size, shuffle=True, num_workers=workers)

# Size of z latent vector (i.e. size of generator input)
lat_dim = 100
# Number of training epochs
nbr_epochs = 20
# Learning rate for optimizers
lr = 3e-4
# Beta hyperparam for Adam optimizers
beta = 0.5
# generator to discriminator training ratio
train_ratio = 3
# -

otgan = OTGAN(lat_dim, train_ratio, device)
img_list_ot, loss_ot = otgan.train(train_loader, nbr_epochs, lr, beta)

# Let's visualise the images

# #%%capture
fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)] for i in img_list_ot]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
HTML(ani.to_jshtml())

# +
# Grab a batch of real images from the dataloader
real_batch = next(iter(train_loader))

# Plot the real images
plt.figure(figsize=(15, 15))
plt.subplot(1, 2, 1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(), (1, 2, 0)))

# Plot the fake images from the last epoch
plt.subplot(1, 2, 2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list_ot[-1], (1, 2, 0)))
plt.show()
# -

# A not so fancy loss plot
plt.figure(figsize=(15, 6))
plt.plot(np.arange(nbr_epochs), np.abs(loss_ot), '-.k', label='absolute value loss')  # dashdot black
plt.grid()
plt.legend(loc="upper right")
plt.title('Loss with respect to scene size');
plt.xlabel('Epoch number')
plt.ylabel('Loss');

# We can notice that the generated images do not resemble the ones found in the data, but this is natural: the batch size was small, which violates one of the principles of the OTGAN. Hopefully with more memory one can overcome this issue.
Farhat_Mohamed_Babacar_Sow_OT_project_notebook_part2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import plotly.express as px import copy import datetime import matplotlib.pyplot as plt print(datetime.datetime.now()) # ## Данные df = pd.read_csv('data/features/uuid_x_efofs_class_x_skills.csv', index_col=0) df df_exploded = df.explode('skill') df_ = df_exploded.groupby(['Class', 'skill']).size().reset_index() df_.rename(columns={0: 'count'}, inplace=True ) df_ = df_[df_['count'] > 50] df_
code/research-skills_x_label.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mozillatts (conda) # language: python # name: mozillatts # --- # EDIT to point at WaveRNN folder WaveRNN_path = '~/workspace/WaveRNN/' import sys sys.path.append(WaveRNN_path) from utils.dsp import hp from models.fatchord_version import WaveRNN import torch import numpy as np from pathlib import Path WaveRNN_path = Path(WaveRNN_path) # + hp.configure(WaveRNN_path / 'hparams.py') # Load hparams from file device = torch.device('cpu') model = WaveRNN(rnn_dims=hp.voc_rnn_dims, fc_dims=hp.voc_fc_dims, bits=hp.bits, pad=hp.voc_pad, upsample_factors=hp.voc_upsample_factors, feat_dims=hp.num_mels, compute_dims=hp.voc_compute_dims, res_out_dims=hp.voc_res_out_dims, res_blocks=hp.voc_res_blocks, hop_length=hp.hop_length, sample_rate=hp.sample_rate, mode=hp.voc_mode).to(device) model.load(str(WaveRNN_path / 'pretrained/latest_weights.pyt')) # - file_name = Path('scientists.npy') mel = np.load(WaveRNN_path / file_name) batch_pred = True # False is slower but possibly better _ = model.generate(mel.clip(0,1)[np.newaxis,:,:], file_name.stem + '.wav', batch_pred, 110_000, hp.voc_overlap, hp.mu_law)
notebooks/WaveRNN prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # packages used # + # %matplotlib inline # misc from IPython.display import display, HTML import numpy as np # DATA - prep #kaggle import pandas as pd import sklearn.model_selection # ML - models import sklearn.linear_model import sklearn.tree import sklearn.ensemble import xgboost # hypertune from sklearn.model_selection import GridSearchCV # ML - accuracy import sklearn.metrics # Plot and visualize # import matplotlib.pyplot as plt # import shap # savemodel import joblib # - # # Get data # Setup: # - follow "API credential step" listed here: https://github.com/Kaggle/kaggle-api # - go to https://www.kaggle.com/ (login) # - go to my_profile (download kaggle.json) # - put it in ~/.kaggle/kaggle.json # - `cp ~/Downloads/kaggle.json ~/.kaggle/kaggle.json` # - `chmod 600 ~/.kaggle/kaggle.json` # - Go to kaggle and join competition: # - https://www.kaggle.com/c/titanic # - install kaggle # - download data # - profit!!! 
# + 
# Where the Kaggle data lives / which files to use
metadata = {
    'basepath': 'data/',
    'dataset': 'titanic',
    'train': 'train.csv',
    'test': 'test.csv'}

# +
# make folder
# download .zip
# unzip
# remove the .zip
# (data is placed ../data/titanic)
# # !mkdir -p {metadata['basepath']}
# # !kaggle competitions download -c dataset {metadata['dataset']} -p {metadata['basepath']}
# # !unzip -o {metadata['basepath']}{metadata['dataset']}.zip -d {metadata['basepath']}{metadata['dataset']}/
# # !rm {metadata['basepath']}{metadata['dataset']}.zip
# -

# # Load and explore

# load
train = pd.read_csv("{basepath}/{dataset}/{train}".format(**metadata))
test = pd.read_csv("{basepath}/{dataset}/{test}".format(**metadata))


# # Simple imputation + cleaning

# +
def clean(df):
    """Return a model-ready copy of the Titanic frame.

    Encodes Sex, imputes Age/Fare with -1, adds FamilySize / Has_Cabin /
    name- and ticket-length features, and one-hot encodes Embarked.
    The input frame is not modified.
    """
    dfc = df.copy()

    # Simple map
    dfc['Sex'] = dfc['Sex'].map({"female": 0, "male": 1}).astype(int)

    # simple Impute (-1 marks "missing" for the tree models)
    dfc['Age'] = dfc["Age"].fillna(-1)
    dfc['Fare'] = dfc["Fare"].fillna(-1)

    # Simple feature engineering (combining two variables)
    dfc['FamilySize'] = dfc['SibSp'] + dfc['Parch'] + 1

    # Simple feature engineering (converting to boolean)
    # NaN cabins are floats, real cabin codes are strings
    dfc['Has_Cabin'] = dfc["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
    dfc = dfc.drop(["Cabin"], axis=1)

    # "Stupid" feature engineering - apply length
    dfc['Name_length'] = dfc['Name'].apply(len)
    dfc = dfc.drop(["Name"], axis=1)
    dfc['Ticket_length'] = dfc['Ticket'].apply(len)
    dfc = dfc.drop(["Ticket"], axis=1)

    # 1-hot encoding - different options are encoded as booleans
    # ie. 1 categorical - become 3: 0-1 features.
    dfc['Embarked_Q'] = dfc['Embarked'].apply(lambda x: 1 if x == "Q" else 0)
    dfc['Embarked_S'] = dfc['Embarked'].apply(lambda x: 1 if x == "S" else 0)
    dfc['Embarked_C'] = dfc['Embarked'].apply(lambda x: 1 if x == "C" else 0)
    dfc = dfc.drop(["Embarked"], axis=1)

    return dfc


clean_train = clean(train)
clean_test = clean(test)
# -

#display(pd.DataFrame(clean_test.isna().mean() ,columns=["is na fraction"]))

# +
target = "Survived"
y = clean_train[target]
X = clean_train.drop([target], axis=1)

# Split data in train and validation
seed = 42
test_size = 0.7
X_train, X_val, y_train, y_val = sklearn.model_selection.train_test_split(
    X, y, random_state=seed, test_size=test_size)
# -

# # ML

# +
# xgboost baseline with default parameters
model_xgboost = xgboost.sklearn.XGBClassifier()
model_xgboost.fit(X_train, y_train);
# -

# # Hyper parameter tuning

# ## xgboost

# +
#####
from sklearn.model_selection import GridSearchCV

# BUGFIX: the grid previously also searched 'min_sample_split', which is not
# an XGBoost parameter (that name belongs to sklearn trees, and even there it
# is 'min_samples_split') — it was silently ignored while quadrupling the
# number of fits.  Dropped from both the grid and the base estimator.
xgb_param_grid = {
    'n_estimators': [100, 200, 300],
    'max_depth': [2, 3, 5, 8],
    "learning_rate": [0.05, 0.1, 0.5]
}

model_xgboost_dummy = xgboost.sklearn.XGBClassifier(learning_rate=0.1)
grid_search_xgb = GridSearchCV(estimator=model_xgboost_dummy,
                               param_grid=xgb_param_grid,
                               cv=5, n_jobs=-1, verbose=2)
grid_search_xgb.fit(X_train, y_train)

# +
# Refit a fresh model on the best hyper-parameters found
best_grid = grid_search_xgb.best_params_
display(best_grid)
model_xgboost_best = xgboost.sklearn.XGBClassifier(**best_grid)
model_xgboost_best.fit(X_train, y_train);
# -

# # Eval ML

# +
models = {
    "model_xgboost_best": model_xgboost_best
}

# Report train vs validation accuracy for each model
for name, model in zip(models.keys(), models.values()):
    acc = sklearn.metrics.accuracy_score(
        y_true=y_val,
        y_pred=model.predict(X_val)
    )
    acc_train = sklearn.metrics.accuracy_score(
        y_true=y_train,
        y_pred=model.predict(X_train)
    )
    print(name, " | train: ", round(acc_train, 3), " | test: ", round(acc, 3))
# -

# # save the model

# +
# save the model to disk
filename = metadata["basepath"] + 'model_xgboost_best.sav'
joblib.dump(model_xgboost_best, filename)

# some time later...
# load the model from disk
#loaded_model = joblib.load(filename)

# +
# old predict for kaggle
# passengerid
# id = "PassengerId"
# out = pd.DataFrame(data = test[id], columns = [id])

# # target
# out_target = model_xgboost_best.predict(clean_test)
# out[target] = pd.DataFrame(out_target
#                            ,columns = [target]
#                            ,dtype=np.int32
#                            )

# # put them out
# outfile = metadata["basepath"] + "output_xgboost.csv"
# out.to_csv(path_or_buf = outfile,
#            index = False)
# -

# BUGFIX: the notebook previously ended with `model_xgboost_best.dtypes`,
# which raises AttributeError — XGBClassifier has no `.dtypes` (that is a
# DataFrame attribute).  Removed; inspect e.g. clean_train.dtypes instead.
08_docker_deploy_ml/train_notebook.ipynb