code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <NAME> # # Objective: # Dataset: # # Status: # # Highlevel Abstraction: Estimators and Transformers # DataFrame->Transformer(conversion of dataframes)->Estimator(ML algos are all estimators). An algo that fit on dataframe to produce Tranformer (change dataframes)... # # Pipeline: Chains of estimators and transformers to form a machine learning workflow # Chains a series of operations to be performed on DataFrames # Working with Spark ML Libraries # # which means no sc but SparkSession which encapsulates spark context sql context and all other context available with spark # # + from pyspark.sql import SparkSession sp=SparkSession\ .builder\ .appName('Predicting the grape variety from wine characteristic')\ .getOrCreate() rawData=spark.read\ .format('csv')\ .option('header','false')\ .load("/home/titan/dataset/ML_JRVI/wine.data") # - rawData dataset=rawData.toDF('Label', 'Alcohol', 'MalicAcid', 'Ash', 'AshAlkalinity', 'Magnesium', 'TotalPhenols', 'Flavanoids', 'NonFlavanoidPhenols', 'Proanthocyanins', 'ColorIntensity', 'Hue', 'OD', 'Proline') dataset dataset.take(3) dataset.show(5) # + from pyspark.ml.linalg import Vectors def vectorize(data): return data.rdd.map(lambda x: [x[0],Vectors.dense(x[1:])]).toDF(['label','features']) #note the mention of '.rdd' logically incoherent #dense vector opposite of spars vector... sparse representation vs dense representation # - vectorisedData=vectorize(dataset) type(vectorisedData) vectorisedData.take(5) vectorisedData.show(5) # + # converting categorical data to from pyspark.ml.feature import StringIndexer labelIndexer=StringIndexer (inputCol='label', outputCol='indexedLabel') # - indexedData=labelIndexer.fit(vectorisedData).transform(vectorisedData) indexedData.take(2) indexedData indexedData.select('label').distinct().show() indexedData.select('indexedLabel').distinct().show() (trainingData,testData)=indexedData.randomSplit([0.8,0.2]) from pyspark.ml.classification import DecisionTreeClassifier dtree= DecisionTreeClassifier(labelCol='indexedLabel', featuresCol='features', impurity='gini', maxDepth=3, ) # + # help(DecisionTreeClassifier) # - model1=dtree.fit(trainingData) # + from pyspark.ml.evaluation import MulticlassClassificationEvaluator evaluator=MulticlassClassificationEvaluator(labelCol='indexedLabel', predictionCol='prediction', metricName='f1') # - transformed_data=model1.transform(testData) transformed_data.show(5) print(evaluator.getMetricName(),'accuracy:', evaluator.evaluate(transformed_data))
ML_Spark_wine_data_.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline from __future__ import division, print_function, unicode_literals import numpy as np import matplotlib.pyplot as plt # # Data # height (cm) X = np.array([[147, 150, 153, 158, 163, 165, 168, 170, 173, 175, 178, 180, 183]]).T # weight (kg) y = np.array([[ 49, 50, 51, 54, 58, 59, 60, 62, 63, 64, 66, 67, 68]]).T plt.scatter(X, y) plt.xlabel('Height (cm)') plt.ylabel('Weight (kg)') plt.show() # Weight = w_1 * height + w_0 one = np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X), axis = 1) Xbar A = np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, y) w = np.dot(np.linalg.pinv(A), b) print('w = ', w) w_0 = w[0][0] w_1 = w[1][0] x0 = np.linspace(145, 185, 2) y0 = w_0 + w_1*x0 plt.plot(X, y, 'ro') plt.plot(x0, y0) # # Sklearn # + from sklearn import datasets, linear_model regr = linear_model.LinearRegression(fit_intercept=True) regr.fit(X, y) print ('coef_', regr.coef_) print ('intercept_', regr.intercept_) # - y_pred = regr.predict(X) plt.plot(y, 'ro') plt.plot(y_pred, 'g')
Linear Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Comparing CCA Variants # # This tutorial shows a comparison of Canonical Correlation Analysis (CCA), # Kernel CCA (KCCA) with two different types of kernel, and Deep CCA (DCCA). # CCA is equivalent to KCCA with a linear kernel. Each learns kernels suitable # for different situations. The point of this tutorial is to illustrate, in toy # examples, the rough intuition as to when such methods work well and find # highly correlated projections. # # The simulated latent data has two signal dimensions draw from independent # Gaussians. Two views of data were derived from this. # # - View 1: The latent data. # - View 2: A transformation of the latent data. # # To each view, two additional independent Gaussian noise dimensions were added. # # Each 2x2 grid of subplots in the figure corresponds to a transformation and # either the raw data or a CCA variant. The x-axes are the data from view 1 # and the y-axes are the data from view 2. Plotted are the correlations between # the signal dimensions of the raw views and the top two components of each # view after a CCA variant transformation. Linearly correlated plots on the # diagonals of the 2x2 grids indicate that the CCA method was able to # successfully learn the underlying functional relationship between the two # views. # # + # Author: <NAME> # License: MIT from mvlearn.embed import CCA, KMCCA, DCCA from mvlearn.datasets import make_gaussian_mixture import numpy as np import matplotlib.pyplot as plt import matplotlib import seaborn as sns import warnings warnings.filterwarnings("ignore") # GMM settings n_samples = 200 centers = [[0, 0], [0, 0]] covariances = 4*np.array([np.eye(2), np.eye(2)]) transforms = ['linear', 'poly', 'sin'] Xs_train_sets = [] Xs_test_sets = [] for transform in transforms: Xs_train, _ = make_gaussian_mixture( n_samples, centers, covariances, transform=transform, noise=0.25, noise_dims=2, random_state=41) Xs_test, _, latents = make_gaussian_mixture( n_samples, centers, covariances, transform=transform, noise=0.25, noise_dims=2, random_state=42, return_latents=True) Xs_train_sets.append(Xs_train) Xs_test_sets.append(Xs_test) # Plotting parameters labels = latents[:, 0] cmap = matplotlib.colors.ListedColormap( sns.diverging_palette(240, 10, n=len(labels), center='light').as_hex()) cmap = 'coolwarm' method_labels = \ ['Raw Views', 'CCA', 'Polynomial KCCA', 'Gaussian KCCA', 'DCCA'] transform_labels = \ ['Linear Transform', 'Polynomial Transform', 'Sinusoidal Transform'] input_size1 = Xs_train_sets[0][0].shape[1] input_size2 = Xs_train_sets[0][1].shape[1] outdim_size = min(Xs_train_sets[0][0].shape[1], 2) layer_sizes1 = [256, 256, outdim_size] layer_sizes2 = [256, 256, outdim_size] methods = [ CCA(regs=0.1, n_components=2), KMCCA(kernel='poly', regs=0.1, kernel_params={'degree': 2, 'coef0': 0.1}, n_components=2), KMCCA(kernel='rbf', regs=0.1, kernel_params={'gamma': 1/4}, n_components=2), DCCA(input_size1, input_size2, outdim_size, layer_sizes1, layer_sizes2, epoch_num=400) ] fig, axes = plt.subplots(3 * 2, 5 * 2, figsize=(22, 12)) sns.set_context('notebook') for r, transform in enumerate(transforms): axs = axes[2 * r:2 * r + 2, :2] for i, ax in enumerate(axs.flatten()): dim2 = int(i / 2) dim1 = i % 2 ax.scatter( Xs_test_sets[r][0][:, dim1], Xs_test_sets[r][1][:, dim2], cmap=cmap, c=labels, ) ax.set_xticks([], []) ax.set_yticks([], []) if dim1 == 0: ax.set_ylabel(f"View 2 Dim {dim2+1}", fontsize=14) if dim1 == 0 and dim2 == 0: ax.text(-0.4, -0.1, transform_labels[r], transform=ax.transAxes, fontsize=22, rotation=90, verticalalignment='center') if dim2 == 1 and r == len(transforms)-1: ax.set_xlabel(f"View 1 Dim {dim1+1}", fontsize=14) if i == 0 and r == 0: ax.set_title(method_labels[r], {'position': (1.11, 1), 'fontsize': 22}) for c, method in enumerate(methods): axs = axes[2*r: 2*r+2, 2*c+2:2*c+4] Xs = method.fit(Xs_train_sets[r]).transform(Xs_test_sets[r]) for i, ax in enumerate(axs.flatten()): dim2 = int(i / 2) dim1 = i % 2 ax.scatter( Xs[0][:, dim1], Xs[1][:, dim2], cmap=cmap, c=labels, ) if dim2 == 1 and r == len(transforms)-1: ax.set_xlabel(f"View 1 Dim {dim1+1}", fontsize=16) if i == 0 and r == 0: ax.set_title(method_labels[c + 1], {'position': (1.11, 1), 'fontsize': 22}) ax.axis("equal") ax.set_xticks([], []) ax.set_yticks([], []) plt.tight_layout() plt.subplots_adjust(wspace=0.15, hspace=0.15) plt.show()
_downloads/a83b75c7206ec6bf4e8429c28bfdb6e0/plot_cca_comparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Serving Models with GCP Vertex AI Prediction and NVIDIA Triton Server # # This notebook demonstrates how to serve example models using NVIDIA Triton Inference Server and Vertex AI Prediction. # The notebook compiles prescriptive guidance for the following tasks: # # 1. Download Triton sample models # 2. Registering and deploying the models with NGC Triton images into Vertex Prediction Models and Endpoints. # 3. Getting online predictions from the deployed models. # # To fully benefit from the content covered in this notebook, you should have a solid understanding of key Vertex AI Prediction concepts like models, endpoints, and model deployments. We strongly recommend reviewing [Vertex AI Prediction documentation](https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions) before proceeding. # # ### Triton Inference Server Overview # # [Triton Inference Server](https://github.com/triton-inference-server/server) provides an inference solution optimized for both CPUs and GPUs. Triton can run multiple models from the same or different frameworks concurrently on a single GPU or CPU. In a multi-GPU server, it automatically creates an instance of each model on each GPU to increase utilization without extra coding. It supports real-time inferencing, batch inferencing to maximize GPU/CPU utilization, and streaming inference with built-in support for audio streaming input. It also supports model ensembles for use cases that require multiple models to perform end-to-end inference. # # The following figure shows the Triton Inference Server high-level architecture. # # <img src="triton_nvidia.png" style="width:70%"/> # # - The model repository is a file-system based repository of the models that Triton will make available for inference. # - Inference requests arrive at the server via either HTTP/REST or gRPC and are then routed to the appropriate per-model scheduler. # - Triton implements multiple scheduling and batching algorithms that can be configured on a model-by-model basis. # - The backend performs inference using the inputs provided in the batched requests to produce the requested outputs. # # Triton server provides readiness and liveness health endpoints, as well as utilization, throughput, and latency metrics, which enable the integration of Triton into deployment environments, such as Vertex AI Prediction. # # Refer to [Triton Inference Server Architecture](https://github.com/triton-inference-server/server/blob/main/docs/architecture.md) for more detailed information. # # ### Triton Inference Server on Vertex AI Prediction # # In this section, we describe the deployment of Triton Inference Server on Vertex AI Prediction. # # Triton Inference Server runs inside a container published by NVIDIA GPU Cloud - [NVIDIA Triton Inference Server Image](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver). NVIDIA and GCP Vertex team collabrate and adds packages and configurations to align Triton with Vertex AI [requirements for custom serving container images](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements). # # An ML model to be served by Triton is registered with Vertex AI Prediction as a `Model`. The `Model`'s metadata reference a location of the ensemble artifacts in Google Cloud Storage and the custom serving container and its configurations. # # Triton loads the models and exposes inference, health, and model management REST endpoints using [standard inference protocols](https://github.com/kserve/kserve/tree/master/docs/predict-api/v2). While deploying to Vertex, Triton will recognize Vertex environment and adopt Vertex AI Prediction protocol for [health checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health) and predictions. # # To invoke the model through the Vertex AI Prediction endpoint you need to format your request using a [standard Inference Request JSON Object](https://github.com/kserve/kserve/blob/master/docs/predict-api/v2/required_api.md#inference) or a [Inference Request JSON Object with a binary extension](https://github.com/triton-inference-server/server/blob/main/docs/protocol/extension_binary_data.md) and submit a request to Vertex AI Prediction [REST rawPredict endpoint](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/rawPredict). You need to use the `rawPredict` rather than `predict` endpoint because inference request formats used by Triton are not compatible with the Vertex AI Prediction [standard input format](https://cloud.google.com/vertex-ai/docs/predictions/online-predictions-custom-models#formatting-prediction-input). # # # ### Notebook flow # # This notebook assumes that you have access to GCP project and Vertex AI Prediction, it is highly recommended to register with NVIDIA GPU Cloud. # # As you walk through the notebook you will execute the following tasks: # # - Configure the notebook environment settings, including GCP project, compute region, and the GCS locations of a sample model # - Register the model as a Vertex Prediction model with Triton container image # - Create a Vertex Prediction endpoint # - Deploy the model endpoint # - Invoke the deployed ensemble model # # ## 1, Setup # # In this section of the notebook you configure your environment settings, including a GCP project, a Vertex AI compute region, and a Vertex AI staging GCS bucket. # You also set the locations of downloaded sample model and a set of constants that are used to create names and display names of Vertex AI Prediction resources. # # First, fetch the model from [Triton Samples Model Repository](https://github.com/triton-inference-server/server/blob/main/docs/examples/fetch_models.sh), and copy the simple model into a GCS bucket - `gs://triton_model_repository/models/`. # # Second, push a Triton container image into GCR (`nvcr.io/nvidia/tritonserver:21.12-py3` -> `gcr.io/{PROJECT_ID}/{IMAGE_NAME}`) # # Make sure to update the below cells with the values reflecting your environment. from google.cloud import aiplatform as vertex_ai # Set the below constants to your project id, a compute region for Vertex AI and a GCS bucket that will be used for Vertex AI staging and storing exported model artifacts. PROJECT_ID = 'merlin-on-gcp' # Change to your project. REGION = 'us-central1' # Change to your region. STAGING_BUCKET = 'triton_model_repository' # Change to your bucket. # `MODEL_ARTIFACTS_REPOSITORY` is a root GCS location where the Triton model artifacts will be stored. MODEL_ARTIFACTS_REPOSITORY = f'gs://{STAGING_BUCKET}/models' # The following set of constants will be used to create names and display names of Vertex Prediction resources like models, endpoints, and model deployments. # + MODEL_NAME = 'simple' MODEL_VERSION = 'v00' MODEL_DISPLAY_NAME = f'triton-{MODEL_NAME}-{MODEL_VERSION}' ENDPOINT_DISPLAY_NAME = f'endpoint-{MODEL_NAME}-{MODEL_VERSION}' IMAGE_NAME = 'triton-deploy' IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}" # - # ## 2. Initialize Vertex AI SDK vertex_ai.init( project=PROJECT_ID, location=REGION, staging_bucket=STAGING_BUCKET ) # ## 3. Uploading the model and its metadata to Vertex Models. # # Refer to [Use a custom container for prediction guide](https://cloud.google.com/vertex-ai/docs/predictions/use-custom-container) for detailed information about creating Vertex AI Prediction `Model` resources. # + serving_container_args = ['tritonserver'] model = vertex_ai.Model.upload( display_name=MODEL_DISPLAY_NAME, serving_container_image_uri=IMAGE_URI, serving_container_args=serving_container_args, artifact_uri=MODEL_ARTIFACTS_REPOSITORY, sync=True ) model.resource_name # - # ## 4. Deploying the model to Vertex AI Prediction. # # Deploying a Vertex AI Prediction `Model` is a two step process. First you create an endpoint that will expose an external interface to clients consuming the model. After the endpoint is ready you can deploy multiple versions of a model to the endpoint. # # Refer to [Deploy a model using the Vertex AI API guide](https://cloud.google.com/vertex-ai/docs/predictions/deploy-model-api) for more information about the APIs used in the following cells. # ### Create the Vertex Endpoint # # Before deploying the model you need to create a Vertex AI Prediction endpoint. endpoint = vertex_ai.Endpoint.create( display_name=ENDPOINT_DISPLAY_NAME ) # ### Deploy the model to Vertex Prediction endpoint # # After the endpoint is ready, you can deploy your model to the endpoint. You will run the Triton Server on a GPU node equipped with the NVIDIA Tesla T4 GPUs. # # Refer to [Deploy a model using the Vertex AI API guide](https://cloud.google.com/vertex-ai/docs/predictions/deploy-model-api) for more information. # + traffic_percentage = 100 machine_type = "n1-standard-4" accelerator_type="NVIDIA_TESLA_T4" accelerator_count = 1 min_replica_count = 1 max_replica_count = 2 model.deploy( endpoint=endpoint, deployed_model_display_name=MODEL_DISPLAY_NAME, machine_type=machine_type, min_replica_count=min_replica_count, max_replica_count=max_replica_count, traffic_percentage=traffic_percentage, accelerator_type=accelerator_type, accelerator_count=accelerator_count, sync=True, ) endpoint.name # - # ## 5. Invoking the model # # To invoke the model through Vertex AI Prediction endpoint you need to format your request using a [standard Inference Request JSON Object](https://github.com/kserve/kserve/blob/master/docs/predict-api/v2/required_api.md#inference) or a [Inference Request JSON Object with a binary extension](https://github.com/triton-inference-server/server/blob/main/docs/protocol/extension_binary_data.md) and submit a request to Vertex AI Prediction [REST rawPredict endpoint](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/rawPredict). You need to use the `rawPredict` rather than `predict` endpoint because inference request formats used by Triton are not compatible with the Vertex AI Prediction [standard input format](https://cloud.google.com/vertex-ai/docs/predictions/online-predictions-custom-models#formatting-prediction-input). # # The below cell shows a sample request body formatted as a [standard Inference Request JSON Object](https://github.com/kserve/kserve/blob/master/docs/predict-api/v2/required_api.md#inference). # # + import json payload = { "id": "0", "inputs": [ { "name": "INPUT0", "shape": [ 1, 16 ], "datatype": "INT32", "parameters": {}, "data": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ] }, { "name": "INPUT1", "shape": [ 1, 16 ], "datatype": "INT32", "parameters": {}, "data": [ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 ] } ] } with open('simple.json', 'w') as f: json.dump(payload, f) # - # You can invoke the Vertex AI Prediction `rawPredict` endpoint using any HTTP tool or library, including `curl`. # + uri = f'https://{REGION}-aiplatform.googleapis.com/v1/projects/{PROJECT_ID}/locations/{REGION}/endpoints/{endpoint.name}:rawPredict' # ! curl -X POST \ # -H "Authorization: Bearer $(gcloud auth print-access-token)" \ # -H "Content-Type: application/json" \ # {uri} \ # -d @simple.json # -
vertex-ai-samples/prediction/triton_inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.6.4 # language: julia # name: julia-0.6 # --- using CSV, FileIO data = load("../data/impvol_data.jld2") country_names = CSV.read("../experiments/baseline/output_table.csv")[:country_names] function US_price_index(data, experiment) p_sectoral = data["p_sectoral_data"][1,end,:,:] parameters = load("../experiments/$experiment/common_parameters.jld2")["parameters"] nu = parameters[:nu_njt][1,1,:,:] sigma = parameters[:sigma] return sum(nu .* p_sectoral .^ (1-sigma), 1) .^ (1/(1-sigma)) end cpi = US_price_index(data, "baseline") # to conform with data structures us_cpi = reshape(cpi[end,:] ./ cpi[end,1], (1,1,1,36)) dollar_price_index = data["pwt"] .* us_cpi real_GDP_data = sum(data["va"], 3) ./ dollar_price_index include("../output.jl") using ImpvolOutput parameters = load("../experiments/baseline/common_parameters.jld2")["parameters"] function volatility(scenario) results = load("../experiments/$scenario/results.jld2")["results"] real_GDP_model = sum(ImpvolOutput.make_series(results, :real_GDP), 3) return ImpvolOutput.calculate_volatilities(real_GDP_model, parameters, true)[:].^0.5 end data_volatility = ImpvolOutput.calculate_volatilities(real_GDP_data, parameters, true)[:].^0.5 model_volatility = volatility("baseline/actual") CES05_volatility = volatility("CES0.5/actual") [data_volatility model_volatility CES05_volatility] function regression(x, y) X = [ones(length(x)) x] beta = X \ y R2 = var(X * beta) / var(y) return (beta, R2, X*beta) end using Plots import GR fm = x->repr(round(x, 3)) function plot_model_data(scenario, fname) model_volatility = volatility(scenario) beta, R2, fitted = regression(data_volatility, model_volatility) if beta[2]>0 label = "y = $(fm(beta[1]))+$(fm(beta[2]))x\nR2 = $(fm(R2)), rho = $(fm(R2^.5))" else label = "y = $(fm(beta[1]))-$(fm(-beta[2]))x\nR2 = $(fm(R2)), rho = $(fm(-R2^.5))" end plot(data_volatility, fitted, label=label, xlabel="Data volatility (standard deviation)", size=(800,500)) scatter!(data_volatility, model_volatility, label="", ylabel="Model volatility (standard deviation)") x = 0.002 for i=1:length(data_volatility) annotate!(data_volatility[i], model_volatility[i]+x, text(country_names[i], :black, :center, 10)) end savefig(fname) end plot_model_data("baseline/actual", "../output/Figure2.pdf") plot_model_data("CES0.5/actual", "../output/CES0.5-model-data.pdf") model_volatility = volatility("baseline/actual") # without china beta, R2 = regression([data_volatility[1:4]; data_volatility[6:end]], [model_volatility[1:4]; model_volatility[6:end]]) correlation = R2^.5 # correlation of variances beta, R2 = regression(data_volatility.^2, model_volatility.^2) correlation = R2^.5 # correlation of variances # without china beta, R2 = regression([data_volatility[1:4]; data_volatility[6:end]].^2, [model_volatility[1:4]; model_volatility[6:end]].^2) correlation = R2^.5 plot_model_data("CES1.5/actual", "../output/CES2-model-data.pdf")
notebooks/Create Figure 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys import random from random import randrange import configparser import os import pandas as pd import numpy as np from datetime import datetime, timedelta import json import csv from sklearn.externals import joblib # + def load_model_predict_usertype(user_data,bat_type,datafolder_model): ''' Load and evaluate the models ''' model_path = '{}\\user{}_model.sav'.format(datafolder_model,str(bat_type)) knn = joblib.load(model_path) #Train the model predictions = knn.predict(user_data) return predictions def retrieve_user_history(u_id, bat_type, datafolder_ge): data_ge_path = '{}\\ML_UserType_data_Bat{}.csv'.format(datafolder_ge,str(bat_type)) bat_df = pd.read_csv(data_ge_path) user_history = bat_df[bat_df.u_id==u_id] # print('actual type',user_history.u_type.iloc[0]) user_history = user_history.drop(columns=['u_type','u_id']) return user_history def retrieve_users_bat_type(u_id, datafolder_ge): data_ge_path = '{}\\user_list.csv'.format(datafolder_ge) user_df = pd.read_csv(data_ge_path) bat_type = user_df[user_df['u_id'] ==u_id].Battery_Type.iloc[0] bat_type = int(bat_type.strip('bat')) return bat_type def read_config(): config = configparser.ConfigParser() config_fileName = ('config.ini') config.read(config_fileName) datafolder_model =config['MODEL']['MODEL_FOLDER'] datafolder_ge =config['DATASET']['DATA_FOLDER_GE'] return datafolder_model, datafolder_ge def main(arg): datafolder_model, datafolder_ge = read_config() user_id = arg bat_type = retrieve_users_bat_type(user_id, datafolder_ge) user_data = retrieve_user_history(user_id, bat_type, datafolder_ge) user_type = load_model_predict_usertype(user_data, bat_type, datafolder_model) if user_type== 1: utype='LOW' elif user_type == 2: utype='MID' else: utype='HIGH' return utype # + ### retrieve ML model prediction of the user type # + if __name__ == "__main__": # main(sys.argv[1]) k = main(1700) print(k) # -
8_usertype_predictor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![imagen](../../imagenes/python.jpg) # # Python Basics II # # Ya hemos visto cómo declarar variables, qué tipos hay, y otras funcionalidades importantes de Python como sus flujos de ejecución o las formas que tenemos de comentar el código. En este Notebook aprenderás a realizar **operaciones con tus variables** y descubrirás las colecciones mediante uno de los objetos más usados en Python: **las listas**. # # 1. [Operaciones aritméticas](#1.-Operaciones-aritméticas) # 2. [Operaciones comparativas](#2.-Operaciones-comparativas) # 3. [Operaciones con booleanos](#3.-Operaciones-con-booleanos) # 4. [Funciones *Built-in*](#4.-Funciones-Built-in) # 5. [Métodos](#5.-Métodos) # 6. [Listas](#6.-Listas) # 7. [Resumen](#7.-Resumen) # ## 1. Operaciones aritméticas # En el Notebook *Python Basics I* ya vimos por encima las principales operaciones aritméticas en Python. Las recordamos: # * Sumar: `+` # * Restar: `-` # * Multiplicar: `*` # * Dividir: `/` # * Elevar: `**` # * Cociente division: `//` # * Resto de la división: `%` # <table align="left"> # <tr><td width="80"><img src="../../imagenes/ejercicio.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>Ejercicio de operaciones aritméticas</h3> # # # <ol> # <li>Declara una variable int</li> # <li>Declara otra variable float.</li> # <li>Suma ambas variables. ¿Qué tipo de dato es el resultado?</li> # <li>Multiplica ambas variables</li> # <li>Eleva una variable a la potencia de la otra</li> # <li>Calcula el resto de dividir 12/5</li> # </ol> # # </td></tr> # </table> var_int = 10 var_float = 10. var_sum = var_int + var_float print(var_sum) var_prod = var_int * var_float print(var_prod) var_pot = var_int ** var_float print(var_pot) print(15 % 5) # ### Propiedad conmutativa, asociativa, distributiva y el paréntesis # Si queremos concatenar varias operaciones, ten siempre en cuenta las propiedades matemáticas de la multiplicación # + print("Conmutativa") print(2 * 3) print(3 * 2) print("\nAsociativa") # Recuerda que "\n" se usa para que haya un salto de linea en el output. print(2 * (3 + 5)) print(2 * 3 + 2 * 5) print("\nDistributiva") print((3 * 2) * 5) print(3 * (2 * 5)) print("\nEl Orden de operaciones se mantiene. Siempre podemos usar paréntesis") print(2 * (2 + 3) * 5) print((2 * 2 + 3 * 5)/(4 + 7)) # - # ### Operaciones más complejas # Si salimos de las operaciones básicas de Python, tendremos que importar módulos con más funcionalidades en nuestro código. Esto lo haremos mediante la sentencia `import math`. `math` es un módulo con funciones ya predefinidas, que no vienen por defecto en el núcleo de Python. De esta forma será posible hacer cálculos más complejos como: # # * Raíz cuadrada # * Seno/Coseno # * Valor absoluto # *... # # El módulo es completísimo y si estás buscando alguna operación matemática, lo más seguro es que ya esté implementada. Te dejo por aquí el [link a la documentación del módulo.](https://docs.python.org/3/library/math.html). import math print(math.sqrt(25)) print(math.fabs(-4)) print(math.acos(0)) # Como en todos los lenguajes de programación, suele haber una serie de componentes básicos (variables, operaciones aritméticas, tipos de datos...) con los que podemos hacer muchas cosas. Ahora bien, si queremos ampliar esas funcionalidades, se suelen importar nuevos módulos, con funciones ya hechas de otros usuarios, como en el caso del módulo `math`. Veremos esto de los módulos más adelante. # <table align="left"> # <tr><td width="80"><img src="../../imagenes/error.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>ERRORES Dividir por cero</h3> # # </td></tr> # </table> # Cuidado cuando operamos con 0s. Las indeterminaciones y valores infinitos suponen errores en el código. Por suerte, la descripción de estos errores es bastante explícita, obteniendo un error de tipo `ZeroDivisionError` 4/0 # Hay valores que se salen del dominio de algunas funciones matemáticas, como es el caso de las raices de números negativos math.sqrt(-10) # <table align="left"> # <tr><td width="80"><img src="../../imagenes/ejercicio.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>Ejercicio de operaciones con math</h3> # # Consulta la documentación de math para resolver este ejercicio # <ol> # <li>Calcula el valor absoluto de -25. Usa fabs</li> # <li>Redondea 4.7 a su enero más bajo. Usa floor</li> # <li>Redondea 4.3 a su enero más alto. Usa ceil</li> # <li>El número pi</li> # <li>¿Cuál es el área de un círculo de radio 3?</li> # </ol> # # </td></tr> # </table> # + import math print(math.fabs(-25)) print(math.floor(4.7)) print(math.ceil(4.7)) print(math.pi) print(math.pi * 3**2) # - # ## 2. Operaciones comparativas # Es bastante intuitivo comparar valores en Python. La sintaxis es la siguiente: # * `==`: Igualdad. No es un `=`. Hay que diferenciar entre una comparativa, y una asignación de valores # * `!=`: Desigualdad # * `>`: Mayor que # * `<`: Menor que # * `>=`: Mayor o igual que # * `<=`: Menor o igual que # + # Asignacion asign = 1 print(asign) # Comparacion print(asign == 5) print(asign) # - # En la asignación estamos diciendole a Python que la variable `asign` vale 1, mientras que en la comparación, estamos preguntando a Python si `a` equivale a 5. Como vale 1, nos devuelve un `False` # + print("AAA" == "BBB") print("AAA" == "AAA") print(1 == 1) print(1 == 1.0) # True, a pesar de que sean de distinto tipo print(67 != 93) print(67 > 93) print(67 >= 93) print(True == 1) # - # <table align="left"> # <tr><td width="80"><img src="../../imagenes/error.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>ERRORES en comparativas</h3> # # </td></tr> # </table> # Este tipo de errores son muy comunes, pues es muy habitual comparar peras con manzanas. Cuando se trata de una igualdad (`==`), no suele haber problemas, ya que si las variables son de distinto tipo, simplemente es `False`. Lo ideal sería que Python nos avisase de estas cosas porque realmente lo estamos haciendo mal, no estamos comparando cosas del mismo tipo print(True == 6) print(True == "verdadero") print(6 == "cadena") # Obtenemos un TypeError cuando la comparativa es de > o < 1.0 > "texto" # ## 3. Operaciones con booleanos # Todas las operaciones que realizabamos en el apartado anterior devolvían un tipo de dato concreto: un booleano. `True` o `False`. Podemos operar este tipo de datos mediante el *álgebra de boole*. Las operaciones más comunes son: # * **and**: Se tienen que cumplir ambas condiciones para que sea un `True` # * **or**: Basta que se cumpla al menos una condicion para que sea `True` # * **not**: Lo contrario de lo que haya # # En la ppt de documentación tienes unas tablas donde podrás ver los resultados de `and`, `or`, `not`, dependiendo de sus inputs. # # Veamos un ejemplo práctico para aclarar estos conceptos. Imaginemos que queremos comprar un ordenador, pero nos cuesta decidirnos. Eso sí, tenemos claras las siguentes condiciones a la hora de elegir # * La RAM me vale que tenga 16, 32 o 64 GB # * En cuanto al procesador y disco duro, la combinación que mejor me viene es un i3 con 500GB de disco. # * Precio: que no pase de los 800 € # + # Primer ordenador ram1 = 32 process1 = "i5" disco1 = 500 precio1 = 850 # Segundo ordenador ram2 = 8 process2 = "i5" disco2 = 500 precio2 = 600 # Tercer ordenador ram3 = 32 process3 = "i3" disco3 = 500 precio3 = 780 # - # Veamos cómo implemento esto mediante operaciones booleanas # + # Primero, calculamos el valor de estas condiciones por separado cond_ram1 = (ram1 == 16 or ram1 == 32 or ram1 == 64) # OR: me vale al menos un True para que se cumpla esta condicion cond_process1 = (process1 == "i3" and disco1 == 500) # AND: se tienen que cumplir ambas cond_precio1 = (precio1 <= 800) print(cond_ram1) print(cond_process1) print(cond_precio1) # Si lo ponemos todo junto en una línea, tendriamos cond_tot1 = cond_ram1 and cond_process1 and cond_precio1 print("Resultado de si me encaje el ordenador 1: ", cond_tot1) # - # El primer ordenador cumple el requisito de ram, pero no los de precio y procesador/disco. Veamos los otros dos si los cumplen # + cond_tot2 = (ram2 == 16 or ram2 == 32 or ram2 == 64) and (process2 == "i3" and disco2 == 500) and (precio2 <= 800) cond_tot3 = (ram3 == 16 or ram3 == 32 or ram3 == 64) and (process3 == "i3" and disco3 == 500) and (precio3 <= 800) print("Resultado de si me encaje el ordenador 2: ", cond_tot2) print("Resultado de si me encaje el ordenador 3: ", cond_tot3) # - # ¡Bingo! El tercer ordenador cumple todas las condiciones para ser mi futura compra. Verás en próximos notebooks que esto se puede hacer todavía más sencillo mediante bucles y funciones. # <table align="left"> # <tr><td width="80"><img src="../../imagenes/error.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>ERRORES varios</h3> # # </td></tr> # </table> # ¡No me vas a creer cuando te diga que lo mejor que te puede pasar es que te salten errores por pantalla! Si, estos son los errores más fáciles de detectar y puede que también fáciles de corregir ya que tienes la ayuda del descriptivo del error. El problema gordo viene cuando no saltan errores y ves que tu código no lo está haciendo bien. Para ello tendremos que debugear el código y ver paso a paso que está pasando. Lo veremos en notebooks posteriores. De momento corregiremos el código revisandolo a ojo. # # Como ves en el siguiente ejemplo, el resultado del ordenador 3 es `False` cuando debería ser `True`. ¿Por qué? # + cond_tot3 = (ram3 == 16 or ram3 == 32 or ram3 == 64) and (process3 == "i3" and disco3 == 500) and (precio3 >= 800) print("Resultado de si me encaje el ordenador 3: ", cond_tot3) # - # Cuidado cuando tenemos sentencias muy largas, ya que nos puede bailar perfectamente un paréntesis, un `>`, un `and` por un `or`... Hay que andarse con mil ojos. # # Y sobretodo, cuidado con el *copy paste*. Muchas veces, por ahorrar tiempo, copiamos código ya escrito para cambiar pequeñas cosas y hay veces que se nos olvida cambiar otras. Pensamos que está bien, ejecutamos, y saltan errores. Copiar código no es una mala práctica, es más, muchas veces evitamos errores con los nombres de las variables, pero hay que hacerlo con cabeza # <table align="left"> # <tr><td width="80"><img src="../../imagenes/ejercicio.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>Ejercicio de operaciones con booleanos</h3> # # Sin escribir código, ¿Qué valor devuelve cada una de las siguientes operaciones? # <ol> # <li>not (True and False)</li> # <li>False or False or False or False or False or False or True or False or False or False</li> # <li>True or True or True or True or True or False or True or True or True or True</li> # <li>(False and True and True) or (True and True)</li> # </ol> # # </td></tr> # </table> # 1. True # 2. True # 3. True # 4. True # ## 4. Funciones *Built in* # Hay una serie de funciones internas, que vienen en el intérprete de Python. Algunas de las más comunes son: # * **Tipos**: `bool()`, `str()`, `int()`, `float()` # * **Min/Max**: `min()`, `max()` # * **print()** # * **type()** # * **range()** # * **zip()** # * **len()** # * ... # # La sintaxis de la función es: # # ```Python # nombre_funcion(argumentos) # ``` # # Algunas ya las hemos visto. Sin embargo, hay unas cuantas que las iremos descubriendo a lo largo de estos notebooks. Para más detalle, tienes [aquí](https://docs.python.org/3/library/functions.html) todas las funciones *built-in* de la documentación. # # De momento, en lo que se refiere a funciones, vamos a ir trabajando con funciones ya hechas, pero más adelante crearemos nuestras propias funciones. # + # Len se usa para calcular la longitud de una variable. Ya veras que lo usaremos mucho en colecciones print(len("Este string")) # Devuelve el numero de caracteres de la cadena de texto # Funcion max. Tiene tantos argumentos como cantidad de números entre los cuales queramos sacar su valor máximo. print(max(1,2,3,4)) # - # <table align="left"> # <tr><td width="80"><img src="../../imagenes/ejercicio.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>Ejercicio de operaciones con booleanos</h3> # # Busca <a href="https://docs.python.org/3/library/functions.html">en la documentación</a> una función que te sirva para ordenar de manera descendente la siguiente lista # # </td></tr> # </table> temperaturas_de_hoy = [17, 22, 26, 18, 21, 21, 25, 29] sorted(temperaturas_de_hoy, reverse = True) # ## 5. Métodos # Se trata de una propiedad MUY utilizada en programación. Son funciones propias de las variables/objetos, y que nos permiten modificarlos u obtener más información de los mismos. Dependiendo del tipo de objeto, tendremos unos métodos disponibles diferentes. # # Para usar un método se usa la sintaxis `objeto.metodo()`. Ponemos un punto entre el nombre del objeto y el del metodo, y unos paréntesis por si el método necesita de algunos argumentos. **Aunque no necesite de argumentos, los paréntesis hay que ponerlos igualmente.** # # Veamos algunos ejemplos # # ### String # Una variable de tipo string, tiene una serie de métodos que permiten sacarle jugo a la cadena de texto. [Aquí](https://docs.python.org/2.5/lib/string-methods.html) tienes todos los métodos que podemos usar en cadenas de texto # + string_ejemplo = "string en mayusculas" # Para poner un string todo en mayusculas print("Todo mayusculas:", string_ejemplo.upper()) # Para poner un string todo en minusculas print("Todo minusculas:", string_ejemplo.lower()) # Para sustituir caracteres. Dos argumentos (busca este string, sustituyelo por este otro) print("Sustituir m por M:", string_ejemplo.replace("m", "M")) # El replace también es muy útil cuando queremos eliminar caracteres. Sustituimos por vacío print("Eliminar m:", string_ejemplo.replace("m", "")) # Divide el string por un caracter en una LISTA print("Separalo segun el numero de espacios:", string_ejemplo.split(" ")) # Devuelve la posicion del caracter que le pongamos como argumento print("'y' está en la posición:", string_ejemplo.index("y")) # - # Como ves, se pueden hacer muchas cosas en los Strings gracias a sus métodos. Ya verás cómo la cosa se pone más interesante cuando los tipos de los datos sean todavía más complejos. # # Los métodos son una manera de abstraernos de cierta operativa. Convertir todos los caracteres de una cadena a minuscula, puede ser un poco tedioso si no existiese el método `lower()`. Tendríamos que acudir a bucles o programación funcional. # <table align="left"> # <tr><td width="80"><img src="../../imagenes/error.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>ERRORES en métodos</h3> # # </td></tr> # </table> # Cuando un método necesita ciertos argumentos, y no se los proporcionamos string_ejemplo = "string en mayusculas" string_ejemplo.replace() # ## 6. Listas # Se trata de otro de los tipos de datos de Python más usados. Dentro de las colecciones, que veremos más adelante, la lista es la colección que normalmente se le da más uso. **Nos permiten almacenar conjuntos de variables u objetos**, y son elementos de lo más versátiles puesto que podemos almacenar objetos de distintos tipos, modificarlos, eliminarlos, meter listas dentro de listas... Sus dos caractrísticas principales son: # * **Mutables**: una vez se ha creado la lista, se puede modificar # * **Ordenada**: Los elementos tienen un cierto orden, lo que nos permite acceder al elemento que queramos teniendo en cuenta tal orden # # En cuanto a su sintaxis, cuando declaremos la lista simplemente hay que separar cada elemento con comas, y rodearlo todo con corchetes. numeros_favoritos = [3, 6, 1] print(numeros_favoritos) # + # También podemos hacer listas de strings strings_favoritos = ["ponme", "otra"] print(strings_favoritos) # Listas de booleanos. Incluso con operaciones que se ejecutan al crearse la lista bools_favoritos = [True, True, not False, True or False] print(bools_favoritos) # Lista mezclada de elementos mix_list = ["texto", 1, 7, 55.78, True, False] # Listas dentro de listas list_list = [[], 4, "diez", [True, 43, "mas texto"]] # Concatena dos listas lista_a = ['a', 'A'] lista_b = ['b', 'B'] lista_ab = lista_a + lista_b print(lista_ab) # - # **NOTA**: ¿Ves por qué los decimales en Python siempre van con puntos y no con comas? Con las colecciones el intérprete de Python se volvería loco. # # Podemos ver tambien el tipo de la lista type(list_list) # Calcular la longitud de la misma mediante el método *built-in* ya visto: `len()` lista_len = [1,2,3,4] len(lista_len) # Accedemos a los elemenos de la lista mediante corchetes `[]` # # **Importante**. El primer elemento es el 0 lista_index = [4,5,7,8,9] print(lista_index[0]) # ### Metodos en Listas # Para el tipo de objeto lista, también hay una serie de métodos catacterísticos que nos permiten operar con ellas: añadir valores, quitarlos, indexado, filtrado, etc... En [este enlace](https://www.w3schools.com/python/python_ref_list.asp) puedes encontrar todos los métodos que podrás usar con listas. # + asignaturas = ["Mates", "Fisica", "Inglés"] print(asignaturas) # Para agregar nuevos elementos. No hace falta reasignarlo a la lista. Simplemente, se añade al objeto asignaturas. asignaturas.append("Quimica") print(asignaturas) # Obtener el indice de un elemento de la lista print(asignaturas.index("Fisica")) # Eliminar todos los elementos. Fijate que devuelve None, el elemento vacio. print(asignaturas.clear()) # - # <table align="left"> # <tr><td width="80"><img src="../../imagenes/ejercicio.png" style="width:auto;height:auto"></td> # <td style="text-align:left"> # <h3>Ejercicio de listas</h3> # # <ol> # <li>Crea una lista con tus películas favoritas. No te pases de larga!</li> # <li>Imprime por pantalla la longitud de la lista</li> # <li>Añade a esta lista otra lista con tus series favoritas</li> # </ol> # # </td></tr> # </table> # + pelis = ["Ciudadano ejemplar", "El retorno del rey", "Origen"] print(len(pelis)) series = ["Narcos, T1 y T2", "<NAME>", "Juego de Tronos"] print(pelis + series) # - # ## 7. Resumen # + # Operaciones matemáticas print("Operaciones matemáticas") print(4 + 6) print(9*2) print(2 * (3 + 5)) print(10/5) print(10 % 3) print(2**10) # Funciones matemáticas más complejas import math print(math.sqrt(25)) # Operaciones comparativas print("\nOperaciones comparativas") print("AAA" == "BBB") print("AAA" == "AAA") print(1 == 1) print(1 == 1.0) print(67 != 93) print(67 > 93) print(67 >= 93) # Operaciones con booleanos print("\nOperaciones con booleanos") print(True and True and False) print(True or True or False) print(not False) # Funciones builtin print("\nFunciones built-in") string_builtin = "Fin del notebook" print(string_builtin.upper()) print(string_builtin.lower()) print( string_builtin.replace("o", "O")) print(string_builtin.replace("o", "")) # Listas print("\nListas") musica = ["AC/DC", "Metallica", "Nirvana"] musica.append("Queen") print(musica)
Bloque 1 - Ramp-Up/04_Python Basics/05_RESU_Python Basics II.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="YB1fPheo4CWw" # ### Collecting Tweets # - # #### This notebook demonstrates how we collect tweets from Twitter using the Twitter API # + id="HO089WwL4HzQ" import tweepy import pandas as pd import emoji import re import datetime import pytz # - # #### Define the keys # + id="fT5wEWUU7KUV" api_key = "XXXXX" api_secret = "XXXXX" bearer_token = "<PASSWORD>" access_token = "XXXXX" access_token_secret = "XXXXX" # To be replaced by valid keys obtaine # - # #### Get acceess to the API using the keys # + id="8TwxRbJmRpaF" auth = tweepy.OAuthHandler(api_key, api_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) # - # #### Define whose tweets to get and from what date # + candidate_username = 'CANDIDATE_USERNAME' # Twitter username of the candidate (without the '@') start_date = datetime.datetime(%Y, %M, %D, 0,0,0) end_date = datetime.datetime(%Y, %M, %D, 0,0,0) # The time period of the tweets utc = pytz.UTC start_date = utc.localize(start_date) end_date = utc.localize(end_date) #Normalizes the timezone according to the UTC # - # #### Scrape tweets and store the information in lists # + name = [] bio = [] follower_count = [] tweets = [] fav_count = [] rt_count = [] time = [] is_quote = [] in_reply_to = [] tweet_id = [] hashtags = [] is_retweet = [] mentions = [] emojicons = [] # Creates a cursor objects and iterates through it to store tweet text and metadata of the tweet in lists for i in tweepy.Cursor(api.user_timeline, id = candidate_username , tweet_mode = 'extended').items(): if i.created_at > start_date and i.created_at < end_date: #If throws timezone error, try utc.localize(i.created_at) name.append(i.user.name) tweets.append(i.full_text) time.append(i.created_at) bio.append(i.user.description) follower_count.append(i.user.followers_count) fav_count.append(i.favorite_count) rt_count.append(i.retweet_count) is_quote.append(i.is_quote_status) in_reply_to.append(i.in_reply_to_screen_name) tweet_id.append(i.id) is_retweet.append('retweeted_status' in dir(i)) temp_mentions = [] for dicti in i.entities['user_mentions']: temp_mentions.append(dicti['screen_name']) mentions.append(temp_mentions) temp_hashtags = [] for dicti in i.entities['hashtags']: temp_hashtags.append(dicti['text']) hashtags.append(temp_hashtags) for tweet in tweets: de_emojized = emoji.demojize(tweet.replace(':', ' ')) emojis = re.findall(r'(:[^:]*:)', de_emojized) emojicons.append(emojis) # - # #### Create pandas dataframe from the list df = pd.DataFrame({'name':name, 'bio': bio, 'follower_count': follower_count, 'tweet_text':tweets, 'tweet_id':tweet_id, 'fav_count':fav_count, 'rt_count':rt_count, 'in_reply_to': in_reply_to, 'mentions' : mentions, 'is_retweet':is_retweet, 'is_quote': is_quote, 'hashtags': hashtags, 'emojicons': emojicons, 'time':time }) # #### Normalize the columns columns = ['mentions', 'hashtags', 'emojicons'] for column in columns: for j in range(len(df[column])): df[column][j] = ', '.join(i for i in df[column][j]) if df[column][j] == '': df[column][j] = None
data_gathering_scripts/tweet_scraping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Informe de Análisis VI # ## Creando Nuevas Variábles # La directoría de la empresa precisa nuevas informaciones, por lo que tendremos que reorganizar nuestro conjunto de datos para obtener las siguiente informaciones: # # - Valor bruto del Alquiler # - Valor por metro cuadrado de un inmueble # - Agrupar los datos de Casas y Apartamentos import pandas as pd datos = pd.read_csv('datos/alquiler_residencial.csv', sep = ';') datos.head(10) datos['Valor Bruto'] = datos['Valor'] + datos['Condominio'] + datos['IPTU'] datos.head(10) datos['Valor m2'] = datos['Valor'] / datos['Area'] datos.head(10) datos['Valor m2'] = datos['Valor m2'].round(2) datos.head(10) datos['Valor Bruto m2'] = (datos['Valor Bruto'] / datos['Area']).round(2) datos.head(10) casa = ['Casa', 'Casa de Condomínio', 'Casa de Vila'] datos['Tipo Agregado'] = datos['Tipo'].apply(lambda x: 'Casa' if x in casa else 'Apartamento') datos # ## Excluyendo Variábles # La variable IPTU tiene valores poco confiables, después de todo, el IPTU no siempre tiene valores declarados. Al final, la empresa decidió que el valor bruto y el valor bruto m2 deberían excluirse de la base de datos. # Creando un DataFrame para almacenar datos auxiliares datos_aux = pd.DataFrame(datos[['Tipo Agregado', 'Valor m2', 'Valor Bruto', 'Valor Bruto m2']]) datos_aux.head(10) # Para eliminar una variable de la base de datos, usaremos el 'del', # pasaremos el DataFrame y el nombre de la variable que se eliminará. del datos_aux['Valor Bruto'] datos_aux.head(10) # Otra manera de excluir variábles es utilizando el método pop() datos_aux.pop('Valor Bruto m2') datos_aux # Otra manera de excluir múltiples variábles es utilizando el método drop() datos.drop(['Valor Bruto', 'Valor Bruto m2'], axis = 1, inplace = True) datos.head(10) datos.to_csv('datos/alquiler_residencial.csv', sep = ';', index = False)
exploracion-de-datos/6- Creando Nuevas Variables.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="YSLXOZqX1-D9" # Os treinamentos das redes MLP e Resnet foram feitos com um batch de 64 imagens por epoca e 10 epocas de treinamento com o dataset Cifar10 # + [markdown] id="Lt5NNdEbyX5Y" # #CNN # + id="NCHHsWhxxTmi" '''For Pre-activation ResNet, see 'preact_resnet.py'. Reference: [1] <NAME>, <NAME>, <NAME>, <NAME> Deep Residual Learning for Image Recognition. arXiv:1512.03385 ''' import torch import torch.nn as nn import torch.nn.functional as F class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d( in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion*planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion*planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) #ultima camada de features out = F.relu(out) return out class ResNet(nn.Module): def __init__(self, block, num_blocks, num_classes=10): super(ResNet, self).__init__() self.in_planes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) #inserir uma nova camada aqui self.linear = nn.Linear(512*block.expansion, num_classes) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1]*(num_blocks-1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.layer4(out) out = F.avg_pool2d(out, 4) out = out.view(out.size(0), -1) out = self.linear(out) return out def ResNet18(): return ResNet(BasicBlock, [2, 2, 2, 2]) def test(): net = ResNet18() y = net(torch.randn(1, 3, 32, 32)) print(y.size()) # + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["0fb29dcb22a04b99aad4e8d837ceb20b", "675bdee7596e40cfbd5bca2ecefc6461", "25a185f4c1c648709e375eaf34961185", "df52e62a5f5c4bd9a16137fa182dcebc", "92204afb887d45339f5938a00ced7954", "4d3af40009b849d29a2dacf432e5b24d", "97058759e34145ecaffc1f7fee15a2e3", "f484395de3a24b7e88c626ef0fbdd94a", "c25b86d2118949bea6130137dbf887cd", "1fc31ba5de174cdbaf1f93b8eeabead6", "37d48189fa0f4467931a3c02eee2de70"]} id="Gi7Tm6-eyiH4" outputId="f2335dbe-2f32-422d-8271-38e3c4b96a55" import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as transforms #from loader import MultiFolderLoader from torch.utils.data import DataLoader import torchvision.datasets as datasets #from torch.utils.tensorboard import SummaryWriter #from auto_augment import AutoAugment #from auto_augment import Cutout import os import argparse import numpy as np from torch.autograd import Variable #from PreResNet import * batch_size = 64 lr = 0.02 device = 'cuda' if torch.cuda.is_available() else 'cpu' best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch # Data print('==> Preparing data..') transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), # AutoAugment(), # Cutout(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # ]) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), #, ]) trainset = datasets.CIFAR10(root='~/data', train=True, download=True, transform=transform_train) #'./data', train=True, download=True, transform=transform_train) testset = datasets.CIFAR10(root='~/data', train=False, download=True, transform=transform_test) trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=3) testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=1) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') # Model print('==> Building model..') net = ResNet18() net = net.to(device) if device == 'cuda': net = torch.nn.DataParallel(net) cudnn.benchmark = True Softmin = nn.Softmin() Softmax = nn.Softmax() criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=lr,momentum=0.9, weight_decay=5e-4) # Training def train(epoch): print('\nEpoch: %d' % epoch) net.train() train_loss = 0 correct = 0 total = 0 listSum = 0 lr=0.02 if epoch >= 150: lr /= 10 for param_group in optimizer.param_groups: param_group['lr'] = lr #print("antes") for batch_idx, data in enumerate(trainloader, 0): # for batch_idx, (inputs, targets) in enumerate(trainloader): # print(batch_idx) inputs, targets = data inputs, targets = inputs.to(device), targets.to(device).view(-1) optimizer.zero_grad() outputs = net(inputs) #print(outputs.size) batch_size = outputs.size()[0] lam = 2 loss = criterion(outputs,targets) #print(loss) loss.backward() optimizer.step() #train_loss += loss.data[0] train_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().float()# predicted.eq(targets).sum().item() acc = 100.*correct/total print('loss train') print((train_loss/64)) print('') print('---------------------------------------------------------------') print('Acc train') print(acc) print('') print('---------------------------------------------------------------') def test(epoch): global best_acc net.eval() test_loss = 0 correct = 0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader,0): inputs, targets = inputs.to(device), targets.to(device).view(-1) outputs = net(inputs) batch_size = outputs.size()[0] loss = criterion(outputs,targets) test_loss += loss.item() _, predicted = torch.max(outputs.data, 1) total += targets.size(0) correct += predicted.eq(targets).cpu().sum().float() acc = 100.*correct/total # progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' # % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) print('Loss test') print(test_loss/64) print('') print('---------------------------------------------------------------') print('Acc test') print(acc) print('') print('---------------------------------------------------------------') # Save checkpoint. acc = 100.*correct/total if acc > best_acc: print('Saving..') state = { 'net': net.state_dict(), 'acc': acc, 'epoch': epoch, } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(state, './checkpoint/ckpt.pth') best_acc = acc print("best_acc: %f"%best_acc) print('---------------------------------------------------------------') for epoch in range(start_epoch, start_epoch+10): train(epoch) test(epoch) # + [markdown] id="3uk2bvUmyTio" # #MLP # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="f3T-toShys4j" outputId="f86de170-b873-48f9-b8b8-5112216fe303" import time import numpy as np import torch import torchvision from torch.autograd import Variable import torchvision.transforms as transforms import matplotlib.pyplot as plt # Constantes IMAGE_WIDTH = 32 IMAGE_HEIGHT = 32 COLOR_CHANNELS = 3 EPOCHS = 10 LEARNING_RATES = [.00001, 0.0001, 0.001, 0.01, 0.1] KEEP_RATES = [.5, .65, .8] MOMENTUM_RATES = [.25, .5, .75] WEIGHT_DECAY_RATES = [.0005, .005, .05] BATCH_SIZE = 64 BATCH_IMAGE_COUNT = 10000 TRAIN_BATCHS = ["data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4"] TEST_BATCHES = ["data_batch_5"] CLASSES = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] N_CLASSES = len(CLASSES) PLOT = False class Net(torch.nn.Module): def __init__(self, n_hidden_nodes, n_hidden_layers, activation, keep_rate=0): super(Net, self).__init__() self.n_hidden_nodes = n_hidden_nodes self.n_hidden_layers = n_hidden_layers self.activation = activation if not keep_rate: keep_rate = 0.5 self.keep_rate = keep_rate # Configure camadas da rede e adicione dropout self.fc1 = torch.nn.Linear(IMAGE_WIDTH * IMAGE_WIDTH * COLOR_CHANNELS, n_hidden_nodes) self.fc1_drop = torch.nn.Dropout(1 - keep_rate) if n_hidden_layers == 2: self.fc2 = torch.nn.Linear(n_hidden_nodes, n_hidden_nodes) self.fc2_drop = torch.nn.Dropout(1 - keep_rate) self.out = torch.nn.Linear(n_hidden_nodes, N_CLASSES) def forward(self, x): x = x.view(-1, IMAGE_WIDTH * IMAGE_WIDTH * COLOR_CHANNELS) if self.activation == "sigmoid": sigmoid = torch.nn.Sigmoid() x = sigmoid(self.fc1(x)) elif self.activation == "relu": x = torch.nn.functional.relu(self.fc1(x)) x = self.fc1_drop(x) if self.n_hidden_layers == 2: if self.activation == "sigmoid": x = sigmoid(self.fc2(x)) elif self.activation == "relu": x = torch.nn.functional.relu(self.fc2(x)) x = self.fc2_drop(x) return torch.nn.functional.log_softmax(self.out(x)) def train(epoch, model, train_loader, optimizer, log_interval=100, cuda=None): model.train() correct = 0 for batch_idx, (data, target) in enumerate(train_loader): if cuda: data, target = data.cuda(), target.cuda() data, target = Variable(data), Variable(target) optimizer.zero_grad() output = model(data) pred = output.data.max(1)[1] # obter o índice da probabilidade de log máxima correct += pred.eq(target.data).cpu().sum() accuracy = 100. * correct / len(train_loader.dataset) loss = torch.nn.functional.nll_loss(output, target) loss.backward() optimizer.step() #if batch_idx % log_interval == 0: #print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} Accuracy: {}'.format( #epoch, batch_idx * len(data), len(train_loader.dataset), #100. * batch_idx / len(train_loader), loss.item(), accuracy)) print('Epoch:') print(epoch) print('loss:') print(loss.item()) print('acc:') print(accuracy) def validate(loss_vector, accuracy_vector, model, validation_loader, cuda=None): model.eval() val_loss, correct = 0, 0 for data, target in validation_loader: if cuda: data, target = data.cuda(), target.cuda() data, target = Variable(data, volatile=True), Variable(target) output = model(data) val_loss += torch.nn.functional.nll_loss(output, target).item() pred = output.data.max(1)[1] # obter o índice da probabilidade de log máxima correct += pred.eq(target.data).cpu().sum() val_loss /= len(validation_loader) loss_vector.append(val_loss) accuracy = 100. * correct / len(validation_loader.dataset) accuracy_vector.append(accuracy) print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( val_loss, correct, len(validation_loader.dataset), accuracy)) def main(): cuda = torch.cuda.is_available() print('Using PyTorch version:', torch.__version__, 'CUDA:', cuda) transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) train_loader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=0, pin_memory=False) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) validation_loader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=0, pin_memory=False) hidden_nodes = 100 layers = 1 for i in range(1, len(LEARNING_RATES) + 1): model = Net(hidden_nodes, layers, "sigmoid") if cuda: model.cuda() optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATES[i-1]) loss_vector = [] acc_vector = [] for epoch in range(1, EPOCHS + 1): train(epoch, model, train_loader, optimizer, cuda=cuda) validate(loss_vector, acc_vector, model, validation_loader, cuda=cuda) if epoch == 25: break # Plote perda de trem e precisão de validação versus épocas para cada taxa de aprendizado if PLOT: epochs = [i for i in range(1, 26)] plt.plot(epochs, acc_vector) plt.xlabel("Epochs") plt.ylabel("Accuracy with Sigmoid") plt.show() plt.plot(epochs, loss_vector) plt.xlabel("Epochs") plt.ylabel("Loss") plt.show() # Repita usando RELU para ativação hidden_nodes = 100 layers = 1 start_time = time.time() for i in range(1, len(LEARNING_RATES)): model = Net(hidden_nodes, layers, "relu") if cuda: model.cuda() optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATES[i]) loss_vector = [] acc_vector = [] for epoch in range(1, EPOCHS + 1): train(epoch, model, train_loader, optimizer, cuda=cuda) validate(loss_vector, acc_vector, model, validation_loader, cuda=cuda) if epoch == 25: break end_time = time.time() - start_time print("Total time", end_time) # Plote perda de trem e precisão de validação versus épocas para cada taxa de aprendizado if PLOT: epochs = [i for i in range(1, 26)] plt.plot(epochs, acc_vector) plt.xlabel("Epochs") plt.ylabel("Accuracy with RELU") plt.show() plt.plot(epochs, loss_vector) plt.xlabel("Epochs") plt.ylabel("Loss") plt.show() # Experimentando com/diferentes parâmetros: hidden_nodes = 100 layers = 1 start_time = time.time() for i in range(1, len(KEEP_RATES) + 1): model = Net(hidden_nodes, layers, "relu", keep_rate=KEEP_RATES[i-1]) if cuda: model.cuda() optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATES[2], momentum=MOMENTUM_RATES[1], weight_decay=WEIGHT_DECAY_RATES[0]) loss_vector = [] acc_vector = [] for epoch in range(1, EPOCHS + 1): train(epoch, model, train_loader, optimizer, cuda=cuda) validate(loss_vector, acc_vector, model, validation_loader, cuda=cuda) if epoch == 20: break end_time = time.time() - start_time print("Total time", end_time) if PLOT: epochs = [i for i in range(1, 21)] plt.plot(epochs, acc_vector) plt.xlabel("Epochs") plt.ylabel("Accuracy with RELU") plt.show() plt.plot(epochs, loss_vector) plt.xlabel("Epochs") plt.ylabel("Loss") plt.show() # 2 camadas, 50 nós ocultos: hidden_nodes = 50 layers = 2 start_time = time.time() model = Net(hidden_nodes, layers, "relu", keep_rate=.8) if cuda: model.cuda() optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATES[2]) loss_vector = [] acc_vector = [] for epoch in range(1, EPOCHS + 1): train(epoch, model, train_loader, optimizer, cuda=cuda) validate(loss_vector, acc_vector, model, validation_loader, cuda=cuda) if epoch == 30: break end_time = time.time() - start_time print("Total time", end_time) if PLOT: epochs = [i for i in range(1, 31)] plt.plot(epochs, acc_vector) plt.xlabel("Epochs") plt.ylabel("Accuracy") plt.show() plt.plot(epochs, loss_vector) plt.xlabel("Epochs") plt.ylabel("Loss") plt.show() if __name__ == '__main__': main() # + [markdown] id="ASUKNrcvS4GP" # #Resultado # + [markdown] id="7O-2PmRvS57A" # Na nossa resnet obtivemos melhores resultados nas mesmas condicões comparados a nossa MLP, entretando na nossa cnn necessitamos de mais parametros para regular os pesos, mas de forma mais geral a nossa cnn possui melhores resultados #
CNN/CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import numpy as np import cv2 from openvino.inference_engine import IECore import matplotlib.pyplot as plt import time # ## Read Image file_path = 'data/images/logo.jpg' image = cv2.imread(file_path) plt.imshow(cv2.cvtColor(image,cv2.COLOR_BGR2RGB)) plt.axis('off') plt.show() # ## Prepare Model # + def PrepareNetWork(onnx_model,device): ie = IECore() ############## Slight Change ############# net = ie.read_network(model = onnx_model) ######################################### ####################### Very Important ############################################# # Check to make sure that the plugin has support for all layers in the model supported_layers = ie.query_network(net,device_name = device) unsupported_layers = [layer for layer in supported_layers.values() if layer!= device] if len(unsupported_layers)>0: raise Exception(f"Number of unsupported layers {len(unsupported_layers)}") #################################################################################### exec_net = ie.load_network(network=net, device_name = device) # Store name of input and output blobs input_blob = next(iter(net.input_info)) output_blob = next(iter(net.outputs)) # Extract Dimension (n:batch, c:color channel,h: height, w: width ) n, c ,h ,w = net.input_info[input_blob].input_data.shape print('Extract Model Input Dimension:',n,c,h,w) return (input_blob,output_blob), exec_net, (n,c,h,w) def PrepareInputImage(input_path,n,c,h,w): # height width channels image = cv2.imread(input_path) # Resize in_frame = cv2.resize(image,(w,h)) in_frame = in_frame.transpose((2,0,1)) # Moving color channels to head in_frame = in_frame.reshape((n,c,h,w)) return image, in_frame def MakePrediction(execution_network, input_blob, inference_frame): st_time = time.time() # Run Inference result = execution_network.infer(inputs = {input_blob:inference_frame}) ed_time = time.time() time_sp = ed_time-st_time FPS = np.round((1/time_sp),4) print(f"FPS: {FPS}\n") return FPS,result # + # Model Path onnx_model = 'intel_models/StyleGAN.onnx' # Device device = 'CPU' # Options include CPU, GPU, MYRIAD, [HDDL or HETERO] I am not familiar with the last two # Prepare Network inputs_outputs, execution_network, dimensions = PrepareNetWork(onnx_model,device) # Extract Required Input dimension n,c,h,w = dimensions # Extract input and output names input_blob, output_blob = inputs_outputs # Print Networf Information print(f"Input_name: {input_blob:>6}\nOutput_name: {output_blob:>5}") print(f"OpenVINO Engine: {execution_network}") # - original_image, inference_frame = PrepareInputImage(file_path,n,c,h,w) plt.imshow(cv2.cvtColor(original_image,cv2.COLOR_BGR2RGB)) plt.axis('off') plt.show() # + FPS, result = MakePrediction(execution_network,input_blob,inference_frame) styled_image = result[output_blob] styled_image = styled_image[0] styled_image = styled_image.transpose((1,2,0)) styled_image = np.clip(styled_image, 0, 255) plt.imshow(styled_image/255) plt.axis('off') plt.show() # -
003-Style GAN with ONNX Model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Resetting the Wave: # ## A High Reward Strategy Employed by a Hebbian Cellular Automaton Policy to Game a Change-in-Center-of-Mass Mobility Reward Across Multiple B3/Sxxx Life-Like Rules. # # <div align="center"> # <img src="../assets/harli_reset_wave_strategy_small.gif"> # <em> # First observation of the "reset wave" strategy for gaming the `SpeedDetector` reward wrapper in Carle's Game. Unlike the trained policies demonstrated in this notebook, the animation above is of an agent operating in and trained on the B368/S245 Morley/Move rules. # </em> # </div> # # # The command for running this experiment is shown below. The training run with the most successful and interesting take on gaming the reward function occured with random seed `42`. Coincidence? Probably. # # ``` # experiment.py -mg 128 -ms 256 -p 32 -sm 1 -v 1 -d cuda:1 -dim 128 -s 13 42 1337 -a HARLI -w RND2D SpeedDetector -tr B3/S023 B3/S236 B3/S237 B3/S238 -vr B3/S23 -tag _harli_glider_experiment_ # ``` # + from IPython.core.display import display, HTML display(HTML("<style>.container { width:75% !important; }</style>")) import os import numpy as np import torch import time from carle.env import CARLE from carle.mcl import CornerBonus, SpeedDetector, PufferDetector, AE2D, RND2D from game_of_carle.agents.harli import HARLI from game_of_carle.agents.carla import CARLA from game_of_carle.agents.grnn import ConvGRNN from game_of_carle.agents.toggle import Toggle import bokeh import bokeh.io as bio from bokeh.io import output_notebook, show from bokeh.plotting import figure from bokeh.layouts import column, row from bokeh.models import TextInput, Button, Paragraph from bokeh.models import ColumnDataSource from bokeh.events import DoubleTap, Tap import matplotlib.pyplot as plt my_cmap = plt.get_cmap("magma") output_notebook() # - if (0): device_string = "cuda:0" else: device_string = "cpu" # + agent = HARLI(device=device_string) policy_list = [] directory_list = os.listdir("../policies/") for filename in directory_list: if "HARLI" in filename and "glider" in filename: policy_list.append(os.path.join("..", "policies", filename)) policy_list.sort() # instantiate CARLE with a speed detection wrapper env = CARLE(height=256, width=256, device=device_string) env = SpeedDetector(env) # set rules # this agent was trained on B3/S023, B3/S236, B3/S237, and B3/S238 my_rules = "B3/S023" agent.set_params(np.load(policy_list[0])) print(f"{len(policy_list)} HARLI mobility policies found.") # + def modify_doc(doc): env.rules_from_string(my_rules) dim_wh = 24 dim_ww = 24 global obs obs = env.reset() my_weights = agent.get_weights().reshape(dim_wh, dim_ww) p = figure(plot_width=3*256, plot_height=3*256, title="CA Universe") p_plot = figure(plot_width=int(1.25*256), plot_height=int(1.25*256), title="'Reward'") p_weights = figure(plot_width=int(1.255*256), plot_height=int(1.25*256), title="Weights") global my_period global number_agents global agent_number global agent_on global action global reward_sum reward_sum = 0.0 agent_number = 0 number_agents = len(policy_list) my_period = 512 agent_on = False action = torch.zeros(1, 1, env.action_height, env.action_width) # add a circle renderer with x and y coordinates, size, color, and alpha source = ColumnDataSource(data=dict(my_image=[obs.squeeze().cpu().numpy()])) source_plot = ColumnDataSource(data=dict(x=np.arange(1), y=np.arange(1)*0)) source_weights = ColumnDataSource(data=dict(my_image=[my_weights])) img = p.image(image='my_image',x=0, y=0, dw=256, dh=256, palette="Magma256", source=source) line_plot = p_plot.line(line_width=3, color="firebrick", source=source_plot) img_w = p_weights.image(image='my_image',x=0, y=0, dw=240, dh=240, palette="Magma256", source=source_weights) button_go = Button(sizing_mode="stretch_width", label="Run >") button_slower = Button(sizing_mode="stretch_width",label="<< Slower") button_faster = Button(sizing_mode="stretch_width",label="Faster >>") input_birth = TextInput(value=f"{env.birth}") input_survive = TextInput(value=f"{env.survive}") button_birth = Button(sizing_mode="stretch_width", label="Update Birth Rules") button_survive = Button(sizing_mode="stretch_width", label="Update Survive Rules") button_reset_prev_agent = Button(sizing_mode="stretch_width",label="Reset w/ Prev. Agent") button_reset_this_agent = Button(sizing_mode="stretch_width",label="Reset w/ This Agent") button_reset_next_agent = Button(sizing_mode="stretch_width",label="Reset w/ Next Agent") button_reset_w_spaceship = Button(sizing_mode="stretch_width",label="Reset w/ Spaceship") button_reset_w_glider = Button(sizing_mode="stretch_width",label="Reset w/ Glider") button_agent_switch = Button(sizing_mode="stretch_width", label="Turn Agent On") message = Paragraph() def update(): global obs global stretch_pixel global action global agent_on global my_step global rewards global agent_number global reward_sum obs, r, d, i = env.step(action) rewards = np.append(rewards, r.cpu().numpy().item()) if agent_on: action = agent(obs) else: action = torch.zeros_like(action) padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze() my_img = (padded_action*2 + obs.squeeze()).cpu().numpy() my_img[my_img > 3.0] = 3.0 (padded_action*2 + obs.squeeze()).cpu().numpy() new_data = dict(my_image=[my_img]) my_weights = agent.get_weights().reshape(dim_wh, dim_ww) new_weights = dict(my_image=[my_weights]) #new_line = dict(x=np.arange(my_step+2), y=rewards) new_line = dict(x=[my_step], y=[r.cpu().numpy().item()]) source.stream(new_data, rollover=1) source_plot.stream(new_line, rollover=2000) source_weights.stream(new_weights, rollover=1) my_step += 1 reward_sum += r.item() message.text = f"agent {agent_number}, step {my_step}, reward: {r.item():.4f}, mean reward per step: {(reward_sum/my_step):.4f} \n"\ f"{policy_list[agent_number]}" def go(): if button_go.label == "Run >": my_callback = doc.add_periodic_callback(update, my_period) button_go.label = "Pause" #doc.remove_periodic_callback(my_callback) else: doc.remove_periodic_callback(doc.session_callbacks[0]) button_go.label = "Run >" def faster(): global my_period my_period = max([my_period * 0.5, 1]) go() go() def slower(): global my_period my_period = min([my_period * 2, 8192]) go() go() def reset_w_spaceship(): global obs global action global stretch_pixel global my_step global rewards global agent_number global number_agents global reward_sum reward_sum = 0.0 my_step = 0 new_line = dict(x=[my_step], y=[0]) obs = env.reset() stretch_pixel = torch.zeros_like(obs).squeeze() stretch_pixel[0,0] = 3 agent.reset() if agent_on: action = agent(obs) else: # add a spaceship to the action action[:, :, 32, 32:34] = 1.0 action[:, :, 33, 29:32] = 1.0 action[:, :, 33, 33:35] = 1.0 action[:, :, 34, 29:34] = 1.0 action[:, :, 35, 30:33] = 1.0 padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze() my_img = (padded_action*2 + obs.squeeze()).cpu().numpy() my_img[my_img > 3.0] = 3.0 (padded_action*2 + obs.squeeze()).cpu().numpy() new_data = dict(my_image=[my_img]) my_weights = agent.get_weights().reshape(dim_wh, dim_ww) new_weights = dict(my_image=[my_weights]) source.stream(new_data, rollover=1) source_plot.stream(new_line, rollover=2) source_weights.stream(new_weights, rollover=1) message.text = f"agent {agent_number}, step {my_step} \n"\ f"{policy_list[agent_number]}" rewards = np.array([0]) source_plot.stream(new_line, rollover=1) source.stream(new_data, rollover=8) def reset_w_glider(): global obs global action global stretch_pixel global my_step global rewards global agent_number global number_agents global use_spaceship global reward_sum reward_sum = 0.0 my_step = 0 new_line = dict(x=[my_step], y=[0]) obs = env.reset() stretch_pixel = torch.zeros_like(obs).squeeze() stretch_pixel[0,0] = 3 agent.reset() if agent_on: action = agent(obs) else: # add a glider to the action action[:, :, 34, 32] = 1.0 action[:, :, 33, 32:34] = 1.0 action[:, :, 32, 31] = 1.0 action[:, :, 32, 33] = 1.0 padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze() my_img = (padded_action*2 + obs.squeeze()).cpu().numpy() my_img[my_img > 3.0] = 3.0 (padded_action*2 + obs.squeeze()).cpu().numpy() new_data = dict(my_image=[my_img]) my_weights = agent.get_weights().reshape(dim_wh, dim_ww) new_weights = dict(my_image=[my_weights]) source.stream(new_data, rollover=1) source_plot.stream(new_line, rollover=2) source_weights.stream(new_weights, rollover=1) message.text = f"agent {agent_number}, step {my_step} \n"\ f"{policy_list[agent_number]}" rewards = np.array([0]) source_plot.stream(new_line, rollover=1) source.stream(new_data, rollover=8) def reset_this_agent(): global obs global action global stretch_pixel global my_step global rewards global agent_number global number_agents global use_spaceship global reward_sum reward_sum = 0.0 my_step = 0 new_line = dict(x=[my_step], y=[0]) obs = env.reset() stretch_pixel = torch.zeros_like(obs).squeeze() stretch_pixel[0,0] = 3 agent.reset() if agent_on: action = agent(obs) else: action = torch.zeros_like(action) padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze() my_img = (padded_action*2 + obs.squeeze()).cpu().numpy() my_img[my_img > 3.0] = 3.0 (padded_action*2 + obs.squeeze()).cpu().numpy() new_data = dict(my_image=[my_img]) my_weights = agent.get_weights().reshape(dim_wh, dim_ww) new_weights = dict(my_image=[my_weights]) source.stream(new_data, rollover=1) source_plot.stream(new_line, rollover=2) source_weights.stream(new_weights, rollover=1) message.text = f"agent {agent_number}, step {my_step} \n"\ f"{policy_list[agent_number]}" rewards = np.array([0]) source_plot.stream(new_line, rollover=1) source.stream(new_data, rollover=8) def reset_next_agent(): global obs global action global stretch_pixel global my_step global rewards global agent_number global number_agents global reward_sum reward_sum = 0.0 my_step = 0 new_line = dict(x=[my_step], y=[0]) obs = env.reset() agent_number = (agent_number + 1) % number_agents agent.set_params(np.load(policy_list[agent_number])) agent.reset() if agent_on: action = agent(obs) else: action = torch.zeros_like(action) padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze() my_img = (padded_action*2 + obs.squeeze()).cpu().numpy() my_img[my_img > 3.0] = 3.0 (padded_action*2 + obs.squeeze()).cpu().numpy() new_data = dict(my_image=[my_img]) my_weights = agent.get_weights().reshape(dim_wh, dim_ww) new_weights = dict(my_image=[my_weights]) source.stream(new_data, rollover=1) source_plot.stream(new_line, rollover=1) source_weights.stream(new_weights, rollover=1) message.text = f"agent {agent_number}, step {my_step}\n"\ f"{policy_list[agent_number]}" def reset_prev_agent(): global obs global action global stretch_pixel global my_step global rewards global agent_number global number_agents global reward_sum reward_sum = 0.0 my_step = 0 new_line = dict(x=[my_step], y=[0]) obs = env.reset() agent_number = (agent_number - 1) % number_agents agent.set_params(np.load(policy_list[agent_number])) agent.reset() if agent_on: action = agent(obs) else: action = torch.zeros_like(action) padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze() my_img = (padded_action*2 + obs.squeeze()).cpu().numpy() my_img[my_img > 3.0] = 3.0 (padded_action*2 + obs.squeeze()).cpu().numpy() new_data = dict(my_image=[my_img]) my_weights = agent.get_weights().reshape(dim_wh, dim_ww) new_weights = dict(my_image=[my_weights]) source.stream(new_data, rollover=1) source_plot.stream(new_line, rollover=1) source_weights.stream(new_weights, rollover=1) message.text = f"agent {agent_number}, step {my_step}\n"\ f"{policy_list[agent_number]}" def set_birth_rules(): env.birth_rule_from_string(input_birth.value) my_message = "Rules updated to B" for elem in env.birth: my_message += str(elem) my_message += "/S" for elem in env.survive: my_message += str(elem) message.text = my_message def set_survive_rules(): env.survive_rule_from_string(input_survive.value) my_message = "Rules updated to B" for elem in env.birth: my_message += str(elem) my_message += "/S" for elem in env.survive: my_message += str(elem) message.text = my_message def human_toggle(event): global action coords = [np.round(env.height*event.y/256-0.5), np.round(env.width*event.x/256-0.5)] offset_x = (env.height - env.action_height) / 2 offset_y = (env.width - env.action_width) / 2 coords[0] = coords[0] - offset_x coords[1] = coords[1] - offset_y coords[0] = np.uint8(np.clip(coords[0], 0, env.action_height-1)) coords[1] = np.uint8(np.clip(coords[1], 0, env.action_height-1)) action[:, :, coords[0], coords[1]] = 1.0 * (not(action[:, :, coords[0], coords[1]])) #padded_action = stretch_pixel/2 + env.action_padding(action).squeeze() padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze() my_img = (padded_action*2 + obs.squeeze()).cpu().numpy() my_img[my_img > 3.0] = 3.0 (padded_action*2 + obs.squeeze()).cpu().numpy() new_data = dict(my_image=[my_img]) source.stream(new_data, rollover=8) def clear_toggles(): global action if button_go.label == "Pause": action *= 0 doc.remove_periodic_callback(doc.session_callbacks[0]) button_go.label = "Run >" padded_action = stretch_pixel/2 + env.inner_env.action_padding(action).squeeze() my_img = (padded_action*2 + obs.squeeze()).cpu().numpy() my_img[my_img > 3.0] = 3.0 (padded_action*2 + obs.squeeze()).cpu().numpy() new_data = dict(my_image=[my_img]) source.stream(new_data, rollover=8) else: doc.add_periodic_callback(update, my_period) button_go.label = "Pause" def agent_on_off(): global agent_on if button_agent_switch.label == "Turn Agent Off": agent_on = False button_agent_switch.label = "Turn Agent On" else: agent_on = True button_agent_switch.label = "Turn Agent Off" reset_w_glider() p.on_event(Tap, human_toggle) p.on_event(DoubleTap, clear_toggles) button_reset_prev_agent.on_click(reset_prev_agent) button_reset_this_agent.on_click(reset_this_agent) button_reset_next_agent.on_click(reset_next_agent) button_reset_w_glider.on_click(reset_w_glider) button_reset_w_spaceship.on_click(reset_w_spaceship) button_birth.on_click(set_birth_rules) button_survive.on_click(set_survive_rules) button_go.on_click(go) button_faster.on_click(faster) button_slower.on_click(slower) button_agent_switch.on_click(agent_on_off) control_layout = row(button_slower, button_go, button_faster) reset_layout = row(button_reset_prev_agent, button_reset_this_agent, button_reset_next_agent) spaceship_layout = row(button_reset_w_glider, button_reset_w_spaceship) rule_layout = row(input_birth, button_birth, input_survive, button_survive) agent_toggle_layout = row(button_agent_switch) display_layout = row(p, column(p_plot, p_weights)) message_layout = row(message) doc.add_root(display_layout) doc.add_root(control_layout) doc.add_root(spaceship_layout) doc.add_root(reset_layout) doc.add_root(rule_layout) doc.add_root(message_layout) doc.add_root(agent_toggle_layout) show(modify_doc) # -
notebooks/evaluation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/fadelramli/Tugas-MachineLearning/blob/main/Week5_PCA_With_Iris(PCA).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="wtR2FIh2NdnB" # Code source: <NAME> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn import decomposition from sklearn import datasets # + id="RBrLY_WANe5D" np.random.seed(5) # + id="EoRjsb0DNiK-" iris = datasets.load_iris() X = iris.data y = iris.target # + colab={"base_uri": "https://localhost:8080/", "height": 247} id="_4bCU72_Nkqg" outputId="85d93fe0-ab97-4c2b-9e3c-22f0f5891e3c" fig = plt.figure(1, figsize=(4, 3)) plt.clf() ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134) plt.cla() pca = decomposition.PCA(n_components=3) pca.fit(X) X = pca.transform(X) for name, label in [("Setosa", 0), ("Versicolour", 1), ("Virginica", 2)]: ax.text3D( X[y == label, 0].mean(), X[y == label, 1].mean() + 1.5, X[y == label, 2].mean(), name, horizontalalignment="center", bbox=dict(alpha=0.5, edgecolor="w", facecolor="w"), ) # Reorder the labels to have colors matching the cluster results y = np.choose(y, [1, 2, 0]).astype(float) ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral, edgecolor="k") ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) plt.show()
Week5_PCA_With_Iris(PCA).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + active="" # 说明: # 给定两个整数n和k,返回1 ... n中k个数字的所有可能组合。 # 您可以按任何顺序返回答案。 # # Example 1: # Input: # n = 4, k = 2 # Output: # [ # [2,4], # [3,4], # [2,3], # [1,2], # [1,3], # [1,4], # ] # Example 2: # Input: n = 1, k = 1 # Output: [[1]] # # 限制: # 1、1 <= n <= 20 # 2、1 <= k <= n # - class Solution: def combine(self, n: int, k: int): self.res = [] self.k = k self.dfs(1, n + 1, []) return self.res def dfs(self, start, end, path): if len(path) == self.k: self.res.append(list(path)) return for i in range(start, end): path.append(i) self.dfs(i+1, end, path) path.pop() solution = Solution() solution.combine(4, 2)
Back Tracking/0904/77. Combinations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 (SageMath) # language: python # name: python2 # --- # + import sympy import adaptive_trapzint as p1 import sinesum1 as p2 import centered_diff as p3 import find_primes as p4 # - # # Homework 1 # # ### <NAME> # ### 2/8/2016 # # # # # ## Exercises Completed: # # 1. Exercise 3.8 (```adaptive_trapzint.py```) # 1. Exercise 3.15 (```sinesum1.py```) # 1. Exercise 3.18 (```centered_diff.py```) # 1. Exercise 3.20 (```find_primes.py```) # # # # ## adaptive_trapzint.py # # Uses algorithm to calculate the number of series terms required to obtain integral value within a given error, then computes this quantity. A select sampling of functions, intervals, and function performance is output below. p1.performance_table() # ## sinesum1.py # # Constructs a trigonometric series with a known point of conversion and ultimately produces a table which details the effectiveness of truncating at various indexes for a sampling of arguments. p2.table(1) # ## centered_diff.py # # Estimates the first derivative of a function by a truncated Taylor Series approximation. Ultimately, a table was produced in which the estimations were compared to the analytic derivatives, calculated using # sympy, and the error was deduced. p3.application() # ## find_primes.py # # This program identifies prime factors by iterating through a list from 2 to the number given as the function argument and eliminating numbers which are multiples of smaller numbers from the iteration list. # The iteration list is then returned and consists of only prime numbers lesser or equal to the function argument. Here, the prime numbers of 100 and less are shown. print p4.find_primes(100)
hw1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="QnI6uEAIedIr" colab_type="text" # # **FINAL_SUBMISSION** # + [markdown] id="2W1lvz8k921d" colab_type="text" # **THE CHALLANGE** # # # Help find ways to improve the performance of machine learning and predictive models by filling in gaps in the datasets prior to model training. # # # # + [markdown] id="fDm8rIs0Jr7O" colab_type="text" # ANATOMY OF THE CHALLANGE: # # # DO WHAT? # # Improve performance of Machine Learning (ML) models by collecting a complete and continuous sensor data stream. # # WHY DID IT HAPPEN? # # * Sensor issues or signal noise due to experimental environment/setup # * Corrupted of data # * Loss of data during transmission (also due to limited bandwidth of transmission) # * Interference # * Limited amount of power for data collection and transmission # # # WHAT IT DOES? # # * Limits the ability to train accurate ML models to predict features/characteristics in data, which in turn renders the data "useless" # * Hinders the collection of good-quality data silos # # # HOW TO SOLVE/OBJECTIVE? # # * By "filling in" the missing datapoints in the datasets # * By "generating" the missing datapoints in the datasets # * By eliminating/removing the noisy/corrupted information that is embedded in individual datapoints # # # # DO IT WHEN? # # * Prior to training, i.e. during data cleaning and preprocessing. # # # # # # + [markdown] id="Clp6WdehRgm_" colab_type="text" # We started by investigating the reasons behind data loss when the data is acquired through a sensor or sensor array. In addition, we also started doing research finding the reasons behind the loss. # # Our research concluded that data loss in any dataset does not only occur due to missing data (be it discreet or continuous/timeseries) but also due to incomplete or corrupted or noisy collection of these data that are acquired by the sensors due to the reasons mentioned above. # # # # --- # # # # HYPOTHESIS: # # # # We propose an end-to-end Machine learning pipeline to -fill in the missing data using Generative modeling which involves using a model to generate new examples that plausibly come from an existing distribution of samples. # # Stacked Denoising Autoencoder for when the sensor data is corrupted or there is a bit of noise in it, we call this type of data noisy data. # To obtain proper information about the data, we want Denoising. # We define our autoencoder to remove (if not all)most of the noise our data. # # Transforms the input into a lower dimensional representation, and a decoder, which tries to reconstruct the original input from the lower dimensional representation. Therefore, these models present some some sort of “bottle neck” in the middle that forces the network to learn how to compress the data in a lower dimensional space. When training these algorithms, the objective is to be able to reconstruct the original input with the minimum amount of information loss. Once the model is trained, we can compress data at will by only using the encoder component of the autoencoder. # # # --- # **(A)** # # DETAILS: # One model is called the “generator” or “generative network” model that learns to generate new plausible samples. The other model is called the “discriminator” or “discriminative network” and learns to differentiate generated examples from real examples. # # The two models are set up in a contest or a game (in a game theory sense) where the generator model seeks to fool the discriminator model, and the discriminator is provided with both examples of real and generated samples. # # After training, the generative model can then be used to create new plausible samples on demand. # # ---- # # # **(B)** # # An autoencoder is a neural network used for dimensionality reduction; that is, for feature selection and extraction. Autoencoders with more hidden layers than inputs run the risk of learning the identity function – where the output simply equals the input – thereby becoming useless. # # Denoising autoencoders are an extension of the basic autoencoder, and represent a stochastic version of it. Denoising autoencoders attempt to address identity-function risk by randomly corrupting input (i.e. introducing noise) that the autoencoder must then reconstruct, or denoise. # # Stacked Denoising Autoencoder # # A stacked denoising autoencoder is simply many denoising autoencoders strung together. # # A key function of SDAs, and deep learning more generally, is unsupervised pre-training, layer by layer, as input is fed through. Once each layer is pre-trained to conduct feature selection and extraction on the input from the preceding layer, a second stage of supervised fine-tuning can follow. # # A word on stochastic corruption in SDAs: Denoising autoencoders shuffle data around and learn about that data by attempting to reconstruct it. The act of shuffling is the noise, and the job of the network is to recognize the features within the noise that will allow it to classify the input. When a network is being trained, it generates a model, and measures the distance between that model and the benchmark through a loss function. Its attempts to minimize the loss function involve resampling the shuffled inputs and re-reconstructing the data, until it finds those inputs which bring its model closest to what it has been told is true. # # --- # # **(C)** # # Encoder network: It translates the original high-dimension input into the latent low-dimensional code. The input size is larger than the output size. # Decoder network: The decoder network recovers the data from the code, likely with larger and larger output layers. # # The encoder network essentially accomplishes the dimensionality reduction, just like how we would use Principal Component Analysis (PCA) or Matrix Factorization (MF) for. In addition, the autoencoder is explicitly optimized for the data reconstruction from the code. # # # # --- # # **(D)** # # Disentangled Variational autoencoders # # The idea of Variational Autoencoder is actually less similar to all the autoencoder models above, but deeply rooted in the methods of variational bayesian and graphical model. # Instead of mapping the input into a fixed vector, we want to map it into a distribution. # If each variable in the inferred latent representation is only sensitive to one single generative factor and relatively invariant to other factors, we will say this representation is disentangled or factorized. One benefit that often comes with disentangled representation is good interpretability and easy generalization to a variety of tasks. # # For example, a model trained on photos of human faces might capture the gentle, skin color, hair color, hair length, emotion, whether wearing a pair of glasses and many other relatively independent factors in separate dimensions. Such a disentangled representation is very beneficial to facial image generation. # # # https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html#beta-vae # # # # + [markdown] id="IkuHeWXaJTeb" colab_type="text" # # **Proof of Concept #3** # + [markdown] colab_type="text" id="SBwaJJLCJT67" # # **NASA Meteorite Landing dataset: Recovering/approximating/imputing missing values** # # # **# Algorithm: Deep Convolutional Generative Adversarial Network (DCGAN)** # + [markdown] id="JBWlq21JaepW" colab_type="text" # # **Our approach** # # Here, we are transforming each sample (row) of the Meteorite CSV dataset into an image. The dataset has 45716 rows and 9 columns, including metorites' names and IDs. Our algorithm transforms each row it into a 3x3 image and zero pads the outer periphery of the 3x3 matrix (to make it a 7x7 matrix) so that each row fits into a 2D Convolutional filter. Then, once the training and evaluation is complete, we plan to recover the original data (CSV) from the images generated by the DCGAN. # # # **Challenges** # # There are mainly 2 challenges that we faced (#1 and #2) while implementing this demo. #3 is our planned future work. # # # # 1. The dataset is probably not big enough for a regular Convolutional Neural Network (CNN)-based DCGAN architecture. # 2. We had to encode all categorical values to numeric values, including the names. The problem here was our label encoder generated different numeric values for the meteorite names compared to their IDs. We chose not to exclude these two columns from the dataset for the sake of architectural simplicity at this moment. # # # # **Future work** # Due to time constraints, we are yet to recover the generated csv data from the output images and evaluate whether the algorithm converged or not. If it did not converge, then we plan to tune the hyperparameters, modify the CNN architecture if necessary and re-train the DCGAN algorithm. # # + [markdown] colab_type="text" id="3wPcFbxLJT7B" # ### Import TensorFlow and other libraries # + colab_type="code" id="CytkWIlIJT7H" colab={} from __future__ import absolute_import, division, print_function, unicode_literals try: # # %tensorflow_version only exists in Colab. # %tensorflow_version 2.x except Exception: pass # + colab_type="code" outputId="e4b89705-dcef-4733-c024-9b1df29e0760" id="Yr_IsI0XJT7S" colab={"base_uri": "https://localhost:8080/", "height": 35} import tensorflow as tf tf.__version__ # + colab_type="code" outputId="ef6fbde2-1ea7-48ec-d8a4-b166bd1323e4" id="f2JlVi_aJT7f" colab={"base_uri": "https://localhost:8080/", "height": 90} # To generate GIFs # !pip install imageio # + colab_type="code" id="Lx-gzPmgJT7h" colab={} import glob import imageio import matplotlib.pyplot as plt import numpy as np import os import PIL import time from tensorflow.keras import layers from sklearn import preprocessing from sklearn.preprocessing import normalize from sklearn.preprocessing import MinMaxScaler from IPython import display # + [markdown] colab_type="text" id="x4TGihDDJT7k" # ### Load and prepare NASA Meteorite dataset for **training** # + id="hhULQyuGek0V" colab_type="code" outputId="19389bf7-d168-48e4-fc1b-502998c223e9" colab={"base_uri": "https://localhost:8080/", "height": 122} ##### Initialize training dataset ##### # mount google drive location where you saved a .zip archive of your folder that contains images; then unzip the file from google.colab import drive drive.mount('/content/drive') # + id="jXTHBn3c7ZSa" colab_type="code" cellView="both" outputId="9305c530-7c6e-426f-884c-8dab665ed644" colab={"base_uri": "https://localhost:8080/", "height": 35} # cd /content/drive/My\ Drive/Colab\ Notebooks/NASA-challenge-sample-datasets/ # + [markdown] id="vClB8Xo-gKA8" colab_type="text" # **Load dataset and encode categorical values** # + id="sIOmIx7jvLgz" colab_type="code" colab={} # Load dataset and encode categorical values import pandas as pd df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/NASA-challenge-sample-datasets/Meteorite_Landings_clean.csv') df_filled = df.fillna(0) # Encode categorical values label_enc = preprocessing.LabelEncoder() data_clean = df_filled.apply(lambda series: pd.Series(label_enc.fit_transform(series), index=series.index)) #data_clean.to_csv('Meteorite_not_normalized.csv',index=True) # Normalize numeric data only (range: 0-255) first_2_col = data_clean[data_clean.columns[:2]] minmax = MinMaxScaler(feature_range=(0, 255), copy=True) data_norm_minmax = minmax.fit_transform(data_clean[['nametype', 'recclass', 'mass (g)', 'fall', 'year_numeric', 'reclat', 'reclong']]) #data_norm = normalize(data_clean[['nametype', 'recclass', 'mass (g)', 'fall', 'year_numeric', 'reclat', 'reclong']]) dataset_complete = np.hstack((first_2_col, data_norm_minmax)) #np.savetxt("Meteorite_clean_minmax.csv", dataset_complete, delimiter=",") # + [markdown] id="NerOTN2vgEJy" colab_type="text" # **Start zero padding the data so that it fits into a 2D Convolutional filter** # + id="ZEmbmYxLzNUM" colab_type="code" outputId="2ebc0cfe-3dff-4945-f74b-8425313eabdd" colab={"base_uri": "https://localhost:8080/", "height": 35} ## Start zero padding the data so that it fits into a 2D Convolutional filter zeros_vert = np.zeros((3, 2)) zeros_hori = np.zeros((2, 7)) zeros_vert.shape, zeros_hori.shape, dataset_complete.shape # + id="3Bnkqr58tA9t" colab_type="code" outputId="ad95fe80-9618-4d80-a8b6-7e75c534a455" colab={"base_uri": "https://localhost:8080/", "height": 35} # Rehsape data and pad zeros to increase dimensionality dataset_intermed = dataset_complete.reshape(dataset_complete.shape[0], 3, 3).astype('float32') dataset_intermed.shape # + id="l9qQ9f600Vvj" colab_type="code" outputId="bd601fd4-dd5d-4931-a820-c0e689ad3523" colab={"base_uri": "https://localhost:8080/", "height": 35} # Pad zeros horizontally dataset_hor_pad = [] for i in range(dataset_intermed.shape[0]): dataset_hor_pad_1 = np.hstack((zeros_vert, dataset_intermed[i,:,:], zeros_vert)) dataset_hor_pad.append(dataset_hor_pad_1) dataset_hor_pad = np.array(dataset_hor_pad) dataset_hor_pad_1.shape, dataset_hor_pad.shape # + id="Qjtbdh854O2_" colab_type="code" outputId="bc3a7972-de71-4047-d008-4e9eb23d5165" colab={"base_uri": "https://localhost:8080/", "height": 35} # Pad zeros vertically dataset_ver_pad = [] for i in range(dataset_hor_pad.shape[0]): dataset_ver_pad_1 = np.vstack((zeros_hori, dataset_hor_pad[i,:,:], zeros_hori)) dataset_ver_pad.append(dataset_ver_pad_1) dataset_padded = np.array(dataset_ver_pad) dataset_ver_pad_1.shape, dataset_padded.shape # + id="Ko5z0Yqjvw_F" colab_type="code" cellView="both" outputId="2ed930aa-6c70-46ed-f0b1-05ba3c26acd6" colab={"base_uri": "https://localhost:8080/", "height": 274} dataset_padded[1111,:,:] # + [markdown] id="3Jbmom_jgbX9" colab_type="text" # **Reshape training data, define batch and buffer sizes** # + id="_iwOatDqmVmV" colab_type="code" colab={} # Reshape training data, define batch and buffer sizes dataset = dataset_padded.reshape(dataset_padded.shape[0], 7, 7, 1).astype('float32') # Initialize buffer and batch size BUFFER_SIZE = dataset_padded.shape[0] BATCH_SIZE = 256 # BATCH and SHUFFLE the data train_dataset = tf.data.Dataset.from_tensor_slices(dataset).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) # + id="INhzpXh6F1RP" colab_type="code" outputId="36cd4625-1447-426e-d045-eba7e3b487a9" colab={"base_uri": "https://localhost:8080/", "height": 35} dataset.shape # + [markdown] colab_type="text" id="WZjMLwFcJT8l" # ## Create the models # # Both the generator and discriminator are defined using the [Keras Sequential API](https://www.tensorflow.org/guide/keras#sequential_model). # + [markdown] colab_type="text" id="mI-Ne_5_JT8m" # ### The Generator # # The generator uses `tf.keras.layers.Conv2DTranspose` (upsampling) layers to produce an image from a seed (random noise). It starts with a **`Dense` layer that takes this seed as input**, then **upsamples it several times until it reaches the desired image size** of 28x28x1. Notice the `tf.keras.layers.LeakyReLU` activation for each layer, except the output layer which uses tanh. # + id="WbuMZGRLnaEE" colab_type="code" colab={} # Create generator def make_generator_model(): model = tf.keras.Sequential() model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,))) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU()) model.add(layers.Reshape((7, 7, 256))) assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)) assert model.output_shape == (None, 7, 7, 128) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU()) model.add(layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same', use_bias=False)) assert model.output_shape == (None, 7, 7, 64) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU()) model.add(layers.Conv2DTranspose(1, (5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh')) assert model.output_shape == (None, 7, 7, 1) return model # + [markdown] colab_type="text" id="3UYdT2TdJT8o" # **Use the (as yet untrained) generator to create an image.** # + id="tAGLa53Hq-54" colab_type="code" cellView="both" outputId="3a31c5ff-02c6-43e3-c439-ea93f624a55f" colab={"base_uri": "https://localhost:8080/", "height": 286} generator = make_generator_model() noise = tf.random.normal([1, 100]) generated_image = generator(noise, training=False) plt.imshow(generated_image[0, :, :, 0]) # + [markdown] colab_type="text" id="D_8k68hjJT8q" # ### The Discriminator # # The discriminator is a CNN-based image classifier. # + id="len8EoxnuKS-" colab_type="code" colab={} # Create a discriminator to police the generator (notice the input shape of the first Conv2D layer) def make_discriminator_model(): model = tf.keras.Sequential() model.add(layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same', input_shape=[7, 7, 1])) model.add(layers.LeakyReLU()) model.add(layers.Dropout(0.3)) model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')) model.add(layers.LeakyReLU()) model.add(layers.Dropout(0.3)) model.add(layers.Flatten()) model.add(layers.Dense(1)) return model # + [markdown] colab_type="text" id="GW-tP5LoJT8s" # **Use the (as yet untrained) discriminator to classify the generated images as real or fake. The model will be trained to output positive values for real images, and negative values for fake images.** # + id="aKewzWxktMzb" colab_type="code" cellView="both" outputId="1d54c328-847c-4ef3-ad9d-fa44c4ff3718" colab={"base_uri": "https://localhost:8080/", "height": 35} discriminator = make_discriminator_model() decision = discriminator(generated_image) print (decision) # + [markdown] colab_type="text" id="GHJw0oziJT8u" # ## Define the loss and optimizers # # Define loss functions and optimizers for both models. # # + colab_type="code" id="N7pxVNpJJT8u" colab={} # This method returns a helper function to compute cross entropy loss cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True) # + [markdown] colab_type="text" id="yS8AY32aJT8x" # ### Discriminator loss # # This method quantifies how well the discriminator is able to distinguish real images from fakes. It compares the discriminator's predictions on real images to an array of 1s, and the discriminator's predictions on fake (generated) images to an array of 0s. # + colab_type="code" id="yIuZ1jUYJT8y" colab={} def discriminator_loss(real_output, fake_output): real_loss = cross_entropy(tf.ones_like(real_output), real_output) fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output) total_loss = real_loss + fake_loss return total_loss # + [markdown] colab_type="text" id="ik5qxVj5JT81" # ### Generator loss # The generator's loss quantifies how well it was able to trick the discriminator. Intuitively, if the generator is performing well, the discriminator will classify the fake images as real (or 1). Here, we will compare the discriminators decisions on the generated images to an array of 1s. # + colab_type="code" id="V29EcGwtJT82" colab={} def generator_loss(fake_output): return cross_entropy(tf.ones_like(fake_output), fake_output) # + [markdown] colab_type="text" id="JlWL9N2LJT86" # The discriminator and the generator optimizers are different since we will train two networks separately. # + colab_type="code" id="pwAPk_rhJT87" colab={} generator_optimizer = tf.keras.optimizers.Adam(1e-4) discriminator_optimizer = tf.keras.optimizers.Adam(1e-4) # + [markdown] colab_type="text" id="abECcSA9JT89" # ### Save checkpoints # Save and restore models, which can be helpful in case a long running training task is interrupted. # + colab_type="code" id="R98MWKQ6JT8-" colab={} checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, discriminator_optimizer=discriminator_optimizer, generator=generator, discriminator=discriminator) # + [markdown] colab_type="text" id="hEG_qhsoJT9F" # ## Define the training loop # # # + id="WbOzaGf7wde8" colab_type="code" colab={} EPOCHS = 50 noise_dim = 100 num_examples_to_generate = 16 # We will reuse this seed overtime (so it's easier) # to visualize progress in the animated GIF) seed = tf.random.normal([num_examples_to_generate, noise_dim]) # + [markdown] colab_type="text" id="Chg4RIG-JT9J" # The training loop begins with generator receiving a random seed as input. That seed is used to produce an image. The discriminator is then used to classify real images (drawn from the training set) and fakes images (produced by the generator). The loss is calculated for each of these models, and the gradients are used to update the generator and discriminator. # + colab_type="code" id="Sfrgc2CsJT9K" colab={} # Notice the use of `tf.function` # This annotation causes the function to be "compiled". @tf.function def train_step(images): noise = tf.random.normal([BATCH_SIZE, noise_dim]) with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: generated_images = generator(noise, training=True) real_output = discriminator(images, training=True) fake_output = discriminator(generated_images, training=True) gen_loss = generator_loss(fake_output) disc_loss = discriminator_loss(real_output, fake_output) gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables) gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables) generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables)) discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables)) # + colab_type="code" id="X5bgFv7dJT9N" colab={} def train(dataset, epochs): for epoch in range(epochs): start = time.time() for image_batch in dataset: train_step(image_batch) # Produce images for the GIF as we go display.clear_output(wait=True) generate_and_save_images(generator, epoch + 1, seed) # Save the model every 15 epochs if (epoch + 1) % 15 == 0: checkpoint.save(file_prefix = checkpoint_prefix) print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start)) # Generate after the final epoch display.clear_output(wait=True) generate_and_save_images(generator, epochs, seed) # + [markdown] colab_type="text" id="9y0WxdT5JT9Q" # **Generate and save images** # # # + colab_type="code" id="hSm77QfuJT9R" colab={} def generate_and_save_images(model, epoch, test_input): # Notice `training` is set to False. # This is so all layers run in inference mode (batchnorm). predictions = model(test_input, training=False) print('prediction shape = ', predictions.shape) fig = plt.figure(figsize=(4,4)) for i in range(predictions.shape[0]): plt.subplot(4, 4, i+1) #plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray') plt.imshow(predictions[i, :, :, 0]) plt.axis('off') #plt.imshow(predictions[0, :, :, 0]) plt.savefig('image_at_epoch_{:04d}.png'.format(epoch)) plt.show() # + [markdown] colab_type="text" id="hVkUZNCAJT9S" # ## Train the model # Call the `train()` method defined above to train the generator and discriminator simultaneously. Note, training GANs can be tricky. It's important that the generator and discriminator do not overpower each other (e.g., that they train at a similar rate). # # At the beginning of the training, the generated image tiles will look like random noise. As training progresses, the tiles will look increasingly real This is the expectation when enough training data i.e. images, is provided. Usually with only a few images as input, the discriminator doesn't get the oppooertunity to police the generator properly. # + id="Nw4iLv21vSOi" colab_type="code" outputId="d8b0ddd4-8017-478b-d3ab-004a3af2194b" colab={"base_uri": "https://localhost:8080/", "height": 1000} # %%time train(train_dataset, EPOCHS) # + [markdown] colab_type="text" id="2u5VLnPxJT9U" # Restore the latest checkpoint. # + colab_type="code" id="oDx32LnJJT9U" colab={} checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
nasa-spaceapps/DEDOMENA_meteorite_dcgan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## E-Bike survey analysis # # This notebook contains my analysis for how to charactrize the survey data gathered by the City of Toronto regarding E-Bike usage, and to discern if there were any patterns in the responses which could predict a particular answer to the question **"Does your household have access to any of the following types of private motorized vehicles?"** # # The conclusions from this data analysis are presented in the **Conclusions** cell, at the bottom of the notebook. My responses to the challenge question are presented therein. I began by importing the tools needed to load the data and perform EDA: import pandas as pd import numpy as np from collections import OrderedDict, defaultdict survey_data = pd.read_csv("https://raw.githubusercontent.com/samtalasila/e-bike-survey-response-results/master/E-Bike_Survey_Responses.csv") print(survey_data.shape) survey_data.head() # ### First impression of the data and assumptions # # Based on my first examination of the data, the responses in each column can be modeled as one of three types: # - ordinal; ordered string responses # - binary; yes / no, male / female responses # - categorical; un-ordered string responses # # So the way forward to begin attacking the problem is to re-encode each column appropriately. The appropriate encoder for each response type (in scikit-learn anyhow) is: # - `CategoricalEncoder(encoding='ordinal')` for ordinal data # - `LabelEncoder(n=2)` for binary data # - `CategoricalEncoder(encoding='onehot')` for categorical data # # Except I don't have a version of scikit-learn that has ordinal encoding (it's in pre-release alpha version 0.20, I have 0.19.1) So I'll assume that all non-binary features are categorical features, ignoring the ordinal nature of many of the responses. # However I choose to represent the data, I should make sure to isolate the responses my model will try to predict. Also, we want to drop any rows where the response is missing: # + response_q = 'Does your household have access to any of the following private motorized vehicles?' positive_label = 'No - I do not have access to a private motorized vehicle' labels = survey_data[response_q] predictors = survey_data.drop(columns=[response_q, 'Timestamp']) missing_values_per_predictor = predictors.isna().sum() print("Original data was shape: ", survey_data.shape) print("Predictors have shape: ", predictors.shape) print("Found ", labels.isna().sum(), " rows with response missing") print("Found ", missing_values_per_predictor.sum(), " number of missing values total among all predictor columns") # - # Let's do a bit of simple cleaning. We will fill missing values with 'NA', and make sure that when we break down the label distribution, we get the expected number of positive and negative cases that together form the total: # + predictors = predictors.fillna(value='NA') print("Found ", len(set(labels)), "different responses") positives = 0 negatives = 0 for label in labels: if label.strip().startswith(positive_label): positives += 1 else: negatives += 1 print("Got ", positives, " matches to specific answer") print("Got ", negatives, " non-mathces to specific answer") print("Expected total of ", labels.shape[0], " responses, got ", positives + negatives) # - # ## Initial assumptions about the data are wrong # # We should pause here to reflect on a two points revealed by our EDA so far. # # 1. There are 123 different answers to the question we are trying to predict as response. We cannot hope to predict 123 separate classes with only 2238 data points, so we will treat this as a binary classification problem. # # 2. This makes me want to check the variability in the answers to each other question. I inferred from a scan of the first five records that the responses to each question were codified; that is, I could associate a categorical or ordinal code to each text response. But counting the number of different responses to each question shows that this is *not* the case: # + question_response_complexity_dict = OrderedDict() for k in predictors.keys(): question_response_complexity_dict[k] = "" print("Number of questions: ", len(predictors.keys())) print("Cardinality of responses to each survey question:\n") for k in question_response_complexity_dict.keys(): data = predictors[k] cardinality = len(set(data)) question_response_complexity_dict[k] = cardinality print(k, "\t", cardinality) # - # ## Revising my assumptions of the data # # It looks like only a small minority of questions have low cardinality: age, income, average commute distance, length of daily commute, aware of speed limits. What about the distribution of responses to each question? Let's look at a quick histogram of how frequently each response to the corresponding question appears in the data. We can do this for each question using `pd.Series.value_counts()` and arranging the histograms into a grid: # + # %matplotlib inline import matplotlib.pyplot as plt plt.style.use('seaborn-white') ### look at hist of responses to each predictor fig, ax = plt.subplots(5, 4, figsize=(14, 14)) fig.subplots_adjust(hspace=0.4, wspace=0.4) r = 0 c = 0 for i,k in enumerate(predictors.keys()): data = [d for _, d in predictors[k].value_counts().items()] names = [n for n, _ in predictors[k].value_counts().items()] r = i % 5 c = (i - r) % 4 title_length = min(len(k.split()),8) title_line_one = ' '.join(k.split()[0 : title_length // 2]) title_line_two = ' '.join(k.split()[title_length // 2 : title_length]) title = title_line_one + "\n" + title_line_two ax[r,c].bar([i for i in range(len(data))], data, color='gray') ax[r,c].set_title(title) # - # ## Obervations about the data, deciding on an approach to predict the response # # The histograms reveal a few things about the data: # # 1) Most of the questions have a few common responses (likely provided in the survey), followed by a long tail of principly unique responses that seem provided free-form. If I were to drop or group the free-form answers into one factor level (e.g 'other'), I could model most questions as categorical factors. # # 2) A second way would be to model each answer in the responses as a bag of words, and turn each answer in to a sparse bag of vectorized features. # # Without wanting to get too deep into reverse engineering the survey, I'll try to model the collection of responses first as a set of categorical variables, and then later as a stemmed bag of words. # ## Modeling predictors as categorical variables # + # Go through the predictors, find those answers in each question to be grouped into 'other' # N.B: using a poorly chosen heuristic of 4 is not great, but based on my reading of # the data manually, it seems an okay heuristic. # One of the ways to improve this would be to automatically determine an appropriate threshold via an information theoretic measure # for each predictor, but this is out of scope for a first attempt. replace_as_other = {k: [] for k in predictors.keys()} for k in predictors.keys(): replace_as_other[k] = [n for n, d in predictors[k].value_counts().items() if d < 4] # perform the in-place replacement for k in predictors.keys(): predictors.loc[predictors[k].isin(replace_as_other[k]), k] = 'unusual answer' # - # Let's re-examine the distribution of answers to each question now: # + # %matplotlib inline import matplotlib.pyplot as plt plt.style.use('seaborn-white') ### look at hist of responses to each predictor fig, ax = plt.subplots(5, 4, figsize=(14, 14)) fig.subplots_adjust(hspace=0.4, wspace=0.4) r = 0 c = 0 for i,k in enumerate(predictors.keys()): data = [d for _, d in predictors[k].value_counts().items()] names = [n for n, _ in predictors[k].value_counts().items()] r = i % 5 c = (i - r) % 4 title_length = min(len(k.split()),8) title_line_one = ' '.join(k.split()[0 : title_length // 2]) title_line_two = ' '.join(k.split()[title_length // 2 : title_length]) title = title_line_one + "\n" + title_line_two ax[r,c].bar([i for i in range(len(data))], data, color='gray') ax[r,c].set_title(title) # - # Great. Now, we encode the answers to each question as categotical 'one-hot' answers, and train models to accurately predict our response. Also we encode the labels to be predicted. # + from sklearn.feature_extraction import DictVectorizer from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer # Encode labels in the response label_encoder = {positive_label: 1} encoded_labels = [label_encoder.get(r,0) for r in labels] y = np.array(encoded_labels) # Encode the features in the predictors, imputing missing values encoder_pipeline = Pipeline([ ("vectorizer", DictVectorizer(sparse=False)), ("imputer", Imputer(missing_values=np.nan, strategy="mean", axis=0)) ]) features = predictors.to_dict('records') X = encoder_pipeline.fit_transform(features) # Make sure we get sensible features print("shape of X: ", X.shape) print("type of X: ", type(X)) print("Any NaN or infinity? ", np.isnan(X).sum()) # + from sklearn.model_selection import GridSearchCV, cross_val_score from sklearn.linear_model import LogisticRegressionCV from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier # CV on logistic regression lr_clf = LogisticRegressionCV() lr_clf.fit(X,y) # CV on n_estimators, max_depth for both GBC, RFC param_grid_dict = {'n_estimators': [5,10,20,50,100,200], 'max_depth': [2,4,6,8,10], 'min_samples_leaf': [1,3,5,10,15]} rfc = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0) gbc = GradientBoostingClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0) random_forest_clf = GridSearchCV(estimator=rfc, param_grid=param_grid_dict, n_jobs=2) random_forest_clf.fit(X, y) gbc_clf = GridSearchCV(estimator=gbc, param_grid=param_grid_dict, n_jobs=2) gbc_clf.fit(X,y) # - # ## Examining the model performances and the best features # # Let's take a look at a crude breakdown of the best scoring models, and what hyperparameters define them: # + print("Model: Logistic Regression") print("Model best params: ", lr_clf.C_) print("Model best score: ", lr_clf.score(X,y)) print("\n") for name, model in zip(["Random Forest", "Gradient Boosted Classifier"],[random_forest_clf, gbc_clf]): print("Model: ", name) print("Model best params: ", model.best_params_) print("Model best score: ", model.best_score_) print("\n") # - # ## General comments # # Since we aren't training this model to generalize to new data but rather to understand our survey data set, I cheated and did not follow best data hygene, which is to make sure training, test and validation data are strictly exclusive. Despite this we can still learn a lot from examining the distribution of model scores. The reported best score is accuracy over a held out fold of training data, which for binary classification is the proportion of correctly classified elements. # # Each classifier I trained performed about equally, with perhaps a minor difference between the random forests and the logistic regression classifier, but on a data set this small with the cross-validation scheme used, we cannot say with certainty. Let's proceed to unpack the most important features by classifier. # + # Unpack the Logistic Regression classifier best = lr_clf.coef_ feature_importance = 100.0 * (best / best.max()) # Retrieve the feature names feature_names = encoder_pipeline.named_steps['vectorizer'].get_feature_names() indices = np.argsort(np.abs(feature_importance)) top_n = 10 print("\nTop ", top_n, " feature ranking:\n") for f in range(top_n): print(f + 1, feature_names[indices[0,f]], feature_importance[0,indices[0,f]]) # + fig, ax = plt.subplots(1, 2, figsize=(12, 2)) fig.subplots_adjust(hspace=0.4, wspace=0.4) from sklearn.ensemble import partial_dependence for i, (name, model) in enumerate(zip(["Random Forest", "Gradient Boosted Classifier"],[random_forest_clf, gbc_clf])): best = model.best_estimator_ importances = best.feature_importances_ #std = np.std([tree.feature_importances_ for tree in best.estimators_], # axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking print("\nTop ", top_n, name, " feature ranking:") print("(indexed feature, question and response, feature weight) \n") for f in range(top_n): print("*",indices[f],"*", feature_names[indices[f]], importances[indices[f]]) # Plot the feature ranking title = name + " feature importances" ax[i].bar(range(top_n), importances[indices[0:top_n]], color="r", align="center") ax[i].set_xticks(range(top_n)) ax[i].set_xticklabels(indices[0:top_n]) ax[i].set_title(title) # - # ## Feature importances # # The plots above reveal the most important 10 features for the random forest model as well as the gradient boosted regression tree classifier model. The overlap between both sets of features is large, which is encouraging. However, it only identifies those features which are most informative for constructing the trees used to separate positive from negative responders, and not the importance of the directionality of the response. # # To find out which features drive a responsdent to answer affirmatively (i.e "*No - I do not have access to a private motorized vehicle*" to the question "**'Does your household have access to any of the following private motorized vehicles?'**"), we need to resort to a different analysis. Fortunately, the gradient boosted regression tree classifier model allows us to efficiently calculate just this relationship, termed *partial dependence*. We do so below for the top five features: # + # shorten the names of the features, removing non-informative words import nltk nltk.download('stopwords') from nltk.corpus import stopwords stops = set(stopwords.words('english')) def strip_to_three_max(q_or_a): ''' strip a multi word question or answer down to a max length of its first three words ''' words = [word for word in q_or_a.split() if word not in stops] new_len = min(3, len(words)) return " ".join([w for w in words[0:new_len]]) def repro(name): ''' shorten the feature name ''' parts = name.strip().split("=") if len(parts) != 2: return name q, a = parts return strip_to_three_max(q) + " = " + strip_to_three_max(a) reprocessed_features = [repro(f) for f in feature_names] # + from sklearn.ensemble import partial_dependence as pdep top_n = 5 model_name = "Gradient Boosted Regression Tree" model = gbc_clf best = model.best_estimator_ importances = best.feature_importances_ indices = np.argsort(importances)[::-1] top_n_features = [indices[f] for f in range(top_n)] # Plot the feature ranking title = name + " feature importances" fig, axs = pdep.plot_partial_dependence(best, X, top_n_features, feature_names=reprocessed_features, n_cols=top_n) fig.set_size_inches(16, 6) fig.suptitle('Q: ' + response_q + '\n' + "A: " + positive_label) plt.subplots_adjust(wspace=0.75) # - # ## Conclusions # # From the scikit-learn [website](http://scikit-learn.org/stable/modules/ensemble.html#partial-dependence): *"Partial dependence plots (PDP) show the dependence between the target response and a set of ‘target’ features, marginalizing over the values of all other features (the ‘complement’ features). Intuitively, we can interpret the partial dependence as the expected target response [1] as a function of the ‘target’ features [2]."* # # So what can we learn from these five plots? The absolute value of the Y-axis in the plots is less informative for us than the slope, but we can use the slope to gain some insight from the model. I will comment on each figure in turn. I will say that the respondent answers affirmativey if they respond *No - I do not have access to a private motorized vehicle* to the target question **'Does your household have access to any of the following private motorized vehicles?'**: # # 1) If the respondent said they commonly used a private motor vehicle, they were not going to answer affirmatively. This is an encouraging sanity check on the model. # # 2) Similarly, if the respondent came from a household with a combined income of over $100k+, it is reasonable to expect they either owned a private vehicle. # # 3) If the respondent most frequently used a bicycle for transportation, they were more likely to answer affirmatively. # # 4) If the respondent had a household income of between 20k-39k, they were more likely to respond affirmatively. # # 5) There is little we can say about the bylaws definition, since the slope is quite flat. # # This is only a cursory look at the features. Partial dependence can also be calcuated for pairs or triples of interacting features. While acknowledging this would be the next natural step, I think it is out of scope for this exercise. # ### Q : Which models did you consider? Which Model did you choose and why? How good was it? # # Since the question posed was a binary decision problem, I tried three possible models: # # 1) A logistic regression model # # 2) A random forest model # # 3) A gradient boosted regression trees model # # I chose the logistic regression model as a baseline; it is a simple model, but one whose model structure is flat, and where the regularization on the parameters has to be extensive if you want an interpretation of the model decisions. # # I chose to also look at both tree based methods because the nodes chosen to represent decision rules during the fitting process yields a ready interpretation: the tree splits on variables that separate the data by target response, and the response to each predictor question tell you which responses were most informative. In addition, they are both ensemble models which are known to # # Furthermore, the gradient boosting regression trees model admits efficient calculation of partial dependence, which is a more direct way to determine the effect, positive or negative, of a particular response to a particular predictor question. # # As to how good the model was, the GBRT was statistically close to the performance of the logistic regression baseline. This suggests to me that it is 'good enough' without further re-training the model with best hyperparameters for `{n_estimators, max_depth, min_samples_leaf}` and then re-tuning the learning rate. Another method for boosting performance would be to create a larger ensemble of all three models, that predicts class by majority vote. The trade off here is that for a (potential) amount of increased accuracy, we make the problem of model interpretation much more difficult. # ### What was the pattern of missing values? Was it random? Could those be inferred from the context? # # See the accompanying `Missing-data` notebook for details. For a given element of the survey `(i,j)`, I looked at two simple tests to determine if there was any dependency of the missingness on row or column. There were 2238 rows in the survey, 22 columns, and a total of 135 missing values in the data. # # Counting by row, most rows had no missing data. Of those that had any, the range was from 1 missing value up to a maximum of 9 missing values [see second figure]. Based on that, I saw no evidence to characterize the row-wise missing data pattern further. # # The columns, however, show a large bias for missing data in the question "What is your household income?" [see first figure]. There were some minor effects for questions "Sex", "What level of education have you reached?", and "Which category best describes your employment?". # # All together, the column-wise sums of missing data suggest we can rule out a model of missing data in which data is missing completely at random (i.e with no dependency on either row, column or value). The strong observed frequency of missing data for the question "What is your household income?" relative to all others suggests that the probability of data to be missing conditioned on a certain column is not the same across all columns. I am not sure if this is enough evidence to support the hypothesis that people were reluctant to fill in this question because of their household income status. # # I am not sure what is meant by the third part of the question "Could those be inferred from the context?": do you mean could the data be imputed? Or could the likelihood of a question lacking an answer given other answers by the same respondent? In the second case, I suspect we do not have enough data to support such a hypothesis. There is just too little data. # ### Which features were significant in predicting the target response? # # This question was central to my analysis and is treated extensively under the **General comments** and **Feature importances** cells of this notebook. I won't repeat the results here. # ### If you could re-design the survey for next year, what question(s) would you add or remove in order to improve the precision of the prediction? # # I do not have any experience in survey design, but I will try to answer as best I can. If the desired effect of the survey is to figure out how to predit whether the respondents do not have access to a private motorized vehicle, then this should simply be included as a yes/no question. I also suggest that instead of asking respondents to provide where they live by geographical region, why not instead ask them to provide a postal code. This would allow for the survey analysis to be performed on several different levels (from the very small to the more broad areas through mergeing proximal codes).
bike_exercise/E-Bike-Survey.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Load packages import matplotlib.pyplot as plt import pickle from FDApy.preprocessing.dim_reduction.fpca import UFPCA from FDApy.representation.functional_data import DenseFunctionalData from FDApy.clustering.fcubt import Node, FCUBT from sklearn.metrics import adjusted_rand_score from sklearn.model_selection import train_test_split from sklearn.utils import shuffle # - # Load data with open('./data/scenario_1.pkl', 'rb') as f: data_fd = pickle.load(f) with open('./data/labels.pkl', 'rb') as f: labels = pickle.load(f) # + # Split data into train/test set values, new_labels = shuffle(data_fd.values, labels, random_state=42) X_train, X_test, y_train, y_test = train_test_split(values, new_labels, test_size=0.33, random_state=42) data_train = DenseFunctionalData(data_fd.argvals, X_train) data_test = DenseFunctionalData(data_fd.argvals, X_test) # - # ## fCUBT clustering # Initialization of the tree root_node = Node(data_train, is_root=True) fcubt = FCUBT(root_node=root_node) # Fit the tree fcubt.grow(n_components=0.95) fcubt.join(n_components=0.95) # ## Prediction on the test set # Perform the prediction on the test set pred_test = fcubt.predict(data_test, step='join') res = [] for idx in range(1, len(pred_test) + 1): res.append(adjusted_rand_score(y_test[:idx], pred_test[:idx])) # Plot the results plt.plot(res) plt.ylabel('ARI', size=16) plt.xlabel('Size of the online dataset', size=16) _ = plt.show()
scenario_1/08-prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Script to process MB dataset import pandas as pd import sys import numpy as np sys.path.append("C:/Users/sumaiyah/OneDrive - University Of Cambridge/Project/DNN-RE-data/preprocessing/raw_data") raw_data = pd.read_csv('MB.csv') from MB_helpers import get_data mb_data = get_data(raw_data) mb_data.keys() rna_col_names = pd.read_csv('MB-GE-ER.csv').columns[:-1] rna_col_names # # MB-DR rna_data_df = pd.DataFrame(mb_data['rnanp'], columns=rna_col_names) clin_data_df = pd.DataFrame(mb_data['clin']) dr_data_df = pd.DataFrame(mb_data['drnp'], columns=['DR']) mb_dr_df = pd.concat([rna_data_df, clin_data_df, dr_data_df], axis=1) mb_dr_df.to_csv('MB-DR.csv', index=False) # # MB-GE-DR import sys sys.path.append("../../DNN-RE/src") import pandas as pd raw_data = pd.read_csv('MBdata_33CLINwMiss_1KfGE_1KfCNA.csv') rna_data_df = raw_data.iloc[:, 34:1034] dr_data_df = raw_data.loc[:, "DR"] mb_ge_dr_df = pd.concat([rna_data_df, dr_data_df], axis=1) mb_ge_dr_df.to_csv('MB-GE-DR.csv', index=False) dr_data_df.value_counts() raw_data["DR"] j=list(raw_data["iC10"]) from collections import Counter Counter(j) print(*raw_data.columns) def load_split_indices(file_path, fold_index=0): """ Args: file_path: path to split indices file fold_index: index of the fold whose train and test indices you want Returns: train_index: list of integer indices for train data test_index: list of integer indices for test data File of the form train 0 1 0 2 ... test 3 4 6 ... train 1 5 2 ... test 6 8 9 ... ... """ with open(file_path, 'r') as file: lines = file.readlines() assert len(lines) >= (2 * fold_index) + 2, 'Error: not enough information in fold indices file %d < % d' \ % (len(lines), (2 * fold_index) + 2) train_index = lines[(fold_index * 2)].split(' ')[1:] test_index = lines[(fold_index * 2) + 1].split(' ')[1:] # Convert string indices to ints train_index = [int(i) for i in train_index] test_index = [int(i) for i in test_index] return train_index, test_index DATASET_DP = '../../MB-GE-DR/' CV_DP = DATASET_DP + 'cross_validation/' N_FOLD_CV_DP = CV_DP + '5_folds/' N_FOLD_CV_SPLIT_INDICIES_FP = N_FOLD_CV_DP + 'data_split_indices.txt' DATA_FP = DATASET_DP + 'data.csv' train_index, test_index = load_split_indices(N_FOLD_CV_SPLIT_INDICIES_FP, fold_index=0) # + import pandas as pd raw_data = pd.read_csv('MBdata_33CLINwMiss_1KfGE_1KfCNA.csv') ic = pd.DataFrame(raw_data["iC10"]) data = pd.read_csv(DATA_FP) comb = pd.concat([data, ic], axis=1) # - X = comb.drop(["DR"], axis=1).values y = comb["DR"].values X_test, y_test = X[test_index], y[test_index] X_test X_test.shape # + vv = comb.drop(["DR"], axis = 1) vj = vv.iloc[:1, :].values vj[0,99] # import sys # import numpy # numpy.set_printoptions(threshold=sys.maxsize) # vj # - gh = data.drop(["DR"], axis=1) type(gh) x = "4ER-" if x == "4ER+": print("yes") else: print("no") t = [1,2,3,4,5,6,7,8,9,0] for i in t: z = i + 5 z emp = set() ex = {1,2,3} new = {5,6} newish = {7,8} vnew ={9, 10} for i in [new, newish, vnew]: emp = emp.union(i) emp f = "fg" type(f) p = [1,3,90] o = [45,49,12] p = p+o p raw_data = pd.read_csv('MBdata_33CLINwMiss_1KfGE_1KfCNA.csv') drz = {"1":1, "2":2, "3":3, "4ER+":41, "4ER-":42, "5":5, "6":6, "7":7, "8":8, "9":9, "10":10} raw_data.replace({"iC10":drz}, inplace=True) raw_data["iC10"] raw_data = pd.read_csv('MBdata_33CLINwMiss_1KfGE_1KfCNA.csv') ic10 = pd.get_dummies(raw_data["iC10"], prefix = "iC10_", dummy_na = False) ic10.head(5) set1 ={4,5,6} set2=set1 set2 set3={1,2,3} set2.union(set3) set1 set2 set3 pip install --upgrade scikit-learn import sklearn from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier from sklearn.tree.export import export_text # iris = load_iris() # X = iris['data'] # y = iris['target'] # decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2) # decision_tree = decision_tree.fit(X, y) # r = export_text(decision_tree, feature_names=iris['feature_names']) # print(r) import sklearn print(sklearn.__version__) from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier from sklearn.tree.export import export_text iris = load_iris() X = iris['data'] y = iris['target'] decision_tree = DecisionTreeClassifier(random_state=0, max_depth=3) decision_tree = decision_tree.fit(X, y) r = export_text(decision_tree, feature_names=iris['feature_names']) print(r) # + import numpy as np from sklearn.tree import _tree def tree_to_code(tree, feature_names): tree_ = tree.tree_ feature_name = [feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!" for i in tree_.feature] print("def tree({}):".format(", ".join(feature_names))) def recurse(node, depth): indent = " " * depth if tree_.feature[node] != _tree.TREE_UNDEFINED: name = feature_name[node] threshold = tree_.threshold[node] print("{}if {} <= {}:".format(indent, name, threshold)) recurse(tree_.children_left[node], depth + 1) print("{}else: # if {} > {}".format(indent, name, threshold)) recurse(tree_.children_right[node], depth + 1) else: print("{}return {}".format(indent, np.argmax(tree_.value[node]))) recurse(0, 1) # - from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier iris = load_iris() X = iris['data'] y = iris['target'] decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2) decision_tree = decision_tree.fit(X, y) tree_to_code(decision_tree,feature_names=iris['feature_names'] ) # + from sklearn.datasets import load_iris from sklearn.tree import DecisionTreeClassifier from sklearn.tree import _tree iris = load_iris() X = iris['data'] y = iris['target'] decision_tree = DecisionTreeClassifier(random_state=0, max_depth=2) decision_tree = decision_tree.fit(X, y) def tree_to_code(tree, feature_names): tree_ = tree.tree_ feature_name = [ feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!" for i in tree_.feature ] pathto=dict() global k k = 0 def recurse(node, depth, parent): global k indent = " " * depth if tree_.feature[node] != _tree.TREE_UNDEFINED: name = feature_name[node] threshold = tree_.threshold[node] s= "{} <= {} ".format( name, threshold, node ) if node == 0: pathto[node]=s else: pathto[node]=pathto[parent]+' & ' +s recurse(tree_.children_left[node], depth + 1, node) s="{} > {}".format( name, threshold) if node == 0: pathto[node]=s else: pathto[node]=pathto[parent]+' & ' +s recurse(tree_.children_right[node], depth + 1, node) else: k=k+1 print(node,')',pathto[parent], tree_.value[node]) recurse(0, 1, 0) tree_to_code(decision_tree,feature_names=iris['feature_names']) # - tree_to_code(decision_tree, iris['feature_names'], y) tree_to_code(decision_tree, iris['feature_names'], y) from sklearn.tree import _tree _tree.TREE_UNDEFINED d = [[50.0 0.0 0.0]] def tree_to_code(tree, feature_names, Y): tree_ = tree.tree_ feature_name = [ feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!" for i in tree_.feature ] i =0 name_to_id_map = dict() listtoset = set(feature_name) for f in listtoset: if (f != "undefined!"): name_to_id_map[f]=i i+=1 pathto=dict() terms = set() clause_con_dict = dict() def recurse(node, depth, parent): indent = " " * depth if tree_.feature[node] != _tree.TREE_UNDEFINED: name = feature_name[node] threshold = tree_.threshold[node] # s= "{} <= {} ".format( name, threshold, node ) s = Term(Neuron(0, listtoset(name)), '<=', threshold) if node == 0: pathto[node]={s} else: pathto[node]=pathto[parent].union(s) recurse(tree_.children_left[node], depth + 1, node) # s="{} > {}".format( name, threshold) s = Term(Neuron(0, listtoset(name)), '<=', threshold) if node == 0: pathto[node]={s} else: pathto[node]=pathto[parent].union(s) recurse(tree_.children_right[node], depth + 1, node) else: clause = ConjunctiveClause(terms=pathto[parent], remove_redundant_terms=True) conclusion = np.argmax(tree_.value[node]) clause_con_dict[clause] = conclusion return clause_con_dict recurse(0, 1, 0) tree_to_code(decision_tree, iris['feature_names'], y) a = set() a = a.union({1,3,4}) a a.union(1) b = 5 t = {b} t type(t) tiyarajj = {5,6,7} z = tiyarajj.union(19) type(z) z = tiyara.union({20}) tiyara A = {1,3,4} A+6 listtest =[6,8,9] listtest.append(6) listtest terms = set() terms.add(7) terms
data/preprocessing/raw_data/.ipynb_checkpoints/MB-process-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Customer Churn Prediction with XGBoost # _**Using Gradient Boosted Trees to Predict Mobile Customer Departure**_ # # --- # # --- # # ## Contents # # 1. [Background](#Background) # 1. [Data](#Data) # 1. [Train](#Train) # 1. [Host](#Host) # 1. [Monitor](#Extensions) # # --- # # ## Background # # _This notebook has been adapted from an [AWS blog post](https://aws.amazon.com/blogs/ai/predicting-customer-churn-with-amazon-machine-learning/)_ # # Losing customers is costly for any business. Identifying unhappy customers early on gives you a chance to offer them incentives to stay. This notebook describes using machine learning (ML) for the automated identification of unhappy customers, also known as customer churn prediction. It uses various features of SageMaker for managing experiments, training the model and monitoring the model after it has been deployed. # # Let's import the Python libraries we'll need for the remainder of the exercise. import sys # !{sys.executable} -m pip install sagemaker -U # !{sys.executable} -m pip install sagemaker-experiments # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import io import os import sys import time import json from IPython.display import display from time import strftime, gmtime import boto3 import re import sagemaker from sagemaker import get_execution_role from sagemaker.predictor import csv_serializer from sagemaker.debugger import rule_configs, Rule, DebuggerHookConfig from sagemaker.model_monitor import DataCaptureConfig, DatasetFormat, DefaultModelMonitor from sagemaker.s3 import S3Uploader, S3Downloader from smexperiments.experiment import Experiment from smexperiments.trial import Trial from smexperiments.trial_component import TrialComponent from smexperiments.tracker import Tracker # - sess = boto3.Session() sm = sess.client('sagemaker') role = sagemaker.get_execution_role() # --- # ## Data # # Mobile operators have historical records on which customers ultimately ended up churning and which continued using the service. We can use this historical information to construct train an ML model that can predict customer churn. After training the model, we can pass the profile information of an arbitrary customer (the same profile information that we used to train the model) to the model, and have the model predict whether this customer is going to churn. # # The dataset we use is publicly available and was mentioned in the book [Discovering Knowledge in Data](https://www.amazon.com/dp/0470908742/) by <NAME>. It is attributed by the author to the University of California Irvine Repository of Machine Learning Datasets. We already have the dataset downloaded and processed. It's been split into a training set and a validation set. To see how the dataset was preprocessed take a look at this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_applying_machine_learning/xgboost_customer_churn/xgboost_customer_churn.ipynb). # # Now we'll upload the files to S3 for training but first we will create an S3 bucket for the data if one does not already exist. # + account_id = sess.client('sts', region_name=sess.region_name).get_caller_identity()["Account"] bucket = 'sagemaker-studio-{}-{}'.format(sess.region_name, account_id) prefix = 'xgboost-churn' try: if sess.region_name == "us-east-1": sess.client('s3').create_bucket(Bucket=bucket) else: sess.client('s3').create_bucket(Bucket=bucket, CreateBucketConfiguration={'LocationConstraint': sess.region_name}) except Exception as e: print(e) S3Uploader.upload('data/train.csv', 's3://{}/{}/{}'.format(bucket, prefix,'train')) S3Uploader.upload('data/validation.csv', 's3://{}/{}/{}'.format(bucket, prefix,'validation')) # - # --- # ## Train # # Moving onto training! We will be training a class of models known as gradient boosted decision trees on the data we just uploaded. # # Because we're using XGBoost, first we'll need to specify the locations of the XGBoost algorithm containers. from sagemaker.amazon.amazon_estimator import get_image_uri docker_image_name = get_image_uri(boto3.Session().region_name, 'xgboost', repo_version='0.90-2') # Then, because we're training with the CSV file format, we'll create `s3_input`s that our training function can use as a pointer to the files in S3. s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train'.format(bucket, prefix), content_type='csv') s3_input_validation = sagemaker.s3_input(s3_data='s3://{}/{}/validation/'.format(bucket, prefix), content_type='csv') # ### Experiment Tracking # # SageMaker experiment management now allows us to keep track of model training, organize related models together, log model configuration, parameters, and metrics in order to reproduce and iterate on previous models and compare models. We will be creating a single experiment to keep track of the different approaches we will try to train the model. # # Each approach or training code we run will be an experiment trial and we will be able to compare different trials in SageMaker studio. # # Let's create the experiment now. # # # + sess = sagemaker.session.Session() create_date = strftime("%Y-%m-%d-%H-%M-%S", gmtime()) customer_churn_experiment = Experiment.create(experiment_name="customer-churn-prediction-xgboost-{}".format(create_date), description="Using xgboost to predict customer churn", sagemaker_boto_client=boto3.client('sagemaker')) # - # ### Hyperparameters # Now, we can specify our XGBoost hyperparameters. A few key hyperparameters are: # - `max_depth` controls how deep each tree within the algorithm can be built. Deeper trees can lead to better fit, but are more computationally expensive and can lead to overfitting. There is typically some trade-off in model performance that needs to be explored between a large number of shallow trees and a smaller number of deeper trees. # - `subsample` controls sampling of the training data. This technique can help reduce overfitting, but setting it too low can also starve the model of data. # - `num_round` controls the number of boosting rounds. This is essentially the subsequent models that are trained using the residuals of previous iterations. Again, more rounds should produce a better fit on the training data, but can be computationally expensive or lead to overfitting. # - `eta` controls how aggressive each round of boosting is. Larger values lead to more conservative boosting. # - `gamma` controls how aggressively trees are grown. Larger values lead to more conservative models. # # More detail on XGBoost's hyperparmeters can be found on their GitHub [page](https://github.com/dmlc/xgboost/blob/master/doc/parameter.md). hyperparams = {"max_depth":5, "subsample":0.8, "num_round":100, "eta":0.2, "gamma":4, "min_child_weight":6, "silent":0, "objective":'binary:logistic'} # ### Trial 1 - XGBoost in Algorithm mode # # For our first trial, we will use the built-in xgboost container to train a model without supplying any additional code. This way, we can use XGBoost to train and deploy a model as you would other built-in Amazon SageMaker algorithms. # # We will create a new `Trial` object for this and associate the trial with the experiment we created earlier. To train the model we will create an estimator and specify a few parameters like what type of training instances we'd like to use and how many as well as where the trained model artifacts should be stored. # # We will also associate the training job (when we call `estimator.fit`) with the experiment trial that we just created. # + trial = Trial.create(trial_name="algorithm-mode-trial-{}".format(strftime("%Y-%m-%d-%H-%M-%S", gmtime())), experiment_name=customer_churn_experiment.experiment_name, sagemaker_boto_client=boto3.client('sagemaker')) xgb = sagemaker.estimator.Estimator(image_name=docker_image_name, role=role, hyperparameters=hyperparams, train_instance_count=1, train_instance_type='ml.m4.xlarge', output_path='s3://{}/{}/output'.format(bucket, prefix), base_job_name="demo-xgboost-customer-churn", sagemaker_session=sess) xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}, experiment_config={ "ExperimentName": customer_churn_experiment.experiment_name, "TrialName": trial.trial_name, "TrialComponentDisplayName": "Training", } ) # - # Once the training job kicks off and succeeds, you should be able to view metrics, logs and graphs related to the trial in experiments tab in SageMaker Studio # ### Trial 2 - XGBoost in Framework mode # # To get even more flexibility we will try to train a similar model but using XGBoost in framework mode. This way of using XGBoost should be to familiar to users who have worked with the open source XGBoost. Using XGBoost as a framework provides more flexibility than using it as a built-in algorithm as it enables more advanced scenarios that allow pre-processing and post-processing scripts to be incorporated into your training script. Specifically, we will be able to specify a list of rules that we want the SageMaker Debugger to evaluate our training process against. # ### Specify SageMaker Debug Rules # # Amazon SageMaker now enables debugging of machine learning models during training. During training the debugger periodicially saves tensors, which fully specify the state of the machine learning model at that instance. These tensors are saved to S3 for analysis and visualization to diagnose training issues using SageMaker studio. # # In order to enable automated detection of common issues during machine learning training, the SageMaker debugger also allows you to attach a list of rules to evaluate the training job against. # # Some rule configs that apply to XGBoost include `AllZero`, `ClassImbalance`, `Confusion`, `LossNotDecreasing`, `Overfit`, `Overtraining`, `SimilarAcrossRuns`, `TensorVariance`, `UnchangedTensor`, `TreeDepth`. # # Here we will use the just the `LossNotDecreasing` rule which is triggered if the loss at does not decrease monotonically at any point during training. Let's create the rule now. debug_rules = [Rule.sagemaker(rule_configs.loss_not_decreasing())] # ### Fit Estimator # # In order to use XGBoost as a framework you need to specify an entry-point script that can incorporate additional processing into your training jobs. # # We have made a couple of simple changes to the enable the SageMaker Debugger `smdebug`. Here we created a SessionHook which we pass as a callback function when creating a Booster. We passed a SaveConfig object telling the hook to save the evaluation metrics, feature importances, and SHAP values at regular intervals. Note that Sagemaker-Debugger is highly configurable, you can choose exactly what to save. The changes are described in a bit more detail below after we train this example as well as in even more detail in our [Developer Guide for XGBoost](https://github.com/awslabs/sagemaker-debugger/tree/master/docs/xgboost). # !pygmentize xgboost_customer_churn.py # Let's create our Framwork estimator and call `fit` to start the training job. As before, we will create a separate trial for this run so that we can compare later using SageMaker Studio. Since we are running in framework mode we also need to pass additional parameters like the entry point script, and the framework version to the estimator. # # As training progresses, you will be able to see logs from the SageMaker debugger evaluating the rule against the training job. # + entry_point_script = "xgboost_customer_churn.py" trial = Trial.create(trial_name="framework-mode-trial-{}".format(strftime("%Y-%m-%d-%H-%M-%S", gmtime())), experiment_name=customer_churn_experiment.experiment_name, sagemaker_boto_client=boto3.client('sagemaker')) framework_xgb = sagemaker.xgboost.XGBoost(image_name=docker_image_name, entry_point=entry_point_script, role=role, framework_version="0.90-2", py_version="py3", hyperparameters=hyperparams, train_instance_count=1, train_instance_type='ml.m4.xlarge', output_path='s3://{}/{}/output'.format(bucket, prefix), base_job_name="demo-xgboost-customer-churn", sagemaker_session=sess, rules=debug_rules ) framework_xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}, experiment_config={ "ExperimentName": customer_churn_experiment.experiment_name, "TrialName": trial.trial_name, "TrialComponentDisplayName": "Training", }) # - # --- # ## Host # # Now that we've trained the model, let's deploy it to a hosted endpoint. In other to monitor the model once it is hosted and serving requests, we will also add configurations to capture data being sent to the endpoint. # + data_capture_prefix = '{}/datacapture'.format(prefix) endpoint_name = "demo-xgboost-customer-churn-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) print("EndpointName={}".format(endpoint_name)) # - xgb_predictor = xgb.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge', endpoint_name=endpoint_name, data_capture_config=DataCaptureConfig(enable_capture=True, sampling_percentage=100, destination_s3_uri='s3://{}/{}'.format(bucket, data_capture_prefix) ) ) # ### Invoke the deployed model # # Now that we have a hosted endpoint running, we can make real-time predictions from our model very easily, simply by making an http POST request. But first, we'll need to setup serializers and deserializers for passing our `test_data` NumPy arrays to the model behind the endpoint. xgb_predictor.content_type = 'text/csv' xgb_predictor.serializer = csv_serializer xgb_predictor.deserializer = None # Now, we'll loop over our test dataset and collect predictions by invoking the XGBoost endpoint: # + print("Sending test traffic to the endpoint {}. \nPlease wait for a minute...".format(endpoint_name)) with open('data/test_sample.csv', 'r') as f: for row in f: payload = row.rstrip('\n') response = xgb_predictor.predict(data=payload) time.sleep(0.5) # - # ### Verify data capture in S3 # # Since we made some real-time predictions by sending data to our endpoint we should have also captured that data for monitoring purposes. # # Let's list the data capture files stored in S3. You should expect to see different files from different time periods organized based on the hour in which the invocation occurred. The format of the s3 path is: # # `s3://{destination-bucket-prefix}/{endpoint-name}/{variant-name}/yyyy/mm/dd/hh/filename.jsonl` current_endpoint_capture_prefix = '{}/{}'.format(data_capture_prefix, endpoint_name) print("Found Data Capture Files:") capture_files = S3Downloader.list("s3://{}/{}".format(bucket, current_endpoint_capture_prefix)) print(capture_files) # All the data captured is stored in a SageMaker specific json-line formatted file. Next, Let's take a quick peek at the contents of a single line in a pretty formatted json so that we can observe the format a little better. # + capture_file = S3Downloader.read_file(capture_files[-1]) print("=====Single Data Capture====") print(json.dumps(json.loads(capture_file.split('\n')[0]), indent=2)[:2000]) # - # As you can see, each inference request is captured in one line in the jsonl file. The line contains both the input and output merged together. In our example, we provided the ContentType as `text/csv` which is reflected in the `observedContentType` value. Also, we expose the enconding that we used to encode the input and output payloads in the capture format with the `encoding` value. # # To recap, we have observed how you can enable capturing the input and/or output payloads to an Endpoint with a new parameter. We have also observed how the captured format looks like in S3. Let's continue to explore how SageMaker helps with monitoring the data collected in S3. # --- # ## Model Monitoring - Baselining and continous monitoring # In addition to collecting the data, SageMaker provides capability for you to monitor and evaluate the data observed by the Endpoints. For this : # 1. We need to create a baseline with which we compare the realtime traffic against. # 1. Once a baseline is ready, we can set up a schedule to continously evaluate/compare against the baseline. # ### 1. Constraint suggestion with baseline/training dataset # # The training dataset with which you trained the model is usually a good baseline dataset. Note that the training dataset data schema and the inference dataset schema should exactly match (ie number and type of the features). # # From our training dataset let's ask SageMaker to suggest a set of baseline `constraints` and generate descriptive `statistics` to explore the data. For this example, let's upload the training dataset which was used to train model. We will use the dataset file with column headers to have descriptive feature names. # + baseline_prefix = prefix + '/baselining' baseline_data_prefix = baseline_prefix + '/data' baseline_results_prefix = baseline_prefix + '/results' baseline_data_uri = 's3://{}/{}'.format(bucket,baseline_data_prefix) baseline_results_uri = 's3://{}/{}'.format(bucket, baseline_results_prefix) print('Baseline data uri: {}'.format(baseline_data_uri)) print('Baseline results uri: {}'.format(baseline_results_uri)) baseline_data_path = S3Uploader.upload("data/training-dataset-with-header.csv", baseline_data_uri) # - # #### Create a baselining job with training dataset # # Now that we have the training data ready in S3, let's kick off a job to `suggest` constraints. The convenient helper kicks off a `ProcessingJob` using a SageMaker provided ProcessingJob container to generate the constraints. # + my_default_monitor = DefaultModelMonitor(role=role, instance_count=1, instance_type='ml.m5.xlarge', volume_size_in_gb=20, max_runtime_in_seconds=3600, ) baseline_job = my_default_monitor.suggest_baseline(baseline_dataset=baseline_data_path, dataset_format=DatasetFormat.csv(header=True), output_s3_uri=baseline_results_uri, wait=True ) # - # Once the job succeeds, we can explore the `baseline_results_uri` location in s3 to see what files where stored there. print("Found Files:") S3Downloader.list("s3://{}/{}".format(bucket, baseline_results_prefix)) # We have a`constraints.json` file that has information about suggested constraints. We also have a `statistics.json` which contains statistical information about the data in the baseline. baseline_job = my_default_monitor.latest_baselining_job schema_df = pd.io.json.json_normalize(baseline_job.baseline_statistics().body_dict["features"]) schema_df.head(10) constraints_df = pd.io.json.json_normalize(baseline_job.suggested_constraints().body_dict["features"]) constraints_df.head(10) # ### 2. Analyzing Subsequent captures for data quality issues # # Now, that we have generated a baseline dataset and processed the baseline dataset to get baseline statistics and constraints, let's proceed monitor and analyze the data being sent to the endpoitn with Monitoring Schedules # #### Create a schedule # Let's first create a Monitoring schedule for the previously created Endpoint. The schedule specifies the cadence at which we run a new processing job to compare recent data captures to the baseline. # First, copy over some test scripts to the S3 bucket so that they can be used for pre and post processing code_prefix = '{}/code'.format(prefix) S3Uploader.upload('preprocessor.py', 's3://{}/{}'.format(bucket,code_prefix)) S3Uploader.upload('postprocessor.py', 's3://{}/{}'.format(bucket,code_prefix)) # We are ready to create a model monitoring schedule for the Endpoint created before and also the baseline resources (constraints and statistics) which were generated above. # + from sagemaker.model_monitor import CronExpressionGenerator from time import gmtime, strftime reports_prefix = '{}/reports'.format(prefix) s3_report_path = 's3://{}/{}'.format(bucket,reports_prefix) mon_schedule_name = 'demo-xgboost-customer-churn-model-schedule-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) my_default_monitor.create_monitoring_schedule(monitor_schedule_name=mon_schedule_name, endpoint_input=xgb_predictor.endpoint, #record_preprocessor_script=pre_processor_script, post_analytics_processor_script=s3_code_postprocessor_uri, output_s3_uri=s3_report_path, statistics=my_default_monitor.baseline_statistics(), constraints=my_default_monitor.suggested_constraints(), schedule_cron_expression=CronExpressionGenerator.hourly(), enable_cloudwatch_metrics=True, ) # - # #### Start generating some artificial traffic # The block below kicks off a thread to send some traffic to the created endpoint. This is so that we can continue to send traffic to the endpoint so that we will have always have data continually captured for analysis. If there is no traffic, the monitoring jobs will start to fail later on. # # Note that you need to stop the kernel to terminate this thread. # + from threading import Thread from time import sleep import time runtime_client = boto3.client('runtime.sagemaker') # (just repeating code from above for convenience/ able to run this section independently) def invoke_endpoint(ep_name, file_name, runtime_client): with open(file_name, 'r') as f: for row in f: payload = row.rstrip('\n') response = runtime_client.invoke_endpoint(EndpointName=ep_name, ContentType='text/csv', Body=payload) time.sleep(1) def invoke_endpoint_forever(): while True: invoke_endpoint(endpoint_name, 'data/test-dataset-input-cols.csv', runtime_client) thread = Thread(target = invoke_endpoint_forever) thread.start() # Note that you need to stop the kernel to stop the invocations # - # #### List executions # Once the schedule is scheduled, it will kick of jobs at specified intervals. Here we are listing the latest 5 executions. Note that if you are kicking this off after creating the hourly schedule, you might find the executions empty. You might have to wait till you cross the hour boundary (in UTC) to see executions kick off. The code below has the logic for waiting. # + mon_executions = my_default_monitor.list_executions() if len(mon_executions) == 0: print("We created a hourly schedule above and it will kick off executions ON the hour.\nWe will have to wait till we hit the hour...") # UNCOMMENT the code below if you want to keep retrying until the hour # while len(mon_executions) == 0: # print("Waiting for the 1st execution to happen...") # time.sleep(60) # mon_executions = my_default_monitor.list_executions() # - # #### Evaluate the latest execution and list the generated reports # + latest_execution = mon_executions[-1] print("Latest execution result: {}".format(latest_execution.describe()['ExitMessage'])) report_uri = latest_execution.output.destination print("Found Report Files:") S3Downloader.list(report_uri) # - # #### List violations # # If there are any violations compared to the baseline, it will be generated here. Let's list the violations. violations = my_default_monitor.latest_monitoring_constraint_violations() pd.set_option('display.max_colwidth', -1) constraints_df = pd.io.json.json_normalize(violations.body_dict["violations"]) constraints_df.head(10) # You can plug in the processing job arn for a single execution of the monitoring into this notebook to see more detailed visualizations of the violations and distribution statistics of the data captue that was processed in that execution # # ### (Optional) Clean-up # # If you're ready to be done with this notebook, please run the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on. sess.delete_monitoring_schedule(mon_schedule_name) sess.delete_endpoint(xgb_predictor.endpoint)
aws_sagemaker_studio/getting_started/xgboost_customer_churn_studio.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt # %config InlineBackend.figure_format = 'retina' from matplotlib.backends.backend_pdf import PdfPages pp = PdfPages("Rajas Mehendale Controlled System.pdf") # + t = np.linspace(0,20,1000) tset=np.ones(np.shape(t))*60 t1= np.exp(-3*t)*np.sin(0.5*t) t2= 0.75*(np.exp(-0.5*t)-np.exp(-2.5*t)) t3= (1.5*0.312)*(np.exp(-1.149*t)-np.exp(-4.351*t)) pset=np.ones(np.shape(t))*9 p1= 0.6+np.exp(-1.5*t)*(3*np.sin(0.5*t)-0.6*np.sqrt(10)*np.sin(0.5*t+np.arctan(1.0/3.0))) p2= 0.6*(1-np.exp(-2.5*t)) p3= 0.6*(1+0.2*np.exp(-1.149*t)-1.2*np.exp(-4.351*t)) fset=np.ones(np.shape(t))*1 f1=(p1/6); f2=(p2/6); f3=(p3/6); # - plt.figure() plt.plot(t,t1+tset, color="r", label=r"$K_C = -1, \tau_I = 1$") plt.plot(t,t2+tset, color="b", label=r"$K_C = -1, \tau_I = 2$") plt.plot(t,t3+tset, color="k", label=r"$K_C = -2, \tau_I = 1$") plt.legend(loc="best"); plt.ylim([60,60.5]); plt.xlim([0,10]); plt.ylabel(r"$T \ ({}^{o}C)$", fontsize=12) plt.xlabel("Time (hours)", fontsize=12) plt.title("Temperature Profiles for Controlled System", fontsize=12); pp.savefig(bbox_inches='tight') plt.figure() plt.plot(t,p1+pset, color="r", label=r"$K_C = -1, \tau_I = 1$") plt.plot(t,p2+pset, color="b", label=r"$K_C = -1, \tau_I = 2$") plt.plot(t,p3+pset, color="k", label=r"$K_C = -2, \tau_I = 1$") plt.legend(loc="best"); plt.ylim([9,9.7]); plt.xlim([0,10]); plt.ylabel("Pressure (psi)", fontsize=12) plt.xlabel("Time (hours)", fontsize=12) plt.title("Pressure Profiles for Valve"); pp.savefig(bbox_inches='tight') plt.figure() plt.plot(t,f1+fset, color="r", label=r"$K_C = -1, \tau_I = 1$") plt.plot(t,f2+fset, color="b", label=r"$K_C = -1, \tau_I = 2$") plt.plot(t,f3+fset, color="k", label=r"$K_C = -2, \tau_I = 1$") plt.legend(loc="lower right"); plt.ylim([1,1.12]); plt.xlim([0,10]); plt.ylabel(r"Flow rate $(\frac{{m}^{3}}{hr})$", fontsize=12) plt.xlabel("Time (hours)", fontsize=12) plt.title("Pressure Profiles for Valve"); plt.title("Flow rate Profiles"); pp.savefig(bbox_inches='tight') pp.close()
Rajas Mehendale Controller System Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Setup and Imports # %%capture # !rm -rf fairgraph # !git clone -https://github.com/GMattheisen/fairgraph.git # !pip install -r ./fairgraph/requirements.txt # !pip install -U ./fairgraph # + from fairgraph import KGClient import os import re import io import logging from datetime import date, datetime from pprint import pprint import yaml from collections import defaultdict import requests import numpy as np from neo.io import get_io, Spike2IO, NeoMatlabIO, RawBinarySignalIO # dependent on data format from fairgraph import KGClient from fairgraph.commons import (Species, Handedness, Sex, Strain, Age, Genotype, QuantitativeValue, BrainRegion, CellType, QuantitativeValueRange, StimulusType) from fairgraph.core import (Person, Identifier, Material, Organization, Address, Subject, Protocol, Step, use_namespace) from fairgraph.experiment import (Device, ElectrophysiologicalStimulation, ElectrophysiologicalStimulus, BehavioralStimulation, BehavioralStimulus, CranialWindow, Craniotomy) from fairgraph.electrophysiology import (Distribution, PatchClampActivity, PatchClampExperiment, Trace, MultiChannelMultiTrialRecording, Distribution, QualifiedMultiTraceGeneration, ElectrodePlacementActivity, ElectrodeImplantationActivity, Sensor, ImplantedBrainTissue, ECoGExperiment, EEGExperiment) from fairgraph.base import KGQuery, as_list from fairgraph.minds import Dataset use_namespace('neuralactivity') # + logging.basicConfig(filename="to_knowledge_graph.log", filemode='a', level=logging.DEBUG) logger = logging.getLogger("nar") token = "" client = KGClient(token, nexus_endpoint='https://nexus-int.humanbrainproject.org/v0') if 'int' in client.nexus_endpoint: Subject._path = '/core/subject/v0.1.2' Device._path = '/electrophysiology/device/v0.1.2' Step._path = '/core/protocol/v0.1.6' Material._path = '/core/material/v0.1.2' Protocol._path = '/core/protocol/v0.1.6' BehavioralStimulation._path = '/experiment/behavioralstimulation/v0.1.0' BehavioralStimulus._path = '/experiment/behavioralstimulus/v0.1.0' ElectrophysiologicalStimulation._path = '/optophysiology/electrophysiologicalstimulation/v0.5.0' ElectrophysiologicalStimulus._path = '/optophysiology/electrophysiologicalstimulus/v0.1.0' Sensor._path = '/electrophysiology/sensor/v0.1.0' EEGExperiment._path = '/electrophysiology/electrodearrayexperiment/v0.1.1' ElectrodePlacementActivity._path = '/experiment/electrodeplacement/v0.1.0' QualifiedMultiTraceGeneration._path = '/electrophysiology/multitracegeneration/v0.2.3' MultiChannelMultiTrialRecording._path = '/electrophysiology/multitrace/v0.3.1' # - # ### EEG # # #### Components # # Subject<br> # Device<br> # Step<br> # Protocol<br> # ElectrophysiologicalStimulus<br> # ElectrophysiologicalStimulation<br> # Sensor<br> # EEGExperiment<br> # ElectrodePlacementActivity<br> # QualifiedMultiTraceGeneration<br> # MultiChannelMultiTrialRecording # # Locate Dataset and Define Contributors # + dataset_name = None minds_dataset = Dataset.by_name(dataset_name, client, resolved=True, api="query") # map MINDSPerson to Person (for owner, contributors) owner = [obj.resolve(client) for obj in as_list(minds_dataset.owners)] contributors = [obj.resolve(client) for obj in as_list(minds_dataset.contributors)] # - for minds_person in contributors: print(minds_person) # + """People""" abc = Person(given_name="", family_name="", email="") abc.save(client) deg = Person(given_name="", family_name="", email=None) deg.save(client) hij = Person(given_name="", family_name="", email=None) hij.save(client) people = [abc, deg, hij] specimen_group = [obj.resolve(client) for obj in as_list(minds_dataset.specimen_group)] minds_subjects = [] for sg in specimen_group: minds_subjects.extend([obj.resolve(client) for obj in as_list(sg.subjects)]) # - # # Define Subjects, Samples, Experiment for minds_subject in minds_subjects: print(minds_subject.name) print(minds_subject.samples) for minds_sample in as_list(minds_subject.samples): print(as_list(minds_subject.samples)) minds_sample = minds_sample.resolve(client) print(minds_sample.name) # + """Import trace information from previous run if available""" if os.path.exists(f'{minds_dataset.name}_trace_dict.yaml'): print("loading trace_dict") with open(f'{minds_dataset.name}_trace_dict.yaml') as file: trace_dict = yaml.load(file, Loader=yaml.FullLoader) preload = True print(trace_dict) else: print("no saved trace_dict, building trace_dict") trace_dict = {} preload = False # - # map MINDSSubject to Subject for minds_subject in minds_subjects: print(minds_subject.name) """Subject""" species = minds_subject.species.resolve(client).name strain = minds_subject.strains genotype = minds_subject.genotype sex = minds_subject.sex.resolve(client).name if sex == "Unknown" or sex == '<Unknown>': sex = None else: sex=Sex(sex) handedness = None age_units = "days" age_value = minds_subject.age if "-" in age_value: min, max = age_value.split("-") age_value = QuantitativeValueRange(int(min), int(max), age_units) else: age_value = QuantitativeValue(int(age_value), age_units) age_category = "Post-natal" death_date = None # date type group = None # 'control group' v 'treatment group' subject = Subject( minds_subject.name, species=Species(species), strain=Strain(strain), genotype=Genotype(genotype), sex = sex, age=Age(age_value, age_category), death_date=None ) print(subject) subject.save(client) assert subject.id is not None for minds_sample in as_list(minds_subject.samples): minds_sample = minds_sample.resolve(client) brain_regions = [BrainRegion("striatum")] device = Device( name = "", manufacturer="", model_name="", software_version =None, serial_number = None, distribution = Distribution(""), ) device.save(client) print(device) step_one = Step( name = "Step one for protocol", previous_step_name = None, sequence_number = 2, version = "", # string or int identifier = "", distribution = Distribution(), description = "", materials - Material(), # or list author = Person() # or list ) step_one.save(client) step_two = Step( name = "Step two for protocol", previous_step_name = "Step one for protocl", sequence_number = 2, version = "", # string or int identifier = "", distribution = Distribution(), description = "", materials - Material(), # or list author = Person() # or list ) step_two.save(client) steps = [step_one, step_two] protocol = Protocol( name = "Protcol for ", version = "", # string or int identifier = "", distribution = Distribution(), number_of_steps = 0, steps = steps, materials - Material(), # or list author = Person(), # or list date_published = # date type ) protocol.save(client) behavioral_stimulus = BehavioralStimulus( name=f"Stimulus for {minds_sample.name}", description = "", distribution = None ) behavioral_stimulus.save(client) behavioral_stimulation = BehavioralStimulation( name=f"Stimulation for {minds_sample.name}", stimulus = behavioral_stimulus, protocol= protocol, citation = None, code = None, license = None ) behavioral_stimulation.save(client) sensor = Sensor( name = "Sensors", coordinate_system = Distribution(""), coordinate_units = "", description = "" ) sensor.save(client) expt = EEExperiment( name = f"EEG recording for {minds_sample.name}", device=device, stimulation = [electrophysiological_stimulation], sensors=sensors, digitized_head_points_coordinates = Sensor(), head_localization_coils_coordinates = Sensor(), digitized_head_points = , # bool digitized_landmarks = , # bool start_time =, # datetime end_time = , # datetime people = people, protocol = Protocol() ) expt.save(client) """Trace Generation""" """Accounts for muliple files existing per sample""" holding_potential_value = None holding_potential_unit = None if preload: # building from YAML print("building trace metadata from YAML") trace_generation = QualifiedMultiTraceGeneration( f"Metadata for EEG recording in {minds_sample.name}", stimulus_experiment=expt, sweeps=trace_dict[minds_sample.name]['n_segments'], channel_type = None, holding_potential = QuantitativeValue(holding_potential_value, holding_potential_unit), sampling_frequency = None, power_line_frequency = None ) trace_generation.save(client) trace = MultiChannelMultiTrialRecording( f"Traces recorded in {minds_sample.name}", data_location=Distribution(location = trace_dict[minds_sample.name]['file_location'], content_type=trace_dict[minds_sample.name]['content_type']), generated_by=expt, generation_metadata=trace_generation, channel_names=trace_dict[minds_sample.name]['channel_names'], data_unit=trace_dict[minds_sample.name]['data_unit'], time_step=QuantitativeValue(trace_dict[minds_sample.name]['time_step'], trace_dict[minds_sample.name]['time_step_unit']), channel_type = None, part_of=minds_dataset ) trace.save(client) else: file_dir = "hbp-d00000/" filename = os.listdir(file_dir) file_url=file_dir + filename file_location = "https://object.cscs.ch/v1/AUTH_0000000000000000000/" + file_url io = RawBinarySignalIO(filename = file_url) data = io.read_block() n_segments = len(data.segments) sigs = data.segments[0].analogsignals channel_names = [sig.name for sig in sigs] units = [sig.units.dimensionality.string for sig in sigs] time_step_unit = "ms" time_step = float(sigs[0].sampling_period.rescale(time_step_unit)) content_type = "" print("trace_dict does not exist, building trace dict") # trace_dict[minds_sample.name] = {} trace_dict[minds_sample.name]['channel_names'] = channel_names trace_dict[minds_sample.name]['data_unit'] = units trace_dict[minds_sample.name]['time_step'] = time_step trace_dict[minds_sample.name]['time_step_unit'] = time_step_unit trace_dict[minds_sample.name]['n_segments'] = n_segments trace_dict[minds_sample.name]['file_url'] = file_url trace_dict[minds_sample.name]['file_location'] = file_location trace_dict[minds_sample.name]['content_type'] = content_type trace_generation = QualifiedMultiTraceGeneration( f"Metadata for EEG recording in {minds_sample.name}", stimulus_experiment = expt, sweeps = n_segments, channel_type = None, holding_potential = QuantitativeValue(holding_potential_value, holding_potential_unit), sampling_frequency = None, power_line_frequency = None ) trace_generation.save(client) print(trace_generation) trace = MultiChannelMultiTrialRecording( f"Traces recorded in {minds_sample.name}", data_location=Distribution(location = file_location, content_type=content_type), generated_by=expt, generation_metadata=trace_generation, channel_names=channel_names, data_unit=units, time_step=QuantitativeValue(**{ "unit_text": time_step_unit, "value": time_step }), channel_type = None, part_of=minds_dataset ) print(trace) trace.save(client) electrode = ElectrodePlacementActivity( name = f"Electrode placement activity for {minds_sample.name}", subject = subject, brain_location = brain_regions, device=device, protocol = Protocol(), people=people) electrode.save(client) trace_dict with open(f'{minds_dataset.name}_trace_dict.yaml', 'w') as file: #save trace dict documents = yaml.dump(trace_dict, file) print(f"Updated on: {datetime.now(tz=None)} on {client.nexus_endpoint}")
examples/notebooks/EEG Template.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/popnut123/PoseCNN/blob/master/train_test_split.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="Zdbl_h2NZYai" import numpy as np import pickle import torch import torch.nn as nn import torch.nn.utils.rnn as rnn_utils from torch.utils.data import DataLoader from torch.utils.data import Dataset from torch.utils.data.sampler import SubsetRandomSampler nFeatrue = 2 class SensorDataset(Dataset): def __init__(self, Data, Label): self.Data = Data self.Label = Label def __len__(self): return len(self.Data) def __getitem__(self, index): data = torch.Tensor(self.Data[index]) label = torch.LongTensor(self.Label[index]) return (data, label) def my_collate(data): data.sort(key=lambda x: len(x[0]), reverse=True) context = [ i[0] for i in data ] label = [ i[1] for i in data ] label = torch.LongTensor(label) data_length = [len(i)/nFeatrue for i in context] padded_context = rnn_utils.pad_sequence(context, batch_first=True, padding_value=0) # final_data = [[a,b] for a,b in zip(padded_context, label)] # final_data = torch.FloatTensor(final_data) return (padded_context, label), data_length D2 = [ [1.1, 2, 3, 2], [4, 5], [6, 7.1, 8, 9, 0, 1], [6, 7, 1.8, 9, 10, 1, 3, 4], [6, 8, 9.2, 1, 3, 1], [5, 6, 3.7, 8, 9, 1], [8, 9] ] L2 = np.array([[1], [0], [1], [2], [1], [2], [2]]) # + id="lxq50i7UaGx6" dataset_size = len(L2) indices = list(range(dataset_size)) split = int(np.floor(0.3 * dataset_size)) shuffle_dataset = True if shuffle_dataset : np.random.seed(42) np.random.shuffle(indices) train_indices, val_indices = indices[split:], indices[:split] # Creating PT data samplers and loaders: train_sampler = SubsetRandomSampler(train_indices) valid_sampler = SubsetRandomSampler(val_indices) # + id="nB9yRpODZksZ" dataset = SensorDataset(D2, L2) train_dataloader = DataLoader(dataset, batch_size=4, collate_fn=my_collate, sampler=train_sampler) validation_dataloader = DataLoader(dataset, batch_size=4, collate_fn=my_collate, sampler=valid_sampler) # + colab={"base_uri": "https://localhost:8080/"} id="x3g83DtObK2-" outputId="c344b1ad-b5fc-4b89-8ef1-881d24091a88" for (a,b), length in train_dataloader: print("******************") batch_size = a.shape[0] v = a.view(batch_size, -1, nFeatrue) print(v) print("------------------") data = rnn_utils.pack_padded_sequence(v.type(torch.FloatTensor), length, batch_first=True) print(data) print() # + colab={"base_uri": "https://localhost:8080/"} id="HGnygXipbjTm" outputId="2045eb8f-5cf7-4de6-ff9c-6fa794ceb644" for (a,b), length in validation_dataloader: print("******************") batch_size = a.shape[0] v = a.view(batch_size, -1, nFeatrue) print(v) print("------------------") data = rnn_utils.pack_padded_sequence(v.type(torch.FloatTensor), length, batch_first=True) print(data) print()
train_test_split.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # PyImageJ Tutorial # === # # This notebook covers how to use ImageJ as a library from Python. A major advantage of this approach is the ability to combine ImageJ with other tools available from the Python software ecosystem, including NumPy, SciPy, scikit-image, CellProfiler, OpenCV, ITK and more. # # This notebook assumes familiarity with the ImageJ API. Detailed tutorials in that regard can be found in the other notebooks. # ## 2 A simple example: `ij.py.show()` and `ij.ui().show()` # # PyImageJ can display NumPy images and some* ImageJ Java images using `matplotlib.pyplot.imshow` with`ij.py.show()`. You can also open some images in an ImageJ window with `ij.ui().show()`. Let's demonstrate both methods,first initialize PyImageJ: # # *: Both `ij.py.show()` and `ij.ui().show()` have limitations in dispaying image data. Briefly, `ij.py.show()` expects a 2D NumPy or ImageJ image, while `ij.ui().show()` expects an ImageJ image. For more information view the examples below. # + import imagej # initialize ImageJ in interactive mode ij = imagej.init(mode='interactive') print(f"ImageJ version: {ij.getVersion()}") # - # ### 2.1 `ij.py.show()` # # `ij.py.show()` displays images with `matplotlib.pyplot.imshow` behind the scenes. You can use either Python images (_e.g._ `np.ndarray`, `xarray.DataArray`) or ImageJ/Java images (_e.g._ `net.imagej.Dataset`, `net.imglib2.RandomAccessibleInterval`) so long as they are 2D image. >2D image data is not supported by `matplotlib.pyplot`. Let's open a couple different image data types and display them with `ij.py.show()`: # **`skimage` 2D** # # # View image data from `sckimage`. # + import skimage import numpy as np img = skimage.data.astronaut() img = np.mean(img[10:190,140:310], axis=2) ij.py.show(img, cmap = 'gray') # - # #### `net.imagej.Dataset` 2D # # View an ImageJ `Dataset`. dataset_2d = ij.io().open('sample-data/test_still.tif') ij.py.show(dataset_2d) # #### `net.imagej.Dataset` 4D # # Image data that is >2D can't be displayed with `ij.py.show()` all at once, however you can display individual slices of the data like so: dataset_4d = ij.io().open('sample-data/test_timeseries.tif') ij.py.show(dataset_4d[:, :, 2, 10]) # channel 2, frame 10 # #### `xarray.DataArray` 4D # # The same can be done with >2D `xarray.DataArray`s: # get xarray from dataset xarr_4d = ij.py.from_java(dataset_4d) ij.py.show(xarr_4d[10, :, :, 2]) # channel 2, frame 10 # ### 2.2 `ij.ui().show()` # # `ij.ui().show()` displays images with ImageJ's image viewer. Unlike `ij.py.show()`, `ij.ui().show()` only accepts ImageJ/Java images (_e.g._ `net.imagej.Dataset`, `net.imagej.ImgPlus`, `net.imglib2.RandomAccessibleInterval`). Eventhough `ij.ui().show()` only accepts Java objects, it can display > 2D image data. In order to display your NumPy/xarray data with ImageJ's viewer, convert your data to Java first with `ij.py.to_java()` before displaying it with `ij.ui().show()`. # # **Important**: This section of the notebook only works locally (_i.e._ not on online on Binder) and requires either the `mode='interactive'` or `mode='gui'` initialization parameters to be set). # #### `skimage` 2D # # View image data from `sckimage`. # + import skimage import numpy as np img = skimage.data.astronaut() img = np.mean(img[10:190,140:310], axis=2) java_img = ij.py.to_java(img) ij.ui().show(java_img) # - # #### `net.imagej.Dataset` 2D # # View an ImageJ `Dataset`. dataset_2d = ij.io().open('sample-data/test_still.tif') ij.ui().show(dataset_2d) # #### `net.imagej.Dataset` 4D # # ImageJ can view this 4D image data without slicing to specific frames. dataset_4d = ij.io().open('sample-data/test_timeseries.tif') ij.ui().show(dataset_4d) # #### `xarray.DataArray` 4D # # `xarray.DataArray`s need to first be converted to Java before displaying with ImageJ. # get xarray from dataset xarr_4d = ij.py.from_java(dataset_4d) new_dataset_4d = ij.py.to_java(xarr_4d) ij.ui().show(new_dataset_4d)
doc/2-Opening-and-Displaying-Images.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python # name: python3 # --- # + import numpy import matplotlib.pyplot as plt x = numpy.random.normal(5.0, 1.0, 1000) #mean = 5.0, standard_deviation = 1.0, 1000 random numbers come from normal data distribution y = numpy.random.normal(10.0, 2.0, 1000) #mean = 10.0, standard_deviation = 2.0, 1000 numbers from normal distribution plt.scatter(x, y) plt.show()
scatter_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # The `requests` library # # [`requests`](http://docs.python-requests.org/en/master/) is a handy third-party library for making HTTP requests. It does the same thing your browser does when you type in a URL and hit enter -- sends a message to a server and requests a copy of the page -- but it allows us to do this programatically instead of pointing and clicking. # # For our purposes today, we're interested in the library's `get` method. # # **Let's fetch IRE's home page and look at the HTML.**
_exercises/2. The requests library.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/smaranjitghose/PyDiabetesPredict/blob/master/PyDiabetesPredict.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="bKwCCPL1dJaT" colab_type="text" # # Diabetes Prediction # + id="4UIxf1FvdKDs" colab_type="code" colab={} # import dependencies import numpy as np import pandas as pd # + id="QVhVzeXadLcc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 190} outputId="e222ef23-fee3-4e1d-bdc7-de4e33e6f82e" # First import the data set and rename the columns column = ["Pregnancies","Glucose","BloodPressure","SkinThickness","Insulin","BMI","DiabetesPedigreeFunction","Age","Outcome"] data = pd.read_csv('/content/pima-indians-diabetes.data.csv.txt',names=column) # Lets see the first 5 rows data.head() # + [markdown] id="WlmazFPfe97J" colab_type="text" # ### Now lets split the dataset in Training and Testing Sets # + id="YGKOR-QleISY" colab_type="code" colab={} X = data.iloc[:,0:-1] # X is the features in our dataset y = data.iloc[:,-1] # y is the Labels in our dataset # + id="J3cGTszjex_C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="e8d2b5f0-304a-4a58-91f6-f8d16ec5a88c" from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) print('Number of rows in the total set: {}'.format(data.shape[0])) print('Number of rows in the training set: {}'.format(X_train.shape[0])) print('Number of rows in the test set: {}'.format(X_test.shape[0])) # + [markdown] id="_eySk3ssfiYM" colab_type="text" # ### Using Gaussian Naive Bayes to train our model # + id="47w7hXUufF0t" colab_type="code" colab={} from sklearn.naive_bayes import GaussianNB naive_bayes = GaussianNB() naive_bayes.fit(X_train, y_train) predictions = naive_bayes.predict(x_test) # + [markdown] id="uvZUnNDNgXEn" colab_type="text" # ### Model Evaluation # + id="WAHs-UEofdvY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="a781bffa-a79e-4eab-a9f5-c829ca68ff3d" from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score print('Accuracy score: ', format(accuracy_score(y_test, predictions))) print('Precision score: ', format(precision_score(y_test, predictions))) print('Recall score: ', format(recall_score(y_test, predictions))) print('F1 score: ', format(f1_score(y_test, predictions))) # + id="uKkFCOTKgaCP" colab_type="code" colab={}
PyDiabetesPredict.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:facescape] * # language: python # name: conda-env-facescape-py # --- # # FaceScape Bilinear Model - generate random 3D face # # This demo shows how to use FaceScape bilinear model to generate randmom 3D meshes. Please make sure the environmenthave been prepared following [README-toolkit](https://github.com/zhuhao-nju/facescape/blob/master/toolkit/README.md). # ### (1) demo for version 1.6 # # Please download **'facescape_bilinear_model_v1.6.zip'** from [FaceScape website](https://facescape.nju.edu.cn/) or from the [external_link](https://github.com/zhuhao-nju/facescape_debug/blob/master/doc/external_link_fsbm.md), then extract the npz files to "/toolkit/bilinear_model_v1.6/". Here is a demo for the basic usage: # + import numpy as np, trimesh from src.facescape_bm import facescape_bm from src.renderer import render_cvcam from src.utility import show_img_arr np.random.seed(1000) model = facescape_bm("./bilinear_model_v1.6/facescape_bm_v1.6_847_50_52_id_front.npz") # create random identity vector random_id_vec = (np.random.random(50) - 0.5) * 0.1 if random_id_vec[0]>0: random_id_vec = -random_id_vec # create random expression vector exp_vec = np.zeros(52) exp_vec[np.random.randint(52)] = 1 # creat random color vector random_color_vec = (np.random.random(100) - 0.5) * 100 # generate and save full head mesh mesh_full = model.gen_full(random_id_vec, exp_vec) mesh_full.export("./demo_output/bm_v16_result_full.obj") # generate and save facial mesh mesh_face = model.gen_face(random_id_vec, exp_vec) mesh_face.export("./demo_output/bm_v16_result_face.obj") # generate and save facial mesh with rough vertex color mesh_face_color = model.gen_face_color(random_id_vec, exp_vec, random_color_vec) mesh_face_color.export("./demo_output/bm_v16_result_face_color.obj", enable_vc = True) print("Results saved to './demo_output/'") # render generated meshes Rt = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 500]], dtype=np.float64) depth_full, image_full = render_cvcam(trimesh.Trimesh(vertices = mesh_full.vertices, faces = mesh_full.faces_v-1), Rt = Rt) depth_face, image_face = render_cvcam(trimesh.Trimesh(vertices = mesh_face.vertices, faces = mesh_face.faces_v-1), Rt = Rt) depth_face_color, image_face_color = render_cvcam(trimesh.Trimesh( vertices = mesh_face_color.vertices, faces = mesh_face_color.faces_v-1, vertex_colors = mesh_face_color.vert_colors), Rt = Rt) # show rendered images merge_img = np.concatenate((image_full, image_face, image_face_color), 1) show_img_arr(merge_img, bgr_mode = True) # - # **Generate random 3D faces.** If you hope to generate random faces that are not wierd, you should make the identity vectors follow the Gaussian distribution, with the provided mean and variance value. Here is a demo to generate 10 random and common 3D faces: # + # new Rt for rendering Rt = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 1000]], dtype=np.float64) random_faces_list = [] for i in range(6): # create random identity vector random_id_vec = np.random.normal(model.id_mean, np.sqrt(model.id_var)) # create random expression vector exp_vec = np.zeros(52) exp_vec[0] = 1 # generate full head mesh mesh_full = model.gen_full(random_id_vec, exp_vec) # render depth_full, image_full = render_cvcam(trimesh.Trimesh(vertices = mesh_full.vertices, faces = mesh_full.faces_v-1), Rt = Rt) random_faces_list.append(image_full) # show rendered images merge_faces_img = np.concatenate(random_faces_list, 1) show_img_arr(merge_faces_img, bgr_mode = True) # - # ### (2) demo for Bilinear model v1.0/1.2/1.3 # # Please firstly download **'facescape_bilinear_model_v1.3.zip'** from [FaceScape website](https://facescape.nju.edu.cn/), extract the 'data' folder to the current directory, then run: # + import numpy as np import pickle, os # triangle faces with open('./data/predef_front_faces.pkl', 'rb') as f: faces_front = pickle.load(f) with open('./data/front_indices.pkl', 'rb') as f: indices_front = pickle.load(f) with open('./data/predef_faces.pkl', 'rb') as f: faces_full = pickle.load(f) # texture coordinates with open('./data/predef_texcoords.pkl', 'rb') as f: texcoords = pickle.load(f) # bilinear model with 52 expression parameters and 50 identity parameters # We perform Tucker decomposition only along the identity dimension to reserve the semantic meaning of parameters in expression dimension as specific blendshape weights core_tensor = np.load('./data/core_847_50_52.npy') factors_id = np.load('./data/factors_id_847_50_52.npy') matrix_tex = np.load('./data/matrix_text_847_100.npy') mean_tex = np.load('./data/mean_text_847_100.npy') factors_tex = np.load('./data/factors_tex_847_100.npy') id = factors_id[0] exp = np.zeros(52) exp[0] = 1 core_tensor = core_tensor.transpose((2, 1, 0)) mesh_vertices_full = core_tensor.dot(id).dot(exp).reshape((-1, 3)) mesh_vertices_front = mesh_vertices_full[indices_front] tex = mean_tex + matrix_tex.dot(factors_tex[0]) tex = tex.reshape((-1, 3)) / 255 os.makedirs("./demo_output/", exist_ok = True) with open('./demo_output/bm_v10_result_full.obj', "w") as f: for i in range(mesh_vertices_full.shape[0]): f.write("v %.6f %.6f %.6f\n" % (mesh_vertices_full[i, 0], mesh_vertices_full[i, 1], mesh_vertices_full[i, 2])) for i in range(len(texcoords)): f.write("vt %.6f %.6f\n" % (texcoords[i][0], texcoords[i][1])) for face in faces_full: face_vertices, face_normals, face_texture_coords, material = face f.write("f %d/%d %d/%d %d/%d\n" % ( face_vertices[0], face_texture_coords[0], face_vertices[1], face_texture_coords[1], face_vertices[2], face_texture_coords[2])) with open('./demo_output/bm_v10_result_face_color.obj', "w") as f: for i in range(mesh_vertices_front.shape[0]): f.write("v %.6f %.6f %.6f %.6f %.6f %.6f\n" % ( mesh_vertices_front[i, 0], mesh_vertices_front[i, 1], mesh_vertices_front[i, 2], tex[i, 2], tex[i, 1], tex[i, 0])) for face in faces_front: face_vertices, face_normals, face_texture_coords, material = face f.write("f %d %d %d\n" % (face_vertices[0], face_vertices[1], face_vertices[2])) print("Results saved to './demo_output/'")
toolkit/demo_bilinear_basic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="rENGvL1WaXBW" # # Session-based E-commerce Product Recommender # > We will build one of the simplest and powerful session-based recommender engine on a real-world data. The data contains [Trendyol's](https://www.trendyol.com/) session-level activities and product metadata information. # # - toc: false # - badges: true # - comments: true # - categories: [Session, Sequence, Retail, ECommerce] # - author: "<a href='https://github.com/CeyhanTurnali/ProductRecommendation'>CeyhanTurnalı</a>" # - image: # + id="E8gbN55l4FTu" executionInfo={"status": "ok", "timestamp": 1626198959783, "user_tz": -330, "elapsed": 810, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} import numpy as np import pandas as pd from scipy.sparse import csr_matrix from pandas.api.types import CategoricalDtype from sklearn.metrics.pairwise import cosine_similarity # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="hmXEfQSe4L2n" executionInfo={"status": "ok", "timestamp": 1626205345999, "user_tz": -330, "elapsed": 479, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="8c079082-ae50-4e26-a55b-c9c96186b447" meta = pd.read_parquet('https://github.com/recohut/reco-data/raw/trendyol/trendyol/v1/meta.parquet.gzip') meta.head(5) # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="byE20ov25ZA4" executionInfo={"status": "ok", "timestamp": 1626205348546, "user_tz": -330, "elapsed": 1808, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="60659814-0241-4c10-f07d-4dbef3a3414e" events = pd.read_parquet('https://github.com/recohut/reco-data/raw/trendyol/trendyol/v1/events.parquet.gzip') events.head(5) # + [markdown] id="49D860wA5f8x" # There are two dataset which are contains prouducts and session details. I used productid as a primary key and merge two csv files. # + colab={"base_uri": "https://localhost:8080/", "height": 289} id="KlTZBdSA5d9a" executionInfo={"status": "ok", "timestamp": 1626205348547, "user_tz": -330, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="604d91db-2a4b-4ce7-e2c9-e6fd70687e34" data = meta.merge(events, on="productid") data.head() # + colab={"base_uri": "https://localhost:8080/"} id="H1Bu0u2c5qBx" executionInfo={"status": "ok", "timestamp": 1626205348548, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d49b3461-7076-499f-da1f-507494c125be" data.info() # + [markdown] id="xSoB6tXeCrpA" # Identify and drop null in ids columns # + colab={"base_uri": "https://localhost:8080/"} id="aDdixI2TCaIS" executionInfo={"status": "ok", "timestamp": 1626205349675, "user_tz": -330, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="86efef8d-6c7d-4199-a707-0feff1b24a20" data.isna().sum() # + id="8-v_wyolCh1H" executionInfo={"status": "ok", "timestamp": 1626205351008, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} data = data.dropna(subset=['sessionid','productid']) # + colab={"base_uri": "https://localhost:8080/"} id="2o9ID1-RCqGQ" executionInfo={"status": "ok", "timestamp": 1626205351010, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="42d092bd-8275-4856-85a4-bd6a691889ec" data.isna().sum() # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="uRIy0SYJ6DEQ" executionInfo={"status": "ok", "timestamp": 1626205352731, "user_tz": -330, "elapsed": 1732, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c195f803-6839-4cbb-b439-92f021d36ca2" data.describe(include=['O']).T # + [markdown] id="f6J5JwNY6Qh4" # Cart is a category but we can use it as a quantity. Every cart process is one buying and we can use it as a quantity to answer how many products did the customers buy. # + id="dRelTI5C6Hgn" executionInfo={"status": "ok", "timestamp": 1626205353405, "user_tz": -330, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} data['event'] = data['event'].replace(['cart'],'1') data['event'] = data['event'].astype(float) # + id="YGWRjWs-ZAfA" executionInfo={"status": "ok", "timestamp": 1626205353407, "user_tz": -330, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} data_full = data.copy() # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="TqWupVSSBaB3" executionInfo={"status": "ok", "timestamp": 1626205356721, "user_tz": -330, "elapsed": 635, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="81c2f4da-38b6-408b-8f88-59a2d876e838" data = data[['sessionid','productid','event']] data.head() # + [markdown] id="E-Txbxuu6xSR" # Next, we will create a session-item matrix. In this matrix, each row represents a session, each column represents each product or item and the value in each cell indicates whether the customer has purchased the given product in that particular session. # + colab={"base_uri": "https://localhost:8080/"} id="ZFhB114kBTpA" executionInfo={"status": "ok", "timestamp": 1626199546252, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="bd8cde73-54e3-40b1-a98a-89fa1886d495" session_c = CategoricalDtype(sorted(data.sessionid.unique()), ordered=True) product_c = CategoricalDtype(sorted(data.productid.unique()), ordered=True) row = data.sessionid.astype(session_c).cat.codes col = data.productid.astype(product_c).cat.codes session_item_matrix = csr_matrix((data["event"], (row, col)), shape=(session_c.categories.size, product_c.categories.size)) session_item_matrix # + colab={"base_uri": "https://localhost:8080/"} id="ggNd2z4yELPb" executionInfo={"status": "ok", "timestamp": 1626199924571, "user_tz": -330, "elapsed": 698, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="7bd380af-976e-45df-dbbd-50f9ba18ab27" session_item_matrix[:10,:10].todense() # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="YaeYVzuiGRUz" executionInfo={"status": "ok", "timestamp": 1626200617560, "user_tz": -330, "elapsed": 462, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="36827e8b-2332-4bc6-e7f8-f1ff08829633" session_c.categories[10] # + [markdown] id="zbDv4ey5AnGq" # ## User-User Similarity # + [markdown] id="icXDjBbb7QaG" # We compute the cosine similarity from the session item matrix to determine similarity between user's purchase behaviour. # + colab={"base_uri": "https://localhost:8080/"} id="slHS1piKF41-" executionInfo={"status": "ok", "timestamp": 1626200358341, "user_tz": -330, "elapsed": 3458, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="14a7fa53-e4cb-44cd-ceba-1b0d6e6338d2" user_user_sim_matrix = cosine_similarity(session_item_matrix, dense_output=False) user_user_sim_matrix # + id="iaMqp_z3HWzo" executionInfo={"status": "ok", "timestamp": 1626204812855, "user_tz": -330, "elapsed": 495, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} def getname(id=0, ntype='session', mode='lookup'): if mode=='random': if ntype=='session': id = np.random.randint(0,len(session_c.categories)) return session_c.categories[id], id else: id = np.random.randint(0,len(product_c.categories)) return product_c.categories[id], id else: if ntype=='session': return session_c.categories[id] else: return product_c.categories[id] def print_topk(matrix, id, k=10, ntype='session'): frame = pd.DataFrame(matrix[id].todense()).T.sort_values(by=0, ascending=False).head(k) frame = frame.reset_index() frame.columns = ['id','similarity'] frame[f'{ntype}_id'] = frame['id'].apply(lambda x: getname(x, ntype)) return frame # + colab={"base_uri": "https://localhost:8080/"} id="e-J-UrhYHTez" executionInfo={"status": "ok", "timestamp": 1626204992293, "user_tz": -330, "elapsed": 450, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="e531b1a7-0001-4ecb-dfc2-5531e7b7b949" random_session, id = getname(ntype='session', mode='random') print("Let's try it for a random session {}".format(random_session)) # + [markdown] id="yaulgRot_bpQ" # What are the similar sessions? # + colab={"base_uri": "https://localhost:8080/", "height": 359} id="ay_k2TTHQnWK" executionInfo={"status": "ok", "timestamp": 1626204996667, "user_tz": -330, "elapsed": 489, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="424b8ecc-34da-4759-9f3a-eff1d095054a" similar_sessions = print_topk(user_user_sim_matrix, id=id, k=10, ntype='session') similar_sessions # + colab={"base_uri": "https://localhost:8080/"} id="G9aU9OdIQlx6" executionInfo={"status": "ok", "timestamp": 1626205004477, "user_tz": -330, "elapsed": 463, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="d40e83b7-809f-473e-fa44-2de938114eb1" print("Random Session ID: {}\nTop-similar Session ID: {}".\ format(random_session, similar_sessions.iloc[1].session_id)) # + [markdown] id="U52b-5jWSev_" # For reference, we take a random session id as A and top-most similar session id as B. Therefore, by identifying the items purchased by Customer A and Customer B and the Remaining Items of Customer A relative to Customer B, we can safely assume that there is high similarity between customers, as there is high similarity between customers. The rest of the products purchased by customer A are also likely to be purchased by customer B. Therefore, we recommend the remaining products to Customer # + colab={"base_uri": "https://localhost:8080/"} id="N6dqOt06_xTZ" executionInfo={"status": "ok", "timestamp": 1626205440287, "user_tz": -330, "elapsed": 484, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="50fa73c2-d8c5-495e-9de4-4813ecd0d036" items_bought_by_customerA = [getname(x, ntype='product') for x in np.argwhere(session_item_matrix[id]>0)[:,1]] print("Items Bought by Customer A:") items_bought_by_customerA # + colab={"base_uri": "https://localhost:8080/"} id="79MezSooACUP" executionInfo={"status": "ok", "timestamp": 1626205453434, "user_tz": -330, "elapsed": 404, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="89206cdc-fc19-48f7-f06c-a533dcfc7883" items_bought_by_customerB = [getname(x, ntype='product') for x in np.argwhere(session_item_matrix[similar_sessions.iloc[1].id]>0)[:,1]] print("Items bought by other customer:") items_bought_by_customerB # + colab={"base_uri": "https://localhost:8080/", "height": 717} id="8DL_RNZjACSR" executionInfo={"status": "ok", "timestamp": 1626205487462, "user_tz": -330, "elapsed": 4082, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="aee04f04-66a2-4ab3-e86f-a9c0e77a783c" items_to_recommend_to_customerB= set(items_bought_by_customerA) - set(items_bought_by_customerB) print("Items to Recommend to customer B:") data_full.loc[data_full['productid'].isin(items_to_recommend_to_customerB),['productid', 'name']].drop_duplicates().set_index('productid') # + [markdown] id="H7oVusIFAfOo" # > Tip: For Item-item similarity, take the transpose of session-item matrix and repeat the same steps.
_docs/nbs/session-retail-trendyol.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ShinAsakawa/2019seminar_info/blob/master/notebooks/2019_03_13Rendering_OpenAi_Gym_in_Colaboratory.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="GsSGgH2Y0apl" colab_type="code" colab={} # _____ _____ _____ _____ _____ _____ # /\ \ /\ \ /\ \ /\ \ /\ \ /\ \ # /::\ \ /::\ \ /::\ \ /::\ \ /::\ \ /::\ \ # /::::\ \ \:::\ \ /::::\ \ /::::\ \ /::::\ \ \:::\ \ # /::::::\ \ \:::\ \ /::::::\ \ /::::::\ \ /::::::\ \ \:::\ \ # /:::/\:::\ \ \:::\ \ /:::/\:::\ \ /:::/\:::\ \ /:::/\:::\ \ \:::\ \ # /:::/__\:::\ \ \:::\ \ /:::/__\:::\ \ /:::/__\:::\ \ /:::/__\:::\ \ \:::\ \ # \:::\ \:::\ \ /::::\ \ /::::\ \:::\ \ /::::\ \:::\ \ /::::\ \:::\ \ /::::\ \ # ___\:::\ \:::\ \ /::::::\ \ /::::::\ \:::\ \ /::::::\ \:::\ \ /::::::\ \:::\ \ ____ /::::::\ \ # /\ \:::\ \:::\ \ /:::/\:::\ \ /:::/\:::\ \:::\ \ /:::/\:::\ \:::\____\ /:::/\:::\ \:::\ \ /\ \ /:::/\:::\ \ #/::\ \:::\ \:::\____\ /:::/ \:::\____\/:::/ \:::\ \:::\____\/:::/ \:::\ \:::| |/:::/ \:::\ \:::\____\/::\ \/:::/ \:::\____\ #\:::\ \:::\ \::/ / /:::/ \::/ /\::/ \:::\ /:::/ /\::/ |::::\ /:::|____|\::/ \:::\ /:::/ /\:::\ /:::/ \::/ / # \:::\ \:::\ \/____/ /:::/ / \/____/ \/____/ \:::\/:::/ / \/____|:::::\/:::/ / \/____/ \:::\/:::/ / \:::\/:::/ / \/____/ # \:::\ \:::\ \ /:::/ / \::::::/ / |:::::::::/ / \::::::/ / \::::::/ / # \:::\ \:::\____\ /:::/ / \::::/ / |::|\::::/ / \::::/ / \::::/____/ # \:::\ /:::/ / \::/ / /:::/ / |::| \::/____/ /:::/ / \:::\ \ # \:::\/:::/ / \/____/ /:::/ / |::| ~| /:::/ / \:::\ \ # \::::::/ / /:::/ / |::| | /:::/ / \:::\ \ # \::::/ / /:::/ / \::| | /:::/ / \:::\____\ # \::/ / \::/ / \:| | \::/ / \::/ / # \/____/ \/____/ \|___| \/____/ \/____/ # + [markdown] id="GVjwBHuTRe8A" colab_type="text" # Original: <https://colab.research.google.com/drive/1flu31ulJlgiRL1dnN2ir8wGh9p7Zij2t> # # # + [markdown] id="odNaDE1zyrL2" colab_type="text" # # install dependancies, takes around 45 seconds # # Rendering Dependancies # # # + id="8-AxnvAVyzQQ" colab_type="code" colab={} #remove " > /dev/null 2>&1" to see what is going on under the hood # !pip install gym pyvirtualdisplay > /dev/null 2>&1 # !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1 # + [markdown] id="8A-1LTSH88EE" colab_type="text" # Pacman Dependancies # + id="TCelFzWY9MBI" colab_type="code" colab={} # !apt-get update > /dev/null 2>&1 # !apt-get install cmake > /dev/null 2>&1 # !pip install --upgrade setuptools 2>&1 # !pip install ez_setup > /dev/null 2>&1 # !pip install gym[atari] > /dev/null 2>&1 # + [markdown] id="APXSx7hg19TH" colab_type="text" # # Imports and Helper functions # # + id="pdb2JwZy4jGj" colab_type="code" colab={} import gym from gym import logger as gymlogger from gym.wrappers import Monitor gymlogger.set_level(40) #error only import tensorflow as tf import numpy as np import random import matplotlib import matplotlib.pyplot as plt # %matplotlib inline import math import glob import io import base64 from IPython.display import HTML from IPython import display as ipythondisplay # + id="nQEtc28G4niA" colab_type="code" colab={} from pyvirtualdisplay import Display display = Display(visible=0, size=(1400, 900)) display.start() # + id="G9UWeToN4r7D" colab_type="code" colab={} """ Utility functions to enable video recording of gym environment and displaying it To enable video, just do "env = wrap_env(env)"" """ def show_video(): mp4list = glob.glob('video/*.mp4') if len(mp4list) > 0: mp4 = mp4list[0] video = io.open(mp4, 'r+b').read() encoded = base64.b64encode(video) ipythondisplay.display(HTML(data='''<video alt="test" autoplay loop controls style="height: 400px;"> <source src="data:video/mp4;base64,{0}" type="video/mp4" /> </video>'''.format(encoded.decode('ascii')))) else: print("Could not find video") def wrap_env(env): env = Monitor(env, './video', force=True) return env # + [markdown] id="W3BGbWOu179M" colab_type="text" # # Pacman! # + id="dGEFMfDOzLen" colab_type="code" colab={} env = wrap_env(gym.make("MsPacman-v0")) # + id="7BmIlXhe9Q89" colab_type="code" colab={} #check out the pacman action space! print(env.action_space) # + id="8nj5sjsk15IT" colab_type="code" colab={} observation = env.reset() while True: env.render() #your agent goes here action = env.action_space.sample() observation, reward, done, info = env.step(action) if done: break; env.close() show_video() # + id="Ro-hb5qRRRhU" colab_type="code" colab={}
notebooks/2019_03_13Rendering_OpenAi_Gym_in_Colaboratory.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="7dRi_BDWErNf" colab_type="text" # ![BYU PCCL](https://pcc4318.files.wordpress.com/2018/02/asset-1.png?w=150) # # Sponsored by the BYU PCCL Lab. # # > AI Dungeon 2 is a completely AI generated text adventure built with OpenAI's largest GPT-2 model. It's a first of it's kind game that allows you to enter and will react to any action you can imagine. # # # Main mirrors of AI Dungeon 2 are currently down due to high download costs. # We are using bittorrent as a temporary solution to host game files and keep this game alive. It's not fast, but it's the best we've got right now. # # If you want to help, best thing you can do is to **[download this torrent file with game files](https://github.com/nickwalton/AIDungeon/files/3935881/model_v5.torrent.zip)** and **seed it** indefinitely to the best of your ability. This will help new players download this game faster, and discover the vast worlds of AIDungeon2! # # - <a href="https://twitter.com/nickwalton00?ref_src=twsrc%5Etfw" class="twitter-follow-button" data-show-count="false">Follow @nickwalton00</a> on Twitter for updates on when it will be available again. # - **[Support AI Dungeon 2](https://www.patreon.com/AIDungeon) on Patreon to help me to continue improving the game with all the awesome ideas I have for its future!** # # ## How to play # 1. Click "Tools"-> "Settings..." -> "Theme" -> "Dark" (optional but recommended) # 2. Click "Runtime" -> "Run all" # 3. Wait until all files are downloaded (only has to be one once, and it will take some time) # 4. It will then take a couple minutes to boot up as the model is downloaded loaded onto the GPU. # 5. If you have questions about getting it to work then please [go to github repo](https://github.com/AIDungeon/AIDungeon) to get help. # # ## About # * While you wait you can [read adventures others have had](https://aidungeon.io/) # * [Read more](https://pcc.cs.byu.edu/2019/11/21/ai-dungeon-2-creating-infinitely-generated-text-adventures-with-deep-learning-language-models/) about how AI Dungeon 2 is made.- **[Support AI Dungeon 2](https://www.patreon.com/bePatron?u=19115449) on Patreon to help me to continue improving the game with all the awesome ideas I have for its future!** # # + id="FKqlSCrpS9dH" colab_type="code" colab={} # !git clone --depth 1 --branch master https://github.com/AIDungeon/AIDungeon/ # %cd AIDungeon # !./install.sh from IPython.display import clear_output clear_output() print("Download Complete!") # + id="YjArwbWh6XwN" colab_type="code" colab={} from IPython.display import Javascript display(Javascript('''google.colab.output.setIframeHeight(0, true, {maxHeight: 5000})''')) # !python play.py
AIDungeon_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + """ <NAME> -- rlf238 October 22nd, 2018 CS 5785 -- Applied Machine Learning Homework 2 """ import numpy as np from scipy import misc from matplotlib import pylab as plt import matplotlib.cm as cm # %matplotlib inline def get_data(filename): labels, data = [], [] for line in open(filename): im = misc.imread(line.strip().split()[0]) data.append(im.reshape(2500,)) labels.append(line.strip().split()[1]) return (np.array(data, dtype=float), np.array(labels, dtype=int)) # + """ Part 1.B """ train_data, train_labels = [], [] train_data, train_labels = get_data('./faces/train.txt') print(train_data.shape, train_labels.shape) plt.imshow(train_data[10, :].reshape(50,50), cmap=cm.Greys_r) # + """ Part 1.C """ training_avg = np.zeros(len(train_data[0])) for td in train_data: training_avg += td training_avg /= len(train_data) plt.clf() plt.imshow(training_avg.reshape(50,50), cmap=cm.Greys_r) # + """ Part 1.D - training set """ for i in range(len(train_data)): train_data[i] -= training_avg plt.clf() plt.imshow(train_data[10].reshape(50,50), cmap=cm.Greys_r) # + """ Part 1.D - testing set """ test_data, test_labels = get_data('./faces/test.txt') print(test_data.shape) for i in range(len(test_data)): test_data[i] -= training_avg plt.clf() plt.imshow(test_data[0].reshape(50,50), cmap=cm.Greys_r) # + """ Part 1.E """ from numpy.linalg import svd U, E, Vt = svd(train_data, full_matrices=True) U_test, E_test, Vt_test = svd(test_data, full_matrices=True) E = np.diag(E) E_test = np.diag(E_test) plt.clf() f, axarr = plt.subplots(2,5,figsize=(20,5)) for cols in range(5): for rows in range(2): axarr[rows, cols].imshow(Vt[rows*5 + cols].reshape(50,50), cmap=cm.Greys_r) # + """ Part 1.F """ x_axis = [i for i in range(1, 201)] dist = [] for r in range(1,201): x_hat = U[:,:r].dot(E[:r,:r].dot(Vt[:r,:])) d = np.linalg.norm(train_data - x_hat, ord='fro') dist.append(d) plt.clf() plt.xlabel('r-Value') plt.ylabel('Approximation Error') plt.plot(x_axis, dist) # + """ Part 1.G """ def get_F(r, data, V_t): return data.dot(np.transpose(V_t[:r,:])) F_train_10 = get_F(10, train_data, Vt) F_test_10 = get_F(10, test_data, Vt_test) # + """ Part 1.H """ from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score model = LogisticRegression(multi_class='ovr') model.fit(F_train_10, train_labels) preds = model.predict(F_test_10) score = accuracy_score(test_labels, preds) print('Score with r=10: {0}'.format(score)) scores = [] for r in range(1, 201): F_train = get_F(r, train_data, Vt) F_test = get_F(r, test_data, Vt_test) model = LogisticRegression(multi_class='ovr') model.fit(F_train, train_labels) preds = model.predict(F_test) scores.append(accuracy_score(test_labels, preds)) plt.clf() plt.xlabel('r-Value') plt.ylabel('Accuracy') plt.plot(x_axis, scores) # + """ Part 2.C """ import json ingredients_list = {} # map ingredients to a vector point def get_cooking_data(filename): with open(filename) as f: return json.load(f) def vectorize_data(j_data): r_data = [] r_labels = [] ids = [] for d in j_data: vec = np.zeros(len(ingredients_list)) ids.append(d['id']) if 'cuisine' in d: lab = d['cuisine'] r_labels.append(lab) for i in d['ingredients']: vec[ingredients_list[i]] = 1 r_data.append(vec) return np.array(ids), np.array(r_data), np.array(r_labels) def get_ingredients_vector(train_data, test_data): for d in train_data: for i in d['ingredients']: if i not in ingredients_list: ingredients_list[i] = len(ingredients_list) for d in test_data: for i in d['ingredients']: if i not in ingredients_list: ingredients_list[i] = len(ingredients_list) print(len(ingredients_list)) tr_data = get_cooking_data('./cooking_data/train.json') te_data = get_cooking_data('./cooking_data/test.json') get_ingredients_vector(tr_data, te_data) training_ids, training_data, training_labels = vectorize_data(tr_data) test_ids, test_data, _ = vectorize_data(te_data) # - categories = {} for cat in training_labels: if cat not in categories: categories[cat] = 1 print('Number of samples in training set: {0}'.format(len(training_labels))) print('Number of categories: {0}'.format(len(categories))) print('Number of ingredients: {0}'.format(len(ingredients_list))) # + """ Part 2.D """ from sklearn.naive_bayes import GaussianNB, BernoulliNB from sklearn.model_selection import cross_val_score g_nb = GaussianNB() b_nb = BernoulliNB() g_scores = cross_val_score(g_nb, training_data, training_labels, cv=3) b_scores = cross_val_score(b_nb, training_data, training_labels, cv=3) print('Gaussian 3-fold cross validation score: ' + str(sum(g_scores) / len(g_scores))) print('Bernoulli 3-fold cross validation score: ' + str(sum(b_scores) / len(b_scores))) # + """ Part 2.F """ from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr_scores = cross_val_score(lr, training_data, training_labels, cv=3) print('Logistic Regression 3-fold cross validation score: ' + str(sum(lr_scores)/len(lr_scores))) # + """ Part 2.G """ import csv def write_outputs(ids, labels): with open('testing_output.csv', 'wb') as tof: writer = csv.writer(tof, delimiter=',') writer.writerow(['id', 'cuisine']) for i in range(len(ids)): c_id = ids[i] l = labels[i] writer.writerow([c_id, l]) lr = LogisticRegression().fit(training_data, training_labels) preds = lr.predict(test_data) write_outputs(test_ids, preds) # + """ Written 3.A """ M = [[1, 0, 3], [3, 7, 2], [2, -2, 8], [0, -1, 1], [5, 8, 7]] M = np.array(M) Mt = M.transpose() MMt = M.dot(Mt) MtM = Mt.dot(M) print('M:') print(M) print('\r\nM^t') print(Mt) print('\r\nM^tM') print(MtM) print('\r\nMM^t') print(MMt) # + """ Written 3.B and 3.C """ from numpy import linalg as LA import math w_mtm, v_mtm = LA.eig(Mt.dot(M)) w_mmt, v_mmt = LA.eig(M.dot(Mt)) w_mtm = np.array([w_mtm[0], w_mtm[2]]) v_mtm = np.array([v_mtm[:,0], v_mtm[:,2]]) w_mmt = np.array([w_mmt[0], w_mmt[2]]) v_mmt = np.array([v_mmt[:,0], v_mmt[:,2]]) print('\r\nEigenvalues and eigenvectors for M^tM') for i in range(len(w_mtm)): print('{0}: {1}'.format(w_mtm[i], v_mtm[:,i])) print('\r\nEigenvalues and eigenvectors for MM^t') for i in range(len(w_mmt)): print('{0}: {1}'.format(w_mmt[i], v_mmt[:,i])) # + """ Written 3.D """ sigma = np.diag([math.sqrt(w_mtm[0]), math.sqrt(w_mtm[1])]) print('\r\n Sigma:') print(sigma) V = np.transpose(v_mtm) Vt = np.transpose(V) print('\r\n Vt:') print(Vt) U = M.dot(V.dot(np.linalg.inv(sigma))) print('\r\n U:') print(U) new_M = U.dot(sigma.dot(Vt)) print('\r\n calculated M:') print(new_M) # - """ Written 3.E """ M_1 = U[:,:1].dot(sigma[:1,:1].dot(Vt[:1,:])) print(M_1)
main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Intro # In this notebook I process the data to provide with training and validation dataset for eSRGAN fine-tuning. # # **Data description** # The initial data is captured by <NAME> fluorescent microscope in *Brain Stem Cell laboratory* of MIPT by <NAME> and represent the propidium iodid-labeled cells. Such method is used for brain structure labelling along with Nissl structures. The data represent the half of the brain of healthy adult mice. Data is collected as stack of 572 2D-slices, each is an 8bit tif image with sizes 9000 x 7680 pixels. # # **Processes** # eSRGAN was trained on dataset to restore 128x128 images, downscaled to 32x32 with bicubic interpolation. The same principle is taken here. 2D slices were uploaded and cropped randomely to images with size from 64x64 to 256x256. After that GT (ground truth) images were acquired by resizing this images to 128x128 with Nearest Neighbours interpolation, while LR (low res) images were generated by the same manner - rescaling to 32x32. # # Totally there were ~15000 images generated with original GT-crop with sizes from 128 to 256 and ~5000 images with original GT-crop with sizes from 64 to 128 pixels. # # **Additional visualizations** # ## Imports # + import sys import os # #!pip install --user --upgrade opencv-python import cv2 import numpy as np import matplotlib.pyplot as plt from PIL import Image # #!pip install --user --upgrade imutils import imutils import torch from IPython.display import clear_output # - # # Original data # Read and visualize 2D slice # + data_folder = './data_Z1/tiffs/' files = os.listdir(data_folder) def sort_slice_idx(name): return int(name.split(' ')[-1].split('.')[0]) files.sort(key = sort_slice_idx) print(len(files)) print(files[:5]) # - img_slice = cv2.imread(os.path.join(data_folder, files[327]), -1) img_slice.size img_slice.shape plt.figure(figsize=(16,8)) plt.imshow(img_slice[:,:,0], cmap='gray') plt.show() # # Examples of distortion and data generation from scipy.signal import convolve2d # + ## From mmsr.codes.data.utils import math # matlab 'imresize' function, now only support 'bicubic' def cubic(x): absx = torch.abs(x) absx2 = absx**2 absx3 = absx**3 return (1.5 * absx3 - 2.5 * absx2 + 1) * ( (absx <= 1).type_as(absx)) + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (( (absx > 1) * (absx <= 2)).type_as(absx)) def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing): if (scale < 1) and (antialiasing): # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width kernel_width = kernel_width / scale # Output-space coordinates x = torch.linspace(1, out_length, out_length) # Input-space coordinates. Calculate the inverse mapping such that 0.5 # in output space maps to 0.5 in input space, and 0.5+scale in output # space maps to 1.5 in input space. u = x / scale + 0.5 * (1 - 1 / scale) # What is the left-most pixel that can be involved in the computation? left = torch.floor(u - kernel_width / 2) # What is the maximum number of pixels that can be involved in the # computation? Note: it's OK to use an extra pixel here; if the # corresponding weights are all zero, it will be eliminated at the end # of this function. P = math.ceil(kernel_width) + 2 # The indices of the input pixels involved in computing the k-th output # pixel are in row k of the indices matrix. indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view( 1, P).expand(out_length, P) # The weights used to compute the k-th output pixel are in row k of the # weights matrix. distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices # apply cubic kernel if (scale < 1) and (antialiasing): weights = scale * cubic(distance_to_center * scale) else: weights = cubic(distance_to_center) # Normalize the weights matrix so that each row sums to 1. weights_sum = torch.sum(weights, 1).view(out_length, 1) weights = weights / weights_sum.expand(out_length, P) # If a column in weights is all zero, get rid of it. only consider the first and last column. weights_zero_tmp = torch.sum((weights == 0), 0) if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): indices = indices.narrow(1, 1, P - 2) weights = weights.narrow(1, 1, P - 2) if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): indices = indices.narrow(1, 0, P - 2) weights = weights.narrow(1, 0, P - 2) weights = weights.contiguous() indices = indices.contiguous() sym_len_s = -indices.min() + 1 sym_len_e = indices.max() - in_length indices = indices + sym_len_s - 1 return weights, indices, int(sym_len_s), int(sym_len_e) def imresize_np(img, scale, antialiasing=True): # Now the scale should be the same for H and W # input: img: Numpy, HWC BGR [0,1] # output: HWC BGR [0,1] w/o round img = torch.from_numpy(img) in_H, in_W, in_C = img.size() _, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) kernel_width = 4 kernel = 'cubic' # Return the desired dimension order for performing the resize. The # strategy is to perform the resize first along the dimension with the # smallest scale factor. # Now we do not support this. # get weights and indices weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( in_H, out_H, scale, kernel, kernel_width, antialiasing) weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( in_W, out_W, scale, kernel, kernel_width, antialiasing) # process H dimension # symmetric copying img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C) img_aug.narrow(0, sym_len_Hs, in_H).copy_(img) sym_patch = img[:sym_len_Hs, :, :] inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() sym_patch_inv = sym_patch.index_select(0, inv_idx) img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv) sym_patch = img[-sym_len_He:, :, :] inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() sym_patch_inv = sym_patch.index_select(0, inv_idx) img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) out_1 = torch.FloatTensor(out_H, in_W, in_C) kernel_width = weights_H.size(1) for i in range(out_H): idx = int(indices_H[i][0]) out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i]) out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i]) out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i]) # process W dimension # symmetric copying out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C) out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1) sym_patch = out_1[:, :sym_len_Ws, :] inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() sym_patch_inv = sym_patch.index_select(1, inv_idx) out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv) sym_patch = out_1[:, -sym_len_We:, :] inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() sym_patch_inv = sym_patch.index_select(1, inv_idx) out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) out_2 = torch.FloatTensor(out_H, out_W, in_C) kernel_width = weights_W.size(1) for i in range(out_W): idx = int(indices_W[i][0]) out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i]) out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i]) out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i]) return out_2.numpy() # + plt.figure(figsize=(16,12)) plt.subplot(221) size = (3,13) kernel = np.zeros(size) kernel[int(size[0]//2), int(size[1]//2)] = 1 kernel = kernel / np.sum(kernel) plt.imshow(kernel, cmap='gray') plt.subplot(222) kernel = cv2.resize(kernel, (kernel.shape[1]*4, kernel.shape[0]*4), cv2.INTER_CUBIC ) plt.imshow(kernel, cmap='gray') plt.subplot(223) kernel = cv2.resize(kernel, (kernel.shape[1]*1,int(kernel.shape[0]*(size[1]/size[0]))), cv2.INTER_CUBIC ) plt.imshow(kernel, cmap='gray') plt.subplot(224) kernel = cv2.resize(kernel, (kernel.shape[1]//4, kernel.shape[0]//3), cv2.INTER_CUBIC ) step = kernel.shape[0]//2 mask = np.ones_like(kernel) mask[step+3:,:] = 0 kernel = kernel*mask kernel = kernel / np.sum(kernel) angle = np.random.randint(-30,30) rows, cols = kernel.shape M_inverse = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1) kernel = cv2.warpAffine(kernel, M_inverse, (cols, rows)) plt.imshow(kernel, cmap='gray') plt.show() print(kernel.shape) # - np.random.randint(-30,30) kernel # ### Test if their 'cubic' interpolation gives another results than cv2.INTER_CUBIC # No # + plt.figure(figsize=(18,8)) img_size = 128 start_x = 3000 start_y = 1500 resize = 4 part = img_slice[start_x:start_x+img_size, start_y:start_y+img_size, 0] plt.subplot(131) plt.imshow(part.astype(np.float64)/255, cmap='gray') part_resized = imutils.resize(part.astype(np.float64)/255, width=int(part.shape[1]//resize), inter=cv2.INTER_CUBIC ) plt.title('original') plt.subplot(132) plt.imshow(part_resized, cmap='gray') plt.title('cv2 Bicubic') plt.subplot(133) part = cv2.cvtColor(part, cv2.COLOR_GRAY2BGR ) part = part.astype(np.float64)/255 part_resized = imresize_np(part, 1/resize) part_resized = cv2.cvtColor((part_resized*255).astype(np.uint8), cv2.COLOR_RGB2GRAY ).astype(np.float64)/255 #part_resized = imutils.resize(part, width=int(part.shape[1]//(resize/2)), inter=cv2.INTER_CUBIC ) #part_resized_motion = cv2.filter2D(part_resized, -1, kernel) #part_resized_motion = imutils.resize(part_resized_motion, width=int(part_resized_motion.shape[1]//(resize/2)), inter=cv2.INTER_CUBIC ) #plt.imshow(part_resized_motion, cmap='gray') plt.imshow(part_resized, cmap='gray') plt.title('Matlab Bicubic') plt.show() # + plt.figure(figsize=(18,8)) img_size = 128 start_x = 2000 start_y = 1500 resize = 4 part = img_slice[start_x:start_x+img_size, start_y:start_y+img_size, 0] part_resized = imutils.resize(part, width=int(part.shape[1]//resize), inter=cv2.INTER_CUBIC ) part = imutils.resize(part, width=128, inter=cv2.INTER_NEAREST ) plt.subplot(141) plt.imshow(part, cmap='gray') part_resized = imutils.resize(part_resized, width=int(part.shape[1]//resize), inter=cv2.INTER_CUBIC ) plt.subplot(142) plt.imshow(part_resized, cmap='gray') #part_resized = imutils.resize(part, width=int(part.shape[1]//(resize/2)), inter=cv2.INTER_CUBIC ) #part_resized_motion = cv2.filter2D(part_resized, -1, kernel) #part_resized_motion = imutils.resize(part_resized_motion, width=int(part_resized_motion.shape[1]//(resize/2)), inter=cv2.INTER_CUBIC ) part_resized = imutils.resize(part, width=int(part.shape[1]//(resize)), inter=cv2.INTER_CUBIC ) part_resized_motion = cv2.filter2D(part_resized, -1, kernel) #part_resized_motion = imutils.resize(part_resized_motion, width=int(part_resized_motion.shape[1]//(resize)), inter=cv2.INTER_CUBIC ) plt.subplot(143) detalization = 2 mask = np.random.random(detalization*detalization) mask = mask.reshape((detalization,detalization)) mask = imutils.resize(mask, width=32, inter=cv2.INTER_CUBIC ) plt.imshow(mask, cmap='gray') part_resized_motion = part_resized_motion*mask + part_resized*(1-mask) plt.subplot(144) plt.imshow(part_resized_motion, cmap='gray') plt.show() # - # # Check cross-slice connection img_slice_0 = cv2.imread(os.path.join(data_folder, files[0]), -1) img_slice_1 = cv2.imread(os.path.join(data_folder, files[1]), -1) img_slice_2 = cv2.imread(os.path.join(data_folder, files[2]), -1) # + plt.figure(figsize=(18,8)) img_size = 100 start_x = 2000 start_y = 3000 resize = 4 plt.subplot(131) part_0 = img_slice_0[start_x:start_x+img_size, start_y:start_y+img_size, 0] plt.imshow(part_0, cmap='gray') plt.subplot(132) part_1 = img_slice_1[start_x:start_x+img_size, start_y:start_y+img_size, 0] plt.imshow(part_1, cmap='gray') plt.subplot(133) part_2 = img_slice_2[start_x:start_x+img_size, start_y:start_y+img_size, 0] plt.imshow(part_2, cmap='gray') plt.show() # - # ### See the slice over Z # + crop_parts = [] img_size = 512 start_x = 0 start_y = 3000 for i in range(0,100): img_tmp = cv2.imread(os.path.join(data_folder, files[i]), -1) crop_parts.append( img_tmp[start_x:start_x+img_size, start_y:start_y+img_size, 0] ) crop_parts = np.array(crop_parts) crop_parts.shape # + plt.figure(figsize=(16,8)) proj = crop_parts[:,:,220] proj = cv2.resize(proj, (proj.shape[1], int(proj.shape[0]*2.7)), cv2.INTER_CUBIC ) cell = proj[100:150,300:350] plt.imshow(cell, cmap='gray') plt.show() cell = cv2.resize(cell, (int(cell.shape[1]//4), int(cell.shape[0]//4)), cv2.INTER_CUBIC ) plt.imshow(cell, cmap='gray') plt.show() img_test = cv2.cvtColor(proj[100:150,300:350], cv2.COLOR_GRAY2BGR ) img_test = img_test.astype(np.float64)/255 img_new = imresize_np(img_test, 1/4) img_new = cv2.cvtColor((img_new*255).astype(np.uint8), cv2.COLOR_BGR2GRAY ).astype(np.float64)/255 plt.imshow(img_new, cmap='gray') plt.show() # - # <hr> # ## Prepare images for training def generate_train_val_idx(list_of_tif_files, train_num, test_num): # Z-idx total_num_slices = len(list_of_tif_files) test_z = np.random.choice(np.arange(0,total_num_slices,1), int(total_num_slices/3), replace=False) train_z = np.array([idx for idx in range(total_num_slices) if idx not in test_z]) train_z_idx = np.random.choice(train_z, train_num) test_z_idx = np.random.choice(test_z, test_num) # X, Y - coordinate train_start_x = np.random.randint(500,6000, train_num) train_start_y = np.random.randint(2000,6000, train_num) test_start_x = np.random.randint(500,6000, test_num) test_start_y = np.random.randint(2000,6000, test_num) # Image Sizes #img_sizes = np.random.randint(128,256, train_num + test_num) img_sizes = np.random.randint(64, 128, train_num + test_num) train_img_sizes = img_sizes[:train_num] test_img_sizes = img_sizes[train_num:] train_idx = {} for z, x, y, size in zip(train_z_idx, train_start_x, train_start_y, train_img_sizes): if z in train_idx.keys(): train_idx[z].append((x, y, size)) else: train_idx[z] = [(x, y, size)] test_idx = {} for z, x, y, size in zip(test_z_idx, test_start_x, test_start_y, test_img_sizes): if z in test_idx.keys(): test_idx[z].append( (x, y, size) ) else: test_idx[z] = [(x, y, size)] return train_idx, test_idx generate_train_val_idx(files, 2,2) # + input_size = 32 output_size = 128 train_num = 5000 #20000 test_num = 300 #1000 data_folder = './data_Z1/tiffs/' train_folder_gt = './mmsr/datasets/train_data_Z1_GT/' #train_data_Z1_GT_additional180120 train_folder_lr = './mmsr/datasets/train_data_Z1_LR/' val_folder_gt = './mmsr/datasets/val_data_Z1_GT/' val_folder_lr = './mmsr/datasets/val_data_Z1_LR/' # + # Think before launch !!!!!!delete train, valid = generate_train_val_idx(files, train_num, test_num) unique_id = 1 for i_z, z_idx in enumerate(train.keys()): pieces = train[z_idx] tmp_slice = cv2.imread(os.path.join(data_folder, files[z_idx]), -1) for i_xy, shape in enumerate(pieces): (x,y,size) = shape part = tmp_slice[x:x+size, y:y+size, 0] part = np.transpose([part,part,part], (1,2,0)) part = imutils.resize(part, width=output_size, inter=cv2.INTER_NEAREST ) part_resized = imutils.resize(part, width=input_size, inter=cv2.INTER_CUBIC ) # Images are loaded by idx in sorted list cv2.imwrite(os.path.join(train_folder_gt, 'train_Z1_'+'{:06d}'.format(unique_id)+'.png'), part) cv2.imwrite(os.path.join(train_folder_lr, 'train_Z1_'+'{:06d}'.format(unique_id)+'.png'), part_resized) unique_id +=1 clear_output() print(i_z, 'done out of', len(train.keys())) print('train - finished') unique_id = 1 for i_z, z_idx in enumerate(valid.keys()): pieces = valid[z_idx] tmp_slice = cv2.imread(os.path.join(data_folder, files[z_idx]), -1) for i_xy, shape in enumerate(pieces): (x,y,size) = shape part = tmp_slice[x:x+size, y:y+size, 0] part = np.transpose([part,part,part], (1,2,0)) part = imutils.resize(part, width=output_size, inter=cv2.INTER_NEAREST ) part_resized = imutils.resize(part, width=input_size, inter=cv2.INTER_CUBIC ) # Images are loaded by idx in sorted list cv2.imwrite(os.path.join(val_folder_gt, 'valid_Z1_'+'{:06d}'.format(unique_id)+'.png'), part) cv2.imwrite(os.path.join(val_folder_lr, 'valid_Z1_'+'{:06d}'.format(unique_id)+'.png'), part_resized) unique_id +=1 clear_output() print(i_z, 'done out of', len(valid.keys())) print('valid - finished') # + test_gt = sorted(os.listdir(val_folder_gt)) test_lr = sorted(os.listdir(val_folder_lr)) test_gt[:5], test_lr[:5] # - # ### Examples for i in range(3): img_size = np.random.randint(128,256) start_x = np.random.randint(500,6000) start_y = np.random.randint(2000,6000) resize = 4 part = img_slice[start_x:start_x+img_size, start_y:start_y+img_size, 0] part = np.transpose([part,part,part], (1,2,0)) part = imutils.resize(part, width=128, inter=cv2.INTER_NEAREST ) part_resized = imutils.resize(part, width=int(part.shape[1]//resize), inter=cv2.INTER_CUBIC ) #cv2.imwrite(os.path.join(train_folder_gt, 'train_Z1'+'{:04d}'.format(i)+'.png'), part) #cv2.imwrite(os.path.join(train_folder_lr, 'train_Z1'+'{:04d}'.format(i)+'.png'), part_resized) clear_output(3) plt.figure(figsize=(14,6)) plt.subplot(121) plt.imshow(part, cmap='gray') plt.subplot(122) plt.imshow(part_resized, cmap='gray') plt.show() for i in range(1): img_size = 64#np.random.randint(128,256) start_x = np.random.randint(500,6000) start_y = np.random.randint(2000,6000) resize = 4 part = img_slice[start_x:start_x+img_size, start_y:start_y+img_size, 0] part = np.transpose([part,part,part], (1,2,0)) part = imutils.resize(part, width=128, inter=cv2.INTER_NEAREST ) part_resized = imutils.resize(part, width=int(part.shape[1]//resize), inter=cv2.INTER_CUBIC ) cv2.imwrite(os.path.join(val_folder_gt, 'val_Z1'+'{:04d}'.format(i)+'.png'), part) cv2.imwrite(os.path.join(val_folder_lr, 'val_Z1'+'{:04d}'.format(i)+'.png'), part_resized) clear_output(3) plt.figure(figsize=(14,6)) plt.subplot(121) plt.imshow(part, cmap='gray') plt.subplot(122) plt.imshow(part_resized, cmap='gray') plt.show() # <hr> # ## Additional train dataset # this data represents additional augmentation added by motion blur to image # > 18.01.2020 # + input_size = 32 output_size = 128 train_num = 3000 #20000 test_num = 300 #1000 data_folder = './data_Z1/tiffs/' train_folder_gt = './mmsr/datasets/train_data_Z1_GT_additional180120/' #train_data_Z1_GT_additional180120 train_folder_lr = './mmsr/datasets/train_data_Z1_LR_additional180120/' # + def generate_gt(tmp_slice, shape): (x,y,size) = shape part = tmp_slice[x:x+size, y:y+size, 0] return part def generate_mb_kernel(): size = (3,np.random.choice([9,13])) kernel = np.zeros(size) kernel[int(size[0]//2), int(size[1]//2)] = 1 kernel = kernel / np.sum(kernel) kernel = cv2.resize(kernel, (kernel.shape[1]*4, kernel.shape[0]*4), cv2.INTER_CUBIC ) kernel = cv2.resize(kernel, (kernel.shape[1]*1,int(kernel.shape[0]*(size[1]/size[0]))), cv2.INTER_CUBIC ) kernel = cv2.resize(kernel, (kernel.shape[1]//4, kernel.shape[0]//3), cv2.INTER_CUBIC ) step = kernel.shape[0]//2 + int(0.15*kernel.shape[0]) mask = np.ones_like(kernel) mask[step,:] = mask[step,:]*0.5 mask[step+1:,:] = 0 kernel = kernel*mask kernel = kernel / np.sum(kernel) angle = np.random.randint(-10,10) rows, cols = kernel.shape M_inverse = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1) kernel = cv2.warpAffine(kernel, M_inverse, (cols, rows)) return kernel def generate_lr(part, kernel): resize = 4 detalization = 2 part_resized = imutils.resize(part, width=int(part.shape[1]//resize), inter=cv2.INTER_CUBIC ) part_resized_motion = cv2.filter2D(part_resized, -1, kernel) mask = np.random.random(detalization*detalization) mask = mask.reshape((detalization,detalization)) mask = imutils.resize(mask, width=part_resized.shape[1], inter=cv2.INTER_CUBIC ) part_resized_motion = part_resized_motion*mask + part_resized*(1-mask) return part_resized_motion # + train, valid = generate_train_val_idx(files, 1, 1) z_idx =list( train.keys() )[0] pieces = train[z_idx] tmp_slice = cv2.imread(os.path.join(data_folder, files[z_idx]), -1) shape = pieces[0] # + train, valid = generate_train_val_idx(files, 1, 1) z_idx =list( train.keys() )[0] shape = train[z_idx][0] part = generate_gt(tmp_slice, shape) part = cv2.resize(part, (128,128), cv2.INTER_NEAREST ) print(part.shape) kernel = generate_mb_kernel() part_resized = generate_lr(part, kernel) part = np.transpose([part,part,part], (1,2,0)) part = np.uint8(part) part_resized = np.transpose([part_resized,part_resized,part_resized], (1,2,0)) part_resized = np.uint8(part_resized) plt.figure(figsize=(18,8)) plt.subplot(131) plt.imshow(part, cmap='gray') plt.subplot(132) plt.imshow(kernel, cmap='gray') plt.subplot(133) plt.imshow(part_resized, cmap='gray') plt.show() # + train, valid = generate_train_val_idx(files, train_num, test_num) unique_id = 1 for i_z, z_idx in enumerate(train.keys()): pieces = train[z_idx] tmp_slice = cv2.imread(os.path.join(data_folder, files[z_idx]), -1) for i_xy, shape in enumerate(pieces): part = generate_gt(tmp_slice, shape) part = cv2.resize(part, (128,128), cv2.INTER_NEAREST ) kernel = generate_mb_kernel() part_resized = generate_lr(part, kernel) part = np.transpose([part,part,part], (1,2,0)) part = np.uint8(part) part_resized = np.transpose([part_resized,part_resized,part_resized], (1,2,0)) part_resized = np.uint8(part_resized) # Images are loaded by idx in sorted list cv2.imwrite(os.path.join(train_folder_gt, 'train_Z1_'+'{:06d}'.format(unique_id)+'.png'), part) cv2.imwrite(os.path.join(train_folder_lr, 'train_Z1_'+'{:06d}'.format(unique_id)+'.png'), part_resized) unique_id +=1 clear_output() print(i_z, 'done out of', len(train.keys())) print('train - finished') # - part.shape, part.dtype, part_resized.shape, part_resized.dtype
eSRGAN_data_generation_for_training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from sklearn import metrics import numpy as np results = pd.read_csv("Fatma_training_set_aggression_linear_word_edResults.csv") fpr, tpr, thresholds = metrics.roc_curve(results['y_true_bool'], results['y_predict_prob_1'], pos_label=1) np.mean(thresholds) np.mean(fpr) np.mean(tpr) metrics.auc(fpr,tpr) fpr2, tpr2, thresholds2 = metrics.roc_curve(results['y_true_binary'], results['y_prediction'], pos_label=1) np.mean(thresholds2) np.mean(fpr2) np.mean(tpr2) metrics.auc(fpr2,tpr2)
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Read BAM file and get information about mapping # %pylab inline from sequana import BAM, sequana_data b = BAM(sequana_data('test.bam', 'testing')) b.plot_bar_mapq() b.get_flags_as_df().sum() b.get_full_stats_as_df()
notebooks/BAM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="../../img/ods_stickers.jpg" /> # # Открытый курс по машинному обучению. Сессия № 2 # # Индивидуальный проект по анализу данных. # # Предсказание наличия у пациента хронической почечной недостаточности # **Автор:** <NAME> (<EMAIL>) # ## Задача # ### Постановка # Для индивидуального проекта был взят этот <a href="http://archive.ics.uci.edu/ml/datasets/Chronic_Kidney_Disease" target="_blank">датасет</a>. В нём содержатся различные данные о пациентах и информация о том, есть ли у них хроническая почечная недостаточность (ХПН). Задача состоит в том, чтобы на основе представленных данных в датасете научиться предсказывать, болен ли человек ХПН или нет. # # Причиной ХПН могут быть множество болезней, некоторые из них, если запустить процесс, приводят к полной деградации функции почек. Есть так же болезни, вызывающие ХПН, которые приводят к отказу почек **гарантировано**. Поэтому чем раньше и проще будет диагностироваться возникновение ХПН, тем больше людей будет спасено от терминальной стадии ХПН, ну или хотя бы функция их почек дойдёт до терминальной стадии за больший срок, что тоже не самый плохой результат. # ### Описание данных в датасете # Посмотрим, какие данные есть в датасете. # # Признаки: # 1. age (численный) -- возраст в годах; # 2. bp (численный) -- артериальное давление в мм/рт.ст. # 3. sg (категориальный) -- удельная плотность, судя по всему, мочи. Возможные значения -- 1.005, 1.010, 1.015, 1.020, 1.025. # 4. al (категориальный) -- альбумин. Возможные значения -- 0, 1, 2, 3, 4, 5. # 5. su (категориальный) -- сахар. Возможные значения -- 0, 1, 2, 3, 4, 5. # 6. rbc (категориальный) -- красные кровяные тельца. Возможные значения -- normal, abnormal. # 7. pc (категориальный) -- гнойные клетки. Возможные значения -- normal, abnormal. # 8. pcc (категориальный) -- комки гнойных клеток. Возможные значения -- present, notpresent. # 9. ba (категориальный) -- бактерии. Возможные значения -- present, notpresent. # 10. bgr (численный) -- случайный тест глюкозы. Этот тест измеряет концентрацию глюкозы в крови в любое время без каких-нибудь предварительных условий (на тощак и т.д.). Яркий пример -- приборы для измерения уровня глюкозы у диабетиков. Измеряется в мг/дл. # 11. bu (численный) -- мочевина в крови. Измеряется в мг/дл. # 12. sc (численный) -- креатенин в сыворотке. Изменяется в мг/дл. # 13. sod (численный) -- натрий. Измеряется в миллиэквивалентах/л. # 14. pot (численный) -- калий. Измеряется в миллиэквивалентах/л. # 15. hemo (численный) -- гемоглобин. В описании данных указано, что измеряется в gms, но судя по всему, тут опечатка и измеряется в г/мл (возможно, требуется изучить данные :-)). # 16. pcv (численный) -- гематокрит (объём упакованных клеток). # 17. wc (численный) -- количество белых кровяных клеток. Измеряется в количестве клеток на микролитр. # 18. rc (численный) -- количество эритроцитов. Измеряется в миллионах на кубический сантиметр. # 19. htn (категориальный) -- гипертония. Возможные значения -- yes, no. # 20. dm (категориальный) -- сахарный диабет. Возможные значения -- yes, no. # 21. cad (категориальный) -- коронарная недостаточность. Возможные значения -- yes, no. # 22. appet (категориальный) -- аппетит. Возможные значения -- good, poor. # 23. pe (категориальный) -- отёк ног. Возможные значения -- yes, no. # 24. ane (категориальный) -- анемия. Возможные значения -- yes, no. # # Целевой признак class является категориальным и имеет два значения ckd (есть ХПН) и notckd (нет ХПН). # ## Первичная подготовка данных # Сначала загрузим датасет. Предварительно, я руками сконвертировал его в формат csv (scipy'евский ридер, исходный датасет не смог прочитать). # Константы. RANDOM_SEED = 17 PATH_TO_DATASET_FILE = "../../data/chronic_kidney_disease.csv" # + import pandas as pd raw_dataset = pd.read_csv(PATH_TO_DATASET_FILE) raw_dataset.head() # - # Размер таблицы: raw_dataset.shape # и общая информация о датасете: raw_dataset.info() # Видно, что пандас при чтении посчитал все признаки объектами. Прежде чем начать анализировать датасет, преобразуем датасет в более удобоваримый формат: # 1. заменим строковые значения категориальных признаков на числовые (простое индексирование); # 2. заменим знаки вопроса, обозначающие пропуск значения на NaN; # 3. приведём все признаки к типу float64. # # Поехали: # + import numpy as np fixed_dataset = pd.DataFrame() passes_filler = lambda x: np.nan if str(x) == "?" else x numerical_features = ["age", "bp", "bgr", "bu", "sc", "sod", "pot", "hemo", "pcv", "wc", "rc"] nominal_features = ["sg", "al", "su", "rbc", "pc", "pcc", "ba", "htn", "dm", "cad", "appet", "pe", "ane", "class"] nominal_features_mapping = [ {"1.005": 0, "1.010": 1, "1.015": 2, "1.020": 3, "1.025": 4}, {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5}, {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5}, {"normal": 0, "abnormal": 1}, {"normal": 0, "abnormal": 1}, {"present": 0, "notpresent": 1}, {"present": 0, "notpresent": 1}, {"yes": 0, "no": 1}, {"yes": 0, "no": 1}, {"yes": 0, "no": 1}, {"good": 0, "poor": 1}, {"yes": 0, "no": 1}, {"yes": 0, "no": 1}, {"ckd": 1, "notckd": 0} ] for feature in numerical_features: fixed_dataset[feature] = raw_dataset[feature].map(passes_filler).astype("float64") for num, feature in enumerate(nominal_features): fixed_dataset[feature] = raw_dataset[feature].map(passes_filler).map(nominal_features_mapping[num]) fixed_dataset.head() # - # Теперь в raw_dataset лежат данные в том виде, с которым можно работать. # ## Первичный анализ датасета # Пришло время внимательно посмотреть на данные. Для начала посмотрим на отношения классов, для этого нарисуем гистограмму, заодно импортируем библиотеки для рисования и настроим их: # + import pylab as plt # %matplotlib inline import seaborn as sns from matplotlib import pyplot as plt plt.rcParams["figure.figsize"] = (10, 8) # - fixed_dataset["class"].value_counts().plot(kind="bar", label="ckd") plt.legend() plt.title("Распределение признака ХПН в датасете"); # Видно, что классы не сбалансированы, людей с ХПН в датасете больше, чем без него, но соотношение мощности классов меньше двух, иными словами данных о больных ХПН в датасете не подавляющее большинство. # Теперь посмотрим статистику по нашим числовым данным, попробуем понять, на сколько чистые данные у нас в наличии. fixed_dataset.describe() # В описании датасета написано, что данные реальные, правдоподобность большинства значений признаков, мне, как не специалисту оценить крайне затруднительно. В целом, я думаю, что надо оставить все данные, даже не смотря на то, что минимальный возраст пациента 2 года, увы, ХПН бывает и в таком возрасте. К тому же, экстремальные значения признака скорее всего будет говорить о наличии какой-либо паталогии. # Прежде чем пытаться дальше анализировать данные нужно обработать пропущенные значения в датасете. Поскольку, в теории, может быть пропущен любой признак, кроме целевого, то, будут обработаны как категориальные признаки, так и численные. # # Пропуски в численных признаках попробую заместить средним значением. Поэтому деление на тренировочную и тестовую выборку я выполню сейчас, чтобы не использовать данные из теста для подсчёта среднего по признаку. Разбиение будет стратифицированным, чтобы распределения целевого признака на тренировочном и тестовом датасете были одинаковы. На тестовую часть будет выделено 25% датасета. # + y_tmp = fixed_dataset["class"].copy(deep=True) X_tmp = fixed_dataset.copy(deep=True).drop("class", axis=1) from sklearn.model_selection import StratifiedShuffleSplit sss = StratifiedShuffleSplit(n_splits=1, test_size=0.25, random_state=RANDOM_SEED) train_index, test_index = tuple(sss.split(X_tmp, y_tmp))[0] train = fixed_dataset.iloc[train_index] test = fixed_dataset.iloc[test_index] # - # На всякий случай проверю, что распределения целевого признака на тестовой и тренировочной выборках примерно одинаковы. train["class"].value_counts().plot(kind="bar", label="ckd") plt.legend() plt.title("Распределение ХПН в датасете train"); test["class"].value_counts().plot(kind="bar", label="ckd") plt.legend() plt.title("Распределение ХПН в датасете test"); # Действительно, то что надо. # Теперь, заполним пропуски в данных. Для численных признаков я попробую использовать среднее. В категориальные признаки же введу ещё одно значение, обозначающее то, что признак отсутствует. # + dataset = fixed_dataset.copy(deep=True) # Обработка численных признаков. numerical_features_mean = [] for feature in numerical_features: mean_val = train[feature].mean() numerical_features_mean.append(mean_val) train[feature].fillna(mean_val, inplace=True) test[feature].fillna(mean_val, inplace=True) dataset[feature].fillna(mean_val, inplace=True) # Обработка категориальных признаков. # У этого списка признаков только два значения. cutted_nominal_features = ["rbc", "pc", "pcc", "ba", "htn", "dm", "cad", "appet", "pe", "ane"] for feature in cutted_nominal_features: train[feature].fillna(2, inplace=True) test[feature].fillna(2, inplace=True) dataset[feature].fillna(2, inplace=True) # Оставшиеся обработаю отдельно. # "sg", "al", "su" train["sg"].fillna(5, inplace=True) test["sg"].fillna(5, inplace=True) dataset["sg"].fillna(5, inplace=True) train["al"].fillna(6, inplace=True) test["al"].fillna(6, inplace=True) dataset["al"].fillna(6, inplace=True) train["su"].fillna(6, inplace=True) test["su"].fillna(6, inplace=True) dataset["su"].fillna(6, inplace=True) dataset.head() # - # Для удобства сконвертируем значения категориальных признаков в целые числа. for feature in nominal_features: train[feature] = train[feature].astype("int8") test[feature] = test[feature].astype("int8") dataset[feature] = dataset[feature].astype("int8") # Отлично, теперь у нас есть датасет без пропусков. Давайте посмотрим на значения, которые принимают наши категориальные признаки в тренировочной части датасета. # + train_uniques = pd.melt(frame=train, value_vars=nominal_features) train_uniques = pd.DataFrame(train_uniques.groupby(["variable", "value"])["value"].count()) \ .sort_index(level=[0, 1]) \ .rename(columns={"value": "count"}) \ .reset_index() sns.factorplot(x="variable", y="count", hue="value", data=train_uniques, kind="bar", size=12); # - # Из графика видно, что почти у всех категориальных признаков есть доминирующее значение, а так же видно, что у некоторых признаков очень много пропусков, возможно, их не стоит учитывать при постоении модели. Давайте посмотрим на численную статистику по уникальным значениям у категориальных признаков: for feature in nominal_features: n = train[feature].nunique() print("Feature: %s" % feature) print(n, sorted(train[feature].value_counts().to_dict().items())) print(10 * "-") # Эту статистику будем использовать в дальнейшем, если понадобится. # Давайте разобьём категориальные элементы датасета по значениям целевого признака. Возможно, это покажет нам самые важные признаки. # + train_uniques = pd.melt(frame=train, value_vars=nominal_features[:-1], id_vars=["class"]) train_uniques = pd.DataFrame(train_uniques.groupby(["variable", "value", "class"])["value"].count()) \ .sort_index(level=[0, 1]) \ .rename(columns={"value": "count"}) \ .reset_index() sns.factorplot(x="variable", y="count", hue="value", col="class", data=train_uniques, kind="bar", size=9); # - # Видно, что при отсутствии ХПН при наличии значения альбумина (al) он принимает только нулевое значение. Так же, если нет ХПН, то и значение анемии (ane) нет. Только при наличии ХПН пропадает аппетит (appet). Подобное можно написать про **все** категориальные признаки, в каждом есть значения, которые встречаются в одном классе и почти или вообще не встречаются в другом классе. # # Это выглядит абсолютно логичным, можно предположить, что значения, которые в основном встречаются только при наличии ХПН являются не нормальными. # # Например, гипертония, у тех у кого нет ХПН её нет в тренировочной выборке. Это объясняется тем, одной из причин гипертонии (это значит, что у человека повышенное артериальное давление) с одной стороны могут являться болезни почек, из-за того, что скорость фильтрации почек падает (СКФ - скорость клубочковой фильтрации, клубочек - структурная единица почки, выполняющаяя фильтрационную функцию) и организм пытается компенсировать это повышением артериального давления, что скорость фильтрации увеличивает и ускоряет деградацию функции почки. С другой стороны, гипертония, вызванная другими причинами негативно влияет на почку, разрушая её структурные единицы.T ак же при ХПН может появляться отсутствие аппетита и так далее и тому подобное. В общем очень тяжёлое заболевание, поэтому то, что все категориальные признаки являются достаточно сильными по медицинским причинам. # Теперь попробуем проанализировать численные признаки. Наприсуем boxplot'ы, описывающее статистики распределения количественных признаков в двух группах: у кого нет ХПН и у кого оно есть. # + fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(16, 10)) for index, feature in enumerate(numerical_features): sns.boxplot(x="class", y=feature, data=train, ax=axes[int(index / 3), index % 3]) axes[int(index / 3), index % 3].legend() axes[int(index / 3), index % 3].set_xlabel("class") axes[int(index / 3), index % 3].set_ylabel(feature); # - # На глаз, наибольшие различия у признаков hemo (гемоглобин), pcv (гематокрит), rc (количество эритроцитов), age (возраст). Логично, при ХПН деградируют все функции почки, в том числе её функция при создании гемоглобина в крови. Гематокрит и количество эритроцитов тоже снижаются. Возраст важен потому, что болезнь чаще встречается у людей старше 30. Скорее всего это сильные признаки. # # Так же, хочу обратить внимание на признаки bu (мочевина) и sc (креатенит), не смотря на то что они довольно мелко отрисовались, особенно креатенин, если присмотреться дожно быть видно довольно сильное отличие при разных значениях класса, связано это с тем, что мочевина и креатенин являются азотистыми шлаками, вырабатывающимися при жизнидеятельности человека и их выводом занимаются почки. Чем большая степень ХПН, тем хуже они фильтруют и тем больше концентация азотистых шлаков в крови. Скорее всего, они тоже будут сильными признаками. # И последнее, изучим корреляцию между количественными признаками. corr_matrix = train.drop(nominal_features, axis=1).corr() sns.heatmap(corr_matrix, annot=True, fmt=".2f"); # Используя таблицу Чеддока, получаем, что между собой сильно коррелируют (модуль больше или равен 0.7) признаки pcv и hemo, rc и pcv, заметно коррелируют (модуль от 0.5 до 0.7 не включая) sc и bu, sod и sc, hemo и bu, pcv и bu, rc и hemo, умеренно (модуль от 0.3 до 0.5 не включая) sod и bu, pot и bu, hemo и sc, hemo и sod, pcv и sc, pcv и sod, rc и sc, rc и sod. Корреляции сильные и заметные корреляции объясняются медицинскими причинами. # # Используем эту информацию при создании модели. # ## Построение модели # Поскольку задача медицинская, крайне необходимо, чтобы алгоритм легко интерпретировался. Поэтому будет строиться дерево решений. В качестве метрики качества я выбираю F1 меру, поскольку, не смотря на то, что нужна как можно большая полнота (мы должны пропускать как можно меньше больных людей), нам нужен баланс с точностью, иначе константное решение будет оптимальным, но мы не хотим проводить дополнительные обследования для подтверждения наличия ХПН всем подряд (в первую очередь измерять СКФ). # Сначала окончательно подготовим датасет для тренировки моделей. Сделаем dummy кодирование для категориальных признаков и выкинем из выборки по одному признаку из сильно коррелирующие пар. Нормировать данные не надо, поскольку это не нужно для применения алгоритма решающего дерева. # + def get_datasets(df, train_index, test_index, numerical_features, nominal_features): X_train = pd.DataFrame() y_train = pd.DataFrame() X_test = pd.DataFrame() y_test = pd.DataFrame() # Сначала добавим количественные признаки. for feature in numerical_features: X_train[feature] = train[feature] X_test[feature] = test[feature] # dummy кодирование категориальных признаков. for feature in nominal_features: X_train = pd.concat([X_train, pd.get_dummies(df[feature], feature).iloc[train_index]], axis=1) X_test = pd.concat([X_test, pd.get_dummies(df[feature], feature).iloc[test_index]], axis=1) y_train = df["class"].iloc[train_index].copy(deep=True) y_test = df["class"].iloc[test_index].copy(deep=True) return X_train, X_test, y_train, y_test clean_numerical_features = ["age", "bp", "bgr", "bu", "sc", "sod", "pot", "hemo", "pcv", "wc", "rc"] clean_nominal_features = ["sg", "al", "su", "rbc", "pc", "pcc", "ba", "htn", "dm", "cad", "appet", "pe", "ane"] X_train, X_test, y_train, y_test = get_datasets(dataset, train_index, test_index, clean_numerical_features, clean_nominal_features) # - # С помощью кросс валидации найдём оптимальную глубину дерева. Количество фолдов будет равно 3, поскольку данных очень мало. # + from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV tree_params = {"max_depth": list(range(2, 11))} tree = DecisionTreeClassifier(random_state=RANDOM_SEED) gscv = GridSearchCV(tree, tree_params, scoring="f1", cv=3, n_jobs=-1, verbose=True) gscv.fit(X_train, y_train) # - # Какая оценка F1 score у нас получилась? gscv.best_score_ # Выглядит очень хорошо. Проверим результат на тестовой выборке. # + from sklearn.metrics import f1_score, accuracy_score def evaluate_model(model, X_text, y_test): y_test_pred = model.predict(X_test) base_test_f1_score = f1_score(y_test, y_test_pred) base_accuracy_score = accuracy_score(y_test, y_test_pred) print("Test F1 score: %f" % base_test_f1_score) print("Test accuracy score: %f" % base_accuracy_score) evaluate_model(gscv.best_estimator_, X_test, y_test) # - # Результ просто отличный! # Надо понимать, что для того чтобы получить такой результат мы используем очень много признаков, а значит нужно много анализов и исследований. Давайте попробуем сократить количество признаков и не потерять существенно в качестве. Можно вспомнить, что у нас есть числовые признаки, на которых распределения значений сильно различаются на тренировочной и тестовой выборке, а так же то, что у нас есть категориальные признаки, в которых много пропусков. Давайте возьмём только самые существенные числовые признаки (учитывая корреляци, естественно) и те категориальные, в которых мало пропусков и обучим новую модель. # + clean_numerical_features = ["age", "bu", "sc", "hemo", "rc"] clean_nominal_features = ["pcc", "ba", "htn", "dm", "cad", "appet", "pe", "ane"] X_train, X_test, y_train, y_test = get_datasets(dataset, train_index, test_index, clean_numerical_features, clean_nominal_features) # + tree_params = {"max_depth": list(range(2, 11))} tree = DecisionTreeClassifier(random_state=RANDOM_SEED) gscv = GridSearchCV(tree, tree_params, scoring="f1", cv=3, n_jobs=-1, verbose=True) gscv.fit(X_train, y_train) # - # Результат модели: gscv.best_score_ # И результат на тестовом датасете: evaluate_model(gscv.best_estimator_, X_test, y_test) # Видно, что результат стал чуть хуже, зато мы избавились от кучи параметров. Уменьшим количество признаков ещё сильнее. Используем численные признаки из предыдущей модели, поскольку если креатенин высокий, то и мочевина скорее всего тоже, за исключением мочевины, а из категориальных оставим только гипертонию и сахарный диабет, поскольку это группы риска для ХПН, и аппетит и отёки ног, поскольку легко диагностируются и при ХПН встречаются довольно часто. # + clean_numerical_features = ["age", "hemo", "rc", "sc"] clean_nominal_features = ["dm", "htn", "appet", "pe"] X_train, X_test, y_train, y_test = get_datasets(dataset, train_index, test_index, clean_numerical_features, clean_nominal_features) # + tree_params = {"max_depth": list(range(2, 11))} tree = DecisionTreeClassifier(random_state=RANDOM_SEED) gscv = GridSearchCV(tree, tree_params, scoring="f1", cv=3, n_jobs=-1, verbose=True) gscv.fit(X_train, y_train) # - # Оценка качества модели на кросс-валидации: gscv.best_score_ # Оценка качества на тестовой выборке: evaluate_model(gscv.best_estimator_, X_test, y_test) # Качество осталось примерно таким же, при этом с очень высоким качеством мы определяем наличие ХПН в два анализа крови -- общий, чтобы получить гемоглобин и эритроциты, и биохимический -- для того чтобы получить креатенин, при учёте двух групп риска -- гипертоники и больные сахарным диабетом, а так же используя два крайне легко диагностируемых самостоятельно признака -- отсутствие аппетита и отёки ног. # Отсутствие аппетита и отёк ног - одно из возможных следствий ХПН. Давайте, сделаем признак possible_consequence_of_disease, который будет иметь значение 1, если у нас есть хотя бы одино из этих следствий присутствует у пациета, 0, если отсутствуют оба. # # Так же, при ХПН креатенин всегда выше нормы, поэтому попробуем обойтись без общего анализа крови и возьмём только биохимию на креатенин у пациета. # # Также сделаем признак risk_group, его значение будет равно 1, если у пациента есть сахарный диабет и/или гипертония, и 0, если их нет. # + clean_numerical_features = ["age", "sc"] clean_nominal_features = [] X_train, X_test, y_train, y_test = get_datasets(dataset, train_index, test_index, clean_numerical_features, clean_nominal_features) tmp_1 = dataset["appet"].map({0: 1, 1: 0, 2: 2}) tmp_2 = dataset["pe"].map({0: 1, 1: 0, 2: 2}) tmp = pd.Series([tmp_1[index] or tmp_2[index] for index in range(tmp_1.shape[0])]) features = pd.get_dummies(tmp, "pcd") X_train = pd.concat([X_train, features.iloc[train_index].copy(deep=True)], axis=1) X_test = pd.concat([X_test, features.iloc[test_index].copy(deep=True)], axis=1) tmp_1 = dataset["dm"].map({0: 1, 1: 0, 2: 2}) tmp_2 = dataset["htn"].map({0: 1, 1: 0, 2: 2}) tmp = pd.Series([tmp_1[index] or tmp_2[index] for index in range(tmp_1.shape[0])]) features = pd.get_dummies(tmp, "risk_group") X_train = pd.concat([X_train, features.iloc[train_index].copy(deep=True)], axis=1) X_test = pd.concat([X_test, features.iloc[test_index].copy(deep=True)], axis=1) # + tree_params = {"max_depth": list(range(2, 11))} tree = DecisionTreeClassifier(random_state=RANDOM_SEED) gscv = GridSearchCV(tree, tree_params, scoring="f1", cv=3, n_jobs=-1, verbose=True) gscv.fit(X_train, y_train) # - # Оценка качества модели на кросс-валидации: gscv.best_score_ # Оценка качества на тестовой выборке: evaluate_model(gscv.best_estimator_, X_test, y_test) # Качество на тестовой выборке осталось таким же. В итоге мы получили очень простой классификатор, с очень хорошим качеством предсказания. По сути, для определения с очень высокой вероятностью есть ли у человека ХПН на нужно знать возраст человека, креатенин крови по биохимии, входит ли он в группу риска и есть ли у него возможные наблюдаемые следствия. # ## Достаточно ли данных и можно ли улучшить модель # Интуитивно - у нас очень мало данных, всего о 400 пациентах, при этом 100 ушло в тестовую выборку. Как минимум для теста нужно сильно больше данных. Так же у нас есть заметный перекос в сторону больных ХПН людей, что вполне может сказываться на качестве модели, ведь в реальности, к счастью, ХПН встречается не часто, у большинства людей его нет. # Давайте исследуем последнюю модель на предмет улучшения качества и построим кривые валидации. # + cv_results = pd.DataFrame(gscv.cv_results_) plt.plot(cv_results["param_max_depth"], cv_results["mean_train_score"], label="mean_train_score") plt.plot(cv_results["param_max_depth"], cv_results["mean_test_score"], label="mean_test_score") plt.legend(loc="best") # - # Посмотрим, какая у нас глубина у результата: gscv.best_params_ # По кривым валидации видно, что модель почти сразу началась переобучаться, в смысле получения наилучшего результата мы получили наилучший результат. # Сохраним наилучшую модель. best_model = gscv.best_estimator_ # Построим кривые обучения, чтобы понять, достаточно ли данных. # + # Проценты от обучающей выборки для разделения. split_percents = [index / 10 for index in range(1, 9)] test_scores = [] train_scores = [] for split_percent in split_percents: sss = StratifiedShuffleSplit(n_splits=1, test_size=split_percent, random_state=RANDOM_SEED) train_index, _ = tuple(sss.split(X_train, y_train))[0] X_train_tmp = X_train.iloc[train_index].copy(deep=True) y_train_tmp = y_train.iloc[train_index].copy(deep=True) tree_params = {"max_depth": list(range(2, 10))} tree_2 = DecisionTreeClassifier(random_state=RANDOM_SEED) gscv_2 = GridSearchCV(tree_2, tree_params, scoring="f1", cv=3, n_jobs=-1, verbose=True) gscv_2.fit(X_train_tmp, y_train_tmp) test_score = gscv_2.best_score_ train_score = f1_score(y_train_tmp, gscv_2.best_estimator_.predict(X_train_tmp)) test_scores.append(test_score) train_scores.append(train_score) # - plt.plot(split_percents, test_scores, label="test_score") plt.plot(split_percents, train_scores, label="train_score") plt.legend(loc="best") # Кривые к друг другу ещё не сошлись, качество на валидации растёт, соответственно добавление данных действительно должно дать результат. # ## Выводы # Получился хороший прототип для простой диагностики хронической почечной недостаточности, во всяком случае на данном этапе задача выглядит решаемой. Для того, чтобы её действительно решить, нужно больше данных.
jupyter/projects_individual/project_ckd_diagnostics_kudin.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 # %matplotlib inline # %config InlineBackend.figure_format = 'retina' import os, math import numpy as np, pandas as pd import matplotlib.pyplot as plt, seaborn as sns from tqdm import tqdm, tqdm_notebook from pathlib import Path pd.set_option('display.max_columns', 1000) pd.set_option('display.max_rows', 400) sns.set() os.chdir('../..') from src import utils # - DATA = Path('data') RAW = DATA/'raw' INTERIM = DATA/'interim' PROCESSED = DATA/'processed' SUBMISSIONS = DATA/'submissions' from src.utils import get_weeks # week_labels = get_weeks(day_from=20160104, num_weeks=121)[52:] week_labels = get_weeks(day_from=20160104, num_weeks=121)[96:] # week_labels = get_weeks(day_from=20160104, num_weeks=121)[104:] print(week_labels) # %%time weeks = pd.DataFrame() for name in week_labels[:-1]: weeks = pd.concat([weeks, pd.read_feather( PROCESSED/f'SVD_17-18_72f/week_{name}_SVD_diffscount.feather')]) test = pd.read_feather( PROCESSED/f'SVD_17-18_72f/week_{week_labels[-1]}_SVD_diffscount.feather') # ## Preprocessing cat_cols = ['BuySell', 'Sector', 'Subsector', 'Region_x', 'Country', 'TickerIdx', 'Seniority', 'Currency', 'ActivityGroup', 'Region_y', 'Activity', 'RiskCaptain', 'Owner', 'IndustrySector', 'IndustrySubgroup', 'MarketIssue', 'CouponType'] id_cols = ['TradeDateKey', 'CustomerIdx', 'IsinIdx'] target_col = 'CustomerInterest' pred_col = 'PredictionIdx' # + # %%time from src.utils import apply_cats for col in cat_cols: test[col] = test[col].astype('category').cat.as_ordered() apply_cats(weeks, test) for col in cat_cols: weeks[col] = weeks[col].cat.codes test[col] = test[col].cat.codes # - # ## Model from lightgbm import LGBMClassifier from sklearn.metrics import roc_auc_score from src.utils import alert # %%time val_set = [] train_auc = [] val_auc = [] for i in range(-5, -1): train, val = weeks[weeks.TradeDateKey<week_labels[i]], \ weeks[(weeks.TradeDateKey<week_labels[i+1]) & \ (weeks.TradeDateKey>=week_labels[i])] print(train.TradeDateKey.min(), train.TradeDateKey.max(), val.TradeDateKey.unique()) val_set.append(val.TradeDateKey.unique()[0]) y_train = train[target_col] train.drop(id_cols + [target_col], axis=1, inplace=True) y_val = val[target_col] val.drop(id_cols + [target_col], axis=1, inplace=True) model = LGBMClassifier(n_estimators=400, max_depth=30, random_state=42, reg_alpha=1, reg_lambda=1, colsample_by_tree=0.8) model.fit(train, y_train, eval_metric='auc', verbose=20, eval_set=[(val, y_val)], early_stopping_rounds=30) y_pred = model.predict_proba(train)[:,1] train_auc.append(roc_auc_score(y_train, y_pred)) print('Train AUC: ', train_auc[-1]) y_pred = model.predict_proba(val)[:,1] val_auc.append(roc_auc_score(y_val, y_pred)) print('Val AUC: ', val_auc[-1]) print() del model, train, y_train, val, y_val, y_pred alert() results = pd.DataFrame() results['val_set'] = val_set results['train_auc'] = train_auc results['val_auc'] = val_auc results['iterations'] = [397,261,328,400] results = pd.DataFrame() results['val_set'] = [20180326, 20180402, 20180409, 20180416] results['train_auc'] = [0.7980312197668828, 0.8062227442047145, 0.8161902543359266, 0.8038221068804319] results['val_auc'] = [0.8273520913252462, 0.824267377403749, 0.8516418834039483, 0.8693741344750291] results['iterations'] = [163, 231, 376, 137] print(results.train_auc.mean(), results.val_auc.mean(), results.iterations.mean()) results # (n_estimators=400, max_depth=30, random_state=42, # reg_alpha=1, reg_lambda=1, colsample_by_tree=0.8) print(results.train_auc.mean(), results.val_auc.mean(), results.iterations.mean()) results
notebooks/chu/JC-06-lightgbm-CV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="6bYaCABobL5q" # ##### Copyright 2018 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="FlUw7tSKbtg4" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="08OTcmxgqkc2" # # Automatically upgrade code to TensorFlow 2 # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/upgrade"> # <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> # View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/upgrade.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> # Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/upgrade.ipynb"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> # View source on GitHub</a> # </td> # <td> # <a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/upgrade.ipynb"> # <img src="https://www.tensorflow.org/images/download_logo_32px.png" /> # Download notebook</a> # </td> # </table> # # + [markdown] colab_type="text" id="hZSaRPoybOp5" # TensorFlow 2.0 includes many API changes, such as reordering arguments, renaming symbols, and changing default values for parameters. Manually performing all of these modifications would be tedious and prone to error. To streamline the changes, and to make your transition to TF 2.0 as seamless as possible, the TensorFlow team has created the `tf_upgrade_v2` utility to help transition legacy code to the new API. # # Note: `tf_upgrade_v2` is installed automatically for TensorFlow 1.13 and later (including all TF 2.0 builds). # # Typical usage is like this: # # <pre class="devsite-terminal devsite-click-to-copy prettyprint lang-bsh"> # tf_upgrade_v2 \ # --intree my_project/ \ # --outtree my_project_v2/ \ # --reportfile report.txt # </pre> # # It will accelerate your upgrade process by converting existing TensorFlow 1.x Python scripts to TensorFlow 2.0. # # The conversion script automates as much as possible, but there are still syntactical and stylistic changes that cannot be performed by the script. # + [markdown] colab_type="text" id="gP9v2vgptdfi" # ## Compatibility modules # # Certain API symbols can not be upgraded simply by using a string replacement. To ensure your code is still supported in TensorFlow 2.0, the upgrade script includes a `compat.v1` module. This module replaces TF 1.x symbols like `tf.foo` with the equivalent `tf.compat.v1.foo` reference. While the compatibility module is nice, we recommend that you manually proofread replacements and migrate them to new APIs in the `tf.*` namespace instead of `tf.compat.v1` namespace as quickly as possible. # # Because of TensorFlow 2.x module deprecations (for example, `tf.flags` and `tf.contrib`), some changes can not be worked around by switching to `compat.v1`. Upgrading this code may require using an additional library (for example, [`absl.flags`](https://github.com/abseil/abseil-py)) or switching to a package in [tensorflow/addons](http://www.github.com/tensorflow/addons). # # + [markdown] colab_type="text" id="s78bbfjkXYb7" # ## Recommended upgrade process # # The rest of this guide demonstrates how to use the upgrade script. While the upgrade script is easy to use, it is strongly recomended that you use the script as part of the following process: # # 1. **Unit Test**: Ensure that the code you’re upgrading has a unit test suite with reasonable coverage. This is Python code, so the language won’t protect you from many classes of mistakes. Also ensure that any dependency you have has already been upgraded to be compatible with TensorFlow 2.0. # # 1. **Install TensorFlow 1.14**: Upgrade your TensorFlow to the latest TensorFlow 1.x version, at least 1.14. This includes the final TensorFlow 2.0 API in `tf.compat.v2`. # # 1. **Test With 1.14**: Ensure your unit tests pass at this point. You’ll be running them repeatedly as you upgrade so starting from green is important. # # 1. **Run the upgrade script**: Run `tf_upgrade_v2` on your entire source tree, tests included. This will upgrade your code to a format where it only uses symbols available in TensorFlow 2.0. Deprecated symbols will be accessed with `tf.compat.v1`. These will eventually require manual attention, but not immediately. # # 1. **Run the converted tests with TensorFlow 1.14**: Your code should still run fine in TensorFlow 1.14. Run your unit tests again. Any error in your tests here means there’s a bug in the upgrade script. [Please let us know](https://github.com/tensorflow/tensorflow/issues). # # 1. **Check the upgrade report for warnings and errors**: The script writes a report file that explains any conversions you should double-check, or any manual action you need to take. For example: Any remaining instances of contrib will require manual action to remove. Please consult [the RFC for more instructions](https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md). # # 1. **Install TensorFlow 2.0**: At this point it should be safe to switch to TensorFlow 2.0 # # 1. **Test with `v1.disable_v2_behavior`**: Re-running your tests with al `v1.disable_v2_behavior()` in the tests main function should give the same results as running under 1.14. # # 1. **Enable V2 Behavior**: Now that your tests work using the v2 API, you can start looking into turning on v2 behavior. Depending on how your code is written this may require some changes. See the [Migration guide](migrate.ipynb) for details. # + [markdown] colab_type="text" id="6pwSAQEwvscP" # ## Using the upgrade script # # + [markdown] colab_type="text" id="I9NCvDt5GwX4" # ### Setup # # Before getting started ensure that TensorlFlow 2.0 is installed. # + colab={} colab_type="code" id="DWVYbvi1WCeY" import tensorflow as tf print(tf.__version__) # + [markdown] colab_type="text" id="Ycy3B5PNGutU" # Clone the [tensorflow/models](https://github.com/tensorflow/models) git repository so you have some code to test on: # + colab={} colab_type="code" id="jyckoWyAZEhZ" # !git clone --branch r1.13.0 --depth 1 https://github.com/tensorflow/models # + [markdown] colab_type="text" id="wfHOhbkgvrKr" # ### Read the help # # The script should be installed with TensorFlow. Here is the builtin help: # + colab={} colab_type="code" id="m2GF-tlntqTQ" # !tf_upgrade_v2 -h # + [markdown] colab_type="text" id="se9Leqjm1CZR" # ### Example TF1 code # + [markdown] colab_type="text" id="whD5i36s1SuM" # Here is a simple TensorFlow 1.0 script: # + colab={} colab_type="code" id="mhGbYQ9HwbeU" # !head -n 65 models/samples/cookbook/regression/custom_regression.py | tail -n 10 # + [markdown] colab_type="text" id="UGO7xSyL89wX" # With TensorFlow 2.0 installed it does not run: # + colab={} colab_type="code" id="TD7fFphX8_qE" !(cd models/samples/cookbook/regression && python custom_regression.py) # + [markdown] colab_type="text" id="iZZHu0H0wLRJ" # ### Single file # # The upgrade script can be run on a single Python file: # + attributes={"classes": ["sh"], "id": ""} colab={} colab_type="code" id="xIBZVEjkqkc5" # !tf_upgrade_v2 \ # --infile models/samples/cookbook/regression/custom_regression.py \ # --outfile /tmp/custom_regression_v2.py # + [markdown] colab_type="text" id="L9X2lxzqqkc9" # The script will print errors if it can not find a fix for the code. # + [markdown] colab_type="text" id="r7zpuE1vWSlL" # ### Directory tree # + [markdown] colab_type="text" id="2q7Gtuu8SdIC" # Typical projects, including this simple example, will use much more than one file. Typically want to upgrade an entire package, so the script can also be run on a directory tree: # + colab={} colab_type="code" id="XGqcdkAPqkc-" # upgrade the .py files and copy all the other files to the outtree # !tf_upgrade_v2 \ # --intree models/samples/cookbook/regression/ \ # --outtree regression_v2/ \ # --reportfile tree_report.txt # + [markdown] colab_type="text" id="2S4j7sqbSowC" # Note the one warning about the `dataset.make_one_shot_iterator` function. # # Now the script works in with TensorFlow 2.0: # # Note that because the `tf.compat.v1` module, the converted script will also run in TensorFlow 1.14. # + colab={} colab_type="code" id="vh0cmW3y1tX9" !(cd regression_v2 && python custom_regression.py 2>&1) | tail # + [markdown] colab_type="text" id="4EgZGGkdqkdC" # ## Detailed report # # The script also reports a list of detailed changes. In this example it found one possibly unsafe transformation and included a warning at the top of the file: # + colab={} colab_type="code" id="CtHaZbVaNMGV" # !head -n 20 tree_report.txt # + [markdown] colab_type="text" id="1-UIFXP3cFSa" # Note again the one warning about the `Dataset.make_one_shot_iterator function`. # + [markdown] colab_type="text" id="oxQeYS1TN-jv" # In other cases the output will explain the reasoning for non-trivial changes: # + colab={} colab_type="code" id="WQs9kEvVN9th" # %%writefile dropout.py import tensorflow as tf d = tf.nn.dropout(tf.range(10), 0.2) z = tf.zeros_like(d, optimize=False) # + colab={} colab_type="code" id="7uOkacZsO3XX" # !tf_upgrade_v2 \ # --infile dropout.py \ # --outfile dropout_v2.py \ # --reportfile dropout_report.txt > /dev/null # + colab={} colab_type="code" id="m-J82-scPMGl" # !cat dropout_report.txt # + [markdown] colab_type="text" id="DOOLN21nTGSS" # Here is the modified file contents, note how the script adds argument names to deal with moved and renamed arguments: # + colab={} colab_type="code" id="SrYcJk9-TFlU" # !cat dropout_v2.py # + [markdown] colab_type="text" id="wI_sVNp_b4C4" # A larger project might contain a few errors. For example convert the deeplab model: # + colab={} colab_type="code" id="uzuY-bOvYBS7" # !tf_upgrade_v2 \ # --intree models/research/deeplab \ # --outtree deeplab_v2 \ # --reportfile deeplab_report.txt > /dev/null # + [markdown] colab_type="text" id="FLhw3fm8drae" # It produced the output files: # + colab={} colab_type="code" id="4YYLRxWJdSvQ" # !ls deeplab_v2 # + [markdown] colab_type="text" id="qtTC-cAZdEBy" # But there were errors. The report will help you pin-point what you need to fix before this will run. Here are the first three errors: # + colab={} colab_type="code" id="UVTNOohlcyVZ" # !cat deeplab_report.txt | grep -i models/research/deeplab | grep -i error | head -n 3 # + [markdown] colab_type="text" id="gGBeDaFVRJ5l" # ## "Safety" mode # + [markdown] colab_type="text" id="BnfCxB7SVtTO" # The conversion script also has a less invasive `SAFETY` mode that simply changes the imports to use the `tensorflow.compat.v1` module: # + colab={} colab_type="code" id="XdaVXCPWQCC5" # !cat dropout.py # + colab={} colab_type="code" id="c0tvRJLGRYEb" # !tf_upgrade_v2 --mode SAFETY --infile dropout.py --outfile dropout_v2_safe.py > /dev/null # + colab={} colab_type="code" id="91suN2RaRfIV" # !cat dropout_v2_safe.py # + [markdown] colab_type="text" id="EOzTF7xbZqqW" # As you can see this doesn't upgrade your code, but does allow TensorFlow 1 code to run in TensorFlow 2 # + [markdown] colab_type="text" id="jGfXVApkqkdG" # ## Caveats # # - Do not update parts of your code manually before running this script. In particular, functions that have had reordered arguments like `tf.argmax` or `tf.batch_to_space` cause the script to incorrectly add keyword arguments that mismap your existing code. # # - The script assumes that `tensorflow` is imported using `import tensorflow as tf`. # # - This script does not reorder arguments. Instead, the script adds keyword arguments to functions that have their arguments reordered. # # - Check out [tf2up.ml](http://tf2up.ml) for a convenient tool to upgrade Jupyter # notebooks and Python files in a GitHub repository. # # To report upgrade script bugs or make feature requests, please file an issue on [GitHub](https://github.com/tensorflow/tensorflow/issues). And if you’re testing TensorFlow 2.0, we want to hear about it! Join the [TF 2.0 Testing community](https://groups.google.com/a/tensorflow.org/forum/#!forum/testing) and send questions and discussion to [<EMAIL>](mailto:<EMAIL>).
site/en/guide/upgrade.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <img style="float: left; padding-right: 10px; width: 45px" src="iacs.png"> S-109A Introduction to Data Science: # # ## Homework 6: Ensemble Methods, and Neural Networks # # # **Harvard University**<br/> # **Summer 2018**<br/> # **Instructors**: <NAME>, <NAME> # # <hr style="height:2pt"> # # from IPython.core.display import HTML def css_styling(): styles = open("cs109.css", "r").read(); return HTML(styles) css_styling() # ## Assumed Skills: # This assignment presumes knowledge of the following skills: # - Familiarity with sklearn's model objects # - Cross validation to estimate models' future performance # - Booststrapping to build alternative datasets # - Some instruction on Keras' interface for building and training neural networks # Import libraries: # + import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import cross_val_score from sklearn.utils import resample from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.linear_model import LogisticRegressionCV from keras.models import Sequential from keras.layers import Dense # %matplotlib inline import seaborn as sns pd.set_option('display.width', 1500) pd.set_option('display.max_columns', 100) # - # # Higgs Boson Discovery # # The discovery of the Higgs boson in July 2012 marked a fundamental breakthrough in particle physics. The Higgs boson particle was discovered through experiments at the Large Hadron Collider at CERN, by colliding beams of protons at high energy. A key challenge in analyzing the results of these experiments is to differentiate between collisions that produce Higgs bosons and collisions that produce only background noise. We shall explore the use of ensemble methods for this classification task. # # You are provided with data from Monte-Carlo simulations of collisions of particles in a particle collider experiment. The training set is available in `Higgs_train.csv` and the test set is in `Higgs_test.csv`. Each row in these files corresponds to a particle collision described by 28 features (columns 1-28), of which the first 21 features are kinematic properties measured by the particle detectors in the accelerator, and the remaining features are derived by physicists from the first 21 features. The class label is provided in the last column, with a label of 1 indicating that the collision produces Higgs bosons (signal), and a label of 0 indicating that the collision produces other particles (background). # # The data set provided to you is a small subset of the HIGGS data set in the UCI machine learning repository. The following paper contains further details about the data set and the predictors used: <a href = "https://www.nature.com/articles/ncomms5308">Baldi et al., Nature Communications 5, 2014</a>. data_train = pd.read_csv('data/Higgs_train.csv') data_test = pd.read_csv('data/Higgs_test.csv') data_train.head() X_train = data_train.iloc[:, data_train.columns != 'class'] y_train = data_train['class'].values X_test = data_test.iloc[:, data_test.columns != 'class'] y_test = data_test['class'].values # ## Question 1 (12pts): A Single Model # We start by fitting a basic model we can compare the other models to. We'll pick an optimally-tuned decision tree as the base model, because we'll later include random forests and want a fair comparison. # # <div class='exercise'> Question 1</div> # ** 1.1** Fit a decision tree model to the training set. Determine the depth-of-tree parameter via 5-fold cross-validation and plot the estimated performance +/- 2 standard deviations for the various depths. # # ** 1.2** Select an appropriate maximum depth-of-tree, and justify your choice. # # **1.3** Report the model's classification accuracy on the test set. # **Answers**: # **1.1:** Fit a decision tree model to the training set. Determine the depth-of-tree parameter via 5-fold cross-validation and plot the estimated performance +/- 2 standard deviations for the various depths. # + # your code here # - # ** 1.2** Select an apropriate maximum depth-of-tree, and justify your choice. # ---- # Your answer here # # ---- # **1.3** Report the model's classification accuracy on the test set. # + # your code here # - # ## Question 2 (14 pts): Bagging # Bagging is the technique of building the same model on multiple bootstraps from the data and combining each model's prediction to get an overall classification. In this question we build an example by hand and study how the number of bootstrapped datasets impacts the combined accuracy. # # <div class='exercise'> Question 2</div> # **2.1** Create 25 bootstrapped replications of the original training data, and fit a decision tree of depth 5 to each. Record each tree's prediction. In particular, produce a dataset like those below, where each row is a training example, each column is a tree from the forest, and each entry is that tree's prediction for that training example. # # `bagging_train`: # # | |bootstrap model 1's prediction|bootstrap model 2's prediction|...|bootstrap model 25's prediction| # | --- | --- | --- | --- | # |training row 1| binary value | binary value|... |binary value| # |training row 2| binary value| binary value|... |binary value| # |...| ...| ...|... |... | # # `bagging_test`: # # | |bootstrap model 1's prediction|bootstrap model 2's prediction|...|bootstrap model 25's prediction| # | --- | --- | --- | --- | # |test row 1| binary value | binary value|... |binary value| # |test row 2| binary value| binary value|... |binary value| # |...| ...| ...|... |... | # # Store these results as `bagging_train` and `bagging_test`. # # **2.2** _Aggregate_ all 25 _bootstrapped_ models to get a combined prediction for each training and test point: predict a 1 if and only if a majority of the 25 models predict that example to be from class 1. Verify that this bagging model scores either 67% or 68% accuracy on the test set. # # **2.3** We want to know how the number of bootstraps affects our bagging ensemble's performance. Use the `running_predictions` function to get the model's accuracy score when using only 1,2,3,4,... of the bootstrapped models. Make a plot of training and test set accuracy as a function of number of bootstraps. # # **2.4** Analyze the graph from 2.3 and discuss the effect of adding more bootstrapped models to the ensemble. What number of trees would you use in a production model to be cost-effective? # # **Hints** # - Use `resample` from sklearn to easily bootstrap the x and y data. # - use `np.mean` to easily test for majority. If a majority of models vote 1, what does that imply about the mean? def running_predictions(prediction_dataset, targets): """A function to predict examples' class via the majority among trees (ties are predicted as 0) Inputs: prediction_dataset - a (n_examples by n_sub_models) dataset, where each entry [i,j] is sub-model j's prediction for example i targets - the true class labels Returns: a vector where vec[i] is the model's accuracy when using just the first i+1 sub-models """ n_trees = prediction_dataset.shape[1] # find the running percentage of models voting 1 as more models are considered running_percent_1s = np.cumsum(prediction_dataset, axis=1)/np.arange(1,n_trees+1) # predict 1 when the running average is above 0.5 running_conclusions = running_percent_1s > 0.5 # check whether the running predictions match the targets running_correctnesss = running_conclusions == targets.reshape(-1,1) return np.mean(running_correctnesss,axis=0) # returns a 1-d series of the accuracy of using the first n trees to predict the targets # **Answers**: # **2.1** Create 25 bootstrapped replications of the original training data, and fit a decision tree of depth 5 to each. In particular, produce a dataset similar to 2.1, where each row is a training example, each column is a tree from the forest, but each entry is that tree's prediction of the _probability_ that training example comes from class 1. # + # your code here # - # **2.2** _Aggregate_ all 25 _bootstrapped_ models to get a combined prediction for each training and test point: predict a 1 if and only if a majority of the 25 models predict that example to be from class 1. Verify that this bagging model scores either 67% or 68% accuracy on the test set. # + # your code here # - # **2.3** We want to know how the number of bootstraps affects our bagging ensemble's performance. Use the `running_predictions` function to get the model's accuracy score when using only 1,2,3,4,... of the bootstrapped models. Make a plot of training and test set accuracy as a function of number of bootstraps. # + # your code here # - # **2.4** Analyze the graph from 2.3 and discuss the effect of adding more bootstrapped models to the ensemble. What number of trees would you use in a production model to be cost-effective? # ---- # Your answer here # # ---- # ## Question 3 (6 pts): Random Forests # Random Forests are closely related to the bagging model we built by hand in question 2. In this question we compare our by-hand results with the results of using `RandomForestClassifier` directly. # # <div class='exercise'> Question 3</div> # **3.1** Fit a `RandomForestClassifier` to the original `X_train` data using 25 trees and a depth of 5. Comment on the model's test performance compared to the bagging model from Question 2. # # **3.2** There are two improvements Random Forests make to the pure bagging approach in Question 2. What are they, and how do they help the random forest model do better than the pure bagging model? # # **Hints**: # - Random forests do not combine each tree's prediction via a majority vote. What do they use instead? # ## **Answers**: # **3.1** Fit a `RandomForestClassifier` to the original `X_train` data using 25 trees and a depth of 5. Comment on the model's test performance compared to the model from Question 2. # + # your code here # - # ---- # Your answer here # # ---- # **3.2** There are two improvements Random Forests make to the pure bagging approach in Question 2. What are they, and how do they help the random forest model do better than the pure bagging model? # ---- # Your answer here # # ---- # ## Question 4 (12 pts): Boosting # In this question we explore a counterpart to bagging, where each new model is trained on a dataset weighted towards observations that the current set of models predicts incorrectly. # # We'll focus on the AdaBoost flavor of boosting and examine what happens to the ensemble model's accuracy over the algorithm's run. # # <div class='exercise'> Question 4</div> # **4.1** Use `AdaBoostClassifier` to fit another ensemble to `X_train`. Use a decision tree of depth 3 as the base learner and a learning rate 0.05, and run the boosting for 400 iterations. Use the `staged_score` method to help make a plot of the effect of the number of estimators/iterations on the model's train and test accuracy. # # **4.2** Repeat the plot above for a base learner with depth in (1,2,3,4). What trends do you see in the training and test accuracy? # # **4.3** Based on the plot from 4.2, what combination of base learner depth and number of iterations seems optimal? Why? # # **4.4** AdaBoost doesn't combine its sub-models via simple majority vote, or by averaging probabilities. What does it use instead, and why do you think that combination rule was chosen? # **Answers**: # **4.1** Use `AdaBoostClassifier` to fit another ensemble to `X_train`. Use a decision tree of depth 3 as the base learner and a learning rate 0.05, and run the boosting for 400 iterations. Use the `staged_score` method to help make a plot of the effect of the number of estimators/iterations on the model's train and test accuracy. # + # your code here # - # **4.2** Repeat the plot above for a base learner with depth in (1,2,3,4). What trends do you see in the training and test accuracy? # + # your code here # - # ---- # Your answer here # # ---- # **4.3** Based on the plot from 4.2, what combination of base learner depth and number of iterations seems optimal? Why? # # ---- # Your answer here # # ---- # **4.4** AdaBoost doesn't combine its sub-models via simple majority vote, or by averaging probabilities. What does it use instead, and why do you think that combination rule was chosen? # ---- # Your answer here # # ---- # ## Question 5 (18 pts): Ensembling # In this question we take the running theme of combining model to its extreme. So far, we have been combining the predictions of relatively bad models; in this section we'll combine several strong models and achieve our best accuracy yet. # # We provide well-tuned models in the file `models.pkl`. The code below will read in this data for you. The model_dict object contains 5 tuned models, under the names "Ada", "KNN", "Logit", "QDA", and "RF". # # **5.1**: Report each of the 5 tuned models' score on the test set, so that you can compare to these scores later. # # **5.2**: Read in the fresh dataset `data/Higgs_tune.csv` Similar to 2.1, build `ensemble_tune` and `ensemble_test`, datasets containing each tuned model's prediction of P(this point belongs to class 1) for each of the tuning and test points. # # **5.3**: Build a meta-model trained on `ensemble_tune` and predicting the tuning set labels (e.g., a LogisticRegression or RandomForest). Which model does your meta-model consider most important, and how well does your meta-model perform on the test set? # # **5.4**: Augment the `ensemble_tune` and `ensemble_test` datasets with the columns from the original tuning and test data to form `augmented_tune` and `augmented_test`. Fit a decision tree model to this new tuning data (max depth 5, no mximum number of features). # # **5.5**: How well does the meta-tree do on the test set? Why does training a decision tree on the combination of original data and model predictions perform so well? # # **5.6**: Suggest one way to improve on the model above # + # will produce a warning under most versions of SKlearn, but it should be OK to ignore # if you get weird errors or the models all stink, let us know import pickle with open("data/models.pkl", 'rb') as infile: model_dict = pickle.load(infile) # - # **Answers**: # # **5.1**: Report each model's score on the test set, so that you can compare to these scores later. # + # your code here # - # **5.2**: Read in the fresh dataset `data/Higgs_tune.csv`. Similar to 2.1, build `ensemble_tune` and `ensemble_test`, datasets containing each tuned model's prediction of P(this point belongs to class 1) for each of the tuning and test points. # + # your code here # - # **5.3**: Build a meta-model trained on `ensemble_tune` and predicting the tuning set labels (e.g., a LogisticRegression or RandomForest). Which model does your meta-model consider most important, and how well does your meta-model perform on the test set? # + # your code here # - # ---- # Your answer here # # ---- # **5.4**: Augment the `ensemble_tune` and `ensemble_test` datasets with the columns from the original tuning and test data to form `augmented_tune` and `augmented_test`. Fit a decision tree model to this new tuning data (max depth 5, no mximum number of features). # + # your code here # - # **5.5**: How well does the meta-tree do on the test set? Why does training a decision tree on the combination of original data and model predictions perform so well? # + # your code here # - # ---- # Your answer here # # ---- # **5.6**: Suggest one way to improve on the model above # + # your code here (optional) # - # ---- # Your answer here # # ---- # ## Question 6 (12 pts): Understanding # This question is an overall test of your knowledge of this homework's material. You may need to refer to lecture notes and other material outside this homework to answer these questions. # # <div class='exercise'> Question 6</div> # **6.1** How do ensembling, boosting, and bagging all relate: what is common to all three, and what is unique to each of them? # # **6.2** Which technique, boosting or bagging, is better suited to parallelization, where you could have multiple computers working on a problem at the same time? # # **6.3** What is the impact of having too many trees/iterations in boosting and in bagging? In which instance is it worse to overshoot? # # **6.4** Suppose you have 10,000 training observations and have selected (non-polynomial) linear regression as your base model. Which technique will help your model more, boosting or bagging? How does your choice (and boosting/bagging in general) tie to overfitting versus underfitting? # **Answers**: # **6.1** How do ensembling, boosting, and bagging all relate: what is common to all three, and what is unique to each of them? # ---- # Your answer here # # ---- # **6.2** Which technique, boosting or bagging, is better suited to parallelization, where you could have multiple computers working on a problem at the same time? # ---- # Your answer here # # ---- # **6.3** What is the impact of having too many trees/iterations in boosting and in bagging? In which instance is it worse to overshoot? # ---- # Your answer here # # ---- # **6.4** Suppose you have 10,000 training examples and have selected (non-polynomial) linear regression as your base model. Which technique will help your model more, boosting or bagging? How does your choice (and boosting/bagging general) tie to overfitting versus underfitting? # # ---- # Your answer here # # ---- # <hr style='height:2pt'> # ## Question 7 (26 points): Neural Networks # Neural networks are, of course, a large and complex topic that cannot be covered in a single homework. Here we'll focus on the key idea of NNs: they are able to learn a mapping from example input data (of fixed size) to example output data (of fixed size). We'll also partially explore what patterns the neural network learns and how well they generalize. # # In this question we'll see if Neural Networks can learn a (limited) version of the Fourier Transform. (The Fourier Transform takes in values from some function and returns a set of sine and cosine functions which, when added together, approximate the original function.) # # In our specific problem, we'll try to teach a network to map from a function's 1000 sample y-values to the four features of the sine and cosine waves that make up that function. Thus, the network is attempting to learn a mapping from a 1000-entry vector down to a 4-entry vector. Our X_train dataset is thus N by 1000 and our y_train is N by 4. # # We'll use 6 data files in this question: # - `sinewaves_X_train.npy` and `sinewaves_y_train.npy`: a (10,000 by 1,000) and (10,000 by 4) training dataset. Examples were generated by randomly selecting a,b,c,d in the interval [0,1] and building the curve $a\sin(b\,x) + c\cos(d\,x)$ # - `sinewaves_X_test.npy` and `sinewaves_y_test.npy`: a (2,000 by 1,000) and (2,000 by 4) test dataset, generated in the same way as the training data # - `sinewaves_X_extended_test` and `sinewaves_y_extended_test`: a (9 by 1,000) and (9 by 4) test dataset, testing whether the network can generalize beyond the training data (e.g. to negative values of $a$) # # **These datasets are read in to their respective variables for you.** # <div class='exercise'> Question 7</div> # **7.1** Plot the first row of the `X_train` training data and visually verify that it is a sinusoidal curve # # **7.2** The first row of the `y_train` data is $[0.024, 0.533, 0.018, 0.558]$. Visually or numerically verify that the first row of X_train is 1000 equally-spaced samples in $[0,10\pi]$ from the function $f(x) = 0.24\sin(0.533\,x) + 0.018\cos(0.558\,x)$. This pattern (y_train is the true parameters of the curve in X_train) will always hold. # # **7.3** Use `Sequential` and `Dense` from Keras to build a fully-connected neural network. You can choose any number of layers and any number of nodes in each layer. # # **7.4** Compile your model via the line `model.compile(loss='mean_absolute_error', optimizer='adam')` and display the `.summary()`. Explain why the first layer in your network has the indicated number of parameters. # # **7.5** Fit your model to the data for 50 epochs using a batch size of 32 and a validation split of 0.2. You can train for longer if you wish- the fit tends to improve over time. # # **7.6** Use the `plot_predictions` function to plot the model's predictions on `X_test` to the true values in `y_test` (by default, it will only plot the first few rows). Report the model's overall loss on the test set. Comment on how well the model performs on this unseen data. Do you think it has accurately learned how to map from sample data to the coefficients that generated the data? # # **7.7** Examine the model's performance on the 9 train/test pairs in the `extended_test` variables. Which examples does the model do well on, and which examples does it struggle with? # # **7.8** Is there something that stands out about the difficult examples, especially with respect to the data the model was trained on? Did the model learn the mapping we had in mind? Would you say the model is overfit, underfit, or neither? # # **Hint**: # - Keras's documentation and examples of a Sequential model are a good place to start. # - A strong model can achieve validation error of around 0.03 on this data and 0.02 is very good. # + def plot_predictions(model, test_x, test_y, count=None): # Model - a Keras or SKlearn model that takes in (n,1000) training data and predicts (n,4) output data # test_x - a (n,1000) input dataset # test_y - a (n,4) output dataset # This function will plot the sine curves in the training data and those implied by the model's predictions. # It will also print the predicted and actual output values. #helper function that takes the n by 4 output and reverse-engineers #the sine curves that output would create def y2x(y_data): #extract parameters a=y_data[:,0].reshape(-1,1) b=y_data[:,1].reshape(-1,1) c=y_data[:,2].reshape(-1,1) d=y_data[:,3].reshape(-1,1) #build the matching training data x_points = np.linspace(0,10*np.pi,1000) x_data = a*np.sin(np.outer(b,x_points)) + c*np.cos(np.outer(d,x_points)) return x_data #if <20 examples, plot all. If more, just plot 5 if count==None: if test_x.shape[0]>20: count=5 else: count=test_x.shape[0] #build predictions predicted = model.predict(test_x) implied_x = y2x(predicted) for i in range(count): plt.plot(test_x[i,:],label='true') plt.plot(implied_x[i,:],label='predicted') plt.legend() plt.ylim(-2.1,2.1) plt.xlabel("x value") plt.xlabel("y value") plt.title("Curves using the Neural Network's Approximate Fourier Transform") plt.show() print("true:", test_y[i,:]) print("predicted:", predicted[i,:]) # + X_train = np.load('data/sinewaves_X_train.npy') y_train = np.load('data/sinewaves_y_train.npy') X_test = np.load('data/sinewaves_X_test.npy') y_test = np.load('data/sinewaves_y_test.npy') X_extended_test = np.load('data/sinewaves_X_extended_test.npy') y_extended_test = np.load('data/sinewaves_y_extended_test.npy') # - # **Answers**: # **7.1** Plot the first row of the `X_train` training data and visually verify that it is a sinusoidal curve # + # your code here # - # **7.2** The first row of the `y_train` data is $[0.024, 0.533, 0.018, 0.558]$. Visually or numerically verify that the first row of X_train is 1000 equally-spaced points in $[0,10\pi]$ from the function $f(x) = 0.24\sin(0.533\,x) + 0.018\cos(0.558\,x)$... # + # your code here # - # **7.3** Use `Sequential` and `Dense` from Keras to build a fully-connected neural network. You can choose any number of layers and any number of nodes in each layer. # + # your code here # - # **7.4** Compile your model via the line `model.compile(loss='mean_absolute_error', optimizer='adam')` and display the `.summary()`. Explain why the first layer in your network has the indicated number of parameters. # + # your code here # - # ---- # Your answer here # # ---- # **7.5** Fit your model to the data for 50 epochs using a batch size of 32 and a validation split of .2. You can train for longer if you wish- the fit tends to improve over time. # + # your code here # - # **7.6** Use the `plot_predictions` function to plot the model's predictions on `X-test` to the true values in `y_test` (by default, it will only plot the first few rows). Report the model's overall loss on the test set. Comment on how well the model performs on this unseen data. Do you think it has accurately learned how to map from sample data to the coefecients that generated the data? # + # your code here # - # ---- # Your answer here # # ---- # **7.7** Examine the model's performance on the 9 train/test pairs in the `extended_test` variables. Which examples does the model do well on, and which examples does it struggle with? # + # your code here # - # ---- # Your answer here # # ---- # **7.8** Is there something that stands out about the difficult observations, especially with respect to the data the model was trained on? Did the model learn the mapping we had in mind? Would you say the model is overfit, underfit, or neither? # ---- # Your answer here # # ----
content/HW/hw6/HW6.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # # !pip install transformers # # !pip install seqeval # # !pip install tensorboardx # # !pip install simpletransformers # + import pandas as pd from simpletransformers.classification import ClassificationModel from sklearn.metrics import f1_score, accuracy_score def f1_multiclass(labels, preds): return f1_score(labels, preds, average='macro') # + # load data and format it for simpletransformers train_df = pd.read_json("data/AS/train.json", orient="records") val_df = pd.read_json("data/AS/validation.json", orient="records") test_df = pd.read_json("data/AS/test.json", orient="records") train_df = train_df.loc[:,["problem","label"]] train_df = train_df.rename(columns = {"problem":"text", "label":"labels"}) val_df = val_df.loc[:,["problem","label"]] val_df = val_df.rename(columns = {"problem":"text", "label":"labels"}) test_df = test_df.loc[:,["problem","label"]] test_df = test_df.rename(columns = {"problem":"text", "label":"labels"}) # - train_df # calculate class weights num_classes = 15 weights_dict = {i: ((len(train_df)/num_classes)/train_df["labels"].value_counts()[i]) for i in train_df["labels"].value_counts().index} weights = [0]*num_classes for i in weights_dict: weights[i] = weights_dict[i] weights # + # define model parameters args = { "train_batch_size": 32, "num_train_epochs": 21, "learning_rate": 4e-5, # "weight": weights, "save_model_every_epoch": False, "save_eval_checkpoints": False, "overwrite_output_dir": True, "reprocess_input_data": False, 'evaluate_during_training': True, "eval_batch_size": 32 } # Create a ClassificationModel model = ClassificationModel('bert', 'allenai/scibert_scivocab_uncased', weight=weights, num_labels=num_classes, args=args) # + # train model model.train_model(train_df, eval_df=val_df, f1=f1_multiclass, acc=accuracy_score) # - #evaluate model result, model_outputs, wrong_predictions = model.eval_model(test_df, f1=f1_multiclass, acc=accuracy_score) print(result) #get predictions preds, raw = model.predict(list(test_df["text"]))
AS_BERT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _kg_hide-output=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import gc import warnings warnings.filterwarnings("ignore") import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import KFold from sklearn.metrics import roc_auc_score import lightgbm as lgb SEED = 42 float_cols = [ 'TransactionAmt', 'card2', 'card3', 'card5', 'addr1', 'addr2', 'dist1', 'dist2', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9', 'D10', 'D11', 'D12', 'D13', 'D14', 'D15', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'V29', 'V30', 'V31', 'V32', 'V33', 'V34', 'V35', 'V36', 'V37', 'V38', 'V39', 'V40', 'V41', 'V42', 'V43', 'V44', 'V45', 'V46', 'V47', 'V48', 'V49', 'V50', 'V51', 'V52', 'V53', 'V54', 'V55', 'V56', 'V57', 'V58', 'V59', 'V60', 'V61', 'V62', 'V63', 'V64', 'V65', 'V66', 'V67', 'V68', 'V69', 'V70', 'V71', 'V72', 'V73', 'V74', 'V75', 'V76', 'V77', 'V78', 'V79', 'V80', 'V81', 'V82', 'V83', 'V84', 'V85', 'V86', 'V87', 'V88', 'V89', 'V90', 'V91', 'V92', 'V93', 'V94', 'V95', 'V96', 'V97', 'V98', 'V99', 'V100', 'V101', 'V102', 'V103', 'V104', 'V105', 'V106', 'V107', 'V108', 'V109', 'V110', 'V111', 'V112', 'V113', 'V114', 'V115', 'V116', 'V117', 'V118', 'V119', 'V120', 'V121', 'V122', 'V123', 'V124', 'V125', 'V126', 'V127', 'V128', 'V129', 'V130', 'V131', 'V132', 'V133', 'V134', 'V135', 'V136', 'V137', 'V138', 'V139', 'V140', 'V141', 'V142', 'V143', 'V144', 'V145', 'V146', 'V147', 'V148', 'V149', 'V150', 'V151', 'V152', 'V153', 'V154', 'V155', 'V156', 'V157', 'V158', 'V159', 'V160', 'V161', 'V162', 'V163', 'V164', 'V165', 'V166', 'V167', 'V168', 'V169', 'V170', 'V171', 'V172', 'V173', 'V174', 'V175', 'V176', 'V177', 'V178', 'V179', 'V180', 'V181', 'V182', 'V183', 'V184', 'V185', 'V186', 'V187', 'V188', 'V189', 'V190', 'V191', 'V192', 'V193', 'V194', 'V195', 'V196', 'V197', 'V198', 'V199', 'V200', 'V201', 'V202', 'V203', 'V204', 'V205', 'V206', 'V207', 'V208', 'V209', 'V210', 'V211', 'V212', 'V213', 'V214', 'V215', 'V216', 'V217', 'V218', 'V219', 'V220', 'V221', 'V222', 'V223', 'V224', 'V225', 'V226', 'V227', 'V228', 'V229', 'V230', 'V231', 'V232', 'V233', 'V234', 'V235', 'V236', 'V237', 'V238', 'V239', 'V240', 'V241', 'V242', 'V243', 'V244', 'V245', 'V246', 'V247', 'V248', 'V249', 'V250', 'V251', 'V252', 'V253', 'V254', 'V255', 'V256', 'V257', 'V258', 'V259', 'V260', 'V261', 'V262', 'V263', 'V264', 'V265', 'V266', 'V267', 'V268', 'V269', 'V270', 'V271', 'V272', 'V273', 'V274', 'V275', 'V276', 'V277', 'V278', 'V279', 'V280', 'V281', 'V282', 'V283', 'V284', 'V285', 'V286', 'V287', 'V288', 'V289', 'V290', 'V291', 'V292', 'V293', 'V294', 'V295', 'V296', 'V297', 'V298', 'V299', 'V300', 'V301', 'V302', 'V303', 'V304', 'V305', 'V306', 'V307', 'V308', 'V309', 'V310', 'V311', 'V312', 'V313', 'V314', 'V315', 'V316', 'V317', 'V318', 'V319', 'V320', 'V321', 'V322', 'V323', 'V324', 'V325', 'V326', 'V327', 'V328', 'V329', 'V330', 'V331', 'V332', 'V333', 'V334', 'V335', 'V336', 'V337', 'V338', 'V339', 'id_01', 'id_02', 'id_03', 'id_04', 'id_05', 'id_06', 'id_07', 'id_08', 'id_09', 'id_10', 'id_11', 'id_13', 'id_14', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22', 'id_24', 'id_25', 'id_26', 'id_32' ] # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" X_train = pd.read_csv('../input/data-cleaning-and-feature-engineering/train.csv', dtype=dict.fromkeys(float_cols, np.float32)) X_test = pd.read_csv('../input/data-cleaning-and-feature-engineering/test.csv', dtype=dict.fromkeys(float_cols, np.float32)) y_train = X_train['isFraud'].copy() X_train.drop(columns=['isFraud'], inplace=True) print('Number of Training Examples = {}'.format(X_train.shape[0])) print('Number of Test Examples = {}'.format(X_test.shape[0])) print('Training Set Memory Usage = {:.2f} MB'.format(X_train.memory_usage().sum() / 1024**2)) print('Test Set Memory Usage = {:.2f} MB\n'.format(X_test.memory_usage().sum() / 1024**2)) print('X_train Shape = {}'.format(X_train.shape)) print('y_train Shape = {}'.format(y_train.shape)) print('X_test Shape = {}\n'.format(X_test.shape)) # + drop_cols = ['V300','V309','V111','C3','V124','V106','V125','V315','V134','V102','V123','V316','V113', 'V136','V305','V110','V299','V289','V286','V318','V103','V304','V116','V29','V284','V293', 'V137','V295','V301','V104','V311','V115','V109','V119','V321','V114','V133','V122','V319', 'V105','V112','V118','V117','V121','V108','V135','V320','V303','V297','V120', 'TransactionID', 'TransactionDate', 'Minute', 'Hour', 'Day', 'DayOfWeek', 'Week', 'Month', 'card'] for df in [X_train, X_test]: df.drop(columns=drop_cols, inplace=True) # + object_cols = [col for col in X_train.columns if X_train[col].dtype == 'object'] le = LabelEncoder() for df in [X_train, X_test]: for col in object_cols: df[col] = le.fit_transform(df[col].astype(str).values) # - lgb_param = { 'min_data_in_leaf': 106, 'num_leaves': 500, 'learning_rate': 0.008, 'min_child_weight': 0.03454472573214212, 'bagging_fraction': 0.4181193142567742, 'feature_fraction': 0.3797454081646243, 'reg_lambda': 0.6485237330340494, 'reg_alpha': 0.3899927210061127, 'max_depth': -1, 'objective': 'binary', 'seed': SEED, 'feature_fraction_seed': SEED, 'bagging_seed': SEED, 'drop_seed': SEED, 'data_random_seed': SEED, 'boosting_type': 'gbdt', 'verbose': 1, 'metric':'auc', } # + # %%time N = 10 kf = KFold(n_splits=N) importance = pd.DataFrame(np.zeros((X_train.shape[1], N)), columns=['Fold_{}'.format(i) for i in range(1, N + 1)], index=X_train.columns) scores = [] y_pred = np.zeros(X_test.shape[0]) oof = np.zeros(X_train.shape[0]) for fold, (trn_idx, val_idx) in enumerate(kf.split(X_train, y_train), 1): print('Fold {}'.format(fold)) trn_data = lgb.Dataset(X_train.iloc[trn_idx, :].values, label=y_train.iloc[trn_idx].values) val_data = lgb.Dataset(X_train.iloc[val_idx, :].values, label=y_train.iloc[val_idx].values) clf = lgb.train(lgb_param, trn_data, 10000, valid_sets=[trn_data, val_data], verbose_eval=500, early_stopping_rounds=500) predictions = clf.predict(X_train.iloc[val_idx, :].values) importance.iloc[:, fold - 1] = clf.feature_importance() oof[val_idx] = predictions score = roc_auc_score(y_train.iloc[val_idx].values, predictions) scores.append(score) print('Fold {} ROC AUC Score {}\n'.format(fold, score)) y_pred += clf.predict(X_test) / N del trn_data, val_data, predictions gc.collect() print('Average ROC AUC Score {} [STD:{}]'.format(np.mean(scores), np.std(scores))) # + importance['Mean_Importance'] = importance.sum(axis=1) / N importance.sort_values(by='Mean_Importance', inplace=True, ascending=False) plt.figure(figsize=(15, 120)) sns.barplot(x='Mean_Importance', y=importance.index, data=importance) plt.xlabel('') plt.tick_params(axis='x', labelsize=15) plt.tick_params(axis='y', labelsize=15) plt.title('Mean Feature Importance Between Folds', size=15) plt.show() # - submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv', index_col='TransactionID') submission['isFraud'] = y_pred submission.to_csv('submission.csv') submission.head()
IEEE-CIS Fraud Detection - LightGBM and Some New Features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # + [markdown] nbpresent={"id": "dac6427e-b8df-46f9-bfd3-b24427a73993"} slideshow={"slide_type": "slide"} # # Introduction to Data Science # # Lecture 8: Hypothesis testing and statistical inference # *COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/* # # In this lecutre, we'll cover # * Hypothesis testing # * Central limit theorem # * A/B testing # # Mandatory reading: # [WIRED article on A/B testing](http://www.wired.com/2012/04/ff_abtesting/) # # Mandatory listening: # [Planet Money Episode 669: A or B](https://www.npr.org/sections/money/2015/12/11/459412925/episode-669-a-or-b) # # Further reading: <NAME>, Probability and Statistics for Engineering and the Sciences, 9th ed. Cengage Learning (2016) Ch. 8 and 9. # # For a more complete treatment, take Math 3070 (Applied Statistics I). # # + slideshow={"slide_type": "skip"} #imports and setup import pandas as pd import scipy as sc from scipy.stats import bernoulli from scipy.stats import binom from scipy.stats import norm from scipy.stats import t import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams['figure.figsize'] = (10, 6) plt.style.use('ggplot') # + [markdown] slideshow={"slide_type": "slide"} # ## Recap Lecture 4: Descriptive vs. Inferential Statistics # # *Descriptive statistics* quantitatively describes or summarizes features of a dataset. # # *Inferential statistics* attempts to learn about the population from which the data was sampled. # + [markdown] slideshow={"slide_type": "slide"} # ## Recap Lecture 4: discrete random variables # # *Discrete random variables* take discrete values with preassigned probabilities described by a probaility mass function (PMF). If $X$ is the random variable and $f(k)$ is the PMF, we say "the probability that $X$ takes value $k$ is given by $f(k)$" and write # $$ # \textrm{Prob}(X=k) = f(k). # $$ # # ### Bernoulli distribution # A Bernoulli random variable can take the values $k=0$ or $1$ and has PMF # $$ # f(k) = \begin{cases} p & k=1 \\ 1-p & k = 0 \end{cases} # $$ # # # Some facts about Bernoulli variables: # * mean is $p$ # * variance is $p(1-p)$ # # **Example:** The Bernoulli distribution with $p=0.5$ describes a 'fair' coin toss where 1 and 0 represent "heads" and "tails", respectively. If the coin is unfair, then we would have that $p\neq 0.5$. # + [markdown] slideshow={"slide_type": "-"} # ### Binomial distribution # # A binomial r.v. takes values $k=0,1,\ldots,n$, with a probability given by the pmf # $$ # f(k) = \binom{n}{k} p^k (1-p)^{n-k}. # $$ # Here, $\binom{n}{k} = \frac{n!}{k!(n-k)!}$ in the binomial coefficient that describes how many ways there are to choose a subset of $k$ elements, disregarding their order, from a set of $n$ elements. # # + slideshow={"slide_type": "-"} n =10 p = 0.5 f = lambda k: binom.pmf(k, n=n,p=p) x = sc.arange(n+1); plt.plot(x, f(x),'*-') plt.title("The probability mass function for a Binomial random variable") plt.xlim([0,n]) plt.show() # + [markdown] slideshow={"slide_type": "-"} # Some facts about the binomial distribution: # - A binomial random variable is just the sum of $n$ Bernoulli random variables. You can think of it as summarizing the resutls of $n$ coin flips by just keeping track of the total number of heads. # - The mean is $np$ # - The variance is $np(1−p)$ # + [markdown] slideshow={"slide_type": "-"} # ### Poisson distribution # You also saw the Poisson random variable in the homework, which is another example of a discrete random variable. # # + [markdown] slideshow={"slide_type": "slide"} # ## Recap Lecture 4: continuous random variables # # A *continuous random variable* can take any real value, but some numbers are more likely than others. The probability is given by the *probability density function (PDF)*, which is analogous to the PMF for discrete random variables. If f(x) is the PDF for the random variable $X$, then the probability that $X$ takes the value in the interval $[a,b]$ is given by # # $$ # \textrm{Prob}(X\in[a,b]) = # \int_a^b f(x) dx. # $$ # This is just the area under the curve for this interval. # # ### Example: Normal (Gaussian) distribution # # The *probability density function (PDF)* for a normal (Gaussian) random variable is # $$ # f(x) = \frac{1}{\sqrt{ 2 \pi \sigma^2 }} # e^{ - \frac{ (x - \mu)^2 } {2 \sigma^2} }. # $$ # This is sometimes referred to as the 'bell curve'. # + slideshow={"slide_type": "-"} mu = 0 # mean sigma = 1 # standard deviation x = sc.arange(mu-4*sigma,mu+4*sigma,0.001); pdf = norm.pdf(x,loc=mu, scale=sigma) plt.title("The probability density function for a normal random variable") plt.plot(x, pdf, linewidth=2, color='k') plt.show() # + [markdown] slideshow={"slide_type": "-"} # Some facts about the normal distribution: # - The mean is $\mu$ # - The variance is $\sigma^2$ # # To compute the integral # $$ # \textrm{Prob}(X\in[a,b]) = # \int_a^b f(x) dx, # $$ # it is useful to define the *cumulative distribution function* (CDF) # $$ # F(x) = \int_{-\infty}^x f(x) dx. # $$ # Then we can write # $$ # \int_a^b f(x) dx = # \int_{-\infty}^b f(x) dx - \int_{-\infty}^a f(x) dx = # F(b) - F(a). # $$ # This is convenient because we know longer have to evaluate an integral! However, there isn't a nice way to write $F(x)$ for the normal distribution in terms of elementary functions. So we just think about $F(x)$ as a known function that we can easily compute using python. # + slideshow={"slide_type": "-"} mu = 0 # mean sigma = 1 # standard deviation x = sc.arange(mu-4*sigma,mu+4*sigma,0.001); cdf = norm.cdf(x,loc=mu, scale=sigma) plt.title("The cumulative density function for a normal random variable") plt.plot(x, cdf, linewidth=2, color='k') plt.show() # + [markdown] slideshow={"slide_type": "-"} # ### Exercise # Interpet the following in terms of normal random variables: # - $\int_{-\infty}^1 f(x) dx = F(1)$ # + slideshow={"slide_type": "-"} norm.cdf(1, loc=mu, scale=sigma) # + [markdown] slideshow={"slide_type": "-"} # - $\int_{-1}^1 f(x) dx = F(1) - F(-1)$ # + slideshow={"slide_type": "-"} norm.cdf(1, loc=mu, scale=sigma) - norm.cdf(-1, loc=mu, scale=sigma) # + [markdown] slideshow={"slide_type": "-"} # Note that $\int_{-\infty}^\infty f(x) dx = 1$ # + slideshow={"slide_type": "-"} norm.cdf(sc.inf, loc=mu, scale=sigma) # + [markdown] slideshow={"slide_type": "-"} # # **Remark:** There are many other continous random variables, but in this class we'll only consider normal random variables. # + [markdown] slideshow={"slide_type": "slide"} # ## Hypothesis testing # # Suppose we have a coin and we want to determine whether or not it is 'fair'. We could flip it many, many times and count how many heads we obtain. If the fraction of heads is approximately $0.5$, we might argue that the coin is fair. # # This is an example of statistical inference. We are trying to determine something about the coin from samples of coin flips. # # Let's say we flip a coin $n=1000$ times. If the coin is fair, the outcome is described by the Binomial distribution with $p=0.5$. # + slideshow={"slide_type": "-"} f = lambda k: binom.pmf(k, n=1000,p=0.5) x = sc.arange(1001); plt.plot(x, f(x),'*-') plt.plot(545,f(545),'o') plt.title("The probability mass function for a Binomial random variable") plt.xlim([0,1001]) plt.show() # + [markdown] slideshow={"slide_type": "-"} # Suppose that in our experiment, we saw $545$ heads. The probability of this occuring is # f(k = 545): # + slideshow={"slide_type": "-"} binom.pmf(545, n=1000,p=0.5) # + [markdown] slideshow={"slide_type": "-"} # In hypothesis testing, the more important question is: what is the probability of seeing a value as extreme or more extreme than the value that we observed? # # I would say that any result $\leq 455$ or $\geq 545$ is 'as or more extreme'. Why? # # So the probability of seeing as extreme of an outcome is: # + slideshow={"slide_type": "-"} s = sum(binom.pmf(sc.arange(0,456),n=1000,p=0.5)) + sum(binom.pmf(sc.arange(545,1001),n=1000,p=0.5)) print(s) print(1-s) # + [markdown] slideshow={"slide_type": "-"} # So the likelihood of seeing so few heads or tails is just $0.49\%$. So it is very unlikely that if the coin were fair, we would see this result! Maybe so unlikely that we would declare that the coin is unfair? This is the idea behind **hypothesis testing**. # # **Note**: I didn't say that it is unlikely that the coin itself is unfair. Only if it were to be fair, it would be unlikely to see this result. # # In *hypothesis testing*, we make a null hypothesis, written $H_0$. In this case, the null hypothesis is # $$ # H_0: \text{the coin is fair, i.e., $p=0.5$}. # $$ # The alternative hypothesis, $H_a$, is typically the hypothesis that the researcher wants to validate. In this case, $H_a$ is that the coin is unfair, i.e., $p\neq 0.5$. # # We also choose a *significance level* for the test, $\alpha$, traditionally $1\%$ or $5\%$. # In this case, let's choose a significance level of $\alpha = 1\%$. # # We then perform an experiment. In this case, we flip the coin 1000 times and count the number of heads (in this case 545). # # Finally, assuming the null hypothesis is true, we compute how how likely it is to see a number that is at least as far from the expected value as the number obtained. In our case, this is $0.49\%$. The is called the *p-value*. Since $p=0.49\%$ is smaller than the chosen significance level, $\alpha = 1\%$, we reject the null hypothesis and declare the coin to be unfair. # # Some comments about the p-value: # 1. A p-value is a probability calculated assuming that $H_0$ is true. # # + The smaller the p-value, the stronger the evidence against $H_0$. # # + **Warning:** A p-value is not the probability that the null hypothesis is true or false. It is the probability that an erroneous conclusion is reached. In this example, it is the probability that the coin actually is fair and we just happened to see an outcome as extreme as 545 heads. # # To avoid computing sums (as above) and to 'normalize' the above procedure, it is useful to introduce the *Central Limit Thoerem*. # + [markdown] slideshow={"slide_type": "slide"} # ## Central Limit Theorem # # One of the reasons that the normal distribution is **so important** is the following theorem. # # **Central Limit Theorem.** Let $\{X_1,\ldots, X_n\}$ be a sample of $n$ random variables chosen identically and independently from a distribution with mean $\mu$ and finite variance $\sigma^2$. If $n$ is 'large', then # - the sum of the variables $\sum_{i=1}^n X_i$ is also a random variable and is approximately **normally** distributed with mean $n\mu$ and variance $n\sigma^2$ and # - the mean of the variables $\frac{1}{n}\sum_{i=1}^n X_i$ is also a random variable and is approximately **normally** distributed with mean $\mu$ and variance $\frac{\sigma^2}{n}$. # # How can we use the central limit theorem (CLT)? # # Recall that a binomial random variable is the sum of $n$ bernoulli random variables. So the CLT tells us that if $n$ is large, binomial random variables will be distributed approximately normally. That is, if we flip a coin many times, the number of heads that we're likely to see is described by a normal distribution. This provides a different (easier) way to answer the question: How unlikely is it to flip a fair coin 1000 times and see 545 heads? # # Suppose we flip a fair ($p=0.5$) coin 1000 times. # # *Question:* How many heads do we expect to see? # # The CLT says that the number of heads (= sum of Bernoulli r.v. = binomial r.v.) is approximately normally distributed with mean # $$ # n\mu = np = 1000*0.5 = 500 # $$ # and variance # $$ # n \sigma^2 = np(1-p) = 1000*0.5*0.5 = 250. # $$ # # Let's do an experiment to see how good the CLT is for Bernoulli random variables. We'll call flipping a fair coin n=1,000 times and counting the number of heads a "simulation". Recall that the outcome is precisely a binomial random variable with n=1,000 and p = 0.5. We'll do 10,000 simulations and then compare the histogram of the binomial random variables and the normal distribution predicted by the CLT. # + slideshow={"slide_type": "-"} n = 1000 p = 0.5 bin_vars = binom.rvs(n=n,p=p,size=10000) plt.hist(bin_vars, bins='auto',normed=True) mu = n*p sigma = sc.sqrt(n*p*(1-p)) x = sc.arange(mu-4*sigma,mu+4*sigma,0.1); pdf = norm.pdf(x, loc=mu, scale=sigma) plt.plot(x, pdf, linewidth=2, color='k') plt.title("A comparison between the histogram of binomial random \n variables and the normal distribution predicted by the CLT") plt.show() # + [markdown] slideshow={"slide_type": "-"} # So what is the likelihood of flipping a coin 1000 times and seeing an event less extreme as 545 heads? # # The CLT tells us that this is approximately # $$ # \int_{455}^{545} f(x) dx = F(545) - F(455). # $$ # # This is something that we can easily evaluate using the cumulative distribution function (CDF). # + slideshow={"slide_type": "-"} n = 1000 p = 0.5 mu = n*p sigma = sc.sqrt(n*p*(1-p)) print(norm.cdf(545, loc=mu, scale=sigma) - norm.cdf(455, loc=mu, scale=sigma)) # a plot illustrating the integral x = sc.arange(mu-4*sigma,mu+4*sigma,0.001); plt.plot(x, norm.pdf(x, loc=mu, scale=sigma), linewidth=2, color='k') x2 = sc.arange(455,545,0.001) plt.fill_between(x2, y1= norm.pdf(x2,loc=mu, scale=sigma), facecolor='red', alpha=0.5) plt.xlim([mu-4*sigma,mu+4*sigma]) plt.show() # + [markdown] slideshow={"slide_type": "-"} # So again, we see that $99.6\%$ of the time, we would see an event less extreme than 545 heads. # + [markdown] slideshow={"slide_type": "slide"} # ## Example: "Freshman 15": Fact or Fiction # # This example was taken from Devore, pp.314-315. # # "A common belief among the lay public is that body weight increases after entry into college, and the phrase 'freshman 15' has been coined to describe the 15 pounds that students presumably gain over their freshman year." # # Let $\mu$ denote the true average weight gain in the first year of college. We take the null hypothesis to be # $$ # H_0: \mu \geq 15 # $$ # so that the alternative hypothesis is that the average weight gain in the first year of college is less than 15 lbs. # # We set a signifnicance level of, say, $\alpha = 1\%$. # # We suppose a random sample of $n$ students is selected, their weights (before and after the first year of college) are measured, and the sample mean $\bar{x}$ and sample standard deviation $s$ are computed. An article in the journal Obesity (2006) cites that for a sample of $n=137$ students, the sample mean weight gain was $\bar{x}=2.42$ lb and with a sample standard deviation of $s=5.72$ lb. # # Assuming $H_0$ to be true, how unlikely is it that we would observe such a small value ($\bar{x}=2.42$)? We take a normal distribution with mean given by the null value ($\mu = 15$) and variance given by $s^2/n = (5.72)^2/137=0.2388$. The p-value is then computed as $F(2.42)$. # + slideshow={"slide_type": "-"} mu = 15 sigma = sc.sqrt(5.72**2/137) print('p:', norm.cdf(2.42, loc=mu, scale=sigma)) # + [markdown] slideshow={"slide_type": "-"} # The p-value is practically zero, much less than the significance level! The data very strongly contradicts the null hypothesis. We reject the null hypothesis, $H_0$, and conclude that the 'freshman 15' is fiction! # + [markdown] slideshow={"slide_type": "slide"} # ## Example: Gender in Utah, the z-test # # Let's try to determine whether the following statement is true: # $$ # \text{In Utah, there are 50% men and 50% women.} # $$ # # We model this as a Bernoulli variable with female = 1, male = 0. # # We take as null hypothesis that the proportion of women is $p=0.5$, # $$ # H_0: p = 0.5 # $$ # We set the significance level as $\alpha = 0.05$. # # Now, we need a sample...we can use our class survey with 62 responses. # + slideshow={"slide_type": "-"} class_gender = pd.read_csv('SurveyGenderResponse.csv') print(class_gender.head()) print(class_gender.describe()) # It is more convenient to map # Female -> 1 # Male -> 0 di = {'Female': 1,'Male': 0} class_gender['Gender'].replace(di,inplace=True) print(class_gender.head()) # + [markdown] slideshow={"slide_type": "-"} # Record the number of female students, size of survey, and percent of female students, and sample standard deviation. # + slideshow={"slide_type": "-"} num_f = class_gender['Gender'].sum() print(num_f) n = class_gender.size print(n) x_bar = class_gender['Gender'].mean() print(x_bar) s = class_gender['Gender'].std() print(s) # + [markdown] slideshow={"slide_type": "-"} # According to our survery, 17 of 62 people are women, so we have $\bar x = 0.27$. # # To proceed, we conduct a **z-test**, which is the same as we did in the previous example, except now we use the normalized z-values, # $$ # z = \frac{\bar{x} - .5}{s/\sqrt{n}}. # $$ # The CLT can be used to show that the $z$ score is distributed according to the "standard" normal distribution with mean $\mu=0$ and standard deviation $\sigma = 1$. # # The z-value is generally called a **test statistic**. Every type of hypothesis test has its own test statistic. The z-test is just one example of a hypothesis test, see many more listed [here](https://en.wikipedia.org/wiki/Test_statistic#Common_test_statistics). # + slideshow={"slide_type": "-"} z = (x_bar - .5)/(s/sc.sqrt(n)) print(z) # + [markdown] slideshow={"slide_type": "-"} # How unlikely is it to see a z-score less than $z = -3.95$? # $$ # \text{p-value} = \int_{-\infty}^z f(x) dx = F(z) # $$ # We can compute this using the function norm.cdf with default arguments 'loc=0' and 'scale=1'. # + slideshow={"slide_type": "-"} p_value = norm.cdf(z) print(p_value) # + [markdown] slideshow={"slide_type": "-"} # Under the assumption of the null hypothesis, we would only see a sample size this extreme $p=0.004\%$ of the time. Since the $p$ value is less than our chosen signficance level, $\alpha = 5\%$, we reject the null hypothesis and conclude that in Utah there are fewer than 50% women. # # **What's wrong with this finding?** # # The proceedure we used for the hypothesis test was correct. # # However, the students were **not randomly sampled from the population**! If we wanted to address this question, we'd have to account for the fact that the survey was taken at a university in a STEM course, both of which bias the sample (in opposite directions). This is one reason that polling (inference from survey data) is very challenging. # # The 2010 census shows that the percentage of females in Utah is 49.8%. # http://www.census.gov/quickfacts/table/SEX205210/49,4967000 # + [markdown] slideshow={"slide_type": "slide"} # ## What to do for smaller sample sizes? Student's t-test # # When $n$ is small, the Central Limit Theorem can no longer be used. In this case, if the samples are drawn from an approximately normal distribution, then the correct distribution to use is called the Student's t distribution with $\nu = n-1$ degrees of freedom. The probability density function (pdf) for the student's t distribution is not pretty (Google it!) but it is built into scipy, so we can compare the student's t-test to the normal distribution. # + slideshow={"slide_type": "-"} # there is some trouble with this package for some python versions # if it doesn't work, don't worry about it from ipywidgets import interact samp_mean = 0 samp_std_dev = 1 x = sc.linspace(samp_mean-4*samp_std_dev,samp_mean+4*samp_std_dev,1000); def compare_distributions(sample_size): pdf1 = norm.pdf(x, loc=samp_mean, scale=samp_std_dev/sc.sqrt(sample_size)) pdf2 = t.pdf(x,df=sample_size-1,loc=samp_mean, scale=samp_std_dev/sc.sqrt(sample_size)) plt.plot(x, pdf1, linewidth=2, color='k',label='normal distribution pdf') plt.plot(x, pdf2, linewidth=2, color='r',label='t distribution pdf') plt.xlim(x.min(),x.max()) plt.ylim(0,2) plt.legend() plt.show() interact(compare_distributions,sample_size=(2,20,1)) # + [markdown] slideshow={"slide_type": "-"} # The student's t distribution has "heaveier tails" than the normal distribution. For a sample size greater than $\approx 20$, the normality assumption is generally accepted as reasonable. # + [markdown] slideshow={"slide_type": "slide"} # ## Types of error in hypothesis testing # # In hypothesis testing, there are two types of errors. A *type I error* is the incorrect rejection of a true null hypothesis (a "false positive"). A *type II error* is incorrectly accepting a false null hypothesis (a "false negative"). Depending on the application, one error can be more consequential than the other. # # ![](InferenceErrors.png) # $\qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad$ # source: [wikipedia](https://en.wikipedia.org/wiki/Type_I_and_type_II_errors) # # The probability of making a type I (false positive error) is the significance level $\alpha$. # # **Examples** # # **(1)** In drug testing, we take the null hypothesis (H0): "This drug has no effect on the disease." A type I error detects an effect (the drug cures the disease) that is not present. A type II error fails to detect an effect (the drug cures the disease) that is present. # # **(2)** In a trial, we take the null hypothesis (H0): "This man is innocent." A type I error convicts an innocent person. A type II error lets a guilty person go free. # + [markdown] slideshow={"slide_type": "slide"} # ## P hacking # # Recall that the p-value measures how extreme the observation is and is compared to the significance level. Some comments about the p-value: # 1. A p-value is a probability calculated assuming that $H_0$ is true. # # + The smaller the p-value, the stronger the evidence against $H_0$. # # + A p-value is not the probability that the null hypothesis is true or false. It is the probability that an erroneous conclusion is reached. # # Recently the *misuse* of hypothesis testing (p-values) has raised considerable controversy. Basically, if you do enough hypothesis tests, eventually you'll have a Type I (false positive) error. This is a real problem in a world with tons of data in which it is easy to do many, many hypothesis tests automatically. # # You can read more about 'P hacking' here: # # - <NAME>, Scientific method: Statistical errors, Nature (2014) [link](https://doi.org/10.1038/506150a) # # - <NAME>, The Earth is Round (p<0.05), American Psychologist (1994) [link](https://doi.org/10.1037/0003-066x.49.12.997) # # + [markdown] slideshow={"slide_type": "slide"} # ## A/B testing # A/B testing is a method of comparing two or more versions of an advertisement, webpage, app, etc. We set up an experiment where the variants are shown to users at random and statistical analysis is used to determine which is best. AB testing is the *de facto* test for many business decisions. # # **Example.** A/B testing was extensively used by President Obama during his 2008 and 2012 campaigns to develop # * optimized fund-raising strategies, # * get-out-the-vote programs that would be most beneficial, and # * target ads to the most susceptible audiences. # # Learn more here: # [Wired story on A/B testing](http://www.wired.com/2012/04/ff_abtesting/) # and # [Planet Money Episode 669: A or B](https://www.npr.org/sections/money/2015/12/11/459412925/episode-669-a-or-b) # # **Example.** Suppose your company is developing an advertisement. The art department develops two internet ads: "Ad A" and "Ad B". Your job is to figure out which is better. # # You decide to do an experiment: You use Google ads to randomly show 1000 internet users Ad A and 1000 internet users Ad B. # # It turns out that 500 Ad A viewers click on the ad while 550 Ad B viewers click on the ad? Obviously Ad B did better, but is the difference "significant" enough to say that Ad B is better? Or perhaps Ad B just got lucky in this test? # # In homework 4, you’ll answer this question. More generally, this is a question about the difference between population proportions. # + [markdown] slideshow={"slide_type": "slide"} # ## Statistical inference for a difference between population proportions # We consider comparing the population proportions of two different populations. # # We make the following definitions: # - $N_A$ is the number of surveyed people from population $A$ # - $n_A$ is the number of successes from population $A$ # - $p_A = n_A/N_A$ is the proportion of successes from population $A$ # # Similarly, we define # - $N_B$ is the number of surveyed people from population $B$ # - $n_B$ is the number of successes from population $B$ # - $p_B = n_B/N_B$ is the proportion of successes from population $B$ # # We make the null hypothesis: # $$ # H_0\colon \text{$p_A$ and $p_B$ are the same, that is, } p_A - p_B = 0. # $$ # That is, the proportion of successes in the two populations is the same. # # We'll take it as a fact (see Devore Ch. 9.4 or Math 3070) that: # - $n_A/N_A$ is approximately a normal random variable with mean $p_A$ and variance $\sigma_A^2 = p_A(1-p_A)/N_A$ # - $n_B/N_B$ is approximately a normal random variable with mean $p_B$ and variance $\sigma_B^2 = p_B(1-p_B)/N_B$ # - $n_A/N_A - n_B/N_B$ is approximately a normal random variable with mean $\mu = 0$ and variance $\sigma^2 = \sigma_A^2 + \sigma_B^2$. # - The test statistic called the *two-proportion z-value* # $$ # Z = \frac{p_A - p_B}{\sqrt{\hat{p} \hat{q} \left( \frac{1}{N_A} + \frac{1}{N_B} \right)}}. # $$ # is approximately distributed according to the standard normal distribution when $H_0$ is true. Here $\hat{p} = \frac{N_A}{N_A + N_B}p_A + \frac{N_B}{N_A + N_B}p_B$ and $\hat{q} = 1-\hat{p}$. # # From the data, we estimate the mean, $\mu$, to be $p_A - p_B$. # # ## Example: 1954 Salk polio-vaccine experiment # # In 1954, polio was widespread and a new vaccine of unknown efficacy was introduced. To test the efficacy, in a double-blind study, two groups of children were give injections: one contained the vaccine and the other contained a placebo. # # Let $p_A$ and $p_B$ be the proportions of the children, having received the placebo and vaccine injections, respectively, to contract polio. We formulate the null hypothesis that # $$ # H_0\colon p_A - p_B \leq 0, # $$ # that is, the vaccine is not effective. # The alternative hypothesis is that # $$ # H_a\colon p_A - p_B >0, # $$ # that is, a vaccinated child is less likely to contract polio than a child receiving the placebo. # # We choose a significance level of $\alpha = 0.01$. # # An experiment was conducted with the following results: # $$ # \begin{aligned} # &\text{Placebo:} \quad N_A = 201,229, \quad n_A = 110 \\ # &\text{Vaccine:} \quad N_B = 200,745, \quad n_B = 33. # \end{aligned} # $$ # + slideshow={"slide_type": "-"} nA = 110 NA = 201229 pA = nA/NA muA = pA sigmaA = sc.sqrt(pA*(1-pA)/NA) nB = 33 NB = 200745 pB = nB/NB muB = pB sigmaB = sc.sqrt(pB*(1-pB)/NB) # + [markdown] slideshow={"slide_type": "-"} # Now we perform the hypothesis test and see what the probability of the outcome is under the assumption of the null hypothesis. # + slideshow={"slide_type": "-"} phat = NA*pA/(NA+NB) + NB*pB/(NA+NB) qhat = 1-phat z = (pA - pB)/sc.sqrt(phat*qhat*(1/NA + 1/NB)) print(z) p_value = 1-norm.cdf(z) print(p_value) # + [markdown] slideshow={"slide_type": "-"} # The probability that an erroneous conclusion is reached, under the assumption of the null hypothesis, is $6.6\times10^{-11}$, way less than the significance level, $\alpha$. We reject the null hypothesis and declare that the vaccine is more effective than a placebo!
08-hypothesis-testing/08-hypothesis-testing.ipynb
// -*- coding: utf-8 -*- // --- // jupyter: // jupytext: // text_representation: // extension: .cs // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: .NET (C#) // language: C# // name: .net-csharp // --- // # Введение в JavaScript // // JavaScript - язык программирования, который может выполнятся на стороне пользователя с помощью браузера и является основным для управления элементами веб-страницы. // // JavaScript часто сокращают до аббревиатуры JS. // // JavaScript как название является зарегистрированной торговой маркой и принадлежит компании ORACLE. // // JavaScript имеет синтаксис схожий с языком Си, однако имеет ряд существенных отличий: // // * Возможность работы с объектами, в том числе определение типа и структуры объекта во время выполнения программы // * Возможность передавать и возвращать функции как параметры, а также присваивать их переменной // * Наличие механизма автоматического приведения типов // * Автоматическая сборка мусора // * Использование анонимных функций // Внедрение кода JavaScript в документ HTML можно выполнить двумя способами: // // 1. Размещение кода непосредственно в HTML-файле // 2. Размещение кода в отдельном файле // При размещении кода внутри HTML-файла код JavaScript обрамляется тэгом `<script>`, пример: // // + dotnet_interactive={"language": "html"} <html>  <head>   <title>Страница с примером кода JavaScript</title>   <script>    alert("Hello World!");   </script>  </head>  <body>   Это текст основной страницы  </body> </html> // - // При размещении JavaScript-кода в отдельном файле, создадим файл с названием `myscript.js` и поместим в него код: // + dotnet_interactive={"language": "javascript"} alert("Hello World!"); // + [markdown] dotnet_interactive={"language": "html"} // А вызов кода из тела основного HTML-файла теперь будет выглядеть вот так: // + dotnet_interactive={"language": "html"} <html> <head> <title>Страница с примером кода JavaScript</title> <script src="./myscript.js"></script> </head> <body> Это текст основной страницы </body> </html>
intro/1_intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Decision Tree # # top-down, recursive, divide-and-conquer classifiers # # best for: # - Linear Separable Data # - Multiple Linear Questions # # ![image.png](attachment:image.png) # # ### Strengths and Weaknesses # # - Allows to easily understand and visualise what is happening with the data # - Prone to overfitting # - specially when there is lots and lots of features # # ### Simple Implementation: Scikit Learn # + from sklearn import tree X = [[0,1], [1,0]] # training set Y = [1,0] # classification classifier = tree.DecisionTreeClassifier() classifier.fit(X,Y) # Normal Prediction query = [[1,0]] # Requires list of list normal = classifier.predict(query) # Probalilistic Prediction probability = classifier.predict_proba(query) print("Normal Prediction",normal) print("Probability", probability) # - # # Parameters # # ```min_sample_spit```
ML/Decision Tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## This notebook has mixed contents. # # 1) This notebook has the metacyc master dataframe in its infant stages. # 2) This notebook contains the basis of a function to take a PubChem ID number and fetch the associated SMILES string from PubChem. # _____________ # # ## Master DataFrame operations performed: import numpy as np import pandas as pd import pubchempy as pc master_df = pd.read_csv('../../../big-datasets/EC_master_fix_cofactor.csv') # read in dataframe from metacyc after removal of cofactors pos_df = master_df.iloc[:7570,:] # select only the positive (true) reactions pos_df.head() pos_df = pos_df[['enzyme', 'product', 'reacts', 'PubChemID']] neg_df = master_df.sample(10000, random_state=12) # randomly select 10,0000 points from the master_df # this is a problem to solve later!!!! some of these selected could be positive points neg_df = neg_df[['enzyme', 'product', 'reacts', 'PubChemID']] neg_df.head() use_me = pos_df.append(neg_df) use_me.shape use_me.to_csv('../../../big-datasets/selected_pos_neg_rxns.csv') # saving selected reactions use_df_smiles, unsuccessful = cid_df_to_smiles(use_me, 'PubChemID') use_df_smiles.head() len(unsuccessful) use_df = use_df_smiles[~use_df_smiles['PubChemID'].isin(unsuccessful)] # drop rows without SMILES use_df.shape use_df.to_csv('../../../big-datasets/selected_with_smiles.csv') # save this version of the master # ### Get SMILES from CID # __________________ # # ### Manipulate DF containing CID into SMILES # + # %%writefile pubchem_client.py import pubchempy as pc def cid_df_to_smiles(df, cid_colname): """ Args: df : pandas dataframe with CID numbers cid_colname (str) : name of column that contains PubChem SID numbers Returns: df : modified with column SMILES unsuccessful_list : list of CIDs for which no SMILES were found """ res = [] unsuccessful_list = [] for index, row in df.iterrows(): cid = row[cid_colname] try: # pubchempy calls to get compound info compound = pc.get_compounds(cid)[0] smiles = compound.canonical_smiles res.append(smiles) except BaseException: res.append('none') unsuccessful_list.append(cid) pass df['SMILES'] = res # df.to_csv(r'../datasets/df_cleaned_kegg_with_smiles.csv') return df, unsuccessful_list # - compound = pc.get_compounds(5363397)[0] smiles = compound.canonical_smiles print(smiles) cid_df_to_smiles(test_df, 'CID')[0] test_df = pd.DataFrame([['EC-1.1.1.321', 'CPD-685', 1, 5363397], ['EC-1.1.1.111', '1-INDANOL', 1, 22819], ['EC-1.21.99.M2', '4-HYDROXYPHENYLACETATE', 1, 4693933],['EC-1.21.99.M2', 'Cl-', 1, 312]], columns=['EC', 'Compound', 'Reacts', 'CID']) # + # %%writefile test_pubchem_client.py import pandas as pd from pandas.util.testing import assert_frame_equal import pubchem_client def test_cid_df_to_smiles(): """Unit test for pubchem_client.py.""" test_frame = pd.DataFrame([['EC-1.1.1.321', 'CPD-685', 1, 5363397], ['EC-1.1.1.111', '1-INDANOL', 1, 22819], ['EC-1.21.99.M2', '4-HYDROXYPHENYLACETATE', 1, 4693933], ['EC-1.21.99.M2', 'Cl-', 1, 312]], columns=['EC', 'Compound', 'Reacts', 'CID']) expected_frame = pd.DataFrame([['EC-1.1.1.321', 'CPD-685', 1, 5363397, 'CC(=CCO)CCC=C(C)CO'], ['EC-1.1.1.111', '1-INDANOL', 1, 22819, 'C1CC2=CC=CC=C2C1O'], ['EC-1.21.99.M2', '4-HYDROXYPHENYLACETATE', 1, 4693933, 'C1=CC(=CC=C1CC(=O)[O-])O'], ['EC-1.21.99.M2', 'Cl-', 1, 312, '[Cl-]']], columns=['EC', 'Compound', 'Reacts', 'CID', 'SMILES', ]) cid_colname = 'CID' result_frame = pubchem_client.cid_df_to_smiles(test_frame, cid_colname) assert_frame_equal( result_frame[0], expected_frame), 'Did not generate expected df.' return # -
deprecated/notebooks/metacyc_notebooks/Pubchem2SMILES-MetaCyc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Predicting the Outcome of Cricket Matches # ## Introduction # # In this project, we shall build a model which predicts the outcome of cricket matches in the Indian Premier League using data about matches and deliveries. # # ### Data Mining: # # * Season : 2008 - 2015 (8 Seasons) # * Teams : DD, KKR, MI, RCB, KXIP, RR, CSK (7 Teams) # * Neglect matches that have inconsistencies such as No Result, Tie, D/L Method, etc. # # ### Possible Features: # # * Average Batsman Rating (top 5) # * Average Bowler Rating (top 4) # * Player of the match frequency # * Previous Encounter - Win by runs, Win by Wickets # * Recent form (Last 5 Games) # * Venue - Home, Away, Neutral # + # The %... is an iPython thing, and is not part of the Python language. # In this case we're just telling the plotting library to draw things on # the notebook, instead of on a separate window. # %matplotlib inline #this line above prepares IPython notebook for working with matplotlib # See all the "as ..." contructs? They're just aliasing the package names. # That way we can call methods like plt.plot() instead of matplotlib.pyplot.plot(). import numpy as np # imports a fast numerical programming library import scipy as sp #imports stats functions, amongst other things import matplotlib as mpl # this actually imports matplotlib import matplotlib.cm as cm #allows us easy access to colormaps import matplotlib.pyplot as plt #sets up plotting under plt import pandas as pd #lets us handle data as dataframes #sets up pandas table display pd.set_option('display.width', 500) pd.set_option('display.max_columns', 100) pd.set_option('display.notebook_repr_html', True) import seaborn as sns #sets up styles and gives us more plotting options from __future__ import division # - # ## Data Mining # # Reading in the data allmatches = pd.read_csv("../data/matches.csv") alldeliveries = pd.read_csv("../data/deliveries.csv") allmatches.head(10) # Selecting Seasons 2008 - 2015 matches_seasons = allmatches.loc[allmatches['season'] != 2016] deliveries_seasons = alldeliveries.loc[alldeliveries['match_id'] < 518] # Selecting teams DD, KKR, MI, RCB, KXIP, RR, CSK matches_teams = matches_seasons.loc[(matches_seasons['team1'].isin(['Kolkata Knight Riders', \ 'Royal Challengers Bangalore', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', \ 'Mumbai Indians', 'Kings XI Punjab'])) & (matches_seasons['team2'].isin(['Kolkata Knight Riders', \ 'Royal Challengers Bangalore', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', \ 'Mumbai Indians', 'Kings XI Punjab']))] matches_team_matchids = matches_teams.id.unique() deliveries_teams = deliveries_seasons.loc[deliveries_seasons['match_id'].isin(matches_team_matchids)] print "Teams selected:\n" for team in matches_teams.team1.unique(): print team # Neglect matches with inconsistencies like 'No Result' or 'D/L Applied' matches = matches_teams.loc[(matches_teams['result'] == 'normal') & (matches_teams['dl_applied'] == 0)] matches_matchids = matches.id.unique() deliveries = deliveries_teams.loc[deliveries_teams['match_id'].isin(matches_matchids)] # Verifying consistency between datasets (matches.id.unique() == deliveries.match_id.unique()).all() # # Building Features # + # Team Strike rates for first 5 batsmen in the team (Higher the better) def getMatchDeliveriesDF(match_id): return deliveries.loc[deliveries['match_id'] == match_id] def getInningsOneBatsmen(match_deliveries): return match_deliveries.loc[match_deliveries['inning'] == 1].batsman.unique()[0:5] def getInningsTwoBatsmen(match_deliveries): return match_deliveries.loc[match_deliveries['inning'] == 2].batsman.unique()[0:5] def getBatsmanStrikeRate(batsman, match_id): onstrikedeliveries = deliveries.loc[(deliveries['match_id'] < match_id) & (deliveries['batsman'] == batsman)] total_runs = onstrikedeliveries['batsman_runs'].sum() total_balls = onstrikedeliveries.shape[0] if total_balls != 0: return (total_runs/total_balls) * 100 else: return None def getTeamStrikeRate(batsmen, match_id): strike_rates = [] for batsman in batsmen: bsr = getBatsmanStrikeRate(batsman, match_id) if bsr != None: strike_rates.append(bsr) return np.mean(strike_rates) def getAverageStrikeRates(match_id): match_deliveries = getMatchDeliveriesDF(match_id) innOneBatsmen = getInningsOneBatsmen(match_deliveries) innTwoBatsmen = getInningsTwoBatsmen(match_deliveries) teamOneSR = getTeamStrikeRate(innOneBatsmen, match_id) teamTwoSR = getTeamStrikeRate(innTwoBatsmen, match_id) return teamOneSR, teamTwoSR # - # Testing Functionality getAverageStrikeRates(517) # + # Bowler Rating : Wickets/Run (Higher the Better) # Team 1: Batting First; Team 2: Fielding First def getInningsOneBowlers(match_deliveries): return match_deliveries.loc[match_deliveries['inning'] == 1].bowler.unique()[0:4] def getInningsTwoBowlers(match_deliveries): return match_deliveries.loc[match_deliveries['inning'] == 2].bowler.unique()[0:4] def getBowlerWPR(bowler, match_id): balls = deliveries.loc[(deliveries['match_id'] < match_id) & (deliveries['bowler'] == bowler)] total_runs = balls['total_runs'].sum() total_wickets = balls.loc[balls['dismissal_kind'].isin(['caught', 'bowled', 'lbw', \ 'caught and bowled', 'stumped'])].shape[0] if total_runs != 0: return (total_wickets/total_runs) * 100 else: return total_wickets def getTeamWPR(bowlers, match_id): totalWPRs = [] for bowler in bowlers: totalWPRs.append(getBowlerWPR(bowler, match_id)) return np.mean(totalWPRs) def getAverageWPR(match_id): match_deliveries = getMatchDeliveriesDF(match_id) innOneBowlers = getInningsOneBowlers(match_deliveries) innTwoBowlers = getInningsTwoBowlers(match_deliveries) teamOneWPR = getTeamWPR(innTwoBowlers, match_id) teamTwoWPR = getTeamWPR(innOneBowlers, match_id) return teamOneWPR, teamTwoWPR # - #Testing Functionality getAverageWPR(517) # + # Man of the Match Awards for players of both Teams def getInningsOneAllBatsmen(match_deliveries): return match_deliveries.loc[match_deliveries['inning'] == 1].batsman.unique() def getInningsTwoAllBatsmen(match_deliveries): return match_deliveries.loc[match_deliveries['inning'] == 2].batsman.unique() def getInningsOneAllBowlers(match_deliveries): return match_deliveries.loc[match_deliveries['inning'] == 2].bowler.unique() def getInningsTwoAllBowlers(match_deliveries): return match_deliveries.loc[match_deliveries['inning'] == 1].bowler.unique() def getTeam(batsmen,bowlers): p = [] p = np.append(p, batsmen) for i in bowlers: if i not in batsmen: p = np.append(p, i) return p def getPlayerMVPAwards(player, match_id): return matches.loc[(matches["player_of_match"] == player) & (matches['id'] < match_id)].shape[0] def getTeamMVPAwards(team, match_id): mvpAwards = 0 for player in team: mvpAwards = mvpAwards + getPlayerMVPAwards(player,match_id) return mvpAwards def bothTeamMVPAwards(match_id): matchDeliveries = getMatchDeliveriesDF(match_id) innOneBatsmen = getInningsOneAllBatsmen(matchDeliveries) innTwoBatsmen = getInningsTwoAllBatsmen(matchDeliveries) innOneBowlers = getInningsTwoAllBowlers(matchDeliveries) innTwoBowlers = getInningsOneAllBowlers(matchDeliveries) team1 = getTeam(innOneBatsmen, innTwoBowlers) team2 = getTeam(innTwoBatsmen, innOneBowlers) team1Awards = getTeamMVPAwards(team1,match_id) team2Awards = getTeamMVPAwards(team2,match_id) return team1Awards, team2Awards # - #Testing Functionality bothTeamMVPAwards(517) # + #Function to generate squad rating def generateSquadRating(match_id): gameday_teams = deliveries.loc[(deliveries['match_id'] == match_id)].batting_team.unique() teamOne = gameday_teams[0] teamTwo = gameday_teams[1] teamOneSR, teamTwoSR = getAverageStrikeRates(match_id) teamOneWPR, teamTwoWPR = getAverageWPR(match_id) teamOneMVPs, teamTwoMVPs = bothTeamMVPAwards(match_id) print "Comparing squads for {} vs {}".format(teamOne,teamTwo) print "\nAverage Strike Rate for Batsmen in {} : {}".format(teamOne,teamOneSR) print "\nAverage Strike Rate for Batsmen in {} : {}".format(teamTwo,teamTwoSR) print "\nBowler Rating (W/R) for {} : {}".format(teamOne,teamOneWPR) print "\nBowler Rating (W/R) for {} : {}".format(teamTwo,teamTwoWPR) print "\nNumber of MVP Awards in {} : {}".format(teamOne,teamOneMVPs) print "\nNumber of MVP Awards in {} : {}".format(teamTwo,teamTwoMVPs) # - #Testing Functionality generateSquadRating(517) # + ## 2nd Feature : Previous Encounter # Won by runs and won by wickets (Higher the better) def getTeam1(match_id): return matches.loc[matches["id"] == match_id].team1.unique() def getTeam2(match_id): return matches.loc[matches["id"] == match_id].team2.unique() def getPreviousEncDF(match_id): team1 = getTeam1(match_id) team2 = getTeam2(match_id) return matches.loc[(matches["id"] < match_id) & (((matches["team1"].isin(team1)) & (matches["team2"].isin(team2))) | ((matches["team1"].isin(team2)) & (matches["team2"].isin(team1))))] def getTeamWBR(match_id, team): WBR = 0 DF = getPreviousEncDF(match_id) winnerDF = DF.loc[DF["winner"] == team] WBR = winnerDF['win_by_runs'].sum() return WBR def getTeamWBW(match_id, team): WBW = 0 DF = getPreviousEncDF(match_id) winnerDF = DF.loc[DF["winner"] == team] WBW = winnerDF['win_by_wickets'].sum() return WBW def getTeamWinPerc(match_id): dF = getPreviousEncDF(match_id) timesPlayed = dF.shape[0] team1 = getTeam1(match_id)[0].strip("[]") timesWon = dF.loc[dF["winner"] == team1].shape[0] if timesPlayed != 0: winPerc = (timesWon/timesPlayed) * 100 else: winPerc = 0 return winPerc def getBothTeamStats(match_id): DF = getPreviousEncDF(match_id) team1 = getTeam1(match_id)[0].strip("[]") team2 = getTeam2(match_id)[0].strip("[]") timesPlayed = DF.shape[0] timesWon = DF.loc[DF["winner"] == team1].shape[0] WBRTeam1 = getTeamWBR(match_id, team1) WBRTeam2 = getTeamWBR(match_id, team2) WBWTeam1 = getTeamWBW(match_id, team1) WBWTeam2 = getTeamWBW(match_id, team2) print "Out of {} times in the past {} have won {} times({}%) from {}".format(timesPlayed, team1, timesWon, getTeamWinPerc(match_id), team2) print "{} won by {} total runs and {} total wickets.".format(team1, WBRTeam1, WBWTeam1) print "{} won by {} total runs and {} total wickets.".format(team2, WBRTeam2, WBWTeam2) # - #Testing functionality getBothTeamStats(517) # + #3rd Feature: Recent Form (Win Percentage of 3 previous matches of a team in the same season) #Higher the better def getMatchYear(match_id): return matches.loc[matches["id"] == match_id].season.unique() def getTeam1DF(match_id, year): team1 = getTeam1(match_id) return matches.loc[(matches["id"] < match_id) & (matches["season"] == year) & ((matches["team1"].isin(team1)) | (matches["team2"].isin(team1)))].tail(3) def getTeam2DF(match_id, year): team2 = getTeam2(match_id) return matches.loc[(matches["id"] < match_id) & (matches["season"] == year) & ((matches["team1"].isin(team2)) | (matches["team2"].isin(team2)))].tail(3) def getTeamWinPercentage(match_id): win = 0 total = 0 year = int(getMatchYear(match_id)) team1 = getTeam1(match_id)[0].strip("[]") team2 = getTeam2(match_id)[0].strip("[]") team1DF = getTeam1DF(match_id, year) team2DF = getTeam2DF(match_id, year) team1TotalMatches = team1DF.shape[0] team1WinMatches = team1DF.loc[team1DF["winner"] == team1].shape[0] team2TotalMatches = team2DF.shape[0] team2WinMatches = team2DF.loc[team2DF["winner"] == team2].shape[0] if (team1TotalMatches != 0) and (team2TotalMatches !=0): winPercTeam1 = ((team1WinMatches / team1TotalMatches) * 100) winPercTeam2 = ((team2WinMatches / team2TotalMatches) * 100) elif (team1TotalMatches != 0) and (team2TotalMatches ==0): winPercTeam1 = ((team1WinMatches / team1TotalMatches) * 100) winPercTeam2 = 0 elif (team1TotalMatches == 0) and (team2TotalMatches !=0): winPercTeam1 = 0 winPercTeam2 = ((team2WinMatches / team2TotalMatches) * 100) else: winPercTeam1 = 0 winPercTeam2 = 0 return winPercTeam1, winPercTeam2 def displayTeamWin(match_id): year = int(getMatchYear(match_id)) team1 = getTeam1(match_id)[0].strip("[]") team2 = getTeam2(match_id)[0].strip("[]") P,Q = getTeamWinPercentage(match_id) print "In the season of {}, {} has a win percentage of {}% and {} has a win percentage of {}% ".format(year, team1, P, team2, Q) # - #Function to implement all features def getAllFeatures(match_id): generateSquadRating(match_id) print ("\n") getBothTeamStats(match_id) print("\n") displayTeamWin(match_id) #Testing Functionality getAllFeatures(517) # # Adding Columns # # + #Create Column for Team 1 Winning Status (1 = Won, 0 = Lost) matches['team1Winning'] = np.where(matches['team1'] == matches['winner'], 1, 0) # + #New Column for Difference of Average Strike rates (First Team SR - Second Team SR) [Negative value means Second team is better] firstTeamSR = [] secondTeamSR = [] for i in matches['id'].unique(): P, Q = getAverageStrikeRates(i) firstTeamSR.append(P), secondTeamSR.append(Q) firstSRSeries = pd.Series(firstTeamSR) secondSRSeries = pd.Series(secondTeamSR) matches["Avg_SR_Difference"] = firstSRSeries.values - secondSRSeries.values # + #New Column for Difference of Wickets Per Run (First Team WPR - Second Team WPR) [Negative value means Second team is better] firstTeamWPR = [] secondTeamWPR = [] for i in matches['id'].unique(): R, S = getAverageWPR(i) firstTeamWPR.append(R), secondTeamWPR.append(S) firstWPRSeries = pd.Series(firstTeamWPR) secondWPRSeries = pd.Series(secondTeamWPR) matches["Avg_WPR_Difference"] = firstWPRSeries.values - secondWPRSeries.values # + #New column for difference of MVP Awards (Negative value means Second team is better) firstTeamMVP = [] secondTeamMVP = [] for i in matches['id'].unique(): T, U = bothTeamMVPAwards(i) firstTeamMVP.append(T), secondTeamMVP.append(U) firstMVPSeries = pd.Series(firstTeamMVP) secondMVPSeries = pd.Series(secondTeamMVP) matches["Total_MVP_Difference"] = firstMVPSeries.values - secondMVPSeries.values # + #New column for win percentage of Team1 in previous encounter firstTeamWP = [] for i in matches['id'].unique(): WP = getTeamWinPerc(i) firstTeamWP.append(WP) firstWPSeries = pd.Series(firstTeamWP) matches["Prev_Enc_Team1_WinPerc"] = firstWPSeries.values # + #New column for Recent form(Win Percentage in the current season) of 1st Team compared to 2nd Team(Negative means 2nd team has higher win percentage) firstTeamRF = [] secondTeamRF = [] for i in matches['id'].unique(): K, L = getTeamWinPercentage(i) firstTeamRF.append(K), secondTeamRF.append(L) firstRFSeries = pd.Series(firstTeamRF) secondRFSeries = pd.Series(secondTeamRF) matches["Total_RF_Difference"] = firstRFSeries.values - secondRFSeries.values # - #Testing matches.tail(20) # # Visualisation # #Graph for Strike Rate matches.boxplot(column = 'Avg_SR_Difference', by='team1Winning', showfliers= False) #Graph for WPR Difference matches.boxplot(column = 'Avg_WPR_Difference', by='team1Winning', showfliers= False) # Graph for MVP Difference matches.boxplot(column = 'Total_MVP_Difference', by='team1Winning', showfliers= False) #Graph for Previous encounters Win Percentage of Team #1 matches.boxplot(column = 'Prev_Enc_Team1_WinPerc', by='team1Winning', showfliers= False) # Graph for Recent form(Win Percentage in the same season) matches.boxplot(column = 'Total_RF_Difference', by='team1Winning', showfliers= False) # # Predictions for the data from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn import svm from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import GaussianNB from sklearn.cross_validation import train_test_split from sklearn import metrics from patsy import dmatrices y, X = dmatrices('team1Winning ~ 0 + Avg_SR_Difference + Avg_WPR_Difference + Total_MVP_Difference + Prev_Enc_Team1_WinPerc + \ Total_RF_Difference', matches, return_type="dataframe") y_arr = np.ravel(y) # ### Training and testing on Entire Data # instantiate a logistic regression model, and fit with X and y model = LogisticRegression() model = model.fit(X, y_arr) # check the accuracy on the training set print "Accuracy is", model.score(X, y_arr)*100, "%" # ### Splitting train and test using train_test_split # evaluate the model by splitting into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y_arr, random_state = 0) # Logistic Regression on train_test_split model2 = LogisticRegression() model2.fit(X_train, y_train) # predict class labels for the test set predicted = model2.predict(X_test) # generate evaluation metrics print "Accuracy is ", metrics.accuracy_score(y_test, predicted)*100, "%" # KNN Classification on train_test_split k_range = list(range(1, 61)) k_score = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors = k) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) k_score.append(metrics.accuracy_score(y_test, y_pred)) plt.plot(k_range, k_score) # Best values of k in train_test_split knn = KNeighborsClassifier(n_neighbors = 50) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) print "Accuracy is ", metrics.accuracy_score(y_test, y_pred)*100, "%" # ### Splitting Training Set (2008-2013) and Test Set (2013-2015) based on Seasons # # #Splitting X_timetrain = X.loc[X.index < 398] Y_timetrain = y.loc[y.index < 398] Y_timetrain_arr = np.ravel(Y_timetrain) X_timetest = X.loc[X.index >= 398] Y_timetest = y.loc[y.index >= 398] Y_timetest_arr = np.ravel(Y_timetest) # Logistic Regression on time-based split sets model3 = LogisticRegression() model3.fit(X_timetrain, Y_timetrain_arr) timepredicted = model3.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, timepredicted)*100, "%" # KNN Classification on time-based split sets k_range = list(range(1, 61)) k_score = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors = k) knn.fit(X_timetrain, Y_timetrain_arr) y_pred = knn.predict(X_timetest) k_score.append(metrics.accuracy_score(Y_timetest_arr, y_pred)) plt.plot(k_range, k_score) # Best values of k in time-based split data knn1 = KNeighborsClassifier(n_neighbors = 31) knn1.fit(X_timetrain, Y_timetrain_arr) y_pred = knn1.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, y_pred)*100, "%" # ### Support Vector Machines clf = svm.SVC(gamma=0.001, C=10) clf.fit(X_timetrain, Y_timetrain_arr) clf_pred = clf.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, clf_pred)*100, "%" # ### Random Forests rfc = RandomForestClassifier(n_jobs = -1, random_state = 1) rfc.fit(X_timetrain, Y_timetrain_arr) rfc_pred = rfc.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, rfc_pred)*100, "%" fi = zip(X.columns, rfc.feature_importances_) print "Feature Importance according to Random Forests Model\n" for i in fi: print i[0], ":", i[1] # ### Naive Bayes Classifier gclf = GaussianNB() gclf.fit(X_timetrain, Y_timetrain_arr) gclf_pred = gclf.predict(X_timetest) print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, gclf_pred) *100, "%"
src/Match Outcome Prediction with IPL Data (Gursahej).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Info # We will use the likely ground truth catalogue (`FDSDWARF_LSB`) and the unfiltered set of objects from which this catalogue is derived (`LSBS_no_par_sel`): # - `LSBS_no_par_sel` contains a number of objects, including many of the objects used in SpaceFluff. # - `FDSDWARF_LSB` contains the objects in the likely ground truth catalogue. # # ### Goal # The goal of this notebook is to extract the properties (color, surface brightness, effective radius, etc.) of all the objects present in the catalogue, so we can attach them to our SpaceFluff data for analysis. We also want to extract (the names of) the objects thought to be LSB/UDG Fornax cluster members, so we can easily query these objects. # # Specifically, we # - extract the objects, with their name and all the properties we can extract from the .fits files, to `./sf_spacefluff_object_data.csv` # - extract the list of names of objects in the likely ground truth (LGT) catalogue to `sf_catalogue_targets.txt` # # ### Findings # # In this notebook, we'll come to find out that: # - the object names (e.g. 'UDGcand_102') from the catalogue(s) match those used in SpaceFluff, so we can use the objects' names as handles to compare. Alternatively, coordinates (RA/DEC) also be used. # - the two catalogues mentioned above find the same object properties (color, concentration, effective radius, etc.), so it doesn't matter from which of the two we extract the properties. There are a few parameters present in each of the catalogues that aren't in the other, and sometimes the names differ (e.g. `PA` vs `pos_angle`). The only relevant difference for us is the presence of a surface brightness (`mue_r`, $\mu_{e,r}$) in one catalogue. We need this for our analysis. # + from astropy.io import fits import numpy as np import pandas as pd import sys sys.path.append('../') from sf_lib.df import ( make_df_classify, make_df_tasks_with_props ) # - # # Extract catalogue data: # # ## Extract `FDSDWARF_LSB.fits`: # + # load fits file, and extract column names and object data hdul = fits.open('./FDSDWARF_LSB.fits') header = hdul[0].header data_selective = hdul[1].data col_names = data_selective.columns.names print('FDSDWARF_LSB columns:', col_names) # extract each target's name to a list. # these target names match those used in SpaceFluff (we'll verify this later in this notebook) targets_selective = [d['target'] for d in data_selective if 'UDGcand' in d['target']] # - # See http://cdsarc.u-strasbg.fr/ftp/J/A+A/620/A165/ReadMe for description of the columns printed above # + # Loop over every entry in data_selective (which is the `FDSDWARF_LSB.fits` file) and map its properties to a dictionary. selected_data = [] for d in data_selective: if 'UDGcand' in d['target']: object_properties = { "name": d[0] # first manually assign 'name', since I prefer 'name' to 'target' } for idx, column in enumerate(data_selective.columns.names[1:]): # then loop over the rest of the properties object_properties[column] = d[idx+1] # and assign the property using its existing name selected_data.append(object_properties) # + # save target names to txt file for later comparison to classification votes # we only need to this this once. Can uncomment the cell if we need to run it again. np.savetxt('sf_catalogue_targets.txt', targets_selective, delimiter=',', fmt="%s") # - # in notebook `sf_12-04-2021`, I extracted a list of unique target names from `classify-classifications.csv`. candidate_names_classify = np.loadtxt('../analysis/sf_candidate_names__classification-classify.txt', dtype=str) # + # find intersection of names between FDSDWARF_LSB.fits and classify_classifications.csv intersecting = list(set(targets_selective) & set(candidate_names_classify)) print('Number of intersecting targets:', len(intersecting)) # - # ## Extract `LSBS_no_par_sel.fits` # + hdul = fits.open('./LSBS_no_par_sel.fits') header = hdul[0].header data_no_selection = hdul[1].data # Extract all UDGcand_* targets from LSBS_no_par_sel. # Note that this fits file also contains other targets. We might want to check if any of those happen to be # Space Fluff candidates, but with another name. Check using RA/dec (see below) targets_no_selection = [d['target'] for d in data_no_selection if 'UDGcand' in d['target']] print('LSBS_no_par_sel columns:', data_no_selection.columns.names) # + # map targets' properties to a list of objects, same as above with FDS_DWARF_LSB spacefluff_data = [] for d in data_no_selection: object_properties = { "name": d[0] } for idx, column in enumerate(data_no_selection.columns.names[1:]): object_properties[column] = d[idx+1] spacefluff_data.append(object_properties) # - # convert the list of objects to a DataFrame (and inspect the head to see if it worked properly) df_spacefluff_data = pd.DataFrame(spacefluff_data) # + # save the DataFrame to csv for later use: df_spacefluff_data.to_csv('./sf_spacefluff_object_data.csv', sep=",", index=False) # load and inspect created .csv to see if it saved correctly: df_spacefluff_data_read = pd.read_csv('./sf_spacefluff_object_data.csv', comment="#") df_spacefluff_data_read.head(2) # - # We see above that there is an `r_mag` and an `r`. I assume these represent magnitudes obtained by various methods. We note that g_mag has a placeholder, so already the assumption here is that `r_mag` is less accurate than `r` also. Below, we plot the parameters against each other, but also the absolute difference between them, and a histogram of each of their errors. We find that `r` is most reliable, and thus we will use this throughout our analysis. # + d = df_spacefluff_data_read lb = 'xkcd:lightish blue' import matplotlib.pyplot as plt # %matplotlib inline fig, fr = plt.subplots(1,3, figsize=(12,3.5)) fr[0].scatter(d['r'], d['r_mag'], alpha=0.15, s=4, c=lb) fr[0].update(dict(xlabel='r', ylabel='r_mag')) fr[1].hist(np.abs(d['r']-d['r_mag']), bins=101, color=lb) fr[1].update(dict(xlabel='|r - r_mag|', ylabel='N')) hist_kw = dict(histtype='step', facecolor='none', bins=51) fr[2].hist(d['re'], **hist_kw, label='r') fr[2].hist(d['r_mage'], **hist_kw, label='r_mag') fr[2].update(dict(xlabel='error', ylabel='N', xlim=[0,1])) fr[2].legend() fig.tight_layout() plt.show() # - # --- # # Compare properties # # Compare object properties between the selective and non-selective catalogues, to see if they match or if a different (more resource-intensive) method was used to extract objects' properties in the selective catalogue. def check_object_property_match(index_sel, index_nosel): ''' Compare all properties (RA, DEC, etc.) between an object in the two .fits files @param {int} index_sel: index of the object by this name in selected_data @param {int} index_nosel: index of the object by this name in spacefluff_data @returns set of booleans. If all properties match, this will equal set([True]) ''' candidate_sel = selected_data[index_sel] # properties of candidate according to `FDSDWARF_LSB.fits` candidate_nosel = spacefluff_data[index_nosel] # properties of candidate according to `LSBS_no_par_sel.fits` columns_match = [] if candidate_sel['name'] == candidate_nosel['name']: col_sel = set(candidate_sel.keys()) # get properties of objects offered by FDSDWARF_LSB.fits col_nosel = set(candidate_nosel.keys()) # ^, but for LSBS_no_par_sel.fits col_intersection = col_sel.intersection(col_nosel) # get the properties present in both the .fits files, # so we can compare them in a loop for column in col_intersection: match = candidate_sel[column] == candidate_nosel[column] columns_match.append(match) set_match = set(columns_match) return set_match # + objects_sel = [d['target'] for d in data_selective] # extract target names objects_nosel = [d['target'] for d in data_no_selection] # ^ objects_intersection = set(objects_sel).intersection(set(objects_nosel)) # get the intersection of target names # + # create a lookup table for object indices, # like { 'UDGcand_001': { 'sel': 1, 'nosel': 2 } }, # which would indicate the index of UDGcand_001 to be 1 in the 'sel' file, and 2 in the 'nosel' file # start with an empty table, and retrieve indices in a loop the next cell object_index_lookup = {} for object_name in objects_intersection: # create object like {'UDGcand_1': { 'sel': None, 'nosel': None }} object_index_lookup[object_name] = { # so we only have to loop each *_data list once 'sel': None, 'nosel': None } # + # loop through each list (selected_data and spacefluff_data), and assign the object's index in the list to the lookup for index, obj in enumerate(selected_data): if obj['name'] in objects_intersection: object_index_lookup[obj['name']]['sel'] = index for index, obj in enumerate(spacefluff_data): if obj['name'] in objects_intersection: object_index_lookup[obj['name']]['nosel'] = index # + tags=[] # loop through the lookup and compare each object's properties. if any don't match, the loop'll print the object's name # and we can manually inspect what properties differ all_match = 0 not_in_spacefluff = 0 for (name, indices) in object_index_lookup.items(): if type(indices['sel']) == int and type(indices['nosel']) == int: set_match = check_object_property_match(indices['sel'], indices['nosel']) if set_match == set([True]): all_match += 1 else: print("Properties don't match, investigate!", name, indices) else: # object doesn't exist in SpaceFluff, which is not a problem not_in_spacefluff += 1 continue len(objects_intersection) == all_match + not_in_spacefluff # - # As we see, none of the objects have non-matching properties, meaning we're free to use the properties from either .fits file. # ## Check if any of the non-`UDGcand` objects have coordinates corresponding to objects in SpaceFluff, somehow # # Note from above that not all objects in the catalogue(s) are named `UDGcand_*`. Are the properties still the same for the objects with other names? This is not relevant for our project since we only use `UDGcand_*` objects, but good as a sanity check regardless. # - load spacefluff objects and their properties object_info = pd.read_csv('./sf_spacefluff_object_data.csv', comment="#") candidate_names_classify = list(np.loadtxt('../analysis/sf_candidate_names__classification-classify.txt', dtype='str')) df = make_df_classify(workflow='classify') df_votes = make_df_tasks_with_props(df, candidate_names_classify, object_info) # - list all RA and DEC values of catalogue objects that aren't named `UDGcand_*` # + ra_index = data_selective.columns.names.index('RA') dec_index = data_selective.columns.names.index('DEC') ras = [d[ra_index] for d in data_selective if d['target'].find('UDGcand') == -1] decs = [d[dec_index] for d in data_selective if d['target'].find('UDGcand') == -1] # - # - see if any of these RA or DEC values correspond to SpaceFluff objects dec_exists = df_votes.query("RA.isin(@ras) or DEC.isin(@decs)") _names = dec_exists['name'].tolist() # + decs_in_df = dec_exists['RA'].tolist() ras_in_df = [] for dec in decs_in_df: try: ras_in_df.append(ras.index(dec)) except: ras_in_df.append(-1) ras_in_df # - # - All indices are -1, so none of the (RA, DEC) combinations exist, and thus none of the non-`UDGcand_*` objects are in fact SpaceFluff objects. names_selective = [d[0] for d in data_selective] names_all = np.array([d[0] for d in data_no_selection]) len([n for n in names_all if 'UDGcand' in n ])
catalogue/sf_fits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import struct import numpy as np def load_mnist(path, kind='train'): """Load MNIST data from `path`""" labels_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind) images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind) with open(labels_path, 'rb') as lbpath: magic, n = struct.unpack('>II', lbpath.read(8)) labels = np.fromfile(lbpath, dtype=np.uint8) with open(images_path, 'rb') as imgpath: magic, num, rows, cols = struct.unpack(">IIII", imgpath.read(16)) images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784) images = ((images / 255.) - .5) * 2 return images, labels # + # unzips mnist import sys import gzip import shutil if (sys.version_info > (3, 0)): writemode = 'wb' else: writemode = 'w' zipped_mnist = [f for f in os.listdir('./') if f.endswith('ubyte.gz')] for z in zipped_mnist: print(z) with gzip.GzipFile(z, mode='rb') as decompressed, open(z[:-3], writemode) as outfile: outfile.write(decompressed.read()) # - X_train, y_train = load_mnist('', kind='train') print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1])) X_test, y_test = load_mnist('', kind='t10k') print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1])) # + import matplotlib.pyplot as plt fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,) ax = ax.flatten() for i in range(10): img = X_train[y_train == i][0].reshape(28, 28) ax[i].imshow(img, cmap='Greys') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('images/12_5.png', dpi=300) plt.show() # + fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,) ax = ax.flatten() for i in range(25): img = X_train[y_train == 7][i].reshape(28, 28) ax[i].imshow(img, cmap='Greys') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('images/12_6.png', dpi=300) plt.show() # + import numpy as np np.savez_compressed('mnist_scaled.npz', X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test) # - mnist = np.load('mnist_scaled.npz') mnist.files # + X_train, y_train, X_test, y_test = [mnist[f] for f in ['X_train', 'y_train', 'X_test', 'y_test']] del mnist X_train.shape # + import numpy as np import sys class NeuralNetMLP(object): """ Feedforward neural network / Multi-layer perceptron classifier. Parameters ------------ n_hidden : int (default: 30) Number of hidden units. l2 : float (default: 0.) Lambda value for L2-regularization. No regularization if l2=0. (default) epochs : int (default: 100) Number of passes over the training set. eta : float (default: 0.001) Learning rate. shuffle : bool (default: True) Shuffles training data every epoch if True to prevent circles. minibatche_size : int (default: 1) Number of training samples per minibatch. seed : int (default: None) Random seed for initalizing weights and shuffling. Attributes ----------- eval_ : dict Dictionary collecting the cost, training accuracy, and validation accuracy for each epoch during training. """ def __init__(self, n_hidden=30, l2=0., epochs=100, eta=0.001, shuffle=True, minibatch_size=1, seed=None): self.random = np.random.RandomState(seed) self.n_hidden = n_hidden self.l2 = l2 self.epochs = epochs self.eta = eta self.shuffle = shuffle self.minibatch_size = minibatch_size def _onehot(self, y, n_classes): """Encode labels into one-hot representation Parameters ------------ y : array, shape = [n_samples] Target values. Returns ----------- onehot : array, shape = (n_samples, n_labels) """ onehot = np.zeros((n_classes, y.shape[0])) for idx, val in enumerate(y.astype(int)): onehot[val, idx] = 1. return onehot.T def _sigmoid(self, z): """Compute logistic function (sigmoid)""" return 1. / (1. + np.exp(-np.clip(z, -250, 250))) def _forward(self, X): """Compute forward propagation step""" # step 1: net input of hidden layer # [n_samples, n_features] dot [n_features, n_hidden] # -> [n_samples, n_hidden] z_h = np.dot(X, self.w_h) + self.b_h # step 2: activation of hidden layer a_h = self._sigmoid(z_h) # step 3: net input of output layer # [n_samples, n_hidden] dot [n_hidden, n_classlabels] # -> [n_samples, n_classlabels] z_out = np.dot(a_h, self.w_out) + self.b_out # step 4: activation output layer a_out = self._sigmoid(z_out) return z_h, a_h, z_out, a_out def _compute_cost(self, y_enc, output): """Compute cost function. Parameters ---------- y_enc : array, shape = (n_samples, n_labels) one-hot encoded class labels. output : array, shape = [n_samples, n_output_units] Activation of the output layer (forward propagation) Returns --------- cost : float Regularized cost """ L2_term = (self.l2 * (np.sum(self.w_h ** 2.) + np.sum(self.w_out ** 2.))) term1 = -y_enc * (np.log(output)) term2 = (1. - y_enc) * np.log(1. - output) cost = np.sum(term1 - term2) + L2_term return cost def predict(self, X): """Predict class labels Parameters ----------- X : array, shape = [n_samples, n_features] Input layer with original features. Returns: ---------- y_pred : array, shape = [n_samples] Predicted class labels. """ z_h, a_h, z_out, a_out = self._forward(X) y_pred = np.argmax(z_out, axis=1) return y_pred def fit(self, X_train, y_train, X_valid, y_valid): """ Learn weights from training data. Parameters ----------- X_train : array, shape = [n_samples, n_features] Input layer with original features. y_train : array, shape = [n_samples] Target class labels. X_valid : array, shape = [n_samples, n_features] Sample features for validation during training y_valid : array, shape = [n_samples] Sample labels for validation during training Returns: ---------- self """ n_output = np.unique(y_train).shape[0] # number of class labels n_features = X_train.shape[1] ######################## # Weight initialization ######################## # weights for input -> hidden self.b_h = np.zeros(self.n_hidden) self.w_h = self.random.normal(loc=0.0, scale=0.1, size=(n_features, self.n_hidden)) # weights for hidden -> output self.b_out = np.zeros(n_output) self.w_out = self.random.normal(loc=0.0, scale=0.1, size=(self.n_hidden, n_output)) epoch_strlen = len(str(self.epochs)) # for progress formatting self.eval_ = {'cost': [], 'train_acc': [], 'valid_acc': []} y_train_enc = self._onehot(y_train, n_output) # iterate over training epochs for i in range(self.epochs): # iterate over minibatches indices = np.arange(X_train.shape[0]) if self.shuffle: self.random.shuffle(indices) for start_idx in range(0, indices.shape[0] - self.minibatch_size + 1, self.minibatch_size): batch_idx = indices[start_idx:start_idx + self.minibatch_size] # forward propagation z_h, a_h, z_out, a_out = self._forward(X_train[batch_idx]) ################## # Backpropagation ################## # [n_samples, n_classlabels] sigma_out = a_out - y_train_enc[batch_idx] # [n_samples, n_hidden] sigmoid_derivative_h = a_h * (1. - a_h) # [n_samples, n_classlabels] dot [n_classlabels, n_hidden] # -> [n_samples, n_hidden] sigma_h = (np.dot(sigma_out, self.w_out.T) * sigmoid_derivative_h) # [n_features, n_samples] dot [n_samples, n_hidden] # -> [n_features, n_hidden] grad_w_h = np.dot(X_train[batch_idx].T, sigma_h) grad_b_h = np.sum(sigma_h, axis=0) # [n_hidden, n_samples] dot [n_samples, n_classlabels] # -> [n_hidden, n_classlabels] grad_w_out = np.dot(a_h.T, sigma_out) grad_b_out = np.sum(sigma_out, axis=0) # Regularization and weight updates delta_w_h = (grad_w_h + self.l2*self.w_h) delta_b_h = grad_b_h # bias is not regularized self.w_h -= self.eta * delta_w_h self.b_h -= self.eta * delta_b_h delta_w_out = (grad_w_out + self.l2*self.w_out) delta_b_out = grad_b_out # bias is not regularized self.w_out -= self.eta * delta_w_out self.b_out -= self.eta * delta_b_out ############# # Evaluation ############# # Evaluation after each epoch during training z_h, a_h, z_out, a_out = self._forward(X_train) cost = self._compute_cost(y_enc=y_train_enc, output=a_out) y_train_pred = self.predict(X_train) y_valid_pred = self.predict(X_valid) train_acc = ((np.sum(y_train == y_train_pred)).astype(np.float) / X_train.shape[0]) valid_acc = ((np.sum(y_valid == y_valid_pred)).astype(np.float) / X_valid.shape[0]) sys.stderr.write('\r%0*d/%d | Cost: %.2f ' '| Train/Valid Acc.: %.2f%%/%.2f%% ' % (epoch_strlen, i+1, self.epochs, cost, train_acc*100, valid_acc*100)) sys.stderr.flush() self.eval_['cost'].append(cost) self.eval_['train_acc'].append(train_acc) self.eval_['valid_acc'].append(valid_acc) return self # + n_epochs = 200 nn = NeuralNetMLP(n_hidden=100, l2=0.01, epochs=n_epochs, eta=0.0005, minibatch_size=100, shuffle=True, seed=1) nn.fit(X_train=X_train[:55000], y_train=y_train[:55000], X_valid=X_train[55000:], y_valid=y_train[55000:]) # + import matplotlib.pyplot as plt plt.plot(range(nn.epochs), nn.eval_['cost']) plt.ylabel('Cost') plt.xlabel('Epochs') #plt.savefig('images/12_07.png', dpi=300) plt.show()
My Implementation/Multi Layer Neural Network .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from sklearn.datasets import make_blobs from matplotlib import pyplot data, label = make_blobs(n_samples=20, n_features=2, centers=2 ) # 繪製樣本顯示 pyplot.scatter(data[:, 0], data[:, 1], c=label) pyplot.show() print(data[:,0], data[:,1], label) # -
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Aufgabe 25 import numpy as np import matplotlib.pyplot as plt from numpy.polynomial.polynomial import polyval import uncertainties.unumpy as unp from uncertainties.unumpy import nominal_values as noms from uncertainties.unumpy import std_devs as stds import pandas as pd # __a)__ Bestimme die Parameter mit der Methoder der kleinsten Quadrate: # + #read data x, y = np.genfromtxt('aufg_a.csv', delimiter = ',', unpack = True) #design matrix A = np.array([x**i for i in range(7)]).T #parameters with least square best_a = np.linalg.inv(A.T @ A) @ A.T @ y for i in range(7): print(f'a_{i} ~ {best_a[i]:.4f}') # - # Stelle das Ergebnis graphisch dar: xplot = np.linspace(x[0], x[-1], 100) plt.plot(xplot, polyval(xplot, best_a), label = 'Fit') plt.plot(x, y, '.', label = 'Daten') plt.xlabel('x') plt.ylabel('y') plt.legend() plt.show() # __b)__ Erstelle zunächst die Matrix $C$, mit der die numerische zweite Ableitung bestimmt wird: C = np.zeros((np.shape(A)[0], np.shape(A)[0])) np.fill_diagonal(C, -2) np.fill_diagonal(C[1:], 1) np.fill_diagonal(C[:, 1:], 1) C[0, 0] = -1 C[-1, -1] = -1 C # Gibt es dafür eine fertige Methode? # Stelle die Ergebnisse der Regularisierung für verschiedene $\lambda$ dar: # + plt.plot(x, y, '.', label = 'Daten') for lam in [0.1, 0.3, 0.7, 3, 10]: gamma = np.sqrt(lam) * C @ A best_a_reg = np.linalg.inv(A.T @ A + gamma.T @ gamma) @ A.T @ y plt.plot(xplot, polyval(xplot, best_a_reg), label = f'$\lambda = {lam}$') plt.legend() plt.show() # - # __c)__ # + #read data data = pd.read_csv('aufg_c.csv') x = data['x'] #calculate mean and error for y y = unp.uarray(data.drop(columns = 'x').T.mean(), data.drop(columns = 'x').T.std()) #weight matrix W = np.zeros((np.shape(A)[0], np.shape(A)[0])) np.fill_diagonal(W, 1 / stds(y)**2) # - #calculate parameters best_a_weight = np.linalg.inv(A.T @ W @ A) @ A.T @ W @ noms(y) # Stelle Ergebnisse in einem Plot dar: plt.plot(xplot, polyval(xplot, best_a_weight), label = 'Gewichteter Fit') plt.errorbar(x = x, y = noms(y), yerr = stds(y), label = 'Daten', marker = '.', linestyle = '') plt.xlabel('x') plt.ylabel('y') plt.legend() plt.show()
Blatt09/notebook_09.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Modeling and Simulation in Python # # Chapter 6 # # Copyright 2017 <NAME> # # License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) # # + # Configure Jupyter so figures appear in the notebook # %matplotlib inline # Configure Jupyter to display the assigned value after an assignment # %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * from pandas import read_html # - # ### Code from the previous chapter # # filename = 'data/World_population_estimates.html' tables = read_html(filename, header=0, index_col=0, decimal='M') table2 = tables[2] table2.columns = ['census', 'prb', 'un', 'maddison', 'hyde', 'tanton', 'biraben', 'mj', 'thomlinson', 'durand', 'clark'] un = table2.un / 1e9 un.head() census = table2.census / 1e9 census.head() # + t_0 = get_first_label(census) t_end = get_last_label(census) elapsed_time = t_end - t_0 p_0 = get_first_value(census) p_end = get_last_value(census) total_growth = p_end - p_0 annual_growth = total_growth / elapsed_time # - # ### System objects # We can rewrite the code from the previous chapter using system objects. system = System(t_0=t_0, t_end=t_end, p_0=p_0, annual_growth=annual_growth) # And we can encapsulate the code that runs the model in a function. def run_simulation1(system): """Runs the constant growth model. system: System object returns: TimeSeries """ results = TimeSeries() results[system.t_0] = system.p_0 for t in linrange(system.t_0, system.t_end): results[t+1] = results[t] + system.annual_growth return results # We can also encapsulate the code that plots the results. def plot_results(census, un, timeseries, title): """Plot the estimates and the model. census: TimeSeries of population estimates un: TimeSeries of population estimates timeseries: TimeSeries of simulation results title: string """ plot(census, ':', label='US Census') plot(un, '--', label='UN DESA') plot(timeseries, color='gray', label='model') decorate(xlabel='Year', ylabel='World population (billion)', title=title) # Here's how we run it. results = run_simulation1(system) plot_results(census, un, results, 'Constant growth model') # ## Proportional growth # Here's a more realistic model where the number of births and deaths is proportional to the current population. def run_simulation2(system): """Run a model with proportional birth and death. system: System object returns: TimeSeries """ results = TimeSeries() results[system.t_0] = system.p_0 for t in linrange(system.t_0, system.t_end): births = system.birth_rate * results[t] deaths = system.death_rate * results[t] results[t+1] = results[t] + births - deaths return results # I picked a death rate that seemed reasonable and then adjusted the birth rate to fit the data. system.death_rate = 0.01 system.birth_rate = 0.027 # Here's what it looks like. results = run_simulation2(system) plot_results(census, un, results, 'Proportional model') savefig('figs/chap03-fig03.pdf') # The model fits the data pretty well for the first 20 years, but not so well after that. # ### Factoring out the update function # `run_simulation1` and `run_simulation2` are nearly identical except the body of the loop. So we can factor that part out into a function. def update_func1(pop, t, system): print(t, pop) """Compute the population next year. pop: current population t: current year system: system object containing parameters of the model returns: population next year """ births = system.birth_rate * pop deaths = system.death_rate * pop return pop + births - deaths # The name `update_func` refers to a function object. help(update_func1) # Which we can confirm by checking its type. type(update_func1) # `run_simulation` takes the update function as a parameter and calls it just like any other function. def run_simulation(system, update_func): """Simulate the system using any update function. system: System object update_func: function that computes the population next year returns: TimeSeries """ results = TimeSeries() results[system.t_0] = system.p_0 for t in linrange(system.t_0, system.t_end): results[t+1] = update_func(results[t], t, system) return results # Here's how we use it. # + t_0 = get_first_label(census) t_end = get_last_label(census) p_0 = census[t_0] system = System(t_0=t_0, t_end=t_end, p_0=p_0, birth_rate=0.027, death_rate=0.01) # - results = run_simulation(system, update_func1) plot_results(census, un, results, 'Proportional model, factored') # Remember not to put parentheses after `update_func1`. What happens if you try? # **Exercise:** When you run `run_simulation`, it runs `update_func1` once for each year between `t_0` and `t_end`. To see that for yourself, add a print statement at the beginning of `update_func1` that prints the values of `t` and `pop`, then run `run_simulation` again. # ### Combining birth and death # Since births and deaths get added up, we don't have to compute them separately. We can combine the birth and death rates into a single net growth rate. def update_func2(pop, t, system): """Compute the population next year. pop: current population t: current year system: system object containing parameters of the model returns: population next year """ net_growth = system.alpha * pop return pop + net_growth # Here's how it works: # + system.alpha = system.birth_rate - system.death_rate results = run_simulation(system, update_func2) plot_results(census, un, results, 'Proportional model, combined birth and death') # - # ### Exercises # # **Exercise:** Maybe the reason the proportional model doesn't work very well is that the growth rate, `alpha`, is changing over time. So let's try a model with different growth rates before and after 1980 (as an arbitrary choice). # # Write an update function that takes `pop`, `t`, and `system` as parameters. The system object, `system`, should contain two parameters: the growth rate before 1980, `alpha1`, and the growth rate after 1980, `alpha2`. It should use `t` to determine which growth rate to use. Note: Don't forget the `return` statement. # # Test your function by calling it directly, then pass it to `run_simulation`. Plot the results. Adjust the parameters `alpha1` and `alpha2` to fit the data as well as you can. # # # + t_1980 = 1980 system.birth_rate_after_1980 = 0.029 system.death_rate_after_1980 = 0.0135 def update_func2(pop, t, system): # Growth rate before 1980 is going to be left the same because that is when the model fit the data the best if t in range(t_0, t_1980): births = system.birth_rate * pop deaths = system.death_rate * pop return pop + births - deaths if t in range(t_1980, t_end): births = system.birth_rate_after_1980 * pop deaths = system.death_rate_after_1980 * pop return pop + births - deaths # Running with the new update function: title = 'Proportional Model' results = run_simulation(system, update_func2) plot(results, label = "Population Model") plot(un, ':', label = "UN") plot(census, '--', label = "Census") decorate(xlabel='Year', ylabel='World population (billion)', title=title) # + # Solution goes here
code/chap06-mine.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [Return to Home Page](START_HERE.ipynb) # # RAPIDS # The RAPIDS suite of open source software libraries and APIs gives you the ability to execute end-to-end data science and analytics pipelines entirely on GPUs. Licensed under Apache 2.0, RAPIDS is incubated by NVIDIA® based on extensive hardware and data science science experience. RAPIDS utilizes NVIDIA CUDA® primitives for low-level compute optimization, and exposes GPU parallelism and high-bandwidth memory speed through user-friendly Python interfaces. # <img src="imgs/pipeline.png" height="800" width="800"> # #### Key Features? # - Hassle-Free Integration: Accelerate your Python data science toolchain with minimal code changes and no new tools to learn # - Top Model Accuracy: Increase machine learning model accuracy by iterating on models faster and deploying them more frequently # - Reduced Training Time: RAPIDS is primarily targeted for Data Scientist who are looking at improving performance of their experiments without losing out on accuracy. Drastically improve your productivity with near-interactive data science. # - Open Source: Customizable, extensible, interoperable - the open-source software is supported by NVIDIA and built on Apache Arrow. It can be donwloaded from [here](https://github.com/rapidsai) . Also RAPIDS is presnet as a container in NGC and can be downloaded from [here](https://ngc.nvidia.com/catalog/containers/nvidia:rapidsai:rapidsai) # # ## How is RAPIDS changing the landscape of Data Science? # #### Traditional Data Science Ecosystem # # <img src="imgs/datascience.png" height="800" width="800"> # # # #### RAPIDS Ecosystem # # <img src="imgs/rapids_gpu_stack.png" height="800" width="800"> # # You may notice the Arrow component in GPU memory. Arrow defines a language-independent columnar memory format for flat and hierarchical data, organized for efficient analytic operations on modern hardware like CPUs and GPUs. The Arrow memory format also supports zero-copy reads for lightning-fast data access without serialization overhead.The Apache Arrow format allows computational routines and execution engines to maximize their efficiency when scanning and iterating large chunks of data. In particular, the contiguous columnar layout enables vectorization using the latest SIMD (Single Instruction, Multiple Data) operations included in modern processors. # ## What does this series cover? # # This RAPIDS Bootcamp will be covering some interesting libraries that are easily integratable with the daily data science pipeline and accelerate computations for faster execution. Here are the modules in this tutorial: # # - [CuDF](CuDF/01-Intro_to_cuDF.ipynb) # - [CuML](CuML/01-LinearRegression-Hyperparam.ipynb) # - [Dask](Dask/01-Intro_to_Dask.ipynb) # - [Challenge](Challenge/Gene-Expression-Classification/Challenge.ipynb) # # The challenge is an extra module that you can try after learning the individual modules to test your knowledge. # For more information on RAPIDS, feel free to explore the official documentation here: # - https://rapids.ai/about.html # - https://github.com/rapidsai # ## Licensing # # This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0). # # # # [Return to Home Page](START_HERE.ipynb)
ai/RAPIDS/English/Python/jupyter_notebook/Introduction_To_Rapids.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="AuW-xg_bTsaF" # # Week 1: Using CNN's with the Cats vs Dogs Dataset # # Welcome to the 1st assignment of the course! This week, you will be using the famous `Cats vs Dogs` dataset to train a model that can classify images of dogs from images of cats. For this, you will create your own Convolutional Neural Network in Tensorflow and leverage Keras' image preprocessing utilities. # # You will also create some helper functions to move the images around the filesystem so if you are not familiar with the `os` module be sure to take a look a the [docs](https://docs.python.org/3/library/os.html). # # Let's get started! # + id="dn-6c02VmqiN" import os import zipfile import random import shutil import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from shutil import copyfile import matplotlib.pyplot as plt # + [markdown] id="bLTQd84RUs1j" # Download the dataset from its original source by running the cell below. # # Note that the `zip` file that contains the images is unzipped under the `/tmp` directory. # + id="3sd9dQWa23aj" colab={"base_uri": "https://localhost:8080/"} outputId="95953291-9ad8-4816-e747-68d36890f91e" # If the URL doesn't work, visit https://www.microsoft.com/en-us/download/confirmation.aspx?id=54765 # And right click on the 'Download Manually' link to get a new URL to the dataset # Note: This is a very large dataset and will take some time to download # !wget --no-check-certificate \ # "https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip" \ # -O "/tmp/cats-and-dogs.zip" local_zip = '/tmp/cats-and-dogs.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp') zip_ref.close() # + [markdown] id="e_HsUV9WVJHL" # Now the images are stored within the `/tmp/PetImages` directory. There is a subdirectory for each class, so one for dogs and one for cats. # + id="DM851ZmN28J3" colab={"base_uri": "https://localhost:8080/"} outputId="7a44cd9a-5b3f-4ba0-b7a1-52707cecfe79" source_path = '/tmp/PetImages' source_path_dogs = os.path.join(source_path, 'Dog') source_path_cats = os.path.join(source_path, 'Cat') # os.listdir returns a list containing all files under the given path print(f"There are {len(os.listdir(source_path_dogs))} images of dogs.") print(f"There are {len(os.listdir(source_path_cats))} images of cats.") # + [markdown] id="G7dI86rmRGmC" # **Expected Output:** # # ``` # There are 12501 images of dogs. # There are 12501 images of cats. # ``` # + [markdown] id="iFbMliudNIjW" # You will need a directory for cats-v-dogs, and subdirectories for training # and testing. These in turn will need subdirectories for 'cats' and 'dogs'. To accomplish this, complete the `create_train_test_dirs` below: # + cellView="code" id="F-QkLjxpmyK2" # Define root directory root_dir = '/tmp/cats-v-dogs' # Empty directory to prevent FileExistsError is the function is run several times if os.path.exists(root_dir): shutil.rmtree(root_dir) # GRADED FUNCTION: create_train_test_dirs def create_train_test_dirs(root_path): ### START CODE HERE # HINT: # Use os.makedirs to create your directories with intermediate subdirectories # Don't hardcode the paths. Use os.path.join to append the new directories to the root_path parameter os.makedirs(root_path + '/training/cats') os.makedirs(root_path + '/training/dogs') os.makedirs(root_path + '/testing/cats') os.makedirs(root_path + '/testing/dogs') ### END CODE HERE try: create_train_test_dirs(root_path=root_dir) except FileExistsError: print("You should not be seeing this since the upper directory is removed beforehand") # + id="5dhtL344OK00" colab={"base_uri": "https://localhost:8080/"} outputId="bcd376a9-bb3a-411a-de85-4b8d26a2855f" # Test your create_train_test_dirs function for rootdir, dirs, files in os.walk(root_dir): for subdir in dirs: print(os.path.join(rootdir, subdir)) # + [markdown] id="D7A0RK3IQsvg" # **Expected Output (directory order might vary):** # # ``` txt # /tmp/cats-v-dogs/training # /tmp/cats-v-dogs/testing # /tmp/cats-v-dogs/training/cats # /tmp/cats-v-dogs/training/dogs # /tmp/cats-v-dogs/testing/cats # /tmp/cats-v-dogs/testing/dogs # # ``` # + [markdown] id="R93T7HdE5txZ" # Code the `split_data` function which takes in the following arguments: # - SOURCE: directory containing the files # # - TRAINING: directory that a portion of the files will be copied to (will be used for training) # - TESTING: directory that a portion of the files will be copied to (will be used for testing) # - SPLIT SIZE: to determine the portion # # The files should be randomized, so that the training set is a random sample of the files, and the test set is made up of the remaining files. # # For example, if `SOURCE` is `PetImages/Cat`, and `SPLIT` SIZE is .9 then 90% of the images in `PetImages/Cat` will be copied to the `TRAINING` dir # and 10% of the images will be copied to the `TESTING` dir. # # All images should be checked before the copy, so if they have a zero file length, they will be omitted from the copying process. If this is the case then your function should print out a message such as `"filename is zero length, so ignoring."`. **You should perform this check before the split so that only non-zero images are considered when doing the actual split.** # # # Hints: # # - `os.listdir(DIRECTORY)` returns a list with the contents of that directory. # # - `os.path.getsize(PATH)` returns the size of the file # # - `copyfile(source, destination)` copies a file from source to destination # # - `random.sample(list, len(list))` shuffles a list # + cellView="code" id="zvSODo0f9LaU" # GRADED FUNCTION: split_data def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE): ### START CODE HERE files = [] for fname in os.listdir(SOURCE): file = SOURCE + fname if os.path.getsize(file) > 0: files.append(fname) else: print(fname + ' is zero length, so ignoring') size_train = int(len(files) * SPLIT_SIZE) size_test = int(len(files) - size_train) shuffle_data = random.sample(files, len(files)) train_data = shuffle_data[0:size_train] test_data = shuffle_data[-size_test:] for file in train_data: copyfile(SOURCE + file, TRAINING + file) for file in test_data: copyfile(SOURCE + file, TESTING + file) ### END CODE HERE # + id="FlIdoUeX9S-9" colab={"base_uri": "https://localhost:8080/"} outputId="710851f1-b064-48bf-c28b-916f0fae840f" # Test your split_data function # Define paths CAT_SOURCE_DIR = "/tmp/PetImages/Cat/" DOG_SOURCE_DIR = "/tmp/PetImages/Dog/" TRAINING_DIR = "/tmp/cats-v-dogs/training/" TESTING_DIR = "/tmp/cats-v-dogs/testing/" TRAINING_CATS_DIR = os.path.join(TRAINING_DIR, "cats/") TESTING_CATS_DIR = os.path.join(TESTING_DIR, "cats/") TRAINING_DOGS_DIR = os.path.join(TRAINING_DIR, "dogs/") TESTING_DOGS_DIR = os.path.join(TESTING_DIR, "dogs/") # Empty directories in case you run this cell multiple times if len(os.listdir(TRAINING_CATS_DIR)) > 0: for file in os.scandir(TRAINING_CATS_DIR): os.remove(file.path) if len(os.listdir(TRAINING_DOGS_DIR)) > 0: for file in os.scandir(TRAINING_DOGS_DIR): os.remove(file.path) if len(os.listdir(TESTING_CATS_DIR)) > 0: for file in os.scandir(TESTING_CATS_DIR): os.remove(file.path) if len(os.listdir(TESTING_DOGS_DIR)) > 0: for file in os.scandir(TESTING_DOGS_DIR): os.remove(file.path) # Define proportion of images used for training split_size = .9 # Run the function # NOTE: Messages about zero length images should be printed out split_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size) split_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size) # Check that the number of images matches the expected output print(f"\n\nThere are {len(os.listdir(TRAINING_CATS_DIR))} images of cats for training") print(f"There are {len(os.listdir(TRAINING_DOGS_DIR))} images of dogs for training") print(f"There are {len(os.listdir(TESTING_CATS_DIR))} images of cats for testing") print(f"There are {len(os.listdir(TESTING_DOGS_DIR))} images of dogs for testing") # + [markdown] id="hvskJNOFVSaz" # **Expected Output:** # # ``` # 666.jpg is zero length, so ignoring. # 11702.jpg is zero length, so ignoring. # ``` # # ``` # There are 11250 images of cats for training # There are 11250 images of dogs for training # There are 1250 images of cats for testing # There are 1250 images of dogs for testing # ``` # + [markdown] id="Zil4QmOD_mXF" # Now that you have successfully organized the data in a way that can be easily fed to Keras' `ImageDataGenerator`, it is time for you to code the generators that will yield batches of images, both for training and validation. For this, complete the `train_val_generators` function below. # # Something important to note is that the images in this dataset come in a variety of resolutions. Luckily, the `flow_from_directory` method allows you to standarize this by defining a tuple called `target_size` that will be used to convert each image to this target resolution. **For this exercise, use a `target_size` of (150, 150)**. # # **Note:** So far, you have seen the term `testing` being used a lot for referring to a subset of images within the dataset. In this exercise, all of the `testing` data is actually being used as `validation` data. This is not very important within the context of the task at hand but it is worth mentioning to avoid confusion. # + cellView="code" id="fQrZfVgz4j2g" # GRADED FUNCTION: train_val_generators def train_val_generators(TRAINING_DIR, VALIDATION_DIR): ### START CODE HERE # Instantiate the ImageDataGenerator class (don't forget to set the rescale argument) train_datagen = ImageDataGenerator(rescale= 1. /255. ) # Pass in the appropiate arguments to the flow_from_directory method train_generator = train_datagen.flow_from_directory(directory=TRAINING_DIR, batch_size=300, class_mode='binary', target_size=(150, 150)) # Instantiate the ImageDataGenerator class (don't forget to set the rescale argument) validation_datagen = ImageDataGenerator(rescale= 1./255) # Pass in the appropiate arguments to the flow_from_directory method validation_generator = validation_datagen.flow_from_directory(directory=VALIDATION_DIR, batch_size=250, class_mode='binary', target_size=(150, 150)) ### END CODE HERE return train_generator, validation_generator # + id="qM7FxrjGiobD" colab={"base_uri": "https://localhost:8080/"} outputId="16a0c411-955f-42d4-ef77-2c2f21a48040" # Test your generators train_generator, validation_generator = train_val_generators(TRAINING_DIR, TESTING_DIR) # + [markdown] id="tiPNmSfZjHwJ" # **Expected Output:** # # ``` # Found 22498 images belonging to 2 classes. # Found 2500 images belonging to 2 classes. # ``` # # + [markdown] id="TI3oEmyQCZoO" # One last step before training is to define the architecture of the model that will be trained. # # Complete the `create_model` function below which should return a Keras' `Sequential` model. # # Aside from defining the architecture of the model, you should also compile it so make sure to use a `loss` function that is compatible with the `class_mode` you defined in the previous exercise, which should also be compatible with the output of your network. You can tell if they aren't compatible if you get an error during training. # # **Note that you should use at least 3 convolution layers to achieve the desired performance.** # + cellView="code" id="oDPK8tUB_O9e" colab={"base_uri": "https://localhost:8080/"} outputId="671909a2-ae7f-4194-e3df-3bba66f7ee44" # GRADED FUNCTION: create_model def create_model(): # DEFINE A KERAS MODEL TO CLASSIFY CATS V DOGS # USE AT LEAST 3 CONVOLUTION LAYERS ### START CODE HERE model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150,150,3)), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(32, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='binary_crossentropy', metrics=['accuracy']) ### END CODE HERE return model model = create_model() model.summary() # + [markdown] id="SMFNJZmTCZv6" # Now it is time to train your model! # # **Note:** You can ignore the `UserWarning: Possibly corrupt EXIF data.` warnings. # + id="5qE1G6JB4fMn" colab={"base_uri": "https://localhost:8080/"} outputId="65df9d04-69f8-41a9-d730-2542c06072ba" # Get the untrained model model = create_model() # Train the model # Note that this may take some time. history = model.fit(train_generator, epochs=15, verbose=1, validation_data=validation_generator) # + [markdown] id="VGsaDMc-GMd4" # Once training has finished, you can run the following cell to check the training and validation accuracy achieved at the end of each epoch. # # **To pass this assignment, your model should achieve a training accuracy of at least 95% and a validation accuracy of at least 80%**. If your model didn't achieve these thresholds, try training again with a different model architecture and remember to use at least 3 convolutional layers. # + id="MWZrJN4-65RC" colab={"base_uri": "https://localhost:8080/", "height": 546} outputId="f278d57b-d17c-4ef3-ab92-2e791d5d940b" #----------------------------------------------------------- # Retrieve a list of list results on training and test data # sets for each training epoch #----------------------------------------------------------- acc=history.history['accuracy'] val_acc=history.history['val_accuracy'] loss=history.history['loss'] val_loss=history.history['val_loss'] epochs=range(len(acc)) # Get number of epochs #------------------------------------------------ # Plot training and validation accuracy per epoch #------------------------------------------------ plt.plot(epochs, acc, 'r', "Training Accuracy") plt.plot(epochs, val_acc, 'b', "Validation Accuracy") plt.title('Training and validation accuracy') plt.show() print("") #------------------------------------------------ # Plot training and validation loss per epoch #------------------------------------------------ plt.plot(epochs, loss, 'r', "Training Loss") plt.plot(epochs, val_loss, 'b', "Validation Loss") plt.show() # + [markdown] id="NYIaqsN2pav6" # You will probably encounter that the model is overfitting, which means that it is doing a great job at classifying the images in the training set but struggles with new data. This is perfectly fine and you will learn how to mitigate this issue in the upcoming week. # # Before downloading this notebook and closing the assignment, be sure to also download the `history.pkl` file which contains the information of the training history of your model. You can download this file by running the cell below: # + id="yWcrc9nZTsHj" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="30c408a1-ecff-4896-c1fe-186215eab4a4" def download_history(): import pickle from google.colab import files with open('history.pkl', 'wb') as f: pickle.dump(history.history, f) files.download('history.pkl') download_history() # + [markdown] id="xyoA_wIxNkuX" # You will also need to submit this notebook for grading. To download it, click on the `File` tab in the upper left corner of the screen then click on `Download` -> `Download .ipynb`. You can name it anything you want as long as it is a valid `.ipynb` (jupyter notebook) file. # + [markdown] id="joAaZSWWpbOI" # **Congratulations on finishing this week's assignment!** # # You have successfully implemented a convolutional neural network that classifies images of cats and dogs, along with the helper functions needed to pre-process the images! # # **Keep it up!**
C2/W1/assignment/C2W1_Assignment (1).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 标准输入输出 # # python提供了标准输入输出的高级接口 # # + `input(prompt:str='')->str` # # `prompt`是提示文本 # # # + `print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)` # # + `file`: 默认为标准输出,但可以指定一个文件对象用于输出 # + `sep`: 字符串插入值之间,默认为空格。 # + `end`: 字符串附加在最后一个值之后,默认换行符。 # + `flush`: 是否强制冲洗替换流。 # # 这两个接口一般也是我们从helloworld开始就接触到的东西.下面的例子展示了它的用法: # + # %%writefile src/stdio/helloworld.py def main(): who = input("你是谁?") print(f"hello world {who}!") if __name__ == "__main__": main() # - # ## 标准输入的低级接口 # # 实际上上面这两个接口都是高级接口,是通过封装`sys`模块下的对应接口实现的. # # 标准输入的低级接口是`sys.stdin`,其有两个方法: # # + `sys.stdin.read()`:读取数据`ctrl+d`是结束输入.`enter`是换行.故可以接受多行输入 # # + `sys.stdin.readline()`:会将标准输入全部获取,包括末尾的'\n',因此用len计算长度时是把换行符'\n'算进去了的.遇到`enter`结束,注意它是从第一个回车开始的 # # 而`sys.stdin`本身与可读文件对象具有相同的接口. # + # %%writefile src/stdio/helloworld_stdin.py import sys def main(): print("你是谁?") sys.stdin.readline() while True: line = sys.stdin.readline() if not line: break who = line.strip() print(f"hello world {who}!") if __name__ == "__main__": main() # - # ## 标准输出的低级接口 # # 标准输出的低级接口是`sys.stdout`,它是一个可写的文件对象.和它类似的是`sys.stderr`.`print`函数实际上就是调用的他们的`write`方法,并且`print`中也可以通过`file`参数来指定一个有`write`接口的可写文件对象来写入输出. # + # %%writefile src/stdio/helloworld_stdout.py import sys def main(): who = input("你是谁?") sys.stdout.write(f"hello world {who}!") if __name__ == "__main__": main()
标准输入输出.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # Total Variation denoising # =========================== # # This example demoes Total-Variation (TV) denoising on a Racoon face. # # # + import numpy as np import scipy import scipy.misc import matplotlib.pyplot as plt try: from skimage.restoration import denoise_tv_chambolle except ImportError: # skimage < 0.12 from skimage.filters import denoise_tv_chambolle f = scipy.misc.face(gray=True) f = f[230:290, 220:320] noisy = f + 0.4*f.std()*np.random.random(f.shape) tv_denoised = denoise_tv_chambolle(noisy, weight=10) plt.figure(figsize=(12, 2.8)) plt.subplot(131) plt.imshow(noisy, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('noisy', fontsize=20) plt.subplot(132) plt.imshow(tv_denoised, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('TV denoising', fontsize=20) tv_denoised = denoise_tv_chambolle(noisy, weight=50) plt.subplot(133) plt.imshow(tv_denoised, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('(more) TV denoising', fontsize=20) plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0, left=0, right=1) plt.show()
_downloads/plot_face_tv_denoise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- def makeMatrix(): matrix = [] file = open("covid19_tweets.csv", "r") count = 0 for line in file: if count == 0: print(line) else: line = line.strip() row = line.split(",") matrix.append(row) count += 1 print("Read ", count, "lines.\n") file.close() return matrix import pandas as pd df = pd.read_csv("covid19_tweets.csv") df.head() import re def clean(s): s=s.replace(r'<lb>',"\n") s=re.sub(r'<br */*>', "\n", s) s=s.replace("&lt;","<").replace("&gt;", ">").replace("&amp;","&") s=s.replace("&amp;","&") s=re.sub(r'\(*https*://[^\)]*\)*',"", s) s=re.sub(r'_+',' ',s) s=re.sub(r'"+','"',s) return str(s) df["text_clean"]='' for i, row in df.iterrows(): df.at[i,"text_clean"]= clean(row.text) import pandas as pd data = pd.read_csv("covid19_tweets.csv") data.head() import matplotlib.pyplot as plt import numpy as np np.random.seed(10) col1=data['user_followers'] col2=data['user_friends'] col3=data['user_favourites'] myData= [col1,col2,col3] fig=plt.figure(figsize=(10,7)) ax=fig.add_axes([0,0,1,1]) bp=ax.boxplot(myData) plt.show() data.describe() df,count()
1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: drlnd # language: python # name: drlnd # --- # # Navigation # # --- # # You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started! # # ### 1. Start the Environment # # Please run the next code cell without making any changes. # + from unityagents import UnityEnvironment import numpy as np from collections import deque import torch # modify file_name value. env = UnityEnvironment(file_name=r"D:\deep-reinforcement-learning\p1_navigation\Banana_Windows_x86_64\Banana.exe") # - # Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] # create agent brain from navigation_agent import Agent agent = Agent(state_size=37, action_size=4, seed=0) # # 2. Setup variables # # Run the code cell below to create setup variables. #setup n_episodes = 1000 # number of training episodes scores = [] # list containing scores from each episode scores_window = deque(maxlen=100) # last 100 scores eps = 1 # starting value of epsilon eps_end = 0.01 # minimum value of epsilon eps_decay = eps_end**(1/n_episodes) # decreasing epsilon # ### 3. Take Actions in the Environment # # In the next code cell, a Python API is used to control the agent and receive feedback from the environment. # # Note that **in this coding environment, you will not be able to watch the agent while it is training**, and you should set `train_mode=True` to restart the environment. for i_episode in range(n_episodes): env_info = env.reset(train_mode=True)[brain_name] # reset the environment score = 0 # initialize the score while True: state = env_info.vector_observations[0] # get the current state action = agent.act(state, eps) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished agent.step(state, action, reward, next_state, done) # Save experience and learn score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished, done with 300 time stamps break scores_window.append(score) # save most recent score scores.append(score) # save most recent score eps = max(eps_end, eps_decay*eps) # decrease epsilon print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="") if np.mean(scores_window)>=13.0: print('\nEnvironment solved in episode {:d}!\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'model.pth') break # 4. Watch an Agent # In the next code cell, you will load the trained weights from file to watch an agent! agent.qnetwork_local.load_state_dict(torch.load('model.pth')) env_info = env.reset(train_mode=False)[brain_name] # reset the environment score = 0 # initialize the score while True: state = env_info.vector_observations[0] # get the current state action = agent.act(state, eps) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished agent.step(state, action, reward, next_state, done) # Save experience and learn score += reward # update the score state = next_state # roll over the state to next time step print('\rScore {}'.format(score), end="") if done: # exit loop if episode finished, done with 300 time stamps break # When finished, you can close the environment. env.close()
Navigation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # if a line start with "!", this is a shell command # !which python # same as the command line, you should see the python interpreter are from the environment directory # + # This whole notebook rely on the python interpreter above, # Jupyter Notebook is just and interactive GUI of the python interpreter # - import pandas as pd print('Successful import means pandas is installed') pd.__version__
analysis/hello_world/check_package_installation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # condition operations a = 10 b = 11 a == b a != b a > b a < b a <= b a >= b not True not a == b # # Chained Comparions Operators c = 10 a == b and b == c a == b or a == c # # Python Statements # - if else # - if elif else # - for # - while if(a == b ){ print('something') } if a == b : print(a) else: print(b) # + a = 10 if a%2==0: print("it is an Even number") else : print('it is an odd number') # - # Multiple branche of if else that is elif # + a = 10 b = 10 c = 8 if a == c : print(a) elif a ==b : print("it is in the elif block",b) else: print("something") # + score = 73 if score >=0 and score <= 50: print("Pass with average marks") elif score>=51 and score <= 75: print("passed with more than average marks") else: print("passed with highes marks") # - # # For loops my_list = [1,2,3,4,5,6,7,8,9,10] for num in my_list: print(num) for num in my_list: print(num) else : print("all items are executed") for num in my_list: if num%2==0: print('it is even number ', num) else: print("id is odd number") # + my_sting = " this is some string" for letter in my_sting: print(letter) # + t = (1,2,3,4,5) for tup in t: print(tup) # + list_of_tuple = [(1,2),(3,4),(5,6),(7,8)] for (t1,t2) in list_of_tuple: print(t1,t2) # + d = {'k1':3,'k2':1,'k3':2} for item in d : print(item) # - #unpack a dict for k,v in d.items(): print(k) print(v) list(d.keys()) list(d.values()) sorted(d.values()) start=1 end=100 for num in range(start,end+1) : if num > 1: for i in range (2,num) : if (num % i) == 0 : break else : print(num) # # While Loop # + x = 0 while x < 5: print('x is currently : ',x) print('x is still less than 5') x +=1 else: print(" the code block is executed and now i can close all the called function here") # + x = 0 while x < 5: print('x is currently : ',x) print('x is still less than 5') x +=1 if x ==3: print('x==3') break else: print(" the code block is executed and now i can close all the called function here") # + x = 0 while x < 5: x +=1 if x ==3: continue print('x is currently : ',x) print('x is still less than 5') else: print(" the code block is executed and now i can close all the called function here") # - a= 10 b = 1 while a>b: pass my_list = range(0,50) for num in range(0,50): print(num) my_list = list(range(1,15)) min(my_list) max(my_list) # + # Random from random import shuffle shuffle(my_list) # - my_list # + #Random number from the range from random import randint randint(0,99) # + # how to take an input in Python input('Enter any number in the box which will appear : ') # - int(input('Enter any number in the box which will appear : '))
Condition Operations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # pandas demo # # This is just a quick run through of some of the functionality in pandas import pandas as pd # ## pandas IO, basic types df = pd.read_csv("hospital_charges.csv") type(df) type(df[" Total Discharges "]) type(df.index) df.index # ## Data Inspection df.head() df.iloc[0] # by integer index of index df.loc[0] # by label of index pd.value_counts(df["DRG Definition"]) # %matplotlib inline df["DRG Definition"].apply(lambda x: int(x[:3])).hist(bins=20) # + ## Selecting data with Boolean indexes # - kidney = df[df["DRG Definition"].apply(lambda x: int(x[:3]) == 699)] kidney.head() # ## Data transformations and aggregate query patterns # + import re def float_dollars(dollarstr): """Returns first numeric value after $ currency indicator.""" matched = re.search('(?<=\$)\d+\.\d+', dollarstr) return float(matched.group(0)) # - kidney[" Average Covered Charges "].apply(float_dollars).groupby(kidney["Provider State"]).mean() kidney[" Average Covered Charges "].apply(float_dollars).hist(bins=25) # ## What about preprocessing? pd.get_dummies(df["DRG Definition"]) # # What should I look at next???
pandas_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline # + from bs4 import BeautifulSoup import requests import pandas as pd from PIL import Image import json from collections import Counter from joblib import Parallel, delayed import logging logging.basicConfig( filename='wikimedia-scraper.log', format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) # + wikimedia_url = 'https://commons.wikimedia.org/wiki/Category:Beach_vegetation' wikimedia_api_url = 'https://commons.wikimedia.org/w/api.php' res = requests.get(wikimedia_url) b = BeautifulSoup(res.content, 'html.parser') # - thumbs = b.find_all('div', {'class': 'thumb'}) print(len(thumbs)) print(thumbs[12].find_all('a', {'class': 'image'})[0].attrs['href'].split('/')[-1]) print(thumbs[12].find_all('img')[0].attrs['srcset'].split()[0]) # + iiprop = [ # 'timestamp', 'url', # 'size', # 'mime', # 'mediatype', # 'thumb', 'extmetadata' ] iiextmetadatafilter = [ # 'DateTime', # 'DateTimeOriginal', 'ObjectName', 'ImageDescription', # 'License', # 'LicenseShortName', # 'UsageTerms', # 'LicenseUrl', # 'Credit', # 'Artist', # 'AuthorCount', # 'GPSLatitude', # 'GPSLongitude', 'Permission', # 'Attribution', # 'AttributionRequired', # 'NonFree', # 'Restrictions', # 'DeletionReason' ] titles = [ # 'File:Beach_veget_111029-20390_bml.jpg', # 'File:Looking_west_along_Luce_sands_-_geograph.org.uk_-_560895.jpg', # 'File:Dog_weight_pull.jpg', # 'File:ZOO_%C3%9Ast%C3%AD_n_L_-_pavilon_opic_14.jpg' # 'File:ZOO_Ústí_n_L_-_pavilon_opic_14.jpg', # 'File:ZOO Ústí n L - pavilon opic 03.jpg'.replace(' ', '_'), # 'File:Bundesarchiv Bild 183-1982-0623-026, Leipzig, Zoo, Affenhaus.jpg'.replace(' ', '_'), # 'File:Cultural_Properties_and_Touring_for_Building_Numbering_in_South_Korea_(Aquarium)_(Example_2).png', 'File:" Our Lady of Guadalupe, Xcaret Eco Park ".jpg' ] payload = { 'action': 'query', 'format': 'json', 'iiextmetadatafilter': '|'.join(iiextmetadatafilter), 'iiextmetadatalanguage': 'en', 'iiprop': '|'.join(iiprop), 'maxage': '300', 'prop': 'imageinfo', 'smaxage': '300', 'titles': '|'.join(titles), # 'File:Looking_west_along_Luce_sands_-_geograph.org.uk_-_560895.jpg', 'uselang': 'content' } # - res = requests.get(wikimedia_api_url, params=payload) data = res.json() print json.dumps(data) # + # 'https://commons.wikimedia.org/w/index.php?search=clouds&title=Special%3ASearch&profile=advanced&fulltext=1&ns6=1&ns14=1' # 'https://commons.wikimedia.org/w/index.php?search=clouds&title=Special%3ASearch&profile=advanced&fulltext=1&ns6=1&ns14=1' # 'https://commons.wikimedia.org/w/index.php?title=Special:Search&limit=50&offset=50&ns6=1&ns14=1&search=clouds' # '''title:Special:Search # limit:50 # offset:50 # ns6:1 # ns14:1 # search:clouds''' # + N_THREADS = 6 image_files = pd.Series() errors_cat_ref = pd.Series() cat_refs = Counter() # parallel = Parallel(n_jobs=N_THREADS) def scrape_category(cat_ref, download_image_size=256): ''' cat_ref should be in the form of: u'/wiki/Category:Flora_of_Kozhikode_Beach' ''' if len(image_files) % 10000 == 0: image_files.to_hdf('image_metadata_files_parallel.hdf', 'image_files') errors_cat_ref.to_hdf('image_metadata_files_parallel.hdf', 'errors_cat_ref') pd.Series(cat_refs).to_hdf('image_metadata_files_parallel.hdf', 'cat_refs') logging.info(u'Saving {} data...'.format(len(image_files))) try: logging.info(u'cat_ref: {}'.format(cat_ref)) cat_refs.update([cat_ref]) wikimedia_url = 'https://commons.wikimedia.org' + cat_ref retry_count = 0 while retry_count < 10: try: response = requests.get(wikimedia_url) break except requests.exceptions.ConnectionError: retry_count += 1 if retry_count >= 10: raise(ValueError('retries exceeded: {}'.format(cat_ref))) b = BeautifulSoup(response.content, 'html.parser') cat_trees = b.find_all('div', {'class': 'CategoryTreeSection'}) _cat_refs = [cat_tree.find('a').attrs['href'] for cat_tree in cat_trees] for cat_ref in _cat_refs: scrape_category(cat_ref) # res = parallel(delayed(scrape_category)(cat_ref) for cat_ref in _cat_refs) thumbs = b.find_all('div', {'class': 'thumb'}) for thumb in thumbs: try: thumb_link = thumb.find('img').attrs['srcset'].split()[0] except KeyError: # (KeyError, "'srcset'") -> thumb is likely a video thumbnail continue except Exception as e: raise(e) # Example thumb link: https://upload.wikimedia.org/wikipedia/commons/thumb/a/a8/Sulawesi_Panua_dune_trsr_DSCN0853_v1.JPG/180px-Sulawesi_Panua_dune_trsr_DSCN0853_v1.JPG thumb_link = thumb_link.replace('180px', '{}px'.format(download_image_size)) thumb_name = thumb.find('img').attrs['alt'] logging.info(u'thumb_link: {} $$$ thumb_name: {}'.format(thumb_link, thumb_name)) image_files[thumb_name] = thumb_link except Exception as e: error_key = u'{}:{}'.format(type(e), e.__str__()) logging.error(u'error_key: {} $$$ cat_ref: {}'.format(error_key, cat_ref)) if error_key in errors_cat_ref: errors_cat_ref[error_key].add(cat_ref) else: errors_cat_ref[error_key] = set([cat_ref]) # - # %%time scrape_category('/wiki/Category:Animals') # + import re with open('sample.log') as fl: data = [] for line in fl: line = line.strip() if not line: continue thumb_link, thumb_name = re.findall('thumb_link: (.*) \$\$\$ thumb_name: (.*)', line.split(': INFO : ')[-1])[0] data.append(dict(thumb_link=thumb_link, thumb_name=thumb_name)) data = pd.DataFrame(data)
src/scraper/Wikimedia Scraper-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # This notebook was used for development only, all the functional logic is already embbeded in dataset_loading notebook # # importing libraries # + import pandas as pd from tqdm import tqdm import re import emoji import unicodedata as ud tqdm.pandas() # - # # loading data and intial inspection df=pd.read_csv("../data/interim/out.csv",sep="|") df["tweet_len"]=df["tweet"].apply(lambda x: len(x)) # + def filter_text(text:str): flags={'U+1F1E6', 'U+1F1E7', 'U+1F1E9', 'U+1F1EA', 'U+1F1EC', 'U+1F1ED', 'U+1F1EE', 'U+1F1EF', 'U+1F1F0', 'U+1F1F1', 'U+1F1F2', 'U+1F1F3', 'U+1F1F4', 'U+1F1F5', 'U+1F1F6', 'U+1F1F7', 'U+1F1F8', 'U+1F1F9', 'U+1F1FC', 'U+1F1FE', 'U+1F1FF'} # remove emojis except for flags text =''.join(char for char in text if (char in flags or not emoji.is_emoji(char))) # remove mentions text = re.sub("@[A-Za-z0-9_]+", "", text) # remove links text = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', text) # remove hashes and keep the words text = text.replace("#", " ").replace("_", " ") # remove all english characters text = re.sub('[a-zA-Z0-9]+', '', text) # replace multiple spaces with one space text = re.sub(' +', ' ',text) # remove all punctionations and all digits (arabic and english) text =''.join(char for char in text if not ( ud.category(char).startswith('P') or ud.category(char).startswith('Nd'))) return text # - df["clean"]=df["tweet"].progress_apply(filter_text) df # + def remove_emoji(text): text =''.join(char for char in text if any ([not emoji.is_emoji(char),char in flags])) return text # - def remove_punct(text): text =''.join(char for char in text if not ( ud.category(char).startswith('P') or ud.category(char).startswith('Nd'))) text = re.sub('[a-zA-Z0-9]+', '', text) return text # + def remove_links(text): text = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '', text) return text # + df["tweet_clean"] = df["tweet"].progress_apply(remove_emoji) df["tweet_clean"] = df["tweet_clean"].progress_apply(lambda tweet: re.sub("@[A-Za-z0-9_]+", "", tweet)) # remove mentions df["tweet_clean"] = df["tweet_clean"].progress_apply(lambda tweet: re.sub(' +', ' ',tweet)) df["tweet_clean"] = df["tweet_clean"].progress_apply(remove_links) df["tweet_clean"] = df["tweet_clean"].progress_apply(lambda tweet: tweet.replace("#", " ").replace("_", " ")) # split hashtags df["tweet_clean"] = df["tweet_clean"].progress_apply(remove_punct) # - text="\n".join(df["tweet"]) with open("full tweets.txt","w") as f: f.write(text) # + text="\n".join(df["tweet_clean"]) with open("full tweets clean.txt","w") as f: f.write(text)
notebooks/data_filteration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Example workflow using ANUGA unstructured grid # ### <NAME> and <NAME>, July 2020 # # This workbook provides an example workflow for running particles on an unstructured model output (i.e. flow variables are from a non-Cartesian grid). We make use of several of the geospatial functions in `particle_track.py` and others in `routines.py`, in order to show how to grid hydrodynamic input files, convert UTM coordinates into (and out of) the array coordinates used in routing the particles, as well as how to compute exposure times to a region of interest. Hopefully this example in conjunction with other examples can provide information on how users can adapt these codes to their use-case. # # To demonstrate this functionality, we make use of outputs from the hydrodynamic model ANUGA (github.com/GeoscienceAustralia/anuga_core/), which solves the 2D shallow-water equations on a triangular mesh. We've extracted outputs from a previous example model run and included these as text files in the repository, so as to avoid importing any dependencies not required by this distribution. If the user is also using ANUGA flow-fields, there is a a commented-out block of code below demonstrating how we extracted the input files for use in this workbook. # ### Import necessary dependencies import numpy as np import scipy import matplotlib # %matplotlib inline from matplotlib import pyplot as plt import json import dorado import dorado.particle_track as pt # ### Load in model outputs # # If we were starting directly from an ANUGA output file, this is where we would import the outputs from the model run. We have included these files directly with the distribution, but for anyone interested in repeating the steps we used to generate these files, uncomment the block of code below. # # Here, `path2file` should point to the ANUGA output file (e.g. `./model_output.sww`). This output is a NetCDF file with flow variables (e.g. `depth`, `xmom`, `stage`) listed by triangle index, along with the centroid coordinates (`x`, `y`) of that triangle. In our case, those coordinates are in meters UTM, which will be relevant later. # + # # Import anuga to access functions (NOTE: Anuga requires Python 2.7!) # import anuga # # Folder name of run to analyze: # path2file = 'examples/example_model.sww' # # Extract the files from NetCDF using Anuga's `get_centroids` function: # swwvals = anuga.utilities.plot_utils.get_centroids(path2file, timeSlices = 'last') # # Query values: time, x, y, stage, elev, height, xmom, ymom, xvel, yvel, friction, vel, etc # # Here, since we are only interested in saving variables, we migrate variables to a dictionary: # # Make sure to filter out NaN's before converting to lists, if there are any # unstructured = dict() # unstructured['x'] = swwvals.x.tolist() # unstructured['y'] = swwvals.y.tolist() # unstructured['depth'] = swwvals.height[0].tolist() # unstructured['stage'] = swwvals.stage[0].tolist() # unstructured['qx'] = swwvals.xmom[0].tolist() # unstructured['qy'] = swwvals.ymom[0].tolist() # # And then we save this dictionary into a json (text) file for later import # json.dump(unstructured, open('unstructured_model.txt', 'w')) # # This generates the file imported in this workbook # - # Here, we will skip the above step and just import the `unstructured_model.txt` dictionary. # # **Note**: We have chosen to save/import the variables `depth`, `stage`, `qx`, and `qy` in this application. However, we could have chosen to save and use the fields `topography`, `u`, and `v` (or in ANUGA's terminology, `elev`, `xvel`, and `yvel`). The particle tracking code accepts any of these inputs, as long as you provide enough information to calculate the water surface slope, depth of the water column, and the two components of inertia. unstructured = json.load(open('unstructured_model.txt')) # ### Convert data and coordinates for particle routing # # Now that we have the data we need, we can convert it into the format needed by `dorado`. This will include gridding the hydrodynamic outputs and transforming our geospatial coordinates into "array index" coordinates. # # First, let's combine our $(x,y)$ coordinates into a list of tuples. This is the expected format for coordinates in the following functions. # + # Use list comprehension to convert into tuples coordinates = [(unstructured['x'][i], unstructured['y'][i]) for i in list(range(len(unstructured['x'])))] # Let's see the extent of our domain print(min(unstructured['x']), max(unstructured['x']), min(unstructured['y']), max(unstructured['y'])) # As well as our number of data points print(len(unstructured['x'])) # - # Now, let's grid our unstructured data into a uniform grid. For this, we make use of the function `particle_track.unstruct2grid()`, which uses inverse-distance-weighted interpolation to create a Cartesian grid the same size as our model's extent. To use this function, we need to provide: # - Our list of `coordinates` (as tuples). # - The unstructured data we want to be gridded (here we start with `depth`). # - The desired grid size of the resulting rasters (here we're using $1 m$, because the test model was on very high-resolution lidar data). # - The number of $k$ nearest neighbors to use in the interpolation. If $k=1$, we use only the nearest datapoint, whereas higher values (default is $k=3$) interpolate the data into a smoother result. # # The underlying code relies on `scipy` to build a `cKDTree` of our unstructured data, which maps the datapoints onto a uniform array. `cKDTree` is much faster than other gridding functions (e.g. `scipy.interpolate.griddata`), but building the tree can still be very slow if the dataset is very large or if the desired grid size is very small. # # The outputs of `unstruct2grid` are: # - The resulting interpolation function `myInterp` (after building the nearest-distance tree), which will be considerably faster than calling `unstruct2grid` again if we are gridding additional datasets. This function assumes data have the same coordinates, grid size, and $k$. # - A gridded array of our data. # + # Use IDW interpolation interpolate unstructured data into uniform grid myInterp, depth = pt.unstruct2grid(coordinates, unstructured['depth'], 1.0, 3) # Let's plot the resulting grid to see what the output looks like: plt.figure(figsize=(5,5), dpi=200) plt.imshow(depth, cmap='jet') plt.colorbar(fraction=0.018) plt.title('Gridded Depth Array') # - # Now, let's use the new function `myInterp` to grid our additional datasets. If `unstruct2grid` took a while to grid the first dataset, this function will be considerably faster than re-running that process, because it re-uses most of the results of that first function call. This function only requires as input the new unstructured data to be gridded. # # All of these variables will have the same grid size as the first dataset, and we assume that they have all the same coordinates. # + # Grid other data products with new interpolation function stage = myInterp(np.array(unstructured['stage'])) qx = myInterp(np.array(unstructured['qx'])) qy = myInterp(np.array(unstructured['qy'])) # Should be very fast compared to the first dataset! # Let's plot one of these variables to see the new grid plt.figure(figsize=(5,5), dpi=200) plt.imshow(qy, vmin=-2, vmax=2, cmap='seismic') plt.colorbar(fraction=0.018) plt.title('Gridded Y-Discharge Array') # - # **Note:** In all these cases, if your unstructured data does not fill the full rectangular domain, IDW interpolation may still populate those exterior regions with data. If this has potential to cause problems when routing particles, make sure to do some pre-processing on these rasters to correct those exterior regions or crop the domain. # Now, let's figure out where we want to seed our particles. If you're modeling a real domain, it may be easier to figure out a good release location by opening some GIS software and finding the coordinates of that location. Here, we will use the function `particle_track.coord2ind()` to convert your coordinates into array indices. This function requires: # - Coordinates to be converted, as a list [] of $(x,y)$ tuples # - The location of the lower left corner of your rasters (i.e. the origin). If you used `unstruct2grid` to generate rasters, this location will be `[(min(x), min(y))]`. Otherwise, if you're loading data from e.g. a GeoTIFF, the lower left corner will be stored in the .tif metadata and can be accessed by GIS software or gdalinfo (if the user has GDAL) # - The dimensions of the raster, accessible via `np.shape(raster)` # - The grid size of the raster (here $1m$) # # **Note:** this coordinate transform flips the orientation of the unit vectors (i.e. $y_{index} = x$ and $x_{index} = -y$) as well as returning raster indices. This is convenient for the internal functions of `particle_tools.py`, but may cause confusion with plotting or interpreting later if locations are not translated back into spatial coordinates. (Don't worry, we will convert back later!) # # We assume in all of these functions that the coordinates you're using are (at least locally) flat. We do not account for the curvature of the Earth in very large domains. Hopefully you are using a projected coordinate system (here we are using meters UTM), or at least willing to accept a little distortion. Note that this `coord2ind` requires units of either meters or decimal degrees. # + # I have found a nice release location in GIS. Let's convert it to index notation: seedloc = [(624464, 3347078)] # Coordinates are in meters UTM # Call the coordinate transform function seedind = pt.coord2ind(seedloc, (min(unstructured['x']), min(unstructured['y'])), np.shape(depth), 1.0) print(seedind) # Visualize the location on our array plt.figure(figsize=(5,5), dpi=200) plt.scatter(seedind[0][1], seedind[0][0], c='r') plt.imshow(depth) plt.colorbar(fraction=0.03) plt.title('Gridded Depth Array') plt.xlim([seedind[0][1]-40, seedind[0][1]+100]) plt.ylim([seedind[0][0]+70, seedind[0][0]-30]) # - # ### Set up particle routing parameters # # Now that we have pre-converted the input data we need, let's set up the particle routing to be run. We do this using the `particle_track.modelParams` class, in which we populate the attributes to suit our application. This includes the gridded hydrodynamic outputs from above, the grid size `dx`, and tuning parameters which influence our random walk. # + # Create the parameters object and then assign the values params = pt.modelParams() # Populate the params attributes params.stage = stage params.depth = depth params.qx = qx params.qy = qy # Other choices/parameters params.dx = 1. # Grid size params.dry_depth = 0.01 # 1 cm considered dry # You can also tell it which model you're using, but this only matters if the answer is DeltaRCM: params.model = 'Anuga' # - # In this application, we are using the default values for the parameters of the random walk (`gamma`, `theta`, `diff_coeff`). I encourage you to play with these weights and see how your solution is affected. # ### Generate particles # # Now we instantiate the `particle_track.Particles` class, and generate some particles to be routed. Here we are using the `'random'` method to generate particles, which seeds them randomly within a specified region. If we knew exactly where we wanted particles, we could call the `'exact'` method instead. # + # Now we seed in the region +/- 1 cell of the seed location we computed earlier # Note that "xloc" and "yloc" are x and y in the particle coordinate system! seed_xloc = [seedind[0][0]-1, seedind[0][0]+1] seed_yloc = [seedind[0][1]-1, seedind[0][1]+1] # For this example, we model 50 particles: Np_tracer = 50 # Initialize particles and generate particles particles = pt.Particles(params) particles.generate_particles(Np_tracer, seed_xloc, seed_yloc) # - # ### Run the particle routing # # Now we call on one of the routines, `routines.steady_plots()`, to run the model. The core of the particle routing occurs in the `particle_track.run_iteration()` function, but for ease of use, we have provided several high-level wrappers for the underlying code in the `routines.py` script. These routines take common settings, run the particle routing, and save a variety of plots and data for visualization. # # Because our model is a steady case (i.e. flow-field is not varying with time), `steady_plots` will run the particles for an equal number of iterations and return the travel history to us in the `walk_data` dict. This dict is organized into `['xinds']`, `['yinds']`, and `['travel_times']`, which are then indexed by particle ID, and then finally iteration number. (e.g. `walk_data['xinds'][5][10]` will return the xindex for the 6th particle's 11th iteration) # # Note that, while this function returns a `walk_data` dictionary, this information is also stored as an attribute of the particles class, accessible via `particle.walk_data`. # Using steady (time-invariant) plotting routine for 200 iterations walk_data = dorado.routines.steady_plots(particles, 200, 'unstructured_grid_anuga') # Outputs will be saved in the folder 'unstructured_grid_anuga' # Because the particles take different travel paths, at any given iteration they are *not guaranteed* to be synced up in time. We can check this using the `routines.get_state()` function, which allows us to slice the `walk_data` dictionary along a given iteration number. This function logically indexes the dict like `walk_data[:][:][iteration]`, except not quite as simple given the indexing rules of a nested list. # # By default, this function will return the most recent step (iteration number `-1`), but we could ask it to slice along any given iteration number. xi, yi, ti = dorado.routines.get_state(walk_data) print([round(t, 1) for t in ti]) # **Note:** There exists an equivalent function, `get_time_state()`, which performs a similar function by slicing `walk_data` along a given travel time, in case there is interest in viewing the particles in sync. # As a brief aside, the particle routing can also be run in an *unsteady* way, in which each particle continues taking steps until each has reached a specified `target_time`. This can be useful if you want to visualize particle travel times in "real time", or if you want to sync up their propagation with an unsteady flow field that updates every so often (e.g. every 30 minutes). This can be done either with the `unsteady_plots()` routine, or by interacting with `run_iteration()` directly. The commented-out block of code below shows an example of what an unsteady case might look like had we used more timesteps from the model output. # + # # Specify folder to save figures: # path2folder = 'unstructured_grid_anuga' # # Let's say our model outputs update minute: # model_timestep = 60. # Units in seconds # # Number of steps to take in total: # num_steps = 20 # # Create vector of target times # target_times = np.arange(model_timestep, # model_timestep*(num_steps+1), # model_timestep) # # Iterate through model timesteps # for i in list(range(num_steps)): # # The main functional difference with an unsteady model is re-instantiating the # # particle class with updated params *inside* the particle routing loop # # Update the flow field by gridding new time-step # # We don't have additional timesteps, but if we did, we update params here: # params.depth = myInterp(unstructured['depth']) # params.stage = myInterp(unstructured['stage']) # params.qx = myInterp(unstructured['qx']) # params.qy = myInterp(unstructured['qy']) # # Define the particle class and continue # particle = pt.Particles(params) # # Generate some particles # if i == 0: # particle.generate_particles(Np_tracer, seed_xloc, seed_yloc) # else: # particle.generate_particles(0, [], [], 'random', walk_data) # # Run the random walk for this "model timestep" # walk_data = particle.run_iteration(target_times[i]) # # Use get_state() to return original and most recent locations # x0, y0, t0 = dorado.routines.get_state(walk_data, 0) # Starting locations # xi, yi, ti = dorado.routines.get_state(walk_data) # Most recent locations # # Make and save plots and data # fig = plt.figure(dpi=200) # ax = fig.add_subplot(111) # ax.scatter(y0, x0, c='b', s=0.75) # ax.scatter(yi, xi, c='r', s=0.75) # ax = plt.gca() # im = ax.imshow(particle.depth) # plt.title('Depth at Time ' + str(target_times[i])) # cax = fig.add_axes([ax.get_position().x1+0.01, # ax.get_position().y0, # 0.02, # ax.get_position().height]) # cbar = plt.colorbar(im, cax=cax) # cbar.set_label('Water Depth [m]') # plt.savefig(path2folder + '/output_by_dt'+str(i)+'.png') # plt.close() # - # ### Analyze the outputs # # Now that we have the walk history stored in `walk_data`, we can query this dictionary for features of interest. For starters, we can convert the location indices back into geospatial coordinates using the function `particle_track.ind2coord()`. This will append the existing dictionary with `['xcoord']` and `['ycoord']` fields in the units we started with (meters or decimal degrees). # # **Note:** Particle locations are only known to within the specified grid size (i.e. $\pm \Delta x/2$) # + # Convert particle location indices back into UTM coordinates walk_data = pt.ind2coord(walk_data, (min(unstructured['x']), min(unstructured['y'])), np.shape(depth), 1.0) # To check that things have worked, print starting location of first particle. # Should be within +/- dx from seedloc = (624464, 3347078) print(walk_data['xcoord'][0][0], walk_data['ycoord'][0][0]) # - # For something a little more interesting, let's measure the amount of time particles spent "exposed" to a specific sub-region within our domain. For this we make use of the functions `particle_track.exposure_time()` and `routines.plot_exposure_time()`. If we input a binary array (same size as input arrays) delineating our region of interest (ROI) with 1's, these functions will compute and plot the exposure time distribution (ETD) of particles in this sub-region. # # For those familiar with the metric, the ETD is equivalent to the residence time distribution (RTD) for steady flows, with the only difference being that if particles make multiple excursions into our ROI, all those times are counted. # # **Note:** For a representative ETD, it is important to run a *lot* of particles. A large sample size is needed to obtain a realistic distribution (and smooth plots). Depending on the domain, I recommend at least $O(10^3)$ # # First, let's generate and visualize the ROI: # + # Create the array regions = np.zeros_like(depth, dtype='int') regions[:,100:200] = 1 # Include anywhere above sea level # Visualize the region plt.figure(figsize=(5,5), dpi=200) plt.imshow(depth) plt.imshow(regions, cmap='bone', alpha=0.3) # - # Then compute. `exposure_time()` outputs a list of exposure times by particle index, and `plot_exposure_time()` will use those values to generate plots of the cumulative and differential forms of the ETD (i.e. the CDF and PDF, respectively). # Measure exposure times exposure_times = pt.exposure_time(walk_data, regions) # Then generate plots and save data exposure_times = dorado.routines.plot_exposure_time(walk_data, exposure_times, 'unstructured_grid_anuga/figs', timedelta = 60, nbins=20) # Changing 'timedelta' will change the units of the time-axis. # Units are seconds, so 60 will plot by minute. # Because we are using fewer particles than ideal, smooth the plots with small 'nbins' # **Note:** If any particles are still in the ROI at the end of their travel history, they are excluded from plots. These particles are not done being "exposed," so we need to run more iterations in order to capture the tail of the distribution.
examples/unstructured_grid_anuga.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # # 4 Diagrama de fases para sustancias puras # En esta sección se presentan los diagramas de fases comunes para sustancias puras. Como son: # # # 1. Envolvente de fases liquido-vapor # 2. Isoterma # 3. Isobara # 4. Sólido-líquido # # %load_ext fortranmagic # activating magic # # Envolvente de fases para sustancias puras # En esta sección se presenta el desarrollo y manipulación de las ecuaciones para establecer un algortimo que permita calcular los puntos del diagrama de fases para una sustancia pura utilizando ecuaciones de estado para modelar el comportamiento de fases. # $$ P^l(T, V^l) - P^v(T, V^v) = 0$$ # $$ ln \phi(T, V^l) - ln \phi(T, V^v) = 0 $$ # $$ g(T, V^l, V^v) = X_S - S = 0 $$ # $$ F = # \begin{bmatrix} # P^l(T, V^l) - P^v(T, V^v)\\ # ln \phi(T, V^l) - ln \phi(T, V^v)\\ # X_S - S # \end{bmatrix} # = 0$$ # $$ F = # \begin{bmatrix} # ln \left( \frac{P^l(T, V^l)} {P^v(T, V^v)} \right)\\ # ln f_l(T, V^l) - ln f_v(T, V^v)\\ # 0 # \end{bmatrix} # $$ # # # $$ J # \begin{bmatrix} # \Delta ln T\\ # \Delta ln V^l\\ # \Delta ln V^v\\ # \end{bmatrix} # + F = 0$$ # # # # # # Se inicia con el cálculo de la envolvente de fases de uns sustancia pura. # # $$ n \left(\frac{\partial ln \hat\phi_i}{\partial n_j}\right)_{T,P} = n \left(\frac{\partial ln \hat\phi_j}{\partial n_j}\right)_{T,P} = n \left(\frac{\partial^2 F} {\partial n_j \partial n_i} \right)_{T,V} + 1 + \frac{n}{RT} + \frac{ \left(\frac{\partial P} {\partial n_j}\right)_{T,V} \left(\frac{\partial P} {\partial n_i}\right)_{T,V} } {\left(\frac{\partial P} {\partial V} \right)_{T,n}}$$ # # # Relación entre derivadas de la fugacidad y el coeficiente de fugacidad # # $$ \left(\frac{\partial ln\hat f_i }{\partial n_i}\right)_{T,P} = \left(\frac{\partial \hat \phi_i}{\partial n_i}\right)_{T,P} + \left(\frac{\delta_{ij}}{n} - \frac{1}{n} \right)$$ # # # $$RJAC(1,1)=DPDTx/Pl-DPDTy/Pv$$ # # $$ RJAC(1,1)=T*RJAC(1,1) $$ # # $$ RJAC(1,1) = T \left( \frac {\left(\frac{\partial P_{x} }{\partial T}\right)} {P_l} - \frac {\left(\frac{\partial P_{y} }{\partial T}\right)} {P_v} \right) $$ # # # $$RJAC(1,2)=Vl*DPDVx/Pl$$ # # $$ RJAC(1,2) = -V_l \left( \frac {\left(\frac{\partial P_{x} }{\partial V}\right)} {P_l} \right) $$ # # $$RJAC(1,3)=-Vv*DPDVy/Pv$$ # # $$ RJAC(1,2) = -V_v \left( \frac {\left(\frac{\partial P_{y} }{\partial V}\right)} {P_y} \right) $$ # # # # $$RJAC(2,1)=FUGTx(icomp)-FUGTy(icomp)$$ # # $$ RJAC(2,1)= \left(\frac{\partial f_{ix} } {\partial T} \right) - \left(\frac{\partial f_{iy} } {\partial T} \right)$$ # # $$RJAC(2,1)=T*RJAC(2,1)$$ # # $$RJAC(2,1)=T* \left(\left(\frac{\partial f_{ix} } {\partial T} \right) - \left(\frac{\partial f_{iy} } {\partial T} \right) \right) $$ # # $$RJAC(2,2)=Vl*FUGVx(icomp)$$ # # $$ RJAC(2,1)= V_l \left(\frac{\partial f_{ix} } {\partial V} \right) $$ # # # # $$RJAC(2,3)=-Vv*FUGVy(icomp)$$ # # $$ RJAC(2,3)= V_v \left(\frac{\partial f_{iy} } {\partial V_{y}} \right) $$ # # # # $$DLFUGT(I)=(ArTn(I)-Arn(I)/T)/RT+1.D0/T ! term DPDT/P is cancelled out $$ # # $$DLFUGT(I)=(ArTn(I)-Arn(I)/T)/RT+1.D0/T $$ # # $$DLFUGT(I) = \frac{ ArTn(I) - Arn(I)} {R_GT^2} + \frac{1} {T} $$ # # $$DLFUGT(I) = \frac{ \frac{\partial Ar}{\partial T \partial n} # - \frac{\partial Ar}{\partial n }} {R_GT^2} + \frac{1} {T} $$ # # # # $$ DPDT = -ArTV+TOTN*RGAS/V $$ # # $$ \frac{\partial P}{\partial T} = -\frac{\partial Ar}{\partial T \partial V} + N_T \frac{R_{G}} {V} $$ # # # $$RJAC(1,1)=DPDTx/Pl-DPDTy/Pv$$ # # $$ RJAC(1,1)=T*RJAC(1,1) $$ # # $$ RJAC(1,1) = T \left( \frac {\left(\frac{\partial P_{x} }{\partial T}\right)} {P_l} - \frac {\left(\frac{\partial P_{y} }{\partial T}\right)} {P_v} \right) $$ # # # $$RJAC(1,2)=Vl*DPDVx/Pl$$ # # $$ RJAC(1,2) = -V_l \left( \frac {\left(\frac{\partial P_{x} }{\partial V}\right)} {P_l} \right) $$ # # $$RJAC(1,3)=-Vv*DPDVy/Pv$$ # # $$ RJAC(1,3) = -V_v \left( \frac {\left(\frac{\partial P_{y} }{\partial V}\right)} {P_y} \right) $$ # # # # $$RJAC(2,1)=FUGTx(icomp)-FUGTy(icomp)$$ # # $$ RJAC(2,1)= \left(\frac{\partial f_{ix} } {\partial T} \right) - \left(\frac{\partial f_{iy} } {\partial T} \right)$$ # # $$RJAC(2,1)=T*RJAC(2,1)$$ # # $$RJAC(2,1)=T* \left(\left(\frac{\partial f_{ix} } {\partial T} \right) - \left(\frac{\partial f_{iy} } {\partial T} \right) \right) $$ # # $$RJAC(2,2)=Vl*FUGVx(icomp)$$ # # $$ RJAC(2,1)= V_l \left(\frac{\partial f_{ix} } {\partial V} \right) $$ # # # # $$RJAC(2,3)=-Vv*FUGVy(icomp)$$ # # $$ RJAC(2,3)= V_v \left(\frac{\partial f_{iy} } {\partial V_{y}} \right) $$ # # # # $$DLFUGT(I)=(ArTn(I)-Arn(I)/T)/RT+1.D0/T ! term DPDT/P is cancelled out $$ # # $$DLFUGT(I)=(ArTn(I)-Arn(I)/T)/RT+1.D0/T $$ # # $$DLFUGT(I) = \frac{ \frac{ArTn(I) - Arn(I)} {T}} {R_GT} + \frac{1} {T} $$ # # # # # $$ DPDT = -ArTV+TOTN*RGAS/V $$ # # $$ \frac{\partial P}{\partial T} = -\frac{\partial Ar}{\partial T \partial V} + N_T \frac{R_{G}} {V} $$ # _Note: # # # $$\begin{bmatrix} # T \left( \frac {\left(\frac{\partial P_{x} }{\partial T}\right)} {P_l} - \frac {\left(\frac{\partial P_{y} }{\partial T}\right)} {P_v} \right) & x_{12} & x_{13} & \dots & x_{1n} \\ # x_{21} & x_{22} & x_{23} & \dots & x_{2n} \\ # \hdotsfor {5} \\ # x_{d1} & x_{d2} & x_{d3} & \dots & x_{dn} # \end{bmatrix}$$ # $$J_x = \begin{bmatrix} # T \left( \frac {\left(\frac{\partial P_{x} }{\partial T}\right)} {P_l} - \frac {\left(\frac{\partial P_{y} }{\partial T}\right)} {P_v} \right) & # -V_l \left( \frac {\left(\frac{\partial P_{x} }{\partial V}\right)} {P_l} \right) & # -V_v \left( \frac {\left(\frac{\partial P_{y} }{\partial V}\right)} {P_y} \right) \\ # \left(\frac{\partial f_{ix} } {\partial T} \right) - \left(\frac{\partial f_{iy} } {\partial T} \right) & V_l \left(\frac{\partial f_{ix} } {\partial V} \right) & V_v \left(\frac{\partial f_{iy} } {\partial V_{y}} \right) & \\ # 0 & 0 & 0 & # \end{bmatrix}$$ J_x = \begin{bmatrix} T \left( \frac {\left(\frac{\partial P_{x} }{\partial T}\right)} {P_l} - \frac {\left(\frac{\partial P_{y} }{\partial T}\right)} {P_v} \right) & -V_l \left( \frac {\left(\frac{\partial P_{x} }{\partial V}\right)} {P_l} \right) & -V_v \left( \frac {\left(\frac{\partial P_{y} }{\partial V}\right)} {P_y} \right) \\ \left(\frac{\partial f_{ix} } {\partial T} \right) - \left(\frac{\partial f_{iy} } {\partial T} \right) & V_l \left(\frac{\partial f_{ix} } {\partial V} \right) & V_v \left(\frac{\partial f_{iy} } {\partial V_{y}} \right) & \\ 0 & 0 & 0 & \end{bmatrix}$$ # # Envolvente para mezclas # $$ f_i = ln K_i + ln \hat\phi_i^v(T,P,y) - ln \hat\phi_i^l(T,P,x) = 0 $$ # # $$ i = 1,2,... C $$ # # $$ f_{C+1} = \sum_{i=1}^C(y_i - x_i) = 0 $$ # # $$ f_{C+2} = X -X_{spec} = 0 $$ # $$ x_i = \frac{z_i} {1-\beta+ \beta K_i}$$ # # $$ y_i = \frac{K_iz_i} {1-\beta+ \beta K_i}$$ # $$ J_{ij} = \frac{\partial f_i}{\partial ln K_j} = \frac{\partial ln K_i}{\partial ln K_j} + \frac{\partial \hat \phi_i^v}{\partial ln K_j} - \frac{\partial \hat \phi_i^l}{\partial ln K_j} $$ # # $$ \frac{\partial ln K_i}{\partial ln K_j} = \left\{ \begin{array}{lcc} # 1 & i = j \\ # \\ 0 & i \neq j \\ # \end{array} # \right.$$ # $$ \frac{\partial ln \hat \phi_i^v}{\partial ln K_j} = \sum_{k=1}^C\frac{\partial ln \hat \phi_i^v}{\partial y_k} \frac{\partial y_k}{\partial ln K_j} $$ # $$ \frac{\partial y_k}{\partial ln K_j} = 0 $$ # # $$ k \neq j $$ # $$ \frac{\partial ln \hat \phi_i^v}{\partial ln K_j} = \frac{\partial ln \hat \phi_i^v}{\partial y_k} \frac{\partial y_k}{\partial ln K_j} $$ # $$ \frac{\partial x_i}{\partial ln K_j} = K_j \frac{\partial x_i}{\partial K_j} = \frac{ \beta K_j z_j}{(1 - \beta + \beta K_j)^2} = -\beta \frac{x_i y_i}{z_i} $$ # $$ \frac{\partial y_i}{\partial ln K_j} = (1 - \beta) \frac{x_i y_i}{z_i} $$ # Finalmente, el termino $ \frac{\partial ln \hat \phi_i^v}{\partial y_k} $ # # # # 4. Descripción del algoritmo # ---------------------------- # Donde $J_F$ es la matriz jacobiana de la función vectorial $F$, $Λ$ es el vector de variables del sistema $F=0$, $S_{Spec}$ es el valor asignado a una de las variables del vector $Λ$, $\frac{dΛ}{ dS_{Spec}}$ es la derivada, manteniendo la condición $F=0$, del vector de variables con respecto al parámetro $S_{spec}$. Observe que si $S_{spec}=Λ_i$, entonces $\frac{dΛi} {dS_{Spec}} =1$. El vector $\frac{dΛ}{ dS_{Spec}}$ es llamado “vector de sensitividades”. # # $\frac{\partial F} {\partial S_{Spec}}$ es la derivada parcial del vector de funciones $F$ con respecto la variable $S_{spec}$. # # En la ecuación A.3-1 la matriz jacobiana $J_F$ debe ser valuada en un punto ya convergido que es solución del sistema de ecuaciones $F=0$. Observe en los distintos sistemas de ecuaciones presentados en el capítulo 3, que sólo una componente del vector $F$ depende explícitamente de $S_{spec}$. Por tanto, las componentes del vector $\frac{\partial F} {\partial S_{Spec}}$ son todas iguales a cero, excepto la que depende de $S_{spec}$, en esta tesis el valor de dicha componente es siempre $“-1”$. # # Conocidos $J_F$ y $\frac{\partial F} {\partial S_{Spec}}$ es posible calcular todas las componentes del vector $\frac{dΛ}{ dS_{Spec}}$ . # Con dΛ dSSpec conocido es posible predecir los valores de todas las variables del vector # Λ para el siguiente punto de la “hiper-línea que se está calculando, aplicando la # siguiente ecuación: # # # $$ A_{next point}^0 = A _{conve. pont} + \left(\frac{dA}{dS_{Spec}}\right) \Delta S_{Spec} $$ # Aquí Λ0 next point corresponde al valor inicial del vector Λ para el próximo punto a ser # calculado. # Λconv. point es el valor del vector Λ en el punto ya convergido. # # Por otra parte, el vector de sensitividades dΛ dSSpec provee información sobre la # próxima variable que debe ser especificada en el próximo punto a ser calculado. La # variable a especificar corresponderá a la componente del vector dΛ dSSpec de mayor # valor absoluto. Supongamos que la variable especificada para el punto convergido fue la # presión P, es decir en el punto convergido Sspec = P. Luego al calcular el vector de # sensitividades para el punto convergido usando A.3-1, supongamos que se determina # que la componente de mayor valor absoluto de dicho vector es la correspondiente a # dT dP , esto implica que en el próximo punto a ser calculado la variable que se debe # especificar ya no es P sino T. Esto es Sspec = T. Cuando existe un cambio de variables # especificadas como el caso del ejemplo anterior, el vector de sensitividades se # normaliza dividiendo todas las componentes del vector por la de mayor valor absoluto, # en el caso anterior se deberían dividir todas las componentes por dT dP . Finalmente se # aplica A.3-2 para encontrar los valores de Λ0 next point # La variable # ∆SSpec se computa en esta tesis de la siguiente manera: # Donde ∆SSpec _Old es el paso que se dio en la variable que se especifico para obtener el punto convergido. “N” es una constante impuesta por el usuario, e ITER es el número de iteraciones requeridas para el punto convergido. Note que para calcular ∆SSpec se utiliza la componente de dΛ dSSpec de mayor valor absoluto, # # # max #  #  # Λ # dSSpec # d # , antes de que # el vector # dΛ dSSpec haya sido normalizado. # 4. Referencias # -------------- # [1] key # [#] <NAME>, <NAME>, Introduction to Numerical Continuation Methods, SIAM. Classics in Applied Mathematics, Philadelphia, 2003. # # [#] <NAME>, <NAME>, Global phase equilibrium calculations: Critical lines, critical end points and liquid-liquid-vapour equilibrium in binary mixtures, Journal of Supercritical Fluids, 39 (2007) 287-295. # # [#] <NAME>, <NAME>, <NAME>, Automated generation of phase diagrams for binary systems with azeotropic behavior, Industrial and Engineering Chemistry Research, 47 (2008) 9728-9743.
Diagrama_de_fases_para_sustancias_puras_0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn.ensemble import GradientBoostingClassifier # # Unedited 47-dim data # # Initial training run-through: didn't go long enough, poor results # ## $m_{Z'} = 350$ GeV data50_350Gm_train, data50_350Gm_test = DA.gettraintest(0) gradboost_350Gm = sigbg_model( make_pipeline(StandardScaler(), GradientBoostingClassifier(n_estimators=50, max_depth=7, learning_rate=1, verbose=1)), data50_350Gm_train, data50_350Gm_test); gradboost_350Gm.fit() results(gradboost_350Gm, 350) # ## $m_{Z'} = 500$ GeV data50_500Gm_train, data50_500Gm_test = DA.gettraintest(1) gradboost_500Gm = sigbg_model( make_pipeline(StandardScaler(), GradientBoostingClassifier(n_estimators=50, max_depth=7, learning_rate=1, verbose=1)), data50_500Gm_train, data50_500Gm_test); gradboost_500Gm.fit() results(gradboost_500Gm, 500) # ## $m_{Z'} = 1$ TeV data50_1Tm_train, data50_1Tm_test = DA.gettraintest(2) gradboost_1Tm = sigbg_model( make_pipeline(StandardScaler(), GradientBoostingClassifier(n_estimators=50, max_depth=7, learning_rate=1, verbose=1)), data50_1Tm_train, data50_1Tm_test); gradboost_1Tm.fit() results(gradboost_1Tm, 1000) # ## $m_{Z'} = 2$ TeV data50_2Tm_train, data50_2Tm_test = DA.gettraintest(3) gradboost_2Tm = sigbg_model( make_pipeline(StandardScaler(), GradientBoostingClassifier(n_estimators=50, max_depth=7, learning_rate=1, verbose=1)), data50_2Tm_train, data50_2Tm_test); gradboost_2Tm.fit() results(gradboost_2Tm, 2000) # ## $m_{Z'} = 4$ TeV data50_4Tm_train, data50_4Tm_test = DA.gettraintest(4) # accidentally had this at index = 3 initially gradboost_4Tm = sigbg_model( make_pipeline(StandardScaler(), GradientBoostingClassifier(n_estimators=50, max_depth=7, learning_rate=1, verbose=1)), data50_4Tm_train, data50_4Tm_test); gradboost_4Tm.fit() results(gradboost_4Tm, 4000) # # Unedited 47-dim data Pt 2 # # Trained longer, used XGBClassifier which is much faster than pure scikit-learn from xgboost import XGBClassifier from datetime import datetime # ## $m_{Z'} = 350$ GeV data50_350Gm_train, data50_350Gm_test = DA.gettraintest(0) xgradboost_350Gm = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_350Gm_train, data50_350Gm_test); xgradboost_350Gm.fit() results(xgradboost_350Gm, 350) # ## $m_{Z'} = 500$ GeV data50_500Gm_train, data50_500Gm_test = DA.gettraintest(1) xgradboost_500Gm = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_500Gm_train, data50_500Gm_test); xgradboost_500Gm.fit() results(xgradboost_500Gm, 500) # ## $m_{Z'} = 1$ TeV data50_1Tm_train, data50_1Tm_test = DA.gettraintest(2) xgradboost_1Tm = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_1Tm_train, data50_1Tm_test); xgradboost_1Tm.fit() results(xgradboost_1Tm, 1000) # ## $m_{Z'} = 2$ TeV data50_2Tm_train, data50_2Tm_test = DA.gettraintest(3) xgradboost_2Tm = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_2Tm_train, data50_2Tm_test); xgradboost_2Tm.fit() results(xgradboost_2Tm, 2000) # ## $m_{Z'} = 4$ TeV data50_4Tm_train, data50_4Tm_test = DA.gettraintest(4) xgradboost_4Tm = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_4Tm_train, data50_4Tm_test); xgradboost_4Tm.fit() results(xgradboost_4Tm, 4000) print(current_time) data50_4Tm_train, data50_4Tm_test = DA.gettraintest(4) xgradboost_4Tm = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, verbose=1, nthread=4, use_label_encoder=False)), data50_4Tm_train, data50_4Tm_test); xgradboost_4Tm.fit() print(current_time) # # Noised Data xgradboost_350Gm_noise = refresh_model(xgradboost_350Gm_noise) xgradboost_500Gm_noise = refresh_model(xgradboost_500Gm_noise) xgradboost_1Tm_noise = refresh_model(xgradboost_1Tm_noise) xgradboost_2Tm_noise = refresh_model(xgradboost_2Tm_noise) xgradboost_4Tm_noise = refresh_model(xgradboost_4Tm_noise) # + jupyter={"outputs_hidden": true} xgradboost_350Gm_noise = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, verbose=1, nthread=4, use_label_encoder=False)), data50_350Gm_train_noise, data50_350Gm_test_noise); xgradboost_350Gm_noise.fit() # - results(xgradboost_350Gm_noise, 350) # + jupyter={"outputs_hidden": true} xgradboost_500Gm_noise = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, verbose=1, nthread=4, use_label_encoder=False)), data50_500Gm_train_noise, data50_500Gm_test_noise); xgradboost_500Gm_noise.fit() # - results(xgradboost_500Gm_noise, 500) # + jupyter={"outputs_hidden": true} xgradboost_1Tm_noise = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, verbose=1, nthread=4, use_label_encoder=False)), data50_1Tm_train_noise, data50_1Tm_test_noise); xgradboost_1Tm_noise.fit() # - results(xgradboost_1Tm_noise, 1000) # + jupyter={"outputs_hidden": true} xgradboost_2Tm_noise = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, verbose=1, nthread=4, use_label_encoder=False)), data50_2Tm_train_noise, data50_2Tm_test_noise); xgradboost_2Tm_noise.fit() # - results(xgradboost_2Tm_noise, 2000) # + jupyter={"outputs_hidden": true} xgradboost_4Tm_noise = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, verbose=1, nthread=4, use_label_encoder=False)), data50_4Tm_train_noise, data50_4Tm_test_noise); xgradboost_4Tm_noise.fit() # - results(xgradboost_4Tm_noise, 4000) # # New Background Differentiation data50_350Gm_train_bgdif, data50_350Gm_test_bgdif = DA.getidtraintest(0) xgradboost_350Gm_bgdif = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_350Gm_train_bgdif, data50_350Gm_test_bgdif); xgradboost_350Gm_bgdif.fit() data50_500Gm_train_bgdif, data50_500Gm_test_bgdif = DA.getidtraintest(1) xgradboost_500Gm_bgdif = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_500Gm_train_bgdif, data50_500Gm_test_bgdif); xgradboost_500Gm_bgdif.fit() data50_1Tm_train_bgdif, data50_1Tm_test_bgdif = DA.getidtraintest(2) xgradboost_1Tm_bgdif = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_1Tm_train_bgdif, data50_1Tm_test_bgdif); xgradboost_1Tm_bgdif.fit() data50_2Tm_train_bgdif, data50_2Tm_test_bgdif = DA.getidtraintest(3) xgradboost_2Tm_bgdif = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_2Tm_train_bgdif, data50_2Tm_test_bgdif); xgradboost_2Tm_bgdif.fit() data50_4Tm_train_bgdif, data50_4Tm_test_bgdif = DA.getidtraintest(4) xgradboost_4Tm_bgdif = sigbg_model( make_pipeline( StandardScaler(), XGBClassifier(n_estimators=250, max_depth=7, learning_rate=0.1, nthread=4, use_label_encoder=False)), data50_4Tm_train_bgdif, data50_4Tm_test_bgdif); xgradboost_4Tm_bgdif.fit()
notebooks/grad_boost.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Week 3 Programming Assignment # # Remark: # # Please upload your solutions of this assignment to Canvas with a file named "Programming_Assignment_3 _yourname.ipynb" before deadline. # + [markdown] colab_type="text" id="l2ENBj8Nz3kg" # ================================================================================================================= # + [markdown] colab_type="text" id="t8Op_NP5z3kf" # ## **Problem 1 .** Use stochastic gradient descent method to train MNIST with 1 hidden layer neural network model to achieve at least 97% test accuracy. Print the results with the following format: # # "Epoch: i, Training accuracy: $a_i$, Test accuracy: $b_i$" # # where $i=1,2,3,...$ means the $i$-th epoch, $a_i$ and $b_i$ are the training accuracy and test accuracy computed at the end of $i$-th epoch. # - # write your code for solving probelm 1 in this cell # + [markdown] colab_type="text" id="l2ENBj8Nz3kg" # ================================================================================================================= # - # ## **Problem 2 .** Use stochastic gradient descent method to train CIFAR-10 with # * (1) logistic regression model to achieve at least 25% test accuracy # * (2) 2-hidden layers neural network model to achieve at least 50% test accuracy # # Print the results with the following format: # # * For logistic regression model, print: # # "Logistic Regression Model, Epoch: i, Training accuracy: $a_i$, Test accuracy: $b_i$" # # # * For 2-hidden layers neural network model, print: # # "DNN Model, Epoch: i, Training accuracy: $a_i$, Test accuracy: $b_i$" # # # where $i=1,2,3,...$ means the $i$-th epoch, $a_i$ and $b_i$ are the training accuracy and test accuracy computed at the end of $i$-th epoch. # # Hint: # # (1) The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. # # (2) The input_size should be $3072=3*32*32$, where 3 is the number of channels (RGB image), $32*32$ is the size of every image. # # (3) For the 2-hidden layers neural network model, consider to use $W^1\in \mathbb{R}^{3072\times3072}$ for the 1st-hidden layer, $W^2 \in \mathbb{R}^{500\times 3072}$ for the 2nd-hidden layer and $W^3 \in \mathbb{R}^{10\times 500}$ for the output layer. # # + # write your code for solving probelm 2 in this cell # You can load CIFAR-10 dataset as follows: CIFAR10_transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()]) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=CIFAR10_transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=CIFAR10_transform) testloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=False) # - # =================================================================================================================
docs/_sources/Module3/Programming_Assignment_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pyttsx3 import comtypes.gen import datetime import speech_recognition as sr import wikipedia import smtplib import webbrowser as wb import os from PIL import Image import pyautogui import sys import tkinter import random import PyPDF2 from yahoo_fin import stock_info as si import numpy as np from googletrans import Translator # + engine = pyttsx3.init() #'sapi5' is optional engine.say("Hello") engine.runAndWait() voices = engine.getProperty('voices') engine.setProperty('voice', voices[1].id) for voice in voices: # to get the info. about various voices in our PC print("Voice:") print("ID: %s" %voice.id) print("Name: %s" %voice.name) print("Age: %s" %voice.age) print("Gender: %s" %voice.gender) print("Languages Known: %s" %voice.languages) # + active="" # # Initialize the Speech Engine # engine = pyttsx3.init('sapi5') #sapi5 is optional # # Get the voice objects and print them. (This is just to see, if you have more than one voice.) # voices = engine.getProperty('voices') # #print(voices) # # Set the voice to the second voice. (voices[0].id would be the first voice) # engine.setProperty('voice', voices[1].id) # #voice id 1 is female, id 0 is male # # Set the words per minute rate of the Speech engine # engine.setProperty('rate', 105) # # Tell the engine what you want it to say. # engine.say('Sally sells seashells by the seashore.') # engine.say('The quick brown fox jumped over the lazy dog.') # # Tell the engine to start saying what you wanted it to and stop when it reaches the end of the queued sayings. # engine.runAndWait() # + def speak(audio): engine.say(audio) engine.runAndWait() #speak("hello") # + def time(): Time=datetime.datetime.now().strftime("%I:%M:%S") speak("the current time is") speak(Time) #time() # + def date(): year=int(datetime.datetime.now().year) month=int(datetime.datetime.now().month) day=int(datetime.datetime.now().day) speak("the current date is") speak(day) speak(month) speak(year) #date() # - def sendEmail(to, content): server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() sever.starttls() server.login('<EMAIL>','1234') server.sendmail('<EMAIL>',to,content) server.close() def screenshot(): img=pyautogui.screenshot() img.save() # + def wishme(): speak("welcome to my world!") time() #Speak the time date() #Speak the date hour=datetime.datetime.now().hour if hour>=6 and hour<=12: speak("Good morning!") elif hour>=12 and hour<=18: speak("Good afternoon!") elif hour>=18 and hour<=24: speak("Good evening!") else: speak("Good night!") speak("what can i do for you?") #wishme() # + def takeCommand(): r=sr.Recognizer() with sr.Microphone() as source: print("Listening...") r.pause_threshold=1 audio=r.listen(source) try: print("Recognizing...") query=r.recognize_google(audio, language='en-in') print(query) except Exception as e: print(e) speak("Pardon me, can you please say that again...") return takeCommand() return query #takeCommand() # + #tic-tac-toe: human vs human theBoard = {'7': ' ' , '8': ' ' , '9': ' ' , '4': ' ' , '5': ' ' , '6': ' ' , '1': ' ' , '2': ' ' , '3': ' ' } board_keys = [] def gameBoard(): print('7'+'|'+'8'+'|'+'9') print('-+-+-') print('4'+'|'+'5'+'|'+'6') print('-+-+-') print('1'+'|'+'2'+'|'+'3') print() #gameBoard() for key in theBoard: board_keys.append(key) def printBoard(board): print(board['7'] + '|' + board['8'] + '|' + board['9']) print('-+-+-') print(board['4'] + '|' + board['5'] + '|' + board['6']) print('-+-+-') print(board['1'] + '|' + board['2'] + '|' + board['3']) def inp_move(): #move1 = input() move1 = takeCommand() if move1.isdigit(): if int(move1) > 0 and int(move1) < 10: return move1 else: print('Please enter a number within range') speak('Please enter a number within range') tst = inp_move() return tst else: print('please enter a number') speak('please enter a number') tmp = inp_move() return tmp def humanGame(): gameBoard() turn = 'X' count = 0 for i in range(10): printBoard(theBoard) print("It's your turn," + turn + ".Move to which place?") speak("It's your turn,"+turn+".Move to which place?") move = inp_move() if theBoard[move] == ' ': theBoard[move] = turn count += 1 else: print("That place is already filled.\nMove to which place?") speak("That place is already filled. Move to which place?") continue # Now we will check if player X or O has won,for every move after 5 moves. if count >= 5: if theBoard['7'] == theBoard['8'] == theBoard['9'] != ' ': # across the top printBoard(theBoard) print("\nGame Over.\n") speak("Game Over") print(" **** " +turn + " won. ****") speak(turn+"won") break elif theBoard['4'] == theBoard['5'] == theBoard['6'] != ' ': # across the middle printBoard(theBoard) print("\nGame Over.\n") speak("Game Over") print(" **** " +turn + " won. ****") speak(turn+"won") break elif theBoard['1'] == theBoard['2'] == theBoard['3'] != ' ': # across the bottom printBoard(theBoard) print("\nGame Over.\n") speak("Game Over") print(" **** " +turn + " won. ****") speak(turn+"won") break elif theBoard['1'] == theBoard['4'] == theBoard['7'] != ' ': # down the left side printBoard(theBoard) print("\nGame Over.\n") speak("Game Over") print(" **** " +turn + " won. ****") speak(turn+"won") break elif theBoard['2'] == theBoard['5'] == theBoard['8'] != ' ': # down the middle printBoard(theBoard) print("\nGame Over.\n") speak("Game Over") print(" **** " +turn + " won. ****") speak(turn+"won") break elif theBoard['3'] == theBoard['6'] == theBoard['9'] != ' ': # down the right side printBoard(theBoard) print("\nGame Over.\n") speak("Game Over") print(" **** " +turn + " won. ****") speak(turn+"won") break elif theBoard['7'] == theBoard['5'] == theBoard['3'] != ' ': # diagonal printBoard(theBoard) print("\nGame Over.\n") speak("Game Over") print(" **** " +turn + " won. ****") speak(turn+"won") break elif theBoard['1'] == theBoard['5'] == theBoard['9'] != ' ': # diagonal printBoard(theBoard) print("\nGame Over.\n") speak("Game Over") print(" **** " +turn + " won. ****") speak(turn+"won") break # If neither X nor O wins and the board is full, we'll declare the result as 'tie'. if count == 9: print("\nGame Over.\n") speak("Game Over") print("It's a Tie!!") speak("Aah! It's a tie") # Now we have to change the player after every move. if turn =='X': turn = 'O' else: turn = 'X' # Now we will ask if player wants to restart the game or not. #restart = input("Do want to play Again?(y/n)") speak("Do you want to play again?") restart = takeCommand().lower() if restart == "yes": for key in board_keys: theBoard[key] = " " humanGame() #this is to restart if the user wants to... # + #tic-tac-toe: human vs computer board = [' ' for x in range(10)] def insertLetter(letter, pos): board[pos] = letter def spaceIsFree(pos): return board[pos] == ' ' def basicBoard(): print("1 | 2 | 3") print("-+-+-+-+-") print("4 | 5 | 6 ") print("-+-+-+-+-") print("7 | 8 | 9") #basicBoard() def printBoard(board): print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3]) print('-+-+-+-+-+-') print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6]) print('-+-+-+-+-+-') print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9]) def isWinner(bo, le): return (bo[7] == le and bo[8] == le and bo[9] == le) or (bo[4] == le and bo[5] == le and bo[6] == le) or(bo[1] == le and bo[2] == le and bo[3] == le) or(bo[1] == le and bo[4] == le and bo[7] == le) or(bo[2] == le and bo[5] == le and bo[8] == le) or(bo[3] == le and bo[6] == le and bo[9] == le) or(bo[1] == le and bo[5] == le and bo[9] == le) or(bo[3] == le and bo[5] == le and bo[7] == le) def playerMove(): run = True while run: #move = input('Please select a position to place an \'X\' (1-9): ') speak("Please select a position to place an X from 1 to 9") move=takeCommand() try: move = int(move) if move > 0 and move < 10: if spaceIsFree(move): run = False insertLetter('X', move) else: print('Sorry, this space is occupied!') speak('Sorry this space is occupied!') else: print('Please type a number within the range!') speak('Please type a number within the range!') except: print('Please type a number!') speak('Please type a number!') def compMove(): possibleMoves = [x for x, letter in enumerate(board) if letter == ' ' and x != 0] move = 0 for let in ['O', 'X']: for i in possibleMoves: boardCopy = board[:] boardCopy[i] = let if isWinner(boardCopy, let): move = i return move cornersOpen = [] for i in possibleMoves: if i in [1,3,7,9]: cornersOpen.append(i) if len(cornersOpen) > 0: move = selectRandom(cornersOpen) return move if 5 in possibleMoves: move = 5 return move edgesOpen = [] for i in possibleMoves: if i in [2,4,6,8]: edgesOpen.append(i) if len(edgesOpen) > 0: move = selectRandom(edgesOpen) return move def selectRandom(li): import random ln = len(li) r = random.randrange(0,ln) return li[r] def isBoardFull(board): if board.count(' ') > 1: return False else: return True def computerGame(board): basicBoard() print('Welcome to Tic Tac Toe!') speak("Welcome to Tic Tac Toe!") printBoard(board) while not(isBoardFull(board)): if not(isWinner(board, 'O')): playerMove() printBoard(board) else: print('Sorry, O\'s won this time!') speak("Sorry, Computer won") break if not(isWinner(board, 'X')): move = compMove() if move == 0: print() else: insertLetter('O', move) print('Computer placed an \'O\' in position', move , ':') speak("Computer placed an O in position"+str(move)) printBoard(board) else: print('X\'s won this time! Good Job!') speak("X won this time! Good Job!") break if isBoardFull(board): print('Tie Game!') Speak("Tie Game!") #answer = input('Do you want to play ? (Y/N)') speak("Do you want to play the game?") answer=takeCommand().lower() if answer == 'yes': board = [' ' for x in range(10)] print('-----------------------------------') computerGame(board) # - if __name__=='__main__': wishme() button_pressed=False # + top = tkinter.Tk() def giveCommand(): button_pressed=True while button_pressed==True: query=takeCommand().lower() if 'time' in query: #time time() elif 'date' in query: #date date() elif 'wikipedia' in query: #search on wikipedia speak("Searching...") query=query.replace("wikipedia","") result=wikipedia.summary(query,sentences=3) print(result) speak(result) elif 'human game' in query: #tic-tac-toe: human vs human speak("The human versus human game has begun") humanGame() elif 'computer game' in query: #tic-tac-toe: human vs computer speak("The human versus computer game has begun") computerGame(board) elif 'sum' in query: #add the given numbers speak("Please enter the first number") a=float(takeCommand()) if a<0: a=0 speak("The first number is"+str(a)) speak("Please enter the second number") b=float(takeCommand()) if b<0: b=0 speak("The second number is"+str(b)) sum=float(a)+float(b) speak(sum) print("Sum:"+str(sum)) elif 'subtract' in query: #subtract speak("Please enter the first number") c=float(takeCommand()) if c<0: c=0 speak("The first number is"+str(c)) speak("Please enter the second number") d=float(takeCommand()) if d<0: d=0 speak("The second number is"+str(d)) if c>d: sub=float(c)-float(d) else: sub=float(d)-float(c) speak(sub) print("Subtract:"+str(sub)) elif 'multiply' in query: #product speak("Please enter the first number") e=float(takeCommand()) speak("The first number is"+str(e)) speak("Please enter the second number") f=float(takeCommand()) speak("The second number is"+str(f)) product=float(e)*float(f) speak(product) print("Product:"+str(product)) elif 'divide' in query: #divide speak("Please enter the first number") g=float(takeCommand()) speak("The first number is"+str(g)) speak("Please enter the second number") h=float(takeCommand()) speak("The second number is"+str(h)) divide1=float(g)/float(h) divide2=float(h)/float(g) speak(str(g)+"by"+str(h)+"is equal to"+divide1) print("Division:"+str(divide1)) speak(str(h)+"by"+str(g)+"is equal to"+divide2) print("Division:"+str(divide2)) elif 'power' in query: #power speak("Please enter the base number") i=float(takeCommand()) speak("The base number is"+str(i)) speak("Please enter the power") j=float(takeCommand()) speak("The power is"+str(j)) pow=float(i)**float(j) speak(pow) print("Power:"+str(pow)) elif 'send email' in query: #send mail try: speak("What do you want to send?") content=takeCommand() speak(content) to='<EMAIL>' sendEmail(to,content) speak("Email sent") except Exception as e: print(e) speak("Some problem occured while sending the mail, please try again") elif 'search' in query: #google speak("What do you want to search?") chromepath='C:\Program Files (x86)\Google\Chrome\Application\chrome.exe %s' search= takeCommand().lower() wb.get(chromepath).open_new_tab(search+'.com') elif 'logout' in query: #logout os.system('shutdown -1') elif 'shutdown' in query: #shutdown os.system('shutdown /s /t 1') elif 'restart' in query: #restart os.system('shutdown /r /t 1') elif 'play song' in query: #play song songs="D:\\Music_Path" #song directory needs to be updated song=os.listdir(songs) os.startfile(os.path.join(songs,song[0])) elif 'pdf text extractor' in query: #pdf text extractor & reader pdf=open('C:/Users/Ishan/Downloads/Introduction_to.pdf','rb') #pdftext=takeCommand() #pdf=open('C:/Ishan/PDF/'+pdftext+'.pdf','rb') reader=PyPDF2.PdfFileReader(pdf) reader.numPages x=int(input()) #x=int(takeCommand()) page=reader.getPage(x) text=page.extractText() text=text.replace("\n"," ") print(text) speak(text) elif 'remember' in query: #notes to remember speak("What do you want to remember?") data=takeCommand() speak("You said me to remember" + data) remember=open('data.txt','w') remember.write(data) remember.close() elif 'know anything' in query: #read remember notes remember=open('data.txt','r') speak("You said to remember that"+remember.read()) elif 'open photo' in query: #pics im = Image.open(r"C:Picture.png/.jpg") #picture directory need to be updated im.show() elif 'take screenshot' in query: #screenshot screenshot() speak("Screenshot taken") elif 'motivate me' in query: #motivational quotes genQuotes=["The Way Get Started Is To Quit Talking And Begin Doing.","The Pessimist Sees Difficulty In Every Opportunity. The Optimist Sees Opportunity In Every Difficulty.","Don’t Let Yesterday Take Up Too Much Of Today.","You Learn More From Failure Than From Success. Don’t Let It Stop You. Failure Builds Character.","It’s Not Whether You Get Knocked Down, It’s Whether You Get Up.","If You Are Working On Something That You Really Care About, You Don’t Have To Be Pushed. The Vision Pulls You.","People Who Are Crazy Enough To Think They Can Change The World, Are The Ones Who Do.","Failure Will Never Overtake Me If My Determination To Succeed Is Strong Enough.","Entrepreneurs Are Great At Dealing With Uncertainty And Also Very Good At Minimizing Risk. That’s The Classic Entrepreneur.","We May Encounter Many Defeats But We Must Not Be Defeated","Knowing Is Not Enough; We Must Apply. Wishing Is Not Enough; We Must Do.","Imagine Your Life Is Perfect In Every Respect; What Would It Look Like?","We Generate Fears While We Sit. We Overcome Them By Action.","Whether You Think You Can Or Think You Can’t, You’re Right.","Security Is Mostly A Superstition. Life Is Either A Daring Adventure Or Nothing.","The Man Who Has Confidence In Himself Gains The Confidence Of Others.","“The Only Limit To Our Realization Of Tomorrow Will Be Our Doubts Of Today.","Creativity Is Intelligence Having Fun.","What You Lack In Talent Can Be Made Up With Desire, Hustle And Giving 110% All The Time.","Do What You Can With All You Have, Wherever You Are.","You Are Never Too Old To Set Another Goal Or To Dream A New Dream.","To See What Is Right And Not Do It Is A Lack Of Courage.","Reading Is To The Mind, As Exercise Is To The Body.","Fake It Until You Make It! Act As If You Had All The Confidence You Require Until It Becomes Your Reality.","The Future Belongs To The Competent. Get Good, Get Better, Be The Best!","For Every Reason It’s Not Possible, There Are Hundreds Of People Who Have Faced The Same Circumstances And Succeeded.","Things Work Out Best For Those Who Make The Best Of How Things Work Out.","A Room Without Books Is Like A Body Without A Soul.","I Think Goals Should Never Be Easy, They Should Force You To Work, Even If They Are Uncomfortable At The Time.","One Of The Lessons That I Grew Up With Was To Always Stay True To Yourself And Never Let What Somebody Else Says Distract You From Your Goals.","Today’s Accomplishments Were Yesterday’s Impossibilities.","The Only Way To Do Great Work Is To Love What You Do. If You Haven’t Found It Yet, Keep Looking. Don’t Settle.","You Don’t Have To Be Great To Start, But You Have To Start To Be Great.","A Clear Vision, Backed By Definite Plans, Gives You A Tremendous Feeling Of Confidence And Personal Power.","There Are No Limits To What You Can Accomplish, Except The Limits You Place On Your Own Thinking.","Integrity Is The Most Valuable And Respected Quality Of Leadership. Always Keep Your Word.","Leadership Is The Ability To Get Extraordinary Achievement From Ordinary People","Leaders Set High Standards. Refuse To Tolerate Mediocrity Or Poor Performance","Clarity Is The Key To Effective Leadership. What Are Your Goals?","The Best Leaders Have A High Consideration Factor. They Really Care About Their People","Leaders Think And Talk About The Solutions. Followers Think And Talk About The Problems.","The Key Responsibility Of Leadership Is To Think About The Future. No One Else Can Do It For You.","The Effective Leader Recognizes That They Are More Dependent On Their People Than They Are On Them. Walk Softly.","Leaders Never Use The Word Failure. They Look Upon Setbacks As Learning Experiences.","Practice Golden Rule Management In Everything You Do. Manage Others The Way You Would Like To Be Managed.","Leaders Are Anticipatory Thinkers. They Consider All Consequences Of Their Behaviors Before They Act.","The True Test Of Leadership Is How Well You Function In A Crisis.","Leaders Concentrate Single-Mindedly On One Thing– The Most Important Thing, And They Stay At It Until It’s Complete.","The Three ‘C’s’ Of Leadership Are Consideration, Caring, And Courtesy. Be Polite To Everyone.","Respect Is The Key Determinant Of High-Performance Leadership. How Much People Respect You Determines How Well They Perform."] x=random.randint(0,49) print(x) print(genQuotes[x]) speak(genQuotes[x]) elif 'drive me crazy' in query: #joker quotes jokerQuotes=["The only sensible way to live in this world is without rules.","Smile, because it confuses people. Smile, because it’s easier than explaining what is killing you inside.","What doesn’t kill you, simply makes you stranger!","April sweet is coming in, let the feast of fools begin!","They need you right now, but when they don’t, they’ll cast you out like a leper!","As you know, madness is like gravity…all it takes is a little push.","Let’s put a smile on that face!","We stopped checking for monsters under our bed, when we realized they were inside us.","If you’re good at something, never do it for free.","When the chips are down, these civilized people, they’ll eat each other.","Very poor choice of words.","Why don’t we cut you up into little pieces and feed you to your pooches? Hm? And then we’ll see how loyal a hungry dog really is.","You have nothing, nothing to threaten me with. Nothing to do with all your strength.","Introduce a little anarchy. Upset the established order, and everything becomes chaos. I’m an agent of chaos…","I like you, but I want to kill you.","Do I really look like a guy with a plan? You know what I am? I’m a dog chasing cars. I wouldn’t know what to do with one if I caught it! You know, I just… *do* things.","Why so serious?","Is it just me or is it getting crazier out there","And I won’t kill you because you’re just too much fun. I think you and I are destined to do this forever.","Those mob fools want you gone. So they can get back to the way things were. But I know the truth, there’s no going back. You’ve changed things…forever.","You can’t rely on anyone these days, you gotta do everything yourself, don’t we? That’s ok, I came prepared, it’s a funny world we live in. Speaking of which, you know how I got these scars?","As though we were made for each other… Beauty and the Beast. Of course, if anyone else calls you beast, I’ll rip their lungs out.","See, this is how crazy Batman’s made Gotham! If you want order in Gotham, Batman must take off his mask and turn himself in. Oh, and every day he doesn’t, people will die, starting tonight. I’m a man of my word.","See I’m a man of simple taste. I like things such as gunpowder…dynamite and…gasoline!","Until their spirit breaks completely. Until they get a good look at the real <NAME>, and all the heroic things he’s done.","This city deserves a better class of criminal. And I’m gonna give it to them!","And he didn’t die all at once. It was hours before the screaming stopped. I almost didn’t get to sleep that night. That was the last time I’d used crushed glass","All I have are negative thoughts.","I’m only laughing on the outside. My smile is just skin deep. If you could see inside, I’m really crying. You might join me for a weep.","I’ve been using it as a journal, but also as a joke diary, if I have any thoughts or frustrations. I think I told you, I’m pursuing a career in standup comedy.","I am not someone who is loved. I’m an idea. A state of mind.","It’s not about the money, it’s about sending a message. Everything burns!","Nobody panics when things go according to plan, even if the plan is horrifying!","The real joke is your stubborn, bone deep conviction that somehow, somewhere, all of this makes sense! That’s what cracks me up each time!","I used to think that my life was a tragedy. But now I realize, it’s a comedy.","It’s funny, when I was a little boy, and told people I was going to be a comedian, everyone laughed at me. Well, no one’s laughing now.","I’ll tell you what you get! You get what you f**king deserve!","The strongest hearts have the most scars!","Their morals, their code; it’s a bad joke. Dropped at the first sign of trouble. They’re only as good as the world allows them to be. You’ll see- I’ll show you. When the chips are down these, uh, civilized people? They’ll eat each other. See I’m not a monster, I’m just ahead of the curve.","Don’t test the monster in me!","You’re my friend, too.","I got you a kitty.","I’ll never understand why Superman wears the same outfits every day.","Why can’t a girl be nice to a guy without the mook trying to murder her?","My love for my Joker was stronger than their mad house walls","Look…I’m only doing this to help you. Let’s try this again. Acceptance.","They’ve got Hello Kitty on them. They’re fashionable.","Every woman has a crazy side that only the right man can bring out.","In case ya ain’t figured it out, today’s the Joker’s big homecoming, and you’re the guest of honor.","If I get mad at you that means I still care. Worry when I don’t get mad.","You got the look. And a lotta nerve. What you don’t have is the right. Joker was a hero. You’re not fit to lick his boutonniere!","Now you feel like you’ve someone by your side-to share the journey with you.","I love him not for the way he silenced my demons, but for the way his demons dances with mine.","You didn’t like my show? Well, try this one. It’s called ‘Animals Attack People I Hate.’ It’s a comedy.","What’s wrong with you, B-man? You come into Mista J’s home and just start smashing it to pieces! Don’t you know he’s sick?","I promised you some entertainment, right, boys?","Ladies and jerks! There’s been a slight change in tonight’s show. Insteada the opera robbin’ you for somethin’ like a thousand bucks a seat — we’re gonna rob you! Believe me folks, I’ve seen it already. I’m doin’ ya a big favor!","Let me get you outta here girl. We can team up again. Drive all the boys crazy. Ya know? Like the old days…","Nice work,butterfingers, why didn’tcha just turn the batsignal on while you were at it!","I sleep where I want, when I want, with who I want.","I dunno about ‘genius,’ but I do got a PhD.","Oh buckets full, honey. I was tryin’ too hard to impress the wrong guy. Kinda like you with Superman.","You really put the ‘fun’ in funeral"] y=random.randint(0,62) print(y) print(jokerQuotes[y]) speak(jokerQuotes[y]) elif 'stocks' in query: #stock rate x=si.get_live_price('NSE') print("$",x) speak("$",str(x)) elif 'toss a coin' in query: #toss coin n=1 p=0.5 x=np.random.binomial(n,p) print(x) speak(x) if(x==1): print("Heads") speak("Heads") else: print("Tails") speak("Tails") elif 'abilities' in query: #abilities abilities=["I can go offline","I can open your favourite pictures","I can log you out from the system, restart the system & even shutdown the system on your command","I can extract text from pdf & can speak the extracted text","I can play your favourite songs","I can tell you the time & date","I can search for a particular topic on Wikipedia","I can google for you","I can perform basic maths operations like add,subtract, multiply, divide & deal with powers","I can beat you in Tic Tac Toe game","I can help you play Tic Tac Toe game with your friend","I can send a mail for you","I can remember things you want me to remember","I can take Screenshots"] z=random.randint(0,4) y=random.randint(5,9) x=random.randint(10,13) speak(abilities[x]) print(abilities[x]) speak(abilities[y]) print(abilities[y]) speak(abilities[z]) print(abilities[z]) elif 'future abilities' in query: #what to do next speak("Alarm") speak("Stopwatch") speak("Predict weather") speak("Current weather conditions") speak("Pykemon") speak("Handwritten data extraction & reading") print("Alarm") print("Stopwatch") print("Predict weather") print("Current weather conditions") print("Pykemon") print("Handwritten data extraction & reading") elif 'translate to spanish' in query: #translate into spanish language translator=Translator() translations=translator.translate(input(),dest='es') print(translations.origin, '-->', translations.text) print(translations.text) speak(translations.text) elif 'translate to french' in query: #translate into french language translator=Translator() translations=translator.translate(input(),dest='fr') print(translations.origin, '-->', translations.text) print(translations.text) speak(translations.text) elif 'translate to french' in query: #translate into japanese language translator=Translator() translations=translator.translate(input(),dest='ja') print(translations.origin, '-->', translations.text) print(translations.text) speak(translations.text) elif 'translate to tamil' in query: #translate into tamil language translator=Translator() translations=translator.translate(input(),dest='ta') print(translations.origin, '-->', translations.text) print(translations.text) speak(translations.text) elif 'translate to punjabi' in query: #translate into punjabi language translator=Translator() translations=translator.translate(input(),dest='pa') print(translations.origin, '-->', translations.text) print(translations.text) speak(translations.text) elif 'translate to russian' in query: #translate into russian language translator=Translator() translations=translator.translate(input(),dest='ru') print(translations.origin, '-->', translations.text) print(translations.text) speak(translations.text) elif 'translate to german' in query: #translate into german language translator=Translator() translations=translator.translate(input(),dest='de') print(translations.origin, '-->', translations.text) print(translations.text) speak(translations.text) elif 'offline' in query: #quit break #or use this "button_pressed=False" def quit(): top.destroy() #quit/exit B1 = tkinter.Button(top, text ="Give Command", command = giveCommand) B2 = tkinter.Button(top, text="Quit", command = quit) B1.pack() B2.pack() top.mainloop()
Parallel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="dUlpPiVBB2Ek" # # Using EOF analysis for noise reduction in your data # [![Latest release](https://badgen.net/github/release/Naereen/Strapdown.js)](https://github.com/eabarnes1010/course_objective_analysis/tree/main/code) # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/eabarnes1010/course_ml_ats/blob/main/code/unsupervised_eof_noise_reduction.ipynb) # # # Demonstration of how to use EOF analysis to remove "noise" from your data set. # + colab={"base_uri": "https://localhost:8080/"} id="cTpO7vk_ObTR" executionInfo={"status": "ok", "timestamp": 1649418817133, "user_tz": 360, "elapsed": 6, "user": {"displayName": "<NAME>", "userId": "07585723222468022011"}} outputId="42bea3d8-8296-4234-bf6b-682db1357f75" try: import google.colab IN_COLAB = True except: IN_COLAB = False print('IN_COLAB = ' + str(IN_COLAB)) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 128153, "status": "ok", "timestamp": 1649418945282, "user": {"displayName": "<NAME>", "userId": "07585723222468022011"}, "user_tz": 360} id="QCa5UlPVB2Eo" outputId="8d148e1d-a57d-4d1e-b15f-9d67b8ad84e9" if IN_COLAB: # !apt-get install libproj-dev proj-data proj-bin # !apt-get install libgeos-dev # !pip install cython # !pip install cartopy # !apt-get -qq install python-cartopy python3-cartopy # !pip uninstall -y shapely # !pip install shapely --no-binary shapely # !pip install netcdf4 #Package for loading in netcdf4 files # !pip install cmocean #Package with beautiful colormaps import numpy as np import matplotlib.pyplot as plt import importlib import scipy.stats as stats import numpy.ma as ma import numpy.linalg as LA import matplotlib as mpl import xarray as xr import scipy import cartopy as ct import cartopy.crs as ccrs import cmocean as cmocean # + id="fug6BdsiB2Eu" mpl.rcParams['figure.dpi'] = 100 CL = 0. mapProj = ct.crs.Robinson(central_longitude = CL) # + [markdown] id="qKcftfMpB2Ex" # ### Globe plotting functions # + id="89T6yA48B2Ez" def drawOnGlobe(ax, data, lats, lons, cmap='coolwarm', vmin=None, vmax=None, inc=None, cbarBool=True, contourMap=[], contourVals = [], fastBool=False, extent='both'): data_crs = ct.crs.PlateCarree() data_cyc, lons_cyc = add_cyclic_point(data, coord=lons) #fixes white line by adding point#data,lons#ct.util.add_cyclic_point(data, coord=lons) #fixes white line by adding point ax.set_global() ax.coastlines(linewidth = 1.2, color='black') if(fastBool): image = ax.pcolormesh(lons_cyc, lats, data_cyc, transform=data_crs, cmap=cmap) else: image = ax.pcolor(lons_cyc, lats, data_cyc, transform=data_crs, cmap=cmap) if(np.size(contourMap) !=0 ): contourMap_cyc, __ = add_cyclic_point(contourMap, coord=lons) #fixes white line by adding point ax.contour(lons_cyc,lats,contourMap_cyc,contourVals, transform=data_crs, colors='fuchsia') if(cbarBool): cb = plt.colorbar(image, shrink=.75, orientation="vertical", pad=.02, extend=extent) cb.ax.tick_params(labelsize=6) else: cb = None image.set_clim(vmin,vmax) return cb, image def add_cyclic_point(data, coord=None, axis=-1): # had issues with cartopy finding utils so copied for myself if coord is not None: if coord.ndim != 1: raise ValueError('The coordinate must be 1-dimensional.') if len(coord) != data.shape[axis]: raise ValueError('The length of the coordinate does not match ' 'the size of the corresponding dimension of ' 'the data array: len(coord) = {}, ' 'data.shape[{}] = {}.'.format( len(coord), axis, data.shape[axis])) delta_coord = np.diff(coord) if not np.allclose(delta_coord, delta_coord[0]): raise ValueError('The coordinate must be equally spaced.') new_coord = ma.concatenate((coord, coord[-1:] + delta_coord[0])) slicer = [slice(None)] * data.ndim try: slicer[axis] = slice(0, 1) except IndexError: raise ValueError('The specified axis does not correspond to an ' 'array dimension.') new_data = ma.concatenate((data, data[tuple(slicer)]), axis=axis) if coord is None: return_value = new_data else: return_value = new_data, new_coord return return_value # + [markdown] id="w55AcPHcB2E2" # ### Data Description # # 2-meter monthly air temperature anomalies from the Berkeley Earth Surface Temperature (BEST) dataset. Below I sub-sample the data to only grab every fourth gridbox (to reduce the data size). # + colab={"base_uri": "https://localhost:8080/", "height": 957} executionInfo={"elapsed": 14212, "status": "ok", "timestamp": 1649418959491, "user": {"displayName": "<NAME>", "userId": "07585723222468022011"}, "user_tz": 360} id="GfpsAth5B2E4" outputId="cdd8d73d-8bd7-4f0e-e014-fc43ea90c167" # !pip install wget import wget if(IN_COLAB): # # !wget https://github.com/eabarnes1010/ats655-coursematerial/raw/master/jupyter_notebooks/data/BEST-1950-2018-monthlyTemp.nc filename = wget.download('https://eabarnes-data.atmos.colostate.edu/course_objective_analysis/BEST-1950-2018-monthlyTemp.nc') ds = xr.open_dataset(filename) # open the data else: ds = xr.open_dataset('../data/BEST-1950-2018-monthlyTemp.nc') # open the data ds = ds.sel(time = slice('1960-01-01', '2020-01-01')) # only grab certain years ds = ds[{'latitude': slice(None, None, 4), 'longitude': slice(None, None, 4)}] # downsample to make easier to manage ds = ds.fillna(0.) # fill Nans with 0 for a zero anomaly print(ds) ds['temperature'][0].plot() # + [markdown] id="hsw98RG8B2FC" # ### Calculate the EOFs # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5, "status": "ok", "timestamp": 1649418959492, "user": {"displayName": "<NAME>", "userId": "07585723222468022011"}, "user_tz": 360} id="Sl597eBCB2FE" outputId="0b96b5a0-833e-422f-8777-7462b5da3f59" # get data into a 2D matrix X = ds['temperature'].values X = X.reshape(X.shape[0],X.shape[1]*X.shape[2]) X.shape # + id="1G4yUKIqL9wY" # calculate the covariance matrix in the spatial dimension C = 1./np.size(X,axis = 0)*(np.dot(np.transpose(X),X)) # calculate eigenvalues and eigenvectors of C lam, E = LA.eig(C) Z = np.dot(X,E) # convert eigenvalues to percent variance explained pve = 100.*lam/np.sum(lam) # + [markdown] id="GGDXY8OuDG6Y" # ## Plotting functions # + [markdown] id="PXR4A5u4B2FT" # ### Plot the results: eigenvalues # + id="Iq-sWvJ8B2FU" def plot_eigenvalues(): # plot results: EIGENVALUES plt.figure() plt.plot(np.arange(1,np.size(pve)+1.),pve,'o-',linewidth = 1, color = 'black', markersize = 4) plt.plot([np.max(eof_number)+1.5,np.max(eof_number)+1.5],[0,20],'--k') plt.title('Variance Retained = ' + str(np.round(np.sum(pve[eof_number]))) + '%') plt.xlim(-0.5, np.max([60.,eof_number[-1]+1.5])) plt.ylim(0,np.max(pve)*1.02) plt.xlabel('eigenvalue position') plt.ylabel('percent variance explained (%)') # plot error bars according to North et al.abs # here we will assume that all of the data is independent (is that a good assumption?) # such that Nstar = N Nstar = np.size(X,axis = 1) eb = pve*np.sqrt(2./Nstar) plt.errorbar(np.arange(1,np.size(pve)+1.),pve,yerr = eb/2, xerr = None, linewidth = 1, color = 'black') plt.show() # + [markdown] id="MEmKy7jFB2Ff" # ### Plot the results: compare the raw map and the smoothed map # + id="62UcKVEhB2Fg" def plot_reconstructed_maps(): # plot the output maxVal = 3. titleSize = 10 cmap = cmocean.cm.balance fastBool = True lats = ds['latitude'] lons = ds['longitude'] plt.figure(figsize=(15,4.5)) ax = plt.subplot(1,2,1,projection=mapProj) xplot = X.reshape(X.shape[0],len(lats),len(lons))[month_to_plot,:,:] cb, image = drawOnGlobe(ax, xplot, lats, lons, cmap=cmap, vmin = -maxVal, vmax=maxVal, cbarBool=True, fastBool=fastBool) cb.set_label('', fontsize=titleSize) cb.ax.tick_params(labelsize=titleSize) plt.title('Month = ' + str(month_to_plot) + '\nRaw Data', fontsize=titleSize) ax = plt.subplot(1,2,2,projection=mapProj) xplot = Xrecon.reshape(Xrecon.shape[0],len(lats),len(lons))[month_to_plot,:,:] if(np.max(xplot.flatten())<1.): maxVal = np.max(xplot.flatten()) cb, image = drawOnGlobe(ax, xplot, lats, lons, cmap=cmap, vmin = -maxVal, vmax=maxVal, cbarBool=True, fastBool=fastBool) cb.set_label('', fontsize=titleSize) cb.ax.tick_params(labelsize=titleSize) plt.title('Month = ' + str(month_to_plot) + '\nsmoothed by retaining ' + str(eof_number[-1]+1) + ' of ' + str(len(pve)) + ' EOFs' + '\nvariance explained = ' + str(np.round(np.sum(pve[eof_number]))) + '%', fontsize=titleSize) plt.tight_layout() plt.show() # + id="KLqc0t3kL9wY" # + [markdown] id="go1A3zGZB2E9" # # User input # + id="lgS9XAaEB2E_" executionInfo={"status": "ok", "timestamp": 1649419312846, "user_tz": 360, "elapsed": 2400, "user": {"displayName": "<NAME>", "userId": "07585723222468022011"}} colab={"base_uri": "https://localhost:8080/", "height": 790} outputId="01659e83-443a-4c80-bba1-1a4ecf4c079d" #========== MODIFY ============= eof_number = np.arange(0,100) #how many EOFs to retain, maximum = 4050 month_to_plot = 100 #=============================== plot_eigenvalues() #--------------------------- # reduce E and Z to the number of eofs you want to retain, # then reconstruct X # retain only certain eofs Zrecon = np.copy(Z[:,eof_number]) Erecon = np.copy(E[:,eof_number]) # reconstruct X Xrecon = np.dot(Zrecon,np.transpose(Erecon)) #--------------------------- plot_reconstructed_maps() # + id="yPpI-qkTDQ9A"
code/unsupervised_eof_noise_reduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ## Basic Concepts # **Search Key** # **Index File** # - Access Types # - point search # - range search # - Access Time # - Insertion Time # - Deletion Time # - Space Overhead # # dense index : 전체 인덱스 # sparse index : 일부만 인덱스화 # clustring index 에만 적용 가능 # # clustring index : index 와 data 순서가 같음 # range search 유리 # non clustring index : data와 별개로 index 구성 # # # # # # # # # # # # # # # # #
_ipynbs/study-db-index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="N22NDjIjjxcI" # # Distributed Training with GPUs on Cloud AI Platform # # **Learning Objectives:** # 1. Setting up the environment # 1. Create a model to train locally # 1. Train on multiple GPUs/CPUs with MultiWorkerMirrored Strategy # # In this notebook, we will walk through using Cloud AI Platform to perform distributed training using the `MirroredStrategy` found within `tf.keras`. This strategy will allow us to use the synchronous AllReduce strategy on a VM with multiple GPUs attached. # # Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [Solution Notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/production_ml/solutions/distributed_training.ipynb) for reference. # # + id="Nny3m465gKkY" colab_type="code" colab={} # !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst # + [markdown] id="63e-3SdLlh7K" # Next we will configure our environment. Be sure to change the `PROJECT_ID` variable in the below cell to your Project ID. This will be the project to which the Cloud AI Platform resources will be billed. We will also create a bucket for our training artifacts (if it does not already exist). # - # ## Lab Task #1: Setting up the environment # # + id="TKZYJbBkBcOk" outputId="8dac5564-c864-4db8-9cc2-0c8036b75eb8" colab={"base_uri": "https://localhost:8080/", "height": 88} import os # TODO 1 PROJECT_ID = "cloud-training-demos" # Replace with your PROJECT BUCKET = PROJECT_ID REGION = 'us-central1' os.environ["PROJECT_ID"] = PROJECT_ID os.environ["BUCKET"] = BUCKET # + [markdown] id="Jth9W8NtmNUD" # Since we are going to submit our training job to Cloud AI Platform, we need to create our trainer package. We will create the `train` directory for our package and create a blank `__init__.py` file so Python knows that this folder contains a package. # + id="cavM78bf_mU4" # !mkdir train # !touch train/__init__.py # + [markdown] id="PIMQo_lImhn_" # Next we will create a module containing a function which will create our model. Note that we will be using the Fashion MNIST dataset. Since it's a small dataset, we will simply load it into memory for getting the parameters for our model. # # Our model will be a DNN with only dense layers, applying dropout to each hidden layer. We will also use ReLU activation for all hidden layers. # + id="88-9WeCQ_mU9" outputId="ae92afd1-93bd-49e5-aeda-6f6e177ac186" colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile train/model_definition.py import tensorflow as tf import numpy as np # Get data (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() # add empty color dimension x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) def create_model(): model = tf.keras.models.Sequential() model.add(tf.keras.layers.Flatten(input_shape=x_train.shape[1:])) model.add(tf.keras.layers.Dense(1028)) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(512)) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(256)) model.add(tf.keras.layers.Activation('relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(10)) model.add(tf.keras.layers.Activation('softmax')) return model # + [markdown] id="0DOKsnDhnU87" # Before we submit our training jobs to Cloud AI Platform, let's be sure our model runs locally. We will call the `model_definition` function to create our model and use `tf.keras.datasets.fashion_mnist.load_data()` to import the Fashion MNIST dataset. # - # ## Lab Task #2: Create a model to train locally # # + id="r8bPcX7T-SH1" outputId="1069992b-0599-4b19-f7e8-b4cefa7cb6bb" colab={"base_uri": "https://localhost:8080/", "height": 215} import os import time import tensorflow as tf import numpy as np from train import model_definition #Get data # TODO 2 # TODO -- Your code here. print("Training time without GPUs locally: {}".format(time.time() - start)) # + [markdown] id="L_U-u_tZ_mVK" # # # ## Train on multiple GPUs/CPUs with MultiWorkerMirrored Strategy # # + [markdown] id="P0VQ7GUsn8wb" # That took a few minutes to train our model for 20 epochs. Let's see how we can do better using Cloud AI Platform. We will be leveraging the `MultiWorkerMirroredStrategy` supplied in `tf.distribute`. The main difference between this code and the code from the local test is that we need to compile the model within the scope of the strategy. When we do this our training op will use information stored in the `TF_CONFIG` variable to assign ops to the various devices for the AllReduce strategy. # # After the training process finishes, we will print out the time spent training. Since it takes a few minutes to spin up the resources being used for training on Cloud AI Platform, and this time can vary, we want a consistent measure of how long training took. # # Note: When we train models on Cloud AI Platform, the `TF_CONFIG` variable is automatically set. So we do not need to worry about adjusting based on what cluster configuration we use. # + id="_AF4VDhT_mVg" outputId="e2e1a496-a369-4f62-856a-b2fe88178add" colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile train/train_mult_worker_mirrored.py import os import time import tensorflow as tf import numpy as np from . import model_definition strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() #Get data (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() # add empty color dimension x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) def create_dataset(X, Y, epochs, batch_size): dataset = tf.data.Dataset.from_tensor_slices((X, Y)) dataset = dataset.repeat(epochs).batch(batch_size, drop_remainder=True) return dataset ds_train = create_dataset(x_train, y_train, 20, 5000) ds_test = create_dataset(x_test, y_test, 1, 1000) print('Number of devices: {}'.format(strategy.num_replicas_in_sync)) with strategy.scope(): model = model_definition.create_model() model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3, ), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) start = time.time() model.fit( ds_train, validation_data=ds_test, verbose=2 ) print("Training time with multiple GPUs: {}".format(time.time() - start)) # - # ## Lab Task #3: Training with multiple GPUs/CPUs on created model using MultiWorkerMirrored Strategy # # + [markdown] id="ViUZcz7Tp9Kp" # First we will train a model without using GPUs to give us a baseline. We will use a consistent format throughout the trials. We will define a `config.yaml` file to contain our cluster configuration and the pass this file in as the value of a command-line argument `--config`. # # In our first example, we will use a single `n1-highcpu-16` VM. # + id="sBJw5hJAadVW" outputId="8f377e27-e1c3-4a44-e5ef-21c37a7f64fd" colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile config.yaml # TODO 3a # TODO -- Your code here. # + id="_mlylgvCaeXW" outputId="383eb016-e791-4cfc-f382-72acd47932b4" colab={"base_uri": "https://localhost:8080/", "height": 195} language="bash" # # now=$(date +"%Y%m%d_%H%M%S") # JOB_NAME="cpu_only_fashion_minst_$now" # # gcloud ai-platform jobs submit training $JOB_NAME \ # --staging-bucket=gs://$BUCKET \ # --package-path=train \ # --module-name=train.train_mult_worker_mirrored \ # --runtime-version=2.3 \ # --python-version=3.7 \ # --region=us-west1 \ # --config config.yaml # + [markdown] id="g9tji3XRqbi-" # If we go through the logs, we see that the training job will take around 5-7 minutes to complete. Let's now attach two Nvidia Tesla K80 GPUs and rerun the training job. # + id="fCGWARBH_mVi" outputId="64f42735-7356-40c4-8460-afc096a9896c" colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile config.yaml # TODO 3b # TODO -- Your code here. # + id="UmXeeg6r_mVk" outputId="0017abd4-077d-4089-f664-d15dac69f755" colab={"base_uri": "https://localhost:8080/", "height": 195} language="bash" # # now=$(date +"%Y%m%d_%H%M%S") # JOB_NAME="multi_gpu_fashion_minst_2gpu_$now" # # gcloud ai-platform jobs submit training $JOB_NAME \ # --staging-bucket=gs://$BUCKET \ # --package-path=train \ # --module-name=train.train_mult_worker_mirrored \ # --runtime-version=2.3 \ # --python-version=3.7 \ # --region=us-west1 \ # --config config.yaml # + [markdown] id="0fMLxLOgq7mc" # That was a lot faster! The training job will take upto 5-10 minutes to complete. Let's keep going and add more GPUs! # + id="ocXYk61hGRG_" outputId="ff6d1dc4-ab02-4ab7-acea-2420c5c1d5aa" colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile config.yaml # TODO 3c # TODO -- Your code here. # + id="MKRRVDLhZoj3" outputId="4012bb48-d7f9-4e6b-d034-daa258cc636f" colab={"base_uri": "https://localhost:8080/", "height": 195} language="bash" # # now=$(date +"%Y%m%d_%H%M%S") # JOB_NAME="multi_gpu_fashion_minst_4gpu_$now" # # gcloud ai-platform jobs submit training $JOB_NAME \ # --staging-bucket=gs://$BUCKET \ # --package-path=train \ # --module-name=train.train_mult_worker_mirrored \ # --runtime-version=2.3 \ # --python-version=3.7 \ # --region=us-west1 \ # --config config.yaml # + [markdown] id="F3PfreD5rfgE" # The training job will take upto 10 minutes to complete. It was faster than no GPUs, but why was it slower than 2 GPUs? If you rerun this job with 8 GPUs you'll actually see it takes just as long as using no GPUs! # # The answer is in our input pipeline. In short, the I/O involved in using more GPUs started to outweigh the benefits of having more available devices. We can try to improve our input pipelines to overcome this (e.g. using caching, adjusting batch size, etc.). # # + id="Z8pPmHAbn4CB"
courses/machine_learning/deepdive2/production_ml/labs/distributed_training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Deep learning refers to neural networks with multiple hidden layers that can learn increasingly abstract representations of the input data. # + import cv2 import numpy as np np.random.seed(123) # for reproducibility from keras.models import Sequential, load_model from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.utils import np_utils from keras.datasets import mnist # - # # Load pre-shuffled MNIST data into train and test sets (X_train, y_train), (X_test, y_test) = mnist.load_data() # # Preprocess input data X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) X_test = X_test.reshape(X_test.shape[0], 28, 28, 1) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 # # Preprocess class labels Y_train = np_utils.to_categorical(y_train, 10) Y_test = np_utils.to_categorical(y_test, 10) # # Define model architecture # + model = Sequential() model.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(28,28,1))) model.add(Convolution2D(32, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) # - # # Compile model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # # Fit model on training data model.fit(X_train, Y_train, batch_size=32, epochs=3, verbose=1) # # Load a trained model model = load_model('deeplearning_snake_model.h5') # # Predict using the model # + img_width, img_height = 28, 28 img = cv2.imread('mystery_number.png', cv2.IMREAD_GRAYSCALE) img = cv2.resize(img, (img_width, img_height)) arr = np.array(img).reshape((img_width,img_height,1)) arr = np.expand_dims(arr, axis=0) prediction = model.predict(arr)[0] bestclass = '' bestconf = -1 for n in [0,1,2,3,4,5,6,7,8,9]: if (prediction[n] > bestconf): bestclass = str(n) bestconf = prediction[n] print('I think this digit is a ' + bestclass + ' with ' + str(bestconf * 100) + '% confidence.') # - # # Save the model model.save('deeplearning_snake_model.h5')
04-deep-learning-identify-digits/keras_snake.py.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 层次 # reki 中的 `load_` 系列函数,使用 `level_type` 参数指定层次类型,使用 `level` 参数指定层次数值。 # + import xarray as xr import eccodes from reki.format.grib.eccodes import ( load_field_from_file, load_message_from_file, load_messages_from_file, ) xr.set_options(display_style="text") # - # ## 准备数据 # # 本示例使用 GRAPES GFS 的 GRIB 2 数据为例。 # # 获取 GRAPES GFS 2020 年 4 月 19 日 00 时次 024 时效的 GRIB 2 文件路径 # + from reki.data_finder import find_local_file data_path = find_local_file( "grapes_gfs_gmf/grib2/orig", start_time="2020041900", forecast_time="024h", ) data_path # - # 获取 GRAPES GFS 2020 年 4 月 19 日 00 时次 024 时效的等模式面层 GRIB 2 文件路径 # + from reki.data_finder import find_local_file modelvar_path = find_local_file( "grapes_gfs_gmf/grib2/modelvar", start_time="2020041900", forecast_time="024h", ) modelvar_path # - # ## 层次类型 # # `level_type` 可以接收以下类型的参数: # # - `str`:使用 `pl`,`sfc` 或 `ml`,使用变量的 `typeOfLevel` 检索 # - `dict`:设置用于过滤的键值对 # - `list`:指定多个层次类型,列表对象支持上面两个类型 # ### 字符串 # 支持两种类型的字符串 # # - 简化的层次类型,`pl`,`sfc` 和 `ml` # - GRIB key `typeOfLevel` 的值 # #### 简化层次类型 # # `pl` 表示等压面,`sfc` 表示地面场,`ml` 表示等模式面。 # ##### pl # # `pl` 表示等压面,单位是 `hPa`。在 nwpc-data 内部会被替换为 `dict` 类型 # # ```json # { # "typeOfFirstFixedSurface": 100 # } # ``` # 检索 500 hPa 的位势高度场。 h850 = load_field_from_file( data_path, parameter="gh", level_type="pl", level=850, ) h850 # 检索 0.1 hPa 的温度场 t0p1 = load_field_from_file( data_path, parameter="t", level_type="pl", level=0.1, ) t0p1 # 检索 850hPa 温度场的 GRIB 2 消息。 message_t850 = load_message_from_file( data_path, parameter="t", level_type="pl", level=850, ) print( eccodes.codes_get(message_t850, "shortName"), eccodes.codes_get(message_t850, "typeOfLevel"), eccodes.codes_get(message_t850, "level"), ) eccodes.codes_release(message_t850) # ##### sfc # # **警告**:sfc 层次类型正在开发当中。 # # `sfc` 表示地面场,当前函数会将 `sfc` 替换为 # # ```json # { # "typeOfLevel": "surface" # } # ``` # # 检索地面温度场。 t_sfc = load_field_from_file( data_path, parameter="t", level_type="sfc", level=0, ) t_sfc # ##### ml # # `ml` 表示模式层。当前函数会将 `ml` 替换为 GRAPES 模式的模式面数据使用的层次类型 # # ```json # { # "typeOfFirstFixedSurface": 131 # } # ``` # # 检索 20 层温度场。 t_ml20 = load_field_from_file( modelvar_path, parameter="t", level_type="ml", level=20, ) t_ml20 # 检索第 60 层的 u 分量。 message_u_ml60 = load_message_from_file( modelvar_path, parameter="u", level_type="ml", level=60, ) print( eccodes.codes_get(message_u_ml60, "shortName"), eccodes.codes_get(message_u_ml60, "typeOfFirstFixedSurface"), eccodes.codes_get(message_u_ml60, "level"), ) eccodes.codes_release(message_u_ml60) # #### typeOfLevel # # 使用 GRIB key `typeOfLevel` 进行检索,例如: # # - `isobaricInhPa` # - `isobaricInPa` # - `surface` # - `heightAboveGround` # - `meanSea` # - `depthBelowLandLayer` # - `cloudBase` # - `atmosphere` # - `nominalTop` # - ... # # 更多值可以访问 https://apps.ecmwf.int/codes/grib/format/edition-independent/3/ 查询。 # # 检索 2 米最高温度。 t2max = load_field_from_file( data_path, parameter="tmax", level_type="heightAboveGround", level=2, ) t2max # 检索 0- 1000m 垂直风切变。 # + message_vwsh_h1000 = load_message_from_file( data_path, parameter="vwsh", level_type="heightAboveGroundLayer", level=1000, ) print( eccodes.codes_get(message_vwsh_h1000, "shortName"), eccodes.codes_get(message_vwsh_h1000, "typeOfLevel"), eccodes.codes_get(message_vwsh_h1000, "level"), ) eccodes.codes_release(message_vwsh_h1000) # - # 可以使用 `grib_ls` 命令查看 GRIB 2 文件中包含的所有层次类型。 # !grib_ls -p typeOfLevel {data_path} | tail -n +3 | head -n -3 | sort | uniq # ### dict # # GRIB 2 消息的层次类型通常由下面几个 GRIB Key 确定: # # - `typeOfFirstFixedSurface` # - `scaleFactorOfFirstFixedSurface` # - `scaledValueOfFirstFixedSurface` # - `typeOfSecondFixedSurface` # - `scaleFactorOfSecondFixedSurface` # - `scaledValueOfSecondFixedSurface` # # nwpc-data 支持使用字典格式指定层次类型的筛选条件,包含上述 key 的键值对。 # # GRAPES 模式的模式面数据无法直接用 `typeOfLevel` 检索。 # # 检索第 40 层模式面的湿度场。 q_ml40 = load_field_from_file( modelvar_path, parameter="q", level_type={ "typeOfFirstFixedSurface": 131 }, level=40, ) q_ml40 # 当然,可以使用内置的 `ml` 检索模式面要素场。 # ### list # # 列表表示**逻辑或**关系,要素场只要满足其中一个条件就可以符合条件。 # 主要用于从文件中批量加载数据。 # # 列表对象可以是字符串或dict。 # # 检索文件中所有等压面层和 `surface` 层的温度场。 # + tset = load_messages_from_file( data_path, parameter="t", level_type=["pl", "surface"], ) for t in tset: print( eccodes.codes_get(t, "shortName"), eccodes.codes_get(t, "typeOfLevel"), eccodes.codes_get(t, "level"), ) eccodes.codes_release(t) # - # ## 层次值 # # `level` 可以接收以下类型的参数: # # - `int` 或 `float`:数值类型 # - `list`:指定多个层次值,用于批量检索 # ### 数值类型 # # 默认情况下,使用 GRIB Key `level` 值进行比较,上面示例已介绍。 # # 但下面的示例中使用 `typeOfLevel="isobaricInhPa"` 检索 1.5hPa 的位势高度场会失败。 h1p5 = load_field_from_file( data_path, parameter="gh", level_type="isobaricInhPa", level=1.5, ) print(h1p5) # 使用 grib_ls 命令,可以看到文件中有多个 `isobaricInhPa` 为 1 的位势高度场。 # 因为 eccodes 中 `isobaricInhPa` 的 `level` 值不支持小数。 # !grib_ls -w typeOfLevel=isobaricInhPa,level=1,shortName=gh {data_path} # 可以使用 `pl` 类型检索,单位是 `hPa`。 h1p5 = load_field_from_file( data_path, parameter="gh", level_type="pl", level=1.5, ) h1p5 # ### 列表 # # 如果想要检索特定几个层次的要素,可以使用列表。 hset = load_field_from_file( data_path, parameter="gh", level_type="pl", level=[500, 850, 925, 950, 1000], ) hset # `hset` 的层次维度 `pl` 没有按照列表中给定的顺序排列,需要用户手动修改维度索引。 # ## 默认值 # # `level_type` 和 `level` 的默认值都为 `None`。 # ### 使用 `level_type` 默认值 # # 省略 `level_type` 在某些情况下比较有用。 # # 比如示例文件中的垂直风切变 `vwsh` 只有一种层次类型 `heightAboveGroundLayer`,可以省略该值。 # + vwsh_h1000 = load_field_from_file( data_path, parameter="vwsh", level=1000, ) vwsh_h1000 # - # **注意**:`load_field_` 系列函数只能检索一种层次类型。如果给定的 `level` 可能对应多种层次类型,则不能保证执行会成功。 # 10 对应多个层次类型:isobaricInhPa,isobaricInPa 和 heightAboveGround # 不能保证返回何种类型数据 u = load_field_from_file( data_path, parameter="u", level=10, ) u # ### 使用 `level` 默认值 # # 省略 `level` 会加载符合 `level_type` 的所有要素场。 # # 下面示例使用 `load_field_from_file` 加载所有等压面层的温度场。 # # 注意,返回的数据包含三个维度:层次 `pl`,纬度 `latitude`,经度 `longitude`。 tset = load_field_from_file( data_path, parameter="t", level_type="pl", ) tset # ### 同时使用 `level_type` 和 `level` 的默认值 # # 当文件中只有一个要素场有特定的要素名称时,可以省略 `level_type` 和 `level`。 # # 例如检索 2m 温度场。 t2m = load_field_from_file( data_path, parameter="2t", ) t2m # 检索 10m 风场 u 分量 u10m = load_field_from_file( data_path, parameter="10u", ) u10m
03.2_level.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # European Option Simulation Method import time import numpy as np from math import exp, sqrt import functools @functools.lru_cache(maxsize=128) def MonteCarloValuation(So,T,k,sig,r,delta,option="Eput",n=1000): p = np.random.normal(0, 1, n).tolist() a, b = (r-delta-.5*(sig**2))*T, sig*sqrt(T) prices = (So*exp(a+b*z) for z in p) if option == "Eput": return exp(-r*T)*sum((max(0,k-S) for S in prices))*(1/n) if option == "Ecall": return exp(-r*T)*sum((max(0,S-k) for S in prices))*(1/n) # + #-----------------------------# So = 100 T = 1 # 3-months, 1/4 of a year k = 100 sig = .5 r = 0.01 delta = 0 option = "Ecall" n = 1000000 #-----------------------------# start = time.time() call_price = MonteCarloValuation(So,T,k,sig,r,delta,option,n) end = time.time() print("Call price: ",call_price) print("Time to run: ",end - start,"(s)") # + #-----------------------------# So = 100 T = 10 # 3-months, 1/4 of a year k = 120 sig = .3 r = 0.01 delta = 0.03 option = "Eput" n = 1000000 #-----------------------------# start = time.time() put_price = MonteCarloValuation(So,T,k,sig,r,delta,option,n) end = time.time() print("Put price: ",put_price) print("Time to run: ",end - start,"(s)")
MonteCarloPricing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import spacy import pandas as pd import multiprocessing import numpy as np from __future__ import unicode_literals, print_function, division from io import open import unicodedata import string df_hansard = pd.read_csv('data/hansard_all.csv') df_hansard.head() df_group = df_hansard.groupby('subjectOfBusinessId') # + # df_hansard = pd.read_csv('data/hansard_all.csv') # df_group = df_hansard.groupby('subjectOfBusinessId') q_a = [] for i, index in df_group.groups.items(): # don't bother with odd pairs if (len(index) % 2 != 0): continue # Create conversatoin pairs t = df_hansard.iloc[list(index)]['content'].values temp = list(zip(t[::2], t[1::2], df_hansard.iloc[index]['SubjectOfBusinessTitle'], df_hansard.iloc[index]['FloorLanguage'], df_hansard.iloc[index]['date'], df_hansard.iloc[index]['personSpeaking'], )) # temp.append(df_hansard.iloc[index]['SubjectOfBusinessTitle']) # print(list(zip(t[::2], t[1::2]))) # print(df_hansard.iloc[list(index)]['SubjectOfBusinessTitle']) # print(temp) q_a.append(temp) # break q_a = [item for sublist in q_a for item in sublist] # print(q_a[0]) # q_a = q_a[:10000] print('number of q & a', len(q_a)) df_q_a = pd.DataFrame(q_a) df_q_a.columns = ['Q', 'A', 'SubjectOfBusinessTitle', 'FloorLanguage', 'date', 'personSpeaking'] df_q_a.to_csv('data/q_a_all.csv') df_q_a.tail() # + # df_hansard = pd.read_csv('data/hansard_all.csv') # df_group = df_hansard.groupby('subjectOfBusinessId') # q_a = [] # for i, index in df_group.groups.items(): # # don't bother with odd pairs # if (len(index) % 2 != 0): # continue # # Create conversatoin pairs # t = df_hansard.iloc[list(index)]['content'].values # q_a.append(list(zip(t[::2], t[1::2]))) # q_a = [item for sublist in q_a for item in sublist] # # q_a = q_a[:10000] # print('number of q & a', len(q_a)) # df_q_a = pd.DataFrame(q_a) # df_q_a.columns = ['Q', 'A'] # df_q_a.to_csv('data/q_a_all.csv') # df_q_a.tail() # + # Load English tokenizer, tagger, parser, NER and word vectors nlp = spacy.load('en') def sentence_tokenizer(raw_text): # Create doc = nlp(raw_text) and parse sentences. return u' '.join([sent.string.strip() for sent in nlp(raw_text).sents]) # print(list(nlp(df_q_a['Q'][2]).sents)) # print(df_q_a['Q'][2]) # print(sentence_tokenizer(df_q_a['Q'][2])) # + # %%time def _apply_df(args): df, func, kwargs = args df['Q'] = df['Q'].apply(func, **kwargs) df['A'] = df['A'].apply(func, **kwargs) return df#df.apply(func, **kwargs) def apply_by_multiprocessing(df, func, **kwargs): workers = kwargs.pop('workers') pool = multiprocessing.Pool(processes=workers) result = pool.map(_apply_df, [(d, func, kwargs) for d in np.array_split(df, workers)]) pool.close() return pd.concat(list(result)) # + # %%time num_cores = multiprocessing.cpu_count() print(num_cores) df_q_a = pd.read_csv('data/q_a_all.csv') df_q_a = apply_by_multiprocessing(df_q_a, sentence_tokenizer, workers=num_cores) df_q_a.to_csv('data/q_a_all.csv') # df_q_a = pd.read_csv('data/q_a_all.csv') # df_q_a.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis=1, inplace=True) # df_q_a.to_csv('data/q_a_all.csv') # text= ' Mr. Speaker, when the Prime Minister interrupted a woman at a town hall, correcting her use of “mankind” with “peoplekind“, his mansplaining went viral. Around the world, the Prime Minister was mocked for his political correctness. The Prime Minister eventually conceded that it was a dumb joke, but his principal secretary, <NAME>, tweeted that any and all who criticized his boss were Nazis. The Prime Minister once said that any statement by Mr. Butts could be considered his own, and in this case?' # print(df_q_a['Q'][10]) # print(sentence_tokenizer(df_q_a['Q'][10])) # df_q_a['Q'] = df_q_a['Q'].apply(sentence_tokenizer) # df_q_a['A'] = df_q_a['A'].apply(sentence_tokenizer) # df_q_a.to_csv('data/q_a.csv') # df_q_a.tail() # - df_q_a.tail() # + df_q_a['Q_A'] = df_q_a[['Q', 'A']].apply(lambda x: u'\n'.join(x), axis=1) print(len(df_q_a['Q_A'])) with open('data/Q_A_pairs.txt', 'w') as file: for row in df_q_a['Q_A']: file.write(row.lower().strip() + '\n') # + df_q_a = pd.read_csv('data/q_a_test.csv') import re import nltk # Turn a Unicode string to plain ASCII, thanks to # http://stackoverflow.com/a/518232/2809427 def unicodeToAscii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' ) # Lowercase, trim, and remove non-letter characters def normalizeString(s): s = unicodeToAscii(s.lower().strip()) s = re.sub(r"([.!?])", r" \1", s) s = re.sub(r"[^a-zA-Z.!?]+", r" ", s) return s def text_to_sents(text_seqs): tokens = normalizeString(unicodeToAscii( text_seqs)) return nltk.sent_tokenize(tokens) # this gives us a list of sentences df_q_a['Q_test'] = df_q_a['Q'].apply(text_to_sents) df_q_a['Q_test'] df_q_a.to_csv('data/q_a_test.csv') # -
minerva/preprocessor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + raw_mimetype="text/restructuredtext" active="" # .. _power: # # Power # ===== # # This section demonstrates raising secret-shared values to a power. As always, this is computed element-wise on arrays of any shape. # # Note that for this function, we are raising secret-shared values to a **public**, **unencoded**, **integer** power that is known to all players. The results are secret shared, maintaining the privacy of the inputs. # # In this case, we raise a vector of values :math:`[-1, 2, 3.4, -2.3]` to the power 3, # returning :math:`[-1, 8, 39.304, -12.167]` # + import logging import numpy import cicada.additive import cicada.communicator logging.basicConfig(level=logging.INFO) @cicada.communicator.NNGCommunicator.run(world_size=3) def main(communicator): log = cicada.Logger(logging.getLogger(), communicator) protocol = cicada.additive.AdditiveProtocol(communicator) base = numpy.array([-1, 2, 3.4, -2.3]) if communicator.rank == 0 else None exponent = 3 log.info(f"Player {communicator.rank} base: {base}", src=0) log.info(f"Player {communicator.rank} exponent: {exponent}") base_share = protocol.share(src=0, secret=protocol.encoder.encode(base), shape=(4,)) power_share = protocol.private_public_power(base_share, exponent) power = protocol.encoder.decode(protocol.reveal(power_share)) log.info(f"Player {communicator.rank} power: {power}") main(); # - # Note that the values 39.30381775 and -12.16680908 are slightly off due to the limited precision of our fixed point encoding.
docs/user-guide/power.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="SfwEUAWA3HAb" # ## Import Necessary Libraries # + id="G7rHKL0KNteY" import numpy as np import keras from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences import os import pandas as pd from keras.utils import to_categorical import matplotlib.pyplot as plt from sklearn.metrics import classification_report import scikitplot from scikitplot.metrics import plot_confusion_matrix # + [markdown] id="r1dtvdVm3Wqq" # # Create a Dataframe for reading and analysing the text files easily # + id="uVI0V45VPqkH" df_train = pd.read_csv('../input/emotions-dataset-for-nlp/train.txt', header =None, sep =';', names = ['Input','Sentiment'], encoding='utf-8') df_test = pd.read_csv('../input/emotions-dataset-for-nlp/test.txt', header = None, sep =';', names = ['Input','Sentiment'],encoding='utf-8') df_val=pd.read_csv('../input/emotions-dataset-for-nlp/val.txt',header=None,sep=';',names=['Input','Sentiment'],encoding='utf-8') # + id="t5UXwqnoOkGV" outputId="2714606a-f695-45ff-c0d9-1d7234d37a6f" df_train.Sentiment.value_counts() # + [markdown] id="weLVgoak3lR0" # ## Reading the Train and Validation Data # + id="8jfpnq0qPEUF" X=df_train['Input'] # + id="YsdBhABuRkc2" lst=[] for i in X: lst.append(len(i)) # + id="Lf_wMk5wR7Wd" outputId="0f7f1b70-bd55-428f-f798-89f8c2f998c0" len1=pd.DataFrame(lst) len1.describe() # + id="JBAgUQh-1cCn" cts=[] for i in range(7,301): ct=0 for k in lst: if k==i: ct+=1 cts.append(ct) # + [markdown] id="dwTyJqGO4JM0" # # Trying to fix a length for the embedding layers' input # + id="UOYkt05D2Pkc" outputId="fbb734a9-cd62-4f8e-aeb6-cfba165e505e" plt.bar(range(7,301),cts) plt.show() # + [markdown] id="Vkx0Skhw4RTE" # # Using The tokenizer Class to convert the sentences into word vectors # + id="HPqJjmc2Qd1q" tokenizer=Tokenizer(15212,lower=True,oov_token='UNK') tokenizer.fit_on_texts(X) # + id="QAR53xsMQ9hp" outputId="85afe77d-62ca-4db5-9030-985c63cccdf1" len(tokenizer.word_index) # + id="OPlKIuEJRHaJ" X_train=tokenizer.texts_to_sequences(X) X_train_pad=pad_sequences(X_train,maxlen=80,padding='post') # + id="klWThk5xSiTj" df_train['Sentiment']=df_train.Sentiment.replace({'joy':0,'anger':1,'love':2,'sadness':3,'fear':4,'surprise':5}) # + id="IcB2LdpDTV1R" Y_train=df_train['Sentiment'].values # + [markdown] id="1Nxr63i-4eWH" # # One hot Encoding the Emotion Values # + id="Cq4bajBpWKnU" Y_train_f=to_categorical(Y_train) # + id="jM-qVgjjXHlb" outputId="598f6ccc-4000-49a8-b46e-34b06d0fdd53" Y_train_f[:6] # + id="PKJwSxEgYhay" X_val=df_val['Input'] Y_val=df_val.Sentiment.replace({'joy':0,'anger':1,'love':2,'sadness':3,'fear':4,'surprise':5}) # + id="v-5tPraZYYH6" X_val_f=tokenizer.texts_to_sequences(X_val) X_val_pad=pad_sequences(X_val_f,maxlen=80,padding='post') # + id="rLIVsrvgZLu_" Y_val_f=to_categorical(Y_val) # + id="zKw08DYYZLxl" outputId="914b33da-9bc8-463f-f87d-281ba533a2f8" Y_val_f[:6] # + id="fTW2fLQZU8rx" from keras.models import Sequential from keras.layers import LSTM,Bidirectional,Dense,Embedding,Dropout # + [markdown] id="zzZgSic_4mb8" # # Creating a Model # + id="F_tdJHIDVLqA" outputId="7ae62ccd-8288-4e6b-b614-c8080cd51a2a" model=Sequential() model.add(Embedding(15212,64,input_length=80)) model.add(Dropout(0.6)) model.add(Bidirectional(LSTM(80,return_sequences=True))) model.add(Bidirectional(LSTM(160))) model.add(Dense(6,activation='softmax')) print(model.summary()) # + [markdown] id="lQwbhHiO4rM7" # # Compiling and running the model # + id="_KYfMEyXVxK2" model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy']) # + id="J5UxHV4EV_qA" outputId="c1760502-94ff-4f81-9ead-59ddddab1149" hist=model.fit(X_train_pad,Y_train_f,epochs=12,validation_data=(X_val_pad,Y_val_f)) # - # # Plotting the Loss and Accuracy Curves # + id="r9ZX9npB95PY" outputId="9e3a5747-bc0e-4063-8ae5-6d047d6b94b6" plt.plot(hist.history['accuracy'],c='b',label='train') plt.plot(hist.history['val_accuracy'],c='r',label='validation') plt.legend(loc='lower right') plt.show() # + id="DPXjVCJe-dLM" outputId="f520d1fa-d97f-4c03-f4da-1ef837a0f8d1" plt.plot(hist.history['loss'],c='orange',label='train') plt.plot(hist.history['val_loss'],c='g',label='validation') plt.legend(loc='upper right') plt.show() # + [markdown] id="T8wkWWRM4wj5" # # Checking for Test Data # + id="qiqCoFvpWFlI" X_test=df_test['Input'] Y_test=df_test.Sentiment.replace({'joy':0,'anger':1,'love':2,'sadness':3,'fear':4,'surprise':5}) # + id="a_puRzZNaI_i" X_test_f=tokenizer.texts_to_sequences(X_test) X_test_pad=pad_sequences(X_test_f,maxlen=80,padding='post') # + id="ZWeCFXqBaUeF" Y_test_f=to_categorical(Y_test) # + id="iVhdSBnWcV3q" outputId="0aaf8ab6-ba10-4816-94be-fdddaeeb2eda" X_test_pad.shape # + id="K2gXQ80oaaok" outputId="747bf3f0-3126-4ea2-b730-a832de1447ff" Y_test_f[:7] # + [markdown] id="KI2H_6jh444s" # #Accuracy for Test Data # + id="PWY-3GJSacff" outputId="b28b942e-d800-4469-cc29-fbdbfedfb1e9" model.evaluate(X_test_pad,Y_test_f) # + [markdown] id="eDUBbS9dCnqc" # # Plotting the Confusion matrix # + id="xSBIEISGA6Id" Y_pred=model.predict_classes(X_test_pad) # + id="atULoe4HAqZv" outputId="d949ea61-ebe5-4f7a-f804-9676542f4cf5" plot_confusion_matrix(Y_test,Y_pred) # + id="GebauRAfChiq" outputId="b66eb181-8ce9-44ea-b7f7-88ffe6027a2c" print(classification_report(Y_test,Y_pred)) # + [markdown] id="5SG8AtHq48uQ" # # Creating a Function to check for Your own Sentence # + id="N4vDT3Eeft2i" def get_key(value): dictionary={'joy':0,'anger':1,'love':2,'sadness':3,'fear':4,'surprise':5} for key,val in dictionary.items(): if (val==value): return key # + id="AQuJATahalLP" def predict(sentence): sentence_lst=[] sentence_lst.append(sentence) sentence_seq=tokenizer.texts_to_sequences(sentence_lst) sentence_padded=pad_sequences(sentence_seq,maxlen=80,padding='post') ans=get_key(model.predict_classes(sentence_padded)) print("The emotion predicted is",ans) # + [markdown] id="J1_JYp1N5CFm" # # Check for Your Own Sentence # + id="VcTTjsficgqf" outputId="3e5e5d52-08eb-4c7c-b092-22b437bfe787" predict(str(input('Enter a sentence : '))) # + id="_hiYzFqN_h_-" outputId="3f96cb05-e759-48d8-eba8-94f7d1ed84b3" predict(str(input('Enter a sentence : '))) # - predict(str(input('Enter a sentence : '))) # Save Model # + import pickle model.save('model.h5') with open('tokenizer.pickle', 'wb') as handle: pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL) # -
hippo/classification_model/emotion-classification-using-bidirectional-lstm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Welcome to my EDA Kernel # # ### Description: # The dataset for this competition includes aggregate stopped vehicle information and intersection wait times. Your task is to predict congestion, based on an aggregate measure of stopping distance and waiting times, at intersections in 4 major US cities: Atlanta, Boston, Chicago & Philadelphia. # # <img src="https://cdn.citylab.com/media/img/citylab/2018/02/AP_17153592466989/facebook.jpg" alt="Italian Trulli"> # # # Objective: # It's a first contact with the data, so I want to explore it and understand how the data is. # # Some important things that is standard to analyze: # - what are the data types of the features? # - We have missing values? # - How many unique values we have in each feature; # - The shape of full dataset. # - The entropy of each feature (that show us the level of disorder on this column, it's like a "messy metric") # # After this first analyze we can think in other questions to explore: # - Which distribution we have in our columns? # - Which are the most common cities? # - Which are the distribution of the stops, time, distances? # - How long is our date range? # - What are the distribution of the regions? # # And many more questions; # # ## <font color="red"> I'm near of grandmaster tier, so, if you find this kernel useful or interesting, please don't forget to upvote the kernel =)</font> # ### Importing the Main Libraries to work with data # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import scipy as sp from scipy import stats import matplotlib.pyplot as plt import seaborn as sns import plotly.graph_objs as go import plotly.tools as tls from plotly.offline import iplot, init_notebook_mode #import cufflinks #import cufflinks as cf import plotly.figure_factory as ff from sklearn.decomposition import PCA from sklearn.cluster import KMeans from sklearn.metrics import mean_squared_error from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression from functools import partial from hyperopt import fmin, hp, tpe, Trials, space_eval, STATUS_OK, STATUS_RUNNING import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # Any results you write to the current directory are saved as output. # - # ### Importing datasets # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" df_train = pd.read_csv('/kaggle/input/bigquery-geotab-intersection-congestion/train.csv') df_test = pd.read_csv('/kaggle/input/bigquery-geotab-intersection-congestion/test.csv') # - # ### Util functions # + _kg_hide-input=true def resumetable(df): print(f"Dataset Shape: {df.shape}") summary = pd.DataFrame(df.dtypes,columns=['dtypes']) summary = summary.reset_index() summary['Name'] = summary['index'] summary = summary[['Name','dtypes']] summary['Missing'] = df.isnull().sum().values summary['Uniques'] = df.nunique().values summary['First Value'] = df.loc[0].values summary['Second Value'] = df.loc[1].values summary['Third Value'] = df.loc[2].values for name in summary['Name'].value_counts().index: summary.loc[summary['Name'] == name, 'Entropy'] = round(stats.entropy(df[name].value_counts(normalize=True), base=2),2) return summary def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max: df[col] = df[col].astype(np.int64) else: if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem)) return df # - # ### Summary of the data # + _kg_hide-output=true resumetable(df_train) # - # Nice, this func give us a lot of cool and useful informations; # - We have only two features with missing values. Entry and Exit StreetName # # City's # - I will start exploring the distribution of City's because it is a categorical with only a few categorys inside. # resumetable(df_train) # + _kg_hide-input=true total = len(df_train) plt.figure(figsize=(15,19)) plt.subplot(311) g = sns.countplot(x="City", data=df_train) g.set_title("City Count Distribution", fontsize=20) g.set_ylabel("Count",fontsize= 17) g.set_xlabel("City Names", fontsize=17) sizes=[] for p in g.patches: height = p.get_height() sizes.append(height) g.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}%'.format(height/total*100), ha="center", fontsize=14) g.set_ylim(0, max(sizes) * 1.15) plt.show() # - # We can note that: # - The most common value is Philadelphia and it have 45.29% of the total entries. # - The other categories don't have a so discrepant difference between them. # ? # Let's # # # Date Features # - Hour Distribution # - Month Distribution # tmp_hour = df_train.groupby(['City', 'Hour'])['RowId'].nunique().reset_index() # + _kg_hide-input=true plt.figure(figsize=(15,12)) plt.subplot(211) g = sns.countplot(x="Hour", data=df_train, hue='City', dodge=True) g.set_title("Hour Count Distribution by Week and Weekend Days", fontsize=20) g.set_ylabel("Count",fontsize= 17) g.set_xlabel("Hours of Day", fontsize=17) sizes=[] for p in g.patches: height = p.get_height() sizes.append(height) g.set_ylim(0, max(sizes) * 1.15) plt.subplot(212) g1 = sns.countplot(x="Month", data=df_train, hue='City', dodge=True) g1.set_title("Hour Count Distribution by Week and Weekend Days", fontsize=20) g1.set_ylabel("Count",fontsize= 17) g1.set_xlabel("Hours of Day", fontsize=17) sizes=[] for p in g1.patches: height = p.get_height() sizes.append(height) g1.set_ylim(0, max(sizes) * 1.15) plt.subplots_adjust(hspace = 0.3) plt.show() # - # Cool. <br> # # In the hours chart: # - We can see that cities can have different hours patterns. # - Philadelphia is by far the most common in all hours. Only on 5 a.m that is almost lose to Boston in total entries. # - Atlanta is the city with less entries in all day, but after 17 p.m to 4a.m it's the second city with more rides # # In the month chart: # - We can note that the data is about only 6 months (with few values in January and May) # - Also, the pattern of the Boston City improved througout the time and the others seem very unchanged. # # Now, let's explore the Entry and Exit features. # # # EntryHeading and Exit Heading # + _kg_hide-input=true plt.figure(figsize=(15,12)) tmp = round(((df_train.groupby(['EntryHeading'])['RowId'].nunique() / total) * 100)).reset_index() plt.subplot(211) g = sns.countplot(x="EntryHeading", data=df_train, order=list(tmp['EntryHeading'].values), hue='ExitHeading', dodge=True) g.set_title("Entry Heading by Exit Heading", fontsize=20) g.set_ylabel("Count",fontsize= 17) g.set_xlabel("Entry Heading Region", fontsize=17) gt = g.twinx() gt = sns.pointplot(x='EntryHeading', y='RowId', data=tmp, order=list(tmp['EntryHeading'].values), color='black', legend=False) gt.set_ylim(0, tmp['RowId'].max()*1.1) gt.set_ylabel("% of Total(Black Line)", fontsize=16) sizes=[] for p in g.patches: height = p.get_height() sizes.append(height) g.set_ylim(0, max(sizes) * 1.15) plt.subplot(212) g1 = sns.countplot(x="EntryHeading", order=list(tmp['EntryHeading'].values), data=df_train, hue='City') g1.set_title("Entry Heading Distribution By Cities", fontsize=20) g1.set_ylabel("Count",fontsize= 17) g1.set_xlabel("Entry Heading Region", fontsize=17) sizes=[] for p in g1.patches: height = p.get_height() sizes.append(height) g1.set_ylim(0, max(sizes) * 1.15) plt.subplots_adjust(hspace = 0.3) plt.show() # - # Nice. <br> # In Entry and Exit Heading chart: # - We can note that in general the Entry and Exit Region is exactly the same. # # In Entry by Cities chart: # - We can note the difference patterns on the cities. It's a very interesting and could give us many interesting insights. # ## IntersectionID # + plt.figure(figsize=(15,6)) df_train.IntersectionId.value_counts()[:45].plot(kind='bar') plt.xlabel("Intersection Number", fontsize=18) plt.ylabel("Count", fontsize=18) plt.title("TOP 45 most commmon IntersectionID's ", fontsize=22) plt.show() # - df_train.groupby(['IntersectionId', 'EntryHeading', 'ExitHeading'])['RowId'].count().reset_index().head() # # Exploring numerical features # If you readed the competition description, you know that these are the target features; # # The targets are: # - TotalTimeStopped_p20 # - TotalTimeStopped_p50 # - TotalTimeStopped_p80 # - DistanceToFirstStop_p20 # - DistanceToFirstStop_p50 # - DistanceToFirstStop_p80 # # And the as the TimeFromFirstStop is an optional data, I will use it to see the correlations. # t_stopped = ['TotalTimeStopped_p20', 'TotalTimeStopped_p50', 'TotalTimeStopped_p80'] t_first_stopped = ['TimeFromFirstStop_p20', 'TimeFromFirstStop_p50', 'TimeFromFirstStop_p80'] d_first_stopped = ['DistanceToFirstStop_p20', 'DistanceToFirstStop_p50', 'DistanceToFirstStop_p80'] # # Heatmap Target Features # + _kg_hide-input=true plt.figure(figsize=(15,12)) plt.title('Correlation of Features for Train Set', fontsize=22) sns.heatmap(df_train[t_stopped + #t_first_stopped + d_first_stopped].astype(float).corr(), vmax=1.0, annot=True) plt.show() # - # Cool!<br> # We can see that the best correlation between the metrics are: # - Distance to First Stop p20 and Total Time Stopped p20 have a high correlation. # # Scaling the target # - Geting the min_max transformation to get clusterization and PCA features # + from sklearn.preprocessing import minmax_scale target_cols = t_stopped + d_first_stopped # + for col in target_cols: df_train[col+str("_minmax")] = (minmax_scale(df_train[col], feature_range=(0,1))) min_max_cols = ['TotalTimeStopped_p20_minmax', 'TotalTimeStopped_p50_minmax', 'TotalTimeStopped_p80_minmax', 'DistanceToFirstStop_p20_minmax', 'DistanceToFirstStop_p50_minmax', 'DistanceToFirstStop_p80_minmax'] # - # # PCA # - To better see the distribution of our metrics, lets apply PCA to reduce the dimensionality of the data # + pca = PCA(n_components=3, random_state=5) principalComponents = pca.fit_transform(df_train[min_max_cols]) principalDf = pd.DataFrame(principalComponents) # df.drop(cols, axis=1, inplace=True) prefix='Target_PCA' principalDf.rename(columns=lambda x: str(prefix)+str(x), inplace=True) df_train = pd.concat([df_train, principalDf], axis=1) # - # Nice, now we have the PCA features... Let's see the ratio of explanation of the first two Principal Components pca.explained_variance_ratio_[:2].sum() # With the 2 first components we have almost 84% of the data explained. It's a very way to easiest visualize the differences between the patterns. # # Scatter plot of cities by the PCA # + _kg_hide-input=true g = sns.FacetGrid(df_train.sample(50000), col="City", col_wrap=2, height=5, aspect=1.5, hue='Weekend') g.map(sns.scatterplot, "Target_PCA0", "Target_PCA1", alpha=.5 ).add_legend(); g.set_titles('{col_name}', fontsize=17) plt.show() # - # Cool. We can see differet patterns by the Cities and their weekend patterns. # # KMeans Clusterization # - First, I will apply the elbow method to find the correct number of cluster we have in our data # - After it, we will implement the kmeans with the best quantity # + _kg_hide-input=true #sum of squared distances ssd = [] K = range(1,10) for k in K: km = KMeans(n_clusters=k, random_state=4) km = km.fit(df_train[min_max_cols]) ssd.append(km.inertia_) plt.plot(K, ssd, 'bx-') plt.xlabel('k') plt.ylabel('Sum of squared distances') plt.title('Elbow Method For Optimal k') plt.show() # - # Nice. <br> # Based on Elbow Method the best number of cluster is 4. So, let's apply the K means on data. km = KMeans(n_clusters=4, random_state=4) km = km.fit(df_train[min_max_cols]) df_train['clusters_T'] = km.predict(df_train[min_max_cols]) # ## Ploting Clusters # - Understanding the cluster distribution # - Exploring by Cities # + _kg_hide-input=true tmp = pd.crosstab(df_train['City'], df_train['clusters_T'], normalize='columns').unstack('City').reset_index().rename(columns={0:"perc"}) total = len(df_train) plt.figure(figsize=(15,16)) plt.subplot(311) g = sns.countplot(x="clusters_T", data=df_train) g.set_title("Cluster Target Count Distribution", fontsize=20) g.set_ylabel("Count",fontsize= 17) g.set_xlabel("Target Cluster Distributions", fontsize=17) sizes=[] for p in g.patches: height = p.get_height() sizes.append(height) g.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}%'.format(height/total*100), ha="center", fontsize=14) g.set_ylim(0, max(sizes) * 1.15) plt.subplot(312) g1 = sns.countplot(x="clusters_T", data=df_train, hue='City') g1.set_title("CITIES - Cluster Target Distribution", fontsize=20) g1.set_ylabel("Count",fontsize= 17) g1.set_xlabel("Target Cluster Distributions", fontsize=17) sizes=[] for p in g1.patches: height = p.get_height() sizes.append(height) g1.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}%'.format(height/total*100), ha="center", fontsize=10) g1.set_ylim(0, max(sizes) * 1.15) plt.subplot(313) g1 = sns.boxplot(x="clusters_T", y='Target_PCA0', data=df_train, hue='City') g1.set_title("PCA Feature - Distribution of PCA by Clusters and Cities", fontsize=20) g1.set_ylabel("PCA 0 Values",fontsize= 17) g1.set_xlabel("Target Cluster Distributions", fontsize=17) plt.subplots_adjust(hspace = 0.5) plt.show() # - # Nice. <br> # ### In the first chart: # - We can note that the most common cluster is the 1 that have 73% of all data. # # ### Second chart: # - Philadelphia is the most common in the first 3 clusters. # - Boston is the second most common in 0,1 and the most common on Cluster 3; # - In the second cluster, Atlanta is the second most common city. # # ### Third Chart: # - Is clear to understand how the algorithmn divided the data in PCA values # # ## NOTE: EVERY TIME I RUN IT, THE VALUES CHANGES, SO SORRY BY THE WRONG # # # PCA values by CLUSTERS # - Let's see in another way how the algorithmn have decided by the clusterization # + _kg_hide-input=true plt.figure(figsize=(15,6)) sns.scatterplot(x='Target_PCA0', y='Target_PCA1', hue='clusters_T', data=df_train, palette='Set1') plt.title("PCA 0 and PCA 1 by Clusters", fontsize=22) plt.ylabel("Target PCA 1 values", fontsize=18) plt.xlabel("Target PCA 0 values", fontsize=18) plt.show() # - # Cool. It gives us a good understand of the boundaries of Clusters. <br> # I suspect that the cluster 2 is about traffic; # # Let's plot it by each city and try to find any pattern in the PCA dispersion. # # PCA Dispersion by clusters and by Each City # - To better understand the patterns, let's plot by Cities # + _kg_hide-input=true g = sns.FacetGrid(df_train.sample(500000), col="City", col_wrap=2, height=4, aspect=1.5, hue='clusters_T') g.map(sns.scatterplot, "Target_PCA0", "Target_PCA1", alpha=.5).add_legend(); g.set_titles('{col_name}', fontsize=50) plt.suptitle("CITIES \nPrincipal Component Analysis Dispersion by Cluster", fontsize=22) plt.subplots_adjust(hspace = 0.3, top=.85) plt.show() # - # Cool! We can see that Atlanta and Philadelphia have similar pattern of the Cluster 2;<Br> # The other cluster seens very similar # # Clusters by the Hours # I was wondering and I had an insight that I will try to implement here. # - I think that make a lot of sense explore the hours by the clusters # - Let's see the distribution of PCA0 and the Clusters by the Hours # + g = sns.FacetGrid(df_train.sample(500000), col="City", col_wrap=2, height=4, aspect=1.5, hue='clusters_T') g.map(sns.scatterplot, "Hour", "Target_PCA0", alpha=.5).add_legend(); g.set_titles('{col_name}', fontsize=50) plt.suptitle("CITIES \nPrincipal Component Analysis Dispersion by HOURS AND CLUSTERS", fontsize=22) plt.subplots_adjust(hspace = 0.3, top=.85) plt.show() # - # Cool! We can have a best intuition about the data and how it posible clustered the data. # round(pd.crosstab([df_train['clusters_T'], df_train['Weekend']], df_train['City'], normalize='index' ) * 100,0) # # # Modeling # - As I was getting problems with my model, I decided to implement the solution of the public kernels # - I will import the datasets again # # Many parts of this implementation I got on @dcaichara Kernel. <br> # You can see the kernel here: https://www.kaggle.com/dcaichara/feature-engineering-and-lightgbm df_train = pd.read_csv('/kaggle/input/bigquery-geotab-intersection-congestion/train.csv') df_test = pd.read_csv('/kaggle/input/bigquery-geotab-intersection-congestion/test.csv') # ## Hour Feature # - Let's encode the Hour Features # + def date_cyc_enc(df, col, max_vals): df[col + '_sin'] = np.sin(2 * np.pi * df[col]/max_vals) df[col + '_cos'] = np.cos(2 * np.pi * df[col]/max_vals) return df df_train = date_cyc_enc(df_train, 'Hour', 24) df_test = date_cyc_enc(df_test, 'Hour', 24) # - # ## Flag - is day? # Testing some features about the data # + df_train['is_day'] = df_train['Hour'].apply(lambda x: 1 if 7 < x < 18 else 0) df_test['is_day'] = df_test['Hour'].apply(lambda x: 1 if 7 < x < 18 else 0) df_train['is_morning'] = df_train['Hour'].apply(lambda x: 1 if 6 < x < 10 else 0) df_test['is_morning'] = df_test['Hour'].apply(lambda x: 1 if 6 < x < 10 else 0) df_train['is_night'] = df_train['Hour'].apply(lambda x: 1 if 17 < x < 20 else 0) df_test['is_night'] = df_test['Hour'].apply(lambda x: 1 if 17 < x < 20 else 0) df_train['is_day_weekend'] = np.where((df_train['is_day'] == 1) & (df_train['Weekend'] == 1), 1,0) df_test['is_day_weekend'] = np.where((df_test['is_day'] == 1) & (df_train['Weekend'] == 1), 1,0) df_train['is_mor_weekend'] = np.where((df_train['is_morning'] == 1) & (df_train['Weekend'] == 1), 1,0) df_test['is_mor_weekend'] = np.where((df_test['is_morning'] == 1) & (df_train['Weekend'] == 1), 1,0) df_train['is_nig_weekend'] = np.where((df_train['is_night'] == 1) & (df_train['Weekend'] == 1), 1,0) df_test['is_nig_weekend'] = np.where((df_test['is_night'] == 1) & (df_train['Weekend'] == 1), 1,0) # - # # Intersec - Concatenating IntersectionId and City # + df_train["Intersec"] = df_train["IntersectionId"].astype(str) + df_train["City"] df_test["Intersec"] = df_test["IntersectionId"].astype(str) + df_test["City"] print(df_train["Intersec"].sample(6).values) # - # # # Label Encoder of Intersecion + City # + le = LabelEncoder() le.fit(pd.concat([df_train["Intersec"],df_test["Intersec"]]).drop_duplicates().values) df_train["Intersec"] = le.transform(df_train["Intersec"]) df_test["Intersec"] = le.transform(df_test["Intersec"]) # - # # Street Feature # - Extracting informations from street features road_encoding = { 'Road': 1, 'Street': 2, 'Avenue': 2, 'Drive': 3, 'Broad': 3, 'Boulevard': 4 } def encode(x): if pd.isna(x): return 0 for road in road_encoding.keys(): if road in x: return road_encoding[road] return 0 # ## Creating the new feature df_train['EntryType'] = df_train['EntryStreetName'].apply(encode) df_train['ExitType'] = df_train['ExitStreetName'].apply(encode) df_test['EntryType'] = df_test['EntryStreetName'].apply(encode) df_test['ExitType'] = df_test['ExitStreetName'].apply(encode) # # # Encoding the Regions directions = { 'N': 0, 'NE': 1/4, 'E': 1/2, 'SE': 3/4, 'S': 1, 'SW': 5/4, 'W': 3/2, 'NW': 7/4 } # ## Applying the transformation in Entry and Exit Heading Columns # + df_train['EntryHeading'] = df_train['EntryHeading'].map(directions) df_train['ExitHeading'] = df_train['ExitHeading'].map(directions) df_test['EntryHeading'] = df_test['EntryHeading'].map(directions) df_test['ExitHeading'] = df_test['ExitHeading'].map(directions) # - # # Difference between the regions df_train['diffHeading'] = df_train['EntryHeading']-df_train['ExitHeading'] df_test['diffHeading'] = df_test['EntryHeading']-df_test['ExitHeading'] # ## Getting the binary if the entry and exit was in the same street df_train["same_str"] = (df_train["EntryStreetName"] == df_train["ExitStreetName"]).astype(int) df_test["same_str"] = (df_test["EntryStreetName"] == df_test["ExitStreetName"]).astype(int) # # ## Concatenating City and Month # Concatenating the city and month into one variable df_train['city_month'] = df_train["City"] + df_train["Month"].astype(str) df_test['city_month'] = df_test["City"] + df_test["Month"].astype(str) # ## Month rainfall ratio by city and seasons # + monthly_rainfall = {'Atlanta1': 5.02, 'Atlanta5': 3.95, 'Atlanta6': 3.63, 'Atlanta7': 5.12, 'Atlanta8': 3.67, 'Atlanta9': 4.09,'Atlanta10': 3.11, 'Atlanta11': 4.10, 'Atlanta12': 3.82, 'Boston1': 3.92, 'Boston5': 3.24, 'Boston6': 3.22, 'Boston7': 3.06, 'Boston8': 3.37, 'Boston9': 3.47, 'Boston10': 3.79, 'Boston11': 3.98, 'Boston12': 3.73, 'Chicago1': 1.75, 'Chicago5': 3.38, 'Chicago6': 3.63, 'Chicago7': 3.51, 'Chicago8': 4.62, 'Chicago9': 3.27, 'Chicago10': 2.71, 'Chicago11': 3.01, 'Chicago12': 2.43, 'Philadelphia1': 3.52, 'Philadelphia5': 3.88, 'Philadelphia6': 3.29, 'Philadelphia7': 4.39, 'Philadelphia8': 3.82, 'Philadelphia9':3.88 , 'Philadelphia10': 2.75, 'Philadelphia11': 3.16, 'Philadelphia12': 3.31} # Creating a new column by mapping the city_month variable to it's corresponding average monthly rainfall df_train["average_rainfall"] = df_train['city_month'].map(monthly_rainfall) df_test["average_rainfall"] = df_test['city_month'].map(monthly_rainfall) # - # # # Getting Dummies # + print(f'Shape before dummy transformation: {df_train.shape}') df_train = pd.get_dummies(df_train, columns=['City' ],\ prefix=['City'], drop_first=False) print(f'Shape after dummy transformation: {df_train.shape}') df_test = pd.get_dummies(df_test, columns=['City' ],\ prefix=['City'], drop_first=False) # - # # MinMax Scaling the lat and long from sklearn.preprocessing import StandardScaler scaler = StandardScaler() for col in ['Latitude','Longitude']: scaler.fit(df_train[col].values.reshape(-1, 1)) df_train[col] = scaler.transform(df_train[col].values.reshape(-1, 1)) df_test[col] = scaler.transform(df_test[col].values.reshape(-1, 1)) # ## Dropping not used features df_train.drop(['RowId', 'Path','EntryStreetName','ExitStreetName' ],axis=1, inplace=True) df_test.drop(['RowId', 'Path', 'EntryStreetName','ExitStreetName'],axis=1, inplace=True) # + interesting_feat = ['IntersectionId', 'Latitude', 'Longitude', 'EntryHeading', 'ExitHeading', 'Hour', 'Weekend', 'Month', 'is_morning', 'is_night', 'is_day_weekend', 'is_mor_weekend', 'is_nig_weekend', # 'Hour_sin', 'Hour', 'same_str', 'Intersec', 'EntryType', 'ExitType', 'diffHeading', 'average_rainfall', 'is_day', 'City_Boston', 'City_Chicago', 'City_Philadelphia', 'City_Atlanta'] total_time = ['TotalTimeStopped_p20', 'TotalTimeStopped_p50', 'TotalTimeStopped_p80'] target_stopped = ['DistanceToFirstStop_p20', 'DistanceToFirstStop_p50', 'DistanceToFirstStop_p80'] # - # ## Setting X and y # + X = df_train[interesting_feat] y = df_train[total_time + target_stopped] X_test = df_test[interesting_feat] # - print(f'Shape of X: {X.shape}') print(f'Shape of X_test: {X_test.shape}') # ## Reduce memory usage # X = reduce_mem_usage(X) # X_test = reduce_mem_usage(X_test) # ## Spliting data into train and validation X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.10, random_state=42) # # Hyperopt Space # - Here we will set all range of our hyperparameters # # Define searched space hyper_space = {'objective': 'regression', 'metric':'rmse', 'boosting':'gbdt', 'gpu_device_id': 0, #'n_estimators': hp.choice('n_estimators', [25, 40, 50, 75, 100, 250, 500]), 'max_depth': hp.choice('max_depth', list(range(6, 18, 2))), 'num_leaves': hp.choice('num_leaves', list(range(20, 180, 20))), 'subsample': hp.choice('subsample', [.7, .8, .9, 1]), 'colsample_bytree': hp.uniform('colsample_bytree', 0.7, 1), 'learning_rate': hp.uniform('learning_rate', 0.03, 0.12), #'reg_alpha': hp.choice('reg_alpha', [.1, .2, .3, .4, .5, .6]), #'reg_lambda': hp.choice('reg_lambda', [.1, .2, .3, .4, .5, .6]), 'min_child_samples': hp.choice('min_child_samples', [20, 45, 70, 100])} # ## Building Hyperopt Function to be optimized cat_feat = ['IntersectionId','Hour', 'Weekend','Month', 'is_day', 'is_morning', 'is_night', 'same_str', 'Intersec', 'City_Atlanta', 'City_Boston', 'City_Chicago', 'City_Philadelphia', 'EntryType', 'ExitType'] # + from sklearn.model_selection import KFold import lightgbm as lgb def evaluate_metric(params): all_preds_test ={0:[],1:[],2:[],3:[],4:[],5:[]} print(f'Params: {params}') FOLDS = 4 count=1 for i in range(len(all_preds_test)): score_mean = 0 kf = KFold(n_splits=FOLDS, shuffle=False, random_state=42) for tr_idx, val_idx in kf.split(X, y): X_tr, X_vl = X.iloc[tr_idx, :], X.iloc[val_idx, :] y_tr, y_vl = y.iloc[tr_idx], y.iloc[val_idx] lgtrain = lgb.Dataset(X_tr, label=y_tr.iloc[:,i]) lgval = lgb.Dataset(X_vl, label=y_vl.iloc[:,i]) lgbm_reg = lgb.train(params, lgtrain, 2000, valid_sets = [lgval], categorical_feature=cat_feat, verbose_eval=0, early_stopping_rounds = 300) pred_lgb = lgbm_reg.predict(X_val, num_iteration=lgbm_reg.best_iteration) all_preds_test[i] = pred_lgb score_uni = np.sqrt(mean_squared_error(pred_lgb, y_val.iloc[:,i])) print(f'Score Validation : {score_uni}') pred = pd.DataFrame(all_preds_test).stack() pred = pd.DataFrame(pred) y_val_sc = pd.DataFrame(y_val).stack() y_val_sc = pd.DataFrame(y_val_sc) count = count +1 score = np.sqrt(mean_squared_error(pred[0].values, y_val_sc[0].values )) #score = metric(df_val, pred) print(f'Full Score Run: {score}') return { 'loss': score, 'status': STATUS_OK } # - # ## Running the hyperopt Function # + _kg_hide-output=true # Seting the number of Evals MAX_EVALS= 15 # Fit Tree Parzen Estimator best_vals = fmin(evaluate_metric, space=hyper_space, verbose=-1, algo=tpe.suggest, max_evals=MAX_EVALS) # Print best parameters best_params = space_eval(hyper_space, best_vals) # - # best_params all_preds ={0:[],1:[],2:[],3:[],4:[],5:[]} # + _kg_hide-output=true # %%time import lightgbm as lgb for i in range(len(all_preds)): print(f'## {i+1} Run') X_tr,X_val,y_tr,y_val=train_test_split(X, y.iloc[:,i], test_size=0.10, random_state=31) xg_train = lgb.Dataset(X_tr, label = y_tr) xg_valid = lgb.Dataset(X_val, label = y_val ) lgbm_reg = lgb.train(best_params, xg_train, 10000, valid_sets = [xg_valid], verbose_eval=500, early_stopping_rounds = 250) all_preds[i] = lgbm_reg.predict(X_test, num_iteration=lgbm_reg.best_iteration) print(f"{i+1} running done." ) # - # ## Importing submission file # - stacking all results in the same file sub = pd.read_csv("../input/bigquery-geotab-intersection-congestion/sample_submission.csv") dt = pd.DataFrame(all_preds).stack() dt = pd.DataFrame(dt) sub['Target'] = dt[0].values sub.head() # sub.to_csv("lgbm_pred_hyperopt_test.csv", index = False) # # Most part of the first modeling try I got from @danofer<br> # Plase, visit the kernel with all work here: https://www.kaggle.com/danofer/baseline-feature-engineering-geotab-69-5-lb # <br> # The Catboost model I got from @rohitpatil kernel, Link: https://www.kaggle.com/rohitpatil/geotab-catboost<br> # Some ideas of modelling I saw on: https://www.kaggle.com/dcaichara/feature-engineering-and-lightgbm # # # NOTE: This Kernel is not finished. # # Please stay tuned and votes up the kernel, please!
insightful-eda-modeling-lgbm-hyperopt.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + altitude = int(input("Enter your value: ")) if (altitude <= 1000): print ("Safe to Land") elif (altitude > 1000 and altitude < 5000): print ("Bring down to 1000 altitude") else: print ("Turn around and try later") # -
Pilot Plane landing case study.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!pip install sentence-transformers # + ############################################################# #BEFORE THE GUI BEGAN!!!!!!!!!!!!!!!!! ############################################################# ############################################################# #THERE EXISTED THIS CODE FOR TEXT SUMMARIZATION ############################################################# import nltk from nltk.corpus import stopwords from nltk.cluster.util import cosine_distance from nltk.tokenize import sent_tokenize import numpy as np import networkx as nx import re from tkinter.filedialog import askopenfilename #Function to split text into sentences by fullstop(.) '''def read_article(text): article = text.split(". ") sentences =[] for sentence in article: print(sentence) sentences.append(sentence.replace("[^a-zA-Z]"," ").split(" ")) return sentences''' # Read the text and tokenize into sentences def read_article(text): sentences =[] sentences = sent_tokenize(text) for sentence in sentences: sentence.replace("[^a-zA-Z0-9]"," ") return sentences # Create vectors and calculate cosine similarity b/w two sentences def sentence_similarity(sent1,sent2,stopwords=None): if stopwords is None: stopwords = [] sent1 = [w.lower() for w in sent1] sent2 = [w.lower() for w in sent2] all_words = list(set(sent1 + sent2)) vector1 = [0] * len(all_words) vector2 = [0] * len(all_words) #build the vector for the first sentence for w in sent1: if not w in stopwords: vector1[all_words.index(w)]+=1 #build the vector for the second sentence for w in sent2: if not w in stopwords: vector2[all_words.index(w)]+=1 return 1-cosine_distance(vector1,vector2) # Create similarity matrix among all sentences def build_similarity_matrix(sentences,stop_words): #create an empty similarity matrix similarity_matrix = np.zeros((len(sentences),len(sentences))) for idx1 in range(len(sentences)): for idx2 in range(len(sentences)): if idx1!=idx2: similarity_matrix[idx1][idx2] = sentence_similarity(sentences[idx1],sentences[idx2],stop_words) return similarity_matrix # Generate and return text summary def generate_summary(text,top_n): nltk.download('stopwords') nltk.download('punkt') stop_words = stopwords.words('english') summarize_text = [] # Step1: read text and tokenize sentences = read_article(text) # Steo2: generate similarity matrix across sentences sentence_similarity_matrix = build_similarity_matrix(sentences,stop_words) # Step3: Rank sentences in similarirty matrix sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_matrix) scores = nx.pagerank(sentence_similarity_graph) #Step4: sort the rank and place top sentences ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(sentences)),reverse=True) # Step 5: get the top n number of sentences based on rank for i in range(top_n): summarize_text.append(ranked_sentences[i][1]) # Step 6 : output the summarized version return " ".join(summarize_text) ############################################################################################ #On this note we ended the long line of code responsible for Text Summarization ############################################################################################ ############################################################################################ #Before we move on, let us initialize codes for our keywords extraction too ############################################################################################ def extract(top_n, nr_candidates, doc): from sklearn.feature_extraction.text import CountVectorizer n_gram_range = (1, 1) stop_words = "english" # Extract candidate words/phrases count = CountVectorizer(ngram_range=n_gram_range, stop_words=stop_words).fit([doc]) candidates = count.get_feature_names() from sentence_transformers import SentenceTransformer model = SentenceTransformer('distilbert-base-nli-mean-tokens') doc_embedding = model.encode([doc]) candidate_embeddings = model.encode(candidates) from sklearn.metrics.pairwise import cosine_similarity top_n = 10 distances = cosine_similarity(doc_embedding, candidate_embeddings) keywords = [candidates[index] for index in distances.argsort()[0][-top_n:]] import numpy as np import itertools # Calculate distances and extract keywords distances = cosine_similarity(doc_embedding, candidate_embeddings) distances_candidates = cosine_similarity(candidate_embeddings, candidate_embeddings) # Get top_n words as candidates based on cosine similarity words_idx = list(distances.argsort()[0][-nr_candidates:]) words_vals = [candidates[index] for index in words_idx] distances_candidates = distances_candidates[np.ix_(words_idx, words_idx)] # Calculate the combination of words that are the least similar to each other min_sim = np.inf candidate = None for combination in itertools.combinations(range(len(words_idx)), top_n): sim = sum([distances_candidates[i][j] for i in combination for j in combination if i != j]) if sim < min_sim: candidate = combination min_sim = sim return [words_vals[idx] for idx in candidate] ########################################################################################################################### #Keywords Extraction Done ########################################################################################################################### import tkinter as tk import center_tk_window import tkinter.font as TkFont from tkinter import scrolledtext from PIL import Image, ImageTk import docx win = tk.Tk() #win.configure(background='black') win.geometry("1300x650") #win.configure(bg='gr) win.title('Text Summarization and Keyword Extraction') win.resizable(False, False) center_tk_window.center_on_screen(win) font = ("Helvetica", 10) font2 = ("Segoe Boot Semilight", 7) helv36 = TkFont.Font(family="Helvetica",size=36,weight="bold") Frame1 = tk.Frame(win, width=1300, height=30, bg='white') Frame1.grid_propagate(0) Frame1.grid(row=0) Frame2 = tk.Frame(win, width=570, height=600, bg='white') Frame2.grid_propagate(0) Frame2.grid(row=1, sticky = tk.W) Frame3 = tk.Frame(win, width=160, height=600, bg='white') Frame3.grid_propagate(0) Frame3.grid(row=1) Frame4 = tk.Frame(win, width=570, height=600, bg='white') Frame4.grid_propagate(0) Frame4.grid(row=1, sticky=tk.E) Frame5 = tk.Frame(win, width=1300, height=20, bg='black') Frame5.grid_propagate(0) Frame5.grid(row=2, sticky=tk.S) user_text = scrolledtext.ScrolledText(Frame2, width=68, height=37, wrap=tk.WORD, relief='solid') user_text.grid(column=0, row=0, columnspan=3, padx=(5,5)) output = scrolledtext.ScrolledText(Frame4, width=68, height=37, wrap=tk.WORD, relief='solid') output.grid(column=0, row=0, columnspan=3, padx=(5,5)) def openFile(): global filename filename = askopenfilename() if filename.endswith(".txt"): with open(filename) as f: text = f.read() user_text.insert(tk.INSERT, text) elif filename.endswith(".pdf"): import PyPDF2 # creating a pdf file object pdfFileObj = open(filename, 'rb') # creating a pdf reader object pdfReader = PyPDF2.PdfFileReader(pdfFileObj) # printing number of pages in pdf file pages = pdfReader.numPages num = 0 while num <= pages: print(num) pageObj = pdfReader.getPage(num) user_text.insert(tk.INSERT, pageObj.extractText()) print(pageObj.extractText()) num+=1 if num == pages: pdfFileObj.close() break elif filename.endswith(("doc", "docx")): from docx import Document doc = Document(filename) for para in doc.paragraphs: user_text.insert(tk.INSERT, para.text) def clear_text(): user_text.delete("1.0", "end") output.delete("1.0", "end") def summarize(): text=user_text.get("1.0","end") print(text) a = generate_summary(text, top_n=5) output.insert(tk.INSERT, a) def extract_keywords(): text = user_text.get("1.0", "end") b = "\n *****************************KEYWORDS*************************** \n" + str(extract(top_n=10, nr_candidates=20, doc=text)) + "\n" output.insert(tk.INSERT, b) #Import image0 = Image.open("button_import.png") image0 = image0.resize((140, 45)) click_btn0 = ImageTk.PhotoImage(image0) img_label0= tk.Label(image=click_btn0) import_= tk.Button(Frame3, image=click_btn0, borderwidth=0, command=openFile) import_.grid(column=0, row=0, pady=(0,50), padx=(10,0)) #Summarize Button image = Image.open("button_summarize.png") image = image.resize((140, 45)) click_btn = ImageTk.PhotoImage(image) img_label= tk.Label(image=click_btn) summarize= tk.Button(Frame3, image=click_btn, borderwidth=0, command=summarize) summarize.grid(column=0, row=0, pady=(80,0), padx=(10,0)) #summarize = tk.Button(Frame3, height=10, width=40, text ="Hello", image=photo) #Extract Keywords image2 = Image.open("button_extract-keywords.png") image2 = image2.resize((140, 45)) click_btn2 = ImageTk.PhotoImage(image2) img_label2= tk.Label(image=click_btn2) extract_keywords= tk.Button(Frame3, image=click_btn2, borderwidth=0, command=extract_keywords) extract_keywords.grid(column=0, row=0, pady=(200,0), padx=(10,0)) #Clear image3 = Image.open("button_clear.png") image3 = image3.resize((140, 45)) click_btn3 = ImageTk.PhotoImage(image3) img_label3= tk.Label(image=click_btn3) clear= tk.Button(Frame3, image=click_btn3, borderwidth=0, command=clear_text) clear.grid(column=0, row=0, pady=(317,0), padx=(10,0)) # Making the text read only tk.Label(Frame5, text="Designed with love by: ", font=font2, bg='black', fg='white').grid(column=0, row=1, padx=(600,0), pady=(2,0)) win.mainloop()
Text-Summarization GUI .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Neural Network test using Keras # + run_control={"frozen": false, "read_only": false} # %matplotlib inline from os.path import join import numpy as np import matplotlib.pyplot as plt from keras.models import Sequential from keras.utils.np_utils import to_categorical from keras.layers import Dense from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint from keras.models import load_model # - # # Generate data # **Mean subtraction**: Input data already has zero-mean and no need to demean. # # cf) http://cs231n.github.io/neural-networks-case-study/ N = 100 # number of points per class D = 2 # dimensionality K = 3 # number of classes X = np.zeros((N * K,D)) # data matrix (each row = single example) y = np.zeros(N * K, dtype='uint8') # class labels for j in range(K): ix = range(N * j, N * (j + 1)) r = np.linspace(0.0, 1, N) # radius t = np.linspace(j * 4,(j + 1) * 4,N) + np.random.randn(N) * 0.2 # theta X[ix] = np.c_[r * np.sin(t), r * np.cos(t)] y[ix] = j fig, ax = plt.subplots() ax.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral); X[:5, :] y print(X.shape) print(y.shape) # # Build model via Keras # **Initial weight**: Use default params. `kernel_initializer`='glorot_uniform', `bias_initializer`='zeros' # **Regularisation**: No regularisation # **y**: Vector y needs to be converted by *to_categorical()* model = Sequential() model.add(Dense(100, input_dim=2, activation='relu')) model.add(Dense(3, activation='softmax')) model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy']) model.summary() model_checkpoint = ModelCheckpoint(filepath=join('output', 'keras_test.{epoch:02d}.hdf5'), verbose=0) history = model.fit(X, to_categorical(y, 3), batch_size=10, epochs=100, verbose=1, callbacks=[model_checkpoint]) # # Result fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(10, 4)) ax1.set_ylabel("Loss") ax2.set_ylabel("Accuarcy") ax1.set_xlabel("Epoch") ax2.set_xlabel("Epoch") ax1.plot(history.epoch, history.history["loss"]) ax2.plot(history.epoch, history.history["acc"]); # ## Predict loaded_model = load_model(join('output', 'keras_test.100.hdf5')) def traverse(o, tree_types=(list, tuple)): if isinstance(o, tree_types): for value in o: for subvalue in traverse(value, tree_types): yield subvalue else: yield o n_linspace = 100 axis_grid = np.linspace(-1.5, 1.5, n_linspace) grid_x, grid_y = np.meshgrid(axis_grid, axis_grid) x_test = np.array([(i, j) for i, j in zip(traverse(grid_x.tolist()), traverse(grid_y.tolist()))]) # Make predictions using the loaded model. y_hat = loaded_model.predict(x_test) y_hat_grid = np.argmax(y_hat, axis=1).reshape((n_linspace, n_linspace)) grid_x, grid_y = np.meshgrid(axis_grid, axis_grid) fig, ax = plt.subplots() ax.contourf(grid_x, grid_y, y_hat_grid, cmap=plt.cm.Spectral) ax.scatter(X[:, 0], X[:, 1], c=y, s=40, edgecolors='black', cmap=plt.cm.Spectral);
sandbox/keras_neural_network_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Basic Text Processing #Declare text text1 = "The Vikram Sarabhai Space Centre is a space research Centre of the ISRO, focused on rocket and space vehicles" #Length of the string - count of characters len(text1) #Length of words words1 = text1.split(" ") len(words1) text1[4] text1[4:10] #Get all words which has first character Capital [wordCap for wordCap in words1 if wordCap.istitle()] #Words with greater than length 5 [wordG5 for wordG5 in words1 if len(wordG5)>5] text1.capitalize() text1.title() text1.lower() text1.upper() text1.swapcase() text1.casefold() text1.index('a') text1.find('a') text1.rfind('a') text1.splitlines() text1.lstrip("The") import re text2 = "This news article is published on month:Jan" matchResult = re.search(r'month:\w\w\w',text2) if matchResult: print('Pattern exists ', matchResult.group()) else: print('Pattern not exists') print(matchResult) # + matchResult = re.search(r'de', 'abcdef') #Found, matchResult.group() - de matchResult = re.search(r'..cd', 'abcdef') #Found, matchResult.group() - abcd matchResult = re.search(r'\w\w\w', '%%abc') #Found, matchResult.group() - abc matchResult = re.search(r'ef$','abcdef') #Found, matchResult.group() - ef matchResult = re.search(r'^ab','abcdef') #Found, matchResult.group() - ab if matchResult: print('Pattern exists ', matchResult.group()) else: print('Pattern not exists') # + matchResult = re.search(r'de+f', 'abcdeef') #Found, matchResult.group() - deef matchResult = re.search(r'dk*e', 'abcdeef') #Found, matchResult.group() - de matchResult = re.search(r'e?f', 'abcdef') #Found, matchResult.group() - ef if matchResult: print('Pattern exists ', matchResult.group()) else: print('Pattern not exists') # + matchResult = re.search(r'#[abc]+', '#abcdef') #Found, matchResult.group() - #abc if matchResult: print('Pattern exists ', matchResult.group()) else: print('Pattern not exists') # + ## Consider a tweet with hashtags tweet = 'I am learning #datascience and it is awesome. #python #machinelearning' ## Here re.findall() returns a list of all hashtags present in the tweet. hastags = re.findall(r'#\w+', tweet) for tag in hastags: print(tag) # - import nltk nltk.download() from nltk.book import * text1 #Show one sentence each from all the above corpora sents() #Check any sentence sent1 print(len(set(text1))) freqCount = FreqDist(text3) vocabulary = freqCount.keys() freqCount['The'] fishwords = ['fishes','Fishings','Fishes'] prt = nltk.PorterStemmer() [prt.stem(ts) for ts in fishwords] fishwords = ['fishes','Fishings','Fishes'] WNlemma = nltk.WordNetLemmatizer() [WNlemma.lemmatize(ts) for ts in fishwords] text2 = "Why are you so intelligent?" words = text2.split(' ') print(words) print(nltk.word_tokenize(text2)) text3 = "His name John. He lives in U.K. with his wife Is he the best? No he is not!" sent1 = text3.split(".") print(sent1) sent2 = nltk.sent_tokenize(text3) print(sent2) tokenize_words = nltk.word_tokenize(text2) nltk.pos_tag(tokenize_words) nltk.pos_tag(tokenize_words) # + # Parsing sentence structure text15 = nltk.word_tokenize("Alice loves Bob") grammar = nltk.CFG.fromstring(""" S -> NP VP VP -> V NP NP -> 'Alice' | 'Bob' V -> 'loves' """) parser = nltk.ChartParser(grammar) trees = parser.parse_all(text15) for tree in trees: print(tree) # - # ### Text Classification from sklearn.naive_bayes import MultinomialNB #Import datasets from sklearn.datasets import fetch_20newsgroups #This data is well separated in training and testing. We can use training data for training our model and test performance on test data training_data = fetch_20newsgroups(subset='train', shuffle=True) #We can check for labels in the training dataset import random from random import shuffle a = random.shuffle(training_data.target_names) type(training_data.target_names) import numpy as np np.random.shuffle(training_data.target_names) training_data.target_names print(training_data.data[0]) #Convert these sentences to word vector. We will do this by CountVectorizer available in scikit-learn #First we need to import it from sklearn.feature_extraction.text import CountVectorizer #Now we need to create its object and fit the model. count_vector = CountVectorizer() count_vector.fit(training_data.data) #We need to transform the data using fitted model above training_count = count_vector.transform(training_data.data) #We can check shape of our matrix print(training_count.shape) #In above matrix we are storing only count of words. We can weight them based on importance. We can use TF-IDF to do that. #Import TFIDF from sklearn.feature_extraction.text import TfidfTransformer #Create Object and fit tfidf_model = TfidfTransformer() tfidf_model.fit(training_count) training_tfidf = tfidf_model.transform(training_count) #Fit model on training data NBModel = MultinomialNB().fit(training_tfidf, training_data.target) #Evaluate Model testing_data = fetch_20newsgroups(subset='test', shuffle=True) #Convert Testing data in the correct format testing_count = count_vector.transform(testing_data.data) testing_tfidf = tfidf_model.transform(testing_count) #Predict newsgroups predicted = NBModel.predict(testing_tfidf) #Get Accuracy from sklearn.metrics import accuracy_score print(accuracy_score(testing_data.target,predicted)) #We can use nlp techniques to improve the result import nltk #Create a set of stop words from nltk.corpus import stopwords stop_words = set(stopwords.words('english')) def data_cleaner(raw_data): cleaned_data = list() for row in raw_data: stemmed_words = list() #Find meaningful tokesn tokens = nltk.word_tokenize(row) #Convert words using stemming prt = nltk.PorterStemmer() for token in tokens: if token not in stop_words: if(token.isalnum()==True): stemmed_words.append(prt.stem(token) ) sent = ' '.join(stemmed_words) cleaned_data.append(sent) return cleaned_data training_data_modified = data_cleaner(training_data.data) print(training_data_modified[0]) training_data_modified[1] test_data_modified = data_cleaner(testing_data.data) #Transform and do training. count_vector.fit(training_data_modified) training_count = count_vector.transform(training_data_modified) tfidf_model.fit(training_count) training_tfidf = tfidf_model.transform(training_count) NBModel = MultinomialNB().fit(training_tfidf, training_data.target) testing_count = count_vector.transform(test_data_modified) testing_tfidf = tfidf_model.transform(testing_count) predicted = NBModel.predict(testing_tfidf) print(accuracy_score(testing_data.target,predicted)) training_data_modified[3] # ### Topic Modeling #Take data. We will use sample of 1000 rows from 20newsgroup data for this task import random from sklearn.datasets import fetch_20newsgroups training_data = fetch_20newsgroups(subset='train', shuffle=True) data500 = random.sample(training_data.data, 500) #Clean the data from nltk.corpus import stopwords stop_words = set(stopwords.words('english')) training_data_modified = list() for training_example in data500: stemmed_words = list() #Find meaningful tokesn tokens = nltk.word_tokenize(training_example) #Convert words using stemming prt = nltk.PorterStemmer() for token in tokens: if token not in stop_words: if(token.isalnum()==True): stemmed_words.append(prt.stem(token) ) sent = ' '.join(stemmed_words) training_data_modified.append(sent) training_data_cleaned = [doc.split() for doc in training_data_modified] #Gensim is used to handle text data and convert corpus into document term matrix import gensim from gensim import corpora #Create document term matrix dictionary = corpora.Dictionary(training_data_cleaned) doc_term_matrix = [dictionary.doc2bow(doc) for doc in training_data_cleaned] # + # Create model using gensim Lda = gensim.models.ldamodel.LdaModel # Training LDA Model ldamodel = Lda(doc_term_matrix, num_topics=3, id2word = dictionary, passes=50) # - #Each line represent topic with words. print(ldamodel.print_topics(num_topics=3, num_words=3))
Chapter 09/9. Text Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # %matplotlib inline import matplotlib.pyplot as plt import sklearn.datasets import pandas as pd import numpy as np housingData = sklearn.datasets.fetch_california_housing() print housingData housing = pd.DataFrame(housingData.data, columns=housingData.feature_names) print "\n housing.head()\n", housing.head() print "\n housing.info()\n", housing.info() print "\n housing.describe()\n", housing.describe() # -
HousingTerminalTest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from pathlib import Path INPUT_PATH = Path("input") # - df = pd.read_csv(INPUT_PATH / "day02_input.txt", header=None, delimiter=" |:|-", names=["Min_letters", "Max_letters", "Letter", "Gap", "Password"], engine="python") del df["Gap"] df # + def count_letters(x): word = x["Password"] letter = x["Letter"] return word.count(letter) count_letters(df.iloc[0]) # - df["Length"] = df["Password"].str.len() df["Letter_counts"] = df.apply(count_letters, axis=1) df df["Pass"] = np.where((df["Letter_counts"] >= df["Min_letters"]) & (df["Letter_counts"] <= df["Max_letters"]), 1, 0) df # Answer df["Pass"].sum() # + # Part 2 # + def check_char1(x): pos = x["Min_letters"] - 1 letter = x["Letter"] password = x["Password"] if password[pos] == letter: return 1 else: return 0 def check_char2(x): pos = x["Max_letters"] - 1 letter = x["Letter"] password = x["Password"] if password[pos] == letter: return 1 else: return 0 # - df["pass1"] = df.apply(check_char1, axis=1) df["pass2"] = df.apply(check_char2, axis=1) df["pass3"] = df["pass1"] + df["pass2"] df["pass3"].value_counts() # + # Answer = 275
day02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This code calculates precision and recall scores of title/prose detection. import os, fnmatch import webbrowser def find_files(pattern, path): """ Walks the path recursively and returns a list of files whose filenames match the pattern passed. Input: pattern, path Output: list of file names """ result = [] for root, dirs, files in os.walk(path): for name in files: if fnmatch.fnmatch(name, pattern): result.append(os.path.join(root, name)) return result semantic_header_files = find_files('dom_ind.headers', r'C:/UsablePrivacyPolicy/Data/Misc') master_header_files = find_files('gold.headers', r'C:/UsablePrivacyPolicy/Data/Misc') def extract_final_folder(absolute_path): """ Extracts the string which appears between the final folder separator and the pre final folder separator Ex - Input = C:\\UsablePrivacyPolicy\\Data\\OtherSites\\activision.com\\activision.txt Returns activision.com """ substring = absolute_path[:absolute_path.rfind('\\')] return substring[substring.rfind('\\')+1 :] def extract_headers(absolute_path): """ returns all the lines present in the file passed as a list """ headers = [] with open (absolute_path) as f: headers = f.readlines() headers_return = [] for each_line in headers: each_line = each_line.strip().lower() headers_return.append(''.join(e for e in each_line if e.isalnum())) return set(headers_return) def create_header_dict(header_file_list): """ returns a dictionary with the following pattern {final_folder_name:[Header1,Header2]} """ dict_headers = {} for each_file in header_file_list: final_folder_name = extract_final_folder(each_file) list_headers = extract_headers(each_file) dict_headers[final_folder_name] = list_headers return dict_headers dict_semantic_headers = create_header_dict(semantic_header_files) dict_master_headers = create_header_dict(master_header_files) assert(len(dict_semantic_headers) == len(dict_master_headers)), "Lengths don't match" # On to precision and recall part # + results_file = open('results_headers_markers.html','w') file_contents = """<html> <head><title>Header Evaluation Results</title></head> <body><table style="width:40%"> <tr> <th>Website</th> <th>Precision</th> <th>Recall</th> <th>F1</th> </tr>""" close_html = """</body></html>""" #f.write(message) #f.close() #webbrowser.open_new_tab('helloworld.html') # + total_precision = 0 total_recall = 0 global_correct_headers = 0 global_predicted_headers = 0 global_correct_predicted_headers = 0 for website, master_header_list in dict_master_headers.items(): # Get the corresponding semantic header try: semantic_header_list = dict_semantic_headers[website] except: continue #print(master_header_list) #print(semantic_header_list) total_correct_headers = len(master_header_list) total_predicted_headers = len(semantic_header_list) total_correct_predicted_headers = 0 for each_master_header in master_header_list: for each_semantic_header in semantic_header_list: if(each_master_header in each_semantic_header or each_semantic_header in each_master_header): total_correct_predicted_headers += 1 #print(each_master_header) break if(total_correct_predicted_headers > total_predicted_headers): total_correct_predicted_headers = total_predicted_headers try: recall = total_correct_predicted_headers / total_correct_headers except ZeroDivisionError: recall = 0 try: precision = total_correct_predicted_headers / total_predicted_headers except ZeroDivisionError: precision = 0 #total_precision += precision try: f1 = 2 * precision * recall / (precision + recall) except ZeroDivisionError: f1 = 0 global_correct_headers += total_correct_headers global_predicted_headers += total_predicted_headers global_correct_predicted_headers += total_correct_predicted_headers file_contents = file_contents + "<tr><td>" + website + "</td>" + "<td>" + '{0:.2f}'.format(precision) + "</td>" file_contents = file_contents + "<td>" + '{0:.2f}'.format(recall) + "</td>" file_contents = file_contents + "<td>" + '{0:.2f}'.format(f1) + "</td></tr>" # + try: total_precision = global_correct_predicted_headers / global_predicted_headers except ZeroDivisionError: precision = 0 try: total_recall = global_correct_predicted_headers / global_correct_headers except ZeroDivisionError: precision = 0 total_f1 = 2 * total_precision * total_recall / (total_precision + total_recall) print(global_correct_headers) print(global_correct_predicted_headers) print(global_predicted_headers) file_contents = file_contents + "<tr><td>" + "Total" + "</td>" + "<td>" + '{0:.4f}'.format(total_precision) + "</td>" file_contents = file_contents + "<td>" + '{0:.4f}'.format(total_recall) + "</td>" file_contents = file_contents + "<td>" + '{0:.4f}'.format(total_f1) + "</td></tr>" file_contents = file_contents + "</table>" + close_html results_file.write(file_contents) results_file.close() webbrowser.open_new_tab('results_headers_markers.html')
Software/Python Code/Lab_Calculate_Precision_Recall_Title_Prose.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Multi-Convolutional Net for Sentiment Classification # This Conv Net performs sentiment analysis on the Google toxicity dataset review dataset. # + import os import keras from keras.datasets import imdb from keras.preprocessing.sequence import pad_sequences from keras.preprocessing import text from keras.models import Model, Sequential from keras.layers import Input, concatenate from keras.layers import Dense, Flatten, Dropout, Activation, BatchNormalization from keras.layers import Embedding, Conv1D, SpatialDropout1D, GlobalMaxPool1D, LSTM from keras.layers.wrappers import Bidirectional from keras.callbacks import ModelCheckpoint, EarlyStopping from keras_contrib.layers.advanced_activations import SineReLU from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # - # #### Set Hyperparameters # + output_dir = 'model_output/multi-conv' e_param = 0.05 n_classes = 6 epochs = 3 patience = 1 batch_size = 128 test_split=.3 n_dim = 128 n_unique_words = 20000 max_review_length = 400 pad_type = trunc_type = 'pre' n_conv_1 = 32 n_conv_2 = 64 n_conv_3 = 128 k_conv_1 = 2 k_conv_2 = 4 k_conv_3 = 5 drop_conv = 0.5 n_dense = 512 dropout = 0.3 # - # #### Load Data train_df = pd.read_csv('kaggle/datasets/toxicity/train.csv') test_df = pd.read_csv('kaggle/datasets/toxicity/test.csv') # #### Preprocess Data test_df.shape # + train_sentences_series = train_df['comment_text'].fillna("_").values test_sentences_series = test_df['comment_text'].fillna("_").values # Tokeninze the Training data tokenizer = text.Tokenizer(num_words=n_unique_words) tokenizer.fit_on_texts(list(train_sentences_series)) train_tokenized_sentences = tokenizer.texts_to_sequences(train_sentences_series) # Tokeninze the Test data test_tokenized_sentences = tokenizer.texts_to_sequences(test_sentences_series) # toxic,severe_toxic,obscene,threat,insult,identity_hate classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"] y_train = train_df[classes].values X_train = pad_sequences(train_tokenized_sentences, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0) X_test_sub = pad_sequences(test_tokenized_sentences, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0) X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=test_split) # - # #### Design Deep Net Architecture # + cnn_epsilon = 0.0025 dense_epsilon = 0.0083 input_layer = Input(shape=(max_review_length,), dtype='int16', name='input') embedding_layer = Embedding(n_unique_words, n_dim, input_length=max_review_length, name='embedding_1')(input_layer) conv_1 = Conv1D(n_conv_1, k_conv_1, name='conv_1')(embedding_layer) act1 = SineReLU(cnn_epsilon)(conv_1) maxp_1 = GlobalMaxPool1D(name='maxp_1')(act1) drop_1 = Dropout(drop_conv)(maxp_1) norm_1 = BatchNormalization()(drop_1) conv_2 = Conv1D(n_conv_2, k_conv_2 name='conv_2')(embedding_layer) act2 = SineReLU(cnn_epsilon)(conv_2) maxp_2 = GlobalMaxPool1D(name='maxp_2')(act2) drop_2 = Dropout(drop_conv)(maxp_2) norm_2 = BatchNormalization()(drop_2) conv_3 = Conv1D(n_conv_3, k_conv_3 name='conv_3')(embedding_layer) act3 = SineReLU(cnn_epsilon)(conv_3) maxp_3 = GlobalMaxPool1D(name='maxp_3')(act3) drop_3 = Dropout(drop_conv)(maxp_3) norm_3 = BatchNormalization()(drop_3) concat = concatenate([norm_1, norm_2, norm_3]) dense_layer_1 = Dense(n_dense, name='dense_1')(concat) act4 = SineReLU(dense_epsilon)(dense_layer_1) drop_dense_layer_1 = Dropout(dropout, name='drop_dense_1')(act4) dense_layer_2 = Dense(n_dense, name='dense_2')(drop_dense_layer_1) act5 = SineReLU(dense_epsilon)(dense_layer_2) drop_dense_layer_2 = Dropout(dropout, name='drop_dense_2')(act5) predictions = Dense(n_classes, activation='sigmoid', name='output')(drop_dense_layer_2) model = Model(input_layer, predictions) # - model.summary() # #### Configure the Model model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) modelCheckpoint = ModelCheckpoint(monitor='val_acc', filepath=output_dir+'/weights-multicnn-toxicity_new.hdf5', save_best_only=True, mode='max') earlyStopping = EarlyStopping(monitor='val_acc', mode='max', patience=patience) if not os.path.exists(output_dir): os.makedirs(output_dir) # ### Train the Model model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_valid, y_valid), callbacks=[modelCheckpoint, earlyStopping]) # #### Evaluate #model.load_weights(output_dir+'/weights-multicnn-toxicity.hdf5') model = keras.models.load_model(output_dir + '/weights-multicnn-toxicity_new.hdf5') y_hat = model.predict(X_test_sub) plt.hist(y_hat) _ = plt.axvline(x=0.5, color='orange') np.random.shuffle(y_hat) pct_auc = roc_auc_score(y_valid, y_hat[0:31915]) * 100 '{:0.2f}'.format(pct_auc) y_hat[0] # + sample_submission = pd.read_csv("kaggle/datasets/toxicity/sample_submission.csv") sample_submission.shape sample_submission[classes] = y_hat sample_submission.to_csv("kaggle/datasets/toxicity/submission_multicnn_relus.csv", index=False) # -
notebooks/ekholabs/multi-conv_net_SineReLU_toxicity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Capstone Project # - ### Data Extraction # + import pandas as pd from pandasql import sqldf, load_meat pysqldf = lambda q: sqldf(q,globals()) import numpy as np import seaborn as sns import matplotlib.pyplot as plt import time import datetime #download the dataset URL ='https://storage.googleapis.com/asiayo-pic-bed-dev/wine_data.csv' wine = pd.read_csv('https://storage.googleapis.com/asiayo-pic-bed-dev/wine_data.csv') print(wine.head()) print(wine.describe()) wine.isnull().sum() # - # - ### Clean Dataset # + wine[wine.country.isna() == True] wine[(wine.country.isna() == True) & (wine.province.isna() == True)] wine_clean = wine[~((wine.country.isna()==True) & (wine.province.isna() == True))] wine_clean.shape wine_clean.columns print(wine_clean.country.nunique()) print(wine_clean.variety.nunique()) wine_clean.groupby("variety")["points"].mean().sort_values(ascending = False).head(10) # - # - ### Visualize the Dataset # + s = wine_clean.groupby("variety")["points"].mean().sort_values(ascending = False).head(10) s.plot.bar() plt.xlabel("wine types") plt.ylabel("points") plt.title("10 best wine types") plt.tight_layout() plt.ylim(80,100) plt.show() wine_clean.groupby("country")["points"].mean().nlargest(8) wine_clean.country.value_counts() wineer_countries = wine_clean.groupby("country")["points"].mean().nlargest(9)[1:] wineer_countries wine_clean.groupby("country")["price"].mean().sort_values(ascending = False).head(5) wine_clean.loc[wine_clean.country == "US-France"].index wine_clean = wine_clean.drop(144054, axis = 0) wine_clean.country.value_counts() wine_clean.groupby("country")["price"].mean().sort_values(ascending = False).head(6)[1:6] average_price_countries = wine_clean.groupby("country")["price"].mean().sort_values(ascending = False).head(6)[1:6] average_price_countries.plot.bar() plt.xlabel("countries") plt.ylabel("average wine price") plt.title("most expensive wine countries") plt.tight_layout() plt.show() wine_clean[wine_clean.points == 100].count() wine_clean[wine_clean.points == 100]["variety"].value_counts() wine_clean["price"].corr(wine_clean["points"]) plt.scatter(x = wine_clean.price, y = wine_clean.points, c = "g", marker = ".") plt.xlabel("price") plt.ylabel("points") plt.title("price vs points") plt.tight_layout() plt.show() plt.scatter(x = wine_clean.price, y = wine_clean.points, c = "g", marker = ".", s = wine_clean.price) plt.xlabel("price") plt.ylabel("points") plt.title("price vs points") plt.tight_layout() plt.show() # - # - ### Apply Linear Regression Model # # + import statsmodels.api as sm import statsmodels.formula.api as smf results = smf.ols('price ~ points', data = wine).fit() print(results.params) results.summary() # - # - ### Publish your notebook online on github # # https://github.com/PaoTIngKung/0517
PaoTingKung_CapstoneProject.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Shamir Secret Sharing # # Shamir secret sharing is a $(t+1)$-out-of-$n$ secret sharing protocol. Given a secret value $s$, # - define $f(X) = f_t X^t + \ldots + f_1 X + s$, where $f_t, \ldots, f_1 \leftarrow \mathbb{F}_p$ for some prime $p > n$ # - give share $f(i)$ to $P_i$ ($i = 1, \ldots, n$) # # To reconstruct, at least $t+1$ parties pool their points $(i, f(i))$ and reconstruct the polynomial $f$, e.g. as # # $$ \sum_i^{t+1} \ell_i \cdot f(i), \text{ where } \ell_i(X) = \frac{\Pi_{j \neq i} (X-x_j)}{\Pi_{j \neq i} (x_i-x_j)} $$ # # Then, evaluate $f(0) = s$. import numpy as np import random, sympy import matplotlib.pyplot as plt # First, let's define some helper functions for dealing with polynomials. # + def term_to_string(coeff, deg): if coeff == 0: return "" temp = "{}".format(coeff) app = "" # constant term if deg == 0: return temp # x term elif deg == 1: app = "x" # others else: app = "x^{}".format(deg) return app if coeff == 1 else temp+app def print_poly(coeffs): poly_str = "" # coefficients from highest to lowest degree deg = len(coeffs)-1 for i in range(len(coeffs)): if(coeffs[i]!=0): poly_str += "{} + ".format(term_to_string(coeffs[i], deg-i)) # remove extra + at end print(poly_str[:-3]) def eval_poly(coeffs, x): # coefficients from highest to lowest degree deg = len(coeffs)-1 ans = 0 for i in range(len(coeffs)): ans += coeffs[i]*x**(deg-i) # in real SSS, this is over a finite field (mod p) # return ans%p # but this is not representable in 2D so we will use int arith # (note this is not secure) return ans # - # ## Share # Here is our sharing function: def share(s, n, t, p): # check p if not sympy.isprime(p): print("p={} is not prime!".format(p)) return if p <= n: print("p={} must be greater than n={}".format(p, n)) return # check t if t >= n: print("t={} must be less than n={}".format(t, n)) return # check s is in field if p <= s: print("s={} must be less than p={}".format(s, p)) return coeffs = [] for i in range(t): # sample coefficients from F_p = {0, ..., p-1} coeffs.append(random.randint(0,p-1)) # secret is the y-intercept coeffs.append(s) shares = [] for i in range(1,n+1): shares.append((i, eval_poly(coeffs, i))) # plot the polynomial print("The random degree t={} polynomial is".format(t)) print_poly(coeffs) x = np.linspace(0, n, n+1) y = [eval_poly(coeffs, i) for i in x] plt.plot(x, y) # plot the shares print() print("The shares are points on that polynomial:") print(shares) x1 = [shares[i][0] for i in range(len(shares))] y1 = [shares[i][1] for i in range(len(shares))] plt.scatter(x1, y1) # plot the secret plt.scatter(0, s) print() print("Here is a visual representation (secret in orange).") return shares # For example, say our secret is the number 42. We'd like to share it among 10 parties (n=10), and we'll allow any 4 of those to recover the secret (t=3, t+1 can recover). Now let's pick a prime number p that's bigger than both the secret and the number of parties (so, p>42). The next largest prime is 43, so let's try that! # # How would you call the `share` function with these parameters? # # **Answer** # ``` # shares = share(42, 10, 3, 43) # ``` shares = share(42, n=10, t=3, p=43) # Now we can distribute these points among our 10 parties! # ## Reconstruct # Say 4 of those parties (Alice, Bob, Charlie, and Diane) want to recover the secret. We set $t=3$, so they should be able to do this (remember, a minimum of $t+1$ parties is needed). Together, they hold 4 points on the degree-3 polynomial, which uniquely defines it! They can pool this information to recover the polynomial $f$ and evaluate it at $x=0$ using the `recon` function: def recon(shares, n, t): if len(shares) < t+1: print("Not enough shares to reconstruct! ({} < t+1={})".format(len(shares), t+1)) return # i Lagrange basis polynomials evaluated at 0 ell = [1]*len(shares) for i in range(len(shares)): #ell[i] = 1 for j in range(len(shares)): if i!=j: ell[i] *= float(0-shares[j][0])/(shares[i][0]-shares[j][0]) # interpolate # f(X) = sum_1^{t+1} ell_i(X) * y_i # s = f(0) s = 0 for i in range(len(shares)): s += ell[i]*shares[i][1] print("The reconstructed secret is:") return int(s) # Assuming Alice, Bob, Charlie, and Diane have the points for $x=1,2,3,4$, respectively, can you use the shares of 42 we created earlier to recover the secret? # # **Answer** # ``` # recon(shares[:4], n=10, t=3) # ``` recon(shares, n=10, t=3)
docs/assets/jupyter/ShamirSS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Problema de prediccion de ventas # ## Planteamiento del problema y Objetivo: # El objetivo es construir un modelo de regresión simple para predecir las **ventas por producto de una tienda en particular**, que forma parte de una cadena de tiendas, y descubrir cuáles son los **principales factores que influencian dicha predicción**. # ### Importando Librerías import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import datetime as dt import seaborn as sns from scipy import stats # ### Lectura de los datasets data_train = pd.read_csv('../data/Train_BigMart.csv') data_test = pd.read_csv('../data/Test_BigMart.csv') # Identificando la data de train y de test, para posteriormente unión y separación data_train['Set'] = 'train' data_test['Set'] = 'test' # Combinando los dataset de *entrenamiento y test* para proceder a realizar la exploración, visualización, limpieza de datos, y posterior ingeniería de características y codificación de variables. data = pd.concat([data_train, data_test], ignore_index=True, sort=False) data.head(20) # ## EXPLORACIÓN DE LOS DATOS # Obteniendo una visión general del conjunto de datos y verificando los tipos de datos, conociendo las posibles variables de entradas y el target; así también la distribución de estos valores: print('Variables:', list(data.columns)) # ### Resumen de los datasets print('Número de registros de train:', data_train.shape[0], ' - Número de variables:', data_train.shape[1]) print('Número de registros de test:', data_test.shape[0], ' - Número de variables:', data_test.shape[1]) print('Número de registros totales:', data.shape[0], ' - Número de variables:', data.shape[1]) # Visión general de las variables en cada dataset: print('Dataset de entrenamiento:\n') data_train.info() print('\nDataset de test:\n') data_test.info() print('\nDataset de total:\n') data.info() #data_train.info(), data_test.info(), data.info() # Se tienen 8 variables de tipo "object" (que corresponden a variables de tipo categórica que más adelante se codificaran), 4 de tipo float64 y 1 de tipo int64 (estas 2 últimas corresponde a variables de tipo numéricas). Por lo que no corresponde por el momento realizar algún cambio en el tipo de variables. # # Se tiene una columna más en los datasets de train y total, correspondiente al Target (Item_Outlet_Sales). # # Se verifica la presencia de alguno valores faltantes. Que más adelante se trabajará con ellos. # ### Variables: # - Item_Identifier: nombre o identificador del producto # - Item_Weight: peso del producto (en gramos) # - Item_Fat_Content: clasificación del producto en términos de grasas contenidas en él. # - Item_Visibility: scoring de visibilidad del producto: medida que hace referencia al conocimiento del producto en el consumidor. ¿Qué tan fácil puede ser encontrado el producto? # - Item_Type: tipo de producto # - Item_MRP: máximum retailed price. Precio calculado por el fabricante que indica el precio más alto que se puede cobrar por el producto. # - Outlet_Identifier: identificador de la tienda # - Outlet_Establishment_Year: año de lanzamiento de la tienda # - Outlet_Size: tamaño de la tienda # - Outlet_Location_Type: clasificación de las tiendas según ubicación # - Outlet_Type: tipo de tienda # - Item_Outlet_Sales: ventas del producto en cada observacion # ## Planteamiento de Hipótesis: # Respecto a las variables que se disponen en el dataset y de acuerdo al objetivo propuesto, se plantean algunas hipótesis: # - El peso del producto no debería influir en los niveles de venta de la tienda. # - El contenido de grasas de los productos puede ser significativo pra el nivel de venta (Los productos con mayor contenido de grasa quiezás se compran menos). # - La visibilidad de un producto incide en el nivel de venta de la tienda (generalmente los productos más costosos se exhiben en sitios de fácil visualización para el cliente). # - El tipo de producto puede influir en el nivel de ventas (existe productos de mayor y menor rotación, pero también de mayor y menor precio). # - El precio de un producto es un factor que está directamente asociado con el nivel de ventas. # - El año de lanzamiento de la tienda, da información del tiempo de vida que puede tener la tienda; esto podría influir en el nivel del conocimiento que tiene el cliente de la existencia de la tienda, y por ende de su nivel de ventas. # - A mayor tamaño de la tienda, mayor nivel de ventas. Las personas le suelen gustar los lugares amplios para ir de compras. # - La ubicación de la tienda es un factor preponderante en el acceso al cliente y por ende en el nivel de ventas. # ## Análisis de Datos: # ### Análisis univariado # ### Resumen estadístico de variables cuantitativas o numéricas: # Obtener más información de los datos a través de el comportamiento y distribución de los mismos. data.describe() # - Máximos y mínimos: # # Se observan valores dentro de rangos razonables respecto de cada tipo de variable. El valor 0 en Item_Visibility podría sugerir un registro inadecuado, sin embargo para esta fase no realizaré cambios al respecto. # # - Media y Desviación estándar: # # En general se observan valores muy dispersos (no aplica este análisis para "Outlet_Establishment_Year") # # - Recuento (count): # # Refleja valores perdidos en la variable "Item_Weight" (la diferencia de valores en la variable "Item_Outlet_Sales" corresponde a los valores de TARGET en el train dataset) # # **La variable "Outlet_Establishment_Year" será tomada como vida del establecimiento en años, la cual puede dar una información más valiosa.** # ### Visualizando las variables numéricas: # Visualización de las caraterísticas númericas de entrada data.hist(column=['Item_Weight', 'Item_Visibility', 'Item_MRP', 'Item_Outlet_Sales'], figsize=(26,4), bins=30, layout=(1,4)) plt.show() # TARGET: Las ventas de la tiendas (Item_Outlet_Sales) presentan una distribución con sesgo positivo, es decir, sus valores se concentran más en los niveles de ventas inferiores. # # Los pesos de los productos (Item_Weight) presentan una distribución clara, no se encuentra concentración de frecuencias en valores específicos. # # La visibilidad de los productos (Item_Visibility) también presenta una distribución sesgada positivamente, se observa mayor concentración en valores inferiores. # # El precio máximo por producto (Item_MRP) presenta una distribución multimodal, de aproximadamente 4 niveles de precios distintos. # # *Las variables sesgadas se les tratará para eliminar dicho sesgo.* # # - Por ahora, se realizará el cálculo de años de vida de la tienda en base al año de establecimiento y el año actual (se asume que es data del actual año 2019): # #### FEATURES ENGINEERING: para los años del establecimiento data['Outlet_Establishment_Year'] = 2020 - data['Outlet_Establishment_Year'] # ## Definiendo las variables categóricas # ### Resumen estadístico de variables categóricas: data.describe(include = ['object', 'category']) # - Item_Identifier posee muchos valores únicos que no se podrán analizar de esta manera tan dispersa, se puede tratar de agrupar según alguna patrón de la codificación. # - Item_Type también posee un número de características que se podrían agrupar para evitar trabajar con 16 valores; de ser conveniente para la predicción. # - Las demás variables tienen número de categorías finitas convenientes para el análisis. # - Se tienen valores faltantes en la variable Outlet_Size que habrá que trabajar. # # Seguido se hace una exploración más detallada: # ### Conociendo las variables categóricas: categoricals = ['Item_Fat_Content', 'Item_Type', 'Outlet_Identifier', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type'] for cat in categoricals: print(cat, ':', set(data[cat])) # Del análisis se observa: # - Para "Item_Fat_Content" diferentes etiquetas para la misma categoría. **Acción**: unificar etiquetas. # - Se considera reagrupar algunas categorías de "Item_Type". # #### LIMPIEZA: Unificando etiquetas para 'Item_Fat_Content' data['Item_Fat_Content'] = data['Item_Fat_Content'].replace({'low fat': 'Low Fat', 'LF': 'Low Fat', 'reg': 'Regular'}) # Verificamos la unificación de etiquetas: set(data['Item_Fat_Content']) # ### Miramos el comportamiento de las frecuencias de las variables categóricas: for aux in ['Item_Fat_Content', 'Item_Type', 'Outlet_Identifier', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type']: print('\n', aux, ':\n', data[aux].value_counts()) # ### Visualizando la distribucón de frecuencias de las variables categóricas: for var_cat in ['Item_Fat_Content', 'Item_Type', 'Outlet_Identifier', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type', 'Outlet_Establishment_Year']: ancho_bar = len(data[var_cat].unique()) plt.figure(figsize=(ancho_bar*1.8,4)) values = data[var_cat].dropna().sum() ax = sns.countplot(x= var_cat, data=data, palette='Set2') for p in ax.patches: ax.annotate('{:.0f} ({:.1f}%)'.format(p.get_height(), p.get_height()/len(data)*100), (p.get_x()+0.1, p.get_height()+30)) plt.title('Distribución de Frecuencias de ' + var_cat) plt.show() # - El mayor porcentaje de producto corresponde a los bajos en grasas (aprox 65%) # - Los productos con mayor registros son los vegetales-frutas y los snacks, seguidos de los productos del hogar, enlatados, lácteos, congelados y horneados. # - Las tiendas con menores registros son la OUT10 y OUT19, el resto de las tiendas tienen un número de registros similar. # - Se tienen mayores registros en la tiendas pequeñas y medianas. # - El mayor número de registros de ventas lo presentan las tiendas con locación Tier 3 y las tiendas de tipo Supermarket Type1. # #### Porcentaje de valores perdidos print('El porcentaje de valores perdidos de las variables: \n') for var in data.columns: num_nan = data[var].isnull().sum() print('{}: \t\t{} ({:,.2f}%)'.format(var, num_nan, num_nan*100/len(data))) # Se tiene 17,17% de valores perdidos en la variable de pesos del producto, lo cual se puede solucionar asignando el peso de un producto similar o desde otro registro del mismo producto. De similar manera se puede realizar con los valores faltantes (28,27%) de la variable Tamaño del outlet. # Parte del dataset con valores perdidos en la variable 'Item_Weight': data[data['Item_Weight'].isnull()].sort_values('Item_Identifier').head() print(list(data[data['Item_Weight'].isnull()]['Outlet_Identifier'].unique())) # Los valores faltantes de pesos de los productos corresponden a las tiendas cuyo código son 'OUT027' y 'OUT019' print(len(list(data[data['Item_Weight'].isnull()]['Item_Identifier'].unique()))) # Se tienen 1559 productos de los 2439 registros con valores perdidos en la variable 'Item_Weight' # Ahora se procede a rellenar los faltantes en los registros de pesos, basado en el valor modal del peso del producto. (Imputación de casos similares) # #### LIMPIEZA: de faltantes en el peso de los productos productos = list(data[data['Item_Weight'].isnull()]['Item_Identifier'].unique()) for producto in productos: moda = (data[data['Item_Identifier'] == producto][['Item_Weight']]).mode().iloc[0,0] data.loc[data['Item_Identifier'] == producto, 'Item_Weight'] = moda # Se verifica que no existan valores nulos para la variable peso del producto. print('El porcentaje de valores perdidos de la variable "Item_Weight" es de:', data['Item_Weight'].isnull().sum()/len(data)*100) # Se procede a revisar los faltantes de la variable tamaño de la tienda. data[data['Outlet_Size'].isnull()].sort_values('Item_Identifier').tail(10) print(list(data[data['Outlet_Size'].isnull()]['Outlet_Identifier'].unique())) # Los valores faltantes de tamaño de la tienda corresponden a las tiendas cuyo código son 'OUT010', 'OUT045' y 'OUT017' # Se procede primero a verificar qué valores de tamaño registran estas tiendas. outlets = list(data[data['Outlet_Size'].isnull()]['Outlet_Identifier'].unique()) for outlet in outlets: categoria = data[data['Outlet_Identifier'] == outlet]['Outlet_Size'].unique() print(outlet, categoria) # Se observa que estas 3 tiendas no tienen registros del tamaño de su tienda. Para dar solución a esto se buscará algún tipo de asociación de otra variable con el tamaño, para realizar la estimación de la categoría. # ### Análisis Bi-variado: # Variables Categóricas vs Categóricas: sns.catplot(x="Outlet_Size", hue='Outlet_Type', data=data, kind="count", height=3, aspect=2) plt.title('Outlet Size vs Outlet_Type por Outlet Identifier') plt.show() # - La mayoría de los "Supermarket Type 1" son de tamaño "Small". # - Las tiendas "Grocery Store" son de tamaño "Small". # - Las tiendas "Supermarket Type 2" y "Supermarket Type 3" son de tamaño "Medium". # - Outlet_Size vs Outlet_Type plt.figure(figsize=(10,6)) sns.heatmap(pd.crosstab(data['Outlet_Size'], data['Outlet_Type'], margins=False, normalize=False), annot=True, square=False, fmt='', cbar_kws={"orientation": "horizontal"}, linewidths=0.5) plt.show() # Se observa que no existe una relación entre el tipo de tienda y el tamaño de la misma. # - Item_Type vs Outlet_Type plt.figure(figsize=(10,12)) sns.heatmap(pd.crosstab(data['Item_Type'], data['Outlet_Type'], normalize=False), annot=True, square=False, fmt='', cbar_kws={"orientation": "horizontal"}, linewidths=0.5) plt.show() # El Supermarket Type 2 y 3 presentan distribución similar respecto de los tipos de productos, al igual que en el tamaño de la tienda. # Vemos: # - Outlet_Location_Type vs Outlet_Type import statsmodels.api as sm tab = pd.crosstab(data['Outlet_Location_Type'], data['Outlet_Type'], margins=False, normalize=False) plt.figure(figsize=(10,6)) sns.heatmap(tab, annot=True, square=False, fmt='', cbar_kws={"orientation": "horizontal"}, linewidths=0.5) plt.show() # - La mayor cantidad de registros son de la tienda "Supermarket Type 1" y de tamaño "Small"; en primer lugar de la ubicación "Tier 2" y en segundo de la ubicación "Tier 1". # Veamos el tamaño de la tienda con respecto al nivel de ventas. # ### Análisis Bi-variado: # Variables Categóricas vs Continuas: # - Veamos por un momento el tipo de tienda respecto a las ventas: # + plt.figure(figsize=(10,4)) sns.violinplot(x=data['Outlet_Type'], y=data["Item_Outlet_Sales"]) plt.show() # H0: las medias son significativamente iguales entre los grupos (Se utiliza el test de Kruskal-Wallis por tratarse de una variable que no tiene una distribución normal) print('\n', stats.kruskal(list(data.dropna().loc[data['Outlet_Type']== 'Supermarket Type1', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Outlet_Type']== 'Supermarket Type2', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Outlet_Type']== 'Supermarket Type3', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Outlet_Type']== 'Grocery Store', 'Item_Outlet_Sales']))) # - # - Se evidencia diferencias significativas en los niveles de ventas por tipo de tienda. # - La distribución de frecuencia de las variables estudiadas arriba son similares para los tipos de tiendas "Supermarket Type 2" y "Supermarket Type 3"; sin embargo no lo es así el comportamiento de las ventas. Se dejarán estas categorias separadas como están originalmente. # + sns.boxplot(x="Outlet_Size", y="Item_Outlet_Sales", data=data) plt.show() med=data.dropna().loc[data['Outlet_Size']=='Medium', 'Item_Outlet_Sales'] hig=data.dropna().loc[data['Outlet_Size']=='High', 'Item_Outlet_Sales'] sma=data.dropna().loc[data['Outlet_Size']=='Small', 'Item_Outlet_Sales'] sns.distplot(sma, kde=True, hist=False, label='Small'), sns.distplot(med, kde=True, hist=False, label='Medium'), sns.distplot(hig, kde=True, hist=False, label='High') plt.show() # Cálculo de promedios de ventas de cada tamaño de tienda print('\nVentas promedios (Small):', sma.mean()) print('Ventas promedios (Medium):', med.mean()) print('Ventas promedios (High):', hig.mean()) print('\n', stats.kruskal(list(med), list(hig), list(sma))) # H0: las medias son significativamente iguales entre los grupos # - # Mediante la prueba de Kruskal-Wallis se evidencia diferencias significativas en los niveles de venta para los distintos tamaños de tiendas. # # Se somete a prueba las diferencias estadísticas entre el tamaño de tienda Small y High, para descartar similitud en sus ventas: stats.mannwhitneyu(list(hig), list(med)) # H0: las medias son significativamente iguales para ambos grupos # Se evidencia diferencias significativas entre las ventas promedios de ambos tamaños de tiendas (Medium y High). # # Seguidamente se visualiza el comportamiento de las ventas de las tiendas que presentan VALORES PERDIDOS en el tamaño de tienda (Outlet_Size): data_aux = data[data['Outlet_Size'].isnull()] plt.figure(figsize=(10,4)) sns.boxplot(x="Outlet_Identifier", y="Item_Outlet_Sales", data=data_aux) plt.show() # Los valores de ventas en la tienda OUT10 son muy pequeños en comparación a las tiendas OUT17 y OUT45. # # Graficando los diagramas box-plot de los niveles de ventas de las tiendas según tamaño (Oulet_Size) vs tipo de tienda (Outlet_Type): plt.figure(figsize=(15,4)) sns.boxplot(x="Outlet_Identifier", y="Item_Outlet_Sales", hue='Outlet_Size', data=data) plt.show() # No se muestra algún patrón que se deba destacar. # # Graficando diagramas box-plot de los niveles de ventas de las tiendas según el tipo de tienda (Outlet_Type): plt.figure(figsize=(15,6)) sns.boxplot(x="Outlet_Identifier", y="Item_Outlet_Sales", hue='Outlet_Type', data=data) plt.show() # Se observa que la tienda OUT10 tiene un comportamiento similar en el nivel de ventas, que las tiendas OUT17 y OUT45 tienen coportamientos similares en sus ventas a las tiendas OUT13 y OUT46 respectivamente. # Se decide asignar a todos los valores perdidos del tamaño de las tiendas, la categoria "Small". # # Tomando en consideración lo siguiente: # - El OUT10 es una tienda de tipo "Grocery Store" (lo que implica ser una tienda pequeña) y además tiene unas ventas similares al OUT19. # - El OUT17 es una tienda de tipo "Supermarket Type 1" (la mayoría de las tiendas "Supermarket Type 1" son de tamaño "Small"). # - El OUT45 es una tienda de tipo "Supermarket Type 1" (la mayoría de las tiendas "Supermarket Type 1" son de tamaño "Small"). # #### LIMPIEZA: de faltantes en el tamaño de las tiendas for outlet in outlets: data.loc[data['Outlet_Identifier'] == outlet, 'Outlet_Size'] = 'Small' # Se verifica que no existan valores nulos para la variable peso del producto. print('El porcentaje de valores perdidos de la variable "Outlet_Size" es de:', data['Outlet_Size'].isnull().sum()/len(data)*100) # Verificamos de nuevo los valores perdidos: print('El porcentaje de valores perdidos de las variables: \n') for var in data.columns: print('{} \t\t {:,.2f}%:'.format(var, data[var].isnull().sum()/len(data)*100)) # El 40% de valores perdidos que se observa arriba, corresponde a los datos de test que no contiene esta variale (por ser la variable respuesta que queremos obtener). # Verificando de nuevo los valores de la variables categóricas: for aux in ['Item_Fat_Content', 'Item_Type', 'Outlet_Identifier', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type']: print(aux, ':', set(data[aux])) # Ya se cuenta con un dataset un poco más limpio. Falta verificar las variables numéricas y recodificar las categorias de la variable "Item_Type"; para esta recodificación prodecemos a realizar primero una pruebas de significancia estadísticas. Pero antes, vemos algunos otros comportamientos bivariados: for var in ['Item_Fat_Content', 'Outlet_Identifier', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type', 'Outlet_Establishment_Year']: plt.figure(figsize=(len(data[var].unique())*2,4)) sns.violinplot(x=var, y="Item_Outlet_Sales", data=data) plt.show() # Los valores de ventas de las diferentes categorias no tienen distribución normal. Se utilizará el test de Kruskal-Wallis (técnica no paramétrica) para determinar relación significativa entre las distintas variables y los niveles de ventas de la tiendas (TARGET). # + # H0: las medias son significativamente iguales entre los grupos print('Test de Kruskal-Wallis para Item_Fat_Content vs Item_Outlet_Sales:\n\t', stats.kruskal(list(data.dropna().loc[data['Item_Fat_Content'] == 'Low Fat', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Fat_Content'] == 'Regular', 'Item_Outlet_Sales']))) # H0: las medias son significativamente iguales entre los grupos print('Test de Kruskal-Wallis para Item_Fat_Content vs Item_Outlet_Sales:\n\t', stats.kruskal(list(data.dropna().loc[data['Outlet_Location_Type'] == 'Tier 1', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Outlet_Location_Type'] == 'Tier 2', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Outlet_Location_Type'] == 'Tier 3', 'Item_Outlet_Sales']))) # - # En los graficos de violin se puede visualizar que el contenido de grasa en los productos no presenta influencia sobre el nivel de ventas y que las otras variables categóricas analizadas si tienen influencia sobre la variable TARGET; esto es corroborado por los test de Kruskal-Wallis realizados. # # Respecto de la variable contenido de grasa de los productos, dicha conclusión arriba hecha no se corresponde con lo que se espera; lo que sugiere revisar más a fondo el registro de estas categorias. Para ello, realicemos una vista general de los datos: data[data['Item_Fat_Content'] == 'Low Fat'].head() # En la 3ra linea se encuentra una inconsistencia; no tiene sentido clasificar como "Low Fat" un producto del hogar. Veamos esto en un gráfico agrupado: sns.catplot(y="Item_Type", hue="Item_Fat_Content", kind="count", data=data, height=6, aspect=2) plt.show() # Existen productos con categoría "Low Fat" que no son comestibles o que simplemente no tienen ningún contenido de grasa, para ser consistentes se asigna una nueva categoría NA (No aplica) para los tipos de productos Household, Health and Hygiene, Hard Drinks, Soft Drinks, Fruits and Vegetables: # ## Features Engineering # #### FEATURES ENGINEERING: asignación de nueva categorías para 'Item_Fat_Content' # FEATURES ENGINEERING: asignación de nueva categorías para 'Item_Fat_Content' for prod in ['Household', 'Health and Hygiene', 'Hard Drinks', 'Soft Drinks', 'Fruits and Vegetables']: data.loc[data['Item_Type'] == prod, 'Item_Fat_Content'] = 'NA' sns.catplot(y="Item_Type", hue="Item_Fat_Content", kind="count", data=data, height=6, aspect=2) plt.show() # Analicemos los niveles de ventas por contenido de grasa de los productos: # H0: las medias son significativamente iguales entre los grupos stats.kruskal(list(data.dropna().loc[data['Item_Fat_Content']== 'Low Fat', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Fat_Content']== 'Regular', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Fat_Content']== 'NA', 'Item_Outlet_Sales'])) # No se evidencia diferencias significativas en los niveles de ventas entre las 3 categorias de la característica Item_Fat_Content. Veamos un gráfico de ello: sns.violinplot(x="Item_Fat_Content", y='Item_Outlet_Sales', kind="bar", data=data) plt.show() sns.catplot(x="Item_Type", y='Item_Outlet_Sales', hue="Item_Fat_Content", kind="bar", data=data, height=5, aspect=4) plt.show() # De forma similar lo vemos en el gráfico por tipo de producto, intentemos reagrupar dichas categoría para buscar una relación significativa con el nivel de ventas. # # Veamos una clasificación por usos: # - Consultando las categorias de idenificación de los tipos de productos print(list(data[data['Item_Type'] == 'Others']['Item_Identifier'].unique())) print(list(data[data['Item_Type'] == 'Health and Hygiene']['Item_Identifier'].unique())) print(list(data[data['Item_Type'] == 'Household']['Item_Identifier'].unique())) # En general se observa: FD = ALIMENTOS - NC = HOGAR, SALUD E HIG, OTROS - DR = BEBIDAS, # + active="" # 'Others', 'Health and Hygiene', 'Household', 'Baking Goods', 'Breakfast', 'Snack Foods', 'Dairy', 'Fruits and Vegetables', 'Breads', 'Seafood', 'Soft Drinks', 'Starchy Foods', 'Meat', 'Frozen Foods', 'Canned', 'Hard Drinks # # ESPAÑOL: # 'Otros', 'Salud e higiene', 'Hogar', 'Productos para hornear', 'Desayuno', 'Snack Foods', 'Lácteos', 'Frutas y verduras', 'Panes', 'Mariscos', 'Refrescos' , 'Alimentos con almidón', 'Carne', 'Alimentos congelados', 'Enlatados', 'Bebidas Duras # # RECATEGORIZACIÓN SUGERIDA (de acuerdo a la similitud entre los productos): # 1- 'Non perishable': 'Others', 'Health and Hygiene', 'Household' # 2- 'Fruits and Vegetables' # 3- 'Meats': 'Seafood', 'Meat' # 4- 'Processed Foods': 'Baking Goods', 'Frozen Foods', 'Canned' # 5- 'Starchy Foods': 'Breads', 'Starchy Foods', 'Snack Foods', 'Breakfast' # 6- 'Drinks': 'Soft Drinks', 'Hard Drinks, 'Dairy' # - # #### FEATURES ENGINEERING: creando categorías para 'Item_Type' # + # FEATURES ENGINEERING: creando categorías para 'Item_Type' data['Item_Type'] = data['Item_Type'].replace({'Others': 'Non perishable', 'Health and Hygiene': 'Non perishable', 'Household': 'Non perishable', 'Seafood': 'Meats', 'Meat': 'Meats', 'Baking Goods': 'Processed Foods', 'Frozen Foods': 'Processed Foods', 'Canned': 'Processed Foods', 'Snack Foods': 'Processed Foods', 'Breads': 'Starchy Foods', 'Breakfast': 'Starchy Foods', 'Soft Drinks': 'Drinks', 'Hard Drinks': 'Drinks', 'Dairy': 'Drinks'}) # FEATURES ENGINEERING: asignación de nueva categorías para 'Item_Fat_Content' data.loc[data['Item_Type'] == 'Non perishable', 'Item_Fat_Content'] = 'NA' # - # Visualicemos de nuevo esta recategorización en un gráfico: # + #plt.figure(figsize=(12,4)) #sns.violinplot(x="Item_Type", y='Item_Outlet_Sales', hue="Item_Fat_Content", data=data) #plt.show() plt.figure(figsize=(12,4)) sns.violinplot(x='Item_Type', y="Item_Outlet_Sales", data=data) plt.show() # - data['Item_Type'].unique() # H0: las medias son significativamente iguales entre los grupos stats.kruskal(list(data.dropna().loc[data['Item_Type']== 'Drinks', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Type']== 'Meats', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Type']== 'Fruits and Vegetables', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Type']== 'Non perishable', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Type']== 'Fruits and Vegetables', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Type']== 'Processed Foods', 'Item_Outlet_Sales']), list(data.dropna().loc[data['Item_Type']== 'Starchy Foods', 'Item_Outlet_Sales'])) # No se evidencia diferencias en los niveles de ventas entre las diferentes categorias de tipo de productos (reagrupados). # ### Análisis Bi-variado: # Variables Continuas vs Continuas numerics_var = ['Item_Weight', 'Item_Visibility', 'Item_MRP', 'Item_Outlet_Sales'] sns.pairplot(data.dropna(), x_vars=numerics_var, y_vars=numerics_var, kind='scatter', diag_kind='kde' ) #plt.savefig('hist_scatter') plt.show() # No se observa alguna relación significativa entre las variables, lo que señala la necesidad de preprocesar los datos y realizar algunas transformaciones. # # Veamos las correlaciones entre estas variables. data[numerics_var].corr() # - La característica con correlación más alta es Item_MRP (r=0.57), corresponde a una correlación de nivel moderado. # - El Target guarda una relación casi nula con los pesos de los productos, mientras que con el grado de visibilidad del producto se observa una correlación baja negativa (r=-0.13). Esta última correlación no parece tener sentido, lo que sugiere que estos valores puede que no esten bien registrados. # - Un aspecto positivo es que la correlación entre las variables independientes es baja, lo que indica que no existe autocorrelación entre estas vraiables. # #### FEATURES ENGINEERING: Codificando los niveles de precios de los productos print(pd.qcut(data['Item_MRP'], 4,).unique()) data['Item_MRP'] = pd.qcut(data['Item_MRP'], 4, labels = [1, 2, 3, 4]) # ### Codificación de variables ordinales: # Esta vez no se considera tomar las características: 'Item_Type' y 'Item_Fat_Content' # Se utiliza una copia de data para separar los valores codificados en un dataframe distinto. dataframe = data.drop(columns=['Item_Type', 'Item_Fat_Content']).copy() dataframe.head() # Se decide realizar una codificación manual y no con algún método automático, para guardar el orden de los valores. # + ordinals_var = ['Outlet_Size', 'Outlet_Location_Type'] for var in ordinals_var: serie_var = dataframe[var].unique() serie_var.sort() print(var, ':', serie_var) # - # #### FEATURES ENGINEERING: Codificación de variables ordinales # Codificación de variables ordinales dataframe['Outlet_Size'] = dataframe['Outlet_Size'].replace({'High': 2, 'Medium': 1, 'Small': 0}) dataframe['Outlet_Location_Type'] = dataframe['Outlet_Location_Type'].replace({'Tier 1': 2, 'Tier 2': 1, 'Tier 3': 0}) # Estas categorias se ordenaron asumiendo la categoria 2 como más lejos dataframe.head() # #### FEATURES ENGINEERING: Codificación de variables nominales dataframe = pd.get_dummies(dataframe, columns=['Outlet_Type']) dataframe.head() print(dataframe.info()) # Revisamos los valores de correlación: mask = np.zeros_like(dataframe.corr(), dtype=np.bool) mask[np.triu_indices_from(mask)] = True plt.figure(figsize=(12,12)) sns.heatmap(dataframe.corr(), mask=mask, vmax=.3, center=0, annot=True, square=True, linewidths=.5, cbar_kws={"shrink": .6}) plt.show() # - El coeficiente de correlación entre las variables independientes es entre bajo y medio, lo que indica que no existe autocorrelación fuerte entre estas variables. # ### Preparando data de entrenamiento y de test # + # Eliminación de variables que no contribuyen a la predicción por ser muy específicas dataset = dataframe.drop(columns=['Item_Identifier', 'Outlet_Identifier']) # División del dataset de train y test df_train = dataset.loc[data['Set'] == 'train'] df_test = dataset.loc[data['Set'] == 'test'] # Eliminando columnas sin datos df_train.drop(['Set'], axis=1, inplace=True) df_test.drop(['Item_Outlet_Sales','Set'], axis=1, inplace=True) # Guardando los datasets df_train.to_csv("train_final.csv") df_test.to_csv("test_final.csv") # - df_train.head() df_test.head() # #### ENTRENAMIENTO # + # Importando librerías para el modelo from sklearn.model_selection import train_test_split, cross_validate, cross_val_score from sklearn import metrics from sklearn.linear_model import LinearRegression seed = 28 model = LinearRegression() # División de dataset de entrenaimento y validación X = df_train.drop(columns='Item_Outlet_Sales') #[['Item_Weight', 'Item_MRP', 'Outlet_Establishment_Year', 'Outlet_Size', 'Outlet_Location_Type']] # .drop(columns='Item_Outlet_Sales') x_train, x_val, y_train, y_val = train_test_split(X, df_train['Item_Outlet_Sales'], test_size = 0.3, random_state=seed) # Entrenamiento del modelo model.fit(x_train,y_train) # Predicción del modelo ajustado para el conjunto de validación pred = model.predict(x_val) # Cálculo de los errores cuadráticos medios y Coeficiente de Determinación (R^2) mse_train = metrics.mean_squared_error(y_train, model.predict(x_train)) R2_train = model.score(x_train, y_train) print('Métricas del Modelo:') print('ENTRENAMIENTO: RMSE: {:.2f} - R2: {:.4f}'.format(mse_train**0.5, R2_train)) mse_val = metrics.mean_squared_error(y_val, pred) R2_val = model.score(x_val, y_val) print('VALIDACIÓN: RMSE: {:.2f} - R2: {:.4f}'.format(mse_val**0.5, R2_val)) print('\nCoeficientes del Modelo:') # Constante del modelo print('Intersección: {:.2f}'.format(model.intercept_)) # Coeficientes del modelo coef = pd.DataFrame(x_train.columns, columns=['features']) coef['Coeficiente Estimados'] = model.coef_ print(coef, '\n') coef.sort_values(by='Coeficiente Estimados').set_index('features').plot(kind='bar', title='Importancia de las variables', figsize=(12, 6)) plt.show() # - # ## Principales variables utilizadas por el modelo: # - Con relación directa: Outlet_Type_Supermarket Type3, Item_MRP # - Con relación inversa: Outlet_Type_Grocery Store, Item_Visibility # ## SUPUESTO DE REGRESIÓN LINEAL: # 1. Existe relación lineal entre la *variable respuesta y las variables predictoras.* # 2. Las variables predictoras (independientes) no están correlacionadas entre sí. La presencia de colinealidad conduce a un fenómeno conocido como multicolinealidad. # 3. Los residuos no están correlacionados. De lo contrario, presentará autocorrelación. # 4. Los residuos deben tener una varianza constante. La varianza no constante conduce a heterocedasticidad. # + import scipy.stats as stats res = pred - y_val print('Coeficiente de asimetría:', res.skew()) sm.qqplot(res, stats.t, fit=True, line='45') plt.show() plt.hist(res) plt.show() # - # Los residuos no presentan distribución normal, y tienen una asimetría izquierda. # residual plot plt.figure(figsize=(12,6)) x_plot = plt.scatter(pred, (pred - y_val), c='g') plt.hlines(y=0, xmin= -1100, xmax=5100) plt.xlabel('Valores ajustados o predichos') plt.ylabel('Valores Residuales') plt.title('Valores residuales vs predichos') plt.show() # El gráfico presenta forma de embudo, no existe sesgo pero si *heterocedasticidad*; la dispersión vertical indica varianza no constante. # ### Aplicación del modelo en el dataset de test # Predicción del modelo ajustado data_test = df_test.copy() data_test['pred_Sales'] = model.predict(data_test) data_test.to_csv('data_test') data_test.head() mask = np.zeros_like(data_test.corr(), dtype=np.bool) mask[np.triu_indices_from(mask)] = True plt.figure(figsize=(12,12)) sns.heatmap(data_test.corr(), mask=mask, vmax=.3, center=0, annot=True, square=True, linewidths=.5, cbar_kws={"shrink": .6}) plt.show() # Se obtienen valores similares de correlación entre las variables.
notebook/notebook_analysis_train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### File - 파일 # - 파이썬을 이용해서 파일을 읽고, 쓰기가 가능합니다. # # - 파일을 읽고 쓸 때의 파일 작성에 대한 옵션 # - w : 파일쓰기 (덮어쓰기) # - x : 파일쓰기 (파일이 존재하면 에러가 발생) # - r : 파일읽기 # - a : 파일에 추가쓰기 (append) # # - 파일을 읽고 쓸 때의 파일 타입에 대한 옵션 # - t : 텍스트 데이터 타입 : 문자열을 저장할 때 사용 # - b : 이진 데이터 타입 : 객체를 저장할 때 사용 # # - open : 파일을 열 때 사용하는 함수 : open("파일경로 및 이름","옵션") # ##### 파일쓰기 s = """data science fighting!! """ f = open("test.text", "wt") f.write(s) f.close() # 꼭 해줘야함. # print를 활용해서 파일쓰기 f = open("test2.txt","wt") print(s, file=f, end="") f.close() # xt s = "datascience" f = open("test2.txt","xt") f.write(s) # + # 이진파일쓰기 - binary 파일 쓰기 # wb # bytes 함수 : 이진데이터를 바꿔주는 함수 # - data = bytes(range(10)) data f = open("range.b","wb") f.write(data) f.close() # ##### 파일 읽기 # 이진파일 읽기 f = open("range.b","rb") data = f.read() f.close() data, list(data) # 텍스트 파일 읽기 f = open("test2.txt","rt") s = f.read() f.close() # 텍스트 파일 일부만 읽어 오기 f = open("test.text","rt") s = f.read(10) #10자만 읽어오기 f.close() print(s) # 텍스트 파일 한줄 읽어오기 f = open("test.text","rt") s = f.readline() f.close() print(s) # 텍스트 파일 두줄 읽어오기 f = open("test.text","rt") s = f.readline() print(s) s = f.readline() print(s) f.close() f = open("test.text","rt") s = f.readlines() f.close() print(s) # #### with # - 파일을 열 때 close함수를 호출하지 않아도 됩니다. with open("test.text","rt") as f : s = f.read() print(s) # #### Pickle - 피클 # - 객체를 저장하는 것을 직렬화라고 합니다. # - pickle 파이썬에서 객체를 직렬화해서 저장하고 복원하는 모듈 import pickle # + class A: def __init__(self, data): self.data = data def disp(self): print(self.data) a = A("pickle test") a.disp() # - with open("obj.p","wb") as f: pickle.dump(a, f) #dump(obj, file): 객체 저장 with open("obj.p", "rb") as f: load_a = pickle.load(f) load_a.disp()
python/01_python_syntax/08_File_Pickle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import re import os import sys # + path_to_csv = '/home/louisf/Dropbox/mtg/mtg_rules_mapping/data/MagicCompRules 20170925.txt' with open(path_to_csv, 'r', encoding='windows-1252') as f: strs = f.readlines() # - strs[30] example = strs[30] # + regexp = '\d\d\d\.\d*\.\s\w*\s\w*' regexp2 ='7\d\d\.\d*\.\s\w*\s\w*' p = re.compile(regexp2) print(p.match(example)) # - m = p.match(example) m.group() stuff = [] for line in strs: m = p.match(line) if m is not None: print(line) line
mapping_hacking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Capstone # language: python # name: capstone # --- # # Plot each step of EPI # ### This notebook is sixth in the series of soiling detection pipeline notebooks (following step after the notebook <font color='green'>soiling_detection_regression.ipynb</font>) # Author: <NAME> # ### LINA's note: In this notebook, I plot EPI after each step of the filtering. # 1. Unfiltered EPI # 2. EPI After preprocessing (remove bad strings) # 3. EPI with preprocessed and time masked # 4. EPI with preprocessed, time masked and big-drop filtered # 5. EPI with preprocessed, time masked, big-drop filtered and bad-day filtered import pandas as pd import numpy as np import copy import datetime from datetime import datetime import matplotlib.pyplot as plt from pathlib import Path # ## Import data from previous notebooks # ### 1. Unfiltered EPI root_path = "../data/temp/park2/preprocessing/" df_EPI_filepath = root_path + "df_EPI.csv" def read_data(df_EPI_filepath): df_EPI = pd.read_csv(df_EPI_filepath, delimiter=',',parse_dates=['datetime'], date_parser = pd.to_datetime, index_col='datetime') return df_EPI df_EPI = read_data(df_EPI_filepath) df_EPI['median_EPI'] = df_EPI.loc[:,:].median(axis=1) df_EPI_daily_median_series = df_EPI['median_EPI'] # ### 2. EPI After preprocessing (remove bad strings) root_path = "../data/temp/park2/time_mask_filter/" EPI_stringfilt_filepath = root_path + "EPI_stringfilt.csv" def read_data(EPI_stringfilt_filepath): EPI_stringfilt = pd.read_csv(EPI_stringfilt_filepath, delimiter=',',parse_dates=['datetime'], date_parser = pd.to_datetime, index_col='datetime') return EPI_stringfilt EPI_stringfilt = read_data(EPI_stringfilt_filepath) # EPI_stringfilt= EPI_stringfilt.drop(columns=['med_strings', 'H']) EPI_stringfilt['median_EPI'] = EPI_stringfilt.loc[:,:].median(axis=1) EPI_stringfilt_median_series = EPI_stringfilt['median_EPI'] # ### 3. EPI with preprocessed and time masked root_path = "../data/temp/park2/time_mask_filter/" EPI_timemask_filepath = root_path + "EPI_timemask.csv" def read_data(EPI_timemask_filepath): EPI_timemask = pd.read_csv(EPI_timemask_filepath, delimiter=',',parse_dates=['datetime'], date_parser = pd.to_datetime, index_col='datetime') return EPI_timemask EPI_timemask = read_data(EPI_timemask_filepath) # EPI_timemask= EPI_timemask.drop(columns='med_strings') EPI_timemask['median_EPI'] = EPI_timemask.loc[:,:].median(axis=1) EPI_timemaskt_median_series = EPI_timemask['median_EPI'] # ### 4. EPI with preprocessed, time masked and big-drop filtered root_path = "../data/temp/park2/big_drop_filter/" EPI_bigdrop_filt_filepath = root_path + "EPI_bigdrop_filt.csv" def read_data(EPI_bigdrop_filt_filepath): EPI_bigdrop_filt = pd.read_csv(EPI_bigdrop_filt_filepath, delimiter=',',parse_dates=['datetime'], date_parser = pd.to_datetime, index_col='datetime') return EPI_bigdrop_filt EPI_bigdrop_filt = read_data(EPI_bigdrop_filt_filepath) # EPI_bigdrop_filt= EPI_bigdrop_filt.drop(columns='med_strings') EPI_bigdrop_filt['median_EPI'] = EPI_bigdrop_filt.loc[:,:].median(axis=1) EPI_bigdrop_filt_median_series = EPI_bigdrop_filt['median_EPI'] # ### 5. EPI with preprocessed, time masked, big-drop filtered and bad-day filtered root_path = "../data/temp/park2/bad_day_filter/" EPI_dropsfilt_baddayfilt_filepath = root_path + "EPI_dropsfilt_baddayfilt.csv" def read_data(EPI_dropsfilt_baddayfilt_filepath): EPI_dropsfilt_baddayfilt = pd.read_csv(EPI_dropsfilt_baddayfilt_filepath, delimiter=',',parse_dates=['datetime'], date_parser = pd.to_datetime, index_col='datetime') return EPI_dropsfilt_baddayfilt EPI_dropsfilt_baddayfilt = read_data(EPI_dropsfilt_baddayfilt_filepath) # EPI_dropsfilt_baddayfilt= EPI_dropsfilt_baddayfilt.drop(columns='med_strings') EPI_dropsfilt_baddayfilt['median_EPI'] = EPI_dropsfilt_baddayfilt.loc[:,:].median(axis=1) EPI_dropsfilt_baddayfilt_median_series = EPI_dropsfilt_baddayfilt['median_EPI'] root_path = "../data/temp/park2/filtered/" EPI_bigdrop_filt_filepath = root_path + "drops_filt.csv" def read_data(EPI_bigdrop_filt_filepath): EPI_bigdrop_filt = pd.read_csv(EPI_bigdrop_filt_filepath, delimiter=',',parse_dates=['datetime'], date_parser = pd.to_datetime, index_col='datetime') return EPI_bigdrop_filt EPI_bigdrop_filt = read_data(EPI_bigdrop_filt_filepath) # EPI_bigdrop_filt= EPI_bigdrop_filt.drop(columns='med_strings') EPI_bigdrop_filt['median_EPI'] = EPI_bigdrop_filt.loc[:,:].median(axis=1) EPI_bigdrop_filt_median_series = EPI_bigdrop_filt['median_EPI'] # ## Plot: 1. unfiltered EPI df_EPI_daily_median_series # + # def soiling_interval_plot(soiling_info, normalized_yield, point_alpha=0.5, # profile_alpha=1, ymin=None, ymax=None, # point_color=None, profile_color=None): ymin=0.7 ymax=1.0 fig, ax = plt.subplots(figsize=(12,6)) ax.plot(df_EPI_daily_median_series.index,df_EPI_daily_median_series,'o',alpha=0.5,markersize=10, color='#03989e') ax.set_ylim(ymin, ymax) ax.set_yticks(np.arange(ymin,1.05,0.1),minor=False) ax.set_ylabel('EPI',fontsize=20) ax.set_title('Daily Energy Performance Index (EPI)',fontsize=20) ax.tick_params(axis = 'both', which = 'major', labelsize = 20) fig.autofmt_xdate(); # fig.savefig('EPI1.png'); # - # ## Plot: 2. EPI after preprocessing EPI_stringfilt_median_series # + # def soiling_interval_plot(soiling_info, normalized_yield, point_alpha=0.5, # profile_alpha=1, ymin=None, ymax=None, # point_color=None, profile_color=None): ymin=0.8 ymax=1.1 fig, ax = plt.subplots(figsize=(12,6)) ax.plot(EPI_stringfilt_median_series.index,EPI_stringfilt_median_series,'o',alpha=0.5,markersize=10, color='#03989e') ax.set_ylim(ymin, ymax) ax.set_yticks(np.arange(ymin,1.05,0.1),minor=False) ax.set_ylabel('EPI',fontsize=20) ax.set_title('Daily Energy Performance Index (EPI)',fontsize=20) ax.tick_params(axis = 'both', which = 'major', labelsize = 20) fig.autofmt_xdate(); # fig.savefig('EPI2.png'); # - # ## Plot: 3. preprocessed, time masked EPI EPI_timemaskt_median_series # + # def soiling_interval_plot(soiling_info, normalized_yield, point_alpha=0.5, # profile_alpha=1, ymin=None, ymax=None, # point_color=None, profile_color=None): ymin=0.7 ymax=1.0 fig, ax = plt.subplots(figsize=(12,6)) ax.plot(EPI_timemaskt_median_series.index,EPI_timemaskt_median_series,'o',alpha=0.5,markersize=10, color='#03989e') ax.set_ylim(ymin, ymax) ax.set_yticks(np.arange(ymin,1.05,0.1),minor=False) ax.set_ylabel('EPI',fontsize=20) ax.set_title('Daily Energy Performance Index (EPI)',fontsize=20) ax.tick_params(axis = 'both', which = 'major', labelsize = 20) fig.autofmt_xdate(); # fig.savefig('EPI3.png'); # - # ## Plot: 4. preprocessed, time masked, big-drop filtered EPI EPI_bigdrop_filt_median_series # + # def soiling_interval_plot(soiling_info, normalized_yield, point_alpha=0.5, # profile_alpha=1, ymin=None, ymax=None, # point_color=None, profile_color=None): ymin=0.7 ymax=1.0 fig, ax = plt.subplots(figsize=(12,6)) ax.plot(EPI_bigdrop_filt_median_series.index,EPI_bigdrop_filt_median_series,'o',alpha=0.5,markersize=10, color='#03989e') ax.set_ylim(ymin, ymax) ax.set_yticks(np.arange(ymin,1.05,0.1),minor=False) ax.set_ylabel('EPI',fontsize=20) ax.set_title('Daily Energy Performance Index (EPI)',fontsize=20) ax.tick_params(axis = 'both', which = 'major', labelsize = 20) fig.autofmt_xdate(); # fig.savefig('EPI4.png'); # - # ## Plot: 5. preprocessed, time masked, big-drop filtered, bad-day filtered EPI EPI_dropsfilt_baddayfilt_median_series # + # def soiling_interval_plot(soiling_info, normalized_yield, point_alpha=0.5, # profile_alpha=1, ymin=None, ymax=None, # point_color=None, profile_color=None): ymin=0.7 ymax=1.0 fig, ax = plt.subplots(figsize=(12,6)) ax.plot(EPI_dropsfilt_baddayfilt_median_series.index,EPI_dropsfilt_baddayfilt_median_series,'o',alpha=0.5,markersize=10,color='#03989e') ax.set_ylim(ymin, ymax) # ax.set_yticks(np.arange(ymin,ymax,0.5),minor=False) ax.set_yticks(np.arange(ymin,1.05,0.1),minor=False) ax.set_ylabel('EPI',fontsize=20) ax.set_title('Daily Energy Performance Index (EPI)',fontsize=20) ax.tick_params(axis = 'both', which = 'major', labelsize = 20) fig.autofmt_xdate(); # fig.savefig('EPI5.png');
notebooks/6_plot_each_step_of_EPI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import pandas as pd from PyFin.api import * from alphamind.api import * import numpy as np from alphamind.execution.naiveexecutor import NaiveExecutor from matplotlib import pyplot as plt data_source = 'postgresql+psycopg2://alpha:alpha@172.16.17.32:8889/alpha' engine = SqlEngine(data_source) # - universe = Universe('ashare_ex') freq = '10b' benchmark_code = 905 start_date = '2010-01-01' end_date = '2019-12-31' ref_dates = makeSchedule(start_date, end_date, freq, 'china.sse') horizon = map_freq(freq) industry_name = 'sw' industry_level = 1 # + factors_store = { 'f01': CSQuantiles(LAST('IVR'), groups='sw1'), 'f02': CSQuantiles(LAST('ROE'), groups='sw1'), 'f03': CSQuantiles(LAST('FY12P'), groups='sw1')} factor_data_org = engine.fetch_factor_range(universe, factors_store, dates=ref_dates) factors = list(factors_store.keys()) # - factor_data_org industry = engine.fetch_industry_range(universe, dates=ref_dates) factor_data = pd.merge(factor_data_org, industry, on=['trade_date', 'code']).fillna(0.) risk_total = engine.fetch_risk_model_range(universe, dates=ref_dates)[1] risk_total # + return_data = engine.fetch_dx_return_range(universe, dates=ref_dates, horizon=horizon, offset=0, benchmark = benchmark_code) # - return_data benchmark_total = engine.fetch_benchmark_range(dates=ref_dates, benchmark=benchmark_code) industry_total = engine.fetch_industry_matrix_range(universe, dates=ref_dates, category=industry_name, level=industry_level) # + # # Constraintes settings weight_gap = 0.01 industry_names = industry_list(industry_name, industry_level) constraint_risk = ['EARNYILD', 'LIQUIDTY', 'GROWTH', 'SIZE', 'BETA', 'MOMENTUM'] + industry_names total_risk_names = constraint_risk + ['benchmark', 'total'] b_type = [] l_val = [] u_val = [] previous_pos = pd.DataFrame() rets = [] turn_overs = [] leverags = [] for name in total_risk_names: if name == 'benchmark': b_type.append(BoundaryType.RELATIVE) l_val.append(0.0) u_val.append(1.0) elif name == 'total': b_type.append(BoundaryType.ABSOLUTE) l_val.append(.0) u_val.append(.0) else: b_type.append(BoundaryType.ABSOLUTE) l_val.append(-0.005) u_val.append(0.005) bounds = create_box_bounds(total_risk_names, b_type, l_val, u_val) # - bounds # + # take ref_dates[i] as an example for i in range(len(ref_dates)-1): # machine learning model print(ref_dates[i]) test_x = factor_data[factor_data.trade_date == ref_dates[i]] test_y_excess = return_data[return_data.trade_date == ref_dates[i]] total_data_test_excess = pd.merge(test_x, test_y_excess, on=['trade_date', 'code']).dropna() industry_matrix = industry_total[industry_total.trade_date == ref_dates[i]] benchmark_w = benchmark_total[benchmark_total.trade_date == ref_dates[i]] risk_matrix = risk_total[risk_total.trade_date == ref_dates[i]] total_data = pd.merge(industry_matrix, benchmark_w, on=['code'], how='left').fillna(0.) total_data = pd.merge(total_data, risk_matrix, on=['code']) total_data_test_excess = pd.merge(total_data, total_data_test_excess, on=['code']) benchmark_w = total_data_test_excess.weight.values is_in_benchmark = (benchmark_w > 0.).astype(float).reshape((-1, 1)) total_risk_exp = np.concatenate([total_data_test_excess[constraint_risk].values.astype(float), is_in_benchmark, np.ones_like(is_in_benchmark)], axis=1) total_risk_exp = pd.DataFrame(total_risk_exp, columns=total_risk_names) constraints = LinearConstraints(bounds, total_risk_exp, benchmark_w) lbound = np.maximum(0., benchmark_w - weight_gap) ubound = weight_gap + benchmark_w # target_pos, _ = er_portfolio_analysis(predict_xgboost, # total_data_test_excess['industry'].values, # None, # constraints, # False, # benchmark_w, # method = 'risk_neutral', # lbound=lbound, # ubound=ubound) # -
src/stacking/notebooks/cross_section/rolling_backtest/alphamind_machine_learning_backtest-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### Problem Tutorial 1: Regression Model # # We want to predict the gas consumption (in millions of gallons/year) in 48 of the US states # based on some key features. # # These features are # * petrol tax (in cents); # * per capital income (in US dollars); # * paved highway (in miles); and # * population of people with driving licences # # <table> # <tr><td> # <img src="https://informedinfrastructure.com/wp-content/uploads/2012/06/traffic-jam.jpg" # alt="Bank Note " width="600"> # </td></tr> # <tr><td></td></tr> # <tr><td> # <img src="https://miro.medium.com/max/593/1*pfmeGgGM5sxmLBQ5IQfQew.png" # alt="Matrix" width="600"> # <tr><td></td></tr> # <tr><td>And seems like a bad consumption problem to have ...</td></tr> # </table> # # #### Solution: # # Since this is a regression problem where the value is a range of numbers, we can use the # common Random Forest Algorithm in Scikit-Learn. Most regression models are evaluated with # four [standard evalution metrics](https://medium.com/usf-msds/choosing-the-right-metric-for-machine-learning-models-part-1-a99d7d7414e4): # # * Mean Absolute Error (MAE) # * Mean Squared Error (MSE) # * Root Mean Squared Error (RSME) # * R-squared (r2) # # This example is borrowed from this [source](https://stackabuse.com/random-forest-algorithm-with-python-and-scikit-learn/) and modified and modularized for this tutorial # # Aim of this this: # # 1. Understand MLflow Tracking API # 2. How to use the MLflow Tracking API # 3. Use the MLflow API to experiment several Runs # 4. Interpret and observe runs via the MLflow UI # # Some Resources: # * https://mlflow.org/docs/latest/python_api/mlflow.html # * https://www.saedsayad.com/decision_tree_reg.htm # * https://towardsdatascience.com/understanding-random-forest-58381e0602d2 # * https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html # * https://towardsdatascience.com/regression-an-explanation-of-regression-metrics-and-what-can-go-wrong-a39a9793d914 # * https://www.analyticsvidhya.com/blog/2020/04/feature-scaling-machine-learning-normalization-standardization/ # Define all the classes and bring them into scope # ### Load the Dataset # %run ./setup/lab_utils_cls.ipynb # %run ./setup/rfr_regression_cls.ipynb # %run ./setup/rfc_classification_cls.ipynb # %run ./setup/rfr_regression_base_exp_cls.ipynb # Load and print dataset dataset = Utils.load_data("https://raw.githubusercontent.com/dmatrix/mlflow-workshop-part-1/master/data/petrol_consumption.csv") dataset.head(5) # Get descriptive statistics for the features dataset.describe() # Iterate over several runs with different parameters, such as number of trees. # For excercises, try changing max_depth, number of estimators, and consult the documentation what other tunning parameters # may affect a better outcome and supply them to the class constructor # Excercise 1 & 2: # 1) add key-value parameters to this list # 2) iterate over the list # 3) Compute R2 in the RFRModel class max_depth = 0 for n in range (20, 250, 50): max_depth = max_depth + 2 params = {"n_estimators": n, "max_depth": max_depth} rfr = RFRModel.new_instance(params) (experimentID, runID) = rfr.mlflow_run(dataset, run_name="Regression Petrol Consumption Model", verbose=True) print("MLflow Run completed with run_id {} and experiment_id {}".format(runID, experimentID)) print("-" * 100) # **Note**: # # With 20 trees, the root mean squared error is `64.93`, which is greater than 10 percent of the average petrol consumption i.e., `576.77`. # This may sugggest that we have not used enough estimators (trees). # ### Let's Explore the MLflow UI # # * Add Notes & Tags # * Compare Runs pick two best runs # * Annotate with descriptions and tags # * Evaluate the best run # !mlflow ui # #### Excercise Assignment. Try different runs with: # 1. Change or add parameters, such as depth of the tree or random_state: 42 etc. # 2. Change or alter the range of runs and increments of n_estimators # 3. Compute R2 metric in the `RFRModel` class and and log the metric # 3. Check in MLfow UI if the metrics are affected # #### HOMEWORK CHALLENGE # 6. Change the [RandomForestRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html) to a [LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html?highlight=linearregression#sklearn.linear_model.LinearRegression) # * Add extra tunning parameters # * Compare the evaluation metrics and ascertain which one is better
tracking/notebooks/jupyter/1_petrol_regression_lab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import datetime as dt url = "https://data.rivm.nl/covid-19/COVID-19_aantallen_gemeente_per_dag.csv" data = pd.read_csv(url, delimiter = ';') data.set_index('Date_of_report', inplace=True) data.index = pd.to_datetime(data.index) data.Date_of_publication = pd.to_datetime(data.Date_of_publication) gr_data = data.groupby(data.Date_of_publication).sum() def vline(date, text, plot_distance=True): plt.vlines(date, 0, 12500, linestyle = '--', color = 'k', linewidth = 1) plt.text(date-dt.timedelta(days=3), 13000, text, rotation = 90) if plot_distance: plt.text(date-dt.timedelta(days=7), -600, (date-min(gr_data.index)).days, color='white') # + plt.figure(figsize=(12,7)) plt.fill_between(gr_data.index, gr_data['Total_reported']*0, gr_data['Total_reported'].rolling(7).mean(), color = (254/255, 237/255, 1/255)) plt.fill_between([dt.datetime(2020,1,1), max(gr_data.index)], [-800, -800], [0,0], color = 'black') vline(dt.datetime(2020, 4,2), "Colline Distance Sociale") vline(dt.datetime(2020, 6,29), "l'Assouplissement") vline(dt.datetime(2020, 10,30), "<NAME>") vline(dt.datetime(2020,12,24), "<NAME>") vline(dt.datetime(2021,4,25), "Pandémie") vline(dt.datetime.today(), "<NAME>", plot_distance=False) plt.text(dt.datetime.today()+dt.timedelta(days=7), -600, (dt.datetime.today()-min(gr_data.index)).days, color='black') plt.ylim(-900, 24000) plt.xlim(min(gr_data.index), max(gr_data.index)+dt.timedelta(days=50)) plt.xticks([]) plt.savefig("TDF_corona_edition.png", dpi=200) # -
covid-tdf-profile.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Modeling and Simulation in Python # # Starter code for the orbit example # # <NAME> # # Copyright 2017 <NAME> # # License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) # # + # Configure Jupyter so figures appear in the notebook # %matplotlib inline # Configure Jupyter to display the assigned value after an assignment # %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * # - # ### Earth falling into the sun # # Here's a question from the web site [Ask an Astronomer](http://curious.astro.cornell.edu/about-us/39-our-solar-system/the-earth/other-catastrophes/57-how-long-would-it-take-the-earth-to-fall-into-the-sun-intermediate): # # "If the Earth suddenly stopped orbiting the Sun, I know eventually it would be pulled in by the Sun's gravity and hit it. How long would it take the Earth to hit the Sun? I imagine it would go slowly at first and then pick up speed." # # Here's a solution. # + # Here are the units we'll need s = UNITS.second N = UNITS.newton kg = UNITS.kilogram m = UNITS.meter # + # And an inition condition (with everything in SI units) r_0 = 147e9 * m init = State(r = r_0, v = 0 * m / s) # + # Making a system object r_earth = 6.371e6 * m r_sun = 695.508e6 * m system = System(init=init, G=6.674e-11 * N / kg**2 * m**2, m1=1.989e30 * kg, r_final=r_sun + r_earth, m2=5.972e24 * kg, t_0=0 * s, t_end=1e7 * s) # + # Here's a function that computes the force of gravity def universal_gravitation(state, system): """Computes gravitational force. state: State object with distance r system: System object with m1, m2, and G """ r, v = state unpack(system) force = G * m1 * m2 / r**2 return force # - universal_gravitation(init, system) # + # The slope function def slope_func(state, t, system): """Compute derivatives of the state. state: position, velocity t: time system: System object containing `g` returns: derivatives of y and v """ y, v = state unpack(system) force = universal_gravitation(state, system) dydt = v dvdt = -force / m2 return dydt, dvdt # + # Always test the slope function! slope_func(init, 0, system) # + # Here's an event function that stops the simulation # before the collision def event_func(state, t, system): r, v = state return r - system.r_final # + # Always test the event function! event_func(init, 0, system) # + # Finally we can run the simulation results, details = run_ode_solver(system, slope_func, events=event_func) details # + # Here's how long it takes... t_final = get_last_label(results) * s # + # ... expressed in units we understand t_final.to(UNITS.day) # + # Before plotting, we run the simulation again with `t_eval` ts = linspace(t_0, t_final, 201) results, details = run_ode_solver(system, slope_func, events=event_func, t_eval=ts) # + # Scaling the time steps to days results.index /= 60 * 60 * 24 # + # Scaling the distance to million km r = results.r / 1e9; # + # And plotting plot(r, label='r') decorate(xlabel='Time (day)', ylabel='Distance from sun (million km)') # -
code/orbit_starter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt # %matplotlib inline import numpy as np import math t = np.arange(0,10,0.1) v = 30 a = 30 g = -9,81 Vy = v*(1/2) Vx = v*(0.86602) print (Vy) print (Vx) # + sy = 15*t - 4.905*t**2 sx = 25.9806*t plt.plot(sx,sy) plt.show() # -
courses/modsim2018/tasks/Ativ Python 06.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import findspark findspark.init() from pyspark.sql import SparkSession spark = SparkSession.builder.appName('dates').getOrCreate() df = spark.read.csv('data/appl_stock.csv', inferSchema=True, header=True) df.select(['Date', 'Open']).show() from pyspark.sql.functions import (dayofmonth, hour, dayofyear, month, year, weekofyear,format_number, date_format) df.select(month(df['Date'])).show() # ### Calculate Average Closing Price Per Year # df.select(year(df['Date'])).show() newdf = df.withColumn('Year', year(df['Date'])) result = newdf.groupBy('Year').mean().select('Year', 'avg(Close)') newresult = result.withColumnRenamed('avg(Close)', 'Avg Close') newresult.select(['Year', format_number('Avg Close', 2).alias('Avg Close')]).show()
apache-spark/python/basics/Date Time Methods.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from IPython.display import HTML HTML('<img src="../giphy.gif">') from IPython.display import Image Image(filename="../giphy.gif.png") with open('../giphy.gif','rb') as f: display(Image(data=f.read(), format='png'))
GIF_________in notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.6 64-bit (''venv'': venv)' # metadata: # interpreter: # hash: 6bca6c8850f15a89fbb2388c46e468755a4d30f4afc825c6f172e3ab69684288 # name: python3 # --- # Import packages import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Run scripts to get the classes # %run ../tugofwar_horserace/horserace.py # %run ../tugofwar_horserace/tugofwar.py QUALITY_1 = [1.0, 1.2, 1.4, 1.6, 1.8, 2.0] QUALITY_2 = 2.0 THRES = 100 NUM_SIM = 1000 # Run simulations of tug-of-war model res = [] for quality_1 in QUALITY_1: t = TugOfWar(quality_1, QUALITY_2, THRES) res.append(t.simulate())
notebook/comparison.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![image](resources/qgss-header.png) # # Lab 9: Hardware-efficient trial states for variational quantum eigensolvers # In this lab, we want to find the ground state energy of the following two-qubit Hamiltonian representing molecular $H_2$ at its equilibrium distance. Note that we have already mapped the fermionic operators onto qubits here. # # ``` # h2_hamiltonian = (-1.0523732) II + # (0.39793742) IZ + # (-0.3979374) ZI + # (-0.0112801) ZZ + # (0.18093119) XX # ``` # # Note that these co-efficients are functions of the interatomic distance between the atoms. A term like `IZ` is shorthand notation for a tensor product of two-qubit operators -- the identity operator ($I$) on one qubit and pauli-Z operator ($Z$) on the other qubit. # ### Installing necessary packages # # Before we begin, you will need to install some prerequisites into your environment. Run the cell below to complete these installations. At the end, the cell outputs will be cleared. # + # !pip install -U -r grading_tools/requirements.txt from IPython.display import clear_output clear_output() # - # # Hardware efficient trial states # We need to prepare a guess for the ground state of this Hamiltonian. For this exercise, we will construct a hardware efficient trial state of depth `d=2`, with CNOT (`CX`) gates for the entanglers. We will interleave each entangling layer with a set of $R_yR_z$ gates on each qubit (total 4 layers). For the first guess, we will use rotation angles of $\pi/2$ on all the $R_y$ and $R_z$ gates. from numpy import pi from qiskit import QuantumCircuit, Aer, execute from qiskit.visualization import plot_histogram def prepare_hets_circuit(depth, angle1, angle2): hets_circ = QuantumCircuit(depth) hets_circ.ry(angle1, 0) hets_circ.rz(angle1, 0) hets_circ.ry(angle1, 1) hets_circ.rz(angle1, 1) for ii in range(depth): hets_circ.cx(0,1) hets_circ.ry(angle2,0) hets_circ.rz(angle2,0) hets_circ.ry(angle2,1) hets_circ.rz(angle2,1) return hets_circ hets_circuit = prepare_hets_circuit(2, pi/2, pi/2) hets_circuit.draw() # # Measuring expectation values # Next, we measure expectation values. **We will begin by measuring the ZZ expectation value, or $\langle ZZ \rangle$**. We will first create a copy of the `hets_circ` quantum circuit that we created above, and add measurements to it. # + def measure_zz_circuit(given_circuit): zz_meas = given_circuit.copy() zz_meas.measure_all() return zz_meas zz_meas = measure_zz_circuit(hets_circuit) zz_meas.draw() # - # Next, let's execute this quantum circuit and see the measurement outcomes. simulator = Aer.get_backend('qasm_simulator') # + result = execute(zz_meas, backend = simulator, shots=10000).result() counts = result.get_counts(zz_meas) plot_histogram(counts) # - # We can analyze the counts and calculate the $\langle ZZ \rangle$ as follows: def measure_zz(given_circuit, num_shots = 10000): zz_meas = measure_zz_circuit(given_circuit) result = execute(zz_meas, backend = simulator, shots = num_shots).result() counts = result.get_counts(zz_meas) if '00' not in counts: counts['00'] = 0 if '01' not in counts: counts['01'] = 0 if '10' not in counts: counts['10'] = 0 if '11' not in counts: counts['11'] = 0 total_counts = counts['00'] + counts['11'] + counts['01'] + counts['10'] zz = counts['00'] + counts['11'] - counts['01'] - counts['10'] zz = zz / total_counts return zz zz = measure_zz(hets_circuit) print("<ZZ> =", str(zz)) # **What about $\langle ZI \rangle$ and $\langle IZ \rangle$? Do these need new circuits?** # # The answer is no, and they can be computed from the results above. # + def measure_zi(given_circuit, num_shots = 10000): zz_meas = measure_zz_circuit(given_circuit) result = execute(zz_meas, backend = simulator, shots = num_shots).result() counts = result.get_counts(zz_meas) if '00' not in counts: counts['00'] = 0 if '01' not in counts: counts['01'] = 0 if '10' not in counts: counts['10'] = 0 if '11' not in counts: counts['11'] = 0 total_counts = counts['00'] + counts['11'] + counts['01'] + counts['10'] zi = counts['00'] - counts['11'] + counts['01'] - counts['10'] zi = zi / total_counts return zi def measure_iz(given_circuit, num_shots = 10000): zz_meas = measure_zz_circuit(given_circuit) result = execute(zz_meas, backend = simulator, shots = num_shots).result() counts = result.get_counts(zz_meas) if '00' not in counts: counts['00'] = 0 if '01' not in counts: counts['01'] = 0 if '10' not in counts: counts['10'] = 0 if '11' not in counts: counts['11'] = 0 total_counts = counts['00'] + counts['11'] + counts['01'] + counts['10'] iz = counts['00'] - counts['11'] - counts['01'] + counts['10'] iz = iz / total_counts return iz # + zi = measure_zi(hets_circuit) print("<ZI> =", str(zi)) iz = measure_iz(hets_circuit) print("<IZ> =", str(iz)) # - # **Next, we measure $\langle XX \rangle$** def measure_xx_circuit(given_circuit): xx_meas = given_circuit.copy() ### WRITE YOUR CODE BETWEEN THESE LINES - START xx_meas.h(0) xx_meas.h(1) xx_meas.measure_all() ### WRITE YOUR CODE BETWEEN THESE LINES - END return xx_meas xx_meas = measure_xx_circuit(hets_circuit) xx_meas.draw() # + def measure_xx(given_circuit, num_shots = 10000): xx_meas = measure_xx_circuit(given_circuit) result = execute(xx_meas, backend = simulator, shots = num_shots).result() counts = result.get_counts(xx_meas) if '00' not in counts: counts['00'] = 0 if '01' not in counts: counts['01'] = 0 if '10' not in counts: counts['10'] = 0 if '11' not in counts: counts['11'] = 0 total_counts = counts['00'] + counts['11'] + counts['01'] + counts['10'] xx = counts['00'] + counts['11'] - counts['01'] - counts['10'] xx = xx / total_counts return xx xx = measure_xx(hets_circuit) print("<XX> =", str(xx)) # - # ### Now we evaluate the energy of the trial state def get_energy(given_circuit, num_shots = 10000): zz = measure_zz(given_circuit, num_shots = num_shots) iz = measure_iz(given_circuit, num_shots = num_shots) zi = measure_zi(given_circuit, num_shots = num_shots) xx = measure_xx(given_circuit, num_shots = num_shots) energy = (-1.0523732)*1 + (0.39793742)*iz + (-0.3979374)*zi + (-0.0112801)*zz + (0.18093119)*xx return energy energy = get_energy(hets_circuit) print("The energy of the trial state is", str(energy)) # # Computing gradients # The objective of the next set of exercises is to compute the next set of angles to use for the trial state preparation. Create two quantum circuits using `prepare_hets_circuit(depth=2, angle2 = pi/2)` for measuring expectation values with values for `angle1 = pi/2 + 0.1*pi/2` and `angle1 = pi/2 - 0.1*pi/2`, and measure the energy of both trial states. # + hets_circuit_plus = None hets_circuit_minus = None ### WRITE YOUR CODE BETWEEN THESE LINES - START hets_circuit_plus = prepare_hets_circuit(2, pi/2 + 0.1*pi/2, pi/2) hets_circuit_minus = prepare_hets_circuit(2, pi/2 - 0.1*pi/2, pi/2) ### WRITE YOUR CODE BETWEEN THESE LINES - END energy_plus = get_energy(hets_circuit_plus, num_shots=100000) energy_minus = get_energy(hets_circuit_minus, num_shots=100000) print(energy_plus, energy_minus) # - # As you can see, one of these is certainly lower energy than the other, and is also lower energy than the case when `angle1 = pi/2`. This is a suitable next point for our iteration of a variational eigensolver. # # Submitting your solutions for grading # Now, grade your solution by running the cell below after filling in your name and email address. Always provide the same name and email as the one you used during registration to ensure consistency. # + name = '<NAME>' email = '<EMAIL>' ### Do not change the lines below from grading_tools import grade grade(answer=measure_xx_circuit(hets_circuit), name=name, email=email, labid='lab9', exerciseid='ex1') grade(answer=hets_circuit_plus, name=name, email=email, labid='lab9', exerciseid='ex2') grade(answer=hets_circuit_minus, name=name, email=email, labid='lab9', exerciseid='ex3') # - # **Help us improve our educational tools by submitting your code**<br> # If you would like to help us learn how to improve our educational materials and offerings, you can opt in to send us a copy of your Jupyter notebook. By executing the cell below, you consent to sending us the code in your Jupyter notebook. All of the personal information will be anonymized. from IPython.display import display, Javascript;display(Javascript('IPython.notebook.save_checkpoint();')); from grading_tools import send_code;send_code('ex1.ipynb') # ## Bonus 1 # While this is not graded, explore whether the decision above would be easy if your execution ran different numbers of shots. In particular, measure `energy_plus` and `energy_minus` again with `100`, `1000` and `10000` shots to explore how easy or difficult this decision gets with each one. # + energy_plus_100, energy_plus_1000, energy_plus_10000 = 0, 0, 0 energy_minus_100, energy_minus_1000, energy_minus_10000 = 0, 0, 0 ### WRITE YOUR CODE BETWEEN THESE LINES - START ### WRITE YOUR CODE BETWEEN THESE LINES - END print(energy_plus_100, energy_minus_100, "difference = ", energy_minus_100 - energy_plus_100) print(energy_plus_1000, energy_minus_1000, "difference = ", energy_minus_1000 - energy_plus_1000) print(energy_plus_10000, energy_minus_10000, "difference = ", energy_minus_10000 - energy_plus_10000) # - # ## Bonus 2 # While this is not graded, diagonalize the Hamiltonian by writing down the matrices for the Pauli operators `I`, `X` and `Z`, and find the exact ground state energy. # + ### WRITE YOUR CODE BETWEEN THESE LINES - START ### WRITE YOUR CODE BETWEEN THESE LINES - END
lab9/ex1.ipynb