code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 文本分类(Text classification)
# 文本分类任务是将一句话或一段话划分到某个具体的类别。比如垃圾邮件识别,文本情绪分类等。
#
# Example::
# 1,商务大床房,房间很大,床有2M宽,整体感觉经济实惠不错!
#
#
# 其中开头的1是指这条评论的标签,表示是正面的情绪。我们将使用到的数据可以通过http://dbcloud.irocn.cn:8989/api/public/dl/dataset/chn_senti_corp.zip 下载并解压,当然也可以通过fastNLP自动下载该数据。
#
# 数据中的内容如下图所示。接下来,我们将用fastNLP在这个数据上训练一个分类网络。
# 
# ## 步骤
# 一共有以下的几个步骤
# (1) 读取数据
# (2) 预处理数据
# (3) 选择预训练词向量
# (4) 创建模型
# (5) 训练模型
# ### (1) 读取数据
# fastNLP提供多种数据的自动下载与自动加载功能,对于这里我们要用到的数据,我们可以用\ref{Loader}自动下载并加载该数据。更多有关Loader的使用可以参考\ref{Loader}
# +
from fastNLP.io import ChnSentiCorpLoader
loader = ChnSentiCorpLoader() # initialize a loader for the ChnSentiCorp Chinese sentiment dataset
data_dir = loader.download() # auto-download the data to the default cache directory and return that path
data_bundle = loader.load(data_dir) # read the data under {data_dir} into a DataBundle
# -
# See the fastNLP documentation for details on DataBundle. Print the bundle's basic information.
print(data_bundle)
# The data_bundle contains three DataSets. The code below shows a DataSet's basic content.
print(data_bundle.get_dataset('train')[:2]) # show the first two samples of the train set
# ### (2) 预处理数据
# 在NLP任务中,预处理一般包括: (a)将一整句话切分成汉字或者词; (b)将文本转换为index
#
# fastNLP中也提供了多种数据集的处理类,这里我们直接使用fastNLP的ChnSentiCorpPipe。更多关于Pipe的说明可以参考\ref{Pipe}。
# +
from fastNLP.io import ChnSentiCorpPipe
pipe = ChnSentiCorpPipe()
data_bundle = pipe.process(data_bundle) # every Pipe implements process(); both input and output are DataBundle
# -
print(data_bundle) # print the data_bundle again to inspect how it changed
# Besides the previous 3 DataSets, two Vocabulary objects were added. Print the DataSet contents.
print(data_bundle.get_dataset('train')[:2])
# A new 'chars' column (lists of indices) and a numeric 'target' column were added; these column
# names match the names of the two Vocabulary objects in the bundle. Print the character Vocabulary.
char_vocab = data_bundle.get_vocab('chars')
print(char_vocab)
# Vocabulary records the mapping between tokens and their indices, e.g.:
index = char_vocab.to_index('选')
print("'选'的index是{}".format(index)) # this value matches the first chars index of the first train instance printed above
print("index:{}对应的汉字是{}".format(index, char_vocab.to_word(index)))
# ### (3) 选择预训练词向量
# 由于Word2vec, Glove, Elmo, Bert等预训练模型可以增强模型的性能,所以在训练具体任务前,选择合适的预训练词向量非常重要。在fastNLP中我们提供了多种Embedding使得加载这些预训练模型的过程变得更加便捷。更多关于Embedding的说明可以参考\ref{Embedding}。这里我们先给出一个使用word2vec的中文汉字预训练的示例,之后再给出一个使用Bert的文本分类。这里使用的预训练词向量为'cn-fastnlp-100d',fastNLP将自动下载该embedding至本地缓存,fastNLP支持使用名字指定的Embedding以及相关说明可以参见\ref{Embedding}
# +
from fastNLP.embeddings import StaticEmbedding
# Load pretrained Chinese character embeddings by name; fastNLP downloads them to a local cache.
word2vec_embed = StaticEmbedding(char_vocab, model_dir_or_name='cn-char-fastnlp-100d')
# -
# ### (4) 创建模型
# 这里我们使用到的模型结构如下所示,补图
# +
from torch import nn
from fastNLP.modules import LSTM
import torch
# Define the model
class BiLSTMMaxPoolCls(nn.Module):
    """Bi-LSTM text classifier with max-pooling over time.

    Token ids are embedded, encoded by a bidirectional LSTM, max-pooled
    over the sequence dimension, and projected to class logits.
    """

    def __init__(self, embed, num_classes, hidden_size=400, num_layers=1, dropout=0.3):
        super().__init__()
        self.embed = embed
        # Each direction gets hidden_size // 2 units so the concatenated
        # bidirectional output has exactly hidden_size features.
        self.lstm = LSTM(self.embed.embedding_dim, hidden_size=hidden_size//2, num_layers=num_layers,
                         batch_first=True, bidirectional=True)
        self.dropout_layer = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, chars, seq_len):
        # Parameter names must match the DataSet field names ('chars', 'seq_len').
        # chars: [batch_size, max_len]; seq_len: [batch_size, ]
        embedded = self.embed(chars)               # [batch_size, max_len, embed_dim]
        encoded, _ = self.lstm(embedded, seq_len)  # [batch_size, max_len, hidden_size]
        encoded = self.dropout_layer(encoded)
        pooled, _ = torch.max(encoded, dim=1)      # max over the time dimension
        logits = self.fc(pooled)                   # [batch_size, num_classes]
        # fastNLP expects a dict return value; the prediction key should be 'pred'.
        return {'pred': logits}
# Instantiate the model: the number of classes is the target vocabulary size.
model = BiLSTMMaxPoolCls(word2vec_embed, len(data_bundle.get_vocab('target')))
# -
# ### (5) 训练模型
# fastNLP提供了Trainer对象来组织训练过程,包括完成loss计算(所以在初始化Trainer的时候需要指定loss类型),梯度更新(所以在初始化Trainer的时候需要提供优化器optimizer)以及在验证集上的性能验证(所以在初始化时需要提供一个Metric)
# +
from fastNLP import Trainer
from fastNLP import CrossEntropyLoss
from torch.optim import Adam
from fastNLP import AccuracyMetric
loss = CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=0.001)
metric = AccuracyMetric()
device = 0 if torch.cuda.is_available() else 'cpu' # run on GPU when available; training is much faster
trainer = Trainer(train_data=data_bundle.get_dataset('train'), model=model, loss=loss,
optimizer=optimizer, batch_size=32, dev_data=data_bundle.get_dataset('dev'),
metrics=metric, device=device)
trainer.train() # start training; afterwards the model that performed best on dev is loaded by default
# Evaluate the model's performance on the test set
from fastNLP import Tester
print("Performance on test is:")
tester = Tester(data=data_bundle.get_dataset('test'), model=model, metrics=metric, batch_size=64, device=device)
tester.test()
# -
# ### 使用Bert进行文本分类
# +
# Only the Embedding needs to be switched to use Bert
from fastNLP.embeddings import BertEmbedding
# For demonstration purposes the Bert weights are frozen (requires_grad=False), i.e. not fine-tuned
bert_embed = BertEmbedding(char_vocab, model_dir_or_name='cn', auto_truncate=True, requires_grad=False)
model = BiLSTMMaxPoolCls(bert_embed, len(data_bundle.get_vocab('target')), )
import torch
from fastNLP import Trainer
from fastNLP import CrossEntropyLoss
from torch.optim import Adam
from fastNLP import AccuracyMetric
loss = CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=2e-5)
metric = AccuracyMetric()
device = 0 if torch.cuda.is_available() else 'cpu' # run on GPU when available; training is much faster
# NOTE(review): dev_data is set to the *test* split here while the word2vec example above used
# 'dev' — selecting the best model on test data leaks it; confirm whether 'dev' was intended.
trainer = Trainer(train_data=data_bundle.get_dataset('train'), model=model, loss=loss,
optimizer=optimizer, batch_size=16, dev_data=data_bundle.get_dataset('test'),
metrics=metric, device=device, n_epochs=3)
trainer.train() # start training; afterwards the model that performed best on dev is loaded by default
# Evaluate the model's performance on the test set
from fastNLP import Tester
print("Performance on test is:")
tester = Tester(data=data_bundle.get_dataset('test'), model=model, metrics=metric, batch_size=64, device=device)
tester.test()
# -
| tutorials/文本分类.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Data Source: https://www.kaggle.com/worldbank/world-development-indicators <br> Folder: 'world-development-indicators'
# # Using Folium Library for Geographic Overlays
#
# ### Further exploring CO2 Emissions per capita in the World Development Indicators Dataset
#
# Let's start by installing the folium module. (You can skip this if you've already installed it)
# !pip install folium
import folium
import pandas as pd
# ### Country coordinates for plotting
#
# Download the raw form: https://raw.githubusercontent.com/python-visualization/folium/588670cf1e9518f159b0eee02f75185301327342/examples/data/world-countries.json _(Right click, "save as")_
country_geo = 'geo/world-countries.json' # GeoJSON file with country boundaries, keyed by ISO country code
# Read in the World Development Indicators Database
data = pd.read_csv('world-development-indicators/Indicators.csv')
data.shape
data.head()
# Pull out CO2 emisions for every country in 2011
# +
# select CO2 emissions for all countries in 2011
# Use a raw string: '\(' is an invalid escape sequence in an ordinary string literal
# (SyntaxWarning on modern Python). The backslash escapes the literal '(' for the
# regular expression used by str.contains below.
hist_indicator = r'CO2 emissions \(metric'
hist_year = 2011
mask1 = data['IndicatorName'].str.contains(hist_indicator)  # indicator name matches the CO2-per-capita pattern
mask2 = data['Year'].isin([hist_year])                      # rows for the chosen year only
# apply our mask
stage = data[mask1 & mask2]
stage.head()
# -
# ### Setup our data for plotting.
#
# Create a data frame with just the country codes and the values we want plotted.
# Keep only the country code and the value to be plotted.
plot_data = stage[['CountryCode','Value']]
plot_data.head()
# label for the legend
hist_indicator = stage.iloc[0]['IndicatorName']
# ## Visualize CO2 emissions per capita using Folium
#
# Folium provides interactive maps with the ability to create sophisticated overlays for data visualization
# Set up a folium map at a world-level zoom.
# Renamed from `map` to avoid shadowing the builtin map(). Also, folium.Map expects
# location=[latitude, longitude] and latitude must lie in [-90, 90], so the original
# [100, 0] was out of range (the old "@Alok" comment flagged this); [0, 0] centers
# the world view.
world_map = folium.Map(location=[0, 0], zoom_start=1.5)
# choropleth maps bind Pandas Data Frames and json geometries. This allows us to quickly visualize data combinations
folium.Choropleth(geo_data=country_geo, data=plot_data,
                  columns=['CountryCode', 'Value'],
                  key_on='feature.id',  # join GeoJSON feature ids against the CountryCode column
                  fill_color='YlGnBu', fill_opacity=0.7, line_opacity=0.2,
                  legend_name=hist_indicator).add_to(world_map)
# Save the interactive Folium plot to an HTML file
world_map.save('plot_data.html')
# Embed the Folium interactive html file in the notebook
from IPython.display import HTML
HTML('<iframe src=plot_data.html width=700 height=450></iframe>')
# More Folium Examples can be found at:<br>
# http://python-visualization.github.io/folium/docs-v0.5.0/quickstart.html#Getting-Started <br>
#
# Documentation at:<br>
# http://python-visualization.github.io/folium/docs-v0.5.0/modules.html
| Final Notebook/05c_Folium_Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3
# ---
# ## Install required libraries
#
# After your install these libraries it is recommended that you restart the notebook kernel from the Kernel menu above. After restarting the kernel, start from the **Azure Machine Learning Pipelines** section. Install the libraries only once.
# !pip install -U azureml-sdk
# # Azure Machine Learning Pipelines
#
# In this experience, you will learn how Contoso Auto can benefit from creating re-usable machine learning pipelines with Azure Machine Learning.
#
# The goal is to build a pipeline that demonstrates the basic data science workflow of data preparation, model training, and predictions. Azure Machine Learning allows you to define distinct steps and make it possible to re-use these pipelines as well as to rerun only the steps you need as you tweak and test your workflow.
#
# In this experience, you will be using a subset of data collected from Contoso Auto's fleet management program used by a fleet of taxis. The data is enriched with holiday and weather data. The goal is to train a regression model to predict taxi fares in New York City based on input features such as, number of passengers, trip distance, datetime, holiday information and weather information.
#
# The machine learning pipeline in this quickstart is organized into three steps:
#
# - **Preprocess Training and Input Data:** We want to preprocess the data to better represent the datetime features, such as hours of the day, and day of the week to capture the cyclical nature of these features.
#
# - **Model Training:** We will use data transformations and the GradientBoostingRegressor algorithm from the scikit-learn library to train a regression model. The trained model will be saved for later making predictions.
#
# - **Model Inference:** In this scenario, we want to support **bulk predictions**. Each time an input file is uploaded to a data store, we want to initiate bulk model predictions on the input data. However, before we do model predictions, we will re-use the preprocess data step to process input data, and then make predictions on the processed data using the trained model.
#
# Each of these pipelines is going to have implicit data dependencies and the example will demonstrate how AML make it possible to re-use previously completed steps and run only the modified or new steps in your pipeline.
#
# The pipelines will be run on the Azure Machine Learning compute.
# ## Azure Machine Learning and Pipeline SDK-specific Imports
# +
# Core Azure ML SDK and Pipeline imports.
import numpy as np
import pandas as pd
import azureml.core
from azureml.core import Workspace, Experiment, Datastore
from azureml.data.azure_storage_datastore import AzureBlobDatastore
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.widgets import RunDetails
# Check core SDK version number
print("SDK version:", azureml.core.VERSION)
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline, PipelineData, PipelineRun, StepRun
from azureml.pipeline.steps import PythonScriptStep
print("Pipeline SDK-specific imports completed")
# -
# ## Setup
# To begin, you will need to provide the following information about your Azure Subscription.
#
# **If you are using your own Azure subscription, please provide names for subscription_id, resource_group, workspace_name and workspace_region to use.** Note that the workspace needs to be of type [Machine Learning Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/setup-create-workspace).
#
# **If an environment is provided to you be sure to replace XXXXX in the values below with your unique identifier.**
#
# In the following cell, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments (*these values can be acquired from the Azure Portal*).
#
# To get these values, do the following:
# 1. Navigate to the Azure Portal and login with the credentials provided.
# 2. From the left hand menu, under Favorites, select `Resource Groups`.
# 3. In the list, select the resource group with the name similar to `XXXXX`.
# 4. From the Overview tab, capture the desired values.
#
# Execute the following cell by selecting the `>|Run` button in the command bar above.
# +
#Provide the Subscription ID of your existing Azure subscription
subscription_id = "" # <- needs to be the subscription with the Quick-Starts resource group
#Provide values for the existing Resource Group
resource_group = "tech_immersion_xxxxx" # <- replace XXXXX with your unique identifier
#Provide the Workspace Name and Azure Region of the Azure Machine Learning Workspace
workspace_name = "tech_immersion_aml_xxxxx" # <- replace XXXXX with your unique identifier (should be lowercase)
workspace_region = "eastus" # <- region of your Quick-Starts resource group
aml_compute_target = "gpucluster" # <- the name of your GPU-enabled cluster
experiment_name = 'fleet-pipeline'
# project folder for the script files
project_folder = 'aml-pipelines-scripts'
# local folders for the raw training data and the bulk-prediction test data
data_location = 'aml-pipelines-data'
test_data_location = 'aml-pipelines-test-data'
# -
# ## Download the training data and code for the pipeline steps
#
# Run the following cell to download the scripts that will be used by the pipeline.
# +
import urllib.request
import os
# Blob-storage URLs for the sample data and the three pipeline step scripts.
data_url = ('https://quickstartsws9073123377.blob.core.windows.net/'
'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'
'quickstarts/nyc-taxi-data/nyc-taxi-sample-data.csv')
preprocess_script = ('https://quickstartsws9073123377.blob.core.windows.net/'
'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'
'quickstarts/pipeline-scripts/preprocess.py')
train_script = ('https://quickstartsws9073123377.blob.core.windows.net/'
'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'
'quickstarts/pipeline-scripts/train.py')
inference_script = ('https://quickstartsws9073123377.blob.core.windows.net/'
'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'
'quickstarts/pipeline-scripts/inference.py')
# Download the raw training data to your local disk
os.makedirs(data_location, exist_ok=True)
urllib.request.urlretrieve(data_url, os.path.join(data_location, 'nyc-taxi-sample-data.csv'))
# Download the script files to your local disk
os.makedirs(project_folder, exist_ok=True)
urllib.request.urlretrieve(preprocess_script, os.path.join(project_folder, 'preprocess.py'))
urllib.request.urlretrieve(train_script, os.path.join(project_folder, 'train.py'))
urllib.request.urlretrieve(inference_script, os.path.join(project_folder, 'inference.py'))
# -
# The above cell downloaded three files:
# - `preprocess.py`: this script contains the data preparation logic.
# - `train.py`: this script contains the model training logic.
# - `inference.py`: this script contains the model scoring (e.g.,prediction) logic
# ## Create and connect to an Azure Machine Learning Workspace
#
# Run the following cell to access your Azure Machine Learning **Workspace**.
#
# **Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`.
# +
# By using the exist_ok param, if the workspace already exists you get a reference to the existing workspace
# allowing you to re-run this cell multiple times as desired (which is fairly common in notebooks).
ws = Workspace.create(
name = workspace_name,
subscription_id = subscription_id,
resource_group = resource_group,
location = workspace_region,
exist_ok = True)
# Persist the workspace details locally so later sessions can use Workspace.from_config().
ws.write_config()
print('Workspace configuration succeeded')
# -
# ## Create AML Compute Cluster
#
# Azure Machine Learning Compute is a service for provisioning and managing clusters of Azure virtual machines for running machine learning workloads. Let's create a new Aml Compute in the current workspace, if it doesn't already exist. We will run all our pipelines on this compute target.
# +
from azureml.core.compute_target import ComputeTargetException
# Reuse the compute target if it already exists; otherwise provision a new one.
try:
    aml_compute = AmlCompute(ws, aml_compute_target)
    print("found existing compute target.")
except ComputeTargetException:
    print("creating new compute target")
    # STANDARD_NC12 is a GPU VM size; a fixed single-node cluster is enough for this walkthrough.
    provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_NC12",
                                                                min_nodes = 1,
                                                                max_nodes = 1)
    aml_compute = ComputeTarget.create(ws, aml_compute_target, provisioning_config)
    aml_compute.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
print("Aml Compute attached")
# -
# ## Create the Run Configuration
# +
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.runconfig import DEFAULT_CPU_IMAGE, DEFAULT_GPU_IMAGE
# Create a new runconfig object
run_amlcompute = RunConfiguration()
# Use the cluster you created above.
run_amlcompute.target = aml_compute_target
# Enable Docker
run_amlcompute.environment.docker.enabled = True
# Set Docker base image to the default GPU-based image (matches the GPU cluster provisioned above)
run_amlcompute.environment.docker.base_image = DEFAULT_GPU_IMAGE
# Let AML manage dependencies via a conda environment inside the Docker image
run_amlcompute.environment.python.user_managed_dependencies = False
# Specify CondaDependencies obj, add packages needed by the preprocess/train/inference scripts
run_amlcompute.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=[
'numpy',
'pandas',
'joblib',
'scikit-learn',
'sklearn-pandas==2.0.0'
])
# -
# ## Pipeline Step 1 - Process Training Data
# The process training data step in the pipeline takes raw training data as input. This data can be a data source that lives in one of the accessible data locations, or intermediate data produced by a previous step in the pipeline. In this example we will upload the raw training data in the workspace's default blob store. Run the following two cells at the end of which we will create a **DataReference** object that points to the raw training data *csv* file stored in the default blob store.
# Default datastore (Azure file storage)
def_file_store = ws.get_default_datastore()
print("Default datastore's name: {}".format(def_file_store.name))
# The workspace's blob datastore; used for all raw and intermediate pipeline data below.
def_blob_store = Datastore(ws, "workspaceblobstore")
print("Blobstore's name: {}".format(def_blob_store.name))
# +
# Upload the raw training data to the blob storage
def_blob_store.upload(src_dir=data_location,
target_path='nyc-taxi-raw-features',
overwrite=True,
show_progress=True)
# DataReference points the pipeline at the uploaded csv on the blob store.
raw_train_data = DataReference(datastore=def_blob_store,
data_reference_name="nyc_taxi_raw_features",
path_on_datastore="nyc-taxi-raw-features/nyc-taxi-sample-data.csv")
print("DataReference object created")
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Create the Process Training Data Pipeline Step
# -
# Within a pipeline, any intermediate data (e.g., the output of a previous Step) is represented by a PipelineData object. PipelineData can be produced by one step and consumed in another step by providing the PipelineData object as an output of one step and the input of one or more steps.
#
# In the cell that follows, you create an instance of the PythonScriptStep that takes a reference to input data, a script to run against the input data and a destination to write the output data that results.
#
# Specifically, the step takes the `raw_train_data` DataReference object as input, and it will output an intermediate PipelineData object represented by `processed_train_data` that holds the processed training data.
#
# This output data will have new engineered features for the datetime components: hour of the day, and day of the week, as scripted in `preprocess.py`.
#
# Run the cell below to construct the PipelineData objects and the PythonScriptStep pipeline step:
#
# *If you are curious, open `preprocess.py` from the project folder `aml-pipelines-scripts` and examine the arguments, inputs, and outputs for the script. Note that there is an argument called process_mode to distinguish between processing training data vs test data. Reviewing the Python script file will give you a good sense of why the script argument names used below are important.*
# +
# Intermediate pipeline output that will hold the processed training data.
processed_train_data = PipelineData('processed_train_data', datastore=def_blob_store)
print("PipelineData object created")
# Step 1: run preprocess.py in 'train' mode, reading raw_train_data, writing processed_train_data.
processTrainDataStep = PythonScriptStep(
name="process_train_data",
script_name="preprocess.py",
arguments=["--process_mode", 'train',
"--input", raw_train_data,
"--output", processed_train_data],
inputs=[raw_train_data],
outputs=[processed_train_data],
compute_target=aml_compute,
runconfig=run_amlcompute,
source_directory=project_folder
)
print("preprocessStep created")
# -
# At this point you have completed the first step in the pipeline.
# ## Pipeline Step 2 - Train Pipeline Step
# The train pipeline step takes the `processed_train_data` created in the above step as input and generates another PipelineData object to save the model that results as its output in `trained_model`. This is an example of how machine learning pipelines can have many steps and these steps can use (or reuse) datasources and intermediate data.
#
# *If you are curious, open `train.py` from the project folder `aml-pipelines-scripts` and examine the arguments, inputs, and outputs for the script.*
# +
# Intermediate pipeline output that will hold the trained model artifact.
trained_model = PipelineData('trained_model', datastore=def_blob_store)
print("PipelineData object created")
# Step 2: run train.py on the processed training data and save the fitted model.
trainStep = PythonScriptStep(
name="train",
script_name="train.py",
arguments=["--input", processed_train_data, "--output", trained_model],
inputs=[processed_train_data],
outputs=[trained_model],
compute_target=aml_compute,
runconfig=run_amlcompute,
source_directory=project_folder
)
print("trainStep created")
# -
# ### Create and Validate the Pipeline
# Now that you have defined the core steps, it is time to define the actual Pipeline object.
#
# The `trainStep` has an implicit data dependency on the `processTrainDataStep`. As such, you need only include the `trainStep` in your Pipeline object.
#
# You will observe that when you run the pipeline that it will first run the **processTrainDataStep** followed by the **trainStep**.
#
# Run the following cell to create the Pipeline and validate it is configured correctly (e.g., check for issues like disconnected inputs).
# +
# Listing only trainStep is enough: its data dependency pulls in processTrainDataStep implicitly.
pipeline = Pipeline(workspace=ws, steps=[trainStep])
print ("Pipeline is built")
# validate() catches configuration issues such as disconnected inputs before submission.
pipeline.validate()
print("Simple validation complete")
# -
# ### Submit the Pipeline
#
# Next, schedule the pipeline run by executing the following cell.
pipeline_run = Experiment(ws, experiment_name).submit(pipeline)
print("Pipeline is submitted for execution")
# ### Monitor the Run Details
#
# The pipeline run will take **about 8 minutes to complete.**
#
# You can monitor the execution of a pipeline in realtime both from a notebook and using the Azure Portal.
#
# Run the following cell and observe the order in which the pipeline steps are executed: **processTrainDataStep** followed by the **trainStep**
#
# Wait until both pipeline steps finish running. The cell below should periodically auto-refresh and you can also rerun the cell to force a refresh.
#
# Notice in the output of the cell below, at the bottom, there is a link you can click to see the status of the run from within the Azure Portal.
# Render a live widget tracking the pipeline run's progress (auto-refreshes).
RunDetails(pipeline_run).show()
# ## Pipeline Step 3 - Inferencing
# At this point you have a trained model you can use to begin making predictions. In the following, you will create a new pipeline step that is used for the purpose of inferencing (or scoring).
# ### Create a DataReference Object to represent the Test data
#
# Run the following cell to upload the test data and create the DataReference object (`raw_bulk_test_data`) that references it.
# +
# Upload dummy raw test data to the blob storage
os.makedirs(test_data_location, exist_ok=True)
# A single-row placeholder file; real test data is uploaded later to trigger the schedule.
pd.DataFrame([[0]], columns = ['col1']).to_csv(os.path.join(test_data_location, 'raw-test-data.csv'), header=True, index=False)
def_blob_store.upload(src_dir=test_data_location,
target_path='bulk-test-data',
overwrite=True,
show_progress=True)
# Create a DataReference object referencing the 'raw-test-data.csv' file
raw_bulk_test_data = DataReference(datastore=def_blob_store,
data_reference_name="raw_bulk_test_data",
path_on_datastore="bulk-test-data/raw-test-data.csv")
print("DataReference object created")
# -
# ### Create the Process Test Data Pipeline Step
# Similar to the process train data pipeline step, we create a new step for processing the test data. Note that it is the same script file `preprocess.py` that is used to process both the train and test data. Thus, you can centralize all your logic for preprocessing data for both train and test. The key difference here is that the process_mode argument is set to *inference*, which the `preprocess.py` script will use to process the test data in a slightly different way than the train data.
#
# *If you are curious, open `preprocess.py` from the project folder `aml-pipelines-scripts` and examine the arguments, inputs, and outputs for the script. Note that there is an argument called process_mode to distinguish between processing training data vs test data. Reviewing the Python script file will give you a good sense of why the script argument names used below are important.*
# +
# Intermediate pipeline output that will hold the processed test data.
processed_test_data = PipelineData('processed_test_data', datastore=def_blob_store)
print("PipelineData object created")
# Same preprocess.py script as step 1, but run in 'inference' mode for the test data.
processTestDataStep = PythonScriptStep(
name="process_test_data",
script_name="preprocess.py",
arguments=["--process_mode", 'inference',
"--input", raw_bulk_test_data,
"--output", processed_test_data],
inputs=[raw_bulk_test_data],
outputs=[processed_test_data],
allow_reuse = False, # always re-run: the input file changes between uploads
compute_target=aml_compute,
runconfig=run_amlcompute,
source_directory=project_folder
)
print("preprocessStep created")
# -
# ### Create the Inference Pipeline Step to Make Predictions
# The inference pipeline step takes the `processed_test_data` created in the above step and the `trained_model` created in the train step as two inputs and generates `inference_output` as its output. This is yet another example of how machine learning pipelines can have many steps and these steps could use or reuse datasources and intermediate data.
#
# *If you are curious, open inference.py from the project folder `aml-pipelines-scripts` and examine the arguments, inputs, and outputs for the script.*
# +
# Intermediate pipeline output that will hold the bulk predictions.
inference_output = PipelineData('inference_output', datastore=def_blob_store)
print("PipelineData object created")
# Step 3: run inference.py to score the processed test data using the trained model.
inferenceStep = PythonScriptStep(
name="inference",
script_name="inference.py",
arguments=["--input", processed_test_data,
"--model", trained_model,
"--output", inference_output],
inputs=[processed_test_data, trained_model],
outputs=[inference_output],
compute_target=aml_compute,
runconfig=run_amlcompute,
source_directory=project_folder
)
print("inferenceStep created")
# -
# ### Create and Validate the Pipeline
#
# The `inferenceStep` has an implicit data dependency with **ALL** of the previous pipeline steps. So when we create a Pipeline object that lists only `inferenceStep` in the steps array, we are implicitly including `process_test_data`, and the model created by `train`.
#
# Run the following cell to create and validate the inferencing pipeline.
# +
# Listing only inferenceStep implicitly includes all of its upstream dependencies.
inference_pipeline = Pipeline(workspace=ws, steps=[inferenceStep])
print ("Inference Pipeline is built")
inference_pipeline.validate()
print("Simple validation complete")
# -
# ### Publish the Inference Pipeline
#
# As the next step, you will publish the inferencing pipeline so it can be executed to score any data that is supplied in a batch fashion.
#
# Note that we are not submitting the pipeline to run, instead we are publishing the pipeline. When you publish a pipeline, it can be submitted to run by invoking it via its REST endpoint.
#
# Run the following cell.
# Publish (not run) the pipeline so it can be invoked later via its REST endpoint or a Schedule.
pipeline_name = 'Inference Pipeline'
published_pipeline = inference_pipeline.publish(name = pipeline_name)
# ### Schedule the Inference Pipeline
#
# We want to run the Inference Pipeline when any new data is uploaded at the location referenced by the `raw_bulk_test_data` DataReference object. The next cell creates a Schedule to monitor the datastore for changes, and is responsible for running the `Inference Pipeline` when it detects a new file being uploaded.
# +
from azureml.pipeline.core.schedule import Schedule
# Watch the 'bulk-test-data' path on the blob store; new/changed files trigger the published pipeline.
schedule = Schedule.create(workspace=ws, name=pipeline_name + "_sch",
pipeline_id=published_pipeline.id,
experiment_name=experiment_name,
datastore=def_blob_store,
wait_for_provisioning=True,
description="Datastore scheduler for Pipeline: " + pipeline_name,
path_on_datastore='bulk-test-data',
polling_interval=1 # in minutes
)
print("Created schedule with id: {}".format(schedule.id))
# -
# Check the status of the Schedule and confirm it's Active
print('Schedule status: ', schedule.status)
# ### Test the Inference Pipeline
#
# Run the following cell to create some test data to make bulk predictions and upload that data to the `bulk-test-data` blob store.
# +
# Create the raw test data
columns = ['vendorID', 'passengerCount', 'tripDistance', 'hour_of_day', 'day_of_week', 'day_of_month',
'month_num', 'normalizeHolidayName', 'isPaidTimeOff', 'snowDepth', 'precipTime',
'precipDepth', 'temperature']
# NOTE(review): '<NAME>, <NAME>' looks like an anonymization placeholder for a real holiday
# name in the normalizeHolidayName column — confirm the intended value before relying on it.
data = [[1, 4, 10, 15, 4, 5, 7, 'None', False, 0, 0.0, 0.0, 80],
[1, 1, 5, 6, 0, 20, 1, '<NAME>, <NAME>', True, 0, 2.0, 3.0, 35]]
data_df = pd.DataFrame(data, columns = columns)
os.makedirs(test_data_location, exist_ok=True)
data_df.to_csv(os.path.join(test_data_location, 'raw-test-data.csv'), header=True, index=False)
from datetime import datetime
# Record the upload time (UTC) so we can later verify the triggered run started after it.
data_upload_time = datetime.utcnow()
print('Data upload time in UTC: ', data_upload_time)
# Upload the raw test data to the blob storage
def_blob_store.upload(src_dir=test_data_location,
target_path='bulk-test-data',
overwrite=True,
show_progress=True)
# Wait for 65 seconds (just past the 1-minute schedule polling interval)...
import time
print('Please wait...')
time.sleep(65)
print('Done!')
# -
# ### Wait for Schedule to Trigger
#
# The Schedule polling interval is 1 minute. You can also log into Azure Portal and navigate to your `resource group -> workspace -> experiment` to see if the `Inference Pipeline` has started executing.
#
# **If the inference_pipeline_run object in the below cell is None, it means that the Schedule has not triggered yet!**
#
# **If the Schedule does not trigger in 2 minutes, try rerunning the data upload cell again!**
# Confirm that the inference_pipeline_run object is NOT None
inference_pipeline_run = schedule.get_last_pipeline_run() # None until the schedule has triggered a run
print(inference_pipeline_run)
# *If you upload the test data file more than once, confirm that we have the latest pipeline run object. We will compare the pipeline start time with the time you uploaded the test data file.*
# +
# confirm the start time
import dateutil.parser
if inference_pipeline_run.get_details()['status'] != 'NotStarted':
    # Parse the run's UTC start time; ignoretz keeps it naive for comparison with utcnow().
    pipeline_starttime = dateutil.parser.parse(inference_pipeline_run.get_details()['startTimeUtc'], ignoretz=True)
else:
    pipeline_starttime = datetime.utcnow()
# The run is only ours if it started after we uploaded the test data.
if(pipeline_starttime > data_upload_time):
    print('We have the correct inference pipeline run! Proceed to next cell.')
else:
    print('Rerun the above cell to get the latest inference pipeline run!')
# -
# ### Monitor the Run Details
#
# Observe the order in which the pipeline steps are executed based on their implicit data dependencies.
#
# Wait until all steps finish running. The cell below should periodically auto-refresh and you can also rerun the cell to force a refresh.
#
# **This example demonstrates how AML make it possible to reuse previously completed steps and run only the modified or new steps in your pipeline.**
RunDetails(inference_pipeline_run).show()
# ### Download and Observe the Predictions
# +
print("Get StepRun for inference step...")
pipeline_run_id = inference_pipeline_run.id
step_run_id = inference_pipeline_run.find_step_run('inference')[0].id
node_id = inference_pipeline_run.get_graph().node_name_dict['inference'][0].node_id
print('Pipeline Run ID: {} Step Run ID: {}, Step Run Node ID: {}'.format(pipeline_run_id,
step_run_id,
node_id))
step_run = StepRun(inference_pipeline_run.experiment,
step_run_id,
pipeline_run_id,
node_id)
print(step_run)
print("Downloading evaluation results...")
# access the evaluate_output
#data = pipeline_run.find_step_run('evaluate')[0].get_output_data('evaluate_output')
data = step_run.get_output_data('inference_output')
# download the predictions to local path
data.download('.', show_progress=True)
# -
with open(os.path.join('./', data.path_on_datastore, 'results.txt')) as f:
results = f.read()
print("Printing evaluation results...")
print(results)
# ### Cleanup Resources
#
# If you are done experimenting with this experience, run the following cell to clean up the schedule.
schedule.disable()
| data/pipelines-AML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: text-mining
# language: python
# name: python3
# ---
# # Prerequisites : Data Types
# +
# Setting the Data Type: Python infers the type from the assigned literal.
x = "Hello World!!!" #str
print(x)
x = 20 #int
print(x)
x = 20.5 #float
print(x)
x = ["apple", "banana", "cherry"] #list
print(x)
x = ("apple", "banana", "cherry") #tuple
print(x)
x = {"name" : "John", "age" : 36} #dict
print(x)
x = {"apple", "banana", "cherry"} #set
print(x)
# Setting the Specific Data Type: constructor functions convert or build a
# value of an explicit type.
x = str(20)
print(x)
x = int("20")
print(x)
x = float("20.5")
print(x)
x = list(("apple", "banana", "cherry"))
print(x)
x = tuple(("apple", "banana", "cherry"))
print(x)
x = dict(name="John", age=36)
print(x)
x = set(("apple", "banana", "cherry"))
print(x)
x = bool(5) # any non-zero number is truthy -> True
print(x)
# +
# Text Type
#-------------------------------------------------------
# Strings: single and double quotes are equivalent.
print("Hello")
print('Hello')
# Multiline Strings: triple quotes preserve the embedded line breaks.
x = """
Yo, check the diagonal
Three brothers gone
Come on
Doesn't that make it three in a row
Anger is a gift
"""
print(x)
# -
# Strings are Arrays: indexing yields single-character strings.
txt = "The answer is 42"
print(txt[0])
# Looping Through a String
for y in txt:
    print(y)
# String Length
print(len(txt))
# NOTE(review): y still holds the *last* character from the loop above,
# so this prints 1.
print(len(y))
# +
# Check String & Check if NOT (membership tests with `in` / `not in`)
if "42" in txt:
    print("Yes, the answer is 42!!!")
if "24" not in txt:
    print("No, because the answer is 42!!!")
# -
# ## Using strings
# Slicing: characters from index 2 up to (but not including) 7
x = "This is the End!"
print(x[2:7])
# Slice From the Start
print(x[:7])
# Slice To the End
print(x[7:])
# Negative Indexing (counts from the end of the string)
print(x[-7:-2])
print(x[:-2])
print(x[-7:])
# Upper/Lower Case
print(x.upper())
print(x.lower())
# Remove Whitespace (strip removes leading/trailing whitespace only)
y = " Whitespace Removed! "
print(y.strip())
# Replace String (returns a new string; the original is unchanged)
print(x.replace("End", "Start"))
# Split String (returns a list of the space-separated words)
print(x.split(" "))
for w in x.split(" "):
    print(w)
# String Concatenation with +
a = "String"
b = "Conca"
c = "tenation"
print(a+b+c)
print(a+" "+b+c)
# +
# String Format: plain concatenation, str.format, and f-strings
result = 5
Answer = "Two plus Three is equal to "
print(Answer + str(result))
Answer = "Two plus Three is equal to {}"
print(Answer.format(result))
Two = 2
Three = 3
result = Two + Three
Answer = "{0} plus {1} is equal to {2}"
print(Answer.format(Two, Three, result))
print(f"{Two} plus {Three} is equal to {result}")
# -
# Escape Character: a backslash lets quotes appear inside a quoted string.
# NOTE(review): "studing" in the literals below is a typo for "studying"
# (left as-is: changing output strings is out of scope for a doc pass).
#txt = "We are studing "Python"!!!"
txt = "We are studing \"Python\"!!!"
print(txt)
| 2021-22/Code_01_Introduction/01_a_python_Strings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Report the active interpreter version and currently installed packages.
# !python --version
# !pip freeze
# Install the libraries used by the course notebooks.
# !pip install datasets transformers
# +
import transformers
print(transformers.__version__)
# -
# GPU
# Show the GPU available to this runtime (if any).
# !nvidia-smi
# Git LFS is needed to push large model files (e.g. to the Hugging Face Hub).
# !apt install git-lfs
# !sudo apt install git-lfs
| scripts/itam_requirements-installation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cluster Pipeline
# This Notebook is providing a Cluster pipeline for aggregated data
##### REQUIRES THE DATAFRAME FOLDER TO BE NAMED 'Cohorts', WHICH INCLUDES ALL PRECOMPUTED DATAFRAMES #####
import fiber
from fiber.cohort import Cohort
from fiber.condition import Patient, MRNs
from fiber.condition import Diagnosis
from fiber.condition import Measurement, Encounter, Drug, TobaccoUse,LabValue
from fiber.storage import yaml as fiberyaml
import time
import pandas as pd
import pyarrow.parquet as pq
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from functools import reduce
from ppca import PPCA
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
import category_encoders as ce
import json
from sklearn import metrics
from sklearn.decomposition import FastICA
from sklearn.metrics import pairwise_distances
from sklearn.metrics import davies_bouldin_score
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from mpl_toolkits.mplot3d import Axes3D
from sklearn.manifold import TSNE
import seaborn as sns
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from pickle import load
from pickle import dump
import pickle
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import PCA
from sklearn.decomposition import IncrementalPCA
from sklearn.decomposition import KernelPCA
from sklearn.decomposition import LatentDirichletAllocation
from sklearn import preprocessing
import scipy.cluster.hierarchy as shc
import scipy.stats as stats
import researchpy as rp
from keras.models import Model
from keras.layers import Dense, Input
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from keras.layers import Input, Dense
from keras.models import Model, Sequential
from keras import regularizers
import umap
from sklearn.cluster import DBSCAN
import hdbscan
import plotly.express as px
# # Column Transformer
# +
#columnTransformer out of the scikit-learn libraby
#applying StandardScaler on numeric values and OneHotEncouder on categorical
def apply_columnTransformer(df,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Scale numeric columns and encode categorical columns of *df*.

    First tries to load a previously fitted ColumnTransformer and its
    transformed output from the ``Cohort/Models/ColumnTransformer`` cache
    (keyed by *experiment_name*); on a cache miss it fits a new transformer,
    persists both artefacts and returns them.

    Parameters
    ----------
    df : pd.DataFrame
        Input cohort data.
    df_name, num_scaler_name, cat_scaler_name : str
        Scaler selection ('StandardScaler'/'MinMaxScaler' and
        'OneHotEncoder'/'BinaryEncoder'); *df_name* is kept for interface
        compatibility.
    experiment_name : str
        Cache key for the pickle/npz artefacts.

    Returns
    -------
    tuple(np.ndarray, ColumnTransformer)
    """
    try:
        # cache hit: reuse the fitted transformer and its transformed output
        with open('Cohort/Models/ColumnTransformer/'+experiment_name+'.pkl', 'rb') as f:
            ctransformer = pickle.load(f)
        print('ctransformer loaded')
        # do NOT name this local 'load' -- that shadowed pickle.load in the original
        archive = np.load('Cohort/Models/ColumnTransformer/'+experiment_name+'.npz')
        transformed_df = archive['a']
        print('transformed_df loaded')
        return transformed_df, ctransformer
    # cache miss (or unreadable artefact): fit a new transformer
    except Exception:
        # np.object / np.str / np.float / np.int were removed in NumPy >= 1.24;
        # use pandas dtype selection instead of the old alias membership test.
        categorical_cols = df.select_dtypes(include=['object']).columns.tolist()
        numerical_cols = df.select_dtypes(include=['number']).columns.tolist()
        # select the scalers that should be applied
        if num_scaler_name=='StandardScaler':
            num_scaler=StandardScaler()
        if num_scaler_name=='MinMaxScaler':
            num_scaler=preprocessing.MinMaxScaler()
        if cat_scaler_name=='OneHotEncoder':
            cat_scaler=ce.OneHotEncoder()
        if cat_scaler_name=='BinaryEncoder':
            cat_scaler=ce.BinaryEncoder(drop_invariant = True, handle_missing = 'return_nan')
        # build and fit the transformer
        ctransformer = ColumnTransformer([
            ('num', num_scaler, numerical_cols),
            ('cat', cat_scaler, categorical_cols)])
        transformed_df=ctransformer.fit_transform(df)
        print(transformed_df)
        # BUG FIX: persist the transformer *after* fitting (the original
        # pickled it before fit_transform, caching an unfitted object).
        with open('Cohort/Models/ColumnTransformer/'+experiment_name+'.pkl', 'wb') as f:
            dump(ctransformer, f)
        np.savez_compressed('Cohort/Models/ColumnTransformer/'+experiment_name+'.npz',a=transformed_df)
        return transformed_df, ctransformer
# -
# # Dimension Reduction
# ## PPCA
def apply_ppca(df,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Reduce *transformed_df* to *dimension* components with probabilistic PCA.

    Fits a PPCA model, prints and returns the reduced representation, and
    persists the fitted model under the experiment name.
    """
    model = PPCA()
    model.fit(data=transformed_df, d=dimension, verbose=True)
    reduced = model.transform()
    print(reduced)
    with open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb') as sink:
        dump(model, sink)
    return reduced
# ## TSNE
def apply_TSNE(df,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Embed *transformed_df* in 2-D with t-SNE, scatter-plot and persist it.

    NOTE(review): ``n_components`` is hard-coded to 2, so the ``dimension``
    argument is ignored here -- confirm whether that is intentional (t-SNE
    with the default barnes_hut method only supports < 4 components).
    Unlike the other reducers, the *embedding array* (not the fitted model)
    is pickled, since TSNE has no separate transform step.
    """
    time_start = time.time()
    tsne = TSNE(n_components=2, verbose=1, perplexity=160, n_iter=1000)
    tsne_results = tsne.fit_transform(transformed_df)
    #tsne_results = tsne.fit_transform(df)
    print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))
    plt.scatter(tsne_results[:,0],tsne_results[:,1])
    dump(tsne_results, open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb'))
    return tsne_results
# ## FAST ICA
#
def apply_ICA(df,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Reduce *transformed_df* to *dimension* independent components (FastICA)."""
    ica = FastICA(n_components=dimension,random_state=0)
    reduced = ica.fit_transform(transformed_df)
    print(reduced.shape)
    with open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb') as sink:
        dump(ica, sink)
    return reduced
# ## SVD
#https://machinelearningmastery.com/singular-value-decomposition-for-dimensionality-reduction-in-python/
#good if data is sparse
#n_iter=5 = Default
def apply_svd(df,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Reduce *transformed_df* with truncated SVD (well suited to sparse input).

    n_iter=5 is the scikit-learn default; the fitted model is persisted
    under the experiment name.
    """
    svd_model = TruncatedSVD(n_components=dimension, n_iter=5, random_state=42)
    reduced = svd_model.fit_transform(transformed_df)
    with open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb') as sink:
        dump(svd_model, sink)
    return reduced
# ## PCA
def apply_pca(df,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Reduce *transformed_df* to *dimension* principal components (PCA)."""
    pca_model = PCA(n_components=dimension)
    reduced = pca_model.fit_transform(transformed_df)
    print()
    print(reduced)
    with open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb') as sink:
        dump(pca_model, sink)
    return reduced
# ## Incremental PCA
def apply_ipca(df,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Reduce *transformed_df* with incremental PCA (batches of 30 rows)."""
    ipca_model = IncrementalPCA(n_components=dimension, batch_size=30)
    reduced = ipca_model.fit_transform(transformed_df)
    print(reduced)
    with open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb') as sink:
        dump(ipca_model, sink)
    return reduced
# ## KernelPCA
def apply_kpca(df,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Reduce *transformed_df* with kernel PCA using a linear kernel."""
    kpca_model = KernelPCA(n_components=dimension, kernel='linear')
    reduced = kpca_model.fit_transform(transformed_df)
    print(reduced)
    with open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb') as sink:
        dump(kpca_model, sink)
    return reduced
# ## LDA
def apply_lda(df,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name):
    """Reduce *transformed_df* to *dimension* topics with LDA.

    LDA requires non-negative input, so the absolute value is taken first.
    """
    lda_model = LatentDirichletAllocation(n_components=dimension,random_state=0)
    non_negative = abs(transformed_df)
    reduced = lda_model.fit_transform(non_negative)
    print(reduced)
    with open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb') as sink:
        dump(lda_model, sink)
    return reduced
# # UMAP
def apply_umap(df,transformed_df,dimension,umap_distance,umap_neighbors,df_name,num_scaler_name,cat_scaler_name,experiment_name,save):
    """Embed *transformed_df* with UMAP; optionally persist the fitted mapper.

    The seed is fixed (random_state=42) so repeated runs are reproducible.
    """
    mapper = umap.UMAP(n_neighbors=umap_neighbors,
                       min_dist=umap_distance,
                       n_components=dimension,
                       random_state=42)
    embedding = mapper.fit_transform(transformed_df)
    if save == True:
        with open('Cohort/Models/DimReduction/'+experiment_name+'.pkl', 'wb') as sink:
            dump(mapper, sink)
    return embedding
# ## Autoencoder
def apply_AE(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,a_f_decoder,a_f_encoder,n_layer,batchsize,epochs,optimizer,loss_function):
    """Reduce *transformed_df* to *dimension* features with a small autoencoder.

    Architecture: input -> Dense(50, encoder act.) -> Dense(dimension)
    bottleneck -> Dense(50, decoder act.) -> output of the input width.
    Trains with an 80/20 validation split and returns the bottleneck
    representation for all rows plus the final-epoch validation loss.

    NOTE(review): the ``n_layer`` argument is unused -- the depth is
    hard-coded; confirm whether it should parameterise the layer count.
    """
    X=transformed_df
    #Building the Auto-encoder neural network
    # Building the Input Layer
    input_layer = Input(shape =(X.shape[1], ))
    # Building the Encoder network
    encoded = Dense(50, activation =a_f_encoder)(input_layer)
    encoded = Dense(dimension, activation =a_f_encoder)(encoded)
    decoded = Dense(50, activation =a_f_decoder)(encoded)
    # Building the Output Layer
    output_layer = Dense(X.shape[1], activation =a_f_decoder)(decoded)
    #Train AE
    # Defining the parameters of the Auto-encoder network
    autoencoder = Model(input_layer, output_layer)
    autoencoder.compile(optimizer=optimizer, loss=loss_function)
    # Training the Auto-encoder network (reconstruction target == input)
    history = autoencoder.fit(X, X,
                    batch_size = batchsize, epochs = epochs,
                    shuffle = True, validation_split = 0.20)
    # validation loss of the final epoch, reported in the results sheet
    print(history.history['val_loss'][(epochs-1)])
    loss=history.history['val_loss'][(epochs-1)]
    # Retain only the encoder half: reuse the first three trained layers
    # (input + the two encoding Dense layers) to produce embeddings.
    hidden_representation = Sequential()
    hidden_representation.add(autoencoder.layers[0])
    hidden_representation.add(autoencoder.layers[1])
    hidden_representation.add(autoencoder.layers[2])
    normal_hidden_rep = hidden_representation.predict(X)
    #dump(history, open('Cohort/Models/DimReduction/'+df_name+'_'+num_scaler_name+'_'+cat_scaler_name+'_AE_'+str(dimension)+'_'+a_f_encoder+'_'+a_f_decoder+'_'+str(n_layer)+'_'+str(batchsize)+'_'+str(epochs)+'_'+optimizer+'_'+loss_function+'.pkl', 'wb'))
    return normal_hidden_rep, loss
# # Cluster Method
# ## kmeans
def apply_kmeans(transformed_sample,ellbow_method,cluster,df_name,num_scaler_name,cat_scaler_name,dim_red_method,experiment_name):
    """Run k-means++ on *transformed_sample* and return the cluster labels.

    When *ellbow_method* is True, first plot the WCSS elbow curve to help
    choose *cluster*. The plotting code below is deliberately inert (either
    commented out or inside the triple-quoted string).
    """
    if ellbow_method==True:
        elbow_method(transformed_sample)
    #scatter_plot(transformed_sample,None)
    #plt.scatter(transformed_sample[:,0],transformed_sample[:,1])
    kmeans = KMeans(n_clusters=cluster, init='k-means++', max_iter=5000, n_init=10, random_state=0)
    pred_y = kmeans.fit_predict(transformed_sample)
    #plt.scatter(transformed_sample[:,0], transformed_sample[:,1])
    #plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red')
    #plt.show()
    #scatter_plot(transformed_sample,kmeans.labels_)
    '''
    from sklearn.metrics.pairwise import pairwise_distances_argmin
    fig = plt.figure(figsize=(15, 5))
    fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
    colors = ['#4EACC5', '#FF9C34', '#4E9A06','#FF0000','#8800FF']
    k_means_labels = pairwise_distances_argmin(transformed_sample, kmeans.cluster_centers_)
    ax = fig.add_subplot(1, 3, 1)
    for k, col in zip(range(cluster), colors):
        my_members = k_means_labels == k
        cluster_center = kmeans.cluster_centers_[k]
        ax.plot(transformed_sample[my_members, 0], transformed_sample[my_members, 1], 'w',
                markerfacecolor=col, marker='.')
        ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
                markeredgecolor='k', markersize=6)
    experiment_name=experiment_name
    ax.set_title(experiment_name)
    ax.set_xticks(())
    ax.set_yticks(())
    fig.savefig('Cohort/Models/Plots/'+experiment_name+'.png')'''
    return kmeans.labels_
def elbow_method(transformed_sample):
    """Plot WCSS (k-means inertia) for k = 1..10 to help pick a cluster count."""
    inertias = []
    for k in range(1, 11):
        km = KMeans(n_clusters=k, init='k-means++', max_iter=300, n_init=10, random_state=0)
        km.fit(transformed_sample)
        inertias.append(km.inertia_)
    plt.plot(range(1, 11), inertias)
    plt.title('Elbow Method')
    plt.xlabel('Number of clusters')
    plt.ylabel('WCSS')
    plt.show()
# # DBSCAN
def apply_dbscan(transformed_sample,ellbow_method,cluster,df_name,num_scaler_name,cat_scaler_name,dim_red_method,experiment_name):
    """Cluster *transformed_sample* with DBSCAN and plot core/border points.

    Returns the label array (label -1 marks noise). The ``ellbow_method``
    and ``cluster`` parameters are unused -- kept for a uniform interface
    with the other clustering wrappers. ``scatter_plot`` is defined
    elsewhere in this notebook.
    """
    db = DBSCAN(eps=0.1, min_samples=5).fit(transformed_sample)
    # mark core samples so they can be drawn larger than border points
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_
    scatter_plot(transformed_sample,labels)
    X=transformed_sample
    # Black removed and is used for noise instead.
    unique_labels = set(labels)
    colors = [plt.cm.Spectral(each)
          for each in np.linspace(0, 1, len(unique_labels))]
    for k, col in zip(unique_labels, colors):
        if k == -1:
            # Black used for noise.
            col = [0, 0, 0, 1]
        class_member_mask = (labels == k)
        # core points: large markers
        xy = X[class_member_mask & core_samples_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
                 markeredgecolor='k', markersize=14)
        # border points: small markers
        xy = X[class_member_mask & ~core_samples_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
                 markeredgecolor='k', markersize=6)
    plt.title('DBSCAN' )
    plt.show()
    print(np.unique(labels))
    return labels
# # HDBSCAN
def apply_hdbscan(transformed_sample,ellbow_method,cluster,df_name,num_scaler_name,cat_scaler_name,dim_red_method,experiment_name):
    """Cluster *transformed_sample* with HDBSCAN and scatter-plot the result.

    Points with label < 0 are noise and are drawn in grey; clustered points
    are coloured by label. Returns the label array. The ``ellbow_method``
    and ``cluster`` parameters are unused (uniform wrapper interface).
    """
    hdbscan_labels = hdbscan.HDBSCAN(min_samples=10, min_cluster_size=500).fit_predict(transformed_sample)
    clustered = (hdbscan_labels >= 0)
    # noise points in grey
    plt.scatter(transformed_sample[~clustered, 0],
                transformed_sample[~clustered, 1],
                c=(0.5, 0.5, 0.5),
                s=0.1,
                alpha=0.5)
    # clustered points coloured by label
    plt.scatter(transformed_sample[clustered, 0],
                transformed_sample[clustered, 1],
                c=hdbscan_labels[clustered],
                s=0.1,
                cmap='Spectral');
    return hdbscan_labels
# ## Gaussian
#
def apply_gaussian(df,n_cluster):
    """Fit a Gaussian mixture model and 3-D scatter-plot the soft assignments.

    Each point is coloured by its membership-probability tuple.

    NOTE(review): unlike the other clustering wrappers this returns None --
    neither the model nor labels are handed back; confirm whether a return
    value is missing. The axis labels (petal/sepal) look like leftovers
    from an iris example and do not match the cohort data.
    """
    gmm = GaussianMixture(n_components=n_cluster)
    gmm.fit(df)
    proba_lists = gmm.predict_proba(df)
    #Plotting: map each row's probability vector to an RGB-like colour tuple
    colored_arrays = np.matrix(proba_lists)
    colored_tuples = [tuple(i.tolist()[0]) for i in colored_arrays]
    fig = plt.figure(1, figsize=(7,7))
    ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)
    ax.scatter(df[:, 0], df[:, 1], df[:, 2],
          c=colored_tuples, edgecolor="k", s=50)
    ax.set_xlabel("Petal width")
    ax.set_ylabel("Sepal length")
    ax.set_zlabel("Petal length")
    plt.title("Gaussian Mixture Model", fontsize=14)
# ## Hierachical
def apply_hierachical(df_dim_red,ellbow_method,n_cluster,df_name,num_scaler_name,cat_scaler_name,dim_red_method,experiment_name):
    """Cluster *df_dim_red* with agglomerative (Ward) clustering.

    Optionally (Dendogram flag, currently hard-wired to False) loads a cached
    dendrogram image or computes and caches one. Returns the label array;
    ``scatter_plot`` is defined elsewhere in this notebook.
    The ``ellbow_method`` parameter is unused (uniform wrapper interface).
    """
    Dendogram=False
    if Dendogram==True:
        try:
            # load a previously rendered dendrogram image from the cache
            plt.figure(figsize=(250, 7))
            img=mpimg.imread('Cohort/Models/Plots/Dendograms/'+experiment_name+'.png')
            print('Dendogram loaded')
            imgplot = plt.imshow(img)
            plt.show()
        except Exception:
            # BUG FIX: the original rebuilt the file name here from an
            # undefined global `dimension` (NameError); reuse the
            # experiment_name parameter for both title and cache path.
            plt.figure(figsize=(10, 7))
            plt.title(experiment_name)
            dend = shc.dendrogram(shc.linkage(df_dim_red, method='ward'))
            plt.savefig('Cohort/Models/Plots/Dendograms/'+experiment_name+'.png')
    # distance_threshold=None + fixed n_clusters -> standard flat clustering
    # NOTE(review): `affinity` was renamed to `metric` in scikit-learn 1.2
    # and removed in 1.4 -- kept here for compatibility with the pinned env.
    model = AgglomerativeClustering(n_clusters=n_cluster, affinity='euclidean', linkage='ward',distance_threshold=None)
    model.fit_predict(df_dim_red)
    scatter_plot(df_dim_red,model.labels_)
    return model.labels_
# # Metrics
# ## Silhouette Coefficient
def get_silhouette_Coefficient(labels,df):
    """Return the mean silhouette score (euclidean metric) of the clustering."""
    score = metrics.silhouette_score(df, labels, metric='euclidean')
    print('silhouette_score:', score)
    return score
# ## Calinski-Harabasz Index
def get_calinski_harabasz(labels,df):
    """Return the Calinski-Harabasz index (higher = better-separated clusters)."""
    score = metrics.calinski_harabasz_score(df, labels)
    print('Calinski-Harabasz Index:', score)
    return score
# ## Davies-Bouldin Index
def get_davies_bouldin(labels,df):
    """Return the Davies-Bouldin index (lower = better clustering)."""
    score = davies_bouldin_score(df, labels)
    print('Davies-Bouldin Index:', score)
    return score
# ## Feature Analysis
#
def analyse_num_column(df,col,kind):
    """Summarise numeric column *col* of *df*: mean, std, median, cohort size.

    Returns a dict keyed for the per-cluster table when *kind* == 'Cluster',
    otherwise keyed with the '_all' suffix for the whole-cohort table.
    """
    n = len(df)
    series = df[col]
    mean_val, std_val, median_val = series.mean(), series.std(), series.median()
    if kind == 'Cluster':
        return {'column_name': col, 'col_type': 'num',
                'cat_total': '', 'cat_percentage': '',
                'num_mean': mean_val, 'num_std': std_val,
                'num_median': median_val, 'total_patient': n}
    return {'cat_total_all': '', 'cat_percentage_all': '',
            'num_mean_all': mean_val, 'num_std_all': std_val,
            'num_median_all': median_val, 'total_patient_all': n}
def analyse_cat_column(df,col,kind):
    """Summarise boolean column *col* of *df*: count and share of True rows.

    Returns a dict keyed for the per-cluster table when *kind* == 'Cluster',
    otherwise keyed with the '_all' suffix for the whole-cohort table.
    """
    n = len(df)
    positives = len(df.loc[df[col] == True])
    share = positives / n
    if kind == 'Cluster':
        return {'column_name': col, 'col_type': 'cat',
                'cat_total': positives, 'cat_percentage': share,
                'num_mean': '', 'num_std': '', 'num_median': '',
                'total_patient': n}
    return {'cat_total_all': positives, 'cat_percentage_all': share,
            'num_mean_all': '', 'num_std_all': '', 'num_median_all': '',
            'total_patient_all': n}
def analyse_gender_column(df,kind):
    """Summarise the 'gender' column as two rows (male and female counts/shares).

    Returns a (male_row, female_row) tuple of dicts, keyed for the
    per-cluster table when *kind* == 'Cluster', otherwise with '_all' keys.

    NOTE: the original recomputed the female count one extra time after both
    rows were already built; that dead statement has been removed.
    """
    total_patient = len(df)

    def _gender_row(label, name):
        # one summary row for a single gender value
        cat_total = len(df.loc[df['gender'] == label])
        cat_percentage = cat_total / total_patient
        if kind == 'Cluster':
            return {'column_name': name, 'col_type': 'cat',
                    'cat_total': cat_total, 'cat_percentage': cat_percentage,
                    'num_mean': '', 'num_std': '', 'num_median': '',
                    'total_patient': total_patient}
        return {'cat_total_all': cat_total, 'cat_percentage_all': cat_percentage,
                'num_mean_all': '', 'num_std_all': '', 'num_median_all': '',
                'total_patient_all': total_patient}

    return _gender_row('Male', 'gender_male'), _gender_row('Female', 'gender_female')
def analyse_feature(ctransformer,df_cohort,n_cluster):
    """Build per-cluster feature summaries next to whole-cohort summaries.

    Normalises the numeric columns, summarises each numeric / categorical /
    gender column once for the full cohort, then once per cluster, and
    returns a list of DataFrames (one per cluster) with the cluster columns
    side-by-side with the '_all' cohort columns.

    NOTE(review):
    - relies on the *global* `dim_red_method` for the cluster-label column;
      confirm it is set before calling.
    - `del cat_columns[:8]` mutates the column list *inside* the fitted
      ctransformer -- presumably dropping 8 leading service columns; verify.
    - DataFrame.append was removed in pandas 2.0; this function requires
      pandas < 2.
    """
    result_array=[]
    min_max_scaler = preprocessing.MinMaxScaler()
    num_columns=ctransformer.transformers[0][2]
    cat_columns=ctransformer.transformers[1][2]
    # scale numeric columns to [0, 1] so summaries are comparable
    df_cohort[list(num_columns)] = min_max_scaler.fit_transform(df_cohort[list(num_columns)])
    del cat_columns[:8]
    # whole-cohort reference table ('_all' columns)
    col=['cat_total_all','cat_percentage_all','num_mean_all','num_std_all','num_median_all','total_patient_all']
    result_all=pd.DataFrame(columns=col)
    for col in num_columns:
        row=analyse_num_column(df_cohort,col,'all')
        result_all=result_all.append(row, ignore_index=True)
    for col in cat_columns:
        row=analyse_cat_column(df_cohort,col,'all')
        result_all=result_all.append(row, ignore_index=True)
    row=analyse_gender_column(df_cohort,'all')
    result_all=result_all.append(row[0], ignore_index=True)
    result_all=result_all.append(row[1], ignore_index=True)
    # per-cluster tables, each concatenated with the cohort reference
    for i in (range(n_cluster)):
        col=['column_name','col_type','cat_total','cat_percentage','num_mean','num_std','num_median','total_patient']
        result=pd.DataFrame(columns=col)
        df=df_cohort.loc[df_cohort[dim_red_method]==i]
        for col in num_columns:
            row=analyse_num_column(df,col,'Cluster')
            result=result.append(row, ignore_index=True)
        for col in cat_columns:
            row=analyse_cat_column(df,col,'Cluster')
            result=result.append(row, ignore_index=True)
        row=analyse_gender_column(df,'Cluster')
        result=result.append(row[0], ignore_index=True)
        result=result.append(row[1], ignore_index=True)
        result=pd.concat([result,result_all],axis=1)
        result_array.append(result)
    return result_array
def get_important_features(result_array,n_cluster,top_features):
    """Print the *top_features* most distinctive features for each cluster.

    For numeric features the distinctiveness score is |cluster median -
    cohort median|; for categorical features it is |cluster share - cohort
    share|. Expects *result_array* as produced by analyse_feature.
    Prints only; nothing is returned.
    """
    for i in (range(n_cluster)):
        # numeric features: rank by absolute median difference vs cohort
        test_num=result_array[i].loc[result_array[i]['col_type']=='num']
        test_num['dif_median']=abs(test_num['num_median']-test_num['num_median_all'])
        test_num=test_num.sort_values(by=['dif_median'],ascending=False)
        print('Cluster '+str(i)+' num features \n',test_num[['column_name' ,'num_median','num_median_all']].head(top_features))
        # categorical features: rank by absolute share difference vs cohort
        test_cat=result_array[i].loc[result_array[i]['col_type']=='cat']
        test_cat['dif_percentage']=abs(test_cat['cat_percentage']-test_cat['cat_percentage_all'])
        test_cat=test_cat.sort_values(by=['dif_percentage'],ascending=False)
        print('Cluster '+str(i)+' cat features \n',test_cat[['column_name' ,'cat_percentage','cat_percentage_all']].head(top_features))
# # Anova
def num_feature_importance_anova(df,ctransformer,dim_red_method,n_cluster,top_features):
    """Rank numeric features per cluster with one-way ANOVA (cluster vs rest).

    For every cluster the cohort is split into <that cluster> vs
    "other_cluster"; each numeric column is tested with scipy's f_oneway
    and |log(p)| is used as the importance score. Also reports whether the
    cluster mean is higher or lower than the rest.

    Parameters
    ----------
    df : pd.DataFrame        cohort with a *dim_red_method* cluster column
    ctransformer             fitted ColumnTransformer; transformers[0][2]
                             supplies the numeric column names
    dim_red_method : str     name of the cluster-label column
    n_cluster : int          number of clusters (labels get renamed to
                             'cluster_<i>')
    top_features : int       kept for interface compatibility (unused here)

    Returns
    -------
    list[pd.DataFrame]       one ranked result frame per cluster

    NOTE(review): df_temp aliases df, so the label renaming and min-max
    normalisation mutate the caller's frame -- confirm this is intended.
    """
    df_temp=df
    # replace numeric cluster ids with readable 'cluster_<i>' labels
    for cluster in range(n_cluster):
        cluster_name='cluster_'+str(cluster)
        df[dim_red_method].replace({cluster: cluster_name},inplace=True)
    # normalise numeric columns to [0, 1]
    min_max_scaler = preprocessing.MinMaxScaler()
    num_columns=ctransformer.transformers[0][2]
    df_temp[list(num_columns)] = min_max_scaler.fit_transform(df_temp[list(num_columns)])
    col=['column name','F-Value','p-value','absolute_p','compared to other']
    result_anova=[]
    for cluster in df_temp[dim_red_method].unique():
        # binarise the labels: this cluster vs everything else
        df_temp['temp_cluster']=df_temp[dim_red_method]
        df_temp.loc[df[dim_red_method] != cluster, "temp_cluster"] = "other_cluster"
        rows=[]
        for num_col in num_columns:
            feature=num_col
            result = df_temp.groupby('temp_cluster')[feature].apply(list)
            feature_value_1=result[cluster]
            feature_value_2=result['other_cluster']
            # BUG FIX: `mean` was an undefined name in the original; use
            # np.mean explicitly.
            mean_1=np.mean(feature_value_1)
            mean_2=np.mean(feature_value_2)
            if mean_1 > mean_2:
                compared='higher'
            else:
                compared='lower'
            F, p = stats.f_oneway(*result)
            # squeeze extremely small p-values through a fixed-precision
            # string round-trip (as in the original implementation)
            p=float(format(p, '.300000000g'))
            if p!=0:
                importance=abs(np.log(p))
            else:
                importance=0
            rows.append({'column name':(feature+'_'+cluster),'F-Value':F,'p-value':p,'absolute_p':importance,'compared to other':compared})
        # DataFrame.append was removed in pandas 2.0 -> build from collected rows
        result_all=pd.DataFrame(rows, columns=col)
        result_all=result_all.sort_values(by=['absolute_p'],ascending=False)
        result_anova.append(result_all)
    return result_anova
# # Chi Test
#https://www.pythonfordatascience.org/chi-square-test-of-independence-python/
def cat_feature_importance(df,ctransformer,sup_colums,dim_red_method,n_cluster,top_features):
    """Rank categorical features per cluster with chi-square tests.

    For every cluster the cohort is split into <that cluster> vs
    "other_cluster"; each categorical column (from the fitted transformer,
    plus the extra *sup_colums*) is tested with researchpy's chi-square
    crosstab and |log(p)| is used as the importance score. Also reports
    whether the feature's 'Yes' share in the cluster is higher or lower
    than in the rest of the cohort.

    Returns a list of result DataFrames, one per cluster.

    NOTE(review):
    - in the *sup_colums* loop the line `compare=''` looks like a typo for
      `compared=''`; when feature == 'gender' the later `compared` reference
      may reuse a stale value from a previous iteration -- verify.
    - DataFrame.append was removed in pandas 2.0; requires pandas < 2.
    - *top_features* is accepted but unused here.
    """
    #replace cluster names
    #establish two categories in all Categories
    for cluster in (range(n_cluster)):
        cluster_name='cluster_'+str(cluster)
        df[dim_red_method].replace({cluster: cluster_name},inplace=True)
    # coerce booleans / 0-1 / NaN into the two string categories 'Yes'/'No'
    df=df.replace(True, 'Yes')
    df=df.replace(False,'No')
    df=df.fillna('No')
    df=df.replace(1, 'Yes')
    df=df.replace(0,'No')
    df=df.fillna('No')
    col=['column name','Pearson Chi-square','Cramers V','p-value','absolute_p','compared to other']
    result_all=pd.DataFrame(columns=col)
    result_chi=[]
    for cluster in df[dim_red_method].unique():
        result_all=pd.DataFrame(columns=col)
        # binarise the labels: this cluster vs everything else
        df['temp_cluster']=df[dim_red_method]
        df.loc[df[dim_red_method] != cluster, "temp_cluster"] = "other_cluster"
        #print(df[[dim_red_method,'temp_cluster']])
        cat_columns=ctransformer.transformers[1][2]
        # iterate over the transformer's categorical columns
        for cat_col in cat_columns:
            feature=cat_col
            crosstab, test_results, expected = rp.crosstab(df[feature], df['temp_cluster'],
                                               test= "chi-square",
                                               expected_freqs= True,
                                               prop= "cell")
            # squeeze tiny p-values through a fixed-precision string round-trip
            p=format(test_results["results"][1], '.300000000g')
            #print(p)
           # if test_results["results"][1]!=0:
            p=float(p)
            if p!=0:
                importance=abs(np.log(p))
            else:
                importance=0
            compared=''
            if feature !='gender':
                # compare the 'Yes' share inside vs outside the cluster
                feature_count_1=len(df.loc[df['temp_cluster']==cluster])
                feature_cluster=df.loc[df['temp_cluster']==cluster]
                feature_percentage_1=(len(feature_cluster.loc[feature_cluster[feature]=='Yes'])/feature_count_1)
                #print(feature_percentage_1)
                feature_count_2=len(df.loc[df['temp_cluster']=='other_cluster'])
                feature_cluster_2=df.loc[df['temp_cluster']=='other_cluster']
                feature_percentage_2=(len(feature_cluster_2.loc[feature_cluster_2[feature]=='Yes'])/feature_count_2)
                #print(feature_percentage_2)
                if feature_percentage_1 > feature_percentage_2:
                    compared='higher'
                else:
                    compared='lower'
            row={'column name':(feature+'_'+cluster),'Pearson Chi-square':test_results["results"][0],'Cramers V':test_results["results"][2],'p-value':p,'absolute_p':importance,'compared to other':compared}
            #row={'column name':feature,'Pearson Chi-square':test_results["results"][0],'Cramers V':test_results["results"][2],'p-value':p,'absolute_p':importance}
            result_all=result_all.append(row, ignore_index=True)
        # same test for the externally supplied (supervised) columns
        for cat_col in sup_colums:
            #print('Calculaint Supervised features')
            feature=cat_col
            crosstab, test_results, expected = rp.crosstab(df[feature], df['temp_cluster'],
                                               test= "chi-square",
                                               expected_freqs= True,
                                               prop= "cell")
            #print(crosstab)
            p=format(test_results["results"][1], '.300000000g')
            #print(p)
           # if test_results["results"][1]!=0:
            p=float(p)
            if p!=0:
                importance=abs(np.log(p))
            else:
                importance=0
            compare=''
            if feature !='gender':
                feature_count_1=len(df.loc[df['temp_cluster']==cluster])
                feature_cluster=df.loc[df['temp_cluster']==cluster]
                feature_percentage_1=(len(feature_cluster.loc[feature_cluster[feature]=='Yes'])/feature_count_1)
               # print(feature_percentage_1)
                feature_count_2=len(df.loc[df['temp_cluster']=='other_cluster'])
                feature_cluster_2=df.loc[df['temp_cluster']=='other_cluster']
                feature_percentage_2=(len(feature_cluster_2.loc[feature_cluster_2[feature]=='Yes'])/feature_count_2)
               # print(feature_percentage_2)
                if feature_percentage_1 > feature_percentage_2:
                    compared='higher'
                else:
                    compared='lower'
            row={'column name':(feature+'_'+cluster),'Pearson Chi-square':test_results["results"][0],'Cramers V':test_results["results"][2],'p-value':p,'absolute_p':importance,'compared to other':compared}
            #row={'column name':feature,'Pearson Chi-square':test_results["results"][0],'Cramers V':test_results["results"][2],'p-value':p,'absolute_p':importance}
            result_all=result_all.append(row, ignore_index=True)
        result_all=result_all.sort_values(by=['absolute_p'],ascending=False)
        result_chi.append(result_all)
    #result_all=result_all.drop_duplicates(subset='column name',keep='first', inplace=False)
    #return result_all.head(top_features)
    return result_chi
# +
#with open('Cohort/Models/ColumnTransformer/'+df_name+'_'+num_scaler_name+'_'+cat_scaler_name+'.pkl', 'rb') as f:
# ctransformer = pickle.load(f)
#cat_columns=ctransformer.transformers[1][2]
#cat_columns
# -
# # T Test
def num_feature_importance_t_test(df,ctransformer,dim_red_method,n_cluster,top_features,inp_colums,merge_w_inpatient):
    """Rank numeric features per cluster with Welch's t-test (cluster vs rest).

    For every cluster the cohort is split into <that cluster> vs
    "other_cluster"; each numeric column is tested with
    scipy.stats.ttest_ind(equal_var=False) and |log(p)| is used as the
    importance score. Also reports whether the cluster mean is higher or
    lower than the rest.

    When *merge_w_inpatient* is True, the first entry of *inp_colums* is
    treated as an additional numeric column.

    Returns a list of result DataFrames, one per cluster.

    NOTE(review): df_temp aliases df, so the label renaming and min-max
    normalisation mutate the caller's frame. DataFrame.append was removed
    in pandas 2.0 (requires pandas < 2); *top_features* is unused here.
    """
    df_temp=df
    #replace cluster names
    for cluster in (range(n_cluster)):
        cluster_name='cluster_'+str(cluster)
        df[dim_red_method].replace({cluster: cluster_name},inplace=True)
    #normalize num columns
    min_max_scaler = preprocessing.MinMaxScaler()
    num_columns=ctransformer.transformers[0][2]
    if merge_w_inpatient==True:
        # also test the inpatient column (mutates the transformer's list!)
        inpatient=inp_colums[0]
        num_columns.append(inpatient)
    #print(num_columns)
    df_temp[list(num_columns)] = min_max_scaler.fit_transform(df_temp[list(num_columns)])
    #iterate over num columns and calculate the p-Value:
    col=['column name','T-Statistics','p-value','absolute_p','compared to other']
    result_all=pd.DataFrame(columns=col)
    result_t_test=[]
    for cluster in df_temp[dim_red_method].unique():
        result_all=pd.DataFrame(columns=col)
        # binarise the labels: this cluster vs everything else
        df_temp['temp_cluster']=df_temp[dim_red_method]
        df_temp.loc[df[dim_red_method] != cluster, "temp_cluster"] = "other_cluster"
        for num_col in num_columns:
            feature=num_col
            feature_value_1=df_temp.loc[df_temp['temp_cluster']==cluster][feature].values
            feature_value_2=df_temp.loc[df_temp['temp_cluster']=="other_cluster"][feature].values
            # Welch's t-test (unequal variances)
            statistics,p=stats.ttest_ind(feature_value_1, feature_value_2, equal_var = False)
            mean_1=feature_value_1.mean()
            mean_2=feature_value_2.mean()
            if mean_1 > mean_2:
                compared='higher'
            else:
                compared='lower'
           # print(feature_value_1)
           # print(feature_value_2)
           # print(p)
            # squeeze tiny p-values through a fixed-precision string round-trip
            p=format(p, '.300000000g')
            p=float(p)
            if p!=0:
                importance=abs(np.log(p))
            else:
                importance=0
            row={'column name':(feature+'_'+cluster),'T-Statistics':statistics,'p-value':p,'absolute_p':importance,'compared to other':compared}
            result_all=result_all.append(row, ignore_index=True)
        result_all=result_all.sort_values(by=['absolute_p'],ascending=False)
        result_t_test.append(result_all)
    return result_t_test
# # Generic Cluster information:
# +
#function for statistics:
def get_base_characteristic_value(df , characteristic , kind):
if kind=="mean":
df_mean=df[characteristic].mean()
df_std= df[characteristic].std()
df_max= df[characteristic].max()
df_min= df[characteristic].min()
if characteristic == "HF_Onset_age_in_days":
base_characteristics_cohort=pd.DataFrame({'Variable': [characteristic+"_mean", characteristic+"_std", characteristic+"_max", characteristic+"_min"],
'Value': [(df_mean/365), (df_std/365), (df_max/365), (df_min/365)],})
else:
base_characteristics_cohort=pd.DataFrame({'Variable': [characteristic+"_mean", characteristic+"_std", characteristic+"_max", characteristic+"_min"],
'Value': [(df_mean), (df_std), (df_max), (df_min)],})
if kind=="count":
base_characteristics_cohort=pd.DataFrame(columns=["Variable","Value"])
feature_value=df[characteristic].unique()
#print(feature_value)
for value in feature_value:
df_condition=df.loc[df[characteristic]==value]
df_percent= df_condition.shape[0]/df.shape[0]
#print(df_percent)
new_row1 = {'Variable': value+"_total",'Value': df_condition.shape[0]}
new_row2 = {'Variable': value+"_relation",'Value': df_percent}
base_characteristics_cohort=base_characteristics_cohort.append(new_row1, ignore_index=True)
base_characteristics_cohort=base_characteristics_cohort.append(new_row2, ignore_index=True)
# print(df_condition.shape[0], df_percent)
#print (base_characteristics_cohort)
return base_characteristics_cohort
def get_base_characteristics(df, characteristics):
    """Collect summary rows for every (column, kind) pair in *characteristics*.

    Each entry of *characteristics* is a [column_name, kind] pair that is
    forwarded to get_base_characteristic_value; the resulting frames are
    stacked into a single (Variable, Value) DataFrame.
    """
    frames = [pd.DataFrame(columns=["Variable", "Value"])]
    for column_name, summary_kind in characteristics:
        frames.append(get_base_characteristic_value(df, column_name, summary_kind))
    return pd.concat(frames)
def get_cluster_information(df,dim_red_method,base_characteristics):
    """Build one [label, size, summary-DataFrame] triple per cluster.

    Splits *df* on the cluster column *dim_red_method* and summarises each
    cluster with get_base_characteristics.
    """
    summaries = []
    for label in df[dim_red_method].unique():
        members = df.loc[df[dim_red_method] == label]
        summary = get_base_characteristics(members, base_characteristics)
        summaries.append([label, len(members), summary])
    return summaries
def get_cluster_statistics(df,dim_red_method):
    """Merge hospitalization/EF data into *df*, print per-cluster baseline
    statistics and draw box plots of avg_ef and days_in_hospital by cluster.

    Returns the list of per-cluster [label, size, summary] triples produced
    by get_cluster_information.
    """
    #load inpatient and EF dataframe
    hospitalization = pq.read_table('Cohort/Feature_Extraction/days_in_hospital.parquet').to_pandas()
    ef=pq.read_table('Cohort/Feature_Extraction/avg_EF.parquet').to_pandas()
    #merge both to the df (left joins on the cohort index vs. medical_record_number)
    df_cohort=pd.merge(df, hospitalization, how='left', left_index=True, right_on='medical_record_number')
    df_cohort=pd.merge(df_cohort, ef, how='left',left_index=True, right_on='medical_record_number')
    #get average days in hospital per patient per cluster
    base_characteristics=[
        [ "avg_ef","mean"],
        ["days_in_hospital","mean"],
        [ "HF_Onset_age_in_days","mean"],
        ["gender","count"]
    ]
    baseline_characteristics=get_cluster_information(df_cohort,dim_red_method,base_characteristics)
    print (baseline_characteristics)
    # box plots per cluster for ejection fraction and hospital days
    df_boxplt=df_cohort[["avg_ef",dim_red_method]]
    df_boxplt.boxplot(by=dim_red_method)
    df_boxplt=df_cohort[[ "days_in_hospital",dim_red_method]]
    df_boxplt.boxplot(by=dim_red_method)
    return baseline_characteristics
#print(str(cluster))
#print(len(df_temp))
#print(df_temp_baseline)
# -
# # Visualization
def subStringCluster(string):
    """Strip the trailing '_cluster_<n>' suffix from a feature name.

    Returns the part before the first occurrence of '_cluster_'; the input
    is returned unchanged when the marker is absent.
    """
    prefix, _sep, _rest = string.partition('_cluster_')
    return prefix
def create_overview_table(conv_df,features_tuples,features,dim_red_method):
    """Build an overview table: one row per feature, one column per cluster.

    Categorical features show the fraction of cluster members for which the
    feature is set; numeric features show the string 'mean/median'.
    """
    overview = pd.DataFrame()
    overview['features'] = features
    for cluster_label in conv_df[dim_red_method].unique():
        overview[cluster_label] = 0
        members = conv_df.loc[conv_df[dim_red_method] == cluster_label]
        for feature_name, feature_kind in features_tuples:
            row_mask = overview['features'] == feature_name
            if feature_kind == 'categorical':
                # share of members that have the (0/1 encoded) feature
                share = members[feature_name].sum() / len(members)
                overview.loc[row_mask, cluster_label] = share
            if feature_kind == 'numeric':
                cell = str(str(members[feature_name].mean()) + '/' + str(members[feature_name].median()))
                overview.loc[row_mask, cluster_label] = cell
    return overview
def getTopCluster(evaluation_pandas, n_topFeature, n_cluster ):
    """Select the top significant features per cluster from the test results.

    evaluation_pandas[1] holds the per-cluster numeric (t-test) result frames,
    evaluation_pandas[2] the categorical (chi-square) ones; both are expected
    to be sorted by importance. Features named 'Procedure*' or 'gender' are
    skipped, and only features with 0 < p < 0.05 are kept.

    Returns:
        (topFeatures_tuple, topFeatures): a set of (name, kind) tuples and
        the deduplicated list of feature names.
    """
    topFeatures=[]
    for n in range(n_cluster):
        #print(n)
        features=[]
        #categorical features
        features=evaluation_pandas[2][n]['column name'].values
        all_features = evaluation_pandas[2][n]
        x=0
        for i in range(n_topFeature):
            feature=subStringCluster(features[x])
            if 'Procedure' in feature:
                # print (feature)
                #x=x+1
                #print(subStringCluster(features[x]))
                #topFeatures.append(subStringCluster(features[x]))
                # NOTE(review): reassigning the loop variable has NO effect on a
                # Python for-loop — presumably meant to extend the search window
                # so that skipped features do not count against n_topFeature;
                # as written, fewer than n_topFeature features may be returned.
                i=i-1
            elif feature != 'gender' :
                f=all_features.loc[all_features['column name']==features[x]]
                # p_value is a length-1 numpy array; the comparisons below rely
                # on elementwise broadcasting
                p_value=f['p-value'].values
                if p_value < 0.05 and p_value!=0.0 :
                    topFeatures.append([subStringCluster(features[x]),'categorical'])
                    #print(feature)
                else:
                    # NOTE(review): same ineffective counter adjustment as above
                    i=i-1
            x=x+1
        #numeric
        features=evaluation_pandas[1][n]['column name'].values
        all_features = evaluation_pandas[1][n]
        for i in range(n_topFeature):
            f=all_features.loc[all_features['column name']==features[i]]
            p_value=f['p-value'].values
            if p_value < 0.05 and p_value!=0.0 :
                topFeatures.append([subStringCluster(features[i]),'numeric'])
    # deduplicate across clusters while keeping the (name, kind) pairing
    topFeatures_tuple=set(tuple(t)for t in topFeatures)
    #print(topFeatures_tuple)
    topFeatures=[t[0] for t in topFeatures_tuple]
    #print(topFeatures)
    #topFeatures=set(topFeatures)
    #topFeatures=list(topFeatures)
    #print(topFeatures)
    return topFeatures_tuple, topFeatures
#https://github.com/hpi-dhc/robotehr/blob/e3673aef701aa817c74d04170986f01fa191212a/robotehr/evaluation/risk_groups.py#L70-L100
def plot_risk_groups(df, features,dim_red_method, friendly_names_converter=None, filename='', nrows=2, figsize=[12,3]):
    """Draw one violin plot per feature, grouped by cluster and split by gender.

    Adapted from
    https://github.com/hpi-dhc/robotehr/blob/e3673aef701aa817c74d04170986f01fa191212a/robotehr/evaluation/risk_groups.py#L70-L100

    Parameters:
        df: DataFrame with the feature columns, the cluster column
            *dim_red_method* and a 'gender' column (used as hue).
        features: list of column names to plot (one subplot each, stacked).
        friendly_names_converter: optional mapping feature -> display name.
        filename: if non-empty, the figure is also saved there.
        nrows: unused; kept for call-site compatibility.
        figsize: forwarded to plt.subplots.

    Returns:
        the matplotlib Figure.
    """
    fig, ax = plt.subplots(len(features),figsize=figsize)
    fig.tight_layout(pad=3.0)
    # Bug fix: plt.subplots(1) returns a single Axes object (not an array),
    # so indexing ax[i] crashed for exactly one feature; normalise to 1-D.
    ax = np.atleast_1d(ax)
    print(len(features))
    for i in range(len(features)):
        current_axis = ax[i]
        # give binary 0/1 features vertical head-room so the violins fit
        if df[features[i]].min() == 0 and df[features[i]].max() == 1:
            current_axis.set_ylim(bottom=-0.5, top=1.5)
        sns.violinplot(
            x=dim_red_method,
            y=features[i],
            data=df,
            palette="muted",
            ax=current_axis,
            hue='gender'
        )
        if friendly_names_converter:
            title = friendly_names_converter.get(features[i])
        else:
            title = features[i]
        # truncate overly long feature names in the subplot title
        if len(title) > 50:
            title = f'{title[:50]} ...'
        current_axis.set_title(f'{title}', fontsize=20)
        current_axis.set_xlabel('')
        current_axis.set_ylabel('')
    if filename:
        fig.savefig(filename, dpi=300, bbox_inches="tight")
    return fig
def map_feature_names(feature_evaluation):
    """Attach a human readable name to every row of *feature_evaluation*.

    Looks each value of the 'features' column up in
    Cohort/feature_look_up.parquet; features without a mapping keep their
    original name. Adds/overwrites the 'human_readable' column in place and
    returns the (mutated) frame.
    """
    feature_look_up=pq.read_table('Cohort/feature_look_up.parquet').to_pandas()
    feature_evaluation['human_readable']=''
    # loop-invariant hoisted: the lookup list was rebuilt on every iteration
    lookuplist=feature_look_up['original_feature_name'].to_list()
    for index,r in feature_evaluation.iterrows():
        if r['features'] in lookuplist:
            human_readable_row=feature_look_up.loc[feature_look_up['original_feature_name']==r['features']]
            human_readable=human_readable_row['human_readable'].values
            #print(human_readable)
            feature_evaluation.loc[feature_evaluation['features']==r['features'],'human_readable']=human_readable[0]
        else :
            # no mapping known -> fall back to the raw feature name
            feature_evaluation.loc[feature_evaluation['features']==r['features'],'human_readable']=r['features']
    return feature_evaluation
# +
def plotTopFeatures(df_path,df,merge_w_supervised,dim_red_method, evaluation_results, n_cluster, n_topFeatures):
    """Plot the top discriminating features per cluster and return an overview table.

    Reloads the raw cohort from *df_path*, optionally merges the supervised
    feature set, copies the cluster labels over from *df*, selects the top
    *n_topFeatures* per cluster via getTopCluster, draws violin plots and
    builds the per-cluster overview table with human readable names.
    """
    # NOTE(review): dead first draft kept as a string literal below.
    '''#convert the dataframe
    df_origin=pq.read_table(df_path).to_pandas()
    #print(df_origin['gender'])
    df_origin[dim_red_method]=df[dim_red_method]
    conv_df=df_origin
    if merge_w_supervised==True:
        df_supervised_merge= pq.read_table('Cohort/Feature_Extraction/ALL_HF_cohort_supervised_only_ever_diag_drugFORMerge.parquet').to_pandas()
        conv_df.index = conv_df.index.map(str)
        df_supervised_merge.index = df_supervised_merge.index.map(str)
        conv_df=pd.merge(conv_df, df_supervised_merge, left_on='medical_record_number', right_on='medical_record_number')
    '''
    conv_df=pq.read_table(df_path).to_pandas()
    if merge_w_supervised==True:
        df_supervised_merge= pq.read_table('Cohort/Feature_Extraction/ALL_HF_cohort_supervised_only_ever_diag_drugFORMerge_wLab.parquet').to_pandas()
        # align index dtypes before merging on medical_record_number
        conv_df.index = conv_df.index.map(str)
        df_supervised_merge.index = df_supervised_merge.index.map(str)
        conv_df=pd.merge(conv_df, df_supervised_merge, left_on='medical_record_number', right_on='medical_record_number')
    # normalise boolean / yes-no encodings to 0/1 for plotting and aggregation
    conv_df=conv_df.replace(True, 1)
    conv_df=conv_df.replace(False,0)
    conv_df=conv_df.replace('yes', 1)
    conv_df=conv_df.replace('no',0)
    conv_df=conv_df.fillna(0)
    # copy the cluster assignment over from the pipeline result
    conv_df[dim_red_method]=df[dim_red_method]
    conv_df=conv_df.sort_values(by=[dim_red_method],ascending=True)
    #get top features:
    evaluation_pandas=evaluation_results
    features_tuples,features=getTopCluster(evaluation_pandas, n_topFeatures, n_cluster)
    #plot features
    #print (cluster_name)
    # fig_x=12*len(features)
    # scale figure height with the number of stacked violin plots
    fig_y=8*len(features)
    plot_risk_groups(conv_df, features, dim_red_method,friendly_names_converter=None, filename='', nrows=10, figsize=[12,fig_y])
    feature_evaluation_df=create_overview_table(conv_df,features_tuples,features,dim_red_method)
    feature_evaluation_df=map_feature_names(feature_evaluation_df)
    return feature_evaluation_df
# -
# # Nice Scatter Plot
def scatter_plot(df,labels):
    """2-D scatter of the first two embedding dimensions, coloured by *labels*."""
    sns.set(style='white', rc={'figure.figsize': (10, 8)})
    sns.color_palette("Set2")
    xs = df[:, 0]
    ys = df[:, 1]
    plt.scatter(xs, ys, c=labels, s=0.1, cmap='Accent')
    plt.show()
    # px.scatter(df[:, 0], df[:, 1], c=labels, s=0.1 ,color_continuous_scale=px.colors.sequential.Inferno);
    # px.show()
# # pipeline
def cluster_pipeline(df_path,df_name,age_filter,drop_gender,drop_age,tune_umap,umap_distance,umap_neighbors, check_pca,plotting,num_scaler_name,cat_scaler_name, dim_red_method,dimension,a_f_decoder,a_f_encoder,batchsize,epochs,optimizer,loss_function,cluster_method,ellbow_method,n_cluster,anova,chi,top_features,merge_w_supervised,merge_w_inpatient):
    """End-to-end clustering experiment: load -> scale -> reduce -> cluster -> evaluate.

    Loads the cohort parquet at *df_path*, applies the configured column
    transformer, reduces dimensionality with *dim_red_method* (results are
    cached as .npz files under Cohort/Models/DimReduction/), clusters with
    *cluster_method*, stores internal validation metrics in
    Cohort/Models/Metrics_Results.parquet and finally runs the configured
    per-cluster feature importance tests.

    Returns:
        (df_cohort, evaluation_results) where evaluation_results is a list
        of [cluster statistics, <optional t-test/anova/chi result frames>].

    NOTE(review): the t-test branch below reads the module-level global
    ``t_test`` — it is not a parameter of this function; confirm intent.
    """
    # experiment_name encodes the full configuration and keys all caches/outputs
    experiment_name=df_name+'_'+num_scaler_name+'_'+cat_scaler_name
    if drop_gender==True:
        experiment_name=experiment_name+'_woGender'
        print(experiment_name)
    if drop_age==True:
        experiment_name=experiment_name+'_woAge'
        print(experiment_name)
    labels=[]
    df_origin= pq.read_table(df_path).to_pandas()
    #Age Filter:
    if age_filter==True:
        # cap age at 90 years (90 * 365 = 32850 days)
        df_origin.loc[(df_origin['HF_Onset_age_in_days'] > 32850),'HF_Onset_age_in_days']=32850
    #print(df_cohort)
    #general columns that should be not included in the clustering
    col_for_dropping=[
        'religion',
        'race',
        'patient_ethnic_group',
        'deceased_indicator',
        'mother_account_number',
        'address_zip',
        'marital_status_code']
    #Exclude gender in Cluster analysis
    if drop_gender==True:
        col_for_dropping.append('gender')
    #Exclude age from Cluster Analysis
    if drop_age==True:
        col_for_dropping.append('HF_Onset_age_in_days')
    df_cohort=df_origin.drop(col_for_dropping,axis=1)
    #print(df_cohort)
    #ColumnTransformer df,df_name,num_scaler_name,cat_scaler_name
    a=apply_columnTransformer(df_cohort,df_name,num_scaler_name,cat_scaler_name,experiment_name)
    transformed_df= a[0]
    ctransformer=a[1]
    loss=0
    n_layer=0
    # test best PCA Dimension: plot cumulative explained variance only
    if check_pca==True:
        pca = PCA().fit(transformed_df)
        fig, ax = plt.subplots()
        d_pca=np.cumsum(pca.explained_variance_ratio_)
        #x="year", y="passengers"
        #sns.set(style='white', rc={'figure.figsize':(12,10)})
        g=sns.lineplot(data=d_pca,ax=ax)
        g.set_xticklabels([0,25,50,75,100,125,150,250])
        #g.set_yticklabels([0,0.25,0.50,0.75,1])
        ax.set_xlim(0,300)
        ax.set_ylim(0,1)
        #ax.set_xticks(range(1,200))
        plt.show()
        #print(len(d))
        #sns.lineplot(data=may_flights, x="year", y="passengers")
        #sns.set(style='white', rc={'figure.figsize':(10,8)})
        #fig,ax=plt.plot(np.cumsum(pca.explained_variance_ratio_))
        #ax.set_xticks(range(1,250))
        #plt.xlabel('number of components')
        #plt.ylabel('cumulative explained variance');
        #return True
    #Dimension Reduction:
    experiment_name=experiment_name+'_'+dim_red_method+'_'+str(dimension)
    if tune_umap==True:
        experiment_name=experiment_name+'_'+str(umap_distance)+'_'+str(umap_neighbors)
    # try the cache first; on any failure fall through and recompute
    try:
        if dim_red_method=='AE':
            n_layer=3
            load = np.load('Cohort/Models/DimReduction/'+df_name+'_'+num_scaler_name+'_'+cat_scaler_name+'_AE_'+str(dimension)+'_'+a_f_encoder+'_'+a_f_decoder+'_'+str(n_layer)+'_'+str(batchsize)+'_'+str(epochs)+'_'+optimizer+'_'+loss_function+'.npz')
            df_dim_red=load['a']
            print('df_dim_red loaded')
        else:
            load = np.load('Cohort/Models/DimReduction/'+experiment_name+'.npz')
            df_dim_red=load['a']
            print('df_dim_red loaded!!!')
    # NOTE(review): bare except — ANY error (not only a missing cache file)
    # silently triggers recomputation
    except:
        if dim_red_method=="PPCA":
            df_dim_red=apply_ppca(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="TSNE":
            df_dim_red=apply_TSNE(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="ICA":
            df_dim_red=apply_ICA(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="SVD":
            df_dim_red=apply_svd(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="PCA":
            df_dim_red=apply_pca(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="IPCA":
            df_dim_red=apply_ipca(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="KPCA":
            df_dim_red=apply_kpca(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="LDA":
            df_dim_red=apply_lda(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        # combined methods: linear reduction first, then t-SNE on the result
        if dim_red_method=="PPCA_TSNE":
            df_dim_red=apply_ppca(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
            df_dim_red=apply_TSNE(df_cohort,df_dim_red,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="PCA_TSNE":
            df_dim_red=apply_pca(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
            df_dim_red=apply_TSNE(df_cohort,df_dim_red,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="ICA_TSNE":
            df_dim_red=apply_ICA(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
            df_dim_red=apply_TSNE(df_cohort,df_dim_red,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="KPCA_TSNE":
            df_dim_red=apply_kpca(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
            df_dim_red=apply_TSNE(df_cohort,df_dim_red,dimension,df_name,num_scaler_name,cat_scaler_name,experiment_name)
        if dim_red_method=="UMAP":
            df_dim_red=apply_umap(df_cohort,transformed_df,dimension,umap_distance,umap_neighbors,df_name,num_scaler_name,cat_scaler_name,experiment_name,True)
        if dim_red_method=='AE':
            n_layer=3
            df_dim_red,loss=apply_AE(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,a_f_decoder,a_f_encoder,n_layer,batchsize,epochs,optimizer,loss_function)
        if dim_red_method=='AE_TSNE':
            n_layer=3
            df_dim_red,loss=apply_AE(df_cohort,transformed_df,dimension,df_name,num_scaler_name,cat_scaler_name,a_f_decoder,a_f_encoder,n_layer,batchsize,epochs,optimizer,loss_function)
            # NOTE(review): this apply_TSNE call passes one argument fewer
            # than the other call sites (no experiment_name) — confirm signature
            df_dim_red=apply_TSNE(df_cohort,df_dim_red,dimension,df_name,num_scaler_name,cat_scaler_name)
        if dim_red_method=="":
            df_dim_red=transformed_df
        # cache the freshly computed reduction for the next run
        if dim_red_method=='AE':
            np.savez_compressed('Cohort/Models/DimReduction/'+df_name+'_'+num_scaler_name+'_'+cat_scaler_name+'_AE_'+str(dimension)+'_'+a_f_encoder+'_'+a_f_decoder+'_'+str(n_layer)+'_'+str(batchsize)+'_'+str(epochs)+'_'+optimizer+'_'+loss_function+'.npz',a=df_dim_red)
        else:
            np.savez_compressed('Cohort/Models/DimReduction/'+experiment_name+'.npz',a=df_dim_red)
    #extend the experiment_name
    experiment_name=experiment_name+'_'+cluster_method+'_'+str(n_cluster)
    if cluster_method=="kmeans":
        labels=apply_kmeans(df_dim_red,ellbow_method,n_cluster,df_name,num_scaler_name,cat_scaler_name,dim_red_method,experiment_name)
    if cluster_method=="gaussian":
        # NOTE(review): gaussian branch ignores n_cluster and returns no labels
        apply_gaussian(df_dim_red,4)
    if cluster_method=="hierarchical":
        labels=apply_hierachical(df_dim_red,ellbow_method,n_cluster,df_name,num_scaler_name,cat_scaler_name,dim_red_method,experiment_name)
    if cluster_method=="dbscan":
        labels=apply_dbscan(df_dim_red,ellbow_method,n_cluster,df_name,num_scaler_name,cat_scaler_name,dim_red_method,experiment_name)
    if cluster_method=="hdbscan":
        labels=apply_hdbscan(df_dim_red,ellbow_method,n_cluster,df_name,num_scaler_name,cat_scaler_name,dim_red_method,experiment_name)
    if plotting==True:
        #prepare data for plotting: project down to 2-D with UMAP
        df_dim_red_plot=apply_umap(df_cohort,df_dim_red,2,umap_distance,umap_neighbors,df_name,num_scaler_name,cat_scaler_name,experiment_name,False)
        #print first 2 dim of dimensionality reduced data:
        scatter_plot(df_dim_red_plot,None)
        scatter_plot(df_dim_red_plot,labels)
    # evaluation_results=[]
    # store internal validation metrics once per experiment_name
    if len(labels)!=0:
        evaluation_results=pq.read_table('Cohort/Models/Metrics_Results.parquet').to_pandas()
        print(experiment_name)
        if experiment_name in evaluation_results.values:
            t=evaluation_results.loc[evaluation_results['Experiment Name'] == experiment_name]
            print(t)
        else :
            print(labels)
            silhouette_Coefficient= get_silhouette_Coefficient(labels,df_dim_red)
            calinski_harabasz=get_calinski_harabasz(labels,df_dim_red)
            davies_bouldin=get_davies_bouldin(labels,df_dim_red)
            # PPCA output cannot be compared against the original feature space
            if dim_red_method!='PPCA'and dim_red_method!='PPCA_TSNE':
                silhouette_Coefficient_original_Cohort= get_silhouette_Coefficient(labels,transformed_df)
                calinski_harabasz_original_Cohort=get_calinski_harabasz(labels,transformed_df)
                davies_bouldin_original_Cohort=get_davies_bouldin(labels,transformed_df)
            else:
                silhouette_Coefficient_original_Cohort=0
                calinski_harabasz_original_Cohort=0
                davies_bouldin_original_Cohort=0
            # NOTE(review): DataFrame.append was removed in pandas 2.0 — this
            # line needs pd.concat on newer pandas versions
            evaluation_results=evaluation_results.append({'Experiment Name':experiment_name,'Dataset':df_name,'Numerical Scaler':num_scaler_name,'Categorical Scaler':cat_scaler_name,'Dimension Reduction Method':dim_red_method,'Number of Dimension':dimension,'Activation Function Decoder':a_f_decoder,'Activation Function Encoder':a_f_encoder,'Number of Layer':n_layer,'batchsize':str(batchsize),'epochs':str(epochs),'optimizer':optimizer,'loss function':loss_function,'validation loss':loss,'Cluster Method':cluster_method,'Number of Cluster':n_cluster,'silhouette_Coefficient':silhouette_Coefficient , 'calinski_harabasz':calinski_harabasz , 'davies_bouldin':davies_bouldin,'silhouette_Coefficient_original_Cohort':silhouette_Coefficient_original_Cohort , 'calinski_harabasz_original_Cohort':calinski_harabasz_original_Cohort , 'davies_bouldin_original_Cohort':davies_bouldin_original_Cohort} , ignore_index=True)
            evaluation_results.to_parquet('Cohort/Models/Metrics_Results.parquet')
    df_cohort[dim_red_method]=labels
    #result_array=analyse_feature(ctransformer,df_cohort,n_cluster)
    #get_important_features(result_array,n_cluster,40)
    #Test try to add the supervised features:
    sup_colums=[]
    inp_colums=[]
    if merge_w_supervised==True:
        df_supervised_merge= pq.read_table('Cohort/Feature_Extraction/ALL_HF_cohort_supervised_only_ever_diag_drugFORMerge_wLab.parquet').to_pandas()
        df_cohort.index = df_cohort.index.map(str)
        df_supervised_merge.index = df_supervised_merge.index.map(str)
        sup_colums=df_supervised_merge.columns
        df_cohort=pd.merge(df_cohort, df_supervised_merge, left_on='medical_record_number', right_on='medical_record_number')
    if merge_w_inpatient==True:
        df_inpatient_merge= pq.read_table('Cohort/Feature_Extraction/Supervised_ALL_HF/inpatient_events_merge_wLab.parquet').to_pandas()
        df_cohort.index = df_cohort.index.map(str)
        df_inpatient_merge.index = df_inpatient_merge.index.map(str)
        inp_colums=df_inpatient_merge.columns
        df_cohort=pd.merge(df_cohort, df_inpatient_merge, left_on='medical_record_number', right_on='medical_record_number')
        print(df_cohort)
    #else:
    # sup_colums=[]
    #inp_colums=[]
    # persist the labelled (and optionally merged) cohort for later analysis
    df_cohort.to_parquet('Cohort/Models/Cluster/'+experiment_name+'.parquet')
    #print(df_cohort)
    evaluation_results=[]
    df_origin[dim_red_method]=df_cohort[dim_red_method]
    #added for without gender: NOT NEEEEDED
    # gender/age were dropped before clustering; restore them for the statistics
    df_cohort['gender']=df_origin['gender']
    df_cohort['HF_Onset_age_in_days']=df_origin['HF_Onset_age_in_days']
    cluster_information=get_cluster_statistics(df_cohort,dim_red_method)
    evaluation_results.append(cluster_information)
    if anova==True:
        top_numerical_features_anova=num_feature_importance_anova(df_cohort,ctransformer,dim_red_method,n_cluster,top_features)
        print('Top Numerical features: \n',top_numerical_features_anova)
        evaluation_results.append(top_numerical_features_anova)
    # NOTE(review): ``t_test`` is a module-level global, not a parameter
    if t_test==True:
        top_numerical_features_t_test=num_feature_importance_t_test(df_cohort,ctransformer,dim_red_method,n_cluster,top_features,inp_colums,merge_w_inpatient)
        print('Top Numerical features: \n',top_numerical_features_t_test)
        evaluation_results.append(top_numerical_features_t_test)
    if chi==True:
        top_catigorical_features=cat_feature_importance(df_cohort,ctransformer,sup_colums,dim_red_method,n_cluster,top_features)
        print('Top Categorical features: \n',top_catigorical_features)
        evaluation_results.append(top_catigorical_features)
    for entry in evaluation_results:
        print(entry)
    np.savez_compressed('Cohort/Models/ClusterEvaluation/'+experiment_name+'_evaluation.npz', a=evaluation_results)
    return df_cohort,evaluation_results
# # Pipeline Configurations
#
# ## Dataframe
# - df_path: Path to dataframe (String)
# - df_name: Name of dataframe (String)
# - age_filter: Age over 90 is fixed to 90 (Boolean)
# - drop_age: age will be not considered in the pipeline (Boolean)
# - drop_gender: gender will be not considered in the pipeline (Boolean)
# ## Preprocessing
# - scaler: Encoder for Categorical Columns:
# - num_scaler_name:
# - StandardScaler
# - MinMaxScaler
# - cat_scaler_name:
# - BinaryEncoder
# - OneHotEncoder
# ## Dimension Reduction Methods
# - dim_red_method:
# - PPCA
# - ICA
# - PCA
#     - check_pca: Calculating the variance represented by the different numbers of dimensions (Boolean)
# - KPCA
# - TSNE
# - SVD
# - LDA
# - PCA_TSNE
# - ICA_TSNE
# - AE
# - a_f_decoder: Activation Function of the decoder
# - a_f_encoder: Activation Function of the encoder
# - batchsize
# - epochs
# -optimizer
# - loss_function
# - AE_TSNE
# - UMAP
# - tune_umap: different configurations are tried out (Boolean)
#     - umap_distance: Minimum distance between the data points (Float)
#     - umap_neighbors: Number of neighbors (Integer)
#
# - dimension: number of dimensions the dataset should be reduced to
# ## Clustering
# - cluster_method:
# - kmeans
# - hierarchical (AgglomerativeClustering)
# - ellbow_method: True or False
# - n_cluster: number of cluster that should be applied to the dataset
# ## Feature Evaluation
# - anova: apply anova test on numerical features
# - chi: apply chi test on categorical features
# - top_features: Number of top features that should be printed out
#
# ## General
# - plotting: Plotting of Scatter plots (Boolean)
#
#
# ## Configuration of Cluster Pipeline example:
# Example single-run configuration: UMAP to 70 dimensions, k-means with 3
# clusters, t-test + chi-square feature evaluation, merged supervised features.
df_path='Cohort/Feature_Extraction/ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab.parquet'
df_name='ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab'
#df_path='Cohort/Feature_Extraction/ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_small_cleaned.parquet'
#df_name='ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_small_cleaned'
#_wSupervised
#df_path='Cohort/Feature_Extraction/ALL_HF_cohort_supervised_only_ever_diag_drug.parquet'
#df_name='ALL_HF_cohort_supervised_only_ever_diag_drug.parquet'
age_filter=True
drop_gender=True
drop_age=True
tune_umap=False
umap_distance=0.1
umap_neighbors=50
check_pca=False
plotting=True
num_scaler_name="MinMaxScaler"
cat_scaler_name='BinaryEncoder'
dim_red_method='UMAP'# 'ICA_TSNE'
# autoencoder settings are unused for UMAP and left empty
a_f_decoder=''
a_f_encoder=''
batchsize=''
epochs=''
optimizer=''
loss_function=''
cluster_method='kmeans'#'hierarchical'
ellbow_method=False
n_cluster=3
dimension=70
anova=False
t_test=True
chi=True
top_features=40
merge_w_supervised=True
merge_w_inpatient=False
df,evaluation_results=cluster_pipeline(df_path,df_name,age_filter,drop_gender,drop_age,tune_umap,umap_distance,umap_neighbors, check_pca,plotting,num_scaler_name,cat_scaler_name, dim_red_method,dimension,a_f_decoder,a_f_encoder,batchsize,epochs,optimizer,loss_function,cluster_method,ellbow_method,n_cluster,anova,chi,top_features,merge_w_supervised,merge_w_inpatient)
# ## Feature Evaluation example
# Overview table and violin plots for the top 5 features per cluster.
feature_evaluation_df=plotTopFeatures(df_path,df,merge_w_supervised,dim_red_method, evaluation_results,n_cluster ,5)
feature_evaluation_df
feature_evaluation_df.to_excel("Cohort/Models/Feature_evaluation_"+dim_red_method+str(dimension)+cluster_method+".xlsx")
#feature_evaluation_df.to_parquet("Cohort/Models/Feature_evaluation_"+dim_red_method+str(dimension)+cluster_method+".parquet")
# Bug fix: the first two arguments were swapped (df, df_path) — the signature
# is plotTopFeatures(df_path, df, ...). Re-run with the top 10 features.
plotTopFeatures(df_path,df,merge_w_supervised,dim_red_method, evaluation_results,n_cluster , 10)
# ## Further Pipeline Configurations
# Configuration template for a UMAP hyper-parameter sweep (no loop here — the
# arrays are consumed by the sweep cells further below).
df_path='Cohort/Feature_Extraction/ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab.parquet'
df_name='ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab'
age_filter=True
drop_gender=True
drop_age=True
tune_umap=True
arr_umap_distance=[0.5]
arr_umap_neighbors=[15,50,100]
check_pca=False
plotting=False
num_scaler_name="MinMaxScaler"
cat_scaler_name='BinaryEncoder'
# autoencoder settings unused for UMAP
a_f_decoder=''
a_f_encoder=''
batchsize=''
epochs=''
optimizer=''
loss_function=''
ellbow_method=False
#n_cluster=3
# feature evaluation disabled during the sweep (metrics only)
anova=False
t_test=False
chi=False
top_features=40
merge_w_supervised=False
merge_w_inpatient=False
dim_red_method='UMAP'
arr_cluster_method=['kmeans','hierarchical']
arr_dimension=[50,60,70,80,90]
arr_n_cluster=[3,4,5]
#run experiments for AE in a loop:
# Grid search over cluster method x dimension x cluster count for the
# autoencoder (tanh/tanh, adam, mse) reduction; x counts completed runs.
df_path='Cohort/Feature_Extraction/ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab.parquet'
df_name='ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab'
age_filter=True
drop_gender=True
drop_age=True
tune_umap=True
umap_distance=0.1
umap_neighbors=50
check_pca=False
plotting=False
num_scaler_name="MinMaxScaler"
cat_scaler_name='BinaryEncoder'
a_f_decoder='tanh'
a_f_encoder='tanh'
batchsize=1000
epochs=100
optimizer='adam'
loss_function='mse'
ellbow_method=False
#n_cluster=3
anova=False
t_test=False
chi=False
top_features=40
merge_w_supervised=False
merge_w_inpatient=False
arr_dim_red_method=['AE']
arr_cluster_method=['kmeans','hierarchical']
arr_dimension=[50,60,70,80,90]
arr_n_cluster=[3,4,5]
x=0
for r in arr_dim_red_method:
    dim_red_method=r
    for c in arr_cluster_method :
        cluster_method=c
        for d in arr_dimension:
            dimension=d
            for n in arr_n_cluster:
                n_cluster=n
                x=x+1
                df,evaluation_results=cluster_pipeline(df_path,df_name,age_filter,drop_gender,drop_age,tune_umap,umap_distance,umap_neighbors, check_pca,plotting,num_scaler_name,cat_scaler_name, dim_red_method,dimension,a_f_decoder,a_f_encoder,batchsize,epochs,optimizer,loss_function,cluster_method,ellbow_method,n_cluster,anova,chi,top_features,merge_w_supervised,merge_w_inpatient)
                print(x)
#run experiments in a loop for UMAP:
# Grid search over UMAP distance x neighbors x cluster method x dimension x
# cluster count; x counts completed runs.
df_path='Cohort/Feature_Extraction/ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab.parquet'
df_name='ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab'
age_filter=True
drop_gender=True
drop_age=True
tune_umap=True
arr_umap_distance=[0.5]
arr_umap_neighbors=[15,50,100]
check_pca=False
plotting=False
num_scaler_name="MinMaxScaler"
cat_scaler_name='BinaryEncoder'
# autoencoder settings unused for UMAP
a_f_decoder=''
a_f_encoder=''
batchsize=''
epochs=''
optimizer=''
loss_function=''
ellbow_method=False
#n_cluster=3
anova=False
t_test=False
chi=False
top_features=40
merge_w_supervised=False
merge_w_inpatient=False
dim_red_method='UMAP'
arr_cluster_method=['kmeans','hierarchical']
arr_dimension=[50,60,70,80,90]
arr_n_cluster=[3,4,5]
x=0
for dist in arr_umap_distance:
    umap_distance=dist
    for neighbors in arr_umap_neighbors:
        umap_neighbors=neighbors
        for c in arr_cluster_method :
            cluster_method=c
            for d in arr_dimension:
                dimension=d
                for n in arr_n_cluster:
                    n_cluster=n
                    x=x+1
                    df,evaluation_results=cluster_pipeline(df_path,df_name,age_filter,drop_gender,drop_age,tune_umap,umap_distance,umap_neighbors,check_pca,plotting,num_scaler_name,cat_scaler_name,dim_red_method,dimension,a_f_decoder,a_f_encoder,batchsize,epochs,optimizer,loss_function,cluster_method,ellbow_method,n_cluster,anova,chi,top_features,merge_w_supervised,merge_w_inpatient)
                    print(x)
#run experiments in a loop:
# Grid search over reduction method x cluster method x dimension x cluster
# count; x counts completed runs.
df_path='Cohort/Feature_Extraction/ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab.parquet'
df_name='ALL_HF_cohort_unsupervised_only_after_onset_HF_ALL_all_any_all_mean_medium_cleaned_wLab'
age_filter=True
drop_gender=True
drop_age=True
tune_umap=True
umap_distance=0.1
umap_neighbors=50
check_pca=False
plotting=False
num_scaler_name="MinMaxScaler"
cat_scaler_name='BinaryEncoder'
# autoencoder settings unused for these methods
a_f_decoder=''
a_f_encoder=''
batchsize=''
epochs=''
optimizer=''
loss_function=''
ellbow_method=False
n_cluster=3
anova=False
t_test=False
chi=False
top_features=40
merge_w_supervised=False
merge_w_inpatient=False
arr_dim_red_method=['PCA','ICA','SVD','UMAP']
arr_cluster_method=['kmeans','hierarchical']
arr_dimension=[50,60,70,80,90]
arr_n_cluster=[3,4,5]
x=0
for r in arr_dim_red_method:
    dim_red_method=r
    for c in arr_cluster_method :
        cluster_method=c
        for d in arr_dimension:
            dimension=d
            for n in arr_n_cluster:
                n_cluster=n
                x=x+1
                # Bug fix: the original call omitted ``tune_umap``, shifting
                # every following positional argument by one (TypeError).
                df,evaluation_results=cluster_pipeline(df_path,df_name,age_filter,drop_gender,drop_age,tune_umap,umap_distance,umap_neighbors,check_pca,plotting,num_scaler_name,cat_scaler_name,dim_red_method,dimension,a_f_decoder,a_f_encoder,batchsize,epochs,optimizer,loss_function,cluster_method,ellbow_method,n_cluster,anova,chi,top_features,merge_w_supervised,merge_w_inpatient)
                print(x)
# ## Plotting of the result overview table
import seaborn as sns
# Load all recorded experiment metrics, rank them (higher calinski-harabasz /
# silhouette is better, lower davies-bouldin is better) and render a
# colour-graded table.
evaluation_metrics=pq.read_table('Cohort/Models/Metrics_Results.parquet').to_pandas()
cm = sns.light_palette("green", as_cmap=True)
evaluation_metrics=evaluation_metrics.sort_values([ "calinski_harabasz","silhouette_Coefficient", "davies_bouldin"], ascending = (False, False,True))
s = evaluation_metrics.style.background_gradient(cmap=cm)
s
#s.to_excel("Cohort/Models/10_26_Evaluation.xlsx")
# ## Creating new Result Array
# WARNING: running this cell OVERWRITES Cohort/Models/Metrics_Results.parquet
# with an empty table, discarding all previously recorded experiment metrics.
col=['Experiment Name','Dataset','Numerical Scaler','Categorical Scaler','Dimension Reduction Method','Number of Dimension','Activation Function Decoder','Activation Function Encoder','Number of Layer','batchsize','epochs','optimizer','loss function','validation loss','Cluster Method','Number of Cluster','silhouette_Coefficient' , 'calinski_harabasz' , 'davies_bouldin','silhouette_Coefficient_original_Cohort' , 'calinski_harabasz_original_Cohort' , 'davies_bouldin_original_Cohort']
result=pd.DataFrame(columns=col)
result.to_parquet('Cohort/Models/Metrics_Results.parquet')
evaluation_results=pq.read_table('Cohort/Models/Metrics_Results.parquet').to_pandas()
# +
import seaborn as sns
cm = sns.light_palette("green", as_cmap=True)
evaluation_results=evaluation_results.sort_values(["silhouette_Coefficient", "calinski_harabasz", "davies_bouldin"], ascending = (False, False,True))
s = evaluation_results.style.background_gradient(cmap=cm)
s
| Cluster_Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
import os
from django.conf import settings
# Manual settings.configure kept for reference; the shell-plus kernel
# already configures Django from the project settings.
# settings.configure({
#     'DEBUG': 'True',
#     'AUTH_USER_MODEL': 'accounts.MyUser',
#     'AUTHENTICATION_BACKENDS': [
#         'accounts.backends.EmailAuthenticationBackend'
#     ]
# })
from feed import models as feed_models
from accounts import models
# Allow ORM calls from the notebook's async event loop.
os.environ['DJANGO_ALLOW_ASYNC_UNSAFE'] = 'true'
# Grab an arbitrary user and the ids of the profiles they follow.
user = models.MyUser.objects.first()
follows = user.myuserprofile.follows.all()
l = follows.values_list('id', flat=True)
# NOTE(review): this accesses `.filter` without calling it — the line is a
# no-op. Presumably it was meant to be `.filter(...)` with a condition.
feed_models.Conversation.objects.filter
| .ipynb_checkpoints/queries-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# +
### Intersection of mrcanavar, PacBio CCS, and ONT high coverage regions to identify potential CNV
Generating excessive coverage bed files using calculations from mosdepth for both PacBio CCS 15kb_20kb merged and ONT bam files.
HG007vGRCh38_wm_ONT.sort.bam is from T2T Finishing Workshop globus /team-variants/read_aligns/hg005_grch37/HG007vGRCh38_wm_ONT.sort.bam
HG007.GRCh38.haplotagged_w_10x.bam is from
Chinesetrio-HG007.hg38.100x.bam.bilkentuniv.072319.dups.bed.gz is from https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/data/ChineseTrio/analysis/BilkentUni_mrCaNaVaR_GRCh38_07242019/Chinesetrio-HG007.hg38.100x.bam.bilkentuniv.072319.dups.bed.gz
HG007.GRCh38.pbsv.vcf.gz is from https://ftp-trace.ncbi.nlm.nih.gov/ReferenceSamples/giab/data/ChineseTrio/analysis/PacBio_deepvariant_gatk_pbsv_08282020/HG007/HG007.GRCh38.pbsv.vcf.gz
convert_mosdepth_to_excessive_coverage.py is at the end of this notebook
# +
### mosdepth commands
# +
# Per-1kb-window coverage for the PacBio HiFi bam, counting only MAPQ>=20 reads.
mosdepth -b 1000 --mapq 20 --no-per-base HG007_GRCh38_PacBio_HiFi_1000_window_size_MAPQ20 HG007.GRCh38.haplotagged.bam
# Drop secondary alignments (-F 0x100) from the ONT bam before computing coverage.
samtools view -1 -F 0x100 HG007vGRCh38_wm_ONT.sort.bam -h > HG007vGRCh38_wm_ONT.sort_filtered.bam
# Per-1kb-window coverage for the filtered ONT bam (-x: fast mode, no overlap correction).
mosdepth -b 1000 -x --no-per-base HG007_GRCh38_ONT_filtered_window_size HG007vGRCh38_wm_ONT.sort_filtered.bam
# +
### Find coverage levels of excessive coverage on CCS and ONT data in R
# +
# Restrict the analysis to the autosomes chr1-chr22.
chr_1_22 <- c("chr1","chr2","chr3","chr4","chr5","chr6","chr7","chr8","chr9","chr10","chr11","chr12","chr13","chr14","chr15","chr16","chr17","chr18","chr19","chr20","chr21","chr22")
# mosdepth regions.bed: one row per 1kb window with its mean depth.
mosdepth_PB_HiFi_1000_window_size = read.delim("HG007_GRCh38_PacBio_HiFi_1000_window_size_MAPQ20.regions.bed", col.names = c("CHR","START","END","DEPTH"))
mosdepth_PB_HiFi_1000_window_size_chr_1_22 <- mosdepth_PB_HiFi_1000_window_size[which(mosdepth_PB_HiFi_1000_window_size[,"CHR"] %in% chr_1_22),]
quantile(mosdepth_PB_HiFi_1000_window_size_chr_1_22[,"DEPTH"])
IQR(mosdepth_PB_HiFi_1000_window_size_chr_1_22[,"DEPTH"])
#: 15.67
# Excessive-coverage threshold: quantile(...)[3] is the median depth, so this
# is (median/2)*2.5 = 1.25x the median.
(quantile(mosdepth_PB_HiFi_1000_window_size_chr_1_22[,"DEPTH"])[3]/2)*2.5
#: 71.7375
# Same calculation for the filtered ONT coverage.
mosdepth_ONT_1000_window_size = read.delim("HG007_GRCh38_ONT_filtered_window_size.regions.bed", col.names = c("CHR","START","END","DEPTH"))
mosdepth_ONT_1000_window_size_chr_1_22 <- mosdepth_ONT_1000_window_size[which(mosdepth_ONT_1000_window_size[,"CHR"] %in% chr_1_22),]
quantile(mosdepth_ONT_1000_window_size_chr_1_22[,"DEPTH"])
IQR(mosdepth_ONT_1000_window_size_chr_1_22[,"DEPTH"])
#: 10.19
(quantile(mosdepth_ONT_1000_window_size_chr_1_22[,"DEPTH"])[3]/2)*2.5
#: 51.1625
# +
### GRCh38_mrcanavar_intersect_HG007_GRCh38_PacBio_HiFi_1000_window_size_GRCh38_cnv_threshold_intersect_ONT_1000_window_size_cnv_threshold.bed
### What this does: find potential CNVs in HG7 from intersecting coverage files from PacBio HiFi, ONT, and Illumina data. This generates PacBio HiFi excessive coverage bed, intersect with mrCaNaVar dups bed, generates ONT excessive coverage bed, intersects to these all to generate exclusion bed
# +
# Keep only PacBio HiFi windows above the 71.7375x threshold computed above.
python convert_mosdepth_to_excessive_coverage.py --input HG007_GRCh38_PacBio_HiFi_1000_window_size_MAPQ20.regions.bed --output HG007_GRCh38_PacBio_HiFi_1000_window_size_MAPQ20.regions_excessive_coverage_cnv_threshold.bed --threshold 71.7375
# Intersect the HiFi excessive-coverage windows with the mrCaNaVaR duplications bed.
bedtools intersect -a Chinesetrio-HG007.hg38.100x.bam.bilkentuniv.072319.dups.bed -b HG007_GRCh38_PacBio_HiFi_1000_window_size_MAPQ20.regions_excessive_coverage_cnv_threshold.bed > mrcanavar_intersect_HG007_GRCh38_PacBio_HiFi_1000_window_size_GRCh38_cnv_threshold.bed
# Keep only ONT windows above the 51.1625x threshold computed above.
python convert_mosdepth_to_excessive_coverage.py --input HG007_GRCh38_ONT_filtered_window_size.regions.bed --output HG007_GRCh38_ONT_filtered_window_size.regions_excessive_coverage_cnv_threshold.bed --threshold 51.1625
# Final exclusion bed: regions with excessive coverage in all three technologies.
bedtools intersect -a mrcanavar_intersect_HG007_GRCh38_PacBio_HiFi_1000_window_size_GRCh38_cnv_threshold.bed -b HG007_GRCh38_ONT_filtered_window_size.regions_excessive_coverage_cnv_threshold.bed > GRCh38_mrcanavar_intersect_HG007_GRCh38_PacBio_HiFi_1000_window_size_GRCh38_cnv_threshold_intersect_ONT_1000_window_size_cnv_threshold.bed
# Total number of bases covered by the exclusion bed.
cat GRCh38_mrcanavar_intersect_HG007_GRCh38_PacBio_HiFi_1000_window_size_GRCh38_cnv_threshold_intersect_ONT_1000_window_size_cnv_threshold.bed | awk '{sum+=$3-$2} END {print sum}'
# +
### HG007_GRCh38_PacBio_HiFi_1000_window_size_ONT_1000_combined_elliptical_outlier_threshold.bed
### What this does: Find another set of potential CNVs from computing a coverage threshold using an elliciptal outlier for PacBio HiFi and ONT CNV file steps in R
# +
# Re-load both coverage tables and restrict to autosomes.
mosdepth_PacBio_HiFi_1000_window_size = read.delim("HG007_GRCh38_PacBio_HiFi_1000_window_size_MAPQ20.regions.bed", col.names = c("CHR","START","END","DEPTH"))
mosdepth_ONT_1000_window_size = read.delim("HG007_GRCh38_ONT_filtered_window_size.regions.bed", col.names = c("CHR","START","END","DEPTH"))
chr_1_22 <- c("chr1","chr2","chr3","chr4","chr5","chr6","chr7","chr8","chr9","chr10","chr11","chr12","chr13","chr14","chr15","chr16","chr17","chr18","chr19","chr20","chr21","chr22")
mosdepth_PacBio_HiFi_1000_window_size_chr_1_22 <- mosdepth_PacBio_HiFi_1000_window_size[which(mosdepth_PacBio_HiFi_1000_window_size[,"CHR"] %in% chr_1_22),]
mosdepth_ONT_1000_window_size_chr_1_22 <- mosdepth_ONT_1000_window_size[which(mosdepth_ONT_1000_window_size[,"CHR"] %in% chr_1_22),]
df_mosdepth_PacBio_HiFi_1000_window_size <- data.frame(mosdepth_PacBio_HiFi_1000_window_size_chr_1_22)
df_mosdepth_ONT_1000_window_size <- data.frame(mosdepth_ONT_1000_window_size_chr_1_22)
# Combine both technologies into one table per window: CCS depth and ONT depth
# side by side. NOTE(review): assumes the two regions.bed files have identical
# window order — confirm before reuse.
df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined <- df_mosdepth_PacBio_HiFi_1000_window_size
df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined[,5] <- mosdepth_ONT_1000_window_size_chr_1_22[,4]
colnames(df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined) <- c("CHR", "START", "END", "CCS_DEPTH", "ONT_DEPTH")
df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined_elliptical_values <- df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined
medianccsdepth = median(mosdepth_PacBio_HiFi_1000_window_size_chr_1_22[,"DEPTH"])
medianontdepth = median(mosdepth_ONT_1000_window_size_chr_1_22[,"DEPTH"])
# Column 6: RMS of the median-normalized depths — the "elliptical" combined
# coverage score for each window.
df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined_elliptical_values[,6] <- sqrt(((df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined[,4]/medianccsdepth)^2 + (df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined[,5]/medianontdepth)^2)/2)
# Tukey-style outlier threshold: Q3 + 1.5*IQR of the combined score.
threshold_ellipctial_outlier = unname(quantile(df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined_elliptical_values[,6])[4]+(1.5*IQR(df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined_elliptical_values[,6])[1]))
df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined_outliers <- df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined_elliptical_values[which(df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined_elliptical_values[,6] > threshold_ellipctial_outlier),]
write.csv(df_mosdepth_PacBio_HiFi_1000_window_size_ONT_1000_combined_outliers, file = "HG007_GRCh38_PacBio_HiFi_1000_window_size_ONT_1000_combined_elliptical_outlier_threshold.bed", row.names = FALSE)
# +
### HG007_GRCh38_pbsv_SV_gt49bp_slop50_repeatexpanded_slop25percent.bed
### What this does: find SV calls from PacBio HiFi that will be excluded from HG7 in addition to the v0.6 GIAB SV calls
# +
#Form bed to exclude SV regions from the v4.2.1 benchmark
#NOTE(review): comment originally said HG003, but every file below is HG007 — confirm intent.
# Keep pbsv calls where REF or ALT is >49 bp and pad 50 bp on each side.
zgrep -v ^# HG007.GRCh38.pbsv.vcf.gz | awk '{FS=OFS="\t"} { if(length($4)>49 || length($5)>49) print $1,$2-50,$2+length($4)+50} ' > HG007.GRCh38.pbsv_gt49bp_slop50.bed
# Sort karyotypically: temporarily map X/Y to 23/24, drop alt/unplaced contigs, sort numerically, map back.
cat HG007.GRCh38.pbsv_gt49bp_slop50.bed | sed 's/^chr//' | sed 's/^X/23/;s/^Y/24/'| grep -Ev '^M|^[0-9][0-9]_|^[0-9]_|^[0-9]\||^[0-9][0-9]\||^Un|^HS'| sort -k1,1n -k2,2n -k3,3n | sed 's/^23/X/;s/^24/Y/'| sed 's/^[a-zA-Z0-9_]/chr&/' > HG007.GRCh38.pbsv_gt49bp_slop50_sorted.bed
# HG007_GRCh38_pbsv_SV_gt49bp_slop50_repeatexpanded_slop25percent.bed
# Expand SVs to overlapping tandem repeats, merge within 1kb, then add 25% slop (capped at 5kb) and clamp starts at 0.
intersectBed -wa -a GRCh38_AllTandemRepeatsandHomopolymers_slop5.bed.gz -b HG007.GRCh38.pbsv_gt49bp_slop50_sorted.bed | multiIntersectBed -i stdin HG007.GRCh38.pbsv_gt49bp_slop50_sorted.bed | mergeBed -i stdin -d 1000 | awk '{FS=OFS="\t"} { slop=int(0.25*($3-$2)); if(slop>5000) slop=5000; print $1, $2-slop, $3+slop}' | awk '{FS=OFS="\t"} { if($2<0) $2=0; print}' > GRCh38_HG007_GIABv4.2.1_SV_pbsv_slop25percent.bed
# -
# +
#### convert_mosdepth_to_excessive_coverage.py script
import argparse


def main():
    """Write BED records whose depth (column 4) exceeds a coverage threshold.

    Reads a mosdepth ``.regions.bed`` file (CHR, START, END, DEPTH tab-separated)
    and writes only the rows whose DEPTH is strictly greater than ``--threshold``.
    Lines containing the token "DEPTH" (header lines) are skipped.
    """
    parser = argparse.ArgumentParser(description="Subset bed file to callable regions only")
    # The notebook invokes this script with '--input' / '--output'; previously
    # that only worked through argparse's implicit prefix abbreviation, so the
    # short names are now declared explicitly as aliases.
    parser.add_argument('--input_file', '--input', metavar="I", type=str, nargs="+", help="input bed file")
    parser.add_argument('--output_file', '--output', metavar="O", type=str, nargs="+", help="output file")
    parser.add_argument('--threshold', metavar="T", type=float, nargs="+",
                        help="depth threshold; rows strictly above it are kept")
    args = parser.parse_args()

    threshold = args.threshold[0]
    # Stream line by line instead of reading the whole file into memory, and
    # let the context managers close both handles (no manual flush needed).
    with open(args.input_file[0], "r") as f_in, open(args.output_file[0], "w") as f_out:
        for line in f_in:
            if "DEPTH" in line:  # skip any header line
                continue
            depth = float(line.split("\t")[3])
            if depth > threshold:
                f_out.write(line)


if __name__ == "__main__":
    main()
| GRCh38/GenomeSpecific/HG007_GRCh38_CNV_exclusion_bed_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import numpy as np
import os,math,re,sys
#os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" #CPU Only
#os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'#GPU Running
import matplotlib.pylab as plt
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
AUTO = tf.data.experimental.AUTOTUNE
print("Tensorflow version " + tf.__version__)
# -
# devices=None lets MirroredStrategy pick up every available GPU (or fall back to CPU).
strategy = tf.distribute.MirroredStrategy(devices=None) #Use all available GPUs or CPU
print("REPLICAS:", strategy.num_replicas_in_sync)
# +
# Core training configuration.
IMG_SIZE = [512, 512]
BATCH_SIZE = 10
EPOCHS = 13

# Piecewise learning-rate schedule: linear ramp-up, flat sustain phase,
# then exponential decay toward LR_MIN.
LR_START = 0.00001
LR_MAX = 0.0001
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 0
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .95


def scheduler_epoch(epoch):
    """Return the learning rate for *epoch* (ramp-up / sustain / decay)."""
    if epoch < LR_RAMPUP_EPOCHS:
        # Linear interpolation from LR_START up to LR_MAX.
        return (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    if epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        # Hold at the peak learning rate.
        return LR_MAX
    # Exponential decay toward LR_MIN after the sustain phase ends.
    epochs_into_decay = epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS
    return (LR_MAX - LR_MIN) * LR_EXP_DECAY ** epochs_into_decay + LR_MIN
lr_callback = tf.keras.callbacks.LearningRateScheduler(scheduler_epoch, verbose=True)
# +
# Location of the 104-flowers TFRecord files; only the 512x512 variants are
# enabled, keyed by image side length so IMG_SIZE[0] selects the file.
DataSet_Path = '/104_Flowers'
Train_Size_Choice = {
    #192: DataSet_Path + '/jpeg-192x192_train.tfrecord',
    #224: DataSet_Path + '/jpeg-224x224_train.tfrecord',
    #311: DataSet_Path + '/jpeg-311x311_train.tfrecord',
    512: DataSet_Path + '/jpeg-512x512_train.tfrecord'
}
Val_Size_Choice = {
    #192: DataSet_Path + '/jpeg-192x192_val.tfrecord',
    #224: DataSet_Path + '/jpeg-224x224_val.tfrecord',
    #311: DataSet_Path + '/jpeg-311x311_val.tfrecord',
    512: DataSet_Path + '/jpeg-512x512_val.tfrecord'
}
TrainSet_Path = Train_Size_Choice[IMG_SIZE[0]]
ValSet_Path = Val_Size_Choice[IMG_SIZE[0]]
# Class names; list position is the integer label stored in the TFRecords.
CLASS = ['toad lily', 'love in the mist', 'monkshood', 'azalea', 'fritillary',
         'silverbush', 'canterbury bells', 'stemless gentian', 'pink primrose', 'buttercup',
         'poinsettia', 'desert-rose', 'bird of paradise', 'columbine', 'frangipani',
         'sweet pea', 'siam tulip', 'great masterwort', 'hard-leaved pocket orchid', 'marigold',
         'foxglove', 'wild pansy', 'windflower', 'daisy', 'tiger lily',
         'purple coneflower', 'orange dahlia', 'globe-flower', 'lilac hibiscus', 'fire lily',
         'balloon flower', 'iris', 'bishop of llandaff', 'yellow iris', 'garden phlox',
         'alpine sea holly', 'geranium', 'pink quill', 'tree poppy', 'spear thistle',
         'bromelia', 'common dandelion', 'sword lily', 'peruvian lily', 'carnation',
         'cosmos', 'spring crocus', 'lotus', 'bolero deep blue', 'anthurium',
         'rose', 'water lily', 'primula', 'blackberry lily', 'gaura',
         'trumpet creeper', 'globe thistle', 'sweet william', 'snapdragon', 'mexican petunia',
         'cyclamen ', 'petunia', 'gazania', 'king protea', 'blanket flower',
         'common tulip', 'giant white arum lily', 'wild rose', 'morning glory', 'thorn apple',
         'pincushion flower', 'tree mallow', 'canna lily', 'camellia', 'pink-yellow dahlia',
         'bee balm', 'wild geranium', 'artichoke', 'black-eyed susan', 'ruby-lipped cattleya',
         'clematis', 'prince of wales feathers', 'hibiscus', 'cautleya spicata', 'lenten rose',
         'red ginger', "colt's foot", 'hippeastrum ', 'mallow', 'californian poppy',
         'corn poppy', 'moon orchid', 'passion flower', 'grape hyacinth', 'japanese anemone',
         'watercress', 'cape flower', 'osteospermum', 'barberton daisy', 'bougainvillea',
         'magnolia', 'sunflower', 'daffodil', 'wallflower']
NUM_TRAINING_IMG = 12753
STEPS_PER_EPOCH = NUM_TRAINING_IMG // BATCH_SIZE
NUM_VALIDATION_IMG = 3712
VALIDATION_STEPS = -(-NUM_VALIDATION_IMG // BATCH_SIZE) #The "-(- // )" trick rounds up instead of down
# +
def decode_image(image_data):
    """Decode a JPEG byte string into a float32 image tensor of size IMG_SIZE."""
    decoded = tf.image.decode_jpeg(image_data, channels=3)  # JPEG bytes -> uint8 tensor
    decoded = tf.image.convert_image_dtype(decoded, tf.float32)
    # decode_jpeg yields a 3-D tensor, which is exactly what resize requires.
    return tf.image.resize(decoded, IMG_SIZE)
def read_tfrecord(example):
    """Parse one serialized example into an (image, label) tuple."""
    feature_spec = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
        'image_id': tf.io.FixedLenFeature([], tf.string),
    }
    parsed = tf.io.parse_single_example(example, feature_spec)  # returns a dict
    decoded = decode_image(parsed['image'])
    class_id = tf.cast(parsed['label'], tf.int64)  # 104 classes in [0, 103]
    # image_id is parsed but intentionally not returned.
    return decoded, class_id
def load_dataset(filenames, ordered = False):
    """Read TFRecords into a parsed (image, label) dataset.

    When *ordered* is False the pipeline may yield records out of order,
    trading determinism for read throughput.
    """
    options = tf.data.Options()
    if not ordered:
        # experimental_deterministic was renamed to `deterministic`.
        options.deterministic = False
    records = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
    records = records.with_options(options)
    return records.map(read_tfrecord, num_parallel_calls=AUTO)
def data_augment(image, label):
    """Random horizontal flip; the label passes through unchanged."""
    return tf.image.random_flip_left_right(image), label
def get_training_dataset():
    """Training pipeline: augment, repeat forever, shuffle, batch, prefetch."""
    pipeline = load_dataset(TrainSet_Path)  # ordered defaults to False
    pipeline = pipeline.map(data_augment, num_parallel_calls = AUTO)
    pipeline = pipeline.repeat().shuffle(2048).batch(BATCH_SIZE)
    # Prefetch overlaps preprocessing with model execution.
    return pipeline.prefetch(AUTO)
def get_validation_dataset(ordered = False):
    """Validation pipeline: batch and prefetch only — no augmentation or shuffle."""
    pipeline = load_dataset(ValSet_Path, ordered = ordered)
    pipeline = pipeline.batch(BATCH_SIZE)
    return pipeline.prefetch(AUTO)
# -
# Peek at a few batches to confirm tensor shapes and label values.
print("Training data shapes:")
for image, label in get_training_dataset().take(3):
    print(image.numpy().shape, label.numpy().shape)
# `image`/`label` keep the values from the last iteration of the loop above.
print("Training label examples:", label.numpy())
labelarray = label.numpy()
print(labelarray[0],type(labelarray[0]))
imagearray = image.numpy()
print(imagearray[0], type(imagearray[0]))
# +
# Build a SqueezeNet-style classifier inside the distribution strategy scope
# so variables are mirrored across replicas.
with strategy.scope():
    def fire(x, squeeze, expand):
        # Squeeze: 1x1 conv down to `squeeze` channels.
        y = tf.keras.layers.Conv2D(filters=squeeze, kernel_size=1, activation='relu', padding='same')(x)
        y = tf.keras.layers.BatchNormalization()(y)
        #contraction stage
        y1 = tf.keras.layers.Conv2D(filters=expand//2, kernel_size=1, activation='relu', padding='same')(y)
        y1 = tf.keras.layers.BatchNormalization()(y1)
        #expansion stage 1*1 filter
        y3 = tf.keras.layers.Conv2D(filters=expand//2, kernel_size=3, activation='relu', padding='same')(y)
        y3 = tf.keras.layers.BatchNormalization()(y3)
        # expansion stage 3*3 filter
        return tf.keras.layers.concatenate([y1, y3])
        #combine y1 and y3 into a tensor which has channels of (y1 + y3) channels

    def fire_module(squeeze, expand):
        # Returns a layer-like callable so fire modules compose functionally.
        return lambda x: fire(x, squeeze, expand)

    x = tf.keras.layers.Input(shape=[*IMG_SIZE, 3])
    # Stem convolution before the fire modules.
    y = tf.keras.layers.Conv2D(kernel_size=3, filters=32, padding='same', use_bias=True, activation='relu')(x)#[3,3,3,32]
    #[512,512,3] --> [512,512,32]
    y = tf.keras.layers.BatchNormalization()(y)
    # add this input layer
    y = fire_module(16, 32)(y)
    ''' contraction stage [1,1,32,16]     [512,512,32] --> [512,512,16]
        expansion stage   [1,1,16,16]     [512,512,16] --> [512,512,16]
                          [3,3,16,16]     [512,512,16] --> [512,512,16]
        concatenate                       [512,512,32]
    '''
    y = tf.keras.layers.MaxPooling2D(pool_size=2)(y)
    #[512,512,32] --> [256,256,32]
    y = fire_module(48, 96)(y)
    ''' contraction stage [1,1,32,48]     [256,256,32] --> [256,256,48]
        expansion stage   [1,1,48,48]     [256,256,48] --> [256,256,48]
                          [3,3,48,48]     [256,256,48] --> [256,256,48]
        concatenate                       [256,256,96]
    '''
    y = tf.keras.layers.MaxPooling2D(pool_size=2)(y)
    #[256,256,96] --> [128,128,96]
    # Progressively wider fire modules separated by spatial downsampling.
    y = fire_module(64, 128)(y)
    y = fire_module(80, 160)(y)
    y = fire_module(96, 192)(y)
    y = tf.keras.layers.MaxPooling2D(pool_size=2)(y)
    y = fire_module(112, 224)(y)
    y = fire_module(128, 256)(y)
    y = fire_module(160, 320)(y)
    y = tf.keras.layers.MaxPooling2D(pool_size=2)(y)
    y = fire_module(192, 384)(y)
    y = fire_module(224, 448)(y)
    y = tf.keras.layers.MaxPooling2D(pool_size=2)(y)
    y = fire_module(256, 512)(y)
    # Global average pooling + softmax head over the 104 classes.
    y = tf.keras.layers.GlobalAveragePooling2D()(y)
    y = tf.keras.layers.Dense(len(CLASS), activation='softmax', name='flower_prob')(y)

    model = tf.keras.Model(x, y)
    # Integer labels -> sparse categorical loss/metric.
    model.compile(
        optimizer='adam',
        loss = 'sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy'],
        steps_per_execution=1
    )
    model.summary()
# -
# Train with the LR-schedule callback; step counts were derived from the
# dataset sizes above.
history = model.fit(
    get_training_dataset(), steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS,
    validation_data=get_validation_dataset(), validation_steps=VALIDATION_STEPS,
    callbacks=[lr_callback]
)
# +
def display_training_curves(training, validation, title, subplot, zoom_pcent=None, ylim=None):
    """Plot one train/validation metric pair into the given subplot position.

    training/validation: per-epoch metric lists from history.history.
    subplot: matplotlib 3-digit subplot code (e.g. 211); when it ends in 1 a
        new figure is created, so call with the x11 position first.
    zoom_pcent: X autoscales y axis for the last X% of data points.
    ylim: explicit (ymin, ymax) override; applied after zoom_pcent if both given.
    """
    if subplot%10==1: # set up the subplots on the first call
        plt.subplots(figsize=(10,10), facecolor='#F0F0F0')
        plt.tight_layout()
    ax = plt.subplot(subplot)
    ax.set_facecolor('#F8F8F8')
    ax.plot(training)
    ax.plot(validation, '--')
    ax.set_title('model '+ title)
    ax.set_ylabel(title)
    if zoom_pcent is not None:
        # Autoscale using only the tail of the curves, with 5% padding.
        ylen = len(training)*(100-zoom_pcent)//100
        ymin = min([min(training[ylen:]), min(validation[ylen:])])
        ymax = max([max(training[ylen:]), max(validation[ylen:])])
        ax.set_ylim([ymin-(ymax-ymin)/20, ymax+(ymax-ymin)/20])
    if ylim is not None:
        ymin = ylim[0]
        ymax = ylim[1]
        ax.set_ylim([ymin-(ymax-ymin)/20, ymax+(ymax-ymin)/20])
    ax.set_xlabel('epoch')
    ax.legend(['train', 'valid.'])

# Loss on top, accuracy below, in one 2-row figure.
display_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 211, ylim=[0,2.5])
display_training_curves(history.history['sparse_categorical_accuracy'], history.history['val_sparse_categorical_accuracy'], 'accuracy', 212)
# -
# Build ordered validation splits so predictions line up with labels.
cmdataset = get_validation_dataset(ordered=True) # since we are splitting the dataset and iterating separately on images and labels, order matters.
images_ds = cmdataset.map(lambda image, label: image)
labels_ds = cmdataset.map(lambda image, label: label).unbatch()
cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMG))).numpy() # get everything as one batch
cm_probabilities = model.predict(images_ds, steps=VALIDATION_STEPS)
# Most probable class per sample.
cm_predictions = np.argmax(cm_probabilities, axis=-1)
print("Correct labels: ", cm_correct_labels.shape, cm_correct_labels)
print("Predicted labels: ", cm_predictions.shape, cm_predictions)
# +
def display_confusion_matrix(cmat, score, precision, recall):
    """Render the (normalized) confusion matrix as a heatmap with class ticks.

    score/precision/recall are accepted for the (currently disabled) title
    annotation below.
    """
    plt.figure(figsize=(25,25))
    ax = plt.gca()
    ax.matshow(cmat, cmap='Reds')
    ax.set_xticks(range(len(CLASS)))
    ax.set_xticklabels(CLASS)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="left", rotation_mode="anchor")
    ax.set_yticks(range(len(CLASS)))
    ax.set_yticklabels(CLASS)
    plt.setp(ax.get_yticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # Optional metric annotation, kept for reference:
    #titlestring = ""
    #if score is not None:
    #    titlestring += 'f1 = {:.3f} '.format(score)
    #if precision is not None:
    #    titlestring += '\nprecision = {:.3f} '.format(precision)
    #if recall is not None:
    #    titlestring += '\nrecall = {:.3f} '.format(recall)
    #if len(titlestring) > 0:
    #    ax.text(101, 1, titlestring, fontdict={'fontsize': 18, 'horizontalalignment':'right', 'verticalalignment':'top', 'color':'#804040'})
    plt.show()

# Macro-averaged metrics over all 104 classes.
cmat = confusion_matrix(cm_correct_labels, cm_predictions, labels=range(len(CLASS)))
score = f1_score(cm_correct_labels, cm_predictions, labels=range(len(CLASS)), average='macro')
precision = precision_score(cm_correct_labels, cm_predictions, labels=range(len(CLASS)), average='macro')
recall = recall_score(cm_correct_labels, cm_predictions, labels=range(len(CLASS)), average='macro')
cmat = (cmat.T / cmat.sum(axis=1)).T # normalized
display_confusion_matrix(cmat, score, precision, recall)
print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall))
# -
| 2.Convolutional Neural Network/ipynb Files/4.1 SqueezeNet_104flowers_online.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pickle
import vps_tester
from hetznercloud import constants
# -
# ### Save keys
# +
# Fill in your credentials before running this cell; they are pickled to
# secret.pkl so later sessions can load them without hard-coding.
hetzner_api_key = ''
ssh_file_name = ''
# BUG FIX: the file must be opened for *writing* in binary mode ('wb');
# the original opened it with 'rb', so pickle.dump failed on a read-only handle.
with open('secret.pkl', 'wb') as f:
    pickle.dump({'hetzner_api_key': hetzner_api_key, 'ssh_file_name': ssh_file_name}, f)
# -
# ### Load keys
# +
# Restore the credentials previously pickled to secret.pkl.
with open('secret.pkl', 'rb') as f:
    secret = pickle.load(f)
ssh_file_name = secret['ssh_file_name']
hetzner_api_key = secret['hetzner_api_key']
# -
# ### Test single instance performance
# Benchmark a single small instance in the Helsinki datacenter.
h = vps_tester.HetznerTester(hetzner_api_key, ssh_file_name)
h.test_vps(constants.DATACENTER_HELSINKI_1, constants.SERVER_TYPE_1CPU_2GB)
# ### Compare different instance types
# Benchmark several instance types in the same datacenter and print each score.
for server_type in [constants.SERVER_TYPE_1CPU_2GB,
                    constants.SERVER_TYPE_2CPU_8GB,
                    constants.SERVER_TYPE_2CPU_8GB_DVCPU,
                    ]:
    result, _ = h.test_vps(constants.DATACENTER_HELSINKI_1, server_type)
    print(f'server type: {server_type}, result: {result}')
# ### Compare different datacenters
# Benchmark the same instance type across all three datacenters.
for datacenter in [constants.DATACENTER_FALKENSTEIN_1,
                   constants.DATACENTER_HELSINKI_1,
                   constants.DATACENTER_NUREMBERG_1,
                   ]:
    result, _ = h.test_vps(datacenter, constants.SERVER_TYPE_1CPU_2GB)
    print(f'datacenter: {datacenter}, result: {result}')
# ### Find best instance in any datacenter, shutdown others
# +
# Benchmark the same instance type in every datacenter, keeping the servers
# alive (kill_instantly=False) so the best one can be retained afterwards.
servers = []
results = []
for datacenter in [constants.DATACENTER_FALKENSTEIN_1,
                   constants.DATACENTER_HELSINKI_1,
                   constants.DATACENTER_NUREMBERG_1,
                   ]:
    result, server = h.test_vps(datacenter,
                                constants.SERVER_TYPE_1CPU_2GB,
                                kill_instantly=False)
    results.append(int(result))
    servers.append(server)
    print(f'datacenter: {datacenter}, result: {result}')
# BUG FIX: `index` and `min` are callables, not subscriptable — the original
# `servers[results.index[min[results]]]` raised TypeError. The lowest score
# wins here (min), matching the original expression's intent.
best_server = servers[results.index(min(results))]
# BUG FIX: the original comprehension `[s in servers if ...]` was missing its
# `for` clause and did not parse.
for server in [s for s in servers if s is not best_server]:
    h.delete_server(server)
print(f'best one: {best_server.public_net_ipv4}')
| examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
# Tiny corpus used to demonstrate the bag-of-words representation.
documents = ['This is the first sentence.',
             'This one is the second sentence.',
             'And this is the third one.',
             'Is this the first sentence?']
vectorizer = CountVectorizer()
# Sparse document-term count matrix; one row per document.
X = vectorizer.fit_transform(documents)
# X.toarray() is the dense bag-of-words (BoW) matrix
print(X.toarray())
# -
# Get the unique words (the learned vocabulary, in column order).
# get_feature_names() was deprecated in scikit-learn 1.0 and removed in 1.2;
# get_feature_names_out() is the drop-in replacement.
print(vectorizer.get_feature_names_out())
# each column of the count matrix above corresponds to one of these words,
# and each row counts its occurrences in the matching sentence
# +
# supervised learning vs unsupervised learning
#
# supervised learning includes linear regression, logistic regression, support vector machine
# this is called supervised because it infers a function from labeled training data
# consisting of a set of training examples
#
# unsupervised learning includes principal component analysis and clustering
# unsupervised learning attempts to find previously unknown patterns in data, without preexisting labels
# +
# `figures` is a local helper module shipped alongside this notebook.
from figures import plot_kmeans_interactive
plot_kmeans_interactive()
# +
# FIX: sklearn.datasets.samples_generator was deprecated in scikit-learn 0.22
# and removed in 0.24; make_blobs is importable from sklearn.datasets directly.
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
# Four well-separated Gaussian blobs; the fixed seed keeps the demo reproducible.
X, y = make_blobs(n_samples=300, centers=4,
                  random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1])
# +
from sklearn.cluster import KMeans
# Fit k-means with the true number of blobs and show the learned centers.
km = KMeans(n_clusters=4)
km.fit(X)
print(km.cluster_centers_)
# +
import numpy as np
from scipy.spatial import distance
# Distortion for k = 1..9: mean distance from each point to its nearest center.
distortions = []
K = range(1, 10)
for k in K:
    km = KMeans(n_clusters=k)
    km.fit(X)
    distortions.append(sum(np.min(distance.cdist(X, km.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])
# Plot the elbow
plt.plot(K, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.show()
# -
def optimal(dist_arr):
    """Pick the elbow k: the 1-based position where the slope changes most.

    Only interior points are considered (the endpoints have no two adjacent
    slopes); ties keep the earliest k, and 0 is returned when no interior
    point has a positive slope change (e.g. fewer than 3 values).
    """
    best_change = 0
    best_k = 0
    for i in range(1, len(dist_arr) - 1):
        left_slope = dist_arr[i - 1] - dist_arr[i]
        right_slope = dist_arr[i] - dist_arr[i + 1]
        change = abs(left_slope - right_slope)
        if change > best_change:
            best_change = change
            best_k = i + 1
    return best_k
# Apply the elbow heuristic to the distortions computed above.
optimal(distortions)
# +
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
# Small corpus with two implicit topics (cats vs. Google products) for the
# text-clustering demo below.
documents = ["This little kitty came to play when I was eating at a restaurant.",
             "Merley has the best squooshy kitten belly.",
             "Google Translate app is incredible.",
             "If you open 100 tab in google you get a smiley face.",
             "Best cat photo I've ever taken.",
             "Climbing ninja cat.",
             "Impressed with google map feedback.",
             "Key promoter extension for Google Chrome."]
# -
# vec = CountVectorizer()
# TF-IDF weighting with English stop words removed.
vec = TfidfVectorizer(stop_words='english')
J = vec.fit_transform(documents)
print(J.toarray()) # dense TF-IDF matrix: one row per document, one column per vocabulary term
# FIX: get_feature_names() was deprecated in scikit-learn 1.0 and removed in
# 1.2; get_feature_names_out() is the drop-in replacement.
print(vec.get_feature_names_out())
print(J.shape)
# Cluster the TF-IDF vectors into two groups (cats vs. Google, ideally).
model = KMeans(n_clusters=2, init='k-means++')
model.fit(J)
# Predict the cluster of unseen sentences by transforming them with the
# *fitted* vectorizer first.
Y = vec.transform(["chrome browser to open."])
print('Y:')
print(Y.toarray())
prediction = model.predict(Y)
print(prediction)
Y = vec.transform(["My cat is hungry."])
prediction = model.predict(Y)
print(prediction)
model.get_params()
# Print each training sentence next to its assigned cluster.
for index, sentence in enumerate(documents):
    print(sentence)
    print(model.predict(J[index]))
| notebooks/Clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: test_d
# language: python
# name: test_d
# ---
# %config Completer.use_jedi = False
# %load_ext autoreload
# %autoreload 2
from pathlib import Path
# %gui qt5
# # Utility notebook used to visualize the raw counting
# The counting is plotted on the preprocessed image. It requires all the rounds corresponding to a specific channel and fov.
#
# Requirements
# -------------
#
# - EXP-NAME_preprocessed_img_data.zarr
# - raw_counts files in the results folder
# - dataset
#
#
# __IMPORTANT__
# The visualization uses napari so it can be run only locally
from pysmFISH.visualization import visualize_raw_counts
# Experiment folder and the dataset parquet produced by the pipeline.
experiment_fpath = '/Users/simone.codeluppi/Documents/data_analysis_jlabs_sc/notebooks_pysmFISH/processing_folder/LBEXP20210718_EEL_Mouse_448_2'
dataset_name = '210906_21_07_09_LBEXP20210718_EEL_Mouse_448_2_img_data_dataset.parquet'
# Define what to visualize
fov_num = 3
channel = 'Cy5'
# + tags=[]
# Opens a napari viewer, so this must run locally (see the notes above).
visualize_raw_counts(experiment_fpath,dataset_name,
                     fov_num, channel)
# -
| notebooks/05-Visualize_raw_counting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={}
# ### Downloading Spleen segmentation dataset from the Medical Segmentation Decathlon (Task09_Spleen)
#
# + [markdown] pycharm={"metadata": false}
# In order to showcase the performance features within Clara we need a test dataset.
# For the associated [Performance Spleen Notebook](Performance/PerformanceSpleen.ipynb).
#
# + [markdown] pycharm={}
# This spleen dataset was comprised of patients undergoing chemotherapy treatment for liver metastases at Memorial Sloan Kettering Cancer Center (New York, NY, USA).
# Sixty-one portalvenous phase CT scans were included with CT acquisition and reconstruction parameters similar to the Task08_HepaticVessel dataset.
# The spleen was semi-automatically segmented using the Scout application.
# A spline was drawn on the region of interest and grown using a level-set based approach. Contours were manually adjusted by an expert abdominal radiologist.
# See [A large annotated medical image dataset for the development and evaluation of segmentation algorithms](https://arxiv.org/abs/1902.09063) for additional details.
#
# + [markdown] pycharm={"metadata": false}
# ### Downloading the data
#
# The data used for this task is Task09_Spleen.tar from http://medicaldecathlon.com/. To download the data you could:
# 1. Download the tar file can be found directly at https://drive.google.com/drive/folders/1HqEgzS8BV2c7xYNrZdEAnrHk7osJJ--2
# 2. Running the `download_data.sh` script.
# ```
# # # ! chmod 777 /claraDevDay/download_data.sh
# # # ! /claraDevDay/download_data.sh
# ```
# 3. Running the cells below
# + pycharm={"metadata": false, "name": "#%%\n"}
# create directory and download cookie
# Google Drive file id and destination for the Task09_Spleen archive.
FILEID="1jzeNU1EKnK81PyTsrx0ujfNl-t0Jo8uE"
FILNAME="Task09_Spleen.tar"
DEST_DIR="/claraDevDay/spleenData/"
# !mkdir -p $DEST_DIR
# First request only stores the Drive confirmation cookie; output is discarded.
cmd='curl -c '+DEST_DIR+'/cookie -s -L "https://drive.google.com/uc?export=download&id='+FILEID+'" > /dev/null'
print(cmd)
# ! $cmd
# + pycharm={"metadata": false, "name": "#%%\n"}
# start the download
# Second request replays the cookie and extracts the confirm token with awk
# to bypass Drive's large-file warning page.
cmd='curl -Lb '+DEST_DIR+'/cookie '
cmd+='"https://drive.google.com/uc?export=download&confirm=`awk '
cmd+="'/download/ {print $NF}' "
cmd+=DEST_DIR+'/cookie`&id="'+FILEID
cmd+=" -o "+DEST_DIR+FILNAME
print(cmd)
# ! $cmd
# + pycharm={"metadata": false, "name": "#%%\n"}
# !ls $DEST_DIR
# + [markdown] pycharm={"metadata": false}
# ### After downloading the Data
# Now lets unzip the downloaded data
# + pycharm={"metadata": false, "name": "#%%\n"}
# !ls $DEST_DIR
# !tar -C $DEST_DIR -xvf $DEST_DIR$FILNAME
# + [markdown] pycharm={"metadata": false}
#
# Now lets see that data files
# + pycharm={"metadata": false, "name": "#%%\n"}
# !ls -la $DEST_DIR"Task09_Spleen"
# + [markdown] pycharm={"metadata": false}
#
# For some exercises in the Notebooks you would want to convert the data to a 1mm resolution.
#
# + pycharm={"metadata": false, "name": "#%%\n"}
# !nvmidl-dataconvert -d /claraDevDay/spleenData/Task09_Spleen -r 1 -s .nii.gz -e .nii.gz -o /claraDevDay/spleenData/Task09_Spleen_1x1x1mm
| NoteBooks/Data_Download.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import tarfile
import dataset
# # tar_path = 'data/datapack2.0train.tar'
# # tar = tarfile.open(tar_path)
# -
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import sklearn
import matplotlib.pyplot as plt
x = dataset.GG2('gg2/')
len(x)
'Testing with one image'
images, labels = x[430]
print(x[430][0][0].shape)
x[430][0][1].shape
x[430]
print([x ** 2 for x in range(5) if x % 2 == 0])
[labels['n_sources'] for images, labels in [x[i] for i in range(5)]]
vis, others = images
print(vis.shape)
print(others.shape)
print([4, 200, 200]) #what we need
# +
plt.imshow(vis.squeeze(), cmap = 'gray')
print('label:', labels['n_sources'])
# -
others_scaled = upsample(others[None])
# +
#list(zip(*[[1, 2, 3], [3, 4, 5], ['a', 'b', 'c']]))
# list(zip(*[
# sorted([x for x in tar.getnames() if '.fits' in x and band in x])
# for band in ("EUC_VIS", "EUC_J", "EUC_Y", "EUC_H")
# ]))
# -
others_scaled = upsample(others[None])
others_scaled.shape
combined = torch.cat([vis[None], others_scaled], dim=1)
combined.shape
model(combined)
#pred = model(vis[None].repeat(1, 3, 1, 1)).argmax().item()
pred = model(combined).argmax().item()
'Testing with a sample size of 500 images'
sample_size = 500
img = []
lblfull = []
for i in range(sample_size):
image, label = x[i]
img.append(image)
lblfull.append(label)
lbl = []
for i in range(sample_size):
labeln = lblfull[i]['n_sources']
lbl.append(labeln)
lbl = torch.tensor(lbl, dtype = torch.int32) # convert labels to tensor with integers
lbl.bincount()#unbalanced
gray = []
color = []
for i in range(sample_size):
gray_img, color_img = img[i]
gray.append(gray_img)
color.append(color_img)
gray = torch.stack(gray)
color = torch.stack(color)
print(gray.shape)
print(color.shape)
print([sample_size, 4, 200, 200]) #what we need
upsample = torch.nn.Upsample(size = (200,200), mode='bilinear', align_corners=True)
clr_scld = upsample(color)
print(clr_scld.shape)
data = torch.cat([gray, clr_scld], dim=1)
data.shape
model = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'efficientnet_b0', pretrained=True)
model.conv_stem = torch.nn.Conv2d(4, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
model.classifier = torch.nn.Linear(1280, 1)
print(model)
# +
import numpy as np
indices = np.random.choice(range(len(lbl)), 5, p=np.where(lbl == 1, 0.5 / sum(lbl == 1).item(), 0.5 / sum(lbl == 0).item()))
batch = data[indices]
grid = torchvision.utils.make_grid(batch[0:10, 0:1])
plt.figure(figsize=(15,15))
plt.imshow(np.transpose(grid, (1,2,0)))
print('labels:', lbl[indices])
# -
torch.set_grad_enabled(True)
# +
batch_size = 5
indices = np.random.choice(range(len(lbl)), batch_size, p=np.where(lbl == 1, 0.5 / sum(lbl == 1).item(), 0.5 / sum(lbl == 0).item()))
# pick randomly a number of *batch_size* images from the dataset with equal probabilities for 0 and 1
img_batch, lbl_batch = data[indices], lbl[indices] # [batch_size, 4, 200, 200]
pred = model(img_batch)
#loss = F(output, lbl_batch) = relu(1 - output * lbl_batch).mean()
#loss = F.cross_entropy(pred, torch.tensor(lbl_batch, dtype = torch.int64))
loss = torch.nn.functional.softplus(1 + torch.exp(-pred * (lbl_batch.float() * 2 - 1))).mean()
#loss = abs(lbl_batch - pred.T).mean()
#optimizer.zero_grad()
loss.backward()
# -
lbl_batch
model.conv_stem.weight.grad.shape # first layer gradient shape
optimizer = torch.optim.SGD(
list(model.conv_stem.parameters()) + list(model.classifier.parameters()),
lr=0.001, momentum=0.99)
# +
def get_num_correct(preds, labels):
    """Count how many rows of ``preds`` have their argmax class equal to the label.

    ``preds`` is a (batch, classes) score tensor; ``labels`` a (batch,) tensor.
    """
    predicted_classes = preds.argmax(dim=1)
    return int((predicted_classes == labels).sum())
print(loss.item())
get_num_correct(pred, lbl_batch)
# -
optimizer.step() # update weights
# +
pred = model(img_batch)
loss = abs(lbl_batch - pred.T).mean()
loss = torch.nn.functional.softplus(-pred * (lbl_batch.float() * 2 - 1)).mean()
print(loss.item())
get_num_correct(pred, lbl_batch)
# -
optimizer = torch.optim.SGD(
list(model.conv_stem.parameters()) + list(model.classifier.parameters()),
lr=0.1, momentum=0.99)
# +
data = [1, 2, 3]
for x in data:
print(x)
it = iter(data)
while True:
try:
x = next(it)
except StopIteration:
break
print(x)
# -
# +
import torch
is_torchvision_installed = True
try:
import torchvision
except:
is_torchvision_installed = False
import torch.utils.data
import random
class BalancedBatchSampler(torch.utils.data.sampler.Sampler):
    """Sampler that yields indices so that every class is equally represented.

    Classes with fewer samples than the largest class are oversampled
    (indices repeated at random) until all classes have the same count;
    iteration then round-robins over the classes.
    """

    def __init__(self, dataset, labels=None):
        self.labels = labels
        self.dataset = dict()
        self.balanced_max = 0
        # Save all the indices for all the classes
        for idx in range(0, len(dataset)):
            label = self._get_label(dataset, idx)
            if label not in self.dataset:
                self.dataset[label] = list()
            self.dataset[label].append(idx)
            self.balanced_max = max(self.balanced_max, len(self.dataset[label]))
        # Oversample the classes with fewer elements than the max
        for label in self.dataset:
            while len(self.dataset[label]) < self.balanced_max:
                self.dataset[label].append(random.choice(self.dataset[label]))
        self.keys = list(self.dataset.keys())
        self.currentkey = 0
        self.indices = [-1] * len(self.keys)

    def __iter__(self):
        while self.indices[self.currentkey] < self.balanced_max - 1:
            self.indices[self.currentkey] += 1
            yield self.dataset[self.keys[self.currentkey]][self.indices[self.currentkey]]
            self.currentkey = (self.currentkey + 1) % len(self.keys)
        # Reset so the sampler can be iterated again for the next epoch.
        self.indices = [-1] * len(self.keys)

    def _get_label(self, dataset, idx, labels=None):
        # BUG FIX: the ``labels`` sequence accepted by __init__ was stored but
        # never consulted, forcing every dataset to expose a ``transform``
        # attribute. Use the explicit labels when provided.
        if self.labels is not None:
            return self.labels[idx]
        # Fall back to reading the target from the dataset itself; temporarily
        # disable its transform so no (potentially expensive) image processing runs.
        transform = dataset.transform
        dataset.transform = None
        x, y = dataset[idx]
        dataset.transform = transform
        return y

    def __len__(self):
        return self.balanced_max * len(self.keys)
# +
from astropy.io import fits
def transform(images):
    """Load four FITS band files, scale them, and stack into one tensor.

    The first band (VIS) is kept at its native resolution; the remaining
    three bands are bilinearly upsampled to 200x200 before concatenation,
    yielding a (4, 200, 200) tensor.
    """
    loaded = []
    for path in images:
        band_data = fits.open(path, memmap=False)[0].data
        loaded.append(torch.from_numpy(band_data.byteswap().newbyteorder()))
    # Per-band scale factors applied elementwise.
    normalize = [3.5239e+10, 1.5327e+09, 1.8903e+09, 1.2963e+09]
    scaled = [band.mul(factor) for band, factor in zip(loaded, normalize)]
    vis = scaled[0].unsqueeze(0)
    others = torch.stack(scaled[1:])
    resize = torch.nn.Upsample(size=(200, 200), mode='bilinear', align_corners=True)
    others = resize(others[None]).squeeze(0)
    return torch.cat([vis, others], dim=0)
def target_transform(labels):
    """Map the ``n_sources`` count onto a signed float target (0 -> -1.0, 1 -> +1.0)."""
    n_sources = labels['n_sources']
    return float(2 * n_sources - 1)
x = dataset.GG2('gg2/', transform=transform, target_transform=target_transform)
loader = torch.utils.data.DataLoader(x, batch_size=10, sampler=BalancedBatchSampler(x))
# -
for batch in loader:
x, y = batch
print(x.shape)
print(y)
break
# +
# Short fine-tuning loop: sample a class-balanced batch, take one SGD step,
# then re-evaluate on the same batch (so the printed number is training
# accuracy, not held-out accuracy).
model.train(mode=True)
iters = 3
batch_size = 10
for step in range(iters):
    # Each class gets total sampling probability 0.5, spread uniformly over
    # its members, so batches are balanced between labels 0 and 1.
    indices = np.random.choice(range(len(lbl)), batch_size, p=np.where(lbl == 1, 0.5 / sum(lbl == 1).item(), 0.5 / sum(lbl == 0).item()))
    # pick randomly a number of *batch_size* images from the dataset with equal probabilities for 0 and 1
    img_batch, lbl_batch = data[indices], lbl[indices] # [batch_size, 4, 200, 200]
    pred = model(img_batch)
    # L1 distance between the integer labels and the raw (unbounded) model outputs.
    loss = abs(lbl_batch - pred.T).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    #if step % 10 == 0:
    # NOTE(review): the model's classifier is Linear(1280, 1), so
    # get_num_correct's argmax(dim=1) is always 0 and "accuracy" here equals
    # the fraction of zero labels in the batch — confirm this is intended.
    pred = model(img_batch)
    test_accuracy = get_num_correct(pred, lbl_batch) / len(lbl_batch)
    print(test_accuracy)
# -
obj = [1, 2, 3]
# +
import pickle
with open('test.pkl', 'wb') as f:
pickle.dump(obj, f)
# +
import pickle
with open('test.pkl', 'rb') as f:
obj = pickle.load(f)
print(obj)
# -
sklearn.metrics.roc_curve(lbl_batch, pred)
torch.save(model.state_dict(), 'data/')
| Mario2.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Récupérer les données Velib (JCDecaux) et les visualiser
#
# La société [JCDecaux](http://www.jcdecaux.com/fr/pour-nos-partenaires/velos-en-libre-service) met à disposition les données Velib pour toutes les villes dont il gère le service (Besançon, Nantes, Lyon, ...). Les données sont accessibles en temps réel via une API assez simple : [JCDecaux Developer](https://developer.jcdecaux.com/#/home). Le module [manydataapi](http://www.xavierdupre.fr/app/manydataapi/helpsphinx/index.html) inclut désormais la classe [DataCollectJCDecaux](http://www.xavierdupre.fr/app/manydataapi/helpsphinx/manydataapi/velib/data_jcdecaux.html?highlight=velib#velib.data_jcdecaux.DataCollectJCDecaux) qui permet de récupérer ces données et d'effectuer quelques traitements simples.
# Tout d'abord, il est nécessaire d'obtenir une clé en vous enregistrant sur le site de JCDecaux : [Votre compte développeur](https://developer.jcdecaux.com/#/signup). Le code suivant permet de récupérer des données pendant 5 minutes :
# %matplotlib inline
from manydataapi.velib import DataCollectJCDecaux
import os
import datetime
folder = os.path.abspath("data_velib")
if not os.path.exists(folder):
os.makedirs(folder)
import keyring
key = keyring.get_password("<PASSWORD>", "<PASSWORD>")
stop = datetime.datetime.now() + datetime.timedelta(minutes=2)
stop
DataCollectJCDecaux.run_collection(key, contract="besancon",
folder_file=os.path.join(folder, "besancon"), stop_datetime=stop,
delayms=20000) # remplacer par None pour récupérer les données sans s'arrêter
# On liste les fichiers dans le répertoire :
os.listdir(folder)
# On construit un DataFrame à partir de ces données :
df = DataCollectJCDecaux.to_df(folder, "^besancon.*[.]txt")
df.to_csv(os.path.join(folder, "resume_besancon.txt"), sep="\t", index=False, encoding="utf8")
df.to_excel(os.path.join(folder, "resume_besancon.xlsx"), index=False)
df[["name","lng", "lat", "collect_date", "available_bike_stands", "available_bikes", "file"]].head()
# On visualise les données pour une date donnée :
dt = df["file"][0]
subset = df [ df["file"] == dt ]
fig,ax,plt = DataCollectJCDecaux.draw(subset, figsize=(16,6))
ax.set_title("Besançon - {0} - {1} stations".format(dt.replace("besancon.","") \
.replace(".txt","").replace("_", " "), len(subset)));
# On crée une petite animation avec les données velib :
import matplotlib.animation as animation
anim = DataCollectJCDecaux.animation(df, interval = 100, figsize=(16,6))
from IPython.display import HTML
# Ne marche pas vraiment sur une aussi petite durée
# HTML(anim.to_jshtml())
# On essaye autrement.
anim = DataCollectJCDecaux.animation(df[-1000:], interval = 100, figsize=(16,6), module="moviepy")
# +
# anim.write_gif("anim1.gif", fps=20)
# from IPython.display import Image, display, HTML
# Image(url='anim1.gif') # does not work all the time
# HTML('''<div style="display: flex; justify-content: row;"><img src="anim1.gif"></div>''');
# -
# Ca ne bouge pas trop. Sur deux minutes, c'est plutôt attendu. On essaye sur plus de données.
from pyensae.datasource import download_data
import pandas
download_data('besancon.df.txt.zip', website = 'xdtd')
df = pandas.read_csv("besancon.df.txt", sep="\t", encoding="utf8")
df.shape
from manydataapi.velib import DataCollectJCDecaux
anim = DataCollectJCDecaux.animation(df[-1000:], interval=100, figsize=(16,6), module="moviepy")
# +
# anim.write_gif("anim2.gif", fps=20)
# Image('anim2.gif')
# HTML('''<div style="display: flex; justify-content: row;"><img src="anim2.gif"></div>''');
# -
# première image des données
dt = df["file"][0]
subset = df[df["file"] == dt]
fig,ax,plt = DataCollectJCDecaux.draw(subset, figsize=(16,6))
ax.set_title("Besançon - {0} - {1} stations".format(dt.replace("besancon.","") \
.replace(".txt","").replace("_", " "), len(subset)));
# dernière image
dt = df["file"][len(df)-1]
subset = df [ df["file"] == dt ]
fig,ax,plt = DataCollectJCDecaux.draw(subset, figsize=(16,6))
ax.set_title("Besançon - {0} - {1} stations".format(dt.replace("besancon.","") \
.replace(".txt","").replace("_", " "), len(subset)));
# Vous trouverez un exemple d'utilisation de ces données ici [Coding Party 22 mai 2014 : inférer des trajectoires de vélos](http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/coding_party_1.html) et là [Déterminer la vitesse moyenne des vélib](http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/notebooks/expose_velib.html).
DataCollectJCDecaux.draw(subset, use_folium=True, size=0.1)
| _doc/notebooks/api_velib_jcdecaux.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata_df = "data/Mouse_metadata.csv"
study_results_df = "data/Study_results.csv"
#
# Read the mouse data results
mouse_metadata = pd.read_csv(mouse_metadata_df)
mouse_metadata.head(5)
# Read the study results
study_results = pd.read_csv(study_results_df)
study_results.head(5)
# Combine the data into a single dataset
combined_study_mouse = pd.merge(study_results,mouse_metadata,how="outer", on=["Mouse ID"])
# Display the data table for preview
combined_study_mouse
# +
#combined_study_mouse.shape
# -
# Checking the number of mice.
total=combined_study_mouse["Mouse ID"].value_counts()
total
#length of the count of each unique mouse
total_unique_mouse = len(total)
total_unique_mouse
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicate_mice_id = combined_study_mouse.loc[combined_study_mouse.duplicated(subset=["Mouse ID","Timepoint",]), "Mouse ID"].unique()
duplicate_mice_id
# Optional: Get all the data for the duplicate mouse ID.
all_duplicate_mice_id=combined_study_mouse[combined_study_mouse.duplicated(["Mouse ID","Timepoint"],keep=False)]
all_duplicate_mice_id
all_duplicate_mice_id.shape
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
duplicate_drop=combined_study_mouse.loc[combined_study_mouse.duplicated(subset=['Mouse ID', 'Timepoint',]),'Mouse ID'].unique()
duplicate_drop
clean_data_df = combined_study_mouse[combined_study_mouse['Mouse ID'].isin(duplicate_drop)==False]
clean_data_df.head(5)
clean_data_df.shape
# Checking the number of mice in the clean DataFrame.
clean_number_mice=clean_data_df["Mouse ID"].value_counts()
clean_number_mice
#length of the count of each unique mouse
new_number_mice = len(clean_number_mice)
new_number_mice
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# This method produces everything in a single groupby function
combined_study_mouse_sort=combined_study_mouse.sort_values(["Tumor Volume (mm3)"], ascending=True)
combined_study_mouse_sort.head()
# +
#Identify the diferent Drug Regimen along with the total tumer volume for each using .groupby
drug_regimen_grouped = combined_study_mouse_sort.groupby(["Drug Regimen"])
drug_regimen_grouped
#total tumor volume for each drug regimen
total_tumor_volume = drug_regimen_grouped["Tumor Volume (mm3)"].sum()
total_tumor_volume
# -
#calculate the mean of each drug regimen
drug_regimen_mean = drug_regimen_grouped["Tumor Volume (mm3)"].mean()
drug_regimen_mean
#calculate the median of each drug regimen
drug_regimen_median = drug_regimen_grouped["Tumor Volume (mm3)"].median()
drug_regimen_median
#calculate the variance of each drug regimen
drug_regimen_variance = drug_regimen_grouped["Tumor Volume (mm3)"].var()
drug_regimen_variance
#calculate the standard deviation of each drug regimen
drug_regimen_std = drug_regimen_grouped["Tumor Volume (mm3)"].std()
drug_regimen_std
#calculate the SEM for each drug regimen
drug_regimen_sem = drug_regimen_grouped["Tumor Volume (mm3)"].sem()
drug_regimen_sem
#summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each drug regimen
summary_stats_table = pd.DataFrame({"Mean": drug_regimen_mean,
"Median":drug_regimen_median,
"Variance":drug_regimen_variance,
"Standard Deviation": drug_regimen_std,
"SEM": drug_regimen_sem})
summary_stats_table
# ## Bar and Pie Charts
# +
# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pandas.
#split data set into groups based on drup regimen
drug_regimen_grouped=combined_study_mouse.groupby(["Drug Regimen"])
#number of mice for each treatment
mice_treatment = drug_regimen_grouped["Mouse ID"].count()
mice_treatment
# -
# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot.
mice_per_treatment_chart=mice_treatment.plot(kind="bar", title="Total number of mice per treatment")
mice_per_treatment_chart.set_xlabel("Drug Regimen")
mice_per_treatment_chart.set_ylabel("Data Points")
plt.show()
plt.tight_layout()
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
#split data set into groups based on mouse id and sex
mouse_gender_grouped=combined_study_mouse.groupby(["Mouse ID","Sex"])
mouse_gender_grouped
mouse_gender_df = pd.DataFrame(mouse_gender_grouped.size())
#total count of female and male mice
count_gender = pd.DataFrame(mouse_gender_df.groupby(["Sex"]).count())
count_gender.columns = ["Total Count"]
#add percentage format female vs male
count_gender["Percentage of Sex"] = (100*(count_gender["Total Count"]/count_gender["Total Count"].sum()))
#add percentage format to "Percentage of Sex" column
count_gender["Percentage of Sex"] = count_gender["Percentage of Sex"]
#gender_df
count_gender
# -
# Generate a pie plot showing the distribution of female versus male mice using pyplot
#gender_pie =count_gender.plot.pie(y="Total Count", startangle =140, explode=explote, shadow=True, title= ("Female versus Male Mice"))
explode=(0.1,0)
colors=["pink", "orange"]
plot = count_gender.plot.pie(y='Total Count',figsize=(5,5), colors = colors, startangle=140, explode = explode, shadow = True, autopct="%1.1f%%")
#gender_pie.set_ylabel
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
four_regimes = combined_study_mouse[combined_study_mouse["Drug Regimen"].isin(["Capomulin", "Ramicane", "Infubinol", "Ceftamin"])]
four_regimes = four_regimes.sort_values(["Timepoint"], ascending=True)
four_regimes
four_regimes_data = four_regimes[["Drug Regimen", "Mouse ID", "Timepoint", "Tumor Volume (mm3)"]]
four_regimes_data
# -
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
cap_reg = combined_study_mouse.loc[combined_study_mouse["Drug Regimen"] == "Capomulin",:]
ram_reg = combined_study_mouse.loc[combined_study_mouse["Drug Regimen"] == "Ramicane", :]
inf_reg = combined_study_mouse.loc[combined_study_mouse["Drug Regimen"] == "Infubinol", :]
cef_reg= combined_study_mouse.loc[combined_study_mouse["Drug Regimen"] == "Ceftamin", :]
cap_reg.head()
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
last_timepoint = cap_reg.groupby('Mouse ID').max()['Timepoint']
timepoint_df = pd.DataFrame(last_timepoint)
merge_df = pd.merge(timepoint_df, combined_study_mouse, on=("Mouse ID","Timepoint"),how="left")
merge_df.head(5)
# +
# Put treatments into a list for for loop (and later for plot labels)
# Create empty list to fill with tumor vol data (for plotting)
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Locate the rows which contain mice on each drug and get the tumor volumes
# add subset
# Determine outliers using upper and lower bounds
# +
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# NOTE(review): ``data_set`` holds placeholder strings, not the per-regimen
# final-tumor-volume lists — replace them with the real data before running.
data_set = ["tumors", "tumors2", "tumors3", "tumors4"]
# BUG FIX: ``ax1`` was referenced below but its creation was commented out,
# which raises a NameError; restore the figure/axes creation.
fig1, ax1 = plt.subplots()
plt.title('Tumors Volume')
plt.ylabel('Final Tumor Volume (mm3)')
plt.xlabel('Drug Regimen')
ax1.boxplot(data_set, labels=["Capomulin","Ramicane","Infubinol","Ceftamin",])
plt.savefig('boxplot')
plt.show()
# -
# ## Line and Scatter Plots
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
# ## Correlation and Regression
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
| Pymaceuticals/pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Car playground 1
#
# This notebook provides some initial variables and creates one car object, but, here, you are encouraged to play around with the car movement code!
#
# This playground is especially useful if you **modify the car.py file** and want to test out some new car property or function!
#
# So, your only tasks for this notebook are to use it as your own personal testing ground. Some ideas:
# 1. Create multiple cars, with different initial states
# - And visualize them using `display_world()`
# 2. Add a function in car.py (by navigating back to it clicking the orange Jupyter icon), and then call that function, here!
# 3. Add a new initial variable to __init__ in car.py and use it!
# +
import numpy as np
import car
# %matplotlib inline
# Auto-reload function so that this notebook keeps up with
# changes in the class file
# %load_ext autoreload
# %autoreload 2
# -
# ### Create a new car object
# +
# Create a 2D world of 0's
height = 4
width = 6
world = np.zeros((height, width))
# Define the initial car state
initial_position = [0, 0] # [y, x] (top-left corner)
velocity = [0, 1] # [vy, vx] (moving to the right)
# Create a car with initial params
carla = car.Car(initial_position, velocity, world)
carla.display_world()
# -
## TODO: Create multiple cars and visualize them
| 4_5_State_and_Motion/2. Car, playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''venv'': venv)'
# name: python_defaultSpec_1598241736064
# ---
import librosa
import numpy as np
anger = "../../databases/TESS_and_RAVDESS/Emotions/Disgusted/03-01-07-01-01-01-01.wav"
anger2 = "../../databases/TESS_and_RAVDESS/Emotions/Angry/03-01-05-01-01-02-05.wav"
disgust = "../../databases/TESS_and_RAVDESS/Emotions/Disgusted/03-01-07-01-01-01-04.wav"
happy2 = "../../databases/TESS_and_RAVDESS/Emotions/Happy/03-01-03-01-01-01-16.wav"
happy = "../../databases/TESS_and_RAVDESS/Emotions/Happy/03-01-03-01-01-01-20.wav"
neutral = "../../databases/TESS_and_RAVDESS/Emotions/Neutral/03-01-01-01-01-01-11.wav"
# + tags=[]
li = [[1,2,100],[4,5,6],[8,9,10]]
li = np.array(li)
li = 2*((li-1)/(100-1))-1
print(li)
# + tags=[]
sample_rate=220250
signal, sr = librosa.load(anger, sr=sample_rate)
mfcc = np.mean(librosa.feature.mfcc(signal, n_mfcc=13, n_fft=1024, hop_length=256, sr=sample_rate), axis=0)
m = np.amax(mfcc)
mi = np.amin(mfcc)
print(m)
print(mi)
final = 2*((mfcc-mi)/(m-mi))-1
print(final)
# + tags=[]
signal, sr = librosa.load(anger2, sr=sample_rate)
mfcc = np.mean(librosa.feature.mfcc(signal, n_mfcc=13, n_fft=1024, hop_length=256, sr=sample_rate), axis=0)
print(mfcc)
# + tags=[]
signal, sr = librosa.load(anger, sr=sample_rate)
mfcc = librosa.feature.mfcc(signal, n_mfcc=13, n_fft=1024, hop_length=256, sr=sample_rate)
print(mfcc.shape)
# + tags=[]
signal, sr = librosa.load(disgust, sr=sample_rate)
mfcc = np.mean(librosa.feature.mfcc(signal, n_mfcc=13, n_fft=1024, hop_length=256, sr=sample_rate), axis=0)
print(mfcc)
# + tags=[]
signal, sr = librosa.load(neutral, sr=sample_rate)
mfcc = np.mean(librosa.feature.mfcc(signal, n_mfcc=13, n_fft=1024, hop_length=256, sr=sample_rate), axis=0)
print(mfcc)
# + tags=[]
signal, sr = librosa.load(happy, sr=sample_rate)
mfcc = np.mean(librosa.feature.mfcc(signal, n_mfcc=13, n_fft=1024, hop_length=256, sr=sample_rate), axis=0)
print(mfcc)
# + tags=[]
signal, sr = librosa.load(happy2, sr=sample_rate)
mfcc = np.mean(librosa.feature.mfcc(signal, n_mfcc=13, n_fft=1024, hop_length=256, sr=sample_rate), axis=0)
print(mfcc)
# -
| parse_dataset_labels/parse_sound_files/visualize_sound.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Selecting-Series-from-DataFrame" data-toc-modified-id="Selecting-Series-from-DataFrame-1"><span class="toc-item-num">1 </span>Selecting Series from DataFrame</a></span></li><li><span><a href="#Single-Series" data-toc-modified-id="Single-Series-2"><span class="toc-item-num">2 </span>Single Series</a></span></li><li><span><a href="#Multiple-Series" data-toc-modified-id="Multiple-Series-3"><span class="toc-item-num">3 </span>Multiple Series</a></span><ul class="toc-item"><li><span><a href="#About-the-Author" data-toc-modified-id="About-the-Author-3.1"><span class="toc-item-num">3.1 </span>About the Author</a></span></li></ul></li></ul></div>
# -
# ## Selecting Series from DataFrame
# ## Single Series
# conventional way to import pandas
import pandas as pd
# +
# read a dataset of UFO reports into DataFrame
ufo = pd.read_table('http://bit.ly/uforeports', sep=',')
# read a csv is equivalent to read_table, except it assumes a comma separator
ufo = pd.read_csv('http://bit.ly/uforeports')
# -
# examine first 5 rows
ufo.head()
# select 'City' Series using bracket notation
ufo['City']
type(ufo['City'])
# select 'City' Series using dot(.) notation
ufo.City
# __Note__
# - Bracket notation will always work, whereas dot notation has **limitations**
# - Dot notation doesn't work if there are **spaces** in the Series name
# - Dot notation doesn't work if the Series has the same name as a **DataFrame method or attribute** (like 'head' or 'shape')
# - Dot notation can't be used to define the name of a **new Series** (see below)
# create a new 'Location' Series (must use bracket notation to define the Series name)
ufo['Location'] = ufo.City + ', ' + ufo.State
ufo.head()
# ## Multiple Series
# select multiple series from dataframe
ufo[['City', 'State', 'Time']]
# <h3>About the Author</h3>
# This repo was created by <a href="https://www.linkedin.com/in/jubayer28/" target="_blank"><NAME></a> <br>
# <a href="https://www.linkedin.com/in/jubayer28/" target="_blank"><NAME></a> is a student of Microbiology at Jagannath University and the founder of <a href="https://github.com/hdro" target="_blank">Health Data Research Organization</a>. He is also a team member of a bioinformatics research group known as Bio-Bio-1.
#
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License</a>.
| book/pandas/05-Selecting Series from a DataFrame.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''opencv'': conda)'
# name: python_defaultSpec_1596699535176
# ---
# # Automatic Number Plate Recognition Proof of Concept with Azure Cognitive Services
#
# Automatic number-plate recognition is a technology that uses optical character recognition on images to read vehicle registration plates. This notebook will illustrate how Azure Cognitive Services can be used to develop such a solution.
#
# Custom Vision will be used to develop an object detection model that will identify the coordinates of a vehicle's number plate in an image. This will be used to crop the image to focus on the number plate. The Read API will then use this cropped image to perform optical character recognition (OCR) to extract the number plate from the image.
#
# **Before executing this notebook ensure the instructions in the README have followed.**
# ### Load dependencies and parse config file
# +
import configparser
import os
import matplotlib.pyplot as plt
import numpy as np
import requests
from matplotlib.patches import Polygon
import cv2
import imutils
# -
# Parse config file
cognitive_services_config_file = os.path.join(
"..", "configuration", "cognitive-services.ini"
)
config = configparser.ConfigParser()
config.read(cognitive_services_config_file)
# ### Perform number plate extraction
#
# In this section the number plate is identified in the specified image using the Custom Vision service. The image is then cropped to include only the number plate.
# + tags=[]
# Define target image
target_image_path = os.path.join("..", "images", "040603", "P6040011.jpg")
# Load a color image (flag 1 = BGR color)
img = cv2.imread(target_image_path, 1)
# Convert image to byte string.
# FIX: ndarray.tostring() is deprecated and removed in recent NumPy — use tobytes().
img_str = cv2.imencode(".jpg", img)[1].tobytes()
# Perform object detection using the custom vision service
custom_vision_response = requests.post(
    url=config["custom_vision"]["imgurl"],
    data=img_str,
    headers={
        "Content-Type": "application/octet-stream",
        "Prediction-Key": config.get("custom_vision", "key"),
    }
).json()
# +
# Find bounding box with the highest confidence level
best_custom_vision_prediction = max(
custom_vision_response["predictions"], key=lambda x: x["probability"]
)
# Extract the bounding box
bounding_box = best_custom_vision_prediction["boundingBox"]
# Define vertical distance from the left border
x = np.int32(bounding_box["left"] * img.shape[1])
# Define horizontal distance from the top border
y = np.int32(bounding_box["top"] * img.shape[0])
# Define rectangle width
w = np.int32(bounding_box["width"] * img.shape[1])
# Define rectangle height
h = np.int32(bounding_box["height"] * img.shape[0])
# Define top left point
point_one = (x, y)
# Define bottom right point
point_two = (x + w, y + h)
# Plot bounding box on image
img_box = cv2.rectangle(img, point_one, point_two, color=(0, 255, 0), thickness=2)
# Display image
plt.imshow(img_box)
plt.show()
# +
# Crop image to the predicted number-plate bounding box
img_crop = img[point_one[1] : point_two[1], point_one[0] : point_two[0]]
# Upscale narrow crops so OCR has enough resolution to work with.
# BUG FIX: previously ``img_resize`` was only assigned when the crop was
# narrower than 500 px, causing a NameError for wider crops downstream.
if img_crop.shape[1] < 500:
    img_resize = imutils.resize(img_crop, width=500)
else:
    img_resize = img_crop
# Display cropped image
plt.imshow(img_resize)
plt.show()
# -
# ### Perform optical character recognition
#
# In this section the number plate in the image is converted to text using the Read API as part of the Computer Vision service.
# +
# Convert cropped image to byte string.
# FIX: ndarray.tostring() is deprecated and removed in recent NumPy — use tobytes().
img_str = cv2.imencode(".jpg", img_resize)[1].tobytes()
# Define Read API OCR endpoint
read_api_url = "{}/vision/v3.0/read/analyze".format(config.get("computer_vision", "url"))
# Call Read API to perform OCR (asynchronous: result is fetched via the
# Operation-Location header in the next cell)
response = requests.post(
    url=read_api_url,
    data=img_str,
    headers={
        "Ocp-Apim-Subscription-Key": config["computer_vision"]["key"],
        "Content-Type": "application/octet-stream",
    },
)
# + tags=[]
# Call Read API to get result
response_final = requests.get(
response.headers["Operation-Location"],
headers={"Ocp-Apim-Subscription-Key": config["computer_vision"]["key"]},
)
result = response_final.json()
# + tags=[]
# Find text identified by the API
for line in result["analyzeResult"]["readResults"][0]["lines"]:
print("Recognised text:", line["text"])
| src/automatic-number-plate-recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import json
import operator
import os
from pathlib import Path
import re
import numpy as np
from matplotlib import pyplot as plt, rc
plt.rcParams['figure.figsize'] = [14, 8]
rc('font', **{'family': 'Open Sans', 'size': 18})
# +
def load_metrics(directory):
    """Walk *directory* for checkpoint folders containing metrics.json.

    The training step is parsed from the trailing ".N" suffix of each
    folder that holds a metrics.json file.

    Returns:
        (steps, values) -- sorted step numbers, and the parsed metrics
        dicts in the same (ascending step) order.
    """
    by_step = {}
    for root, dirs, files in os.walk(directory):
        if "metrics.json" in files:
            step = int(root.split(".")[-1])
            with open(os.path.join(root, "metrics.json"), "r") as f:
                by_step[step] = json.load(f)
    steps = sorted(by_step)
    return steps, [by_step[s] for s in steps]

# The two experiments previously duplicated this walk inline; the helper
# removes the copy/paste (and the operator.itemgetter re-sort of the dict).
directory = os.path.join(Path().resolve().parent, "decode/experiment-1")
steps, metrics = load_metrics(directory)
directory2 = os.path.join(Path().resolve().parent, "decode/experiment-11-1")
steps2, metrics2 = load_metrics(directory2)
# +
def _col(rows, service, key):
    """Extract one metric as a list, in step order, from parsed metrics dicts."""
    return [row[service][key] for row in rows]

jga_all = _col(metrics, "#ALL_SERVICES", "joint_goal_accuracy")
jga_seen = _col(metrics, "#SEEN_SERVICES", "joint_goal_accuracy")
jga_unseen = _col(metrics, "#UNSEEN_SERVICES", "joint_goal_accuracy")
jca_all = _col(metrics, "#ALL_SERVICES", "joint_cat_accuracy")
jca_seen = _col(metrics, "#SEEN_SERVICES", "joint_cat_accuracy")
jca_unseen = _col(metrics, "#UNSEEN_SERVICES", "joint_cat_accuracy")
jnca_all = _col(metrics, "#ALL_SERVICES", "joint_noncat_accuracy")
jnca_seen = _col(metrics, "#SEEN_SERVICES", "joint_noncat_accuracy")
jnca_unseen = _col(metrics, "#UNSEEN_SERVICES", "joint_noncat_accuracy")
intent_all2 = _col(metrics2, "#ALL_SERVICES", "active_intent_accuracy")
intent_seen2 = _col(metrics2, "#SEEN_SERVICES", "active_intent_accuracy")
intent_unseen2 = _col(metrics2, "#UNSEEN_SERVICES", "active_intent_accuracy")
requested_all2 = _col(metrics2, "#ALL_SERVICES", "requested_slots_f1")
requested_seen2 = _col(metrics2, "#SEEN_SERVICES", "requested_slots_f1")
requested_unseen2 = _col(metrics2, "#UNSEEN_SERVICES", "requested_slots_f1")
jga_all2 = _col(metrics2, "#ALL_SERVICES", "joint_goal_accuracy")
jga_seen2 = _col(metrics2, "#SEEN_SERVICES", "joint_goal_accuracy")
jga_unseen2 = _col(metrics2, "#UNSEEN_SERVICES", "joint_goal_accuracy")
jca_all2 = _col(metrics2, "#ALL_SERVICES", "joint_cat_accuracy")
jca_seen2 = _col(metrics2, "#SEEN_SERVICES", "joint_cat_accuracy")
jca_unseen2 = _col(metrics2, "#UNSEEN_SERVICES", "joint_cat_accuracy")
jnca_all2 = _col(metrics2, "#ALL_SERVICES", "joint_noncat_accuracy")
jnca_seen2 = _col(metrics2, "#SEEN_SERVICES", "joint_noncat_accuracy")
jnca_unseen2 = _col(metrics2, "#UNSEEN_SERVICES", "joint_noncat_accuracy")
# Dump the experiment-11-1 series for quick inspection
print(intent_all2)
print(intent_seen2)
print(intent_unseen2)
print()
print(requested_all2)
print(requested_seen2)
print(requested_unseen2)
print()
print(jga_all2)
print(jga_seen2)
print(jga_unseen2)
print()
print(jca_unseen2)
print(jnca_unseen2)
# +
# Joint goal accuracy over training steps, experiment-1
plt.plot(steps, jga_all, marker="o", label="all")
plt.plot(steps, jga_seen, marker="o", label="seen")
plt.plot(steps, jga_unseen, marker="o", label="unseen")
plt.xlabel("Steps")
plt.ylabel("Joint goal accuracy")
plt.grid()
plt.legend()
plt.show()
# Same metric for experiment-11-1
plt.plot(steps2, jga_all2, marker="o", label="all")
plt.plot(steps2, jga_seen2, marker="o", label="seen")
plt.plot(steps2, jga_unseen2, marker="o", label="unseen")
plt.xlabel("Steps")
plt.ylabel("Joint goal accuracy")
plt.grid()
plt.legend()
plt.show()
# +
# Joint categorical accuracy, experiment-1 then experiment-11-1
plt.plot(steps, jca_all, marker="o", label="all")
plt.plot(steps, jca_seen, marker="o", label="seen")
plt.plot(steps, jca_unseen, marker="o", label="unseen")
plt.xlabel("Steps")
plt.ylabel("Joint categorical accuracy")
plt.grid()
plt.legend()
plt.show()
plt.plot(steps2, jca_all2, marker="o", label="all")
plt.plot(steps2, jca_seen2, marker="o", label="seen")
plt.plot(steps2, jca_unseen2, marker="o", label="unseen")
plt.xlabel("Steps")
plt.ylabel("Joint categorical accuracy")
plt.grid()
plt.legend()
plt.show()
# +
# Joint non-categorical accuracy, experiment-1 then experiment-11-1
plt.plot(steps, jnca_all, marker="o", label="all")
plt.plot(steps, jnca_seen, marker="o", label="seen")
plt.plot(steps, jnca_unseen, marker="o", label="unseen")
plt.xlabel("Steps")
plt.ylabel("Joint non-categorical accuracy")
plt.grid()
plt.legend()
plt.show()
plt.plot(steps2, jnca_all2, marker="o", label="all")
plt.plot(steps2, jnca_seen2, marker="o", label="seen")
plt.plot(steps2, jnca_unseen2, marker="o", label="unseen")
plt.xlabel("Steps")
plt.ylabel("Joint non-categorical accuracy")
plt.grid()
plt.legend()
plt.show()
# +
# Combined view: the three "#ALL_SERVICES" joint accuracies on one axes
# (seen/unseen variants kept below, commented out, for quick toggling)
plt.plot(steps, jga_all, marker="o", label="jga_all")
# plt.plot(steps, jga_seen, marker="o", label="jga_seen")
# plt.plot(steps, jga_unseen, marker="o", label="jga_unseen")
plt.plot(steps, jca_all, marker="o", label="jca_all")
# plt.plot(steps, jca_seen, marker="o", label="jca_seen")
# plt.plot(steps, jca_unseen, marker="o", label="jca_unseen")
plt.plot(steps, jnca_all, marker="o", label="jnca_all")
# plt.plot(steps, jnca_seen, marker="o", label="jnca_seen")
# plt.plot(steps, jnca_unseen, marker="o", label="jnca_unseen")
plt.xlabel("Steps")
plt.ylabel("Joint accuracy")
plt.grid()
plt.legend()
plt.show()
plt.plot(steps2, jga_all2, marker="o", label="jga_all")
# plt.plot(steps2, jga_seen2, marker="o", label="jga_seen")
# plt.plot(steps2, jga_unseen2, marker="o", label="jga_unseen")
plt.plot(steps2, jca_all2, marker="o", label="jca_all")
# plt.plot(steps2, jca_seen2, marker="o", label="jca_seen")
# plt.plot(steps2, jca_unseen2, marker="o", label="jca_unseen")
plt.plot(steps2, jnca_all2, marker="o", label="jnca_all")
# plt.plot(steps2, jnca_seen2, marker="o", label="jnca_seen")
# plt.plot(steps2, jnca_unseen2, marker="o", label="jnca_unseen")
plt.xlabel("Steps")
plt.ylabel("Joint accuracy")
plt.grid()
plt.legend()
plt.show()
# -
directory = os.path.join(Path().resolve().parent, "models/experiment-10")
log = os.path.join(directory, "logs/train.log")

def parse_train_log(log_path):
    """Parse a training log into train/dev loss curves.

    Expects a line containing "batch_size <int>" before any loss lines, and
    loss lines of the form "Batch: <k> | Train loss: <x>" or
    "Batch: <k> | Dev loss: <x>". Batch indices are converted to sample
    counts by multiplying with the batch size.

    Returns:
        (batch_size, train_steps, train_loss, dev_steps, dev_loss)

    Raises:
        ValueError -- if a loss line appears before the batch_size line.
    """
    batch_size = None
    train_steps, dev_steps = [], []
    train_loss, dev_loss = [], []
    with open(log_path, "r") as f:
        for line in f:
            if batch_size is None and "batch_size" in line:
                batch_size = int(line.split()[-1])
                continue
            # Capture the kind (Train/Dev) in the regex itself. The original
            # tested `"Train" in line` afterwards, which could misclassify a
            # Dev line that merely mentions "Train" elsewhere, and fell back
            # to a bare `raise` (a RuntimeError with no active exception).
            result = re.search(r"Batch: ([0-9]+) \| (Train|Dev) loss: ([0-9.]+)", line)
            if result is None:
                continue
            if batch_size is None:
                raise ValueError("loss line encountered before batch_size: " + line)
            step = batch_size * int(result.group(1))
            loss = float(result.group(3))
            if result.group(2) == "Train":
                train_steps.append(step)
                train_loss.append(loss)
            else:
                dev_steps.append(step)
                dev_loss.append(loss)
    return batch_size, train_steps, train_loss, dev_steps, dev_loss

if os.path.exists(log):
    batch_size, train_steps, train_loss, dev_steps, dev_loss = parse_train_log(log)
    plt.plot(train_steps, train_loss, marker="o")
    plt.xlabel("Steps")
    plt.ylabel("Train loss")
    plt.grid()
    plt.show()
    plt.plot(dev_steps, dev_loss, marker="o")
    plt.xlabel("Steps")
    plt.ylabel("Dev loss")
    plt.grid()
    plt.show()
else:
    # Degrade gracefully when the experiment directory is absent instead of
    # raising FileNotFoundError inside the notebook cell.
    print("train log not found:", log)
| notebooks/analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="-tkTKycN1Wmk"
# # Age Estimation
# + colab_type="code" executionInfo={"status": "ok", "timestamp": 1586068216189, "user_tz": -480, "elapsed": 30725, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037383563643251312"}} id="yMMiEbMI1a4I" outputId="72722220-c0d1-423b-90df-c011c96bf539" pycharm={"is_executing": false} colab={"base_uri": "https://localhost:8080/", "height": 163}
# Colab-only setup: mount Google Drive and switch into the project folder.
from google.colab import drive
drive.mount('/content/drive')
# %cd /content/drive/My\ Drive/project10
# Byte-compile the helper module up front so syntax errors surface early.
import py_compile
py_compile.compile(r'helperP.py')
# + colab_type="code" id="vAZRqzmX1Wmp" pycharm={"is_executing": false} colab={}
import numpy as np
import os
from math import *
from helperP import *
# + [markdown] colab_type="text" id="1vHT5ILL1Wmw"
# # Loading Data
# + colab_type="code" executionInfo={"status": "ok", "timestamp": 1586068228147, "user_tz": -480, "elapsed": 42672, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037383563643251312"}} id="TGw9QDAA1Wmy" outputId="ab199a46-2f3f-4f0a-e76c-4ed96c528461" pycharm={"is_executing": false} colab={"base_uri": "https://localhost:8080/", "height": 171}
# Load features/labels via the project helpers in helperP.
# NOTE(review): downstream code treats features as 2048-d vectors and ages as
# integer class labels in [0, 100] -- confirm against helperP.prepare_data.
base_dir = 'DATASET/'
age_train, features_train = prepare_data('train', base_dir)
age_val, features_val = prepare_data('val', base_dir)
_, features_test = prepare_data('test', base_dir)  # test labels are withheld
show_data(base_dir)
# + [markdown] colab_type="text" id="QHuX1vBS1Wm6"
# ## Linear class
# LINEAR
# Implementation of the linear layer (also called fully connected layer)
# which performs linear transoformation on input data y = xW + b.
# This layer has two learnable parameters, weight of shape (input_channel, output_channel)
# and bias of shape (output_channel), which are specified and initalized in init_param()
# function. In this assignment, you need to implement both forward and backward computation
# Arguments:
# input_channel -- integer, number of input channels
# output_channel -- integer, number of output channels
# + colab_type="code" id="T7gcTj-X1Wm7" pycharm={"is_executing": false, "name": "#%%\n"} colab={}
class Linear(object):
    """Fully connected layer computing y = xW + b.

    Learnable parameters: `weight` of shape (input_channel, output_channel)
    and `bias` of shape (output_channel), created by init_param().
    """

    def __init__(self, input_channel, output_channel):
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.init_param()

    def init_param(self):
        """(Re)initialize weight with Xavier-style scaling and zero bias."""
        scale = sqrt(2.0 / (self.input_channel + self.output_channel))
        self.weight = (np.random.randn(self.input_channel, self.output_channel) * scale).astype(np.float32)
        self.bias = np.zeros((self.output_channel))

    def forward(self, input):
        """Apply the affine map.

        Arguments:
            input -- numpy array of shape (N, input_channel)
        Output:
            numpy array of shape (N, output_channel)
        """
        # Cache the input; backward() needs it for the weight gradient.
        self.input = input
        return input @ self.weight + self.bias[None, :]

    def backward(self, grad_input):
        """Back-propagate through the affine map.

        Arguments:
            grad_input -- upstream gradient, shape (N, output_channel)
        Output:
            (grad w.r.t. input, grad w.r.t. weight, grad w.r.t. bias)
        """
        d_bias = grad_input.sum(axis=0)
        d_weight = self.input.T @ grad_input
        d_input = grad_input @ self.weight.T
        return d_input, d_weight, d_bias
# + [markdown] colab_type="text" id="_B9wj7HZ1WnB"
# ## CROSS_ENTROPY_LOSS_WITH_SOFTMAX
# Implementation of the combination of softmax function and cross entropy loss.
# In classification task, we usually firstly apply softmax to map class-wise prediciton
# into the probabiltiy distribution then we use cross entropy loss to maximise the likelihood
# of ground truth class's prediction. Since softmax includes exponential term and cross entropy includes
# log term, we can simplify the formula by combining these two functions togther so that log and exp term could cancell out
# mathmatically and we can avoid precision lost with float point numerical computation.
# If we ignore the index on batch sizel and assume there is only one grouth truth per sample,
# the formula for softmax and cross entropy loss are:
# Softmax: prob[i] = exp(x[i]) / \sum_{j}exp(x[j])
# Cross_entropy_loss: - 1 * log(prob[gt_class])
# Combining these two function togther, we got
# cross_entropy_with_softmax: -x[gt_class] + log(\sum_{j}exp(x[j]))
# In this assignment, you will implement both forward and backward computation.
# Arguments:
# None
# + colab_type="code" id="LWX0l4PK1WnC" pycharm={"is_executing": false, "name": "#%%\n"} colab={}
class CrossEntropyLossWithSoftmax(object):
    """Softmax fused with cross-entropy loss.

    Fusing the two cancels the exp/log pair analytically, which is both
    simpler and numerically friendlier:
        loss = -x[gt] + log(sum_j exp(x[j]))
    """

    def __init__(self):
        pass

    def forward(self, input, gt_label):
        """Compute the per-sample loss.

        Arguments:
            input -- class scores, shape (N, C)
            gt_label -- integer ground-truth classes in [0, C-1], shape (N,)
        Output:
            per-sample losses, shape (N,)
        """
        exp_scores = np.exp(input)
        totals = exp_scores.sum(axis=-1)
        # Cache softmax probabilities and labels for the backward pass.
        self.gt_label = gt_label
        self.prob = exp_scores / totals[:, None]
        picked = input[np.arange(input.shape[0]), gt_label]
        return np.log(totals) - picked

    def backward(self, grad_output):
        """Gradient w.r.t. the input scores: (softmax - one_hot) * upstream.

        Arguments:
            grad_output -- upstream gradient, shape (N,)
        Output:
            gradient w.r.t. forward's input, shape (N, C)
        """
        rows = np.arange(self.prob.shape[0])
        # In-place: subtract the one-hot target from the cached probabilities.
        self.prob[rows, self.gt_label] -= 1
        return grad_output[:, None] * self.prob
# + [markdown] id="4gM_wbkfCQet" colab_type="text"
# ## RELU
# Implementation of relu (rectified linear unit) layer. Relu is the no-linear activating function that
# set all negative values to zero and the formua is y = max(x,0).
# This layer has no learnable parameters and you need to implement both forward and backward computation
# Arguments:
# None
# + pycharm={"is_executing": false} id="ZofjUjkpCQet" colab_type="code" colab={}
class ReLU(object):
    """Rectified linear unit activation: y = max(x, 0).

    No learnable parameters; the forward input is cached so the backward
    pass can mask gradients where the activation was off.
    """

    def __init__(self):
        pass

    def forward(self, input):
        """Apply ReLU elementwise.

        Arguments:
            input -- numpy array of arbitrary shape
        Output:
            array of the same shape with negatives clamped to 0
        """
        self.input = input
        return np.maximum(input, 0)

    def backward(self, grad_output):
        """Propagate the gradient through the ReLU.

        Arguments:
            grad_output -- numpy array, same shape as the forward input
        Output:
            grad_input -- grad_output with entries zeroed where the forward
                          input was negative

        Bug fixed: the original zeroed the *caller's* grad_output array in
        place and returned the unmasked copy, so the mask never reached the
        returned gradient and the caller's buffer was silently corrupted.
        """
        grad_input = grad_output.copy()
        grad_input[self.input < 0] = 0
        return grad_input
# + [markdown] colab_type="text" id="b_YUZIz41WnH"
# # Implement Stochastic Gradient descent
# Stochastic Gradient Descent (SGD) is a simple yet very efficient approach to discriminative learning of linear classifiers under convex loss functions such as (linear) Support Vector Machines and Logistic Regression. Even though SGD has been around in the machine learning community for a long time, it has received a considerable amount of attention just recently in the context of large-scale learning.
# ```
# Arguments:
# age -- numpy array, label, (n, )
# feature -- numpy array, features, (n, 2048)
# Return:
# weights -- numpy array, (2048, )
# bias -- numpy array, (1, )
# ```
# + colab_type="code" id="dz5u2NcH1WnK" pycharm={"is_executing": false} colab={}
def stochastic_gradient_descent(age, feature, age_val, feature_val):
    """Train a two-layer MLP age classifier with SGD + momentum.

    Arguments:
        age -- numpy array of training labels, shape (n,)
        feature -- numpy array of training features, shape (n, 2048)
        age_val -- numpy array of validation labels
        feature_val -- numpy array of validation features

    Returns:
        model_opt -- dict with keys 'fc0_weight', 'fc0_bias', 'fc1_weight',
                     'fc1_bias' holding the parameter snapshot that achieved
                     the lowest validation MAE.

    Relies on the module-level globals `batch_size` and `epoch_sgd`.
    """
    # check the inputs
    assert len(age) == len(feature)
    # Fixed seed so the shuffling (and hence training) is reproducible
    np.random.seed(0)
    # Learning rate and momentum hyper-parameters
    lr = 1e-3
    alpha = 0.9
    # Hidden layer width (tunable)
    num_lat = 128
    # Define the net: 2048 -> num_lat -> 101 age classes (ages 0..100)
    fc0 = Linear(2048, num_lat)
    relu0 = ReLU()
    fc1 = Linear(num_lat, 101)
    model = [fc0, relu0, fc1]
    cretirion = CrossEntropyLossWithSoftmax()
    # Initialize parameters of the layers
    fc0.init_param()
    fc1.init_param()
    # Number of mini-batches per epoch
    t = len(age) // batch_size
    # Best-so-far snapshot. Fix: store *copies* -- the in-place `-=` updates
    # below would otherwise mutate the "optimal" weights too, making this
    # dict just an alias of the final (not the best) model.
    model_opt = {'fc0_weight': fc0.weight.copy(), 'fc0_bias': fc0.bias.copy(),
                 'fc1_weight': fc1.weight.copy(), 'fc1_bias': fc1.bias.copy()}
    loss_opt = 1e6
    # Momentum buffers, one per parameter
    v = {'fc0_weight': np.zeros_like(fc0.weight), 'fc0_bias': np.zeros_like(fc0.bias),
         'fc1_weight': np.zeros_like(fc1.weight), 'fc1_bias': np.zeros_like(fc1.bias)}
    for e in range(epoch_sgd):
        # Shuffle training data each epoch
        n = np.random.permutation(len(feature))
        loss_train = []
        for m in range(t):
            # Mini-batch slice of features and integer labels
            batch_feature = feature[n[m * batch_size: (m + 1) * batch_size]]
            batch_age = age[n[m * batch_size: (m + 1) * batch_size]].reshape(-1, 1).astype(np.int16).squeeze()
            # Forward pass
            z1 = fc0.forward(batch_feature)
            a1 = relu0.forward(z1)
            z2 = fc1.forward(a1)
            ce = cretirion.forward(z2, batch_age)
            loss_train.append(ce)
            # Backward pass; mean loss => upstream gradient of 1/len(batch)
            dz2 = cretirion.backward(np.ones_like(ce) * 1 / len(batch_age))
            da1, dw1, db1 = fc1.backward(dz2)
            dz1 = relu0.backward(da1)
            dinput, dw0, db0 = fc0.backward(dz1)
            # Momentum update of the descent direction
            v['fc0_weight'] = v['fc0_weight'] * alpha + lr * dw0
            v['fc0_bias'] = v['fc0_bias'] * alpha + lr * db0
            v['fc1_weight'] = v['fc1_weight'] * alpha + lr * dw1
            v['fc1_bias'] = v['fc1_bias'] * alpha + lr * db1
            # Parameter step (in place)
            fc0.weight -= v['fc0_weight']
            fc0.bias -= v['fc0_bias']
            fc1.weight -= v['fc1_weight']
            fc1.bias -= v['fc1_bias']
        # Validation MAE: expected age under the softmax distribution.
        # Fix: use the `feature_val` argument -- the original read the
        # module-level global `features_val`, silently ignoring the parameter.
        z_val = feature_val
        for layer in model:
            z_val = layer.forward(z_val)
        a_val = np.exp(z_val)
        p_val = a_val / np.sum(a_val, axis=-1)[:, None]
        pred_val = np.dot(p_val, np.arange(0, 101).reshape(-1, 1))
        loss_val = np.abs(pred_val.reshape(-1, 1) - age_val.reshape(-1, 1)).mean()
        # Keep a copy of the best parameters w.r.t. validation MAE
        if loss_opt > loss_val:
            print('updated:')
            loss_opt = loss_val
            model_opt['fc0_weight'] = fc0.weight.copy()
            model_opt['fc0_bias'] = fc0.bias.copy()
            model_opt['fc1_weight'] = fc1.weight.copy()
            model_opt['fc1_bias'] = fc1.bias.copy()
        print('=> epoch:', e + 1, ' Validation MAE Loss:', round(loss_val, 4), 'Training Loss:', round(np.array(loss_train).mean(), 4))
    return model_opt
# + [markdown] colab_type="text" id="VU3NYowL1WnT"
# # Train and validate
# + colab_type="code" executionInfo={"status": "ok", "timestamp": 1586068330608, "user_tz": -480, "elapsed": 145113, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037383563643251312"}} id="MkO0N-bh1WnV" outputId="462ef135-424b-4b98-db5b-50d6839602d1" pycharm={"is_executing": false} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Train the MLP, then score the best snapshot's MAE on the validation set
# (evaluate_hidden is provided by helperP).
model_opt = stochastic_gradient_descent(age_train, features_train, age_val, features_val)
loss = evaluate_hidden([model_opt['fc0_weight'], model_opt['fc1_weight']],
                       [model_opt['fc0_bias'], model_opt['fc1_bias']],
                       age_val, features_val)
print("Your mean absolute error for validation set is:", round(loss, 3))
# + [markdown] colab_type="text" id="MaRRLSaY1Wna"
# # Test and Generate results file
# + colab_type="code" executionInfo={"status": "ok", "timestamp": 1586068330609, "user_tz": -480, "elapsed": 145108, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037383563643251312"}} id="onUVv_731Wnc" outputId="85cdad6d-ca1f-41da-8602-c4de563c9065" pycharm={"is_executing": false} colab={"base_uri": "https://localhost:8080/", "height": 71}
# Predict on the unlabeled test split and write results to disk
# (test_hidden is provided by helperP).
prediction = test_hidden([model_opt['fc0_weight'], model_opt['fc1_weight']],
                         [model_opt['fc0_bias'], model_opt['fc1_bias']],
                         features_test, filename='sgd_hidden.txt')
print("Test results has saved to sgd_hidden.txt")
print(prediction[:10])
| finalProject/E1 Age estimation/NetworkFinal/answers/pynetworkHidden.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2.1 Scientific Computing & NumPy Basics
#
# ## 2.1.1 NumPy Arrays
# +
import numpy as np
a = np.array([1, 2, 3])
a
# -
# Mixed types are coerced: this becomes an array of strings, not numbers
b = np.array([1, 2, 'a'])
b
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
c
zero_array = np.zeros((2, 2)) # 2 by 2 zero matrix
zero_array
one_array = np.ones((2, 2, 3), dtype=int) # 3D one integer matrix
one_array
rand_array = np.random.rand(2, 3)
rand_array
a = np.array([1, 2, 3])
a[0]
a[1]
b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b
b[1, 1] # commas are used when the array has many dimensions
a = np.random.rand(2, 3, 4) # random 2-by-3-by-4 matrix
a
a[1, 0: 2, 1:] # accessing numbers that are in the second element in 1st axis
# first two elements in 2nd axis, and last three elements in 3rd axis
a
a.shape # the `shape` attributes holds the value of the current shape
np.reshape(a, (3, 2, 4))
# NOTE(review): the next line raises ValueError (24 elements cannot fill
# 3x3x3 = 27) -- appears to be a deliberate demonstration of a reshape error
np.reshape(a, (3, 3, 3))
a.T
# ## 2.1.3 Random Sampling
sample = np.random.normal()
sample
sample = np.random.normal(loc=100, scale=10)
sample
samples = np.random.normal(loc=100, scale=10, size=(2, 3))
samples
samples = np.random.poisson(lam=10, size=(2, 2))
samples
np.random.randint(low=0, high=10, size=(2, 5))
np.random.choice([1, 3, 4, -6], size=(2, 2))
# +
# shuffle() works in place: `a` itself is reordered on each pass
a = [1, 2, 3, 4]
for _ in range(3):
    np.random.shuffle(a)
    print(a)
# +
# Seeding makes draws reproducible (stdlib and numpy RNGs are independent)
import random
random.seed(0)
# -
np.random.seed(0)
# # 2.2 Working with Tabular Data with Pandas
#
# ## 2.2.1 The DataFrame Object
# +
import pandas as pd
# A DataFrame from a dict of columns (lists and numpy arrays both work)
my_dict = {'col1': [1, 2], 'col2': np.array([3, 4]),
           'col3': [5, 6]}
df = pd.DataFrame(my_dict)
df
# +
# A DataFrame from a 2D array, naming the columns explicitly
my_array = np.array([[1, 3, 5], [2, 4, 6]])
alt_df = pd.DataFrame(my_array, columns=['col1', 'col2', 'col3'])
alt_df
# -
# ## 2.2.2 Accessing Rows and Columns
df.loc[0] # first row
df.loc[[0, 1]] # first and second rows (all data)
df.loc[0, ['col2', 'col3']] # first row, 2nd and 3rd columns
df['col3'] # third column
for item in df.loc[:, 'col3']:
    print(item)
df.loc[0] = [3, 6, 9] # change first row
df
df['col2'] = [0, 0] # change second column
df
df['col4'] = [10, 10]
df.loc[3] = [1, 2, 3, 4]
df
# Boolean masks: lengths must match the row count (3) and column count (4)
df.loc[[False, True, False], [False, True, False, True]]
df.loc[:, df.loc[0] > 5]
# ## 2.2.4 Advanced Pandas Functionalities
df = pd.DataFrame({'x': [1, 2, -1], 'y': [-3, 6, 5], 'z': [1, 3, 2]})
df
df['x_squared'] = df['x'].apply(lambda x: x ** 2)
df
# Vectorized equivalent of the apply() above
df['x_squared'] = df['x'] ** 2
df
# +
def parity_str(x):
    """Return 'even' or 'odd' for the integer *x* (works for negatives too)."""
    return 'even' if x % 2 == 0 else 'odd'
df['x_parity'] = df['x'].apply(parity_str)
df
# -
df['x_parity']
# value_counts: frequency of each parity label
df['x_parity'].value_counts()
# One-hot encode the categorical column into 'even'/'odd' indicator columns
pd.get_dummies(df['x_parity'])
# # 2.3 Data Visualization with Matplotlib and Seaborn
#
# ## 2.3.1 Scatter Plots
import matplotlib.pyplot as plt
# plt.rcParams['figure.figsize'] = [20, 10]
# plt.rcParams['font.size'] = 15
# +
x = [1, 2, 3, 1.5, 2]
y = [-1, 5, 2, 3, 0]
# s= sets the marker size
plt.scatter(x, y, s=100)
plt.show()
# +
# Per-point sizes and single-letter color codes
sizes = [10, 40, 60, 80, 100]
colors = ['r', 'b', 'y', 'g', 'k']
plt.scatter(x, y, s=sizes, c=colors)
plt.show()
# -
# ## 2.3.2 Line Graphs
# +
import numpy as np
x = np.linspace(0, 10, 1000)
y = np.sin(x)
plt.plot(x, y)
plt.show()
# +
x = np.linspace(1, 10, 1000)
linear_line = x
log_curve = np.log(x)
sin_wave = np.sin(x)
curves = [linear_line, log_curve, sin_wave]
colors = ['k', 'r', 'b']
styles = ['-', '--', ':']
# Draw all three curves on one axes with distinct color/linestyle pairs
for curve, color, style in zip(curves, colors, styles):
    plt.plot(x, curve, c=color, linestyle=style)
plt.show()
# -
# ## 2.3.3 Bar Graphs
# +
labels = ['Type 1', 'Type 2', 'Type 3']
counts = [2, 3, 5]
plt.bar(labels, counts)
plt.show()
# +
type_1 = [1, 1] # 1 of type A and 1 of type B
type_2 = [1, 2] # 1 of type A and 2 of type B
type_3 = [2, 3] # 2 of type A and 3 of type B
counts = [type_1, type_2, type_3]
locations = np.array([0, 1, 2])
width = 0.3
# Grouped bars: shift each series half a bar-width left/right of the tick
bars_a = plt.bar(locations - width / 2, [my_type[0] for my_type in counts], width=width)
bars_b = plt.bar(locations + width / 2, [my_type[1] for my_type in counts], width=width)
plt.xticks(locations, ['Type 1', 'Type 2', 'Type 3'])
plt.legend([bars_a, bars_b], ['Type A', 'Type B'])
plt.show()
# +
# Stacked bars: `bottom=` lifts series B on top of series A
bars_a = plt.bar(locations, [my_type[0] for my_type in counts])
bars_b = plt.bar(locations, [my_type[1] for my_type in counts],
                 bottom=[my_type[0] for my_type in counts])
plt.xticks(locations, ['Type 1', 'Type 2', 'Type 3'])
plt.legend([bars_a, bars_b], ['Type A', 'Type B'])
plt.show()
# -
# ## 2.3.4 Histograms
# +
x = np.random.randn(100)
plt.hist(x)
plt.show()
# -
plt.hist(x, bins=100)
plt.show()
# +
# alpha= makes the two overlapping histograms translucent
y = np.random.randn(100) * 4 + 5
plt.hist(x, color='b', bins=20, alpha=0.2)
plt.hist(y, color='r', bins=20, alpha=0.2)
plt.show()
# -
plt.hist(x)
# ## 2.3.5 Heatmaps
# +
my_map = np.random.randn(10, 10)
plt.imshow(my_map)
plt.colorbar()
plt.show()
# -
# ## 2.3.6 Visualization Shorthand from Seaborn and Pandas
import seaborn as sns
# +
x = np.random.normal(0, 1, 1000)
y = np.random.normal(5, 2, 1000)
df = pd.DataFrame({'Column 1': x, 'Column 2': y})
df.head()
# -
# Joint distribution: scatter plus marginal histograms
sns.jointplot(x='Column 1', y='Column 2', data=df)
plt.show()
# +
student_df = pd.DataFrame({
    'name': ['Alice', 'Bob', 'Carol', 'Dan', 'Eli', 'Fran',
             'George', 'Howl', 'Ivan', 'Jack', 'Kate'],
    'gender': ['female', 'male', 'female', 'male', 'male', 'female',
               'male', 'male', 'male', 'male', 'female'],
    'class': ['JR', 'SO', 'SO', 'SO', 'JR', 'SR',
              'FY', 'SO', 'SR', 'JR', 'FY'],
    'gpa': [90, 93, 97, 89, 95, 92,
            90, 87, 95, 100, 95],
    'num_classes': [4, 3, 4, 4, 3, 2,
                    2, 3, 3, 4, 2]
})
# Bar chart of mean gpa per class, split by gender
sns.catplot(x='class', y='gpa', hue='gender', kind='bar', data=student_df)
plt.show()
# -
student_df['gpa'].plot.hist()
plt.show()
student_df['class'].value_counts().plot.pie()
plt.show()
| Code Samples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# All API's: http://developer.nytimes.com/
# Article search API: http://developer.nytimes.com/article_search_v2.json
# Best-seller API: http://developer.nytimes.com/books_api.json#/Documentation
# Test/build queries: http://developer.nytimes.com/
#
# Tip: Remember to include your API key in all requests! And their interactive web thing is pretty bad. You'll need to register for the API key.
#
# ### 1) What books topped the Hardcover Fiction NYT best-sellers list on Mother's Day in 2009 and 2010? How about Father's Day?
# +
#API Key: <KEY>
# -
import requests
# Fetch one hardcover-fiction list (Mother's Day 2009) to learn the JSON shape
bestseller_response = requests.get('http://api.nytimes.com/svc/books/v2/lists/2009-05-10/hardcover-fiction?api-key=<KEY>')
bestseller_data = bestseller_response.json()
print("The type of bestseller_data is:", type(bestseller_data))
print("The keys of bestseller_data are:", bestseller_data.keys())
# Exploring the data structure further
bestseller_books = bestseller_data['results']
print(type(bestseller_books))
print(bestseller_books[0])
# Print title/author of every rank-1 entry
for book in bestseller_books:
    #print("NEW BOOK!!!")
    #print(book['book_details'])
    #print(book['rank'])
    if book['rank'] == 1:
        for element in book['book_details']:
            print("The book that topped the hardcover fiction NYT Beststeller list on Mothers Day in 2009 was", element['title'], "written by", element['author'])
# #### After writing a code that returns a result, now automating that for the various dates using a function:
# +
def bestseller(x, y):
    """Print the #1 hardcover-fiction NYT best-seller for date *x* (YYYY-MM-DD), labelled *y* in the output."""
    url = 'http://api.nytimes.com/svc/books/v2/lists/' + x + '/hardcover-fiction?api-key=<KEY>'
    books = requests.get(url).json()['results']
    top_books = [entry for entry in books if entry['rank'] == 1]
    for entry in top_books:
        for element in entry['book_details']:
            print("The book that topped the hardcover fiction NYT Beststeller list on", y, "was",
                  element['title'], "written by", element['author'])
# One call per holiday date of interest
bestseller('2009-05-10', "Mothers Day 2009")
bestseller('2010-05-09', "Mothers Day 2010")
bestseller('2009-06-21', "Fathers Day 2009")
bestseller('2010-06-20', "Fathers Day 2010")
# Alternative: keep a {label: date} dict and loop over it, substituting the
# date into the URL and the label into the print statement on each pass --
# the explicit calls above were simply quicker to write.
# -
# ### 2) What are all the different book categories the NYT ranked in June 6, 2009? How about June 6, 2015?
# +
# STEP 1: Exploring the data structure using just one of the dates from the question
bookcat_response = requests.get('http://api.nytimes.com/svc/books/v2/lists/names.json?published-date=2009-06-06&api-key=<KEY>')
bookcat_data = bookcat_response.json()
print(type(bookcat_data))
print(bookcat_data.keys())
# 'results' is a list of category descriptors for the requested date
bookcat = bookcat_data['results']
print(type(bookcat))
print(bookcat[0])
# -
# STEP 2: Writing a loop that runs the same code for both dates (no function, as only one variable)
# Fix: the question asks about June 6, 2015, but the second date was
# previously '2015-06-15' (June 15), so the wrong list was queried.
dates = ['2009-06-06', '2015-06-06']
for date in dates:
    bookcatN_response = requests.get('http://api.nytimes.com/svc/books/v2/lists/names.json?published-date=' + date + '&api-key=<KEY>')
    bookcatN_data = bookcatN_response.json()
    bookcatN = bookcatN_data['results']
    # Collect the display names of every category ranked on this date
    category_listN = [category['display_name'] for category in bookcatN]
    print(" ")
    print("THESE WERE THE DIFFERENT BOOK CATEGORIES THE NYT RANKED ON", date)
    for cat in category_listN:
        print(cat)
# ### 3) <NAME>'s name can be transliterated many many ways. His last name is often a source of a million and one versions - Gadafi, Gaddafi, Kadafi, and Qaddafi to name a few. How many times has the New York Times referred to him by each of those names?
#
# Tip: Add "Libya" to your search to make sure (-ish) you're talking about the right guy.
# +
# STEP 1a: EXPLORING THE DATA
test_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q=Gaddafi+Libya&api-key=<KEY>')
test_data = test_response.json()
print(type(test_data))
print(test_data.keys())
test_hits = test_data['response']
print(type(test_hits))
print(test_hits.keys())
# +
# STEP 1b: EXPLORING THE META DATA
# response.meta.hits is the total number of matching articles, not page size
test_hits_meta = test_data['response']['meta']
print("The meta data of the search request is a", type(test_hits_meta))
print("The dictionary despot_hits_meta has the following keys:", test_hits_meta.keys())
print("The search requests with the TEST URL yields total:")
test_hit_count = test_hits_meta['hits']
print(test_hit_count)
# +
# STEP 2: BUILDING THE CODE TO LOOP THROUGH DIFFERENT SPELLINGS
despot_names = ['Gadafi', 'Gaddafi', 'Kadafi', 'Qaddafi']
for name in despot_names:
    despot_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q=' + name +'+Libya&api-key=<KEY>')
    despot_data = despot_response.json()
    despot_hits_meta = despot_data['response']['meta']
    despot_hit_count = despot_hits_meta['hits']
    print("The NYT has referred to the Libyan despot", despot_hit_count, "times using the spelling", name)
# -
# ### 4) What's the title of the first story to mention the word 'hipster' in 1995? What's the first paragraph?
#
#
# +
# Restrict the search to articles published in 1995 via the fq filter
hip_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q=hipster&fq=pub_year:1995&api-key=<KEY>')
hip_data = hip_response.json()
print(type(hip_data))
print(hip_data.keys())
# STEP 1: EXPLORING THE DATA STRUCTURE:
hipsters = hip_data['response']
#print(hipsters)
#hipsters_meta = hipsters['meta']
#print(type(hipsters_meta))
# NOTE(review): 'docs' holds only the first page of results (typically 10);
# pagination would be needed to scan every 1995 mention.
hipsters_results = hipsters['docs']
print(hipsters_results[0].keys())
#print(type(hipsters_results))
# +
#STEP 2: LOOPING FOR THE ANSWER:
# BUG FIX: the original printed its "first story" claim every time a new
# earlier date was found, so unsorted results produced several contradictory
# answers. Track the earliest mention and print it exactly once at the end.
# Starting the comparison just past the end of 1995 means any 1995 date wins.
earliest_date = '1996-01-01'
earliest_mention = None
for mention in hipsters_results:
    # ISO-formatted date strings compare correctly as plain strings
    if mention['pub_date'] < earliest_date:
        earliest_date = mention['pub_date']
        earliest_mention = mention
if earliest_mention is not None:
    print("This is the headline of the first text to mention 'hipster' in 1995:", earliest_mention['headline']['main'])
    print("It was published on:", earliest_mention['pub_date'])
    print("This is its lead paragraph:")
    print(earliest_mention['lead_paragraph'])
# -
# ### 5) How many times was gay marriage mentioned in the NYT between 1950-1959, 1960-1969, 1970-1979, 1980-1989, 1990-1999, 2000-2009, and 2010-present?
#
# Tip: You'll want to put quotes around the search term so it isn't just looking for "gay" and "marriage" in the same article.
#
# Tip: Write code to find the number of mentions between Jan 1, 1950 and Dec 31, 1959.
# +
# data structure requested same as in task 3, just this time loop through different date ranges
def countmention(a, b, c):
    """Print how many NYT articles mention "gay marriage" in a date range.

    a -- begin date as a YYYYMMDD string
    b -- end date as a YYYYMMDD string, or the sentinel ' ' (a single
         space) to leave the range open-ended ("present")
    c -- human-readable label for the range, used in the printed message
    """
    if b == ' ':
        marry_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q="gay marriage"&begin_date='+ a +'&api-key=<KEY>')
    else:
        marry_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q="gay marriage"&begin_date='+ a +'&end_date='+ b +'&api-key=<KEY>')
    marry_data = marry_response.json()
    # the total hit count lives in response -> meta -> hits
    marry_hits_meta = marry_data['response']['meta']
    marry_hit_count = marry_hits_meta['hits']
    print("The count for NYT articles mentioning 'gay marriage' between", c, "is", marry_hit_count)

# one (begin, end, label) triple per decade drives the calls, instead of
# repeating the same call line seven times
decades = [
    ('19500101', '19591231', '1950 and 1959'),
    ('19600101', '19691231', '1960 and 1969'),
    ('19700101', '19791231', '1970 and 1979'),
    ('19800101', '19891231', '1980 and 1989'),
    ('19900101', '19991231', '1990 and 1999'),
    ('20000101', '20091231', '2000 and 2009'),
    ('20100101', ' ', '2010 and present'),
]
for begin, end, label in decades:
    countmention(begin, end, label)
# -
# ### 6) What section talks about motorcycles the most?
#
# Tip: You'll be using facets
# +
# facet search: ask the API to bucket motorcycle hits by section_name
moto_response = requests.get('http://api.nytimes.com/svc/search/v2/articlesearch.json?q=motorcycle&facet_field=section_name&facet_filter=true&api-key=<KEY>')
moto_data = moto_response.json()
# 'facets' -> 'section_name' -> 'terms' is a list of {'term': ..., 'count': ...}
moto_facets = moto_data['response']['facets']
moto_sections = moto_facets['section_name']['terms']
# print every section's count (also a sanity check on the winner picked below)
for section in moto_sections:
    print("The section", section['term'], "mentions motorcycles", section['count'], "times.")
# BUG FIX: the original printed inside the running-max loop, announcing a
# "winner" every time a larger count appeared; pick the max once instead.
# max() keeps the first of tied sections, matching the old strict '>' test.
top_section = max(moto_sections, key=lambda s: s['count'])
print(" ")
print("That means the section", top_section['term'], "mentions motorcycles the most, namely", top_section['count'], "times.")
# -
# ### 7) How many of the last 20 movies reviewed by the NYT were Critics' Picks? How about the last 40? The last 60?
#
# Tip: You really don't want to do this 3 separate times (1-20, 21-40 and 41-60) and add them together. What if, perhaps, you were able to figure out how to combine two lists? Then you could have a 1-20 list, a 1-40 list, and a 1-60 list, and then just run similar code for each of them.
# +
# Offsets 0/20/40 page through the review feed in blocks of 20, so after
# each iteration the running totals answer the 20/40/60 questions in turn.
picks_offset_values = [0, 20, 40]
picks_review_list = []
for offset in picks_offset_values:
    picks_response = requests.get('http://api.nytimes.com/svc/movies/v2/reviews/search.json?&offset=' + str(offset) + '&api-key=<KEY>')
    picks_data = picks_response.json()
    last_reviews = picks_data['num_results']
    # count the Critics' Picks on this page of results
    critics_pick_count = 0
    for review in picks_data['results']:
        if review['critics_pick'] == 1:
            critics_pick_count = critics_pick_count + 1
    picks_review_list.append(critics_pick_count)
    # the cumulative sum over all pages fetched so far
    print("Out of the last", last_reviews + offset, "movie reviews,", sum(picks_review_list), "were Critics' picks.")
# -
# ### 8) Out of the last 40 movie reviews from the NYT, which critic has written the most reviews?
# +
#STEP 1: EXPLORING THE DATA STRUCTURE (without the loop)
#critics_response = requests.get('http://api.nytimes.com/svc/movies/v2/reviews/search.json?&offset=0&api-key=<KEY>')
#critics_data = critics_response.json()
#print(critics_data.keys())
#print(critics_data['num_results'])
#print(critics_data['results'])
#print(type(critics_data['results']))
#print(critics_data['results'][0].keys())
#STEP 2: CREATE A LOOP, THAT GOES THROUGH THE SEARCH RESULTS FOR EACH OFFSET VALUE AND STORES THE RESULTS IN THE SAME LIST
#(That list is then passed on to step 3)
# offsets 0 and 20 together cover the last 40 reviews
critics_offset_value = [0, 20]
critics_list = [ ]
for value in critics_offset_value:
    critics_response = requests.get('http://api.nytimes.com/svc/movies/v2/reviews/search.json?&offset=' + str(value) + '&api-key=<KEY>')
    critics_data = critics_response.json()
    critics = critics_data['results']
    # collect one byline entry per review (duplicates kept for counting)
    for review in critics:
        critics_list.append(review['byline'])
#print(critics_list)
# deduplicate so each critic is counted exactly once in the tally below
unique_critics = set(critics_list)
#print(unique_critics)
#STEP 3: FOR EVERY NAME IN THE UNIQUE CRITICS LIST, LOOP THROUGH NON-UNIQUE LIST TO COUNT HOW OFTEN THEY OCCUR
#STEP 4: SELECT THE ONE THAT HAS WRITTEN THE MOST (from the #print statement below, I know it's two people with same score)
max_count = 0
for name in unique_critics:
    # count this critic's occurrences in the full (non-unique) byline list
    name_count = 0
    for critic in critics_list:
        if critic == name:
            name_count = name_count + 1
    if name_count > max_count:
        max_count = name_count
        max_name = name
    # NOTE(review): when a new max is found above, this == branch fires too,
    # so same_name tracks the most recent name whose count equals the max.
    # With exactly two tied winners this surfaces both names, but iteration
    # order over a set is arbitrary — confirm the tie assumption holds.
    if name_count == max_count:
        same_count = name_count
        same_name = name
#print(name, "has written", name_count, "reviews out of the last 40 reviews.")
print(max_name, "has written the most of the last 40 reviews:", max_count)
print(same_name, "has written the most of the last 40 reviews:", same_count)
| foundations-homework/05/.ipynb_checkpoints/homework-05-gruen-nyt-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # An Abbreviated Introduction to Quantum Computing
# For over twenty years inspiring and fascinating work has brought together one of our most fundamental theories of physics - quantum mechanics - and one of our most powerful technologies - computing. Quantum computing is a beautifully interdisciplinary area with two categories of major impact:
#
# 1. It reorients the relationship between physics and computer science: physics does not just place restrictions on what computers we can design, it also grants new power and inspiration.
#
# 2. It promises technology that simulates nature at its most fundamental level, allowing us to access deep problems in quantum chemistry, materials discovery, and more.
#
# To get here, we've needed to change our usual intuition in many ways. This sort of intuition reset will be familiar to many programmers from the first time that they learned object-oriented programming, functional programming, distributed programming, or any one of the other marvelous ways of thinking that have been expressed in code over the years. Like these other paradigms, quantum computing opens up new potential when the basic concepts are grasped. Unlike these other examples, quantum computing goes further, requiring something akin to an extension of probability theory. This extension, and the core of quantum computing, are about linear algebra, so this is where we begin: with linear algebra and probability.
# ## From bit to qubit
# #### Probabilistic bits as vector spaces
# From an operational perspective, a bit is described by the results of measurements of that bit. Let the possible results of measuring a bit (0 or 1) be represented by orthonormal basis vectors \\(\\vec{0}\\) and \\(\\vec{1}\\). We'll call these vectors **outcomes**. These outcomes span a two-dimensional vector space that represents a probabilistic bit. A probabilistic bit can be represented as a vector $$ \vec{v} = a\,\vec{0} + b\,\vec{1},$$ where \\(a\\) represents the probability of the bit being 0 and \\(b\\) represents the probability of the bit being 1. This clearly also requires that \\(a+b=1\\). In this picture the **system** (the probabilistic bit) is a two-dimensional real vector space and a **state** of a system is a particular vector in that vector space.
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

# the basis outcomes |0> and |1> as orthonormal vectors in the plane
outcome_0 = np.array([1.0, 0.0])
outcome_1 = np.array([0.0, 1.0])

# a probabilistic bit: 75% chance of outcome 0, 25% chance of outcome 1
p_zero = 0.75
p_one = 0.25
prob_bit = p_zero * outcome_0 + p_one * outcome_1

# draw the state vector as an arrow from the origin
x_component, y_component = prob_bit
plt.figure()
axes = plt.gca()
axes.quiver(x_component, y_component, angles='xy', scale_units='xy', scale=1)
axes.set_xlim([0, 1])
axes.set_ylim([0, 1])
plt.draw()
plt.show()
# -
# Given some state vector, like the one plotted above, we can find the probabilities associated to each outcome by projecting the vector onto the basis outcomes. This gives us a rule like the following:
# $$ Pr(0) = \vec{v}^T\,\cdot\,\vec{0} = a \\ Pr(1) = \vec{v}^T\,\cdot\,\vec{1} = b,$$
# where Pr(0) and Pr(1) are the probabilities of the 0 and 1 outcomes respectively.
# #### A remark on notation: Dirac Notation
#
# Physicists have introduced a nice notation for these transposes and dot products (Dirac notation) which rewrites
# $$ \vec{v} = |\,v\rangle \\ \vec{v}^T = \langle v\,| \\ \vec{u}^T\cdot\vec{v} = \langle u\,|\,v \rangle.$$
# So we can rewrite out "measurement rule" in this notation as
# $$ Pr(0) = \langle v\,|\,0 \rangle = a \\ Pr(1) = \langle v\,|\,1 \rangle = b.$$
# We will use this notation throughout the rest of this introduction.
# #### Multiple probabilistic bits
#
# This vector space interpretation of a single probabilistic bit can be straightforwardly extended to multiple bits. Let's take two coins as an example (but labelled 0 and 1 instead of H and T since we are programmers). Their states can be represented as
# $$ |\,u\rangle = \frac{1}{2}|\,0_u\rangle + \frac{1}{2}|\,1_u\rangle \\
# |\,v\rangle = \frac{1}{2}|\,0_v\rangle + \frac{1}{2}|\,1_v\rangle, $$
# where \\(1_u\\) represents the 1 outcome on coin \\(u\\). The **combined system** of the two coins has four possible outcomes \\(\\{ 0_u0_v,\\;0_u1_v,\\;1_u0_v,\\;1_u1_v \\}\\) that are the basis states of a larger four-dimensional vector space. The rule for constructing the **combined state** is to take the tensor product of the individual states, e.g.
# $$ |\,u\rangle\otimes|\,v\rangle = \frac{1}{4}|\,0_u0_v\rangle+\frac{1}{4}|\,0_u1_v\rangle+\frac{1}{4}|\,1_u0_v\rangle+\frac{1}{4}|\,1_u1_v\rangle. $$
# The combined space is then also given by the tensor product of the vector spaces spanned by \\(\\{|\\,0_u\\rangle, |\\,1_u\\rangle\\}\\) and \\(\\{|\\,0_v\\rangle, |\\,1_v\\rangle\\}\\) respectively.
#
# We'll talk more about these larger spaces in the quantum case, but it is important to note that not all composite states can be written as tensor products of sub-states. In general, the combined state for \\(n\\) probabilistic bits is a vector of size \\(2^n\\) and is given by \\(\\bigotimes_0^{n-1}|\\,v_i\\rangle\\).
# #### Qubits
#
# Quantum mechanics rewrites these rules some. A quantum bit, called a qubit, is the quantum analog of a bit in that it has two outcomes when it is measured. Similar to the previous section, a qubit can also be represented in a vector space, but with complex coefficients instead of real ones. A qubit **system** is a two-dimensional complex vector space, and the **state** of a qubit is a complex vector in that space. Again we will define a basis of outcomes \\(\\{|\\,0\\rangle, |\\,1\\rangle\\}\\) and let a generic qubit state be written as
# $$\alpha |\,0\rangle + \beta |\,1\rangle.$$
# Since these coefficients can be complex, they cannot immediately be interpreted as probabilities of their associated outcomes. Instead we rewrite the rule for outcomes in the following manner:
# $$ Pr(0) = |\langle v\,|\,0 \rangle|^2 = |\alpha|^2 \\ Pr(1) = |\langle v\,|\,1 \rangle|^2 = |\beta|^2,$$
# and as long as \\(|\\alpha|^2+|\\beta|^2 = 1\\) we are able to recover acceptable probabilities for outcomes based on our new complex vector.
#
# This switch to complex vectors means that rather than representing a state vector in the plane, we instead represent the vector on a sphere (called the Bloch sphere in the quantum mechanics literature). From this perspective the quantum state corresponding to a certain 0 outcome is represented by:
#
# ## ---------IMAGE HERE----------
#
# Multiple qubits are represented in precisely the same way, but taking tensor products of the spaces and states. Thus \\(n\\) qubits have \\(2^n\\) possible states.
# #### An important distinction
#
# An important distinction between the probabilistic case described above and the quantum case is that probabilistic states may just mask out ignorance. For example a coin is physically only 0 or 1 and the probabilistic view merely represents our ignorance about which it actually is. **This is not the case in quantum mechanics**. The quantum states - as far as we know - cannot mask any underlying state. This is what people mean when they say that there is no "hidden variable theory" for quantum mechanics. These probabilistic quantum states are as real as it gets: they don't describe our knowledge of the quantum system, they describe the physical reality of the system.
# #### Some code
# Let's take a look at some code in pyQuil to see how these quantum states play out. We'll explain many more details about operations and pyQuil in the next sections.
# +
# imports for pyQuil (ignore for now)
from pyquil.quil import Program
import pyquil.forest as qvm
quantum_simulator = qvm.Connection()
import numpy as np
# pyQuil is based around operations (or gates) so we'll start with the most
# basic one: the identity operation, called I. I takes one argument, the index
# of the qubit that it should be applied to.
from pyquil.gates import I
# make a quantum program that allocates one qubit (qubit #0) and does nothing to it
p = Program(I(0))
# quantum states are called wavefunctions for historical reasons
# so we can run this basic program on our connection to the simulator.
# This call will return the state of our qubits after we run program p.
# The qubit starts in |0>, so we expect alpha=1, beta=0.
alpha, beta = quantum_simulator.wavefunction(p)
print "Our qubit is in the state alpha={} and beta={}".format(alpha, beta)
# |amplitude|^2 = conj(amplitude) * amplitude gives the outcome probability
print "The probability of measuring the qubit in outcome 0 is {}".format(np.conj(alpha)*alpha)
print "The probability of measuring the qubit in outcome 1 is {}".format(np.conj(beta)*beta)
# +
# we can import the qubit "flip" operation (called X), which we'll talk about in the next section
# and see what that does.
from pyquil.gates import X
# X|0> = |1>, so now all the amplitude should sit on outcome 1
p = Program(X(0))
alpha, beta = quantum_simulator.wavefunction(p)
print "Our qubit is in the state alpha={} and beta={}".format(alpha, beta)
print "The probability of measuring the qubit in outcome 0 is {}".format(np.conj(alpha)*alpha)
print "The probability of measuring the qubit in outcome 1 is {}".format(np.conj(beta)*beta)
# +
# multiple qubits also produce the expected scaling of the state:
# n qubits give a wavefunction with 2**n amplitudes
p = Program(I(0), I(1))
print "The quantum state is of dimension:", len(quantum_simulator.wavefunction(p))
p = Program(I(0), I(1), I(2), I(3))
print "The quantum state is of dimension:", len(quantum_simulator.wavefunction(p))
p = Program()
for x in range(10):
    p.inst(I(x))
print "The quantum state is of dimension:", len(quantum_simulator.wavefunction(p))
# -
# wavefunction(Program) returns a coefficient array that corresponds to outcomes in the following order
print quantum_simulator.probabilities(Program(I(0), I(1)))
# ## Qubit operations
# In the previous section we introduced our first two "operations" the I (or identity operation) and the X operation. Here we'll get into some more details on what these operations are. Quantum states are complex vectors and quantum operations are **unitary matrices**. Applying an operation to a quantum state is the same as multiplying a vector by a certain kind of matrix. These matrices are called **gates**
#
# Since individual qubits are two-dimensional vectors, operations on individual qubits are 2x2 matrices. This is what the identity matrix looks like:
# $$
# I = \left(\begin{matrix}
# 1 & 0\\
# 0 & 1
# \end{matrix}\right)
# $$
# so a program that applies this operation to the zero state is just
# $$ I\,|\,0\rangle = \left(\begin{matrix}
# 1 & 0\\
# 0 & 1
# \end{matrix}\right)\left(\begin{matrix}
# 1 \\
# 0
# \end{matrix}\right) = \left(\begin{matrix}
# 1 \\
# 0
# \end{matrix}\right) = |\,0\rangle$$
# applying I leaves the |0> state unchanged, matching the matrix algebra above
p = Program(I(0))
quantum_simulator.wavefunction(p)
# Other standard gates on single qubits are given by the Pauli operator matrices
# $$
# X = \left(\begin{matrix}
# 0 & 1\\
# 1 & 0
# \end{matrix}\right)
# \qquad
# Y = \left(\begin{matrix}
# 0 & -i\\
# i & 0
# \end{matrix}\right)
# \qquad
# Z = \left(\begin{matrix}
# 1 & 0\\
# 0 & -1
# \end{matrix}\right)
# $$
# apply each Pauli gate to |0> and inspect the resulting wavefunction
from pyquil.gates import X, Y, Z
p = Program(X(0))
print "X|0> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p), "This looks like a bit flip.\n"
p = Program(Y(0))
print "Y|0> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
p = Program(Z(0))
print "Z|0> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
# composing qubit operations is the same as multiplying multiple matrices in sequence
# (gates listed left-to-right in the Program are applied in that order, so the
# overall matrix product reads right-to-left: Z * Y * X)
p = Program(X(0), Y(0), Z(0))
print "ZYX|0> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
# Operations can also be applied to composite states of multiple qubits. One common example is the controlled-not or CNOT gate that works on two qubits. Its matrix form is:
# $$
# CNOT = \left(\begin{matrix}
# 1 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0 \\
# 0 & 0 & 0 & 1 \\
# 0 & 0 & 1 & 0 \\
# \end{matrix}\right)
# $$
# +
from pyquil.gates import CNOT
# CNOT(control, target): prepare each two-qubit basis state with X gates first,
# then apply CNOT. NOTE(review): the |ab> labels below assume a particular
# qubit-ordering convention (X(0) is labelled |01>) — confirm against the
# simulator's amplitude ordering.
p = Program(CNOT(0, 1))
print "CNOT|00> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
p = Program(X(0), CNOT(0, 1))
print "CNOT|01> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
p = Program(X(1), CNOT(0, 1))
print "CNOT|10> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
p = Program(X(0), X(1), CNOT(0, 1))
print "CNOT|11> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
# -
# Another example is the SWAP gate:
# $$
# SWAP = \left(\begin{matrix}
# 1 & 0 & 0 & 0 \\
# 0 & 0 & 1 & 0 \\
# 0 & 1 & 0 & 0 \\
# 0 & 0 & 0 & 1 \\
# \end{matrix}\right)
# $$
# +
from pyquil.gates import SWAP
print "SWAP|01> = ", quantum_simulator.wavefunction(p)
print "With outcome probabilities\n", quantum_simulator.probabilities(p)
# -
# Quantum computing operations are then composed of a series of these complex matrices being applied to complex vectors. The reason that these matrices have to be unitary (i.e. that their complex conjugate transpose is their inverse) is that this preserves that the overall probability of all outcomes always sums to one.
# ## The Quantum Abstract Machine
# We now have enough background to introduce the overall programming model that underlies Quil. This is a hybrid quantum-classical model where some \\(N\\) qubits interact with \\(M\\) classical bits. It looks a little like this:
# ## IMAGE HERE
# These qubits and classical bits come with a defined gate set, e.g. which gate operations can be applied to which qubit. Different kinds of quantum computing hardware places different limitations on what gates can be applied, and the fixed gate set represents these limitations.
#
# The next section on measurements will describe the interaction between the classical and quantum parts of a Quantum Abstract Machine (QAM).
# ## Qubit measurements: classical/quantum interaction
# Measurements have two effects:
# 1. They project the state vector onto one of the basic outcomes
# 2. (and this is optional) They store the outcome of the measurement in a classical bit.
#
# Here's a simple example:
# create a program that stores the outcome of measuring qubit #0 into classical register [0]
classical_register_index = 0
p = Program(I(0)).measure(0, classical_register_index)
# So far we have used the quantum simulator to cheat a little bit. We have actually looked at the wavefunction that comes back, when, on real quantum hardware, we are unable to directly look at the wavefunction. Instead we only have access to the classical bits that are affected by measurements. This functionality is emulated by the `qvm_run` command.
# choose what classical registers to look in at the end of the computation
classical_regs = [0, 1]
quantum_simulator.run(p, classical_regs)
# We see that both registers are zero. However, if we had flipped the qubit before measurement then we obtain:
# +
classical_register_index = 0
p = Program(X(0)) # flip the qubit
p.measure(0, classical_register_index) # measure the qubit
# register [1] is never written, so it should read back as 0
classical_regs = [0, 1]
quantum_simulator.run(p, classical_regs)
# -
# These measurements are deterministic, e.g. if we make them multiple times, we always get the same outcome:
# +
classical_register_index = 0
p = Program(X(0)) # flip the qubit
p.measure(0, classical_register_index) # measure the qubit
classical_regs = [0]
# repeat the whole program 10 times; X|0> always measures as 1
trials = 10
quantum_simulator.run(p, classical_regs, trials)
# -
# However this is not the case in general. Looking at those examples will allow us to understand the way that measurements affect the quantum state as well. We will see that measurements act like projections onto the outcome basis states. First we introduce the Hadamard matrix:
# $$
# H = \frac{1}{\sqrt{2}}\left(\begin{matrix}
# 1 & 1\\
# 1 & -1
# \end{matrix}\right)
# $$
# +
from pyquil.gates import H
# The Hadamard produces what is called a superposition state:
# equal-magnitude amplitudes on |0> and |1>
coin_program = Program(H(0))
print "H|0> = ", quantum_simulator.wavefunction(coin_program)
print "With outcome probabilities\n", quantum_simulator.probabilities(coin_program)
# -
# A qubit in this state will half the time be measured to be in 0 and half the time be measured to be in 1. In essence this qubit truly is a random variable representing a coin. In fact, there are many wavefunctions that will give this same operational outcome. There is a continuous family of states of the form:
# $$
# \frac{1}{\sqrt{2}}\left(|\,0\rangle + e^{i\theta}|\,1\rangle\right)
# $$
# that represent the outcomes of an unbiased coin.
# +
# introduce measurement
classical_reg = 0
coin_program = Program(H(0)).measure(0, classical_reg)
# 10 independent runs of the program, i.e. 10 coin flips
trials = 10
# we see probabilistic results of about half 1's and half 0's
quantum_simulator.run(coin_program, [0], trials)
# -
# Another interesting thing to do is to look at the wavefunction AFTER a measurement.
classical_reg = 0
coin_program = Program(H(0))
print "Before measurement: H|0> = ", quantum_simulator.wavefunction(coin_program)
# after appending the measurement, each wavefunction call below re-runs the
# program, so the post-measurement state is randomly |0> or |1> each time
coin_program.measure(0, classical_reg)
for x in range(5):
    print "After measurement: ", quantum_simulator.wavefunction(coin_program)
# We can clearly see that measurement has an effect on the quantum state independent of what is stored classically. We begin in a state that has a 50-50 probability of being one or zero. After measurement, the state changes into being 100% 0 or 100% 1 depending on which outcome was obtained. This is the phenomenon referred to as wavefunction "collapse". Mathematically, the wavefunction is being projected onto the vector of the obtained outcome.
# +
# This happens with bigger systems too
classical_reg = 0
# this program prepares something called a Bell state (a special kind of "entangled state")
bell_program = Program(H(0), CNOT(0, 1))
print "Before measurement: H|0> = ", quantum_simulator.wavefunction(bell_program)
bell_program.measure(0, classical_reg)
for x in range(5):
print "After measurement: ", quantum_simulator.probabilities(bell_program)
# -
# The above program prepares "entanglement" because even though there are random outcomes, after every measurement both qubits are the same. They are either both 0 or both 1. This kind of special correlation is the sort of thing quantum mechanics is famous for.
# ## ____ <NAME>_______
# ## Classical control
# There are also ways of introducing classical control of quantum programs, i.e. using the state of classical bits to determine what quantum operations to run.
# classically-controlled branching: which quantum program runs next depends on
# the measured value stored in classical register [1]
if_prog = Program(X(7)) # if branch
else_prog = Program(I(7)) # else branch
p = Program(X(0)).measure(0, 1).quil_if(1, if_prog, else_prog) # branch on classical reg [1]
p.measure(7, 7) # measure qubit #7 into classical register [7]
quantum_simulator.run(p, [7]) # run and check register [7]
# A [1] here means that qubit 7 was indeed flipped. ##DIAGRAMS
# ## Example: The Probabilistically Halting Problem
# A fun example is to create a program that has an exponentially decreasing chance of halting, but that may run forever!
# +
# keep flipping a quantum coin into register [1]; quil_while repeats the body
# as long as the register reads 1, so each pass halts with probability 1/2
inside_loop = Program(H(0)).measure(0, 1)
p = Program().quil_while(1, inside_loop)
quantum_simulator.run(p, [1]) # run and check register [1]
# -
# ##IAMGDS
# # Next Steps
#
# We hope that you have enjoyed your whirlwind tour of quantum computing. Lots more resources can be found online and we recommend Nielsen and Chuang's Quantum Computation and Quantum Information as a particularly excellent beginner's resource.
#
| docs/source/intro_to_qc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="NjRhJ-573r8g"
import pandas as pd #importing necessary libraries
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="A-dRovU43r8m" outputId="cf881267-b2dd-4bfd-c6c1-7e76952a3449"
data = pd.read_csv('DataFrame.csv') #Reading csv file and creating DataFrame
print(data.shape)
data.head()
# + id="FLzCWg_g3r8q"
data.drop('Unnamed: 7' , axis = 1 , inplace = True) #Dropping unnecessary Column:-> Unnamed: 7
# + id="uCSRAwj_3r8r"
# normalise all column names to upper case in place
data.rename(columns = str.upper , inplace = True)
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="WKlfXbvG3r8s" outputId="8ee015ea-a500-430b-9bc9-a13035c3d064"
data
# + [markdown] id="VD3JAUN23r8t"
# # CONVERTING STRING INTO DATE TIME SERIES
# + id="O7UfjCPS3r8u"
# NOTE(review): the datetime import below appears unused here — the parsing is
# done by pd.to_datetime; confirm it is not needed by a later cell.
import datetime
data['DATE'] = pd.to_datetime(data['DATE'] , format = "%Y%m%d")
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="FuvyQBwV3r8v" outputId="0613a3cc-ac98-4751-8e3f-b49eefd12d36"
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 436} id="f3xURQ1p3r8w" outputId="dab3f08e-8619-43a6-a0fa-9d46466a6fc3"
import missingno as msno #Checking for the missing entries
msno.bar(data)
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="Q0BEaHXq3r8y" outputId="c33fd056-fd19-40d4-9f03-4588670f9c04"
data.describe() #Statistics of the Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="uxRDT7K83r8z" outputId="0251229b-7eeb-4fe4-8c2a-0027545dda54"
data.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 49} id="HHqLZ_m43r81" outputId="fe75e837-86c4-40df-ebdc-2703365a7d5d"
data[data.duplicated()] #Checking for the duplicated entries
# + id="TcDv2vNG3r82"
# + [markdown] id="cGuTPbTe3r83"
# # CONSIDERING ANOTHER DATASET
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="p9eZLJhV3r84" outputId="d611c0aa-ce8e-4138-f9e4-05cf442186e3"
# second dataset: daily MSFT price data, given the same cleaning steps as above
df2 = pd.read_csv('MSFT.csv')
df2.head()
# + id="v-s6ikLC3r85"
# upper-case the column names, mirroring the first DataFrame
df2.rename(columns = str.upper , inplace = True)
# + id="DWBbzXhK3r85"
df2['DATE'] = pd.to_datetime(df2['DATE'] , format = "%Y-%m-%d")
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="5KYU3hn93r86" outputId="4ad127d0-9399-4f5b-cfe5-b77623fbc7ef"
df2.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 49} id="Cvr7LE7F3r86" outputId="e8e28935-cbab-4757-ea8a-ae894dd8456e"
# empty result means no duplicated rows
df2[df2.duplicated()]
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="alpxXG9N3r87" outputId="a72065c2-134c-4bc1-80ea-6398ed6e26b6"
df2.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="n3Epkzcx3r88" outputId="89d61f81-0e1a-4b9b-95ee-4745b5545172"
# per-column count of missing values
df2.isna().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="f8yBb5cI3r88" outputId="559b375e-1007-48c0-a3df-def5d3bafac4"
df2.info()
# + colab={"base_uri": "https://localhost:8080/"} id="YuTkG3_e3r89" outputId="e0685846-1f22-476d-d20a-3aab18ca7724"
df2.value_counts()
# + id="tcIO6y-F3r89"
# + [markdown] id="pTvCEwya3r8-"
# # EDA ANALYSIS (DataFrame.csv)
# + id="lxRBbFCK3r8-"
import matplotlib.pyplot as plt
import seaborn as sns
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="1a5p5a3S3r8_" outputId="c96c2235-6ea2-4339-eda2-8847f15986cc"
# Work on a copy so the raw `data` frame stays untouched.
d1 = data.copy()
d1
# + id="lTOXpQG0gnKM"
d1.columns
# Multi-index by type, date and time for the EDA below.
d1 = d1.set_index(['TYPE','DATE','TIME'])
# + id="EPEEmJ43gsA5"
# The four price columns that the plots below iterate over.
feature = ['OPEN', 'HIGH', 'LOW', 'CLOSE']
# + colab={"base_uri": "https://localhost:8080/"} id="dGLAlmT7hEMD" outputId="f0f8362d-cf8a-44ef-f78d-9457f4762252"
list(enumerate(feature))
# + colab={"base_uri": "https://localhost:8080/", "height": 750} id="-ML4KQrChMwC" outputId="722a8aeb-cd0a-43df-d85e-4cd7a47dcf2e"
# Histogram of each price column in a 2x2 grid.
# NOTE(review): sns.distplot is deprecated since seaborn 0.11 (removed in
# 0.14); sns.histplot is the modern equivalent -- consider migrating.
plt.figure(figsize=(15,12))
for i in enumerate(feature):
    plt.subplot(2,2,i[0]+1)
    base_color = sns.color_palette()[i[0]]
    sns.distplot(x = d1[i[1]] , kde = False , color = base_color)
    plt.xlabel(i[1]);
# + colab={"base_uri": "https://localhost:8080/", "height": 742} id="8bLKIkpiO2ms" outputId="377627ab-13c9-42ce-b058-5fb5b499bd16"
# Pairwise scatter plots (green above / orange below the diagonal) with
# per-column histograms on the diagonal.
color = sns.PairGrid(d1)
color.map_upper(sns.scatterplot,color = 'green')
color.map_lower(sns.scatterplot,color = 'orange')
color.map_diag(plt.hist)
# + colab={"base_uri": "https://localhost:8080/", "height": 567} id="sdihaLUEQUb4" outputId="9ec279b7-7be7-42db-ded8-041caf20bc67"
d1 = d1.reset_index() # line plot w.r.t DATE
d1 = d1.set_index('DATE')
plt.figure(figsize=(15,8))
sns.lineplot(data = d1)
plt.xticks(rotation = 90)
plt.title('Line Plot with respect to DATE')
plt.xlabel("DATE")
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="5FlVb2YnRkrJ" outputId="42b7781c-22ef-4ed7-9049-2e704168d6fd"
plt.figure(figsize=(15,10)) #creating Box plots of various attributes
for i in enumerate(feature):
    plt.subplot(2,2,i[0]+1)
    base_color = sns.color_palette()[i[0]]
    sns.boxplot(x = d1[i[1]] , color = base_color)
    plt.xlabel(i[1]);
# + id="NfBIycHg3r9D" outputId="94680bf3-c093-4905-f476-40b093570bc1"
sns.heatmap(d1.corr() , vmin = -1 , vmax = 1 , annot = True , cmap = 'Blues') #heat map to determine correlation
# + [markdown] id="_A_96PAs3r9D"
# # EDA ANALYSIS (MSFT.csv)
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="w3aVDnWq3r9E" outputId="67be431e-0b8f-4d22-b82c-260382391a5c"
# Work on a copy so the raw df2 stays untouched.
d2 = df2.copy()
d2
# + id="dTaQ9g0RSW_P"
# Index by date so plots use DATE on the x-axis.
d2 = d2.set_index('DATE')
# + id="n3qoL67LShlF"
# All remaining columns are numeric features to plot.
feat = d2.columns
# + colab={"base_uri": "https://localhost:8080/"} id="5Pfm4hI7St4J" outputId="c0300137-5859-4783-aa2d-5fbfd6782bda"
list(enumerate(feat))
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="_rvS3m4ySLW3" outputId="a5c107c9-4b43-4df0-aec2-e0e78f91bde7"
# NOTE(review): sns.distplot is deprecated; sns.histplot/displot are the
# modern equivalents.
plt.figure(figsize=(15,15)) #creating distribution plots of various attributes
for i in enumerate(feat):
    plt.subplot(3,2,i[0]+1)
    base_color = sns.color_palette()[i[0]]
    sns.distplot(x = d2[i[1]] , color = base_color)
    plt.xlabel(i[1]);
    plt.title(f'Distribution plot of {i[1]}')
# + colab={"base_uri": "https://localhost:8080/", "height": 631} id="I34yRMdN3r9F" outputId="d382cabb-29e7-4c78-a628-47c178908d34"
# Line plot of every attribute against the DATE index.
# BUG FIX: d2 was already re-indexed by 'DATE' in an earlier cell, so calling
# d2.set_index('DATE') again raised KeyError ('DATE' is no longer a column);
# the redundant call has been removed.
plt.figure(figsize=(15,10)) #plot of line plot btwn Date and various other attributes
sns.lineplot(data = d2)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="RwxQhpaaTygA" outputId="93e0e9e5-4c38-4b3b-e783-bf6f682a605f"
sns.pairplot(data = d2) #Plots b/w various attributes
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="GOnGmbGRUls9" outputId="34b7c8fe-f6ac-4f6b-b1b8-f0829a912d0c"
# One box plot per column, 3x2 grid.
plt.figure(figsize=(15,10)) #creating Box plots of various attributes
for i in enumerate(feat):
    plt.subplot(3,2,i[0]+1)
    base_color = sns.color_palette()[i[0]]
    sns.boxplot(x = d2[i[1]] , color = base_color)
    plt.xlabel(i[1]);
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="wZTUBeCyU-Kv" outputId="f4d3d23b-b683-4579-a478-54f1a3302ae3"
# One violin plot per column, 3x2 grid.
plt.figure(figsize=(15,10)) #creating Violin plots of various attributes
for i in enumerate(feat):
    plt.subplot(3,2,i[0]+1)
    base_color = sns.color_palette()[i[0]]
    sns.violinplot(x = d2[i[1]] , color = base_color)
    plt.xlabel(i[1]);
# + colab={"base_uri": "https://localhost:8080/", "height": 329} id="R9Eo9fU83r9I" outputId="a28a7d16-e7fd-47ac-acfc-725fecfd910b"
sns.heatmap(d2.corr() , vmin = -1 , vmax = 1 , annot = True , cmap = 'Paired_r') #heat map to determine correlation
# + [markdown] id="CwfuYYiv3r9J"
# # AS SEEN THE PRESENCE OF OUTLIERS IN MSFT, SO SUPRESSING THEIR EFFECT
# -
# Suppress the effect of outliers: for each column, values above the Tukey
# upper whisker (Q3 + 1.5*IQR) are capped at that whisker.
# FIX: the whiskers were previously printed and then re-entered by hand as
# hard-coded literals (86.059..., 130092200.0, ...); computing them in a loop
# removes the copy-paste duplication and keeps the capping correct if the
# underlying CSV changes.
for col in ['OPEN', 'CLOSE', 'HIGH', 'LOW', 'ADJ CLOSE', 'VOLUME']:
    q1 = d2[col].quantile(0.25)
    q3 = d2[col].quantile(0.75)
    iqr = q3 - q1
    upper_whisker = q3 + (iqr * 1.5)
    lower_whisker = q1 - (iqr * 1.5)
    print(upper_whisker)
    print(lower_whisker)
    # Only the high side is capped (matching the original analysis); values
    # below the lower whisker are left as-is.
    d2.loc[d2[col] >= upper_whisker, col] = upper_whisker
d2.describe()
l = ['OPEN' , 'HIGH' , 'LOW' , 'CLOSE' , 'ADJ CLOSE' , 'VOLUME']
list(enumerate(l))
# Box plots again to verify that the upper outliers are gone after capping.
plt.figure(figsize=(15,12))#creating Box plots of various columns after removing outliers
for i in enumerate(l):
    plt.subplot(3,2,i[0]+1)
    base_color = sns.color_palette()[i[0]]
    sns.boxplot(x = d2[i[1]] , color = base_color)
    plt.xlabel(i[1]);
| .ipynb_checkpoints/STOCK_PRICE_PREDICTION_TECHNOCOLABS-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Stimulus preprocessing
#
# In this notebook, we will read the annotations for each file and create a dataset for all the images. Then we will go on to create different versions of our dataset as below.
#
#
from __future__ import division
from packages import *
# %matplotlib inline
import json
import random
import math
import tensorflow.keras.backend as K
K.set_floatx('float32')
from tensorflow.keras.applications import vgg16
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from get_activations import get_activations
# Pipeline switches controlling which stages of this notebook actually run.
subject = 1
run = True        # run the prediction/extraction cells
build = False     # rebuild the stimulus-label dictionary from raw files
predict = True    # run VGG16 on every stimulus
save = False      # persist results as pickles
load = True       # load previously pickled results instead of recomputing
check = False     # run sanity checks on the label dictionary
export_precision = np.float32  # dtype used when exporting sampled activations
if load and not build:
    stim_labels = load_pickle('stimulus-labels.p')
    print('stimulus labels loaded, length: {}'.format(len(stim_labels)))
def batchify(l, batch_size):
    """Split sequence *l* into consecutive chunks of at most *batch_size* items.

    :param l: sliceable sequence (e.g. list)
    :param batch_size: maximum chunk length, must be > 0
    :return: list of chunks, the last one possibly shorter
    """
    # range() works on both Python 2 and 3; the previous xrange() call is
    # Python-2-only and breaks if this jupytext script is run under Python 3.
    batches = []
    for i in range(0, len(l), batch_size):
        batches.append(l[i:i+batch_size])
    return batches
def preprocess_image(f, target_size, batch_dim_is_front=True):
    """Load stimulus image *f*, resize to *target_size* and VGG16-preprocess it.

    Resolves the image path via the module-level `stim_labels` dict and the
    `IMAGES_DIR` lookup. Returns a (1, H, W, 3) array ready for the VGG16
    model (the leading axis is the batch dimension).
    """
    img = stim_labels[f]
    # FIX: removed the unused local `f_type` (the type is read directly below).
    f_path = os.path.join(IMAGES_DIR[img['type']], f)
    original = load_img(f_path, target_size=target_size)
    img_arr = img_to_array(original)
    # VGG16 expects a batched input, hence the added leading axis.
    return vgg16.preprocess_input(np.expand_dims(img_arr, 0))
def get_vgg_predictions():
    """Run VGG16 on every stimulus and return {filename: prediction array}.

    Relies on the module-level `stim_labels` dict and an initialised `vgg`
    model. Prints a Python-2-style progress counter every 100 images.
    """
    vgg_predictions = {}
    i = 0
    for f in stim_labels:
        vgg_predictions[f] = vgg.predict(preprocess_image(f, (224,224)))
        i += 1
        if i % 100 == 0:
            print i ,
    return vgg_predictions
# Instantiate the ImageNet-pretrained VGG16 model only when predictions are needed.
if run and predict:
    vgg = vgg16.VGG16(weights = 'imagenet')
def create_stim_dicts():
    """Build {filename: {'labels': [...], 'type': ...}} for every stimulus.

    Merges three stimulus sources: ImageNet images (labels from the synset
    file), COCO images (labels from the annotation pickle) and scene images
    (label derived from the filename). Uses Python 2 print statements.
    """
    labels_dir = os.path.join(STIM_DIR, 'Image_Labels/')
    imagenet_labels = os.path.join(labels_dir, 'imagenet_final_labels.txt')
    output = {}
    prefix_labels = {}
    # Each line: "<synset-id> label1, label2, ..." -> map synset prefix to labels.
    with open(imagenet_labels) as f:
        for line in f:
            x = line.strip().split(' ', 1)
            labels = x[1].split(', ')
            filename = x[0]
            prefix_labels[filename] = {'labels': labels, 'type': 'imagenet'}
    imagenet_dir = os.path.join(STIM_DIR, 'Scene_Stimuli', 'Presented_Stimuli/', 'ImageNet/')
    for x in os.listdir(imagenet_dir):
        # Skip macOS resource-fork files and anything that is not a JPEG.
        if x.startswith('._') or not x.endswith('.JPEG'):
            continue
        prefix = x.split('_')[0]
        output[x] = prefix_labels[prefix]
    print '{} imagenet files'.format(len(output))
    # Map COCO category ids to human-readable category names.
    cat_lookup = {}
    with open(os.path.join(MISC_DIR, 'image_info_test2014.json')) as json_data:
        cats = json.load(json_data)
        for cat in cats['categories']:
            cat_lookup[cat['id']] = cat['name']
    with open(os.path.join(labels_dir, 'coco_final_annotations.pkl'), 'rb') as f:
        coco_labels = pickle.load(f)
    for key in coco_labels:
        # NOTE(review): '%012.1d' sets a precision, which (per printf rules)
        # makes the 0-flag pad with spaces instead of zeros; COCO filenames
        # are zero-padded to 12 digits, so this looks like it should simply
        # be '%012d' -- confirm against the actual data.
        filename = 'COCO_train2014_{}.jpg'.format(('%012.1d' % key))
        # Collect the distinct category ids over all annotations of the image.
        all_labels = set()
        for entry in coco_labels[key]:
            all_labels.add(entry['category_id'])
        labels = []
        for l in all_labels:
            labels.append(cat_lookup[l])
        output[filename] = {'labels': labels, 'type': 'coco'}
    print '{} coco files'.format(len(coco_labels))
    scene_dir = os.path.join(STIM_DIR, 'Scene_Stimuli', 'Presented_Stimuli/', 'Scene/')
    count = 0
    for x in os.listdir(scene_dir):
        if x.startswith('._') or (not x.endswith('.jpg') and not x.endswith('.jpeg')):
            continue
        # Scene label = filename with digits stripped, e.g. 'beach2.jpg' -> 'beach'.
        label = ''.join([ i for i in x.split('.')[0] if not i.isdigit()])
        output[x] = {'labels': [label], 'type': 'scenes'}
        count+=1
    print '{} scene files'.format(count)
    return output
# Build the stimulus dictionary from the raw label files when requested.
if build:
    stim_data = create_stim_dicts()
if save and build:
    save_pickle(stim_data, 'stimulus-labels')
# #### Let's make sure that everything is in order with our final dictionary.
if check:
    # NOTE(review): `output` is not defined at this scope -- this sanity check
    # apparently predates the create_stim_dicts() refactor and would raise a
    # NameError if check=True; it presumably should use `stim_data` (or
    # `stim_labels`) instead. It only runs when check=True.
    for i in xrange(100000):
        key = random.choice(output.keys())
        assert key.endswith('.jpg') or key.endswith('.JPEG') or key.endswith('.jpeg'), 'Key: {}'.format(key)
        x = output[key]
        assert type(x) is dict
        assert type(x['labels']) is list
        assert type(x['type']) is str
# # Building versions of the stimulus dataset
#
# ## Different datasets we will consider are:
#
# ##### (a) VGG16 outputs (100d vector) for each stimuli
# ##### (b) one-hot VGG16 outputs for each stimuli
# ##### (c) stimuli images resized and preprocessed
#
# ### (a) VGG16 outputs
# Stage (a): raw VGG16 class-probability outputs for every stimulus.
if run and predict:
    vgg_preds = get_vgg_predictions()
if save and predict and not load:
    save_pickle(vgg_preds, 'vgg-preds')
# ### (b) one-hot VGG16 outputs
if load and not predict:
vgg_preds = load_pickle('vgg-preds.p')
x = random.choice(vgg_preds.keys())
print x
plt.imshow(preprocess_image(x, (224, 224))[0,])
vgg16.decode_predictions(vgg_preds[x])[0][0:3]
assert one_hot_vgg_preds[x] == np.argmax(vgg_preds[x])
one_hot_vgg_preds = {}
for f in stim_labels:
y = np.argmax(vgg_preds[f][0,])
one_hot_vgg_preds[f] = y
if save:
save_pickle(one_hot_vgg_preds, 'one_hot-vgg-preds')
# ### (c) stimulus image processed, resized and flattened
#
# resized to (50, 50, 3) and processed with the vgg16 preprocess tool
# Stage (c): resize each stimulus to 50x50, apply VGG16 preprocessing and
# flatten to a 7500-d vector (50*50*3), keyed by filename.
target_size = (50, 50)
stim_processed_resized_flat= {}
i = 0
for f in stim_labels:
    img = stim_labels[f]
    f_path = os.path.join(IMAGES_DIR[img['type']], f)
    original = load_img(f_path, target_size=target_size)
    img_arr = img_to_array(original)
    processed_img_arr = vgg16.preprocess_input(np.expand_dims(img_arr, 0))
    stim_processed_resized_flat[f] = processed_img_arr[0].flatten()
    i+=1
    if i%100 == 0:
        # Python 2 progress marker: one '*' per 100 processed images.
        print '*' ,
if save:
    save_pickle(stim_processed_resized_flat, 'stimulus-processed-resized-flat')
# ### (d) VGG16 layer activations
#
# extract layer activations from VGG model when ran with the stimulus
# Which participant/session the activation extraction below refers to.
subject = 1
session = 1
# + active=""
# NAME SHAPE #NODES
# block3_conv1 (1, 56, 56, 256) 802816
# block3_conv2 (1, 56, 56, 256) 802816
# block3_conv3 (1, 56, 56, 256) 802816
#
# block4_conv1 (1, 28, 28, 512) 401408
# block4_conv2 (1, 28, 28, 512) 401408
# block4_conv3 (1, 28, 28, 512) 401408
#
# block5_conv1 (1, 14, 14, 512) 100352
# block5_conv2 (1, 14, 14, 512) 100352
# block5_conv3 (1, 14, 14, 512) 100352
# -
# Target number of activation samples per layer (spread across channels).
sample_num = 1000
def generate_random_sample_indices(shape):
    """Draw random spatial coordinates per channel for an activation tensor.

    For a tensor of shape (1, dim, dim, channels), returns a
    (channels, samples_per_channel, 2) array of (row, col) coordinates,
    approximately `sample_num` in total. Sampling is with replacement, so
    duplicate coordinates are possible. Relies on the module-level `arrays`
    lookup of np.arange(dim**2) grids and on `sample_num`; true division is
    active via the __future__ import at the top of the file. Python 2 (xrange).
    """
    channels = shape[-1]
    sample_per_channel = int(math.ceil(sample_num/channels))
    dim = shape[1]
    indexes = []
    for channel in xrange(channels):
        channel_samples = []
        for _ in range(sample_per_channel):
            # Pick a random cell of the dim x dim grid and recover its
            # (row, col) position via np.where on the reshaped index grid.
            ix = np.where(arrays[dim].reshape(dim, dim) == np.random.choice(arrays[dim]))
            channel_samples.append((ix[0][0], ix[1][0]))
        indexes.append(channel_samples)
    return np.array(indexes)
# Stage (d): extract sampled activations from the conv layers of VGG16
# blocks 3-5 for every stimulus image.
layers = [layer.name for layer in vgg.layers if 'conv' in layer.name and 'block1' not in layer.name and 'block2' not in layer.name]
# One probe image to discover each layer's activation shape.
a = get_activations(vgg, preprocess_image('childsroom2.jpg', (224, 224)), K, layers)
# Re-purpose `layers` as {layer_name: activation_shape} (shadows the list above).
layers = {}
for key in a.keys():
    layers[key] = a[key].shape
layers
vgg_layer_activations_link = {}
# Flat index grids used by generate_random_sample_indices for each spatial dim.
arrays = {14: np.arange(14**2), 28: np.arange(28**2), 56: np.arange(56**2)}
vgg_activation_masks = {}
vgg_activations = {}
# Fix one random coordinate mask per layer so all stimuli are sampled at the
# same positions.
for layer in layers:
    vgg_activation_masks[layer] = generate_random_sample_indices(layers[layer])
    vgg_activations[layer]= []
i = 0
for f in stim_labels:
    # Maps filename -> row index into each vgg_activations[layer] list.
    vgg_layer_activations_link[f] = i
    i+=1
    a = get_activations(vgg, preprocess_image(f, (224, 224)), K, layers.keys())
    for layer in a:
        layer_samples = []
        for channel in xrange(layers[layer][-1]):
            for sample in xrange(vgg_activation_masks[layer].shape[1]):
                layer_samples.append(a[layer][0, vgg_activation_masks[layer][channel, sample, 0], vgg_activation_masks[layer][channel, sample, 1], channel])
        vgg_activations[layer].append(np.array(layer_samples).astype(export_precision))
    # Python 2 progress output: '*' every 10 images, percentage every 100.
    if i%10 == 0:
        print '*',
    if i%100 == 0:
        print '', i*100/5000 ,
| src/b4_stim_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re

# Pattern for one policy line, e.g. "1-3 a: abcde" ->
# (lower, upper, symbol, password).
# FIX: raw string so \d and \w reach the regex engine verbatim; a plain
# literal emits invalid-escape-sequence warnings on modern Python.
p = re.compile(r'(\d+)-(\d+) (\w): (\w+)')
# FIX: the file handle was previously opened without ever being closed;
# the with-block releases it deterministically.
with open("input/2.txt") as f:
    content = f.readlines()
# +
# part 1
def _symbol_count_ok(entry):
    # Valid when the symbol's occurrence count lies within [lower, upper].
    lower, upper, symbol, password = p.match(entry).groups()
    return int(lower) <= password.count(symbol) <= int(upper)

count = sum(1 for entry in content if _symbol_count_ok(entry))
print(count)
# +
# part 2
count = 0
for entry in content:
    first, second, symbol, password = p.match(entry).groups()
    # 1-based positions at which the symbol occurs.
    hits = {idx + 1 for idx, ch in enumerate(password) if ch == symbol}
    # Exactly one of the two given positions must contain the symbol (XOR).
    if (int(first) in hits) != (int(second) in hits):
        count += 1
print(count)
| 2020/day2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# #### **Title**: HSpan Element
#
# **Dependencies**: Bokeh
#
# **Backends**: [Matplotlib](../matplotlib/HSpan.ipynb), [Plotly](../plotly/HSpan.ipynb), [Bokeh](./HSpan.ipynb)
# +
import numpy as np
import holoviews as hv
from holoviews import opts
hv.extension('bokeh')
# -
# The ``HSpan`` element is a type of annotation that marks a range along the y-axis. Here is an ``HSpan`` element that marks the standard deviation in a collection of points:
# +
# 500 samples: xs ~ N(0,1), ys correlated with xs; the HSpan marks the band
# mean(y) +/- one standard deviation.
xs = np.random.normal(size=500)
ys = np.random.normal(size=500) * xs
ymean, ystd = ys.mean(), ys.std()
points = hv.Points((xs,ys))
hspan = hv.HSpan(ymean-ystd, ymean+ystd)
# Overlay the blue band on the grey point cloud.
hspan.opts(color='blue') * points.opts(color='#D3D3D3')
# -
# Like all annotation-like elements `HSpan` is not included in the calculation of axis ranges by default, but can be included by setting `apply_ranges=True`:
(hv.HSpan(1, 3) * hv.HSpan(5, 8)).opts(
    opts.HSpan(apply_ranges=True))
# For full documentation and the available style and plot options, use ``hv.help(hv.HSpan).``
| examples/reference/elements/bokeh/HSpan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import altair as alt
# RKI COVID-19 case data (one row per reporting unit/day), dates parsed up front.
covid = pd.read_csv('https://npgeo-corona-npgeo-de.hub.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0.csv', parse_dates=['Meldedatum'])
# German population figures. NOTE(review): loaded but not used in this chunk.
einwohner = pd.read_csv('../Einwohnerzahlen.csv')
covid.head()
# +
# NeuerTodesfall in {0, 1}: deaths that count towards the current total.
aktuell_mask = covid.NeuerTodesfall.isin([0,1])
covid[aktuell_mask].AnzahlTodesfall.sum()
# +
# NeuerTodesfall in {-1, 1}: rows that are new or retracted relative to the
# previous report.
aktuell_mask = covid.NeuerTodesfall.isin([-1,1])
covid[aktuell_mask].head()
# -
# Daily case/death counts per age group.
by_age = covid.set_index('Meldedatum').groupby('Altersgruppe')[['AnzahlFall', 'AnzahlTodesfall']].resample('D').sum()
by_age.reset_index().head()
# NOTE(review): altair's interactive() takes an optional chart *name* (str)
# as first argument; passing True here looks unintended -- confirm, the call
# below uses the no-argument form.
alt.Chart(by_age.reset_index()).mark_bar().encode(
    x='Meldedatum',
    y=alt.Y('AnzahlFall'),
    color="Altersgruppe",
).interactive(True).properties(width=800)
alt.Chart(by_age.reset_index()).mark_bar().encode(
    x='Meldedatum',
    y=alt.Y('AnzahlTodesfall'),
    color="Altersgruppe",
).interactive().properties(width=800)
| notebooks/Plot_AnzahlFall.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Description
#
# The scripts computes the measures "coverage" and "overflow" to compare computed shots with the ground truth shots. It gives indication about how well the visual change classifier works.
#
# The script takes the visual change dataset and considers the root layer information, only. Each label as "visual change" from one frame to another is considered as shot boundary on the root layer. These shots are called "ground truth shots".
#
# It is also created a visual change classifier on basis of the labeling of one participant's session and computes "visual change" labels on all participants' sessions on one Web site. These labels are also considered for the root layer, only, and used again as shot boundaries. These shots are called "computed shots".
# "Coverage" and "overflow" measures are inspired from:
#
# `<NAME> and <NAME>, "Systematic evaluation of logical story unit segmentation," in IEEE Transactions on Multimedia, vol. 4, no. 4, pp. 492-499, Dec. 2002.
# doi: 10.1109/TMM.2002.802021
# `
# ### Imports
# +
# Modules
import pandas as pd
import numpy as np
import os.path
from collections import defaultdict
# Custom modules
from classifier import Classifier
# -
# # Settings
# +
# Settings
min_obs_extent = 32 # observations with overlap width/height <= this are dropped
baseline_feature_name = 'pixel_diff_count_bgr'  # single feature used by the 'baseline' classifier
classifier_names = ['svc', 'forest', 'baseline']
# Defines
dataset_visual_change_dir = r'C:/StimuliDiscoveryData/Dataset_visual_change' # SET ME!
participants = ['p1', 'p2', 'p3', 'p4']
training_participants = ['p1']  # sessions whose labels train the classifier
exclude_training_from_test = False
# Categories: three Web sites per category.
shopping = ['walmart', 'amazon', 'steam']
news = ['reddit', 'cnn', 'guardian']
health = ['nih', 'webmd', 'mayo']
cars = ['gm', 'nissan', 'kia']
categories = {'shopping': shopping, 'news': news, 'health': health, 'cars': cars}
# Drop and filter features
features_drop = [
    'bag_of_words_vocabulary_size',
    'optical_flow_angle_min',
    'optical_flow_angle_max',
    'optical_flow_magnitude_min']
features_filter = [ # not applied if empty (all features but dropped ones are then considered)
    'edge_change_fraction',
    'mssim_b',
    'mssim_g',
    'mssim_r',
    'pixel_diff_acc_b',
    'pixel_diff_acc_bgr',
    'pixel_diff_acc_g',
    'pixel_diff_acc_gray',
    'pixel_diff_acc_hue',
    'pixel_diff_acc_lightness',
    'pixel_diff_acc_r',
    'pixel_diff_acc_saturation',
    'pixel_diff_count_b',
    'pixel_diff_count_bgr',
    'pixel_diff_count_g',
    'pixel_diff_count_gray',
    'pixel_diff_count_hue',
    'pixel_diff_count_lightness',
    'pixel_diff_count_r',
    'pixel_diff_count_saturation',
    'psnr',
    'sift_match',
    'sift_match_0',
    'sift_match_16',
    'sift_match_256',
    'sift_match_4',
    'sift_match_512',
    'sift_match_64',
    'sift_match_distance_max',
    'sift_match_distance_mean',
    'sift_match_distance_min',
    'sift_match_distance_stddev',
    'sift_match_spatial'
    ]
# -
# # Session
# Session class holds one site visit by a participant
class Session:
    """One site visit by one participant: features, meta data and labels.

    Loads the four per-session CSVs, drops/filters feature columns according
    to the module-level `features_drop`/`features_filter` settings, removes
    observations whose overlap extent is below `min_obs_extent`, and cleans
    the -1 infinity sentinel in 'optical_flow_magnitude_max'.
    """

    # Constructor
    def __init__(self, participant, site):
        # Store some members of general interest
        self.p = participant
        self.s = site
        # Load dataset (header of features dataset has extra comma)
        base = dataset_visual_change_dir + '/' + participant + '/' + site
        self.f_df = pd.read_csv(base + '_features.csv')
        self.mf_df = pd.read_csv(base + '_features_meta.csv')
        self.l1_df = pd.read_csv(base + '_labels-l1.csv', header=None, names=['label'])
        self.m_df = pd.read_csv(base + '_meta.csv')
        # Drop columns of non-interest
        self.f_df = self.f_df.drop(features_drop, axis=1)
        # Filter for columns of interest
        if len(features_filter) > 0:
            self.f_df = self.f_df.filter(items=features_filter, axis=1)
        # Drop observations whose overlap extent is below the minimum
        width_idxs = self.mf_df[self.mf_df['overlap_width'] <= min_obs_extent].index
        height_idxs = self.mf_df[self.mf_df['overlap_height'] <= min_obs_extent].index
        drop_idxs = list(set(width_idxs) | set(height_idxs))
        self.f_df = self.f_df.drop(drop_idxs, axis=0)
        self.mf_df = self.mf_df.drop(drop_idxs, axis=0)
        self.l1_df = self.l1_df.drop(drop_idxs, axis=0)
        # Replace some values
        if 'optical_flow_magnitude_max' in self.f_df.columns:
            # Infinity datapoints are encoded as -1; replace them with the
            # column maximum observed in this session.
            max_value = self.f_df['optical_flow_magnitude_max'].max()
            # BUG FIX: the previous boolean-mask assignment
            #     self.f_df[self.f_df['optical_flow_magnitude_max'] == -1] = max_value
            # overwrote ALL feature columns of the matched rows with
            # max_value; only the 'optical_flow_magnitude_max' column should
            # be replaced.
            mask = self.f_df['optical_flow_magnitude_max'] == -1
            self.f_df.loc[mask, 'optical_flow_magnitude_max'] = max_value
# # Compute shots
def compute_shots(df, frame_count):
    """Turn root-layer visual-change labels into a list of shots.

    Each root-layer row with label 1 marks a shot boundary ending at its
    'prev_video_frame'; the final shot always ends at frame_count - 1.
    Returns a list of inclusive (start_frame, end_frame) tuples covering
    frames 0 .. frame_count - 1.
    """
    # Keep only root-layer rows that were labeled as a visual change.
    boundaries = df[(df['layer_type'] == 'root') & (df['label'] == 1)]
    end_frames = list(boundaries['prev_video_frame']) + [frame_count - 1]
    shots = []
    start_frame = 0
    for end_frame in end_frames:
        shots.append((start_frame, end_frame))
        start_frame = end_frame + 1
    return shots
# # Compute coverage
def compute_coverage(gt_shots, c_shots, total_frames=None):
    """Compute the coverage measure between ground-truth and computed shots.

    Coverage (Vendrig & Worring, 2002): for each ground-truth shot, the
    fraction of its frames covered by the single best-matching computed
    shot, weighted by the shot's share of the whole screencast.

    :param gt_shots: list of inclusive (start_frame, end_frame) ground-truth shots
    :param c_shots: list of inclusive (start_frame, end_frame) computed shots
    :param total_frames: total frame count of the screencast; defaults to the
        module-level `frame_count` global for backward compatibility with the
        existing two-argument callers below
    :return: coverage in [0, 1]
    """
    if total_frames is None:
        total_frames = frame_count  # global set by the evaluation loop
    overall_cover = 0.0
    # Go over ground truth shots
    for (gt_start, gt_end) in gt_shots:
        shot_length = gt_end - gt_start + 1
        # Find the computed shot with maximum frame overlap. Shots are
        # inclusive intervals, so the overlap is computed in O(1) instead of
        # materialising and intersecting frame sets.
        max_cover = 0
        for (c_start, c_end) in c_shots:
            cover = min(gt_end, c_end) - max(gt_start, c_start) + 1
            if cover > max_cover:
                max_cover = cover
        overall_cover += (float(max_cover) / float(shot_length)) * (shot_length / total_frames)
    return overall_cover
# # Compute overflow
def compute_overflow(gt_shots, c_shots, total_frames=None):
    """Compute the overflow measure between ground-truth and computed shots.

    Overflow (Vendrig & Worring, 2002): for each ground-truth shot, how much
    the computed shots that touch it spill over into the neighbouring
    ground-truth shots, weighted by the shot's share of the screencast.

    :param gt_shots: list of inclusive (start_frame, end_frame) ground-truth shots
    :param c_shots: list of inclusive (start_frame, end_frame) computed shots
    :param total_frames: total frame count of the screencast; defaults to the
        module-level `frame_count` global for backward compatibility with the
        existing two-argument callers below
    :return: overflow >= 0 (0 means no spill-over at all)
    """
    if total_frames is None:
        total_frames = frame_count  # global set by the evaluation loop

    def _overlap(a, b):
        # Number of frames shared by inclusive intervals a and b (O(1),
        # replacing the original set-intersection approach).
        return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)

    overall_over = 0.0
    for i, gt in enumerate(gt_shots):
        shot_length = gt[1] - gt[0] + 1
        prev_gt = gt_shots[i - 1] if i > 0 else None
        next_gt = gt_shots[i + 1] if i + 1 < len(gt_shots) else None
        # Denominator: total length of the neighbouring ground-truth shots.
        denom = 0.0
        if prev_gt is not None:
            denom += prev_gt[1] - prev_gt[0] + 1
        if next_gt is not None:
            denom += next_gt[1] - next_gt[0] + 1
        nom = 0.0
        for c in c_shots:
            # Only computed shots that intersect the current ground-truth
            # shot can overflow from it.
            if _overlap(gt, c) == 0:
                continue
            spill_prev = _overlap(prev_gt, c) if prev_gt is not None else 0
            spill_next = _overlap(next_gt, c) if next_gt is not None else 0
            nom += max(spill_prev, spill_next)
        # BUG FIX: with a single ground-truth shot there are no neighbours
        # and denom is 0; the original raised ZeroDivisionError here. No
        # neighbours means there is nothing to overflow into, so the shot
        # contributes 0.
        if denom > 0:
            overall_over += (nom / denom) * (shot_length / total_frames)
    return overall_over
# # Implementation
# +
sites_measures = {} # for each site, holds a dict mapping from training session to tuple of coverage and overflow
# NOTE(review): the loop variable `name` is never used for the category and
# is shadowed by the classifier-name loop below -- consider renaming one.
for name, sites in categories.items():
    for site in sites:
        print('Site: ' + site)
        measures_per_training = {} # maps from training session to tuple of coverage and overflow
        # Go over participants chosen for training
        for training_p in training_participants:
            print('Training with: ' + training_p)
            coverages = defaultdict(list)
            overflows = defaultdict(list)
            # Create training data
            training_session = Session(training_p, site)
            X_train = training_session.f_df.values
            y_train = training_session.l1_df.values.flatten()
            idx_baseline = training_session.f_df.columns.get_loc(baseline_feature_name)
            # Go over participants that are not used for training
            test_participants = list(participants)
            if exclude_training_from_test:
                test_participants.remove(training_p)
            for p in test_participants:
                # Load session to work on
                session = Session(p, site)
                # frame_count is a module-level global also read by
                # compute_coverage / compute_overflow.
                frame_count = int(session.m_df['screencast_frame_total_count'])
                # Create classifier
                classifier = Classifier()
                # Create dataframes with ground truth labels and the computed labels
                gt_df = pd.concat([session.mf_df, session.l1_df], axis=1)
                c_df = pd.concat([session.mf_df, session.l1_df], axis=1)
                # Apply classifier to create computed labels
                pred = classifier.apply(X_train, y_train, session.f_df.values, idx_baseline)
                # Go over available classifiers and perform computation for each
                for name in classifier_names:
                    c_df['label'] = pred[name] # overwrite labels for computed shots
                    # Compute shots
                    gt_shots = compute_shots(gt_df, frame_count)
                    c_shots = compute_shots(c_df, frame_count)
                    # Compute measurements
                    coverage = compute_coverage(gt_shots, c_shots)
                    overflow = compute_overflow(gt_shots, c_shots)
                    coverages[name].append(coverage)# dict that maps classifier name to coverage
                    overflows[name].append(overflow)# dict that maps classifier name to overflow
                # Output information (disabled debug print, kept for reference)
                '''
                print(
                p + ':'
                + ' coverage = ' + '{:1.2f}'.format(coverage)
                + ', overflow = ' + '{:1.2f}'.format(overflow)
                + ', gt_shots = ' + '{:3}'.format(len(gt_shots))
                + ', c_shots = ' + '{:3}'.format(len(c_shots)))
                '''
            # Store results for one session as training
            measures_per_training[training_p] = (coverages, overflows)
            '''
            print(
            '$' + '{:1.2f}'.format(np.mean(coverages['svc'])) + '\\pm' + '{:1.2f}'.format(np.std(coverages['svc']) + '$')
            + ', overflow = ' + '{:1.2f}'.format(np.mean(overflows['svc'])) + '\\pm' + '{:1.2f}'.format(np.std(overflows['svc'])))
            '''
        # Store results across all sessions as training
        sites_measures[site] = measures_per_training
# Print sites
for site, _ in sites_measures.items():
    print(site, end=' ')
print()
print('Coverage')
# Print coverages: per site, mean +/- std over the training sessions,
# formatted as LaTeX table cells (svc & forest & baseline); index [0] of the
# measures tuple is the coverage dict.
for _, measures_per_training in sites_measures.items(): # go over sites
    svc_means = []
    forest_means = []
    baseline_means = []
    for training_p in training_participants:
        svc_means.append(np.mean(measures_per_training[training_p][0]['svc']))
        forest_means.append(np.mean(measures_per_training[training_p][0]['forest']))
        baseline_means.append(np.mean(measures_per_training[training_p][0]['baseline']))
    print(
        '$' + '{:1.2f}'.format(np.mean(svc_means)) + '\\pm' + '{:1.2f}'.format(np.std(svc_means)) + '$'
        + ' & '
        + '$' + '{:1.2f}'.format(np.mean(forest_means)) + '\\pm' + '{:1.2f}'.format(np.std(forest_means)) + '$'
        + ' & '
        + '$' + '{:1.2f}'.format(np.mean(baseline_means)) + '\\pm' + '{:1.2f}'.format(np.std(baseline_means)) + '$'
        + ' & ')
print()
print('Overflow')
# Print overflows: same layout; index [1] of the measures tuple is the
# overflow dict.
for _, measures_per_training in sites_measures.items(): # go over sites
    svc_means = []
    forest_means = []
    baseline_means = []
    for training_p in training_participants:
        svc_means.append(np.mean(measures_per_training[training_p][1]['svc']))
        forest_means.append(np.mean(measures_per_training[training_p][1]['forest']))
        baseline_means.append(np.mean(measures_per_training[training_p][1]['baseline']))
    print(
        '$' + '{:1.2f}'.format(np.mean(svc_means)) + '\\pm' + '{:1.2f}'.format(np.std(svc_means)) + '$'
        + ' & '
        + '$' + '{:1.2f}'.format(np.mean(forest_means)) + '\\pm' + '{:1.2f}'.format(np.std(forest_means)) + '$'
        + ' & '
        + '$' + '{:1.2f}'.format(np.mean(baseline_means)) + '\\pm' + '{:1.2f}'.format(np.std(baseline_means)) + '$'
        + ' & ')
| tools/Notebooks/ShotAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Note:** Priority is measured from 0 being first priority.<br><br>
#
# | [Title:] [Priority:] [Estimate:] |
# |-|
# | **User Story:**<br>As a [description of user],<br>I want [functionality]<br>so that [benefit]. |
# | **Acceptance Criteria:**<br>*Given* [how things begin]<br>*When* [action taken]<br>*Then* [outcome of taking action] |
# | [Title: *Player*] [Priority: *0*] [Estimate: *Play the Game*] |
# |-|
# | **User Story:**<br><br>As a player, <br>they can set names/icons and play the game <br>so that the game can determine the winner. |
# | **Acceptance Criteria:**<br><br>*Given* a grid with empty slots. <br>*When* a player move, their icon is shown in the given slot. <br>*Then* if a player has a four in a row, they win. <br>If a player blocks a move then the dominating pieces in the row will disappear. <br>If all slots are full and no winner, then draw. |
#
# | [Title: *Players Class*] [Priority: *2*] [Estimate: *Length of Game*] |
# |-|
# | ***User Story:***<br>As a Players class, <br>it stores player names and icons<br>so that the game can communicate to the users. |
# | ***Acceptance Criteria:*** <br><br>**Given** user input from the player<br>**When** the game loads<br>**Then** the user can interact with the<br>Grid class to start playing the game. |
#
# | [Title: *GameStatus Class*] [Priority: *1*] [Estimate: *Length of Game*] |
# |-|
# | ***User Story:***<br>As a GameStatus class, <br>it controls the status of the game<br>so the game continues or ends appropriately. |
# | ***Acceptance Criteria:*** <br><br>**Given** the Grid class<br>**When** the grid has been updated<br>**Then** inform the players if there<br>is a winner or not, and who it might be. |
#
# | [Title: *Grid Class*] [Priority: *3*] [Estimate: *Length of Game*] |
# |-|
# | ***User Story:***<br>As a Grid class, <br>it creates a grid layout and displays the grid to the players<br>so the players can see what moves the other player made. |
# | ***Acceptance Criteria:*** <br><br>**Given** player icons and player moves<br>**When** player sets up icons and makes a move<br>**Then** grid will store grid with the players <br>icons where they moved. |
| C4-JupyterNB/.ipynb_checkpoints/Connect Four - User Story-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 32-bit
# language: python
# name: python38332bit19a78f05c6cb434680bc6f0964f662fa
# ---
# # Challenge 012 - Passport Processing
# This challenge is taken from Advent of Code 2020 - Day 4: Passport Processing (https://adventofcode.com/2020/day/4).
#
# ## Problem - Part 1
#
# You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world.
#
# It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary.
#
# Due to some questionable network security, you realize you might be able to solve both of these problems at the same time.
#
# The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
#
# - byr (Birth Year)
# - iyr (Issue Year)
# - eyr (Expiration Year)
# - hgt (Height)
# - hcl (Hair Color)
# - ecl (Eye Color)
# - pid (Passport ID)
# - cid (Country ID)
#
# Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
#
# Here is an example batch file containing four passports:
# ```
# ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
# byr:1937 iyr:2017 cid:147 hgt:183cm
#
# iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
# hcl:#cfa07d byr:1929
#
# hcl:#ae17e1 iyr:2013
# eyr:2024
# ecl:brn pid:760753108 byr:1931
# hgt:179cm
#
# hcl:#cfa07d eyr:2025 pid:166559648
# iyr:2011 ecl:brn hgt:59in
# ```
#
# The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
#
# The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
#
# The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
#
# According to the above rules, your improved system would report 2 valid passports.
#
# Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
#
# ## Solution
#
# > To run this script, you need to save ```input-day-04.txt``` and run it in your local machine.
# +
from json import dumps

# The seven required passport fields; "cid" (Country ID) is optional per
# the puzzle rules, so it is deliberately excluded here.
REQUIRED_FIELDS = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}


def parse_passports(batch_text):
    """Parse a raw batch file into a list of {field: value} dicts.

    Passports are separated by blank lines; key:value pairs within a
    passport are separated by spaces or newlines.  str.split() with no
    argument handles both separators at once and discards empty tokens,
    so a batch file ending in a newline no longer raises IndexError
    (the original indexed split(":")[1] on an empty string).
    """
    passports = []
    for record in batch_text.split("\n\n"):
        fields = {}
        for token in record.split():
            if ":" not in token:
                continue  # skip malformed tokens defensively
            key, _, value = token.partition(":")
            fields[key] = value
        if fields:
            passports.append(fields)
    return passports


def has_required_fields(passport):
    """Return True when every required field is present (cid may be absent).

    Uses a subset test, which also tolerates unknown extra fields; for
    the puzzle input (only the eight known fields) this is equivalent to
    the original exact-set comparison.
    """
    return REQUIRED_FIELDS <= set(passport)


if __name__ == "__main__":
    total_valid = 0
    with open("input-day-04.txt", "r") as file:
        passports = parse_passports(file.read())
    for item in passports:
        if has_required_fields(item):
            print(dumps(item) + " >> True")
            total_valid += 1
    print("Total Valid: {}".format(total_valid))
# -
# ## Problem - Part 2
#
# The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick!
#
# You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
#
# - byr (Birth Year) - four digits; at least 1920 and at most 2002.
# - iyr (Issue Year) - four digits; at least 2010 and at most 2020.
# - eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
# - hgt (Height) - a number followed by either cm or in:
# * If cm, the number must be at least 150 and at most 193.
# * If in, the number must be at least 59 and at most 76.
# - hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
# - ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
# - pid (Passport ID) - a nine-digit number, including leading zeroes.
# - cid (Country ID) - ignored, missing or not.
#
# Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
#
# ```
# byr valid: 2002
# byr invalid: 2003
#
# hgt valid: 60in
# hgt valid: 190cm
# hgt invalid: 190in
# hgt invalid: 190
#
# hcl valid: #123abc
# hcl invalid: #123abz
# hcl invalid: 123abc
#
# ecl valid: brn
# ecl invalid: wat
#
# pid valid: 000000001
# pid invalid: 0123456789
# Here are some invalid passports:
#
# eyr:1972 cid:100
# hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
#
# iyr:2019
# hcl:#602927 eyr:1967 hgt:170cm
# ecl:grn pid:012533040 byr:1946
#
# hcl:dab227 iyr:2012
# ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
#
# hgt:59cm ecl:zzz
# eyr:2038 hcl:74454a iyr:2023
# pid:3556412378 byr:2007
# ```
#
# Here are some valid passports:
# ```
# pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
# hcl:#623a2f
#
# eyr:2029 ecl:blu cid:129 byr:1989
# iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
#
# hcl:#888785
# hgt:164cm byr:2001 iyr:2015 cid:88
# pid:545766238 ecl:hzl
# eyr:2022
#
# iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
# ```
#
# Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid?
#
# ## Solution
# +
import re
from json import dumps
file_string = ""
expected_fields = ["byr", "iyr", "eyr", "hgt" , "hcl" ,"ecl" ,"pid", "cid"]
expected_fields2 = ["byr", "iyr", "eyr", "hgt" , "hcl" ,"ecl" ,"pid"]
total_valid = 0
def _year_in_range(value, low, high):
    """Shared rule: *value* must be exactly four digits and fall within
    [low, high] inclusive.

    The isdigit() guard returns False (instead of raising ValueError,
    as the original did) for non-numeric input such as 'abcd'.
    """
    return len(value) == 4 and value.isdigit() and low <= int(value) <= high


def validate_birth_year(birth_year):
    """byr (Birth Year): four digits; at least 1920 and at most 2002."""
    return _year_in_range(birth_year, 1920, 2002)


def validate_issue_year(issue_year):
    """iyr (Issue Year): four digits; at least 2010 and at most 2020."""
    return _year_in_range(issue_year, 2010, 2020)


def validate_expiration_year(expiration_year):
    """eyr (Expiration Year): four digits; at least 2020 and at most 2030."""
    return _year_in_range(expiration_year, 2020, 2030)
def validate_height(height):
    """hgt (Height): a number followed by a unit.

    cm -> number in [150, 193]; in -> number in [59, 76].  Returns False
    (instead of raising ValueError, as the original did) when the
    numeric part is empty or non-numeric, e.g. 'cm' or 'abcin'.
    """
    unit, number = height[-2:], height[:-2]
    if not number.isdigit():
        return False
    if unit == "cm":
        return 150 <= int(number) <= 193
    if unit == "in":
        return 59 <= int(number) <= 76
    return False
def validate_hair_color(hair_color):
    """hcl (Hair Color): '#' followed by exactly six characters 0-9 or a-f.

    Fixes two defects in the original re.match("#[0-9a-fA-F]{6}", ...):
    the pattern was unanchored at the end, so trailing garbage such as
    '#123abc99' was accepted, and uppercase A-F was allowed although the
    rule permits only 0-9 and a-f.  re.fullmatch anchors both ends.
    """
    return re.fullmatch(r"#[0-9a-f]{6}", hair_color) is not None
def validate_eye_color(eye_color):
    """ecl (Eye Color): exactly one of amb blu brn gry grn hzl oth."""
    return eye_color in {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}
def validate_passport_id(passport_id):
    """pid (Passport ID): a nine-digit number, including leading zeroes.

    Uses re.fullmatch: in the original pattern "^[0-9]{9}$" the '$'
    anchor also matches just before a trailing newline, so
    '123456789\\n' validated incorrectly.
    """
    return re.fullmatch(r"[0-9]{9}", passport_id) is not None
with open("input-day-04.txt", "r") as file:
# Read file as string
file_string = file.read()
# Split content based on blank line
file_list = file_string.split("\n\n")
file_line_list_dict = []
# Loop throught list and build a dictionary
for file_line in file_list:
file_line_list = []
# Split line based on new lines
file_row_list = file_line.split("\n")
for row in file_row_list:
# Split item based on space
temp_line_list = row.split(" ")
file_line_list.extend(temp_line_list)
# Create a dictionary for each line
file_line_dict = {}
# Loop throught list and build a dictionary
for file_line_item in file_line_list:
# Split item based on colon sign
file_line_item_list = file_line_item.split(":")
# Assign key and value to dictionary
file_line_dict[file_line_item_list[0]] = file_line_item_list[1]
# Add dictionary to list
file_line_list_dict.append(file_line_dict)
for item in file_line_list_dict:
if set(item.keys()) == set(expected_fields) or set(item.keys()) == set(expected_fields2):
if validate_birth_year(item["byr"]) and validate_issue_year(item["iyr"]) and validate_expiration_year(item["eyr"]) and validate_height(item["hgt"]) and validate_hair_color(item["hcl"]) and validate_eye_color(item["ecl"]) and validate_passport_id(item["pid"]):
print(item['pid'])
# print(dumps(item) + " >> True")
total_valid = total_valid + 1
print("Total Valid: {}".format(total_valid))
| challenges/012-Passport_Processing/012-Day4_Passport_Processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
from bs4 import BeautifulSoup
from html.parser import HTMLParser
import re
import pandas as pd
import numpy as np
from collections import defaultdict
import time
import pickle
from IPython.display import clear_output
# +
# Functions for getting links
def get_drug_exp_list_url(soup):
    """
    Return the full URL listing every report for a drug/experience
    combination, or None when the page has no 'next' pagination link.
    The pagination query parameters are rewritten (Start=0&Max=10000) so
    that a single page displays all links at once.
    """
    next_marker = soup.find(alt='next')
    if next_marker is None:
        return None
    relative_url = next_marker.find_previous('a')['href']
    prefix = relative_url.split('Start')[0]
    return 'https://erowid.org/' + prefix + 'Start=0&Max=10000'
def get_drug_exp_links(soup):
    """
    Return the href of every anchor on the page that points at an
    individual experience report (i.e. links containing 'exp.php').
    """
    report_links = []
    for anchor in soup.find_all('a'):
        if 'exp.php' in str(anchor):
            report_links.append(anchor['href'])
    return report_links
# +
# Function for souping a page
def get_soup(url):
    """Fetch *url* and return the page parsed as a BeautifulSoup document.

    The original wrapped only the BeautifulSoup() call in a bare
    try/except, but parsing response.text virtually never raises -- the
    real failure modes are HTTP error statuses (silently parsed as if
    they were content pages) and network exceptions from requests.get
    (which propagated uncaught either way).  Report HTTP-level failures
    explicitly, but still return the parsed soup so existing callers
    that inspect the page text keep working.
    """
    headers = requests.utils.default_headers()
    response = requests.get(url, headers=headers)
    if not response.ok:
        print('Denied! Response code: ', str(response.status_code))
    return BeautifulSoup(response.text, 'html.parser')
# +
# Functions for scraping text and other fields from experience pages
def get_title(soup):
    """
    Return the report title (third '-'-separated segment of the page
    <title>, with surrounding quotes/spaces stripped), or None when it
    cannot be parsed.
    """
    try:
        raw = soup.find('title').text
        return raw.split('-')[2].strip("''").strip(" '")
    except:
        return None
def get_body(soup, demarc='xq'*50):
    """
    Isolate the report body text.  The page wraps the body between
    'Start Body' / 'End Body' markers; each marker node is replaced with
    a sentinel string that cannot occur in the page, and the text
    between the two sentinels is returned.  Returns None on any failure.
    demarc: arbitrary string not present in the soup (default 'xq' * 50)
    """
    try:
        for marker in ('Start Body', 'End Body'):
            soup.find(text=re.compile(marker)).replace_with(demarc)
        return soup.text.split(demarc)[1]
    except:
        return None
def get_bodyweight(soup):
    """
    Return the author's body weight as an int (leading number of the
    'bodyweight-amount' element), or NaN when unavailable.
    """
    try:
        amount_text = soup.find(class_='bodyweight-amount').text
        return int(amount_text.split(' ')[0])
    except:
        return np.nan
def get_age(soup):
    """
    Return the author's age as an int, or NaN when the footer field is
    absent or non-numeric (the original used a nested try for the int()
    conversion; one handler covers both failure modes identically).
    """
    try:
        raw = soup.find(class_='footdata').find(text=re.compile('Age at time of experience'))
        return int(raw.split(':')[1].strip())
    except:
        return np.nan
def get_gender(soup):
    """
    Return 'M'/'F' for Male/Female, None when the value is absent or
    reads 'not given'/'not specified', or the raw string for any other
    (non-binary) gender value.
    """
    try:
        raw = soup.find(class_='footdata').find(text=re.compile('Gender')).split(':')[1].strip()
    except:
        return None
    if raw in ('Male', 'Female'):
        return raw[0]
    if 'not' in raw.lower():
        return None
    return raw
def get_year(soup):
    """
    Return the year the experience report was submitted as an int
    (thousands separators removed), or NaN when unavailable.
    """
    try:
        raw = soup.find(class_='footdata').find(text=re.compile('Exp Year'))
        return int(raw.split(':')[1].strip().replace(',', ''))
    except:
        return np.nan
def get_views(soup):
    """
    Return the page view count as an int (thousands separators removed),
    or NaN when unavailable.
    """
    try:
        raw = soup.find(class_='footdata').find(text=re.compile('Views'))
        return int(raw.split(':')[1].strip().replace(',', ''))
    except:
        return np.nan
def get_exp_id(soup):
    """
    Return the numeric experience id (used downstream to detect
    duplicate reports), or NaN when unavailable.
    """
    try:
        raw = soup.find(class_='footdata').find(text=re.compile('ExpID'))
        return int(raw.split(':')[1].strip())
    except:
        return np.nan
# +
# Main scraping script
main_url_prefix = 'https://www.erowid.org/experiences/subs/'
# Bookkeeping of drug categories already scraped; not read below.
# Fixed two defects: a missing comma between 'MDMA' and 'Mushrooms'
# (implicit string concatenation produced 'MDMAMushrooms') and the
# 'Opiods' typo.
collected = ['Amphetamines', 'Belladonna', 'Cocaine', 'Datura', 'DMT', 'Ketamine',
             'LSD', 'MDMA', 'Mushrooms', 'Nitrous_Oxide', 'Opioids', 'PCP', 'Peyote', 'Salvia_divinorum']
#drug_list = ['Amphetamines', 'Benzodiazepines', 'Belladonna', 'Cannabis', 'Cocaine',
#             'Datura', 'DMT', 'Ketamine', 'LSD', 'MDMA', 'Mushrooms', 'Nitrous_Oxide',
#             'Opioids', 'PCP', 'Peyote', 'Salvia_divinorum']
#exp_list = ['General', 'Bad_Trips', 'Glowing_Experiences', 'Mystical_Experiences']
drug_list = ['Cannabis']
exp_list = ['Glowing_Experiences', 'Mystical_Experiences']
### DRUG LOOP ###
for drug in drug_list:
    dicts = []  # collects one attribute-dict per experience report
    #delays = [7, 4, 6, 2, 19, 10] # list of possible delay times after each scrape
    delays = [.5]
    ### EXPERIENCE TYPE LOOP ###
    for exp in exp_list:
        print('Now scraping ' + drug + ': ' + exp)
        exp_url_prefix = 'https://erowid.org/experiences/'
        main_url = main_url_prefix + 'exp_' + drug + '_' + exp + '.shtml'
        drug_exp_soup = get_soup(main_url)
        if drug_exp_soup.text[:9] == 'not found':
            # BUG FIX: the original used a bare `next`, which merely
            # evaluates the builtin and does NOT skip the iteration.
            continue
        exp_list_url = get_drug_exp_list_url(get_soup(main_url))
        if exp_list_url is None:
            exp_list_url = main_url  # no 'next' link: main page already lists everything
            exp_url_prefix = 'http://erowid.org/'  # links on main_url are rooted differently
        exp_links = get_drug_exp_links(get_soup(exp_list_url))
        count50 = 0     # reports scraped since the last checkpoint
        save_count = 0  # number of pickled checkpoints written
        ### EXPERIENCE REPORT LOOP ###
        for ix, link in enumerate(exp_links):
            count50 += 1
            exp_report_link = exp_url_prefix + link
            exp_report_soup = get_soup(exp_report_link)  # the actual experience report soup
            d = defaultdict()  # attributes of this report
            # Body text plus metadata fields scraped from the report page
            d['body'] = get_body(exp_report_soup)
            d['drug'] = drug
            d['experience'] = exp
            d['title'] = get_title(exp_report_soup)
            d['bodyweight'] = get_bodyweight(exp_report_soup)
            d['age'] = get_age(exp_report_soup)
            d['gender'] = get_gender(exp_report_soup)
            d['year'] = get_year(exp_report_soup)
            d['views'] = get_views(exp_report_soup)
            d['exp_id'] = get_exp_id(exp_report_soup)
            dicts.append(d)
            if count50 < 50 and (ix + 1) != len(exp_links):
                # Normal case: pick a random short delay between scrapes
                delay = np.random.choice(delays)
                print(ix)
            else:
                # Every 50th scrape, or after the last link: checkpoint
                # the collected dicts to a pickle, then take a longer
                # break to avoid getting booted off.  (The else branch is
                # exactly the negation of the if-condition above, since
                # count50 is reset before it can exceed 50.)
                count50 = 0
                save_count += 1
                experiences_df = pd.DataFrame(dicts)
                dicts = []
                # BUG FIX: close the checkpoint file (the original leaked
                # the handle from an inline open()).
                with open('data/' + drug + '_' + exp + '_' + str(save_count)
                          + '_raw_df' + '.pkl', 'wb') as pkl_file:
                    pickle.dump(experiences_df, pkl_file)
                delay = 1
                clear_output()
                print('Scraped ' + str(save_count) + ' experience sets from ' + drug + ': ' + exp)
            # NOTE(review): as in the original, sleep only fires on the
            # checkpoint break; the random per-scrape delay is chosen but
            # never slept -- confirm whether that is intended.
            if delay == 1:
                print('Taking break...')
                time.sleep(delay)
# -
# --- Ad-hoc inspection cells -------------------------------------------
# These bare expressions display leftover state from the scraping loop
# above (useful after an interrupted run); they assume the loop already
# executed in this kernel session.
ix
drug_exp_soup.text
exp_links
exp
# Spot-check one of the pickled checkpoint files written by the scraper.
df = pd.read_pickle('data/PCP_Mystical_Experiences_1_raw_df.pkl')
df
d
exp_report_link
get_soup(exp_report_link)  # re-fetches the last report page (network call)
exp_list_url
exp_links
exp_report_link
| code/erowid_scraping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: HuggingFace
# language: python
# name: huggingface
# ---
# This is the notebook for experimenting with a zero-shot sliding-window Question-Answering model approach to the Modern Slavery Hackathon classification task: For each Modern Slavery Statement document, classifying whether or not the document gives evidence that the company has provided any training about modern slavery to their employees.
#
# The motivation is to use transfer learning from models pre-trained to extract relevant answers (as a span) from a document (context) in order to automate the identification of which small subsets of the documents might be relevant to modern slavery training. These smaller subsets can then make the job of human-labelling additional documents more efficient or be fed into another model which can only handle a limited number of tokens (perhaps a transformer trained for sequence classification).
#
# The idea behind the approach is to use a pretrained QA model (one trained on SQuAD v2 such that it can return a "no span found" result) to ask questions of the documents. Since most documents in the dataset are longer than the maximum input length, a sliding window approach is used: after the entire document is tokenized, the QA model is run on successive windows, each slid by stride=128 tokens (~1/4th of the window size). All spans returned by the QA model are recorded in a new dataframe (df_with_segments.parquet). A notebook for visualizing the results of the sliding-window QA model approach is available: 'QA results viewer.ipynb'
#
# Six questions are trialed to see which one(s) provide the best results:
# - 'Is there training provided?'
# - 'Is there training already in place?'
# - 'Has training been done?'
# - 'Is training planned?'
# - 'Is training in development?'
# - 'What kind of training is provided?'
#
# Note: as this is a zero-shot approach, we can ignore the labels as we will not be doing any training. Therefore, the labeled (train) and unlabeled (test) data will be concatenated into a single input dataframe.
# +
import pandas as pd
import re
from datetime import datetime, timedelta
# the custom classes used in this notebook are defined in SlidingWindowTransformersQA.py:
from SlidingWindowTransformersQA import SliderModel, SliderDocument
# +
# import the data, strip away the labels and combine into a single df
# (zero-shot approach: the labels are never used for training, so the
# labeled and hidden splits can be processed together)
df_labeled = pd.read_csv('train (3).csv',index_col=0)
df_hidden = pd.read_csv('test (3).csv',index_col=0)
# Tag each row with its origin so the two splits can be separated later
df_labeled['source'] = 'labeled'
df_hidden['source'] = 'hidden'
df = pd.concat([df_labeled[['source','TEXT']],
                df_hidden[['source','TEXT']]],axis=0).reset_index()
# any characters repeated more than 4 times will be shortened to 4 repetitions:
# (cheap noise cleanup for PDF-extracted text, e.g. '.......' -> '....')
# https://stackoverflow.com/questions/10072744/remove-repeating-characters-from-words
df['TEXT'] = df['TEXT'].apply(lambda x: re.sub(r'(.)\1{4,}', r'\1\1\1\1', str(x)))
df
# +
# Model chosen based on SQuAD v2 leaderboards December 2020 which favored ALBERT-based models.
# A base-sized model was selected for speed in this Proof-of-Concept. An ALBERT XLarge model could be substituted for better
# performance at the cost of inference time
model_name = 'twmkn9/albert-base-v2-squad2'
#instantiate the slider model:
# stride = 128: each successive window slides by 128 tokens (~1/4 of the
# window size per the notebook intro above)
slider_model = SliderModel(model_name = model_name,
                           max_batch_size = 8,
                           stride = 128)
# +
# The six trial questions asked of every document (see intro above)
questions=['Is there training provided?',
           'Is there training already in place?',
           'Has training been done?',
           'Is training planned?',
           'Is training in development?',
           'What kind of training is provided?'
           ]
slider_model.set_questions(questions)
# Create placeholder columns for each question that will receive the answer-spans identified by the sliding window model:
for question in questions:
    df[question]=[[] for _ in range(len(df))] # each cell starts as its own empty list (not one shared list)
# Create a dataframe to receive the tokens and token-class-labels as the rows are processed:
df_tokens = pd.DataFrame(columns=['tokens','token classes'], index=df.index)
# -
# Applied to each document (row) of the module-level dataframes: runs the
# sliding-window QA model over the row's text and stores the tokens,
# token-class labels, and answer-spans in df_tokens / df.
def process_row(row_id, slider_model):
    """
    Classify one document with the sliding-window QA model.

    Side effects (writes in place, returns nothing):
      - df_tokens row `row_id` receives the token list and per-token
        class labels.
      - df row `row_id` has each extracted answer span appended to the
        list stored under the corresponding question column.
    """
    document = SliderDocument(slider_model=slider_model,
                              text=df.loc[row_id, 'TEXT'])
    # slide the QA model's window across the tokenized document
    document.classify_tokens()
    df_tokens.loc[row_id, 'tokens'] = document.tokens[0].tolist()
    df_tokens.loc[row_id, 'token classes'] = document.token_classes.tolist()
    # collect the identified answer spans, one df column per question
    for response in document.filtered_text():
        question_column = response['question']
        for segment in response['text segments']:
            df.loc[row_id, question_column].append(segment)
# +
# process all rows!
start_time=datetime.now()
for i in range(len(df)):
    row_start = datetime.now()
    process_row(row_id = i, slider_model = slider_model)
    # Rewrite both parquet files after every row -- presumably so partial
    # results survive an interruption of this long-running loop
    df.to_parquet('df_with_segments.parquet')
    df_tokens.to_parquet('df_token_classes.parquet')
    print(f'row {i}: Row time = {datetime.now() - row_start}. Total time elapsed = {datetime.now() - start_time}')
# -
# The results!
pd.concat([df,df_tokens],axis=1)  # column-wise join of answer spans and token labels, displayed for inspection
# All documents have now been processed through the sliding window model with the results stored in the two parquet files (df_with_segments.parquet and df_token_classes.parquet). I have visualized the results in a separate notebook: 'QA results viewer.ipynb' Feel free to hop over there to view them.
| QA-sliding window.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # simple assignment: kernel state persists across cells
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # prints 15 using the variable defined in the previous cell
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately slow cell -- try interrupting it with the stop button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These includes:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # stdout appears as plain text in the output area
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
from __future__ import print_function  # no-op on Python 3; retained for Python 2 notebooks
print('hi, stderr', file=sys.stderr)  # stderr is rendered in the output area as well
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)  # each value is displayed as soon as it is produced
    time.sleep(0.5)  # the pause makes the asynchronous streaming visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # long output: the output area can be collapsed or scrolled
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # simple assignment: kernel state persists across cells
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # prints 15 using the variable defined in the previous cell
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` run the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately slow cell -- try interrupting it with the stop button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These includes:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # stdout appears as plain text in the output area
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
from __future__ import print_function  # no-op on Python 3; retained for Python 2 notebooks
print('hi, stderr', file=sys.stderr)  # stderr is rendered in the output area as well
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)  # each value is displayed as soon as it is produced
    time.sleep(0.5)  # the pause makes the asynchronous streaming visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # long output: the output area can be collapsed or scrolled
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # simple assignment: kernel state persists across cells
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # prints 15 using the variable defined in the previous cell
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` run the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately slow cell -- try interrupting it with the stop button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These includes:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # stdout appears as plain text in the output area
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
from __future__ import print_function  # no-op on Python 3; retained for Python 2 notebooks
print('hi, stderr', file=sys.stderr)  # stderr is rendered in the output area as well
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)
    time.sleep(0.5)  # brief pause so the incremental (asynchronous) display of output is visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # 50 output lines: enough to demonstrate collapsing/scrolling of the output area
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # plain assignment: produces no output, but `a` persists in the kernel's state
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # reads `a` from the previous cell; prints 15
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` runs the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately long-running cell: leaves time to press the interrupt (stop) button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These include:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # written to sys.stdout
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
# NOTE(review): a __future__ import is only legal at the very top of a module.
# Each notebook cell is compiled separately, so this works interactively, but
# the exported .py script raises SyntaxError here if executed as a single file.
from __future__ import print_function
print('hi, stderr', file=sys.stderr)  # written to sys.stderr
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)
    time.sleep(0.5)  # brief pause so the incremental (asynchronous) display of output is visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # 50 output lines: enough to demonstrate collapsing/scrolling of the output area
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # plain assignment: produces no output, but `a` persists in the kernel's state
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # reads `a` from the previous cell; prints 15
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` runs the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately long-running cell: leaves time to press the interrupt (stop) button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These include:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # written to sys.stdout
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
# NOTE(review): a __future__ import is only legal at the very top of a module.
# Each notebook cell is compiled separately, so this works interactively, but
# the exported .py script raises SyntaxError here if executed as a single file.
from __future__ import print_function
print('hi, stderr', file=sys.stderr)  # written to sys.stderr
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)
    time.sleep(0.5)  # brief pause so the incremental (asynchronous) display of output is visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # 50 output lines: enough to demonstrate collapsing/scrolling of the output area
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # plain assignment: produces no output, but `a` persists in the kernel's state
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # reads `a` from the previous cell; prints 15
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` runs the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately long-running cell: leaves time to press the interrupt (stop) button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These include:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # written to sys.stdout
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
# NOTE(review): a __future__ import is only legal at the very top of a module.
# Each notebook cell is compiled separately, so this works interactively, but
# the exported .py script raises SyntaxError here if executed as a single file.
from __future__ import print_function
print('hi, stderr', file=sys.stderr)  # written to sys.stderr
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)
    time.sleep(0.5)  # brief pause so the incremental (asynchronous) display of output is visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # 50 output lines: enough to demonstrate collapsing/scrolling of the output area
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # plain assignment: produces no output, but `a` persists in the kernel's state
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # reads `a` from the previous cell; prints 15
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` runs the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately long-running cell: leaves time to press the interrupt (stop) button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These include:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # written to sys.stdout
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
# NOTE(review): a __future__ import is only legal at the very top of a module.
# Each notebook cell is compiled separately, so this works interactively, but
# the exported .py script raises SyntaxError here if executed as a single file.
from __future__ import print_function
print('hi, stderr', file=sys.stderr)  # written to sys.stderr
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)
    time.sleep(0.5)  # brief pause so the incremental (asynchronous) display of output is visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # 50 output lines: enough to demonstrate collapsing/scrolling of the output area
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # plain assignment: produces no output, but `a` persists in the kernel's state
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # reads `a` from the previous cell; prints 15
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` runs the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately long-running cell: leaves time to press the interrupt (stop) button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These include:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # written to sys.stdout
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
# NOTE(review): a __future__ import is only legal at the very top of a module.
# Each notebook cell is compiled separately, so this works interactively, but
# the exported .py script raises SyntaxError here if executed as a single file.
from __future__ import print_function
print('hi, stderr', file=sys.stderr)  # written to sys.stderr
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)
    time.sleep(0.5)  # brief pause so the incremental (asynchronous) display of output is visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # 50 output lines: enough to demonstrate collapsing/scrolling of the output area
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # plain assignment: produces no output, but `a` persists in the kernel's state
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # reads `a` from the previous cell; prints 15
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` runs the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately long-running cell: leaves time to press the interrupt (stop) button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These include:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")  # written to sys.stdout
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
# NOTE(review): a __future__ import is only legal at the very top of a module.
# Each notebook cell is compiled separately, so this works interactively, but
# the exported .py script raises SyntaxError here if executed as a single file.
from __future__ import print_function
print('hi, stderr', file=sys.stderr)  # written to sys.stderr
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
for i in range(8):
    print(i)
    time.sleep(0.5)  # brief pause so the incremental (asynchronous) display of output is visible
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
for i in range(50):
    print(i)  # 50 output lines: enough to demonstrate collapsing/scrolling of the output area
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + [markdown] azdata_cell_guid="4a78104e-e6dd-438e-833e-a3ccad0f3618"
# # Running Code
# + [markdown] azdata_cell_guid="40fdc23f-a403-4897-8bf6-96c9543e80c7"
# First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
# + [markdown] azdata_cell_guid="a8c9c495-abac-48a4-93a3-d557d35604aa"
# ## Code cells allow you to enter and run code
# + [markdown] azdata_cell_guid="890d3df1-acd0-4c11-8988-c566afbab791"
# Run a code cell using `Shift-Enter` or pressing the <button class='btn btn-default btn-xs'><i class="icon-step-forward fa fa-step-forward"></i></button> button in the toolbar above:
# + azdata_cell_guid="8cab1949-c1e3-4a3c-8b51-8690a67796ac"
a = 11  # plain assignment: produces no output, but `a` persists in the kernel's state
# + azdata_cell_guid="66907bfc-0289-4ef7-8847-3ee2e6d7f7e8"
print(a + 4)  # reads `a` from the previous cell; prints 15
# + [markdown] azdata_cell_guid="c44a36c6-03a0-4639-a55e-a065da229138"
#
# + [markdown] azdata_cell_guid="212eb338-b538-473f-8f38-fe0e5b5f4768"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` runs the current cell and enters command mode.
# + [markdown] azdata_cell_guid="d02f1ea0-5496-4b59-97ff-bc577a05fc28"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="cc96c1c7-ec70-4645-9203-a5ed6629bd26"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
# + azdata_cell_guid="15a7508d-e39a-4d75-bb4b-8e780a2b70a6"
import time
time.sleep(10)  # deliberately long-running cell: leaves time to press the interrupt (stop) button
# + [markdown] azdata_cell_guid="4b450870-fd21-4d89-81f3-6b5c3cbbe3cc"
# If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
# ctypes to segfault the Python interpreter:
# + azdata_cell_guid="5849691f-667b-4943-986e-f51deba3f31d"
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# (macOS ships libc as a .dylib; glibc on Linux is libc.so.6)
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1)  # BOOM!!
# + [markdown] azdata_cell_guid="47e7d8c9-3617-45a1-b33d-95b711bc50fd"
# ## Cell menu
# + [markdown] azdata_cell_guid="40cbf820-2371-4de7-91ab-76afd7278050"
# The "Cell" menu has a number of menu items for running code in different ways. These include:
#
# * Run and Select Below
# * Run and Insert Below
# * Run All
# * Run All Above
# * Run All Below
# + [markdown] azdata_cell_guid="c6c9fe21-61fb-4fb2-9bc9-2225ba000a71"
# ## Restarting the kernels
# + [markdown] azdata_cell_guid="e49313dc-cf5c-4b35-af54-fc9e4b549311"
# The kernel maintains the state of a notebook's computations. You can reset this state by restarting the kernel. This is done by clicking on the <button class='btn btn-default btn-xs'><i class='fa fa-repeat icon-repeat'></i></button> in the toolbar above.
# + [markdown] azdata_cell_guid="f115d40b-4b14-47e2-a569-0dadda8a9c31"
# ## sys.stdout and sys.stderr
# + [markdown] azdata_cell_guid="81daa3c5-c7f2-4921-89c3-09256fa54c8a"
# The stdout and stderr streams are displayed as text in the output area.
# + azdata_cell_guid="b0fd6434-6938-4459-9c47-c02c21d39e69"
print("hi, stdout")
# + azdata_cell_guid="be797407-9449-4e1b-8c40-989b96ed2b51"
# Legacy Python 2 compatibility import; a no-op on Python 3.
from __future__ import print_function
# stderr output is typically rendered with a distinct background in notebooks.
print('hi, stderr', file=sys.stderr)
# + [markdown] azdata_cell_guid="17d3ceac-45ab-424f-a1cf-087290570d90"
# ## Output is asynchronous
# + [markdown] azdata_cell_guid="9fe5e33f-71a8-48e7-bf34-e99ac0249be0"
# All output is displayed asynchronously as it is generated in the Kernel. If you execute the next cell, you will see the output one piece at a time, not all at the end.
# + azdata_cell_guid="1999edee-c366-48b2-850c-689d71915c55"
import time, sys
# Emit the numbers 0..7 one at a time, pausing between prints so the
# asynchronous arrival of output in the notebook can be observed.
counter = 0
while counter < 8:
    print(counter)
    time.sleep(0.5)
    counter += 1
# + [markdown] azdata_cell_guid="b2fc487f-8c4a-4d60-ab9c-ce42603e3431"
# ## Large outputs
# + [markdown] azdata_cell_guid="8d6b248a-3919-4391-936b-767f07b210ba"
# To better handle large outputs, the output area can be collapsed. Run the following cell and then single- or double- click on the active area to the left of the output:
# + azdata_cell_guid="6dfb111d-f966-4a87-9f32-eff3ffeb5015"
# Emit 50 lines; the notebook UI lets you collapse this output area.
for i in range(50):
    print(i)
# + [markdown] azdata_cell_guid="d64bd883-6ec9-4b1c-8df6-4cffb146fd45"
# Beyond a certain point, output will scroll automatically:
# + azdata_cell_guid="c479ea8f-4dcc-4b50-814e-f3d000cdea33"
# Print 2**i - 1 for 500 values — enough lines to trigger output auto-scrolling.
for i in range(500):
    print(2**i - 1)
| samples/runCode copy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to use Ivis to reduce dimensionality of image data
# A common objective of image analysis is dimensionality reduction. Over the last couple of months we made some adjustments to our [`Ivis`](https://github.com/beringresearch/ivis) library with the goal to make it easy to create and apply custom Neural Network backbones for dimensionality reduction of complex datasets such as images.
#
# In this notebook we will demo how to create a custom neural network and then use it to reduce dimensionality of an imaging dataset using Ivis.
# + colab={"base_uri": "https://localhost:8080/"} id="yb_eyfTYVl_f" outputId="687071b3-598a-44b3-e700-4e5c3ab0485c"
# !pip install ivis
# + id="gbvX49m8QhTx"
# Ivis uses several stochastic processes.
# To make sure that results are consistent from one run to another,
# we fix the random seed
import os
# NOTE(review): setting PYTHONHASHSEED from inside a running interpreter does
# not change hash randomization of the current process — confirm it is also
# exported before launch if exact reproducibility across runs matters.
os.environ["PYTHONHASHSEED"]="1234"
import random
import numpy as np
import tensorflow as tf
# Seed NumPy, the stdlib RNG, and TensorFlow with the same value.
np.random.seed(1234)
random.seed(1234)
tf.random.set_seed(1234)
# + id="Eala8oyTVoCD"
import matplotlib.pyplot as plt
from ivis import Ivis
# -
# ## 1. Data import
# To start, we will use the Fashion MNIST dataset. Each image in this dataset is 28x28 pixels.
# +
# Load Fashion-MNIST and scale pixel values from [0, 255] into [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
# -
# ## 2. Nearest neighbour retrieval
# ### Image representation
# Ivis uses a nearest neighbour matrix to identify positive and negative observations that are then fed into the triplet loss function. For tabular data, nearest neighbours are computed directly from the input matrix using the Annoy library. However, since images are represented as multidimensional arrays, we will use a convolutional Autoencoder to first extract latent image features, which will then be fed into our nearest neighbour algorithm.
# > Note: By default, to extract nearest neighbours from a multidimensional array, Ivis will simply flatten the data prior to feeding it into the Annoy algorithm. However, Ivis will still be trained on the original multidimensional dataset.
# +
class Autoencoder(tf.keras.Model):
    """Convolutional autoencoder used to extract latent features from 28x28x1 images.

    The encoder halves the spatial resolution three times (28 -> 14 -> 7 -> 4),
    yielding a 4x4x8 = 128-dimensional latent code; the decoder mirrors it to
    reconstruct the input.
    """

    def __init__(self, latent_dim=128):
        """Build the encoder/decoder stacks.

        Args:
            latent_dim: size of the flattened latent representation
                (informational; the layer stack below fixes it at 128).
        """
        super(Autoencoder, self).__init__()
        # BUG FIX: the original assigned an undefined global `latent_dim`,
        # which raised NameError as soon as Autoencoder() was instantiated.
        # It is now a constructor parameter with a matching default.
        self.latent_dim = latent_dim
        self.encoder = tf.keras.Sequential([
            tf.keras.layers.Conv2D(16, (3, 3), activation='relu', padding='same'),
            tf.keras.layers.MaxPooling2D((2, 2), padding='same'),
            tf.keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same'),
            tf.keras.layers.MaxPooling2D((2, 2), padding='same'),
            tf.keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same'),
            tf.keras.layers.MaxPooling2D((2, 2), padding='same'),
        ])
        self.decoder = tf.keras.Sequential([
            tf.keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same'),
            tf.keras.layers.UpSampling2D((2, 2)),
            tf.keras.layers.Conv2D(8, (3, 3), activation='relu', padding='same'),
            tf.keras.layers.UpSampling2D((2, 2)),
            # Valid (unpadded) conv shrinks 16 -> 14 so the final upsample
            # lands back on 28x28.
            tf.keras.layers.Conv2D(16, (3, 3), activation='relu'),
            tf.keras.layers.UpSampling2D((2, 2)),
        ])
        # NOTE(review): the decoder outputs 16 channels while the input has 1;
        # the MSE loss broadcasts across channels — confirm this is intended.

    def call(self, x):
        """Run the full reconstruction: encode, then decode."""
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
# Instantiate and compile with an MSE reconstruction loss.
autoencoder = Autoencoder()
autoencoder.compile(optimizer='adam', loss=tf.keras.losses.MeanSquaredError())
# -
# Train the autoencoder to reconstruct its own input (self-supervised).
autoencoder.fit(x_train, x_train,
                # In the interest of time, epochs are set to 10
                epochs=10,
                shuffle=True,
                validation_data=(x_test, x_test))
# The final result is a matrix of latent image representations with shape 60,000 x 128.
# Extract the trained encoder and run it over the training set.
encoder = tf.keras.Model(inputs=autoencoder.encoder.input, outputs=autoencoder.encoder.output)
x_train_encoded = encoder.predict(x_train)
# Flatten each sample's latent maps (4x4x8) to a 128-vector. Deriving both
# sizes from the array itself — instead of the original hard-coded
# reshape((60000, 128)) — keeps this correct for any training-set size or
# encoder architecture.
x_train_encoded = x_train_encoded.reshape((x_train_encoded.shape[0], -1))
# ### Nearest neighbour retrieval
# We are now ready to extract the nearest neighbour matrix. In this example, we're using the built-in Annoy algorithm, but it can be substituted for your favourite approach.
from ivis.data.neighbour_retrieval import AnnoyKnnMatrix
# Build an Annoy index over the latent vectors and extract, for each sample,
# the indices of its nearest neighbours (used for triplet mining in Ivis).
annoy = AnnoyKnnMatrix.build(x_train_encoded, path='annoy.index')
nearest_neighbours = annoy.get_neighbour_indices()
# ## 3. Dimensionality reduction
# Finally, we're ready to train Ivis. We define a simple custom convolutional network that will be used as a Siamese Network backbone.
# + id="g8g7jIA8WLsp"
def create_model():
    """Return the convolutional backbone Ivis uses as its siamese network.

    Three Conv/Pool stages (64 -> 128 -> 256 filters) followed by a Flatten.
    """
    layers = tf.keras.layers
    backbone = tf.keras.models.Sequential([
        layers.BatchNormalization(input_shape=x_train.shape[1:]),
        layers.Conv2D(64, (5, 5), padding='same', activation='elu'),
        layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        layers.Dropout(0.25),
        layers.BatchNormalization(input_shape=x_train.shape[1:]),
        layers.Conv2D(128, (5, 5), padding='same', activation='elu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Dropout(0.25),
        layers.BatchNormalization(input_shape=x_train.shape[1:]),
        layers.Conv2D(256, (5, 5), padding='same', activation='elu'),
        layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        layers.Flatten(),
    ])
    return backbone
# + id="RZ7TpqRAjoop"
base_model = create_model()
# -
# > NOTE: It's recommended to train Ivis on a GPU, but if that isn't accessible, you can reduce the `epochs` hyperparameter to save some time.
# + colab={"base_uri": "https://localhost:8080/"} id="LF3P5yTz0bvP" outputId="6c256626-8800-46c4-981c-34119b196dcf"
# The base_model and the pre-computed nearest-neighbour matrix are passed directly into Ivis.
ivis = Ivis(model=base_model, epochs=5,
            neighbour_matrix=nearest_neighbours)
ivis.fit(x_train)
# -
# ## 4. Visualise Embeddings
from IPython.display import set_matplotlib_formats
# Render matplotlib figures at retina resolution.
# NOTE(review): set_matplotlib_formats is deprecated in newer IPython (moved to
# matplotlib_inline.backend_inline) — confirm against the pinned version.
set_matplotlib_formats('retina')
# Final embeddings can be obtained using the `transform` method. Here we examine training and testing set embeddings.
# + colab={"base_uri": "https://localhost:8080/"} id="1gw1Dlws02F0" outputId="e210c485-b429-4352-e787-e4787a920e96"
# Project both splits into the learned 2-D embedding space.
embeddings_train = ivis.transform(x_train)
embeddings_test = ivis.transform(x_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="zMYOhy_J7iu5" outputId="bfd999cb-e520-48ee-9769-7b9ea89ba26d"
# Side-by-side scatter plots of the 2-D embeddings, coloured by class label.
fig, axs = plt.subplots(1, 2, figsize=(9, 3))
axs[0].scatter(embeddings_train[:, 0], embeddings_train[:, 1], c=y_train, s=0.1)
axs[0].set_xlabel('ivis 1')
# BUG FIX: the y-axis shows the second ivis dimension (both axes were
# labelled 'ivis 1' in the original copy-paste).
axs[0].set_ylabel('ivis 2')
axs[0].set_title('Training set')
axs[1].scatter(embeddings_test[:, 0], embeddings_test[:, 1], c=y_test, s=0.1)
axs[1].set_xlabel('ivis 1')
axs[1].set_ylabel('ivis 2')
axs[1].set_title('Testing set')
# + id="_16J1T2C7xJE"
| notebooks/ivis_cnn_backbone_fashion_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # !pip3 install dm-sonnet
# # !pip3 install tensorflow-probability==0.7.0
# -
import os
# Pin this process to GPU 3; must run before TensorFlow initialises CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import numpy as np
import tensorflow as tf
from dnc import DNC
# +
import json
# Load the pre-split parallel corpus and its vocabularies from disk.
with open('train-test.json') as fopen:
    dataset = json.load(fopen)
with open('dictionary.json') as fopen:
    dictionary = json.load(fopen)
# -
# Unpack source (X) and target (Y) sentence lists for both splits.
train_X = dataset['train_X']
train_Y = dataset['train_Y']
test_X = dataset['test_X']
test_Y = dataset['test_Y']
# Display the top-level vocabulary keys (notebook cell output).
dictionary.keys()
# +
# Forward (token -> id) and reverse (id -> token) vocabularies per language.
dictionary_from = dictionary['from']['dictionary']
rev_dictionary_from = dictionary['from']['rev_dictionary']
dictionary_to = dictionary['to']['dictionary']
rev_dictionary_to = dictionary['to']['rev_dictionary']
# -
# Special-token ids, looked up in the source vocabulary.
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']
# +
# Append an explicit end-of-sentence marker to every training source sentence
# (mutating the list in place, so dataset['train_X'] sees the change too).
for position, sentence in enumerate(train_X):
    train_X[position] = sentence + ' EOS'
train_X[0]
# +
# Same end-of-sentence marker for the test split, again in place.
for position, sentence in enumerate(test_X):
    test_X[position] = sentence + ' EOS'
test_X[0]
# -
# DNC memory hyperparameters: 5 read heads, 1 write head, a 128-slot external
# memory with 128-wide words, and gradient clipping at 20.
num_reads = 5
num_writes = 1
memory_size = 128
word_size = 128
clip_value = 20
# +
def pad_second_dim(x, desired_size):
    """Zero-pad a rank-3 tensor along axis 1 up to `desired_size` timesteps."""
    shape = tf.shape(x)
    pad_shape = tf.stack([shape[0], desired_size - shape[1], shape[2]], 0)
    zeros = tf.tile([[[0.0]]], pad_shape)
    return tf.concat([x, zeros], 1)
class Translator:
    """TF1 encoder-decoder translator: a DNC (Differentiable Neural Computer)
    encoder feeding a Bahdanau-attention seq2seq decoder, with teacher forcing
    for training and greedy decoding for inference.
    """

    def __init__(self, size_layer, num_layers, embedded_size,
                 from_dict_size, to_dict_size, learning_rate, batch_size,
                 attn_input_feeding=True):
        # NOTE(review): `num_layers` is accepted but never used below.

        def attn_decoder_input_fn(inputs, attention):
            # With input feeding enabled, decoder inputs pass through unchanged.
            # NOTE(review): returns None when attn_input_feeding is False —
            # presumably only the True path is ever exercised; confirm.
            if attn_input_feeding:
                return inputs

        def attention(encoder_out, cell, seq_len, encoder_last_state, reuse=False):
            # Wrap `cell` with Bahdanau attention over the encoder outputs.
            attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(num_units = size_layer,
                                                                       memory = encoder_out,
                                                                       memory_sequence_length = seq_len)
            return tf.contrib.seq2seq.AttentionWrapper(
                cell = cell,
                attention_mechanism = attention_mechanism,
                attention_layer_size = size_layer,
                cell_input_fn=attn_decoder_input_fn,
                initial_cell_state=encoder_last_state,
                alignment_history=False)

        # Placeholders; sequence lengths are inferred from non-zero (non-PAD) ids.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None, None])
        self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32)
        self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32)
        # DNC configuration (globals defined earlier in the notebook).
        access_config = {
            "memory_size": memory_size,
            "word_size": word_size,
            "num_reads": num_reads,
            "num_writes": num_writes,
        }
        controller_config = {
            "hidden_size": size_layer,
        }
        # DNC cell used as the encoder RNN.
        self.dnc_cell = DNC(access_config=access_config, controller_config=controller_config,
                            output_size=size_layer, clip_value=clip_value)
        self.dnc_initial = self.dnc_cell.initial_state
        encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
        initial_state = self.dnc_initial(batch_size)
        self.encoder_out, self.encoder_state = tf.nn.dynamic_rnn(
            cell=self.dnc_cell, inputs=encoder_embedded,
            sequence_length=self.X_seq_len, dtype=tf.float32,
            initial_state=initial_state)
        # Teacher forcing: drop the last target token and prepend GO.
        main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
        decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
        # decoder
        decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
        decoder_cell = attention(self.encoder_out, self.dnc_cell, self.X_seq_len,self.encoder_state)
        dense_layer = tf.layers.Dense(to_dict_size)
        # Training branch: feed ground-truth target embeddings step by step.
        training_helper = tf.contrib.seq2seq.TrainingHelper(
            inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
            sequence_length = self.Y_seq_len,
            time_major = False)
        training_decoder = tf.contrib.seq2seq.BasicDecoder(
            cell = decoder_cell,
            helper = training_helper,
            initial_state = decoder_cell.zero_state(batch_size=batch_size, dtype=tf.float32),
            output_layer = dense_layer)
        training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
            decoder = training_decoder,
            impute_finished = True,
            output_time_major=False,
            maximum_iterations = tf.reduce_max(self.Y_seq_len))
        # Inference branch: greedy decoding, capped at twice the source length.
        predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
            embedding = decoder_embeddings,
            start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
            end_token = EOS)
        predicting_decoder = tf.contrib.seq2seq.BasicDecoder(
            cell = decoder_cell,
            helper = predicting_helper,
            initial_state = decoder_cell.zero_state(batch_size=batch_size, dtype=tf.float32),
            output_layer = dense_layer)
        predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
            decoder = predicting_decoder,
            impute_finished = True,
            maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
        self.training_logits = training_decoder_output.rnn_output
        self.predicting_ids = predicting_decoder_output.sample_id
        # Mask PAD positions out of the loss and accuracy.
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
                                                     targets = self.Y,
                                                     weights = masks)
        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
        y_t = tf.argmax(self.training_logits,axis=2)
        y_t = tf.cast(y_t, tf.int32)
        self.prediction = tf.boolean_mask(y_t, masks)
        mask_label = tf.boolean_mask(self.Y, masks)
        correct_pred = tf.equal(self.prediction, mask_label)
        # NOTE(review): `correct_index` is computed but unused.
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# -
# Model/training hyperparameters.
size_layer = 256
num_layers = 2
embedded_size = 256
learning_rate = 1e-3
batch_size = 96
epoch = 20
# Build a fresh graph, open an interactive session, and initialise variables.
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Translator(size_layer, num_layers, embedded_size, len(dictionary_from),
                   len(dictionary_to), learning_rate,batch_size)
sess.run(tf.global_variables_initializer())
# +
def str_idx(corpus, dic):
    """Map each sentence in `corpus` to a list of token ids via `dic`,
    substituting the global UNK id for out-of-vocabulary tokens."""
    indexed = []
    for sentence in corpus:
        indexed.append([dic.get(token, UNK) for token in sentence.split()])
    return indexed
def pad_sentence_batch(sentence_batch, pad_int):
    """Right-pad every sentence to the batch's maximum length.

    Returns a tuple (padded_sentences, original_lengths).
    """
    longest = max(len(sentence) for sentence in sentence_batch)
    lengths = [len(sentence) for sentence in sentence_batch]
    padded = [sentence + [pad_int] * (longest - len(sentence))
              for sentence in sentence_batch]
    return padded, lengths
# -
# Convert every split to id sequences using the matching vocabulary.
train_X = str_idx(train_X, dictionary_from)
test_X = str_idx(test_X, dictionary_from)
train_Y = str_idx(train_Y, dictionary_to)
test_Y = str_idx(test_Y, dictionary_to)
# +
import tqdm
# One training pass plus one evaluation pass per epoch.
# NOTE(review): the graph was built with a fixed batch_size=96 (tf.fill /
# zero_state), but the final minibatch may be smaller — confirm the corpus
# length is a multiple of batch_size or that the graph tolerates it.
for e in range(epoch):
    pbar = tqdm.tqdm(
        range(0, len(train_X), batch_size), desc = 'minibatch loop')
    train_loss, train_acc, test_loss, test_acc = [], [], [], []
    for i in pbar:
        index = min(i + batch_size, len(train_X))
        # NOTE(review): `maxlen` is computed but never used below.
        maxlen = max([len(s) for s in train_X[i : index] + train_Y[i : index]])
        batch_x, seq_x = pad_sentence_batch(train_X[i : index], PAD)
        batch_y, seq_y = pad_sentence_batch(train_Y[i : index], PAD)
        feed = {model.X: batch_x,
                model.Y: batch_y}
        accuracy, loss, _ = sess.run([model.accuracy,model.cost,model.optimizer],
                                     feed_dict = feed)
        train_loss.append(loss)
        train_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    # Evaluation pass: no optimizer op is run, so weights stay unchanged.
    pbar = tqdm.tqdm(
        range(0, len(test_X), batch_size), desc = 'minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x, seq_x = pad_sentence_batch(test_X[i : index], PAD)
        batch_y, seq_y = pad_sentence_batch(test_Y[i : index], PAD)
        feed = {model.X: batch_x,
                model.Y: batch_y,}
        accuracy, loss = sess.run([model.accuracy,model.cost],
                                  feed_dict = feed)
        test_loss.append(loss)
        test_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    print('epoch %d, training avg loss %f, training avg acc %f'%(e+1,
          np.mean(train_loss),np.mean(train_acc)))
    print('epoch %d, testing avg loss %f, testing avg acc %f'%(e+1,
          np.mean(test_loss),np.mean(test_acc)))
# -
rev_dictionary_to = {int(k): v for k, v in rev_dictionary_to.items()}
# +
# Greedy-decode the first 20 test sentences.
test_size = 20
batch_x, seq_x = pad_sentence_batch(test_X[: test_size], PAD)
batch_y, seq_y = pad_sentence_batch(test_Y[: test_size], PAD)
feed = {model.X: batch_x}
# NOTE(review): the graph was built with batch_size=96 but only 20 samples are
# fed here — confirm the fixed-batch ops accept the smaller batch.
logits = sess.run(model.predicting_ids, feed_dict = feed)
logits.shape
# +
# Print predicted vs. actual translations, skipping special tokens.
rejected = ['PAD', 'EOS', 'UNK', 'GO']
for row in range(test_size):
    predict = [rev_dictionary_to[token_id] for token_id in logits[row]
               if rev_dictionary_to[token_id] not in rejected]
    actual = [rev_dictionary_to[token_id] for token_id in batch_y[row]
              if rev_dictionary_to[token_id] not in rejected]
    print(row, 'predict:', ' '.join(predict))
    print(row, 'actual:', ' '.join(actual))
    print()
# -
| mlmodels/model_dev/nlp_tfflow/neural-machine-translation/38.dnc-seq2seq-bahdanau-greedy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cord
# language: python
# name: cord
# ---
# # Load Research Papers
# %load_ext autoreload
# %autoreload 2
from cord import ResearchPapers
# Load the CORD-19 research-paper metadata and preview one record.
research_papers = ResearchPapers.load()
research_papers[3].get_json_paper()
research_papers.data_path
# Interactive search widget over the corpus.
research_papers.searchbar('cruise ship', num_results=100)
| notebooks/load-research-papers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1-minute introduction to Jupyter ##
#
# A Jupyter notebook consists of cells. Each cell contains either text or code.
#
# A text cell will not have any text to the left of the cell. A code cell has `In [ ]:` to the left of the cell.
#
# If the cell contains code, you can edit it. Press <kbd>Enter</kbd> to edit the selected cell. While editing the code, press <kbd>Enter</kbd> to create a new line, or <kbd>Shift</kbd>+<kbd>Enter</kbd> to run the code. If you are not editing the code, select a cell and press <kbd>Ctrl</kbd>+<kbd>Enter</kbd> to run the code.
# Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
#
# Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
# Student identification for the autograder (fill in before submitting).
NAME = ""
COLLABORATORS = ""
# ---
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "3fae481e652c3a54cb5e8c00230fa563", "grade": false, "grade_id": "cell-b10e5636ae6dc30d", "locked": true, "schema_version": 3, "solution": false, "task": false}
# # Assignment 1: Python basic operators and data types
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "54c761519e1d1251d1ac52db28b1a6c7", "grade": false, "grade_id": "cell-403c63df3f91d28e", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Part 1
#
# Write program code to:
#
# 1. ask the user to input the number of seconds,
# 2. convert seconds to minutes in decimals,
# 3. print the number of minutes.
#
# The steps involved are:
#
# 1. Get the number of seconds from the user as an integer.
# 2. Convert `seconds` to `minutes`.
# 3. Output the value of `minutes`.
#
# ### Expected output
#
# Enter the number of seconds (integer): 48
# The duration is 0.8 minutes.
# Enter the number of seconds (integer): 72
# The duration is 1.2 minutes.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "4959ec6a58cf5b2fd843482917f29588", "grade": true, "grade_id": "cell-1ce057e052ae5b58", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
value1 = input('Enter the number of seconds (integer): ')
value1 = int(value1)
# ... complete the code below
# NOTE(review): `minutes` must be computed from `value1` before the print —
# intentionally left incomplete as a student exercise.
print('The duration is',minutes,'minutes.')
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "df4b7acef4b03e0e2b40b031098a6d09", "grade": false, "grade_id": "cell-a2051e96fc638c2c", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Part 2
#
# Write program code to:
#
# 1. ask the user to input the number of seconds,
# 2. convert seconds to minutes and seconds (as integers),
# 3. print the number of minutes and seconds.
#
# *Hint: You can use `divmod()` to get the number of minutes and seconds in the same step.*
#
# ### Expected output
#
# Enter the number of seconds (integer): 48
# The duration is 0 minutes and 48 seconds.
# Enter the number of seconds (integer): 72
# The duration is 1 minutes and 12 seconds.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "114c7d6b2ed8baaf2cb0963b9316637b", "grade": true, "grade_id": "cell-4e8317de24a4418b", "locked": false, "points": 3, "schema_version": 3, "solution": true, "task": false}
# Replace the underscores (_____) with a valid expression to get user input
value2 = _____
value2 = int(value2)
# ... complete the code below
# NOTE(review): `minutes` and `seconds` must be computed (hint: divmod)
# before the print — intentionally left incomplete as a student exercise.
print('The duration is',minutes,'minutes and',seconds,'seconds.')
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "33b3f32eba427b56fd30b7b3dd6dbf09", "grade": false, "grade_id": "cell-a35f8b447470f9d4", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Part 3
#
# Write program code to:
#
# 1. ask the user to input the number of **minutes**,
# 2. convert **minutes** to hours and minutes (as integers),
# 3. print the number of **hours and minutes**.
#
# *Hint: You can use `divmod()` to get the number of hours and minutes in the same step.*
#
# ### Expected output
#
# Enter the number of minutes (integer): 48
# The duration is 0 hours and 48 minutes.
# Enter the number of minutes (integer): 72
# The duration is 1 hours and 12 minutes.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "44397aa837abf1d4fd5335fed0fcd1ac", "grade": true, "grade_id": "cell-12707c7362c69921", "locked": false, "points": 2, "schema_version": 3, "solution": true, "task": false}
# Replace the underscores (_____) with a valid expression to get user input
value3 = _____
value3 = int(value3)
# ... complete the code below
# NOTE(review): hours/minutes must be computed (hint: divmod) before printing —
# intentionally left incomplete as a student exercise.
# Replace the underscores (_____) with a valid expression to print the number of hours and minutes
print(_____)
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "911096e859d29b63f939cca21d6bbff1", "grade": false, "grade_id": "cell-6e99e374d4309fc3", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Part 4 (final)
#
# Write program code to:
#
# 1. ask the user to input the number of **seconds**,
# 2. convert seconds to hours, minutes, and seconds (as integers),
# 3. print the number of **hours, minutes, and seconds**.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "43d29609de5a3216ec4e28aa8179de43", "grade": true, "grade_id": "cell-2284470cb74cc6e9", "locked": false, "points": 3, "schema_version": 3, "solution": true, "task": false}
### Type your code below
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "b2ba5ce3feadafb85560af930a9270ff", "grade": false, "grade_id": "cell-e65dec44cdb73bae", "locked": true, "schema_version": 3, "solution": false, "task": false}
# # Feedback and suggestions
#
# Any feedback or suggestions for this assignment?
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "0f3d1a45447f39d75f9db5b74392a04a", "grade": true, "grade_id": "cell-a9fa64f600cfded6", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false}
# YOUR ANSWER HERE
| Assignment 1/assignment_01.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Develop Model Driver
#
# In this notebook, we will develop the API that will call our model. This module initializes the model, transforms the input so that it is in the appropriate format and defines the scoring method that will produce the predictions. The API will expect the input to be passed as an image. Once a request is received, the API will load the image, preprocess it and pass it to the model. There are two main functions in the API: init() and run(). The init() function loads the model and returns a scoring function. The run() function processes the images and uses the first function to score them.
#
# Note: Always make sure you don't have any lingering notebooks running (Shutdown previous notebooks). Otherwise it may cause GPU memory issue.
from azureml.core import Workspace
from azureml.core.model import Model
from dotenv import set_key, find_dotenv
import logging
from testing_utilities import get_auth
import keras
import tensorflow
# Record framework versions for reproducibility of the scoring image.
print("Keras: ", keras.__version__)
print("Tensorflow: ", tensorflow.__version__)
# Locate the project's .env file; raises if it cannot be found.
env_path = find_dotenv(raise_error_if_not_found=True)
# ## Write and save driver script
# +
# %%writefile driver.py
from resnet152 import ResNet152
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from azureml.contrib.services.aml_request import rawhttp
from azureml.core.model import Model
from toolz import compose
import numpy as np
import timeit as t
from PIL import Image, ImageOps
import logging
_NUMBER_RESULTS = 3
def _image_ref_to_pil_image(image_ref):
    """Load an image (path or file-like object) with PIL and convert it to RGB."""
    return Image.open(image_ref).convert("RGB")
def _pil_to_numpy(pil_image):
    """Fit the PIL image to 224x224 and convert it to a float array (H, W, C)."""
    resized = ImageOps.fit(pil_image, (224, 224), Image.ANTIALIAS)
    return image.img_to_array(resized)
def _create_scoring_func():
    """Load the ResNet152 model once and return a closure that scores batches."""
    logger = logging.getLogger("model_driver")
    start = t.default_timer()
    model_name = "resnet_model"
    # Resolve the registered model's on-disk path inside the AzureML service.
    model_path = Model.get_model_path(model_name)
    model = ResNet152()
    model.load_weights(model_path)
    end = t.default_timer()
    loadTimeMsg = "Model loading time: {0} ms".format(round((end - start) * 1000, 2))
    logger.info(loadTimeMsg)

    def call_model(img_array_list):
        # Stack the individual (224, 224, 3) arrays into a single batch.
        img_array = np.stack(img_array_list)
        img_array = preprocess_input(img_array)
        preds = model.predict(img_array)
        # Converting predictions to float64 since we are able to serialize float64 but not float32
        preds = decode_predictions(preds.astype(np.float64), top=_NUMBER_RESULTS)
        return preds

    return call_model
def get_model_api():
    """Return a scorer mapping {key: image_ref} -> (predictions dict, timing string)."""
    logger = logging.getLogger("model_driver")
    scoring_func = _create_scoring_func()

    def process_and_score(images_dict):
        """ Classify the input using the loaded model
        """
        start = t.default_timer()
        logger.info("Scoring {} images".format(len(images_dict)))
        # Load -> fit to 224x224 -> ndarray (compose applies right-to-left).
        transform_input = compose(_pil_to_numpy, _image_ref_to_pil_image)
        transformed_dict = {
            key: transform_input(img_ref) for key, img_ref in images_dict.items()
        }
        preds = scoring_func(list(transformed_dict.values()))
        # Re-attach each prediction to its original key (dict order is preserved).
        preds = dict(zip(transformed_dict.keys(), preds))
        end = t.default_timer()
        logger.info("Predictions: {0}".format(preds))
        logger.info("Predictions took {0} ms".format(round((end - start) * 1000, 2)))
        return (preds, "Computed in {0} ms".format(round((end - start) * 1000, 2)))

    return process_and_score
def init():
    """ Initialise the model and scoring function
    """
    # AzureML calls init() once per worker; the scorer is exposed as a
    # global so run() can reach it on every request.
    global process_and_score
    process_and_score = get_model_api()
@rawhttp
def run(request):
    """ Make a prediction based on the data passed in using the preloaded model
    """
    # `request.files` maps form-field names to the uploaded image file objects.
    return process_and_score(request.files)
# -
# ## Test the driver
# We test the driver by passing data.
# Verbose logging while exercising the driver locally.
logging.basicConfig(level=logging.DEBUG)
# %run driver.py
# Let's load the workspace.
# Authenticate and load the AzureML workspace from the local config file.
ws = Workspace.from_config(auth=get_auth())
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep="\n")
# Get the model and score against an example image
# Resolve the registered model path in the workspace.
model_path = Model.get_model_path("resnet_model", _workspace=ws)
IMAGEURL = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg"
# Always make sure you don't have any lingering notebooks running. Otherwise it may cause GPU memory issue.
# Score one local test image; assumes the file was downloaded beforehand.
process_and_score = get_model_api()
resp = process_and_score({"lynx": open("220px-Lynx_lynx_poing.jpg", "rb")})
# Clear GPU memory
from keras import backend as K
K.clear_session()
# Next, we will [build a docker image with this modle driver and other supporting files](03_BuildImage.ipynb).
| architectures/Python-Keras-RealTimeServing/{{cookiecutter.project_name}}/03_DevelopScoringScript.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ArceusJC/CPEN-21A-ECE-2-3/blob/main/Demo1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="bN9pMzHRvoUi"
# #### Python Indentation
# + [markdown] id="9xtf_LXcw9Za"
#
# + colab={"base_uri": "https://localhost:8080/"} id="98ht2IdjvnT3" outputId="2baec404-c56b-4393-9932-1b1162afed80"
if 5>2:
    print("Five is greater than two!")  # runs because the condition is True
# + [markdown] id="H8h132ccw_JK"
# #### Python Variables
#
# + colab={"base_uri": "https://localhost:8080/"} id="vVJ_JAwfxF1F" outputId="1eb72509-caf1-4d22-f24b-8621b19dbb18"
# Tuple unpacking: assign three values to three names in one statement.
a, b, c = 0,-1,2
print (a)
print (b)
print (c)
# + [markdown] id="5_8E5zqMxsV2"
# #####CASTING
#
# + colab={"base_uri": "https://localhost:8080/"} id="t8WrRsXfxvEO" outputId="679ea96d-8385-4619-cd3c-cdc5e8ac2634"
b ="sally" # b is bound to a string here; the next line rebinds it to an int
b=int(3)
print(b)
# + colab={"base_uri": "https://localhost:8080/"} id="Xv7uZy8mxMa_" outputId="3a773689-e075-47d9-feec-c0876a00b0a3"
b=float(3)  # float() casts the value to a floating point number
print(b)
# + id="4c3WlWtVyUdH"
# + [markdown] id="drkf2TQc0NOw"
# ##TYPE () FUNCTION
#
# + colab={"base_uri": "https://localhost:8080/"} id="ZnLcgjgs0SPg" outputId="490b468d-50c6-4605-e57b-ec1ff5b392ee"
x="1231"  # digits inside quotes are still a string
y= 5
print (type(x))
print(type(y))
# + [markdown] id="0zcPbonZ1AKN"
# #Double Quotes or Single Quotes
# + colab={"base_uri": "https://localhost:8080/"} id="eldp0GU-0W_4" outputId="7be57453-a5b1-41f5-b860-c105b5144012"
# Single and double quotes are interchangeable; the last binding wins.
x= "jane"
x= 'john'
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="pLmynBil1JYa" outputId="47b82096-75d3-4b31-ff34-bff344f6bfcf"
x='jane'
x="john"
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="0n9vQgeO1XDf" outputId="377df1cb-6d95-41a9-8821-3a214b8168be"
x="john"
x='Jane'
print(x)
# + id="30hb9BU51b8d"
# + [markdown] id="j7kvsvQL2DIp"
# ##Case Sensitive
# + colab={"base_uri": "https://localhost:8080/"} id="R1S_PwgX2GTA" outputId="ce00d1a5-4320-4405-c6ad-1539d841a1a5"
# Names are case sensitive: y and Y are two distinct variables.
y=4
Y=31
print(y)
print(Y)
# + id="BQKri_cA2UO_"
# + [markdown] id="FSu-haLs5Wqs"
# ##Multiple Variables
#
# + colab={"base_uri": "https://localhost:8080/"} id="27_Litvy5Z__" outputId="83949bbb-812d-4e47-885e-7d62f7118ef7"
# Assign several variables on one line by unpacking.
a,b,c=5,3,1
print(a)
print(b)
print(c)
# + id="PfsR6Gjn5dMd"
# + [markdown] id="XyoLZ0BL5nVy"
# ##One to multiple variable
# + colab={"base_uri": "https://localhost:8080/"} id="TXHDgy6F5qGu" outputId="71498251-2bd7-4ba6-845c-6975ebef0366"
# Chained assignment: all three names refer to the same value.
x=y=z="Five"
print(x)
print(y)
print(z)
# + colab={"base_uri": "https://localhost:8080/"} id="krKPvXKx5zSH" outputId="91b8d2e5-9164-49a8-cfb9-60000f7862bd"
a=b=c=8
print(a)
print(b)
print(c)
# + [markdown] id="hVaGWnhv65k0"
# ##Output variables
# + colab={"base_uri": "https://localhost:8080/"} id="-ypi2OzV6Ar3" outputId="9e168d79-29bd-4813-cad4-ddcd4bd128f2"
x= "enjoying"
print ("Valorant is " + x)  # + concatenates strings
# + id="Im08ZmpJ69JN"
# + [markdown] id="uYjkNySS-5Id"
# ##Arithmetic Operations
# + colab={"base_uri": "https://localhost:8080/"} id="rk1037Ue-6tW" outputId="a779b277-a127-4194-b013-f6232a4153d4"
x=5
y = 3
sum=x+y  # NOTE(review): 'sum' shadows the built-in sum() function
sum
# + colab={"base_uri": "https://localhost:8080/"} id="3pHANVa6-_aE" outputId="f01b4197-6243-4b1f-bcd1-496f6efe49ec"
x=5
y = 3
print (x + y)
x=5
y=3
sum=x+y
sum
# + [markdown] id="ONCZ27F1_Y-O"
# ##Assignment Operators
# + colab={"base_uri": "https://localhost:8080/"} id="ENDLDMLp_aYE" outputId="9b3b2add-94e7-47b8-a160-a3035339bbeb"
a,b,c=0,2,13
c%b  # % is the modulo operator: 13 % 2 == 1
# + [markdown] id="WIEYCIK6A8TB"
# ##Logical Operators
# + colab={"base_uri": "https://localhost:8080/"} id="ds5-knRrA-C9" outputId="0645b6e9-2b88-4f83-a967-1d2c02f6e090"
a,b,c=0,-1,6
-2>b and c>b  # 'and' is True only when both comparisons are True
# + [markdown] id="KnwWcYm4Bfzy"
# ###Identity Operators
# + colab={"base_uri": "https://localhost:8080/"} id="I_550fXIBhFo" outputId="4cb74dd3-cd59-44e2-ab17-d1534515de15"
a,b,c=0,-1,5
a is c  # 'is' tests object identity, not value equality; 0 and 5 differ, so False
| Demo1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# *This notebook was adapted from
# [course notes by Dr. <NAME>](https://github.com/kfollette/ASTR200-Spring2017), who also
# utilize notes from [<NAME>'s public course notes](https://sites.google.com/a/ucsc.edu/krumholz/teaching-and-courses/)*
#
# Further modified from notes by <NAME>, <NAME>, <NAME> (https://github.com/gurtina/P105A_2019)
#
#
# ## CIERA Summer Camp 2019: Intro to Jupyter Notebook
#
# ## Contents
#
# 1. Jupyter
# 2. Using Jupyter as a calculator
# 3. Variables
# 4. Jupyter Cautions
#
#
#
#
# # 1. Jupyter
#
# The Jupyter Notebook is an open-source web application that allows you to create and share documents that contain live code, equations, visualizations and narrative text. Uses include: data cleaning and transformation, numerical simulation, statistical modeling, data visualization, machine learning, and much more.
#
# Jupyter notebooks have two kinds of cells. Markdown cells like this one have no labeling to the left of the cell, and, when executed, appear as ordinary text. Code cells like the one below have In [ ]: printed to the side of them and, when executed, a number appears inside of the brackets.
#
# To select a cell of either type for editing, either `double click` on the cell, or press `enter`
#
# To execute a cell of either type in Jupyter, hit `shift` + `enter` inside of that cell
#
# Try it by `double clicking` on this cell and hitting `shift` + `enter`.
#
# Do the same with the code cell below.
#
# Note that a markdown cell becomes pure text when you execute it, while a code cell spits out some output labelled Out [ ]:.
1+2
# The guts of Jupyter is the coding language Python, and any and all Python syntax will work inside of code cells.
#
# A key feature of Jupyter is that it integrates markdown (instructions/notes), code ***and*** output all in the same document. For this reason, many astronomers, including myself, use Jupyter as a sort of lab notebook rather than maintaining lab notes, code, and code output all in separate documents.
#
#
#
# <div class=hw>
#
# ### Exercise 1
# -------------
#
#
# Here is an extensive Jupyter "cheat sheet"[here](https://www.cheatography.com/weidadeyue/cheat-sheets/jupyter-notebook/).
#
# Alternatively you can press `escape` and then `h`. Try this now!
#
# Use the hot keys now to execute the following tasks.
a. Change this cell to a markdown cell
# <div class=hw>
# b. Change the below cell to a code cell
# 2+3
# <div class=hw>
# c. Insert a blank code cell above this one, and one below as well.
# <div class=hw>
# d. Split the cell below in between the two operations so that each appears in its own separate cell, and then execute both cells.
1+2
1+3
# <div class=hw>
# e. Delete the cell below
# <div class=hw>
# A cheat sheet for syntax within markdown cells, which can be accessed [here](https://guides.github.com/pdfs/markdown-cheatsheet-online.pdf). Use this to complete the following tasks.
#
# f. Make this text italic.
#
# g. Make this text bold.
#
# h. Make this text bold and italic.
#
# i. Make this a heading.
#
# j. Make this a hyperlink to your favorite webpage.
#
# k. Make the asterisks in the following sentence visible: I *love* physics
# l. you can call basic UNIX commands in Jupyter. Run the below
# !ls
# !ls ..
# This command lists the contents of the directory you are in. Recall, the directory you are in is referred to as `./`
# In this directory you should see this Jupyter notebook.
#
# You can also run other commands too, like `!ls ..` to list the contents of the directory one level up from here.
# There are obviously lots of other things that you can do, but these are the basics that you'll use the most often.
#
# # 2. Using Jupyter as a calculator: operators & expressions
#
# In Jupyter notebooks, every cell is a command prompt into which we can enter python commands. One very basic way to use python is as a calculator.
#
# Python knows the basic arithmetic operations plus `(+)`, minus `(-)`, times `(\*)`, divide `(/)`, and raise to a power `(\**)`.
# These are `operators`.
#
# An `expression` is the combination of one or more operators that the python language then interprets.
# e.g `2*3`
#
# *Sidenote*
# If you double click on this cell, you'll see that I've inserted a BACKSLASH (\) before the asterisks. That is because the asterisk has special meaning inside of a markdown cell. Single asterisks make an item *italicized* when you execute the cell, and double asterisks make it **bold**. This is ***NOT*** the case for code cells.
#
# Execute all of the cells below and make sure that you understand what they're doing before moving on.
2+3
2*3
2-3
2/3  # true division always yields a float
2**3  # exponentiation
# It also understands parentheses, and follows the normal rules for order of operations:
1+2*3
1+2**3
(1+2)*3
# # 3. Simple variables
#
# We can also define variables to store numbers, and we can perform arithmetic on those variables. Variables are just names for boxes that can store values, and on which you can perform various operations. For example, execute each of the cells below and make sure you understand the output:
a=4
a+1
a/2
a=a+1  # rebind a to its previous value plus one
# THIS IS A COMMENT !  (everything after # on a line is ignored by Python)
a
a**2
# Note that the line a=a+1 redefined the variable as one greater than it was before.
#
# IN THIS CLASS YOU MUST use descriptive variable labels (e.g. lum_stara = , mass_starb =) and create a new variable name each time you do a calculation. ***Do not reuse variable names*** or you risk getting a nonsense answer when you refer to that variable.
#
# This is a simple example of how not using a new variable name could get you into trouble (if you were to assume that a was still 4). Note too that lines in which you have defined a variable do not have any output by default. If you want to check that the variable is properly assigned or see its value, you can type the variable name alone on a line below the variable assignment line.
a=10**2
a
# Alternatively you can print the variable to the screen. Execute the below:
print(a)
# You can define multiple variables in one line
a,b = 1,2
print(a,b)
b
# There's also a subtle but important point to notice here, which is the meaning of the equal sign. In mathematics, the statement that `a = b` is a statement that two things are equal, and it can be either true or false. In python, as in almost all other programming languages, `a = b` means something different. It means that the value of the variable a should be changed to whatever value b has. Thus the statement we made `a = a + 1` is not an assertion (which is obviously false) that a is equal to itself plus one. It is an instruction to the computer to take the variable a, and 1 to it, and then store the result back into the variable a. In this example, it therefore changes the value of a from 4 to 5.
#
# One more point regarding assignments: the fact that = means something different in programming than it does in mathematics implies that the statements `a = b` and `b = a` will have very different effects. The first one causes the computer to forget whatever is stored in a and replace it by whatever is stored in b. The second statement has the opposite effect: the computer forgets what is stored in b, and replaces it by whatever is stored in a.
#
#
# # 4. Jupyter Cautions
#
# You should always keep in mind that ***cells within a Jupyter notebook can be executed and reexecuted in any order*** as this can sometimes get you into trouble. This feature is the reason why an executed code cell is numbered, so that you know what order the cells you see have been executed in. Note that this does not have to be linear from the top to the bottom of the document, but can jump around within it. Often, you will note a mistake or something that you want to try and will go back and change the code in an earlier cell. If you reexecute just this one cell, then any already executed cells that rely on variables defined in the reexecuted cell or its output will ***not*** be automatically updated.
#
# For the purposes of this class, labs and homeworks should always be executable in a linear fashion. For this reason, you should observe the following "best practices" for coding in notebooks.
#
# 1. Use descriptive variable labels (e.g. lum_stara = , mass_starb =) and create a new variable name each time you do a calculation. ***Do not reuse variable names*** or you risk getting a nonsense answer when you refer to that variable.
#
# 2. Before finishing up, restart the kernel through the menu at the top of the notebook and execute the whole thing from start to finish once, making sure that all of your code output is how you want it. Note that **restarting the kernel will clear the code output, not the code itself**, so you will not be losing your work.
#
# 3. If you note a mistake in an earlier cell and go back to correct it, you should be careful to also reexecute any cells that depend on it. If you want to do a calculation similar to one that you executed in a previous cell, you should instead copy the contents of that cell into a new cell, rename the variables, and execute it again.
| Intro_JupyterNotebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: my_env
# language: python
# name: my_env
# ---
# + [markdown] tags=[]
# # Jack's Car Rental
# #### Figure 4.2
#
#
# Detailed analysis of the problem in Aditya Rastogi's blogpost
# [Elucidating Policy Iteration in Reinforcement Learning — Jack’s Car Rental Problem](https://towardsdatascience.com/elucidating-policy-iteration-in-reinforcement-learning-jacks-car-rental-problem-d41b34c8aec7)
#
# His repo https://github.com/thunderInfy/JacksCarRental
#
# Good ideas how to speed up code execution in Yang Zhang's repo https://github.com/zy31415/jackscarrental
#
# Those looking for an explanation of the probabilities calculation should look below cell 4.
# +
import copy
import math
import time
import itertools
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# -
# Constants and global variables:
# +
# maximum car number at each location
MAX_CARS = 20
# all possible states -- cartesian product of 21 (as 0 is a possible state)
STATES = tuple(itertools.product(range(MAX_CARS + 1), repeat=2))
# maximal number of cars to move overnight
MAX_MOVE = 5
# policy evaluation accuracy
THETA = 1e-2
# initialise values and policy
# values as 2d array axis0 and axis1 (x, y) being the numbers of cars at first and second locations respectively
values = np.zeros((MAX_CARS + 1,) * 2)
# action chosen at each state (integers from -5 to 5; positive moves cars from the first to the second location)
policy = np.zeros((MAX_CARS + 1,) * 2, dtype=int)
# rewards and transition probabilities will be precalculated
# 4d array for the expected immediate reward of the transition from one state (x, y) to another (x1, y1),
# accumulated over all request/return outcomes that realise that transition
rewards = np.zeros((MAX_CARS + 1, ) * 4)
# 4d array for transition probabilities between all states
transition_probs = np.zeros((MAX_CARS + 1, ) * 4)
# -
# From the book: "...the number of cars requested and returned at each location are Poisson random variables, meaning that the probability that the number is $n$ is $\frac{\lambda^n}{n!}e^{-\lambda}$, where $\lambda$ is the expected number":
# + tags=[]
def poisson_prob(n, _lambda):
"""The probability of k occurrences of a random variable having poisson distribution with expected value mu
:param n: number of occurrences
:type n: int
:param _lambda: expected value
:type _lambda: int
:return: probability of n
:rtype: float
"""
return math.exp(-_lambda) * pow(_lambda, n) / math.factorial(n)
# -
# As it is not stated how many cars may be requested or returned we assume it can be any natural number. The expectations for request are 3 and 4 and for return are 3 and 2 at the first and second locations respectively. Given that, probabilities for any number of cars above 11 is less than 0.001:
# Sanity check: for each rate used in the problem, find the largest car
# count whose Poisson probability still exceeds 0.001.
for _lambda in (2, 3, 4):
    y = np.array([poisson_prob(n, _lambda) for n in range(21)])
    y1 = np.where(y > 0.001)
    print(f'if lambda is {_lambda}, max car number is {np.max(y1)}')
# We could calculate transition probabilities and rewards for all possible requests and returns below those numbers and get expected values that are close enough to the correct ones to find optimal policy. But <NAME> in his [repo](https://github.com/zy31415/jackscarrental) uses the approach to calculate exact probabilities for <em>any</em> number of cars requested or returned.
#
# Consider we have $x$ cars at the first location. All requests from $0$ to $x$ cars have probabilities according to poisson distribution:
# +
# let x be 5
x = 5
# expectation of the request at the first location
_lambda = 3
# Poisson mass for every request count that can actually be served (0..x).
request_probabilities = [poisson_prob(n, _lambda) for n in range(x + 1)]
for n, prob in enumerate(request_probabilities):
    print(f'probability that {n} cars will be requested is {prob}')
# -
# In this situation ($x=5$) we get the same reward ($50$ dollars) and move to the same new state ($x=0)$ after all requests above $5$. The probability to get any request above $5$ is $1 - \sum_{n=0}^{5}p(n)$. So if we sum the probability that $5$ will be requested and the probability that any number above $5$ will be requested, we will get the probability to get the reward $50$ and move to the state $x=0$:
# + tags=[]
# Fold the tail probability P(request > x) into the last entry so the
# truncated list still sums to exactly 1.
request_probabilities[-1] += 1 - sum(request_probabilities)
print(f'Probability that 5 or more cars will be requested is {request_probabilities[-1]}')
# -
# The logic behind calculating returns is the same, but we need to sum all probabilities from $0$ to $20 - x$. If $x=5$ the return of $15$ cars and any number above that will move us to the same new state $x=20$. Let's calculate the series of probabilities for all possible car numbers and expectations:
# Truncated Poisson tables: poisson_probs[cars_num, rate][k] is P(k events),
# with the tail mass P(> cars_num) folded into the last entry.
poisson_probs = dict()
for _lambda in (2, 3, 4):
    for cars_num in range(MAX_CARS + 1):
        poisson_probs[cars_num, _lambda] = [poisson_prob(k, _lambda) for k in range(cars_num + 1)]
        poisson_probs[cars_num, _lambda][-1] += 1 - sum(poisson_probs[cars_num, _lambda])
# Now we can precalculate all transition probabilities and respective rewards. It takes some time, but speeds up the policy evaluation.
# + tags=[]
t0 = time.perf_counter()
# x and y are the numbers of cars at the first and second locations.
for x, y in STATES:
    print('\r', f'Precalculating rewards and transition probabilities for state {x, y}', end=' ')
    # can't request more cars than are available at either location
    # (local renamed from the original's inconsistent 'req_quest' to
    # 'request_y', parallel to request_x / return_x / return_y)
    for request_x, request_y in itertools.product(range(x + 1), range(y + 1)):
        # cars left after the requests are served
        x1, y1 = x - request_x, y - request_y
        # joint probability of this pair of request counts (rates 3 and 4)
        prob_req = poisson_probs[x, 3][request_x] * poisson_probs[y, 4][request_y]
        # $10 for each car rented
        reward = (request_x + request_y) * 10
        # can't return more than 20 minus number of cars left (cars over 20 disappear from the problem)
        for return_x, return_y in itertools.product(range(21 - x1), range(21 - y1)):
            # cars on both locations after the returns
            x2, y2 = x1 + return_x, y1 + return_y
            # joint probability of this pair of return counts (rates 3 and 2)
            prob_ret = poisson_probs[20 - x1, 3][return_x] * poisson_probs[20 - y1, 2][return_y]
            # probability of this particular transition from (x, y) to (x2, y2)
            prob_product = prob_req * prob_ret
            # accumulate expected reward for the (x, y) -> (x2, y2) transition
            rewards[x, y, x2, y2] += reward * prob_product
            # accumulate probability of the (x, y) -> (x2, y2) transition
            transition_probs[x, y, x2, y2] += prob_product
t1 = time.perf_counter()
print('done in ', round(t1 - t0, 3), 'seconds')
# -
# Remaining steps are pretty straightforward. The only trick is that we had to precalculate transition probabilities itself and the products of rewards and transition probabilities separately to get the correct result. If we expand brackets in Bellman equation we get:
#
# $$
# \sum_{s', r}p(s',r|s,a)[r + \gamma V(s')] = \sum_{s', r}p(s',r|s,a)r + \gamma \sum_{s', r}p(s',r|s,a)V(s') = \\ \sum_{s', r}p(s',r|s,a)r + \gamma \sum_{s'}p(s'|s,a)V(s')
# $$
#
# In cell 8 we calculate $\sum_{s', r}p(s',r|s,a)r$ when we update rewards, and $\sum_{s'}p(s'|s,a)$ when we update transition probabilities.
# +
def expected_value(values, state, action, discount=0.9):
    """Calculate expected value for a given action
    :param values: current state-value table indexed by (cars at loc 1, cars at loc 2)
    :type values: numpy.ndarray
    :param state: state which value is updated
    :type state: tuple
    :param action: action taken in this state (positive moves cars from loc 1 to loc 2)
    :type action: int
    :param discount: discount
    :type discount: float
    :return: new value
    :rtype: float
    """
    x, y = state
    # move cars overnight, actions are validated during the policy improvement;
    # min(..., 20) clamps because cars above MAX_CARS disappear from the problem
    x, y = min(x - action, 20), min(y + action, 20)
    # pay $2 for each moved car
    new_value = -2 * abs(action)
    # Bellman equation: expected immediate reward plus discounted expected value,
    # using the module-level precomputed rewards / transition_probs tables
    new_value += np.sum(rewards[x, y]) + np.sum(transition_probs[x, y] * values) * discount
    return new_value
def argmax(iterable):
    """Return the index of the maximum element of *iterable*.

    Ties resolve to the earliest index. Uses built-in ``max`` over
    ``enumerate`` pairs, which turns out to be faster than ``numpy.argmax``
    on low-dimensional Python sequences.

    :param iterable iterable: The vector in which to find the index of the maximum element
    :return: Maximum element index
    :rtype: Int
    """
    position, _value = max(enumerate(iterable), key=lambda pair: pair[1])
    return position
def policy_evaluation():
    """Policy evaluation implementation (step 2 of algorithm at page 80)

    Repeatedly sweeps all states, backing up values under the current
    module-level ``policy`` until the largest change falls below THETA.
    Mutates the module-level ``values`` array in place.
    :return: None
    """
    delta = 1
    while delta > THETA:
        delta = 0
        # snapshot so every backup in this sweep uses the previous values
        old_values = copy.deepcopy(values)
        for state in STATES:
            values[state] = expected_value(old_values, state, policy[state])
            delta = max(delta, abs(values[state] - old_values[state]))
def policy_improvement():
    """Policy improvement implementation (step 3 of algorithm at page 80)

    Greedily updates the module-level ``policy`` with respect to the
    current ``values``.
    :return: True if no action changed (policy is stable), else False
    :rtype: bool
    """
    policy_stable = True
    for state in STATES:
        old_action = policy[state]
        # get all possible actions: cannot move more cars than a location
        # holds, nor more than MAX_MOVE in either direction
        x, y = state
        actions = range(-min(y, MAX_MOVE), min(x, MAX_MOVE) + 1)
        # list of action values
        action_values = [expected_value(values, state, action) for action in actions]
        # optimal action
        policy[x, y] = actions[argmax(action_values)]
        if old_action != policy[state]:
            policy_stable = False
    return policy_stable
def heatmap(data, title, axes, iteration):
    """Plot the heatmap for policy or values
    :param data: 2-d array to visualise (policy or values table)
    :param title: title drawn above the subplot
    :param axes: flat sequence of matplotlib axes to draw into
    :param iteration: index into *axes* selecting the target subplot
    :return: None
    """
    h = sns.heatmap(data, ax=axes[iteration])
    h.set_ylabel('#Cars at first location')
    h.set_xlabel('#Cars at second location')
    h.set_title(title)
    # put the origin at the bottom-left so car counts increase upwards
    h.invert_yaxis()
    # Computer Modern font for the LaTeX-style math titles
    plt.rc('mathtext', fontset="cm")
def policy_iteration():
    """Policy iteration implementation from RL book page 80

    Alternates policy evaluation and greedy policy improvement until the
    policy is stable, plotting each intermediate policy and, last, the
    value function of the final policy.
    :return: None
    """
    # plotting: panels for the intermediate policies plus the final values
    fig, axes = plt.subplots(2, 3, figsize=(40, 20))
    axes = axes.flatten()
    iteration = 0
    policy_stable = False
    while not policy_stable:
        print('\r', 'policy iteration ', iteration, end='')
        # plot policy
        policy_title = r'$\pi_{}$'.format(iteration)
        heatmap(policy, policy_title, axes, iteration)
        # step 2: evaluate the current policy
        policy_evaluation()
        # step 3: greedy improvement; True once the policy stops changing
        policy_stable = policy_improvement()
        iteration += 1
    # plot values; derive the subscript from the actual number of iterations
    # instead of the previously hard-coded \pi_4 (only correct for exactly 5)
    value_title = r'$v_{{\pi_{}}}$'.format(iteration - 1)
    heatmap(values, value_title, axes, iteration)
if __name__ == '__main__':
    policy_iteration()
    plt.show()
| chapter4/fig4_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# \tableofcontents
# \pagebreak
# + [markdown] tags=[]
# # Einführung in Python
# -
# Wichtige Eigenschaften von Python:
#
# - Interpretiert statt kompiliert
# - Dynamische Typisierung ("Duck"-Typing)
# - Unterstützt objektorientierte und funktionale Programmierung
# - Blöcke werden durch Einrücken statt durch Klammern begrenzt
# - Einfache Erweiterbarkeit durch externe Bibliotheken
# Die wichtigsten Prinzipien von Python lassen sich auch direkt mit dem Kommando `import this` ausgeben.
import this
# # Arbeiten mit Variablen
# ## Variablen und Datentypen
# ### Deklaration
# Variablen werden mit `=` deklariert oder auch neu zugewiesen. Auf Grund der dynamische Typisierung muss dabei nicht der Typ angegeben werden.
spam = 3 # Integer
eggs = "abc" # String
# Ebenfalls können Variablen ohne weiteres einem anderen Wert mit einem anderen Typ zugewiesen werden.
ham = 32.0 # Float
ham = True # Boolean
# ### Elementare Datentypen
# Die wichtigsten elementaren Datentypen sind:
# | Bezeichnung | Python-Schlüsselwort | Code Beispiel |
# |:----------------: |:--------------------: |:------------------------------: |
# | Ganzzahl | int | `4` |
# | Gleitkommazahl | float | `3.22` |
# | Wörter | string | `"Ham"` |
# | Feld | list | `[3, 2, "a"]` |
# | Wörterbuch | dict | `{"name" : "Max", "age" : 26}` |
# | Tupel | tuple | `(1, 2, 3)` |
# ### Inhalt und Typ einer Variablen
# Der Inhalt einer Variablen kann via `print()` ausgegeben werden.
eggs = "Oh dear"
print(eggs)
# Der Typ einer Variablen kann via `type()` ausgegeben werden.
eggs = True
type(eggs)
# ### In-Place Änderung
# Variablen können direkt im Speicher verändert werden ("In-Place" Änderung), indem die Zuweisung direkt die gewünschte Operation enthält.
x = 2
x = x - 1
x
# ## Arbeiten mit numerischen Datentypen
# ### Elementare Rechenoperationen
# Numerische Datentypen können mit den bekannten Rechenoperationen `+`,`-`,`*` und `/` verändert werden.
x = 3
x - 2
x = 5.0
x * 2
# ### Automatische Integer Konvertierung
# Integers werden direkt zu Floats umgewandelt, wenn die Formel einen Float enthält oder das Ergebnis nur als Float darstellbar ist.
spam = 25
ham = 5.0
spam / ham
spam = 25
eggs = 2
spam / eggs
# Um zu verhindern, dass bei einer Operation von zwei Integers ein Float rauskommt, muss der `//` Operator verwendet werden, wobei nur der ganzzahlige Teil des Ergebnisses ausgegeben wird.
x = 5
y = 2
x // y
# Mit dem `%` Operator erhält man wiederum den Rest.
x = 5
y = 2
x % y # 5 % 2
# ### Logische Operationen
# Mit Hilfe von `<`, `>`, `==` und `!=` können numerische Werte verglichen werden.
x = 3
y = 4
x > y # 3 > 4
x = 5
y = 2
x != y # 5 != 2
# ### Erweiterte Rechenoperationen
# Durch den Operator `**` lässt sich eine Zahl potenzieren, wobei der Ausdruck dann die Form `Basis ** Exponent` hat.
z = 12
w = 3
z ** w # 12 ** 3
# Mehr Rechenoperationen lassen sich durch (externe) Bibliotheken einbinden, dazu später mehr.
# ## Behälter als Datentypen
# Listen und Dictionaries können als Container für andere Objekte aufgefasst werden, enthalten also selbst wieder andere Variablen und Datentypen. Wir beschäftigen uns zunächst nur mit Listen.
# ### Listen
# Listen beginnen und enden immer mit einer eckigen Klammer `[]`. Der Inhalt wird zwischen die eckigen Klammern geschrieben und durch Kommata getrennt.
x = [2, 3, 4]
x
# Listen können jeglichen Inhalt aufnehmen, beispielsweise Variablen, elementare Datentypen oder auch andere Listen.
y = True
x = [2, "a", y]
x
y = 3
x = [2, '"a"', y]
x
x = ["spam", 25.0, [2, 4, 1]]
x
# ### Listen Inhalte abrufen
# Listen sind geordnete Datenstrukturen, Inhalte werden also über ihre Position in der List addressiert. Hierzu wird die Index-Schreibweise verwendet, welche die Form `Listenname[Index]` hat. Der erste Eintrag in einer Liste hat den Index 0, der nächste Eintrag (rechts) daneben den Index 1 usw.
x = [2, 4, "a"]
x[0]
# Die Inhalte einer Liste können auch von hinten indiziert werden, wobei der letzte Eintrag einer Liste den Index -1 hat, der nächste Eintrag (links) daneben den Index -2 usw.
x = [2, 4, "a", "spam"]
x[-2]
# Um mehrere Elemente einer Liste abzurufen gibt man als Index einen Start- und einen End-Index an, in dessen Bereich man alle Elemente abrufen möchte. Der End-Index wird dabei als ausschließende Grenze verstanden, das Element an dieser Stelle wird also nicht mit abgerufen.
x = [6, 8, 9, 3, "a"]
x[1:3]
# ### Inhalte einer Liste modifizieren
# Um den Inhalt einer Liste zu modifizieren verwendet man wieder die Index-Schreibweise und weist dem Element einen neuen Wert zu.
x = [2, 5, 8, 13]
x[3] = 9
x
# ### Einer Liste Inhalte hinzufügen
# Um einer Liste Inhalte hinzuzufügen, ruft man die Methode `.append()` der Liste auf. Hierdurch wird am Ende der jeweiligen Liste das Element hinzugefügt. Auf Methoden und Funktionen kommen wir später noch zu sprechen.
x = [2, 4, "a"]
x.append("b")
x
# Alternativ kann man die Methode `.insert()` nutzen, wobei man dann die genaue Position bestimmen kann, wo das Element hinzugefügt wird. Als ersters Argument der Methode wird der Index angegeben, vor den das Element platziert werden soll, und als zweite Argument das eigentliche Element an sich.
x = [2, 5, 9]
x.insert(0, "a")
x
# ### Inhalte einer Liste entfernen
# Möchte man jetzt ein bestimmtes Element einer Liste entfernen, verwendet man die `.pop` Methode. Hierzu übergibt man der Methode den Index des Elements, dass man entfernen möchte. Ohne Angabe des Index wird das letzte Element entfernt.
x = [2, 5, 8, 13]
x.pop(3)
x
# Wenn man sich nicht sicher ist, an welcher Stelle das Element ist, kann man auch die Methode `.remove()` verwenden. Diese geht die Liste von Anfang bis Ende durch und entfernt das erste Auftreten des jeweiligen Werts. Im Unterschied `.pop()` gibt man hier also nicht den Index an, sondern den zu entfernenden Wert.
x = [2, 3, 4, 1, 6, 9, 9, 9, 13, 15]
x.remove(9)
x
# Um eine Liste vollständig zu löschen nutzt man die `.clear()` Methode. Diese löscht schlicht und ergreifend alle Elemente einer Liste und hinterlässt eine leere Liste.
x = [2, 5, 8 , 13]
x.clear()
x
# ### Verschachtelte Listen
# Häufig kommt es in den Naturwissenschaften vor, dass man mit mehrdimensionalen Objekten arbeiten muss. Beispiele hierfür sind Vektoren und Matrizen. Um diese in Python abbilden zu können, lassen sich wie oben beschrieben Listen verschachteln. Dazu werden die innersten Listen, also die Zeilenvektoren, mit Elementen gefüllt und wiederum als Element für eine andere Liste verwendet.
x = [[3, 2, 1],
[4, 5, 6],
[7, 0, 7]]
x
# Um auf die Elemente einer verschachtelten Liste zuzugreifen, muss man hintereinander die entsprechende Anzahl an Indizes angeben, wobei man von Außen nach Innen vorgeht
x = [[11, 12, 15, 16],
[0, 9, 7, 8],
[12, 15, 75, 18]]
x[0][1]
x[1][1]
# $\mathbf{A} = \left( \begin{matrix} a_{11} & a_{12} & a_{13} & \ldots & a_{1n} \\ a_{21} & a_{22} & a_{23} & \ldots & a_{2n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ a_{m1} & a_{m2} & a_{m3} & \ldots & a_{mn} \end{matrix} \right)$
# Verschachtelte Listen verhalten sich genauso wie alle anderen Listen auch und haben auch dieselben Methoden, weshalb hier auf weitere Beispiele verzichtet wird.
# # Funktionen und Methoden
# ## Vordefinierte Tätigkeiten auf Knopfdruck
# Funktionen und Methoden werden verwendet, um häufig ausgeführte Tätigkeiten wie Sortieren, Zählen, Normieren etc., über einen einfachen Befehlsaufruf zugänglich zu machen. Einfach ausgedrückt: Man will das Rad nicht immer neu erfinden, sondern verwendet das "Rad" immer wieder.
#
# Der Hauptunterschied zwischen Funktionen und Methoden besteht darin, dass Funktionen für sich alleine stehen und keinem Objekt fest zugeordnet sind, während Methoden stets einem Objekt zugeordnet sind und nicht unabhängig von diesem verwendet werden können.
#
# Die Syntax einer Funktion hat also die Form `Funktion(Objekt)`, während eine Methode die Form `Objekt.Methode()` hat.
# ## Methoden
# Die Verwendung von Methoden ist ziemlich simpel. Dazu rufen wir einfach die Methode des jeweiligen Objekts auf. Jedes Objekt hat seine eigenen Methoden, wobei Objekte auch gleichlautende Methoden haben können. Diese müssen aber nicht zwangsläufig das selbe tun, auch wenn dies aus didaktischen Gründen meist der Fall ist.
x = [3, 5 ,1, 2, 4]
print(x)
print("\nMethodenaufruf \n")
x.sort()
print(x)
# Dabei gilt es zu beachten, dass manche Methoden ein Objekt permanent verändern, während manche Methoden nur eine geänderte Kopie des Objekts zurückgeben.
x = [6, 8, 1, 9, 4]
print(x.count(1))
print()
print(x)
print()
x.sort()
print(x)
# ## Funktionen
# Funktionen können entweder von externen Quellen importiert, oder selbst geschrieben werden. Wir konzentrieren uns zunächst auf das Schreiben eigener Funktionen, importierte Funktionen werden wir im nächsten Teil kennenlernen.
# ### Funktionen definieren
# Jede selbstgeschriebene Funktion beginnt mit dem Schlüsselwort `def`, gefolgt von dem Namen der Funktion und den Parametern in runden Klammern und schließt mit einem Doppelpunkt. Der Funktionsblock wird darunter, um 4 Leerzeichen eingerückt, geschrieben. Zusammengesetzt sieht das dann so aus:
#
# `def Funktionsname(Parameter):`
# `____Parameter`
#
# Der Funktionsblock muss zwingend mit 4 Leerzeichen eingerückt werden, da der Python-Interpreter sonst nicht erkennt, dass die Anweisungen zu der Funktion gehört. Diese Eigenschaft von Python werden wir später erneut sehen, wenn wir auf Schleifen und Verzweigungen eingehen.
def add_two(x):
    """Return x increased by 2."""
    y = x + 2
    return y
# Die oben definierte Funktion hat lediglich einen Parameter, `x`, es können aber natürlich mehrere verwendet werden. Der Funktionsblock definiert eine neue Variable, `y`, welche das Ergebnis der Addition vom Parameter `x` mit der Zahl 2 ist. Abschließend gibt die Funktion über den `return y` Befehl den Inhalt der Variable `y` zurück.
#
# Um nun `add_two` zu verwenden geben wir einfach Ihren Namen ein und, wieder ohne Leerzeichen, die um 2 zu erhöhende Zahl in Klammern direkt dahinter.
add_two(5)  # evaluates to 7; in a notebook the value is echoed as the cell output
# Wir können das Ergebnis der Funktion natürlich auch wieder in einer neuen Variablen speichern.
number = add_two(5)  # store the return value for later use
number
# ### Zugriff auf Variablen in Funktionen
# Auf Variablen innerhalb einer Funktion kann von Außen nicht zugegriffen werden. Im Beispiel von `add_two` können wir also nicht ohne weiteres den Inhalt von `y` abrufen.
z = add_two(5)
y  # raises NameError on purpose: y only exists inside add_two
# Anders rum können wir innerhalb einer Funktion auf global definierte Variablen zugreifen.
# +
spam = [0, 1, 2, 3]  # module-level (global) variable
def add_two(x):
    """Return x + 2, printing the global list spam first."""
    print(spam)  # globals are readable from inside a function body
    y = x +2
    return y
add_two(2)
# -
# # Verzweigungen und Schleifen
# Auch in Python gibt es die Möglichkeit, mit Verzweigungen und Schleifen fallbezogene Entscheidungen im Programmablauf zu treffen. Je nach Eingabe erhält man bei Verzweigungen unterschiedliche Ausgaben, während bei Schleifen ein statischer Befehlsblock für eine gewisse Anzahl von Wiederholungen ausgeführt wird.
#
# Wir gehen zunächst auf Verzweigungen ein und danach auf Schleifen.
# ## Verzweigungen
# Verzweigungen lassen sich wie eine Weggabelung an einer Straße begreifen, wo je nach gewünschtem Ziel eine anderer Weg eingeschlagen wird. Hierbei ist die Entscheidung für einen Weg an eine klare Ja/Nein-Frage gebunden. Es können auch mehrere Wege offen stehen, jedoch kann immer nur jeweils ein Weg beschritten werden.
# ### Verzweigungen definieren
# Verzweigungen folgen in Python einer festen Syntax. Die Verzweigung beginnt immer mit einem `if`, folgt dann mit einer Bedingung welche entweder wahr oder falsch ist und schließt mit einem Doppelpunkt. Die eigentlichen Anweisungen werden wieder mit 4 Leerzeichen eingerückt. Zusammengesetzt sieht das dann so aus:
#
# `if Bedingung:`
# `____Anweisungsblock`
#
# +
x = 3
if x > 2:  # condition is True here, so the indented block runs
    print(x)
# -
# Im oberen Beispiel wird der Befehl `print(x)` nur dann ausgeführt, wenn `x` größer als 2 ist. Möchten wir jetzt noch den gegenteiligen Fall abdecken, nämlich dass `x` kleiner oder gleich 2 ist, müssen wir unter den Anweisungsblock auf gleicher Höhe mit dem `if` ein `else` setzen. Unter das `else`, auch wieder mit 4 Leerzeichen eingerückt, folgen dann die Anweisungen für den gegenteiligen Fall.
# +
x = 2
if x > 2:
    print(x)
else:  # runs because the condition above is False
    print("x ist kleiner oder gleich 2")
# -
# ### Mehr als 2 Fälle unterscheiden
# Soll nun zwischen mehr als 2 Fällen unterschieden werden, verwendet man das Schlüsselwort `elif`, ein Kofferwort aus `else` und `if`. Die Verzweigung beginnt wieder mit `if`, aber statt `else` folgen nun so viele `elif`-s, wie man Bedingungen hat. Das `else` kann dann als Abschluss optional verwendet werden, ist aber nicht zwingend erforderlich. Dies hängt davon ab, ob es eine universelle Anweisung gibt, welche bei Erfüllung keiner der Fälle greifen soll.
# +
x = 2
if x > 2:
    print("x ist größer als 2")
elif x == 2:  # first matching branch wins; the rest is skipped
    print("x ist gleich 2")
else:
    print("x ist kleiner als 2")
# -
# Bei mehr als zwei Bedingungen geht der Interpreter diese von oben nach unten durch und führt die erste aus, welche wahr ist. Hier sollte also drauf geachtet werden, dass die Bedingungen nach der Priorität/Restriktivität absteigend geordnet sind.
# +
x = 8
if x > 9:
    print("x ist größer als 9")
elif x > 8:
    print("x ist größer als 8")
elif x > 7:  # first condition that is true from the top: only this branch prints
    print("x ist größer als 7")
elif x > 6:
    print("x ist größer als 6")
elif x > 5:
    print("x ist größer als 5")
# -
# ### Mehrere Bedingungen prüfen
# Bisher haben wir immer nur eine Bedingung geprüft. Wir können aber auch mehrere Bedingungen gleichzeitig prüfen, also eine Anweisung nur dann ausführen, wenn mehr als eine Bedingung erfüllt ist. Dazu werden die Bedingungen mit den logischen Operatoren `and` oder `or` verknüpft. Zusätzlich kann noch `not` verwendet werden, um eine Bedingung zu negieren.
# +
x = 2
y = 12
if x == 3 and y == 10:
    print("spam")
elif x == 4 and y == 9:
    print("ham")
elif x == 2 and y == 12:  # both sub-conditions hold, so "eggs" is printed
    print("eggs")
# -
# ## Schleifen
# Im Gegensatz zu Verzweigungen führen Schleifen immer die gleichen Anweisungen aus, diese jedoch mehrmals hintereinander. Die Anzahl der Wiederholungen hängt von der Abbruchbedingung ab. Ist diese erfüllt, wird die Schleife gestoppt. Die Abbruchbedingung kann
#
# >statisch (eg. die Schleife wird exakt 5 mal ausgeführt) oder
#
# >dynamisch (eg. die Schleife wird so lange ausgeführt, wie der User 1 eingibt)
#
# sein.
#
# In Python gibt es (statische) `for` Schleifen und (dynamische) `while` Schleifen. Wir beginnen mit den `for` Schleifen.
# ### for Schleifen mit numerischen Indizes
# Wie oben erwähnt sind `for` Schleifen statisch. Das bedeutet, dass bereits zu Beginn der Ausführung feststeht wie häufig die Schleife durchlaufen wird. Eine `for` Schleife beginnt immer mit dem Schlüsselwort `for`, gefolgt vom Durchlaufindex (Variable), anschließend das Schlüsselwort `in`, das zu durchlaufende Objekt und ein Doppelpunkt. In der nächsten Zeile mit 4 Leerzeichen eingerückt folgt der Anweisungsblock. Das Ergebnis sieht dann so aus:
#
# `for Index in Objekt:`
# `____Anweisungsblock`
#
# Sehr häufig werden fortlaufende Zahlen als Index verwendet. Dafür wird die `range()` Funktion verwendet. Bei jedem Aufruf gibt sie eine Ganzzahl in einem festgelegten Intervall zurück. Wenn nur eine einzelne Zahl als Parameter übergeben wird, werden alle Zahlen von 0 bis ausschließlich dieser Zahl zurückgegeben.
for i in range(5):  # range(5) yields 0, 1, 2, 3, 4 — the upper bound is excluded
    print(i)
# Die `range()` Funktion lässt sich aber noch weiter an die eigenen Bedürfnisse anpassen. Werden zwei Zahlen als Parameter angegeben wird die erste als Untergrenze und die zweite als Obergrenze verwendet. Auch hier gilt wieder: Die Obergrenze wird nicht erreicht.
for i in range(5, 10):  # start is inclusive, stop is exclusive: 5..9
    print(i)
# Mit diesem Wissen lassen sich nun ganz einfach `for` Schleifen zum Durchlaufen von Listen verwenden. Dazu verwendet man den Durchlaufindex der `for` Schleife einfach als Listenindex.
# +
spam = ["ham", "eggs", 7, [0, 1, 2], "me"]
for i in range(1, 4):  # indices 1, 2, 3 -> "eggs", 7, [0, 1, 2]
    print(spam[i])
# -
# ### for Schleifen mit Objektindizes
# Es lassen sich aber nicht nur Zahlen als Durchlaufindex verwenden. Wenn es sich um Objekte wie Listen, Strings o.ä. handelt (sogenannte Iterables), können auch die einzelnen Elemente dieser Objekte selbst als Indizes verwenden. Dazu wird statt der `range` Funktion einfach das Objekt an sich angegeben, welches durchschritten werden soll. Dann wirkt der Durchlaufindex als Referenz auf die einzelnen Elemente eines Objekts.
# +
spam = [1, 5, 9, [0, 2], "eggs", "ham"]
for i in spam:  # i is bound to each element in turn, not to an index
    print(i)
# -
# So können selbst Strings durchschritten werden.
# +
this = "spam"
for i in this:  # strings are iterable character by character
    print(i)
# -
# ### while Schleifen
# Im Gegensatz zu `for` Schleifen haben `while` Schleifen eine dynamische Abbruchbedingung. Zu Beginn der Ausführung muss also die Anzahl der Wiederholungen noch nicht feststehen. Die Syntax von `while` Schleife ist ähnlich zu der von `for` Schleifen. Zu Beginn steht das Schlüsselwort `while`, danach die zu prüfende Bedingung und daran ein Doppelpunkt. Der Anweisungsblock wird wie üblich in der Zeile darunter mit 4 Leerzeichen eingerückt. Das sieht dann so aus:
#
# `while Bedingung:`
# `____Anweisungsblock`
#
# Im Gegensatz zur `for` Schleife muss bei der `while` Schleife also die Variable bereits deklariert sein, welche als Bedingung verwendet werden soll. Ebenfalls muss die Anweisung zum Abbruch der Schleife aus dem Anweisungsblock an sich kommen, da im Kopf der Schleife keine Änderung dieser erfolgt.
# +
i = 0
while i < 10:  # the condition is re-checked before every iteration
    print("i has the value " + str(i) + ", thus i is smaller than 10.")
    i += 1  # without this update the loop would never terminate
# -
# Wir sehen uns zum Abschluss dieses Kapitels noch ein einfaches Beispiel für eine `while` Schleife an, die in Abhängigkeit von der Eingabe des Benutzers eine weitere Iteration durchläuft oder abbricht.
# +
j = 0
while True:  # intentionally infinite; only left via break below
    print("This loop has run for " + str(j) + " iterations.")
    answer = input("Do you wanna continue? press y for yes, n for no.")
    j = j +1
    if answer == "y":
        continue  # jump back to the top of the loop
    elif answer == "n":
        break  # exit the loop; any other input simply loops again
| german/Grundlagen in Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Searching by `class`
#
# Let's suppose we wanted to find all the tags that had the attribute `class="h2style"`. Unfortunately, in this case, we can't simply pass this attribute to the `.find_all()` method. The reason is that the **CSS** attribute, `class`, is a reserved word in Python. Therefore, using `class` as a keyword argument in the `.find_all()` method, will give you a syntax error. To get around this problem, BeautifulSoup has implemented the keyword `class_` (notice the underscore at the end) that can be used to search for the `class` attribute. Let's see how this works.
#
# In the code below, we will use the `.find_all()` method to search for all the tags in our `sample.html` file that have the attribute `class="h2style"`:
# +
# Import BeautifulSoup
from bs4 import BeautifulSoup
# Open the HTML file and create a BeautifulSoup Object
with open('./sample.html') as f:
    page_content = BeautifulSoup(f, 'lxml')  # parse the file handle with the lxml parser
# Print the tags that have the attribute class_ = 'h2style'
# (class_ avoids clashing with Python's reserved word "class")
for tag in page_content.find_all(class_ = 'h2style'):
    print(tag)
# -
# We can see that we get the two `<h2>` tags since they are the only ones in our document that have the attribute `class="h2style"`.
# # TODO: Find All Tags With Attribute `class='section'`
#
# In the cell below, use the `.find_all()` method to find all the tags in the `sample.html` file that have the attribute `class="section"`. Start by opening the `sample.html` file and passing the open filehandle to the BeautifulSoup constructor using the `lxml` parser. Save the BeautifulSoup object returned by the constructor in a variable called `page_content`. Then find all the tags that have the attribute `class="section"` from the `page_content` object. Loop through the list and print each tag in the list. Use the `.prettify()` method to improve readability.
# +
# Import BeautifulSoup
from bs4 import BeautifulSoup

# Open the HTML file and create a BeautifulSoup Object
with open('./sample.html') as f:
    page_content = BeautifulSoup(f, 'lxml')

# Print all the tags that have the attribute class = 'section'
# (the cell previously still searched for 'h2style', copied from the
# example above, instead of doing what the TODO asks for).
# Use .prettify() to improve readability of each printed tag.
for item in page_content.find_all(class_ = 'section'):
    print(item.prettify())
# -
# # Searching With Regular Expressions
#
# We can also pass a regular expression object to the `.find_all()` method. Let's see an example. The code below uses a regular expression to find all the tags whose names contain the letter `i`. Remember that in order to use regular expressions we must import the `re` module. In this particular example we will be only interested in printing the tag name and not its entire content. In order to do this, we will use the `.name` attribute of the `Tag` object to only print the name of tag itself, as shown below:
# +
# Import BeautifulSoup
from bs4 import BeautifulSoup
# Import the re module
import re
# Open the HTML file and create a BeautifulSoup Object
with open('./sample.html') as f:
    page_content = BeautifulSoup(f, 'lxml')
# Print only the tag names of all the tags whose name contain the letter i
for tag in page_content.find_all(re.compile(r'i')):
    print(tag.name)  # .name gives just the tag's name (e.g. 'title'), not its contents
# -
# # TODO: Find All Tags The Start With The Letter `h`
#
# In the cell below, pass a regular expression to the `.find_all()` method to find all the tags whose names start with the letter `h`. Start by opening the `sample.html` file and passing the open filehandle to the BeautifulSoup constructor using the `lxml` parser. Save the BeautifulSoup object returned by the constructor in a variable called `page_content`. Then find all the tags whose names start with the letter `h` by passing a regular expression to the `.find_all()` method. Loop through the list and print each tag in the list.
# +
# Import BeautifulSoup
from bs4 import BeautifulSoup

# Import the re module
import re

# Open the HTML file and create a BeautifulSoup Object.
# (The cell previously contained the bare statement "page_content =",
# which is a SyntaxError; the TODO is completed here.)
with open('./sample.html') as f:
    page_content = BeautifulSoup(f, 'lxml')

# Print only the tag names of all the tags whose names start with the letter h
for tag in page_content.find_all(re.compile(r'^h')):
    print(tag.name)
# -
# # Solution
#
# [Solution notebook](searching_by_class_and_regexes_solution.ipynb)
| Quiz/m5_financial_statements/searching_by_class_and_regexes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Browser-like User-Agent so the endpoint serves the scraper like a normal client
headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15"
}
# Chapter-tree endpoint of the coding site (partcode=B per the query string)
icd_chap_url=u'http://code.nhsa.gov.cn:8000/jbzd/public/toStdTcmTreeList.html?partcode=B'
icd_chapter=requests.get(icd_chap_url,headers=headers)
icd_chapter=icd_chapter.json()  # NOTE(review): no status-code check; a non-JSON error page would raise here
icd_chapter[0]['tcmId']  # peek at the first chapter's id field
# Export the chapter list as CSV, but keep the raw JSON list bound to
# icd_chapter: later cells index it as icd_chapter[i]['tcmId'], which
# works on a list of dicts but raises KeyError on a DataFrame (integer
# positions are not column labels). The original code rebound icd_chapter
# to the DataFrame here, breaking that downstream indexing.
icd_chapter_df = pd.DataFrame(icd_chapter)
icd_chapter_df.to_csv('./CHS_TCM_DISEASE_CHAPTER.csv')
icd_detail_url_base=u'http://code.nhsa.gov.cn:8000/jbzd/public/dataTcmSerachDetail.html?tcmId='
# NOTE(review): this assumes icd_chapter is still the raw JSON list of
# dicts; if it has been replaced by a DataFrame, icd_chapter[0] raises
# KeyError (integer column lookup) — verify cell execution order.
icd_detail_url_full=icd_detail_url_base+icd_chapter[0]['tcmId']
icd_detail_url_full
# Trial run: fetch and parse a single detail page before looping over all chapters
icd_detail=requests.get(icd_detail_url_full,headers=headers)
if icd_detail.status_code == 200:
    info = {}  # NOTE(review): never populated — appears to be leftover scaffolding
    soup = BeautifulSoup(icd_detail.text)  # no parser specified: bs4 picks one itself and emits a warning
    icd_code_lst=[d.text.strip() for d in soup.find_all('a')]
    icd_name_lst=[d.text.strip() for d in soup.find_all('span')]
CHS_ICD10=[]  # collects one (code, name) DataFrame per chapter
for i in range(len(icd_chapter)):
    # assumes icd_chapter is still the raw JSON list of dicts; a DataFrame
    # here would make icd_chapter[i] raise KeyError — verify execution order
    icd_detail_url_full=icd_detail_url_base+icd_chapter[i]['tcmId']
    icd_detail=requests.get(icd_detail_url_full,headers=headers)
    if icd_detail.status_code == 200:
        info = {}  # NOTE(review): never populated — leftover scaffolding
        soup = BeautifulSoup(icd_detail.text)  # no parser specified: bs4 picks one itself and emits a warning
        icd_code_lst=[d.text.strip() for d in soup.find_all('a')]
        icd_name_lst=[d.text.strip() for d in soup.find_all('span')]
        # assumes both tag lists are equally long on every page; otherwise the
        # DataFrame constructor raises ValueError — TODO confirm
        df_icd=pd.DataFrame({'tcm_code':icd_code_lst,'tcm_name':icd_name_lst})
        CHS_ICD10.append(df_icd)
    else:
        print(icd_detail_url_full)  # log pages that failed to download
CHS_ICD10_Table=pd.concat(CHS_ICD10,axis=0,ignore_index=True)
CHS_ICD10_Table.head(20)
len(CHS_ICD10_Table)
CHS_ICD10_Table.to_csv('./LOOKUP_TABLES/CHS_TCM_DISEASE.csv',encoding='utf-8',index=False)
| other_tools/Get_CHS_TCM_DISEASE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Duplicated features
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# ## Read Data
data = pd.read_csv('../Kyoto_Train.csv')  # training split of the Kyoto dataset
data.shape
# check the presence of missing data.
# (there are no missing data in this dataset)
[col for col in data.columns if data[col].isnull().sum() > 0]  # empty list means no column has NaNs
data.head(5)
# ### Train - Test Split
# +
# separate dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(labels=['Label_code'], axis=1), # drop the target
    data['Label_code'], # just the target
    test_size=0.2,
    random_state=0)  # fixed seed so the split is reproducible
X_train.shape, X_test.shape
# -
# ## Remove constant and quasi-constant (optional)
# +
# Drop constant and quasi-constant features in a single sweep: a column is
# flagged when its single most frequent value accounts for more than 99.8%
# of all observations (which also covers fully constant columns).
quasi_constant_feat = [
    col
    for col in X_train.columns
    if (X_train[col].value_counts() / np.float64(len(X_train))).max() > 0.998
]

len(quasi_constant_feat)
# -
quasi_constant_feat
# +
# we can then drop these columns from the train and test sets:
X_train.drop(labels=quasi_constant_feat, axis=1, inplace=True)  # inplace: mutates the frames directly
X_test.drop(labels=quasi_constant_feat, axis=1, inplace=True)
X_train.shape, X_test.shape
# -
# ## Remove duplicated features
# +
# finding duplicated features
duplicated_feat_pairs = {}  # maps each kept feature -> list of its exact duplicates
_duplicated_feat = []  # every feature already identified as someone's duplicate
for i in range(0, len(X_train.columns)):
    if i % 10 == 0:
        print(i)  # progress indicator for the O(n^2) column comparison
    feat_1 = X_train.columns[i]
    if feat_1 not in _duplicated_feat:
        duplicated_feat_pairs[feat_1] = []
        # only compare against columns to the right to avoid double work
        for feat_2 in X_train.columns[i + 1:]:
            if X_train[feat_1].equals(X_train[feat_2]):
                duplicated_feat_pairs[feat_1].append(feat_2)
                _duplicated_feat.append(feat_2)
# -
# let's explore our list of duplicated features
len(_duplicated_feat)
# We found 0 features that were duplicates of others.
# +
# these are the ones:
_duplicated_feat
# +
# let's explore the dictionary we created:
duplicated_feat_pairs
# -
# We see that for every feature, if it had duplicates, we have entries in the list, otherwise, we have empty lists. Let's explore those features with duplicates now:
# +
# let's explore the number of keys in our dictionary
# NOTE(review): the "21 of 23" figure below was copied from another dataset's
# run; in this Kyoto run no duplicates were found, so every column is a key
print(len(duplicated_feat_pairs.keys()))
# -
# print the features with their duplicates
# iterate over every feature in our dict:
for feat in duplicated_feat_pairs.keys():
    # if it has duplicates, the list should not be empty:
    if len(duplicated_feat_pairs[feat]) > 0:
        # print the feature and its duplicates:
        print(feat, duplicated_feat_pairs[feat])
        print()
# to remove the duplicates (if necessary)
# NOTE(review): passing the dict_keys view directly relies on pandas accepting
# it as a column selector; list(duplicated_feat_pairs.keys()) is the safer spelling
X_train = X_train[duplicated_feat_pairs.keys()]
X_test = X_test[duplicated_feat_pairs.keys()]
X_train.shape, X_test.shape
# 0 duplicate features were found in the Kyoto dataset
# ## Standardize Data
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training split only (avoids test-set leakage), then
# apply the SAME transformation to both splits. The original code scaled
# only X_train, so every model below was trained on standardized features
# but evaluated on raw, unscaled X_test features. (X_eval further down is
# also unscaled — apply scaler.transform there as well before evaluating.)
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# ## Classifiers
from sklearn import linear_model
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoostClassifier
# ## Metrics Evaluation
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, f1_score
from sklearn import metrics
from sklearn.model_selection import cross_val_score
# ### Logistic Regression
# %%time
clf_LR = linear_model.LogisticRegression(n_jobs=-1, random_state=42, C=1).fit(X_train, y_train)  # train on the (standardized) training split
# +
pred_y_test = clf_LR.predict(X_test)  # NOTE(review): ensure X_test received the same StandardScaler transform as X_train
print('Accuracy:', accuracy_score(y_test, pred_y_test))
f1 = f1_score(y_test, pred_y_test)
print('F1 Score:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_test)  # ROC on hard 0/1 predictions
print('FPR:', fpr[1])  # assumes binary predictions, so index 1 is the single operating point — TODO confirm
print('TPR:', tpr[1])
# -
# ### Naive Bayes
# %%time
clf_NB = GaussianNB(var_smoothing=1e-05).fit(X_train, y_train)
# +
pred_y_testNB = clf_NB.predict(X_test)
print('Accuracy:', accuracy_score(y_test, pred_y_testNB))
f1 = f1_score(y_test, pred_y_testNB)
print('F1 Score:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_testNB)
print('FPR:', fpr[1])
print('TPR:', tpr[1])
# -
# ### Random Forest
# %%time
clf_RF = RandomForestClassifier(random_state=0,max_depth=70,n_estimators=100).fit(X_train, y_train)
# +
pred_y_testRF = clf_RF.predict(X_test)
print('Accuracy:', accuracy_score(y_test, pred_y_testRF))
# NOTE(review): RF and CatBoost use average='weighted' F1 while LR/NB/KNN use
# the default binary F1 — the reported scores are not directly comparable
f1 = f1_score(y_test, pred_y_testRF, average='weighted', zero_division=0)
print('F1 Score:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_testRF)
print('FPR:', fpr[1])
print('TPR:', tpr[1])
# -
# ### KNN
# %%time
clf_KNN = KNeighborsClassifier(algorithm='auto',leaf_size=1,n_neighbors=2,weights='uniform').fit(X_train, y_train)
# +
pred_y_testKNN = clf_KNN.predict(X_test)
print('accuracy_score:', accuracy_score(y_test, pred_y_testKNN))
f1 = f1_score(y_test, pred_y_testKNN)
print('f1:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_testKNN)
print('fpr:', fpr[1])
print('tpr:', tpr[1])
# -
# ### CatBoost
# %%time
clf_CB = CatBoostClassifier(random_state=0,depth=7,iterations=50,learning_rate=0.04).fit(X_train, y_train)
# +
pred_y_testCB = clf_CB.predict(X_test)
print('Accuracy:', accuracy_score(y_test, pred_y_testCB))
f1 = f1_score(y_test, pred_y_testCB, average='weighted', zero_division=0)
print('F1 Score:', f1)
fpr, tpr, thresholds = roc_curve(y_test, pred_y_testCB)
print('FPR:', fpr[1])
print('TPR:', tpr[1])
# -
# ## Model Evaluation
import pandas as pd, numpy as np
test_df = pd.read_csv("../Kyoto_Test.csv")  # separate hold-out file, distinct from the earlier train/test split
test_df.shape
# Create feature matrix X and target vector y
y_eval = test_df['Label_code']
# NOTE(review): this drops 'Malware_detection_code' and 'Ashula_detection_code',
# which were NOT dropped from the training frame — verify that X_eval's columns
# actually match the features the models were trained on; X_eval is also not
# passed through the StandardScaler fitted on X_train.
X_eval = test_df.drop(columns=['Label_code','Malware_detection_code', 'Ashula_detection_code'])
# ### Model Evaluation - Logistic Regression
modelLR = linear_model.LogisticRegression(n_jobs=-1, random_state=42, C=1)  # fresh model, same hyperparameters as clf_LR above
modelLR.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredLR = modelLR.predict(X_eval)  # NOTE(review): X_eval is unscaled; apply the training scaler first if X_train was standardized
y_predLR = modelLR.predict(X_test)
train_scoreLR = modelLR.score(X_train, y_train)
test_scoreLR = modelLR.score(X_test, y_test)
print("Training accuracy is ", train_scoreLR)
print("Testing accuracy is ", test_scoreLR)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreLR)
print('F1 Score:',f1_score(y_test, y_predLR))
print('Precision Score:',precision_score(y_test, y_predLR))
print('Recall Score:', recall_score(y_test, y_predLR))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predLR))
# ### Cross validation - Logistic Regression
#
#
# +
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelLR, X_eval, y_eval, cv=10, scoring='accuracy')  # clones and refits the estimator on folds of X_eval
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
f = cross_val_score(modelLR, X_eval, y_eval, cv=10, scoring='f1')
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
precision = cross_val_score(modelLR, X_eval, y_eval, cv=10, scoring='precision')
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
recall = cross_val_score(modelLR, X_eval, y_eval, cv=10, scoring='recall')
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
# -
# ### Model Evaluation - Naive Bayes
#
#
modelNB = GaussianNB(var_smoothing=1e-05)  # fresh model, same hyperparameters as clf_NB above
modelNB.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredNB = modelNB.predict(X_eval)  # NOTE(review): X_eval is unscaled; apply the training scaler first if X_train was standardized
y_predNB = modelNB.predict(X_test)
train_scoreNB = modelNB.score(X_train, y_train)
test_scoreNB = modelNB.score(X_test, y_test)
print("Training accuracy is ", train_scoreNB)
print("Testing accuracy is ", test_scoreNB)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreNB)
print('F1 Score:',f1_score(y_test, y_predNB))
print('Precision Score:',precision_score(y_test, y_predNB))
print('Recall Score:', recall_score(y_test, y_predNB))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predNB))
# ### Cross validation - Naive Bayes
#
# +
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelNB, X_eval, y_eval, cv=10, scoring='accuracy')
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
f = cross_val_score(modelNB, X_eval, y_eval, cv=10, scoring='f1')
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
precision = cross_val_score(modelNB, X_eval, y_eval, cv=10, scoring='precision')
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
recall = cross_val_score(modelNB, X_eval, y_eval, cv=10, scoring='recall')
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
# -
# ### Model Evaluation - Random Forest
#
#
modelRF = RandomForestClassifier(random_state=0,max_depth=70,n_estimators=100)  # fresh model, same hyperparameters as clf_RF above
modelRF.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredRF = modelRF.predict(X_eval)  # NOTE(review): X_eval is unscaled; apply the training scaler first if X_train was standardized
y_predRF = modelRF.predict(X_test)
train_scoreRF = modelRF.score(X_train, y_train)
test_scoreRF = modelRF.score(X_test, y_test)
print("Training accuracy is ", train_scoreRF)
print("Testing accuracy is ", test_scoreRF)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreRF)
# NOTE(review): weighted averaging here vs binary F1 for LR/NB/KNN — scores not directly comparable
print('F1 Score:', f1_score(y_test, y_predRF, average='weighted', zero_division=0))
print('Precision Score:', precision_score(y_test, y_predRF, average='weighted', zero_division=0))
print('Recall Score:', recall_score(y_test, y_predRF, average='weighted', zero_division=0))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predRF))
# ### Cross validation - Random Forest
#
# +
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelRF, X_eval, y_eval, cv=10, scoring='accuracy')
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
f = cross_val_score(modelRF, X_eval, y_eval, cv=10, scoring='f1')
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
precision = cross_val_score(modelRF, X_eval, y_eval, cv=10, scoring='precision')
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
recall = cross_val_score(modelRF, X_eval, y_eval, cv=10, scoring='recall')
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
# -
# ### Model Evaluation - KNN
modelKNN = KNeighborsClassifier(algorithm='auto',leaf_size=1,n_neighbors=2,weights='uniform')  # fresh model, same hyperparameters as clf_KNN above
modelKNN.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredKNN = modelKNN.predict(X_eval)  # NOTE(review): X_eval is unscaled; KNN is distance-based, so scaling mismatches matter especially here
y_predKNN = modelKNN.predict(X_test)
train_scoreKNN = modelKNN.score(X_train, y_train)
test_scoreKNN = modelKNN.score(X_test, y_test)
print("Training accuracy is ", train_scoreKNN)
print("Testing accuracy is ", test_scoreKNN)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreKNN)
print('F1 Score:', f1_score(y_test, y_predKNN))
print('Precision Score:', precision_score(y_test, y_predKNN))
print('Recall Score:', recall_score(y_test, y_predKNN))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predKNN))
# ### Cross validation - KNN
#
#
# +
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelKNN, X_eval, y_eval, cv=10, scoring='accuracy')
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
f = cross_val_score(modelKNN, X_eval, y_eval, cv=10, scoring='f1')
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
precision = cross_val_score(modelKNN, X_eval, y_eval, cv=10, scoring='precision')
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
recall = cross_val_score(modelKNN, X_eval, y_eval, cv=10, scoring='recall')
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
# -
# ### Model Evaluation - CatBoost
modelCB = CatBoostClassifier(random_state=0,depth=7,iterations=50,learning_rate=0.04)  # fresh model, same hyperparameters as clf_CB above
modelCB.fit(X_train, y_train)
# Predict on the new unseen test data
y_evalpredCB = modelCB.predict(X_eval)  # NOTE(review): X_eval is unscaled; apply the training scaler first if X_train was standardized
y_predCB = modelCB.predict(X_test)
train_scoreCB = modelCB.score(X_train, y_train)
test_scoreCB = modelCB.score(X_test, y_test)
print("Training accuracy is ", train_scoreCB)
print("Testing accuracy is ", test_scoreCB)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
print('Performance measures for test:')
print('--------')
print('Accuracy:', test_scoreCB)
# NOTE(review): weighted averaging here vs binary F1 for LR/NB/KNN — scores not directly comparable
print('F1 Score:',f1_score(y_test, y_predCB, average='weighted', zero_division=0))
print('Precision Score:',precision_score(y_test, y_predCB, average='weighted', zero_division=0))
print('Recall Score:', recall_score(y_test, y_predCB, average='weighted', zero_division=0))
print('Confusion Matrix:\n', confusion_matrix(y_test, y_predCB))
# ### Cross validation - CatBoost
# +
from sklearn.model_selection import cross_val_score
from sklearn import metrics
accuracy = cross_val_score(modelCB, X_eval, y_eval, cv=10, scoring='accuracy')
f = cross_val_score(modelCB, X_eval, y_eval, cv=10, scoring='f1')
precision = cross_val_score(modelCB, X_eval, y_eval, cv=10, scoring='precision')
recall = cross_val_score(modelCB, X_eval, y_eval, cv=10, scoring='recall')
# -
print("Accuracy: %0.5f (+/- %0.5f)" % (accuracy.mean(), accuracy.std() * 2))
print("F1 Score: %0.5f (+/- %0.5f)" % (f.mean(), f.std() * 2))
print("Precision: %0.5f (+/- %0.5f)" % (precision.mean(), precision.std() * 2))
print("Recall: %0.5f (+/- %0.5f)" % (recall.mean(), recall.std() * 2))
| Kyoto2006+/1-Constant-Quasi-Constant-Duplicates/1.3-Duplicated-features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + nteract={"transient": {"deleting": false}}
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import os
import PIL
# %matplotlib inline
# + nteract={"transient": {"deleting": false}}
batch_size = 40  # images per training batch
img_height = 200  # every image is resized to 200x200 on load
img_width = 200
# + [markdown] nteract={"transient": {"deleting": false}}
# Keras dataset preprocessing utilities, located at tf.keras.preprocessing, help you go from raw data on disk to a tf.data.Dataset object that can be used to train a model.
#
# See https://keras.io/api/preprocessing/
#
# Once the dataset is preprocessed and loaded, it can be directly used in calls to model.fit
#
#
# + nteract={"transient": {"deleting": false}}
## loading training data
training_ds = tf.keras.preprocessing.image_dataset_from_directory(
    'data',
    validation_split=0.2,  # hold out 20% of the images for validation
    subset= "training",
    seed=42,  # same seed as the validation loader so the two subsets are disjoint
    image_size= (img_height, img_width),
    batch_size=batch_size
)
# + nteract={"transient": {"deleting": false}}
## loading testing data
testing_ds = tf.keras.preprocessing.image_dataset_from_directory(
    'data',
    validation_split=0.2,
    subset= "validation",  # the complementary 20% of the same seeded split
    seed=42,
    image_size= (img_height, img_width),
    batch_size=batch_size
)
# + nteract={"transient": {"deleting": false}}
print(training_ds)
print(testing_ds)
# + nteract={"transient": {"deleting": false}}
class_names = training_ds.class_names  # inferred from the sub-directory names under 'data'
# -
training_labels = training_ds.class_names
testing_labels = testing_ds.class_names
# + nteract={"transient": {"deleting": false}}
plt.figure(figsize=(10, 10))
for images, labels in training_ds.take(1):  # take a single batch
    for i in range(12):  # show the first 12 images of the batch in a 3x4 grid
        ax = plt.subplot(3, 4, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.grid(True)
# + nteract={"transient": {"deleting": false}}
## Configuring dataset for performance
AUTOTUNE = tf.data.experimental.AUTOTUNE
training_ds = training_ds.cache().prefetch(buffer_size=AUTOTUNE)  # cache decoded images, overlap input prep with training
testing_ds = testing_ds.cache().prefetch(buffer_size=AUTOTUNE)
# + [markdown] nteract={"transient": {"deleting": false}}
# # Now build a deep neural network and train it and see how you do
# + nteract={"transient": {"deleting": false}}
num_classes = 2  # NOTE(review): unused — the output layer below is a single sigmoid unit
model = Sequential([
  layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Flatten(),
  layers.Dense(128, activation='relu'),
  layers.Dense(1, activation = "sigmoid")  # single probability output for binary classification
])
# -
model.compile(optimizer='adam',
              loss='binary_crossentropy',  # matches the single-sigmoid output with integer 0/1 labels
              metrics=['accuracy', keras.metrics.Recall(), keras.metrics.Precision()])
history = model.fit(
    training_ds,
    validation_data=testing_ds,
    epochs=10
)
# +
# Per-epoch metrics recorded by model.fit
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

# Derive the x-axis from the recorded history instead of hard-coding 10,
# so the plot stays correct if the number of training epochs is changed.
epochs_range = range(len(acc))

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# -
# Hand-computed F1 = 2*p*r/(p+r); the constants were presumably transcribed
# from the precision/recall printed by the training run above — they go
# stale whenever the model is retrained (TODO: compute from predictions instead)
f1_train = (2*(0.9853*0.9850)/(0.9853+0.9850))
f1_test = (2*(0.9727*0.8989)/(0.9727+0.8989))
print(f1_train)
print(f1_test)
# ## Data Augmentation
# Random flips/rotations/zooms; Keras augmentation layers are active only
# while training and act as the identity at inference time
data_augmentation = keras.Sequential(
  [
    layers.experimental.preprocessing.RandomFlip("horizontal",
                      input_shape=(img_height,
                                   img_width,
                                   3)),
    layers.experimental.preprocessing.RandomRotation(0.1),
    layers.experimental.preprocessing.RandomZoom(0.1),
  ]
)
# ## Dropout
# Same CNN as above, plus augmentation at the input and Dropout before Flatten
model2 = Sequential([
  data_augmentation,
  layers.experimental.preprocessing.Rescaling(1./255),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Dropout(0.2),  # randomly zero 20% of activations during training to curb overfitting
  layers.Flatten(),
  layers.Dense(128, activation='relu'),
  layers.Dense(1, activation='sigmoid')
])
model2.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy', keras.metrics.Recall(), keras.metrics.Precision()])
history2 = model2.fit(
    training_ds,
    validation_data=testing_ds,  # same hold-out split as for the first model
    epochs=10
)
ypred = model2.predict(testing_ds)
ypred = np.where(ypred > 0.5, 1, 0)  # threshold the sigmoid outputs at 0.5
ytrue = [y for x, y in testing_ds]  # NOTE(review): assumes the dataset yields batches in the same order as predict() consumed them — confirm shuffling is disabled for testing_ds, otherwise these labels are misaligned
ytrue = np.concatenate(ytrue)
# +
from sklearn import metrics
# Confusion matrix and per-class precision/recall/F1 for the thresholded predictions
print(metrics.confusion_matrix(ytrue, ypred))
print(metrics.classification_report(ytrue,ypred))
# +
acc = history2.history['accuracy']
val_acc = history2.history['val_accuracy']
loss = history2.history['loss']
val_loss = history2.history['val_loss']
epochs_range = range(10)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
| MaskDL_Foster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="arnY8EH0RHWH"
# ## Text Classification for the IMDB Dataset using BERT.
# In this notebook we build a binary classifier for the IMDB Reviews Dataset using [BERT](https://arxiv.org/abs/1810.04805), a pre-trained NLP model open-sourced by Google in late 2018 that can be used for [Transfer Learning](https://towardsdatascience.com/transfer-learning-in-nlp-fecc59f546e4) on textual data. The link for the dataset can be found [here](https://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews).<br>
# This notebook requires a GPU to run. We suggest running it on your local machine only if you have a GPU set up; otherwise you can use Google Colab.
# + colab={"base_uri": "https://localhost:8080/"} id="TtokjlkCQbiw" outputId="2523af87-0b7d-4ec7-f7ac-5baf1d366799"
#importing a few necessary packages and setting the DATA directory
DATA_DIR="."
import os
import numpy as np
import pickle
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore")
# install BERT
# !pip install pytorch_pretrained_bert pytorch-nlp
# BERT imports
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from pytorch_pretrained_bert import BertTokenizer, BertConfig
from pytorch_pretrained_bert import BertAdam, BertForSequenceClassification
from tqdm import tqdm, trange
import pandas as pd
import io
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# specify GPU device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
n_gpu = torch.cuda.device_count()
torch.cuda.get_device_name(0)
# + colab={"base_uri": "https://localhost:8080/", "height": 72, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} id="BI8AvyFZRAha" outputId="337ab24b-6a51-4bf7-b311-27de244d1805"
# uploading and reading the dataset
# source for dataset: https://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews
try :
    # Inside Colab: prompt the user to upload the CSV interactively.
    from google.colab import files
    # After downloading the dataset, upload the dataset here.
    uploaded = files.upload()
    # NOTE(review): error_bad_lines was removed in pandas 2.0; newer pandas
    # needs on_bad_lines='skip' instead.
    df = pd.read_csv("IMDB Dataset.csv",engine='python', error_bad_lines=False)
except ModuleNotFoundError :
    # Not running in Colab: after downloading the dataset, put the
    # "IMDB Dataset.csv" file in the Data folder next to this notebook.
    path = os.getcwd()+'/Data'
    df = pd.read_csv(path+"/IMDB Dataset.csv",engine='python', error_bad_lines=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="MD5sedPwN7tX" outputId="9b8f34b3-29ec-48eb-bf7a-8be32182918e"
df.head()
# + id="AzOdtJlCRAfb"
from sklearn.preprocessing import LabelEncoder
#sentiment is positive and negative we need to convert it to 0,1
le = LabelEncoder()
df["sentiment"] = le.fit_transform(df["sentiment"])
# + colab={"base_uri": "https://localhost:8080/"} id="Dop1ppy6XQYn" outputId="d3fb017c-9468-40af-93d8-076b82c62e73"
df['sentiment'].value_counts()
# + id="azXUgzdKXqX3"
#cleaning the text
from bs4 import BeautifulSoup
import re
def strip(text):
    """Remove HTML markup, bracketed spans, and special characters from *text*.

    Steps:
      1. Parse with BeautifulSoup and keep only the visible text.
      2. Drop anything in square brackets (e.g. "[...]" artifacts).
      3. Remove every character other than letters, digits, whitespace,
         commas and apostrophes.
    """
    soup = BeautifulSoup(text, "html.parser")
    text = re.sub(r'\[[^]]*\]', '', soup.get_text())
    # BUG FIX: the original class was [^a-zA-z0-9\s,'] — the "A-z" range also
    # matches the ASCII punctuation between 'Z' and 'a' ([ \ ] ^ _ `), so those
    # characters were incorrectly kept. "a-zA-Z" matches letters only.
    pattern = r"[^a-zA-Z0-9\s,']"
    text = re.sub(pattern, '', text)
    return text
df['review']=df['review'].apply(strip)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="va7N2bOMjjKs" outputId="b9b0b65d-4541-4fe7-a566-5d7ad4de6cf0"
df.head()
# + [markdown] id="YYHojJ9Kr_5w"
# BERT expects input data in a specific format
# 1. We are performing a classification task so we use a special token [CLS] to indicate this to BERT.
# 2. It needs to know the end of a sentence so we use the [SEP] token.
# + id="-yCoUW-FZtD7"
#BERT needs to understand 2 things:
#1) The start and end of each sentiment
# so we declare a special token CLS which tells BERT that its a classification task
sentences = df['review']
sentence = ["[CLS] "+i+" [SEP]" for i in sentences]
# + colab={"base_uri": "https://localhost:8080/", "height": 137} id="NUpOJpndZw5J" outputId="00d61078-b8dd-43f2-e7f5-529a72c8fd0b"
sentence[0]
# + [markdown] id="c5A_V7BT2bKM"
# We now need to tokenize our text into tokens that correspond to BERT’s vocabulary.
# + colab={"base_uri": "https://localhost:8080/"} id="Vjfr85BSbY41" outputId="6786ef92-b85a-4b49-8436-13b000ec21d8"
# Tokenize with BERT tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# Restricting the max size of tokens to 512 (BERT doesn't accept any more than this)
tokenized_texts = list(map(lambda t: tokenizer.tokenize(t)[:510] , sentence))
print ("Tokenize the first sentence:")
print (tokenized_texts[0])
# + id="kVXxiuqQbkVp"
labels = list(df['sentiment'])#storing the labels
# + [markdown] id="d1u24cpd2mBl"
# We now need to give BERT input ids,ie, a sequence of integers which uniquely identify each input token to its index number.
# + id="vgssRTWCc_Xl"
# Set the maximum sequence length (reviews longer than MAX_LEN BERT tokens
# are truncated, shorter ones are zero-padded).
MAX_LEN = 128
# Pad our input tokens so that everything has a uniform length
input_ids = pad_sequences(list(map(tokenizer.convert_tokens_to_ids, tokenized_texts)),
                          maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# + id="ApEi7LbLdIBj"
# NOTE(review): this cell recomputes input_ids from the same tokenized_texts
# and overwrites the result of the cell above — the two computations are
# redundant and one of them can be removed.
# Use the BERT tokenizer to convert the tokens to their index numbers in the BERT vocabulary
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# + [markdown] id="GbHLewX_24ef"
# BERT is a MLM(Masked Language Model). We have to define its mask.
# + id="uDp1d_FIFhKz"
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
# (pad_sequences pads with id 0, so "id > 0" marks real tokens here).
for seq in input_ids:
    seq_mask = [float(i>0) for i in seq]
    attention_masks.append(seq_mask)
# + [markdown] id="evbWDiQ73QZj"
# Now we need to split the data into train and validation. Convert it to tensors and then create iterator for our data
# + id="wT7bDRYEFhPJ"
# Select a batch size for training.
batch_size = 16
# Use train_test_split to split our data into train and validation sets for training
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels,
                                                                                    random_state=2018, test_size=0.1)
# Using the same random_state reproduces the identical shuffle, so this mask
# split lines up row-for-row with the input split above (input_ids is only a
# placeholder second argument here; its outputs are discarded).
train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids,
                                                       random_state=2018, test_size=0.1)
# Convert all of our data into torch tensors, the required datatype for our model
train_inputs = torch.tensor(train_inputs)
validation_inputs = torch.tensor(validation_inputs)
train_labels = torch.tensor(train_labels)
validation_labels = torch.tensor(validation_labels)
train_masks = torch.tensor(train_masks)
validation_masks = torch.tensor(validation_masks)
# Create an iterator of our data with torch DataLoader:
# random order for training, sequential order for evaluation.
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
# + colab={"base_uri": "https://localhost:8080/"} id="0xRtmpSlFhSP" outputId="e90929c7-2d27-4c58-c6ae-6d18442f7bed"
#Loading pre trained BERT
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)#binary classification
model
# + [markdown] id="UzxxuH_6310Y"
# Finally, we move onto the Fine-Tuning of BERT.
# + [markdown] id="wUHr8g7PJRCj"
# <img src = "https://media.istockphoto.com/vectors/titre-icon-vector-id1064145390?k=6&m=1064145390&s=612x612&w=0&h=jr8FJDKUmxpCo8bZOE5I85qcubqzeAJemkt-SbwLXkE=" width=60 height= 60 align=left>
# #### This Cell takes 1.5 hrs to run on Colab with GPU.
# + colab={"base_uri": "https://localhost:8080/", "height": 683} id="eyXoiBmJFhIt" outputId="a642480f-7a24-4c32-df9d-b962ce94822a"
# BERT fine-tuning parameters
param_optimizer = list(model.named_parameters())
# Parameters whose names contain these substrings get no weight decay
# (biases and LayerNorm weights are conventionally excluded from decay).
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.0}
]
# BertAdam with learning-rate warmup over the first 10% of training steps.
optimizer = BertAdam(optimizer_grouped_parameters,
                     lr=2e-5,
                     warmup=.1)
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    """Fraction of samples whose argmax over the logits matches the label."""
    predicted_classes = np.argmax(preds, axis=1).flatten()
    true_classes = labels.flatten()
    return np.mean(predicted_classes == true_classes)
torch.cuda.empty_cache()
# Store our loss and accuracy for plotting
train_loss_set = []
# Number of training epochs
epochs = 4
# BERT training loop
for _ in trange(epochs, desc="Epoch"):
    ## TRAINING
    # Set our model to training mode
    model.train()
    # Tracking variables
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0
    # Train the data for one epoch
    for step, batch in enumerate(train_dataloader):
        # Add batch to GPU
        batch = tuple(t.to(device) for t in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels = batch
        # Clear out the gradients (by default they accumulate)
        optimizer.zero_grad()
        # Forward pass: when labels are supplied the model returns the loss
        loss = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
        train_loss_set.append(loss.item())
        # Backward pass
        loss.backward()
        # Update parameters and take a step using the computed gradient
        optimizer.step()
        # Update tracking variables
        tr_loss += loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1
    print("Train loss: {}".format(tr_loss/nb_tr_steps))
    ## VALIDATION
    # Put model in evaluation mode
    model.eval()
    # Tracking variables
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    # Evaluate data for one epoch
    for batch in validation_dataloader:
        # Add batch to GPU
        batch = tuple(t.to(device) for t in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels = batch
        # Telling the model not to compute or store gradients, saving memory and speeding up validation
        with torch.no_grad():
            # Forward pass, calculate logit predictions (no labels -> logits are returned)
            logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
        # Move logits and labels to CPU
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        tmp_eval_accuracy = flat_accuracy(logits, label_ids)
        eval_accuracy += tmp_eval_accuracy
        nb_eval_steps += 1
    print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
# plot training performance: per-batch training loss over the whole run
plt.figure(figsize=(15,8))
plt.title("Training loss")
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.plot(train_loss_set)
plt.show()
| Ch4/06_BERT_IMDB_Sentiment_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="PC5iYX_87zOH"
# # [HW12] Logistic Regression
#
# + [markdown] id="j952JCeI8Pj1"
# 우리는 지금까지 주어진 데이터와 가장 잘 맞는 직선을 찾는 linear regression을 진행하였습니다. 하지만 예측 값이 연속적인 값을 갖지 않는다면 어떻게 할까요?
#
# 가장 대표적인 것은 binary classification 입니다. 이는 종류가 2개로 나뉘어진 데이터가 있고 이를 분류하는 문제입니다. 수많은 메일들 중에서 스팸인 메일과 스팸이 아닌 메일을 분류하는 것이 하나의 예시입니다. 이 문제는 예측 값이 연속적인 값이 아닌 0 또는 1입니다.
#
# 이때는 우리의 예측 값을 확률 값으로 만든 다음에 확률 값이 우리의 기준보다 높으면 1, 아니면 0으로 분류합니다.
#
# 이러한 문제를 푸는 방법을 logistic regression이라고 합니다.
#
#
# + [markdown] id="O_U2Nd7N_87Z"
# ## 1.1 Logistic function
#
# Logistic regression을 진행하기 위해서는 출력 값을 0과 1의 값으로 맞춰주어야 합니다. 이를 위해서 우리는 **logistic function** 을 사용합니다. Logistic function은 다음과 같습니다.
#
# $$\sigma(z) = \frac{1}{1 + e^{-z}}$$
#
# Logistic regression을 진행할 때 입력 데이터를 $x$, 실제 class 값을 $y$, 예측된 출력 값을 $\hat{y}$라고 하면 $x$는 두가지 변환을 거쳐서 $\hat{y}$가 됩니다.
# $$z = wx + b$$
# $$\hat{y} = \sigma(z)$$
# 우리의 목표는 $\hat{y}$가 실제 $y$와 가장 가깝게 되도록 하는 $w$와 $b$를 찾는 것 입니다.
#
# Logistic function을 코드를 통해서 알아보도록 하겠습니다.
# + id="o6XG4PsA7yCn"
import sympy
import numpy
from matplotlib import pyplot
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="wl47Xbw7CKOb" outputId="b7688d93-d641-439d-c052-2771f1fd3802"
z = sympy.Symbol('z', real=True)
logistic = 1/(1+ sympy.exp(-z))
logistic
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="wx9uQ9diCLio" outputId="805b6f28-bef5-4b33-99bc-16ff26b3ef81"
sympy.plotting.plot(logistic);
# + [markdown] id="qQwgC9YOCmK0"
# 위 그래프를 보면 $z=0$ 일 때 출력 값이 0.5가 됩니다. 그리고 양수 일 때는 1에 가까워지고 음수일 때는 0으로 가까워지게 됩니다. 이렇게 $z$값을 0과 1 사이로 표현할 수 있게 되었습니다.
#
# 이제 데이터를 직접 만들어서 진행해 보도록 하겠습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="kT-4DzmUCM7A" outputId="6810792f-62bc-40b8-d7df-e95d2f3d06de"
# synthetic data
# 100 evenly spaced x values with true parameters w=2, b=1, plus Gaussian noise.
x_data = numpy.linspace(-5, 5, 100)
w = 2
b = 1
numpy.random.seed(0)  # fixed seed so the notebook is reproducible
z_data = w * x_data + b + numpy.random.normal(size=len(x_data))
# Squash z through the logistic function so y values fall in (0, 1).
y_data = 1 / (1+ numpy.exp(-z_data))
pyplot.scatter(x_data, y_data, alpha=0.4);
# + [markdown] id="hArMh-RTDS-s"
# 이번에도 [HW10]에서 진행한 것과 같이 조금의 noise를 추가해서 데이터를 생성하였습니다.
#
# 이제 실제 class 값을 정해주어야 합니다. Numpy 패키지 안의 where 함수로 0.5 보다 큰 값을 1, 작은 값을 0으로 class를 부여해 주겠습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="PolN3Oc-DHem" outputId="c97089c4-88d8-49c1-c3a4-5e85c3c089e9"
y_data = numpy.where(y_data >= 0.5, 1, 0)
pyplot.scatter(x_data, y_data, alpha=0.4);
# + [markdown] id="HdpwKYU4D2AQ"
# ## 1.2 Logistic loss function
#
# $\hat{y}$가 실제 $y$와 가장 가깝게 되도록 하는 $w$와 $b$를 찾으려면 우리는 cost function을 정의해야 합니다.
#
# Linear regression 문제를 해결할 때는 mean square error를 사용했습니다.
#
# 하지만 logistic regression에 적용하면 문제가 생깁니다.
#
# 기존의 linear regression에서의 mean square error 에서는
#
# $$\frac{1}{n} \sum_{i=1}^n (y_i - (wx_i + b))^2$$
#
# 의 형태를 이루고 있어서 convex 한 형태를 이루고 있었습니다.
#
# 그런데 logistic function을 포함한 logistic regression에서는
#
# $$\frac{1}{n} \sum_{i=1}^n (y_i - \sigma(wx_i + b))^2$$
#
# $\sigma$인 logistic function 때문에 더이상 convex 한 형태가 아닙니다. 예시를 통해서 왜 convex가 아닌지 알아보겠습니다.
#
# 간단한 예시를 위해 $w = 1, b=0$일 때 3개의 데이터를 통해서 알아보겠습니다.
#
# $(x, y) : (-1, 2), (-20, -1), (-5, 5)$ 일 때 cost function을 그래프로 나타내면 다음과 같습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 61} id="OUJU-DJtJQ4t" outputId="5da67d58-f93b-4530-daef-8901c64ff5aa"
badloss = (2 - 1/(1+ sympy.exp(-z)))**2 + \
(-1 - 1/(1+ sympy.exp(-20*z)))**2 + \
(5 - 1/(1+ sympy.exp(-5*z)))**2
badloss
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="u7CHq0w-mRGf" outputId="56fbf753-4325-491d-c973-dc6fc304b8f7"
sympy.plotting.plot(badloss, xlim=(-1,1));
# + [markdown] id="8CPKIhZTmVrU"
# Gradient descent 방식으로 위 cost function의 최솟값을 구하게 되면 중간에 기울기가 0인 지점에서 멈추게 되고, 우리는 원하는 가장 작은 값에 도달하지 못하고 local minimum에 도달하게 됩니다.
#
#
# + [markdown] id="g3tCDtPTn0u0"
# 그래서 mean square error말고 다른 방법을 찾기 위해 cost function의 의미를 다시 한번 생각해 보겠습니다.
#
# 우리가 어떤 값을 예측할 때 많이 틀렸다면, 예측하는데 쓰인 변수들을 많이 바꾸어야 합니다. 그에 비해 조금 틀렸다면, 이미 잘 예측하고 있기 때문에 변수들을 조금 바꾸어야 합니다. 많이 바꾸고, 조금 바꾸는 것은 기울기의 크기가 정합니다. 이러한 원리를 사용해서 linear regression에서는 square error를 쓰는 것입니다.
#
# 이 원리를 logistic regression에도 적용해 보겠습니다.
#
# $z = wx + b$ 일 때 cost function $L$을 b에 대해서 미분을 해보겠습니다. Chain rule을 사용하면 다음과 같게 됩니다.
#
# $$\frac{\partial{L}}{\partial{b}} = \frac{\partial{L}}{\partial{\sigma(z)}} \frac{\partial{\sigma(z)}}{\partial{z}}\frac{\partial{z}}{\partial{b}}$$
#
# 이 때 $\frac{\partial{z}}{\partial{b}}$는 1이 돼서 사라집니다.
#
# 이제 $\frac{\partial{\sigma(z)}}{\partial{z}}$에 대해서 알아보겠습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 60} id="0CQD2xfSD0Nx" outputId="4b0f599d-a44a-45b5-eb1f-6f3c29994b26"
lprime = logistic.diff(z)
lprime
# + [markdown] id="6kF_uFMToeOX"
# 위에서 나온 $\sigma'(z)$를 정리를 해보겠습니다.
#
#
# $$\frac{\partial{\sigma(z)}}{\partial{z}} = \frac{e^{-z}}{(1+e^{-z})^2} = \frac{1}{1+e^{-z}} \times \frac{e^{-z}}{1+e^{-z}} = \sigma(z)\times \frac{e^{-z}}{1+e^{-z}} $$
# 가 되고, 여기서 $\frac{e^{-z}}{1+e^{-z}} $를 다시 정리해보면 다음과 같습니다.
#
# $$\frac{e^{-z}}{1+e^{-z}} = \frac{1 + e^{-z} -1}{1 + e^{-z}} = 1 - \frac{1}{1+e^{-z}} = 1-\sigma(z)$$
#
# 결론적으로,
# $$\sigma'(z) = \sigma(z) ( 1 - \sigma(z))$$
# 가 됩니다.
#
# + [markdown] id="iPJhjtQSzFZ5"
# 다시한번 위 식을 정리해보면 다음과 같습니다.
#
#
# $$\frac{\partial{L}}{\partial{b}} = \frac{\partial{L}}{\partial{\sigma(z)}} \frac{\partial{\sigma(z)}}{\partial{z}} = \frac{\partial{L}}{\partial{\sigma(z)}} \sigma(z) (1-\sigma(z))$$
#
# 여기서 우리는 $\frac{\partial{L}}{\partial{b}}$의 값이 예측 값과 실제 값의 차이가 클수록 크고, 작을수록 작게 하고 싶기 때문에 $\frac{\partial{L}}{\partial{b}} = (y - \sigma(z))$로 정의하겠습니다. 정리하면 다음과 같습니다.
#
# $$\frac{\partial{L}}{\partial{\sigma(z)}} = \frac{(y-\sigma(z))}{\sigma(z)(1-\sigma(z))}$$
#
# 이제 위 식을 $L$에 대해서 코드를 통해서 정리해보겠습니다. 코드를 간단히 하기 위해서 $\sigma(z) = a$라고 치환해서 생각하겠습니다.
# + id="Kmx-QOeyqcc6"
a, y = sympy.symbols('a y', real=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 54} id="LuLCcaVKCdfq" outputId="5ad2b5bb-9778-4b6a-a215-fc89a2c35419"
dLda = (y-a)/a/(1-a)
dLda
# + colab={"base_uri": "https://localhost:8080/", "height": 57} id="18vn7htwCfPc" outputId="2814b212-09c5-4147-ca1b-6364d74cec15"
L = sympy.integrate(dLda, a)
L
# + colab={"base_uri": "https://localhost:8080/", "height": 38} id="1xi1BfW3Ch_v" outputId="b5854e88-1f1a-4a8a-da3f-cc9a0993725e"
sympy.simplify(L)
# + [markdown] id="Tk4D09J5ClfC"
# 여기서 $a = \sigma(z)$이기 때문에 $ a < 1 $이 됩니다. 그래서 $\log$ 안의 값이 음수가 되면 안되기 때문에 식을 변환해 줍니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 38} id="EbzmrF3WCjP-" outputId="14594830-7cb9-45d5-f764-069a4414b8fc"
L = -y*sympy.log(a) + (y-1)*sympy.log(1-a)
L
# + [markdown] id="pGka-88WDhIZ"
# 우리가 구한 cost function $L$은
# $$L = -y \log(a) + (y-1)\log(1-a)$$이 됩니다.
#
# 이제 실제로 차이가 클 때 $L$값이 커지는지 알아보도록 하겠습니다.
#
# + [markdown] id="tvBN_hRPD9Bk"
# 먼저 만약 $y=1$이라면 $L = -\log(a)$만 남게 됩니다. 그래프로 표현하면 다음과 같습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="1iU8OR9zDXVn" outputId="770906c2-fc64-40ba-926c-536e3de38454"
sympy.plotting.plot(-sympy.log(a), xlim=(0,1));
# + [markdown] id="z1sYhc0aE_CU"
# 실제 class 가 1일 때 예측 값이 0에 가까워지면 cost function값이 커지고, 1에 가까워지면 cost function이 작아지는 것을 알 수 있습니다. 이는 우리가 원래 목표했던 것과 동일합니다.
#
# 이제 $y=0$이라면 $L = \log(1-a)$ 만 남게 됩니다. 이를 또한 그래프로 표현하면 다음과 같습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="E6d2ppuEE-SH" outputId="04bb9906-f777-4f59-c54f-cf4be87bb0f7"
sympy.plotting.plot(-sympy.log(1-a), xlim=(0,1));
# + [markdown] id="xWO0g8TlFe17"
# 이번에도 예측값이 실제 값이랑 가까워지면 cost function값이 작아지고 멀어지면 커지게 됨을 알 수 있습니다.
# + [markdown] id="h45s_7puFlhT"
# ## 1.3 Find the parameters using autograd function
#
# 이제 logistic regression의 전체적인 과정을 코드를 통해서 알아보도록 하겠습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="6gt2Pt9XFdzJ" outputId="6848b454-09ee-4ba0-c252-a5bade43bbae"
logistic
# + colab={"base_uri": "https://localhost:8080/", "height": 57} id="hkKb-1A6HaiU" outputId="46459a0c-4e31-4b58-9eec-82a1dadd8db6"
w, b, x, y = sympy.symbols('w b x y')
logistic = 1/(1+ sympy.exp(-w*x-b)) # redefined with the composition
Loss = -y*sympy.log(logistic) - (1-y)*sympy.log(1-logistic)
Loss
# + [markdown] id="hl4w5eGzKHN_"
# 지금까지 diff 를 통해서 기울기를 구했습니다. 그런데 식이 복잡해질수록 속도가 느려지기 때문에 autograd를 통해서 구해보겠습니다.
# + id="E4fvMUa4Hcuf"
# import the autograd-wrapped version of numpy
from autograd import numpy
# + id="up7JooetKYb0"
# import the gradient calculator
from autograd import grad
# + id="4DWKtN9-KZhh"
# note: the namespace numpy is the autograd wrapper to NumPy
def logistic(z):
    """Element-wise sigmoid: 1 / (1 + exp(-z))."""
    return 1.0 / (1.0 + numpy.exp(-z))
def logistic_model(params, x):
    '''Predict y for inputs x with a logistic model sigma(w*x + b).

    Arguments:
    params: array (w, b) of model parameters
    x     : array of x data'''
    w, b = params[0], params[1]
    # sigma(w*x + b), with the sigmoid written out inline
    return 1 / (1 + numpy.exp(-(w * x + b)))
def log_loss(params, model, x, y):
    '''Mean binary cross-entropy of model(params, x) against the targets y.

    Arguments:
    params: array (w, b) of model parameters
    model : the Python function for the logistic model
    x, y  : arrays of input data to the model'''
    predictions = model(params, x)
    per_sample = y * numpy.log(predictions) + (1 - y) * numpy.log(1 - predictions)
    return -numpy.mean(per_sample)
# + id="hOWcZhUfKcGA"
# get a function to compute the gradient of the logistic loss
gradient = grad(log_loss)
# + colab={"base_uri": "https://localhost:8080/"} id="Cis2i_3pKdRc" outputId="52af5c04-9015-471d-a964-05f4bbc129f3"
type(gradient)
# + [markdown] id="UAAq18KCPUOu"
# 이 떄 grad 함수는 변수 개수만큼 output을 만들게 됩니다. 우리는 $w, b$ 2개의 변수가 있습니다. 먼저 예시로 랜덤으로 초기화한 변수를 넣어서 기울기값을 구해보겠습니다.
# + colab={"base_uri": "https://localhost:8080/"} id="0lx4uedmKeKH" outputId="0299762d-4c73-4a08-cc09-9d190e4cb56f"
numpy.random.seed(0)
params = numpy.random.rand(2)
print(params)
# + colab={"base_uri": "https://localhost:8080/"} id="xpcSkjhMPjRp" outputId="fa732932-9e29-40da-8b26-482660b0920c"
gradient(params, logistic_model, x_data, y_data)
# + [markdown] id="MqStwjeMPk-u"
# 이렇게 2개의 변수에 대해서 각각 기울기 값을 반환해줍니다.
#
# 이번에 gradient descent 를 진행할 때는 새로운 조건을 추가해서 진행하겠습니다. 우리가 정한 반복 수 외의 기울기 값이 0에 가까워지면 더이상 반복을 하지 않는 조건을 추가하겠습니다. 0에 가까운 값을 설정한 뒤 그것보다 작아지면 while 문이 멈추도록 설정하여서 gradient descent 를 진행하겠습니다.
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="rJMwsVLXPkXJ" outputId="9ace67ed-7fe9-4f5a-d8dd-83dc1cc2a977"
max_iter = 5000
i = 0
# Start with a dummy non-zero gradient so the while condition holds on the first pass.
descent = numpy.ones(len(params))
# Gradient descent: stop early once the gradient norm is near zero, or after max_iter steps.
while numpy.linalg.norm(descent) > 0.001 and i < max_iter:
    descent = gradient(params, logistic_model, x_data, y_data)
    params = params - descent * 0.01  # learning rate 0.01
    i += 1
print('Optimized value of w is {} vs. true value: 2'.format(params[0]))
print('Optimized value of b is {} vs. true value: 1'.format(params[1]))
print('Exited after {} iterations'.format(i))
pyplot.scatter(x_data, y_data, alpha=0.4)
pyplot.plot(x_data, logistic_model(params, x_data), '-r');
# + [markdown] id="oUfsHqjmREyW"
# 빨간색 곡선이 우리의 모델입니다.
#
# 이제 기준값을 정하고 그것보다 크면 1, 작으면 0으로 분류를 하면 됩니다.
#
# 이번에는 0.5로 설정해서 진행하겠습니다.
# + id="1tK7xOBTQvEP"
def decision_boundary(y):
    """Map a probability to a hard class label: 1 when y >= 0.5, else 0."""
    if y >= .5:
        return 1
    return 0
# + [markdown] id="JZJ05slTRd8T"
# 모든 점을 함수에 넣어야 하는데 하나씩 넣으면 반복문을 돌아야해서 오래걸리기 때문에 numpy의 vectorize 함수를 사용하겠습니다.
# + id="pt4mqy51RTvI"
decision_boundary = numpy.vectorize(decision_boundary)
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="gp9y0s10RXKo" outputId="5e6c5e16-d05f-4309-8286-05904d08f3bd"
def classify(predictions):
    '''
    Argument:
        predictions, an array of values between 0 and 1
    Returns:
        classified, an array of 0 and 1 values'''
    classified = decision_boundary(predictions)
    return classified.flatten()
# Overlay the true labels and the model's hard 0/1 predictions.
pyplot.scatter(x_data, y_data, alpha=0.4,
               label='true value')
# Fix: corrected the misspelled legend label 'prediciton' -> 'prediction'.
pyplot.scatter(x_data, classify(logistic_model(params, x_data)), alpha=0.4,
               label='prediction')
pyplot.legend();
# + [markdown] id="hwDmsPjBR6XG"
# 거의 모든 데이터들을 정확하게 예측한 것을 알 수 있습니다.
#
# 이번 시간에는 직접 데이터를 만들어서 간단한 logistic regression 문제를 해결해 보았습니다.
#
# 질문 있으면 편하게 해주세요~
# + id="ZweIVtd8RYbE"
| 03_Machine_Learning/sol/[HW12]_Logistic_Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 3</font>
#
# ## Download: http://github.com/dsacademybr
# # Funções
# Definindo uma função
def primeiraFunc():
    """Print a fixed greeting."""
    greeting = 'Hello World'
    print(greeting)
primeiraFunc()
# Definindo uma função com parâmetro
def primeiraFunc(nome):
    """Print a greeting addressed to *nome*."""
    print('Hello %s' % nome)
primeiraFunc('Aluno')
def funcLeitura():
    """Print the numbers 0 through 4, one per line."""
    for numero in range(5):
        print("Número " + str(numero))
funcLeitura()
# Função para somar números
def addNum(firstnum, secondnum):
    """Print both operands, then their sum."""
    for label, value in (("Primeiro número: ", firstnum),
                         ("Segundo número: ", secondnum)):
        print(label + str(value))
    print("Soma: ", firstnum + secondnum)
# Chamando a função e passando parâmetros
addNum(45, 3)
# ### Variáveis locais e globais
# +
# Variável Global
var_global = 10 # Esta é uma variável global
def multiply(num1, num2):
    """Print the product of num1 and num2.

    Note: assigns to a local named ``var_global`` that shadows the
    module-level variable of the same name — the global is not modified.
    """
    var_global = num1 * num2
    print(var_global)
# -
multiply(5, 25)
print(var_global)
# Variável Local
var_global = 10 # Esta é uma variável global
def multiply(num1, num2):
    """Print the product of num1 and num2, held in a function-local variable."""
    var_local = num1 * num2  # visible only inside this function
    print(var_local)
multiply(5, 25)
print(var_local)
# ### Funções Built-in
abs(-56)
abs(23)
bool(0)
bool(1)
# ### Funções str, int, float
# Erro ao executar por causa da conversão
idade = input("Digite sua idade: ")
if idade > 13:
print("Você pode acessar o Facebook")
# Usando a função int para converter o valor digitado
idade = int(input("Digite sua idade: "))
if idade > 13:
print("Você pode acessar o Facebook")
int("26")
float("123.345")
str(14)
len([23,34,45,46])
array = ['a', 'b', 'c']
max(array)
min(array)
array = ['a', 'b', 'c', 'd', 'A', 'B', 'C', 'D']
array
max(array)
min(array)
list1 = [23, 23, 34, 45]
sum(list1)
# ### Criando funções usando outras funções
# +
import math
def numPrimo(num):
    '''
    Return a message saying whether ``num`` is prime.

    Uses trial division: after ruling out even numbers, only odd divisors
    up to sqrt(num) need to be checked.
    '''
    # Fix: numbers below 2 (0, 1 and negatives) are not prime. The original
    # reported them as prime, and math.sqrt() would raise for negative input.
    if num < 2:
        return "Este número não é primo"
    if (num % 2) == 0 and num > 2:
        return "Este número não é primo"
    for i in range(3, int(math.sqrt(num)) + 1, 2):
        if (num % i) == 0:
            return "Este número não é primo"
    return "Este número é primo"
# -
numPrimo(541)
# ### Fazendo split dos dados
# Fazendo split dos dados
def split_string(text):
    """Split *text* on single spaces and return the resulting list of words."""
    separator = " "
    return text.split(separator)
texto = "Esta função será bastante útil para separar grandes volumes de dados."
# Isso divide a string em uma lista.
print(split_string(texto))
# Podemos atribuir o output de uma função, para uma variável
token = split_string(texto)
token
caixa_baixa = "Este Texto Deveria Estar Todo Em LowerCase"
def lowercase(text):
    """Return *text* converted to all lower-case characters."""
    lowered = text.lower()
    return lowered
lowercased_string = lowercase(caixa_baixa)
lowercased_string
# Funções com número variável de argumentos
def printVarInfo(arg1, *vartuple):
    """Print the mandatory first argument, then each extra positional argument."""
    # One pass over the mandatory argument followed by the variadic ones.
    for item in (arg1,) + vartuple:
        print("O parâmetro passado foi: ", item)
    return
# Fazendo chamada à função usando apenas 1 argumento
printVarInfo(10)
printVarInfo('Chocolate', 'Morango', 'Banana')
# # Fim
# ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
| Cap03/Notebooks/DSA-Python-Cap03-06-Funcoes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python for Environmental Science Day 3
# ## Topics
# * Functions in Python
# * Catching Errors in Python
#
# ## What the heck is a function?
# Functions are time savers. A good programmer is a lazy programmer. If you have the possibility not to write something, it is usually a good idea to do so. And this is where functions come into play. They allow you to recycle code you have already written. You might not have noticed it, but you have already used functions. For example, math.sqrt() is a function. A simple example of a function would be:
# +
def buscemi():
    """Print the same fixed catchphrase on every call."""
    catchphrase = "How do you do, fellow kids?"
    print(catchphrase)
buscemi()
buscemi()
# -
# So as you can see functions allow you to call code again, which you have already written in a previous part of your program.
#
# Again, MIT gives a very good introduction [here](https://youtu.be/MjbuarJ7SE0?t=2m6s).
# ### Practice Questions
#
# * During the talk the concepts of abstraction and decompostion are introduced. What do they mean?
# * What is the scope of a function and what does it do?
#
#
#
# ## Why functions?
# * Less work, as you can recycle code
# * Less errors, as you only have to debug once
# * Allows you to structure your code more easily
# * Allows other people (and your future self) to understand the code more easily
#
#
# ## Function Charateristics
# A function consists of several parts (as mentioned in the MIT video). Here is a little overview and later we will try to understand them in more depth.
#
#
# * **Name**: What you call the function by
# * **Parameters**: You can provide functions with variables. Those variables can then be used to compute something. An additional short explanation of functions and parameters can be found [here](https://www.youtube.com/watch?v=NE97ylAnrz4).
# * **Docstring**: A docstring allows you to write a little documentation were you explain how the function works (in our stage mainly for your future self)
# * **Body**: This is were the magic happens, as here is the place for the code itself
# * **Return values**: You usually create functions to do something that create a result. For example you could create a function that adds two numbers. The problem is that the result is "trapped" in our function, as it is deleted once the function terminates. But with the return keyword you can specify that your function should return the results to the main program. If you are confused about return values take a look at [this video](https://youtu.be/PgrlkpleEuw).
#
# The following figure explains this visually (except docstrings, which are explained [here](http://www.pythonforbeginners.com/basics/python-docstrings)).
#
# 
#
# ### Practice Questions
# * Functions seem confusing to me. Is this normal?
# * What is the syntax of a docstring?
# * What things should you write in your docstring?
# * Explain the concept of return values in your own words.
# * Can a function have more than one parameter and return value?
# * Can you give a function as a parameter to another function?
# * What is the difference between a normal and a keyword function?
# * What is allowed for a function name?
# * Can you call a function within a function?
# * What does a function return when you do not have a return statement?
# * What is "None"?
# * What is the difference between printing a value in a function instead of returning it?
# * Why is this a bad idea?
#
#
# <img src="http://devhumor.com/content/uploads/images/March2018/global_variable.jpg" alt="Drawing" style="width: 200px;"/ align="left">
#
# ## Catching Errors in Python (Making Exceptions)
# Usually errors occur in Python when you did something wrong, and this is how it is supposed to be. But problems can arise when you did nothing wrong and your program terminates anyway. A common example is user input. Think back to the program we used yesterday where you asked the user for the price of the vacation and so forth. You just assumed that a sane person would enter a number if asked for a price, but they could easily have typed in a string. Now you have a problem, as you told the program to convert the user input to a number, but you cannot convert a word to a number in a meaningful way. And this is where error catching comes into play.
#
# It allows you to foresee such situations and tell the program what it should do if they happen. Try it for yourself.
price = float(input("What is the price? "))
# Now try the same problem with error catching.
while True:
try:
price = float(input("What is the price? "))
print("Good job. You managed to type a number!")
break
except ValueError:
print("This was not a number. Please try again")
# So we see that error catching is quite useful. An additional explanation can be found [here](https://youtu.be/nlCKrKGHSSk). In the video different kinds of errors are explained and how to handle them. A further explanation can be found [here](https://wiki.python.org/moin/HandlingExceptions).
# ### Practice Questions
# * What is the difference between try-except and if-else?
# * When do you use exceptions?
# * Can I raise my own exceptions?
# * When should I use try-except?
# * What goes in the try clause? What goes in the except clause?
# ## Important note!
# Do not forget to write docstrings and [comments](https://i.redditmedia.com/VJKytXlRyG7b2K_DD2o5WZe0ri6eE9P24dBgbkBLZIA.jpg?fit=crop&crop=faces%2Centropy&arh=2&w=640&s=b8789096d10eacaa513e0b1ad06b9afd) in all your code!
# ### Exercise 1
# Write a function that takes the volume and height of a rectangular pyramid and returns the length of one base side of the pyramid.
#
# Hint: The math package might be useful here.
#
# ### Exercise 2
# Go through the following functions and determine what will be printed. When you have finished execute the program to see if you were right. If so, great. If not, try to find out were you went wrong.
#
# Source: ThinkPython
def b(z):
    """Print z together with a(z, z), then return that product."""
    result = a(z, z)
    print(z, result)
    return result
def a(x, y):
    """Return (x + 1) * y; the rebinding of x is local to this call."""
    incremented = x + 1
    return incremented * y
def c(x, y, z):
    """Return the square of b(x + y + z)."""
    total = x + y + z
    return b(total) ** 2
# Module-level bindings; note that a() and c() take parameters named x/y
# which shadow these globals inside the function bodies.
x = 1
y = x + 1
# Evaluates c(1, 5, 3); b() prints an intermediate line before the result.
print(c(x, y+3, x+y))
# ### Exercise 3
# A function object is a value you can assign to a variable or pass as an argument. For
# example, do_twice is a function that takes a function object as an argument and calls it twice:
def do_twice(f):
    """Call the function object f two times."""
    for _ in range(2):
        f()
# Here’s an example that uses do_twice to call a function named print_spam twice.
#
def print_spam():
    """Print the word 'spam' on its own line."""
    word = 'spam'
    print(word)
do_twice(print_spam)
# * Type this example into a script and test it.
# * Modify do_twice so that it takes two arguments, a function object and a value, and calls the
# function twice, passing the value as an argument.
# * Copy the definition of print_twice from here into your script.
#
#
def print_twice(to_print):
    """Print to_print twice, one line per repetition."""
    for _ in range(2):
        print(to_print)
# * Use the modified version of do_twice to call print_twice twice, passing 'spam' as an
# argument.
# * Define a new function called do_four that takes a function object and a value and calls the
# function four times, passing the value as a parameter. There should be only two statements in
# the body of this function, not four.
# * Call do_four() with reasonable parameters.
#
# Source: ThinkPython
#
# ### Exercise 4
# Write a function that prompts a user for numbers until the user types "done". Then return the aggregated result and print it. Make sure that the program knows what to do if the user enters anything that is neither a number nor "done".
# ### Exercise 5
# Write a function named collatz() that has one parameter named number. If
# number is even, then collatz() should print number // 2 and return this value.
# If number is odd, then collatz() should print and return 3 * number + 1.
#
# Then write a program that lets the user type in an integer and that keeps
# calling collatz() on that number until the function returns the value 1.
# (Amazingly enough, this sequence actually works for any integer—sooner
# or later, using this sequence, you’ll arrive at 1! Even mathematicians aren’t
# sure why. Your program is exploring what’s called the Collatz sequence, sometimes called “the simplest impossible math problem.”)
#
# Remember to convert the return value from input() to an integer with
# the int() function; otherwise, it will be a string value.
#
# Hint: An integer number is even if number % 2 == 0, and it’s odd if
# number % 2 == 1.
#
# The output of this program could look something like this:
# Enter number:
# 3
# 10
# 5
# 16
# 8
# 4
# 2
# 1
# Source: Automate the boring stuff with Python
#
# ### Exercise 6
#
# Write a Function named fibonacci(n) that takes an integer n as parameter and prints the n-th element of the Fibonacci Sequence.
# You can prompt the user to specify which element he wants to calculate.
#
# Hint: Think about how the Sequence starts and how many elements of the sequence you need to calculate the next one.
#
# ### Exercise 7
#
# Write a program which asks the user to choose one of the following operations:
#
# a) squareroot
#
# b) power of two
#
# c) power of three
#
# d) logarithm naturalis
#
# Then ask for a number on which the operation should be executed. If the calculation was successful print the result, if not use error
# handling to tell the user what the problem was. It is always annoying to start a program over and over again. Try to keep it running so the
# user can execute as many operations as needed until an end keyword is entered so the program shuts down.
#
# Hint: To give the user a perfect user experience try to catch as many possible errors as you can.
#
| week_1/day_3_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="XS1Z478h9WNM"
import numpy as np
import matplotlib.pyplot as plt
import time
# + [markdown] id="f3l5IUhwRHNZ"
# ## Hypothesis function
#
# Outputs Y, Given X, by parameter θ
#
# 1. In Simple Definition:
#
# 
#
# 2. In Vector Form:
#
# 
#
# - m -> number of elements in the dataset
# - X -> mx2 matrix
# - θ -> 2x1 matrix
# - m -> dataset size
# - Outputs mx2 matrix
#
# Vector Form is implemented here for faster Operation
#
#
#
# + id="TA_QPYgR9XzD"
def hypothesis(x, theta):
    """Vectorised linear hypothesis h(theta, X): the matrix product X . theta."""
    predictions = np.matmul(x, theta)
    return predictions
# + [markdown] id="OZxdmqDTR1D1"
# ## Cost Function
#
# This essentially calculates the distance between what we need the line to be, and what it actually is:
#
#
# 1. Definition:
# - m -> number of elements in the dataset
# - x<sup>(i)</sup> -> value of x in i<sup>th</sup> data point
# - y<sup>(i)</sup> -> value of y in i<sup>th</sup> data point
#
# 
#
# 2. Vector Form for faster implementation
# - m -> number of elements in the dataset
# - h(θ, X) and Y -> mx1 matrix
# - Outputs 1x1 matrix
#
# 
# + id="7SDFhVRE9Y2N"
def compute_cost(x, y, theta):
    """Mean squared error cost J(theta) = (1/2m) * e^T e with e = h(theta, X) - y.

    Returns a 1x1 matrix holding the scalar cost.
    """
    m = len(y)
    residual = hypothesis(x, theta) - y
    return np.matmul(np.transpose(residual), residual) / (2 * m)
# + [markdown] id="TaMrNg2ZTNwB"
# ## Gradient Descent
#
# Gradient Descent is an iterative way through which we can minimize the cost function J(θ,x), which essentially depends on the values of θ<sub>0</sub> and θ<sub>1</sub>
#
# Gradient Descent is implemented as follows:
#
# 
#
# where
#
# 1. α -> a constant, also called learning rate of algorithm
# 2. θ<sub>j</sub> -> j<sup>th</sup> value of θ
# 3. J( θ<sub>0</sub> , θ<sub>1</sub> ) -> The Cost Function
#
# This algorithm iteratively minimizes J(θ ,x) to reach it's minimum possible value
#
# - Vector Implementation to speed up Algorithm:
# - m -> dataset size
# - X -> mx2 matrix
# - h(θ, X) and Y -> mx1 matrix
#
# 
#
#
#
# + id="Pf-3GBGY9aG7"
def gradient_descent(x, y, theta, alpha, num_iter):
    """Run batch gradient descent and return the fitted parameters.

    Parameters
    ----------
    x : (m, n) design matrix (first column of ones for the intercept)
    y : (m, 1) target vector
    theta : (n, 1) initial parameters
    alpha : learning rate
    num_iter : number of iterations to run

    Returns
    -------
    (theta, J_history) : optimised (n, 1) parameters and the (num_iter, 1)
    cost recorded after each update.
    """
    m = len(y)
    J_history = np.zeros((num_iter, 1))
    # `step` instead of `iter`: the original shadowed the builtin iter().
    for step in range(num_iter):
        h = hypothesis(x, theta)
        error = h - y
        # Vectorised partial derivative: (1/m) * X^T (h - y)
        partial_derivative = 1/m * np.matmul(np.transpose(x), error)
        theta = theta - alpha*partial_derivative
        J_history[step] = compute_cost(x, y, theta)
    return theta, J_history
# + [markdown] id="DPJsLfqsT7dn"
# # Predict
#
# uses hypothesis() to predict value of new input
# + id="OLJ8t9oo9cSr"
def predict(value, theta):
    """Predict y for one scalar input by prepending the bias feature 1."""
    features = [1, value]
    return hypothesis(features, theta)
# + [markdown] id="e0J0HCqGUb-c"
# # Processing
# Loading Data from ex1data1.txt
#
# In each line, first value is 'Population of City in 10,000s' and second value is 'Profit in $10,000s'
# + id="3xf5-zeBNwlD"
data_path = "./ex1data1.txt"
data = np.loadtxt(data_path, delimiter=',')
# + [markdown] id="HQs8_rDkUweS"
# Extracting Population and Profits from data
# + id="RbaWL3HnUrWT"
# first value is independent variable x, second is dependant y
independent_x = data[:, 0]
dependant_y = data[:, 1]
# + [markdown] id="l99tLCfxVbtZ"
# Plotting the scatterplot of data
# + id="PzWrZUOzVa0c" colab={"base_uri": "https://localhost:8080/", "height": 346} outputId="f2e4f3be-54d0-41c4-c96e-ef77bba9ece1"
# showing data
print("Plotting Data ...\n")
plt.figure("Scatter Plot Visualization of Data")
plt.title("Scatter Plot Visualization of Data")
plt.scatter(independent_x, dependant_y, marker="x", c="r")
plt.ylabel('Profit in $10,000s')
plt.xlabel('Population of City in 10,000s')
# + [markdown] id="xcv38MKfVuyq"
# converting x and y in matrix form
# + id="Ad7LcfeqVky7"
# as we are going to use matrix multiplication, we need x as first column 1, second column values
dataset_size = independent_x.shape[0]
ones = np.ones(dataset_size)
x = np.stack((ones, independent_x), axis=1)
# also converting y in vector form to matrix form
y = dependant_y.reshape(len(dependant_y), 1)
# + [markdown] id="-GRPAn8CWFpa"
# Testing hypothesis and cost function
# + id="RM8u9dcfV2na" colab={"base_uri": "https://localhost:8080/"} outputId="3aab437d-9c20-4206-fd2c-ce753a0e4c23"
# initializing theta
theta = np.zeros((2, 1))
alpha = 0.01
num_iter = 1500
print("Testing the cost function ...")
print(f"with theta = [[0],[0]] \nCost computed = {compute_cost(x,y,theta)}")
print("Expected cost value (approx) 32.07\n")
print(f"with theta = [[-1],[2]] \nCost computed = {compute_cost(x,y,[[-1],[2]])}")
print("Expected cost value (approx) 54.24\n")
# + [markdown] id="IudQt6i-WTdB"
# Running Gradient Descent
# + id="k-UxmwVwWM3K" colab={"base_uri": "https://localhost:8080/"} outputId="181089b7-ca0a-433c-d715-3a9541e8f73e"
print("Running Gradient Descent ...\n")
minimized_theta, J_history = gradient_descent(x, y, theta, alpha, num_iter)
# + [markdown] id="LSepBauvWcOx"
# Plotting Value of J during Gradient Descent (This should Decrease with Epochs)
# + id="axcPfITcWa6x" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="b25dd393-2cba-4371-cb6c-c5cedfe4f69e"
plt.figure("Value of J during Gradient Descent")
plt.title('Value of J during Gradient Descent')
x_axis = range(len(J_history))
plt.xlabel('No. of iterations or Epochs')
plt.ylabel("Cost function J")
plt.plot(x_axis,J_history)
# + [markdown] id="U3y2xA4TWzoi"
# Minimised Theta
# + id="AJerwdB-Wt8S" colab={"base_uri": "https://localhost:8080/"} outputId="2e7e7214-236d-4bce-8916-90c31eb3910b"
print("Theta found by gradient descent:")
print(minimized_theta)
print("Expected theta values (approx)")
print(" 3.6303\n 1.1664\n")
# + [markdown] id="nskBDR1GW494"
# Predicting Value for New Input
# + id="pVV6u97QW4aZ" colab={"base_uri": "https://localhost:8080/"} outputId="1780f3d3-fc5a-4278-b36e-063a390922c8"
print(f"For population = 35,000, we predict a profit of {predict(3.5, minimized_theta)*10000}")
print(f"For population = 70,000, we predict a profit of {predict(7, minimized_theta)*10000}")
# + [markdown] id="fYnZRAK7XBoi"
# Plotting Scatterplot and Hypothesis Line
# + id="kO93t52sXAoD" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="daf0554d-b57a-4e45-d617-3779bcaf8ac9"
| src/MachineLearningAlgorithms/Regression/LinearRegression/Linear_Regression_using_Matrix_Multiplication/1_One_Variable/Linear_Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Schritt 1
# +
# importiert Requests
# installiert die Library BeautifulSoup – saugt Daten aus html!
# installiert Pandas
import requests
from bs4 import BeautifulSoup
import pandas as pd
# +
# Fetch the ECB homepage; r holds the HTTP response.
r = requests.get("https://www.ecb.europa.eu/home/html/index.en.html")
# +
# Parse the response body as HTML with BeautifulSoup.
soup = BeautifulSoup(r.text, 'html.parser')
# -
soup
# ## Step 2: build the lists
# +
# 1. Collect the relevant table cells into two lists.
# 2. Then combine them into a list of dicts.
# +
# find_all always returns a list.
percentage = soup.find_all('td', {'class':'stats-table-percentage'})
# -
title = soup.find_all('td', {'class':'stats-table-figure'})
# +
# '\xa0' (non-breaking space) is stripped from the cell text.
percentage[1].text.replace('\xa0','')
# +
# Show the title at position 0.
# Bug fix: the list defined above is named `title`, not `titel`.
title[0].text
# -
percentagelist = []
# The for-loop starts here.
for e, t in zip(percentage, title):
    # Read one element from each list in lockstep:
    # e -> percentage cell: keep only the text, with '\xa0' removed
    # t -> title cell: keep only the text
    # and combine both into a small dictionary.
    minidict = {'Title':t.text,
                'percentage':e.text.replace('\xa0','')}
    percentagelist.append(minidict)
pd.DataFrame(percentagelist)
pd.DataFrame(percentagelist).to_csv('WB_Prozentliste')
| Scrapping-Test_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp core
# -
#colab
#hide
# Mount Google Drive so the nbdev project folder is reachable from Colab.
from google.colab import drive
drive.mount('/content/gdrive')
# %cd /content/gdrive/MyDrive/nbdev_template
# # Core
#
# > API details.
#hide
from nbdev.showdoc import *
# # functionality one
#export
def add(a,b):
    """Return the sum of `a` and `b`."""
    total = a + b
    return total
#hide
assert add(1, 2) == 3
# ## functionality two
#export
def test_func():
    """Return the constant string 'test'."""
    return 'test'
assert test_func() == "test"
| 00_core (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# We don't provide auto installation instructions for this notebook
# as tensorflow 1 rarely installs cleanly on a moderately updated OS.
#
# To run, install tensorflow 1 and other dependencies (numpy, pandas, etc.)
# The tf1 DoppelGANger implementation is at https://github.com/fjxmlzn/DoppelGANger
#
# We used tf1.14 with a GCP deep learning image specifically setup for tensorflow 1.
# +
from gan.doppelganger import DoppelGANger
from gan.util import add_gen_flag, normalize_per_sample, renormalize_per_sample
from gan.load_data import load_data
from gan.network import DoppelGANgerGenerator, Discriminator, AttrDiscriminator
from gan.output import Output, OutputType, Normalization
import matplotlib.pyplot as plt
import time
import os
import tensorflow as tf
import pandas as pd
import numpy as np
# +
import pandas as pd
S3_BASE_URL = "https://gretel-public-website.s3.us-west-2.amazonaws.com/datasets/wiki-web-traffic-data/"
# Bug fix: every downstream cell references `wwt_df`; the frame was
# originally bound to `wwt`, causing a NameError later in the notebook.
wwt_df = pd.read_csv(S3_BASE_URL + "wikipedia-web-traffic-training.csv", index_col=0)
wwt_df.head()
# +
SAMPLE_LEN = 10
NUM_SAMPLES = 50000
# +
# One-hot encode the three categorical attribute columns.
# NOTE(review): assumes the training frame is available as `wwt_df` —
# verify the load cell binds this name.
raw_attributes = wwt_df[["domain", "access", "agent"]].to_numpy()
attributes = []
for i in range(raw_attributes.shape[1]):
    # One row per sample, one column per category value of column i.
    a = np.zeros((raw_attributes.shape[0], np.max(raw_attributes[:,i]) + 1), dtype="uint8")
    a[np.arange(raw_attributes.shape[0]), raw_attributes[:,i]] = 1
    attributes.append(a)
attributes = np.concatenate(attributes, axis=1)
attributes.shape
# -
attribute_outputs = [
Output(OutputType.DISCRETE, np.max(raw_attributes[:,i]) + 1)
for i in range(raw_attributes.shape[1])
]
attribute_outputs
features = np.expand_dims(wwt_df.iloc[:,:550].to_numpy(), axis=-1)
features.shape
# +
feature_outputs = [
Output(OutputType.CONTINUOUS, 1, Normalization.MINUSONE_ONE)
]
feature_outputs
# +
(train_features, train_attributes, train_attribute_outputs,
train_real_attribute_mask) = normalize_per_sample(
features,
attributes,
feature_outputs,
attribute_outputs
)
print(train_features.shape)
print(train_attributes.shape)
print(train_attribute_outputs)
print(train_real_attribute_mask)
# -
train_gen_flag = np.ones((train_features.shape[0], train_features.shape[1]))
train_features, train_feature_outputs = add_gen_flag(
train_features, train_gen_flag, feature_outputs, SAMPLE_LEN,
)
print(train_features.shape)
print(train_feature_outputs)
# +
# Model definition
generator = DoppelGANgerGenerator(
feed_back=False,
noise=True,
feature_outputs=train_feature_outputs,
attribute_outputs=train_attribute_outputs,
real_attribute_mask=train_real_attribute_mask,
sample_len=SAMPLE_LEN,
)
discriminator = Discriminator()
attr_discriminator = AttrDiscriminator()
# +
checkpoint_dir = "wwt_run/checkpoint"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
sample_dir = "wwt_run/sample"
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
time_path = "wwt_run/time.txt"
length = int(train_features.shape[1] / SAMPLE_LEN)
start_time = time.time()
run_config = tf.ConfigProto()
with tf.Session(config=run_config) as sess:
gan = DoppelGANger(
sess=sess,
checkpoint_dir=checkpoint_dir,
sample_dir=sample_dir,
time_path=time_path,
epoch=400,
batch_size=100,
data_feature=train_features,
data_attribute=train_attributes,
real_attribute_mask=train_real_attribute_mask,
data_gen_flag=train_gen_flag,
sample_len=SAMPLE_LEN,
data_feature_outputs=train_feature_outputs,
data_attribute_outputs=train_attribute_outputs,
vis_freq=200,
vis_num_sample=5,
generator=generator,
discriminator=discriminator,
attr_discriminator=attr_discriminator,
d_gp_coe=10.0,
attr_d_gp_coe=10.0,
g_attr_d_coe=1.0,
d_rounds=1,
g_rounds=1,
num_packing=1,
extra_checkpoint_freq=5,
)
gan.build()
gan.train()
# Generate some data
real_attribute_input_noise = gan.gen_attribute_input_noise(NUM_SAMPLES)
addi_attribute_input_noise = gan.gen_attribute_input_noise(NUM_SAMPLES)
feature_input_noise = gan.gen_feature_input_noise(NUM_SAMPLES, length)
input_data = gan.gen_feature_input_data_free(NUM_SAMPLES)
internal_features, internal_attributes, gen_flags, lengths = gan.sample_from(
real_attribute_input_noise, addi_attribute_input_noise,
feature_input_noise, input_data
)
end_time = time.time()
# Bug fix: report the elapsed duration, not the absolute end timestamp.
print(f"Elapsed time: {end_time - start_time} seconds")
# -
features, attributes = renormalize_per_sample(
internal_features, internal_attributes, train_feature_outputs,
train_attribute_outputs, gen_flags,
num_real_attribute=len(train_attribute_outputs)-2
)
print(features.shape)
print(attributes.shape)
# +
synthetic_wwt_df = pd.DataFrame(features.reshape(features.shape[0], -1))
synthetic_wwt_df.columns = wwt_df.columns[:features.shape[1]]
# Convert from softmax/onehot to categorical
synthetic_wwt_df["domain"] = np.argmax(attributes[:,:9], axis=1)
synthetic_wwt_df["access"] = np.argmax(attributes[:,9:12], axis=1)
synthetic_wwt_df["agent"] = np.argmax(attributes[:,12:], axis=1)
synthetic_wwt_df.head()
# -
synthetic_wwt_df.to_csv("synthetic_tf1.csv")
| oss_doppelganger/doppelganger_tf1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception_v4."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception
class InceptionTest(tf.test.TestCase):
  """Shape and endpoint tests for slim's Inception V4 network builders."""

  def testBuildLogits(self):
    """Logits, AuxLogits and Predictions get expected names and shapes."""
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, end_points = inception.inception_v4(inputs, num_classes)
    auxlogits = end_points['AuxLogits']
    predictions = end_points['Predictions']
    self.assertTrue(auxlogits.op.name.startswith('InceptionV4/AuxLogits'))
    self.assertListEqual(auxlogits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue(predictions.op.name.startswith(
        'InceptionV4/Logits/Predictions'))
    self.assertListEqual(predictions.get_shape().as_list(),
                         [batch_size, num_classes])

  def testBuildWithoutAuxLogits(self):
    """AuxLogits endpoint is absent when create_aux_logits=False."""
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, endpoints = inception.inception_v4(inputs, num_classes,
                                               create_aux_logits=False)
    self.assertFalse('AuxLogits' in endpoints)
    self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])

  def testAllEndPointsShapes(self):
    """Every endpoint tensor has the documented static shape."""
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_v4(inputs, num_classes)
    endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
                        'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
                        'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
                        'Mixed_3a': [batch_size, 73, 73, 160],
                        'Mixed_4a': [batch_size, 71, 71, 192],
                        'Mixed_5a': [batch_size, 35, 35, 384],
                        # 4 x Inception-A blocks
                        'Mixed_5b': [batch_size, 35, 35, 384],
                        'Mixed_5c': [batch_size, 35, 35, 384],
                        'Mixed_5d': [batch_size, 35, 35, 384],
                        'Mixed_5e': [batch_size, 35, 35, 384],
                        # Reduction-A block
                        'Mixed_6a': [batch_size, 17, 17, 1024],
                        # 7 x Inception-B blocks
                        'Mixed_6b': [batch_size, 17, 17, 1024],
                        'Mixed_6c': [batch_size, 17, 17, 1024],
                        'Mixed_6d': [batch_size, 17, 17, 1024],
                        'Mixed_6e': [batch_size, 17, 17, 1024],
                        'Mixed_6f': [batch_size, 17, 17, 1024],
                        'Mixed_6g': [batch_size, 17, 17, 1024],
                        'Mixed_6h': [batch_size, 17, 17, 1024],
                        # Reduction-A block
                        'Mixed_7a': [batch_size, 8, 8, 1536],
                        # 3 x Inception-C blocks
                        'Mixed_7b': [batch_size, 8, 8, 1536],
                        'Mixed_7c': [batch_size, 8, 8, 1536],
                        'Mixed_7d': [batch_size, 8, 8, 1536],
                        # Logits and predictions
                        'AuxLogits': [batch_size, num_classes],
                        'PreLogitsFlatten': [batch_size, 1536],
                        'Logits': [batch_size, num_classes],
                        'Predictions': [batch_size, num_classes]}
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name in endpoints_shapes:
      expected_shape = endpoints_shapes[endpoint_name]
      self.assertTrue(endpoint_name in end_points)
      self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                           expected_shape)

  def testBuildBaseNetwork(self):
    """The base network ends at Mixed_7d and exposes all endpoints."""
    batch_size = 5
    height, width = 299, 299
    inputs = tf.random_uniform((batch_size, height, width, 3))
    net, end_points = inception.inception_v4_base(inputs)
    self.assertTrue(net.op.name.startswith(
        'InceptionV4/Mixed_7d'))
    self.assertListEqual(net.get_shape().as_list(), [batch_size, 8, 8, 1536])
    expected_endpoints = [
        'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
        'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
        'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
        'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
        'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
    self.assertItemsEqual(end_points.keys(), expected_endpoints)
    # Py3 fix: dict.iteritems() was removed in Python 3; use items().
    for name, op in end_points.items():
      self.assertTrue(op.name.startswith('InceptionV4/' + name))

  def testBuildOnlyUpToFinalEndpoint(self):
    """Building up to each endpoint yields exactly the earlier endpoints."""
    batch_size = 5
    height, width = 299, 299
    all_endpoints = [
        'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
        'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
        'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
        'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
        'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
    for index, endpoint in enumerate(all_endpoints):
      with tf.Graph().as_default():
        inputs = tf.random_uniform((batch_size, height, width, 3))
        out_tensor, end_points = inception.inception_v4_base(
            inputs, final_endpoint=endpoint)
        self.assertTrue(out_tensor.op.name.startswith(
            'InceptionV4/' + endpoint))
        self.assertItemsEqual(all_endpoints[:index+1], end_points)

  def testVariablesSetDevice(self):
    """Variables honour the enclosing tf.device placement."""
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    # Force all Variables to reside on the device.
    with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
      inception.inception_v4(inputs, num_classes)
    with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
      inception.inception_v4(inputs, num_classes)
    for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
      self.assertDeviceEqual(v.device, '/cpu:0')
    for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
      self.assertDeviceEqual(v.device, '/gpu:0')

  def testHalfSizeImages(self):
    """150x150 inputs still build; pre-pool feature map shrinks to 3x3."""
    batch_size = 5
    height, width = 150, 150
    num_classes = 1000
    inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, end_points = inception.inception_v4(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    pre_pool = end_points['Mixed_7d']
    self.assertListEqual(pre_pool.get_shape().as_list(),
                         [batch_size, 3, 3, 1536])

  def testUnknownBatchSize(self):
    """The network accepts a placeholder with an unknown batch dimension."""
    batch_size = 1
    height, width = 299, 299
    num_classes = 1000
    with self.test_session() as sess:
      inputs = tf.placeholder(tf.float32, (None, height, width, 3))
      logits, _ = inception.inception_v4(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [None, num_classes])
      images = tf.random_uniform((batch_size, height, width, 3))
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      # assertEqual: assertEquals is a deprecated unittest alias.
      self.assertEqual(output.shape, (batch_size, num_classes))

  def testEvaluation(self):
    """Inference mode (is_training=False) produces one prediction per image."""
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000
    with self.test_session() as sess:
      eval_inputs = tf.random_uniform((batch_size, height, width, 3))
      logits, _ = inception.inception_v4(eval_inputs,
                                         num_classes,
                                         is_training=False)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEqual(output.shape, (batch_size,))

  def testTrainEvalWithReuse(self):
    """An eval graph can reuse the variables of a train graph."""
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
      inception.inception_v4(train_inputs, num_classes)
      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
      logits, _ = inception.inception_v4(eval_inputs,
                                         num_classes,
                                         is_training=False,
                                         reuse=True)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEqual(output.shape, (eval_batch_size,))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| nets/inception_v4_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="iZ9_1Aq1og1m" colab_type="text"
# **Contributors:**
# <NAME> and Jongyi
#
# **Description:**
# Based on aspects of building location and construction, the goal is to predict the level of damage to buildings caused by the 2015 Gorkha earthquake in Nepal.
#
# **Data Credits:**
# Central Bureau of Statistics that work under the National Planning Commission Secretariat of Nepal.
#
# **Occassion:**
# DrivenData Practice Competition
# + id="lnE0BsGMos10" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="4303280d-9b1d-4db2-d709-6c9d825c1ecf"
#Import Statements
# !python3 -m io
import pandas as pd
import numpy as np
import csv
from google.colab import files
import seaborn as sns
from sklearn.metrics import accuracy_score,classification_report,log_loss,confusion_matrix,roc_curve,auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder,MinMaxScaler,LabelEncoder
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
from imblearn.over_sampling import SMOTE
import io
from sklearn.neural_network import MLPClassifier
# + id="rNKUjSl-pxFs" colab_type="code" colab={}
#Importing data from raw github files
train_values_url = 'https://raw.githubusercontent.com/vivekkhimani/RitcherEarthquakeDamagePredictor/master/data/train_values.csv'
train_labels_url = 'https://raw.githubusercontent.com/vivekkhimani/RitcherEarthquakeDamagePredictor/master/data/train_labels.csv'
submission_data_url = 'https://raw.githubusercontent.com/vivekkhimani/RitcherEarthquakeDamagePredictor/master/data/test_values.csv'
#Converting the raw imported data to pandas dataframes
train_values = pd.read_csv(train_values_url)
train_labels = pd.read_csv(train_labels_url)
submission_values = pd.read_csv(submission_data_url)
#Label Encoding
label_encoder = LabelEncoder()
train_values = train_values.apply(label_encoder.fit_transform)
#Convering the TRAINING dataframes to arrays
data_X = train_values.iloc[:,1:].to_numpy()
data_Y = train_labels.iloc[:,1].to_numpy()
# + id="DOvKMiAe1ZF3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 292} outputId="5da548e1-b801-4de0-9234-6f0642e9c2fe"
#One Hot Encoding
# NOTE(review): the `categorical_features` kwarg was removed in
# scikit-learn 0.22; newer versions require a ColumnTransformer instead.
ohe = OneHotEncoder(categorical_features = [7,8,9,10,11,12,13,25])
data_X = ohe.fit_transform(data_X).toarray()
#Splitting Train and Test sets from the given training data
# Bug fix: train_test_split returns (X_train, X_test, Y_train, Y_test);
# the original unpacked the values in the wrong order, so Y_train held
# the test features and the labels/features were scrambled.
X_train, X_test, Y_train, Y_test = train_test_split(data_X, data_Y, test_size = 0.33, random_state = 42)
#checking for class imbalance: count occurrences of each damage grade
counter1 = 0
counter2 = 0
counter3 = 0
for items in data_Y:
    if items == 1:
        counter1 += 1
    elif items == 2:
        counter2 += 1
    elif items == 3:
        counter3 += 1
print(counter1, counter2, counter3)
print(X_train.shape)
print(X_train)
#print(Y_train.shape)
#print(X_test.shape)
#print(Y_test.shape)
#sns.distplot(data_Y)
# + id="vqAbuWCySMyk" colab_type="code" colab={}
#<NAME>
#GridSeachCV and Randomized Search
#Get the parameters, run the model, and check the metrics
# + id="dxD-KTFMSXWY" colab_type="code" colab={}
#<NAME>
#Overfitting and Underfitting Metrics
#Plotting AUC ROC curve
| notebooks/Ritchers_Predictor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from PIL import Image
from keras.applications import vgg16
from keras.preprocessing import image
import os
def rotate_image(img_file, degrees):
    """Rotate img_file by `degrees` and save it as '<degrees>_degrees_edited_<name>'."""
    original = Image.open(img_file)
    rotated = original.rotate(degrees)
    rotated.save(str(degrees) + "_degrees_edited_" + img_file)
# +
# model=vgg16.VGG16(weights='imagenet')
# -
def resize_pic(img_file, size):
    """Load img_file resized to `size` and save it as '<size>_new_size_<name>'."""
    resized = image.load_img(img_file, target_size=size)
    resized.save(str(size) + "_new_size_" + img_file)
# Demo: resize one image, then rotate every .jpg/.png in the working dir.
resize_pic("snatch.jpg", (220,220))
for filename in os.listdir(os.getcwd()):
    if filename.endswith(".jpg") or filename.endswith(".png"):
        rotate_image(filename,30)
    else:
        continue
# List the directory to show the newly created files.
os.listdir(os.getcwd())
| .ipynb_checkpoints/end to end - pics-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: test_d
# kernelspec:
# display_name: test_d
# language: python
# name: test_d
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# # pysmFISH pipeline running template
#
# This jupyter lab notebook is used to run automated data analysis via papermill. The data will be run through the entire pipeline (full run). A copy of the run notebook will be stored in the processed experiment folder inside the notebooks subfolder.
# + jupyter={"outputs_hidden": true, "source_hidden": false} nteract={"transient": {"deleting": false}}
from pathlib import Path
import time
from pysmFISH.pipeline import Pipeline
# + jupyter={"outputs_hidden": true, "source_hidden": false} nteract={"transient": {"deleting": false}} tags=["parameters"]
# THIS CELL IS TAGGED PARAMETERS
# REQUIRED ARGUMENTS
# -------------------
# Path to the experiment folder
experiment_fpath = ''
# Define if it is a 'new' or 're-run' (default: new)
run_type = 'new'
# Define the parsing type. Can be:
# original/no_parsing/reparsing_from_processing_folder/reparsing_from_storage
# (default: original)
parsing_type = 'original'
# OPTIONAL KWARGS
# ----------------
# Path to the cold storage hard drive (default: /fish/rawdata)
raw_data_folder_storage_path = '/fish/rawdata'
# Tag to identify the zarr file with parsed images (default: img_data)
parsed_image_tag = 'img_data'
# Tag to identify the zarr file with preprocessed images (default: preprocessed_img_data)
preprocessed_image_tag = 'preprocessed_img_data'
# Path to the location where the dataset are stored (default: /fish/fish_datasets)
dataset_folder_storage_path = '/fish/fish_datasets'
# Path to the location where the dataset are stored (default: /fish/fish_results)
results_folder_storage_path = '/fish/fish_results'
# Determine if the processed images will be saved (default: True)
save_intermediate_steps = True
# Path to an existing dataset that will be used in the processing
dataset_path = ''
# Number of FOV to process in parallel (20 when running in unmanaged cluster)
chunk_size = 20
# Searching distance that defines two dots as identical (default: 10)
# NOTE(review): the value below (5) differs from the documented default (10) --
# confirm this tighter radius is intended for this run.
same_dot_radius_duplicate_dots = 5
# Define the stitched counts on which the overlapping dots will be removed
# (default: microscope_stitched)
stitching_selected = 'microscope_stitched'
# Value to select the barcodes that are passing the
# screening (< hamming_distance). (default: 3)
hamming_distance = 3
# Define the name of the system that will run the processing. Can be
# local/htcondor/unmanaged_cluster (default htcondor). If engine == local the
# parameters that define the cluster will be ignored
processing_engine = 'unmanaged_cluster'
# Determine if the cluster should scale depending from the processing load
adaptive = True
# Number of cores to use in htcondor (default 20)
cores = 20
# Total memory for all the cores in condor (default 200GB) or per core in local setup
# or per process (nprocs) in the unmanaged cluster (6GB for 40 nprocs)
memory = '6GB'
# Size of the spillover disk for dask in htcondor (default 0.1GB)
disk = '0.1GB'
# Max number of jobs that the cluster can run
maximum_jobs = 15
# define the dask scheduler port. Used for the unmanaged cluster (default 23875)
scheduler_port = 23875
# define the dask dashboard port: Used for the unmanaged cluster (default 25399)
dashboard_port = 25399
# Address of the dask scheduler. Used for the unmanaged cluster.
# 'localhost' if running on the main node (default 'localhost')
scheduler_address = 'localhost'
# Addresses of the workers (default [monod10,monod11,monod12,monod33])
workers_addresses_list = ['monod10','monod11','monod12','monod33']
# number of processes for each workers (unmanaged cluster) (default 40 for single node monod)
nprocs = 40
# number threads/process (default 1)
nthreads = 1
# Directory where to spill over on the node in htcondor (default /tmp)
local_directory = '/tmp'
# Directory where to store dask and htcondor logs
logs_directory = ''
# Save the intensity of the bits and the flipping direction
save_bits_int = True
# Start the analysis from preprocessed images
start_from_preprocessed_imgs = False
# Resume (check the *_decoded_fov_* files already present in the results folder)
resume = False
# Connect the pipeline to a previously created cluster (default False)
# Can be: 'connect_to_client' ,'connect_to_scheduler'
reuse_cluster = False
# Already active cluster to reconnect to when you want to reuse a cluster (default None)
active_cluster = None
# Already active client to reconnect to when you want to reuse a cluster (default None)
active_client = None
# Running cluster to connect when you want reuse a cluster
active_scheduler_address = None
# Add a note if needed
notes = 'no notes'
# + jupyter={"outputs_hidden": true, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Add a running-time tag to the pipeline run name so repeated runs of the
# same experiment get unique, chronologically sortable names.
experiment_fpath = Path(experiment_fpath)
date_tag = time.strftime("%y%m%d_%H_%M_%S")
pipeline_run_name = date_tag + '_' + experiment_fpath.stem
# + nteract={"transient": {"deleting": false}} tags=[]
# Echo the free-text notes so they are captured in the executed notebook copy.
print(f"{notes}")
# + jupyter={"outputs_hidden": true, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Set up the pipeline run
running_pipeline = Pipeline(
pipeline_run_name= pipeline_run_name,
experiment_fpath= experiment_fpath,
run_type= run_type,
parsing_type= parsing_type,
processing_engine= processing_engine,
cores= cores,
memory= memory,
disk= disk,
local_directory= local_directory,
chunk_size= chunk_size,
raw_data_folder_storage_path= raw_data_folder_storage_path,
parsed_image_tag= parsed_image_tag,
preprocessed_image_tag= preprocessed_image_tag,
dataset_folder_storage_path= dataset_folder_storage_path,
results_folder_storage_path= results_folder_storage_path,
save_intermediate_steps= save_intermediate_steps,
dataset_path= dataset_path,
same_dot_radius_duplicate_dots= same_dot_radius_duplicate_dots,
stitching_selected= stitching_selected,
hamming_distance= hamming_distance,
logs_directory= logs_directory,
save_bits_int= save_bits_int,
start_from_preprocessed_imgs=start_from_preprocessed_imgs,
scheduler_port=scheduler_port,
dashboard_port=dashboard_port,
scheduler_address=scheduler_address,
workers_addresses_list=workers_addresses_list,
nprocs=nprocs,
nthreads=nthreads,
reuse_cluster=reuse_cluster,
active_cluster=active_cluster,
active_client=active_client,
active_scheduler_address=active_scheduler_address,
adaptive=adaptive,
maximum_jobs=maximum_jobs,
resume=resume)
# + jupyter={"outputs_hidden": true, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Full pipeline run
running_pipeline.run_full()
| notebooks/Template_running_pysmFISH_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: achint-env2
# language: python
# name: achint-env2
# ---
import torch
# The KL divergence is given by,
# <span class="math display">
# \begin{aligned}
# D_{KL}(q(\mathbf{z, z'|x, x'}) \Vert p(\mathbf{z, z'})) &= \mathrm{log}\left[\frac{\tilde{q}_{\phi}(z^{(i)},z'^{(i)}|x,x')}{\tilde{p}_{\theta}(z^{(i)},z'^{(i)})}\right]-\langle \mathbf{sg}( \mathbb{E}_q[T]),\lambda\rangle-\langle \mathbf{sg}( \mathbb{E}_q[T']),\lambda'\rangle+(\mathbb{E}_p-\mathbb{E}_q)\langle \mathbf{sg}[T\otimes T'],G\rangle
# \end{aligned}
# </span>
#
# <span class="math display">
# \begin{aligned}
# T_{prior}&=[z_{prior},z^2_{prior}]\\
# T'_{prior}&=[z'_{prior},z'^2_{prior}]\\
# T_{posterior}&=[z_{posterior},z^2_{posterior}]\\
# T'_{posterior}&=[z'_{posterior},z'^2_{posterior}]\\
# \lambda&=[\lambda_1,\lambda_2]\\
# \lambda'&=[\lambda'_1,\lambda'_2]\\
# T_{prior}^2&=(z^2_{prior}+z'^2_{prior})\\
# T_{posterior}^2&=(z^2_{posterior}+z'^2_{posterior})
# \end{aligned}
# </span>
# We also define,
# <span class="math display">
# \begin{aligned}
# \mathrm{log}p_{prior}&=-T_{posterior}^2+T_{posterior}*G*T'_{posterior}\\
# \mathrm{log}q_{posterior}&=-T_{posterior}^2+T_{posterior}*G*T'_{posterior}+\lambda*T_{posterior}+\lambda'*T'_{posterior}\\
# \end{aligned}
# </span>
# The three terms of the partition function are given by:
# <span class="math display">
# \begin{aligned}
# part_0&=\sum \mathrm{log}q_{posterior}-\mathrm{log}p_{prior}=\langle \lambda,T_{posterior}\rangle +\langle \lambda',T'_{posterior}\rangle\\
# part_1&=-\langle \lambda,sgd(T_{posterior})\rangle -\langle \lambda',sgd(T'_{posterior})\rangle\\
# part_2&=(\mathbb{E}_p-\mathbb{E}_q)\langle \mathbf{sg}[T\otimes T'],G\rangle\\
# D_{KL}(q(\mathbf{z, z'|x, x'}) \Vert p(\mathbf{z, z'})) &=part_0+part_1+part_2
# \end{aligned}
# </span>
# +
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class kl_divergence():
    """Compute the three partition-function terms of the KL divergence
    between the joint posterior q(z, z'|x, x') and the coupled prior
    p(z, z') (see the derivation in the markdown cell above).

    Returns from calc(): (part_fun0, part_fun1, part_fun2) where
    part_fun0 = <lambda, T_post> + <lambda', T'_post>     (per-element)
    part_fun1 = -<lambda, sg(T_post)> - <lambda', sg(T'_post)>
    part_fun2 = (E_p - E_q) <sg[T (x) T'], G>              (per-sample)
    """

    def __init__(self, latent_dim1, latent_dim2, batch_size):
        self.latent_dim1 = latent_dim1  # latent size of set 1
        self.latent_dim2 = latent_dim2  # latent size of set 2
        self.batch_size = batch_size

    def calc(self, G, z1, z2, z1_prior, z2_prior, mu1, log_var1, mu2, log_var2):
        # Sufficient statistics T = [z, z^2] for priors and posteriors.
        T1_prior = torch.cat((z1_prior, torch.square(z1_prior)), 1)
        T2_prior = torch.cat((z2_prior, torch.square(z2_prior)), 1)
        T1_post = torch.cat((z1, torch.square(z1)), 1)
        T2_post = torch.cat((z2, torch.square(z2)), 1)
        # Natural parameters (encoder outputs) for the two sets.
        lambda1 = torch.cat((mu1, log_var1), 1)
        lambda2 = torch.cat((mu2, log_var2), 1)

        # part_0 = <lambda, T_post> + <lambda', T'_post>
        part_fun0 = torch.mul(lambda1, T1_post) + torch.mul(lambda2, T2_post)
        # part_1 = -<lambda, sg(T_post)> - <lambda', sg(T'_post)>
        # (Cleanup: the original assigned part_fun1 twice; only the second,
        # element-wise form was kept. Unused T_prior_sqrd/T_post_sqrd removed.)
        part_fun1 = -torch.mul(lambda1, T1_post.detach()) - torch.mul(lambda2, T2_post.detach())

        # Per-sample outer products T (x) T' for prior and posterior.
        T1_prior = T1_prior.unsqueeze(2)  # [B, 2*d1] -> [B, 2*d1, 1]
        T2_prior = T2_prior.unsqueeze(1)  # [B, 2*d2] -> [B, 1, 2*d2]
        T1_post = T1_post.unsqueeze(1)    # [B, 2*d1] -> [B, 1, 2*d1]
        T2_post = T2_post.unsqueeze(2)    # [B, 2*d2] -> [B, 2*d2, 1]
        # NOTE(review): the posterior kron comes out transposed relative to the
        # prior one when latent_dim1 != latent_dim2 -- confirm dims are equal.
        Tprior_kron = torch.zeros(self.batch_size, 2*self.latent_dim1, 2*self.latent_dim2).to(device)
        Tpost_kron = torch.zeros(self.batch_size, 2*self.latent_dim1, 2*self.latent_dim2).to(device)
        # Bug fix: the loop previously ran range(self.batch_size - 1), which
        # left the last sample's outer products as all-zeros.
        for i in range(self.batch_size):
            Tprior_kron[i, :] = torch.kron(T1_prior[i, :], T2_prior[i, :])
            Tpost_kron[i, :] = torch.kron(T1_post[i, :], T2_post[i, :])
        # part_2: contract both kron dimensions against G (tensordot dims=2),
        # giving one scalar per sample.
        part_fun2 = (torch.tensordot(Tprior_kron.detach(), G)
                     - torch.tensordot(Tpost_kron.detach(), G))
        return part_fun0, part_fun1, part_fun2
# +
# a=torch.randn(2,2).to(device)
# b=torch.randn(batch_size,1).to(device)
# c=torch.randn(batch_size,1).to(device)
# d=torch.randn(batch_size,1).to(device)
# e=torch.randn(batch_size,1).to(device)
# f=torch.randn(batch_size,1).to(device)
# g=torch.randn(batch_size,1).to(device)
# h=torch.randn(batch_size,1).to(device)
# qw=torch.randn(batch_size,1).to(device)
# kl_divergence.calc(b,a,c,d,e,f,g,b,h,qw)
# +
# mat1 = torch.ones(3,2)
# mat2 = torch.ones(3,2)
#x=torch.kron(mat1, mat2)
# +
# print(x.size())
# +
# Tprior_kron=torch.zeros(5,2,2)
# Tprior_kron[1,:]
# -
| 2D_2D_Gaussian_with_IS/kl_divergence_calculator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/medinadiegoeverardo/AB-Demo/blob/master/module4/medinadiego_4_assignment_kaggle_challenge_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="nCc3XZEyG3XV" colab_type="text"
# Lambda School Data Science, Unit 2: Predictive Modeling
#
# # Kaggle Challenge, Module 4
#
# ## Assignment
# - [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
# - [ ] Plot a confusion matrix for your Tanzania Waterpumps model.
# - [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 60% accuracy (above the majority class baseline).
# - [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_
# - [ ] Commit your notebook to your fork of the GitHub repo.
# - [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student <NAME>. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.
#
#
# ## Stretch Goals
#
# ### Reading
# - [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_
# - [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)
# - [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by <NAME>, with video
# - [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)
#
#
# ### Doing
# - [ ] Share visualizations in our Slack channel!
# - [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See below)
# - [ ] Stacking Ensemble. (See below)
# - [ ] More Categorical Encoding. (See below)
#
# ### RandomizedSearchCV / GridSearchCV, for model selection
#
# - _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:
#
# > You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...
#
# The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?
#
# ### Stacking Ensemble
#
# Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:
#
# ```python
# import pandas as pd
#
# # Filenames of your submissions you want to ensemble
# files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']
#
# target = 'status_group'
# submissions = (pd.read_csv(file)[[target]] for file in files)
# ensemble = pd.concat(submissions, axis='columns')
# majority_vote = ensemble.mode(axis='columns')[0]
#
# sample_submission = pd.read_csv('sample_submission.csv')
# submission = sample_submission.copy()
# submission[target] = majority_vote
# submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
# ```
#
#
# ### More Categorical Encodings
#
# **1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:
#
# - **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.
# - **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).
# - **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).
# - **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).
#
#
# **2.** The short video
# **[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.
#
# Category Encoders has multiple implementations of this general concept:
#
# - [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)
# - [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)
# - [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)
# - [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)
# - [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)
# - [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)
#
# Category Encoder's mean encoding implementations work for regression problems or binary classification problems.
#
# For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:
#
# ```python
# encoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) # Both parameters > 1 to avoid overfitting
# X_train_encoded = encoder.fit_transform(X_train, y_train=='functional')
# X_val_encoded = encoder.transform(X_train, y_val=='functional')
# ```
#
# **3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.
#
# ```python
# dirty_cat.TargetEncoder(clf_type='multiclass-clf')
# ```
# It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).
#
# However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. And you may need to use it with one column at a time, instead of with your whole dataframe.
#
# **4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals.
#
# _**I hope it’s not too frustrating or confusing that there’s not one “canonical” way to encode categorcals. It’s an active area of research and experimentation! Maybe you can make your own contributions!**_
# + id="dkckzx86CecD" colab_type="code" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
# !pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# + id="Rw9TjjHxCiQJ" colab_type="code" colab={}
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# + [markdown] id="LB31-r0Ab27e" colab_type="text"
# ### feature eng
# + id="jV56nW2eMTWe" colab_type="code" colab={}
# suspected duplicates
train_dupl = train[['source', 'source_type', 'waterpoint_type', 'waterpoint_type_group','extraction_type', 'extraction_type_group',
'extraction_type_class', 'payment', 'payment_type', 'quantity', 'quantity_group']]
# + id="-Xr5iK5LN6zD" colab_type="code" outputId="4d57c074-d97c-4a5d-9420-f2a93e38d234" colab={"base_uri": "https://localhost:8080/", "height": 356}
train_dupl.tail()
# + id="EsWYkYbUPhPG" colab_type="code" outputId="72ef2e13-1567-4969-acc5-8facc57859e8" colab={"base_uri": "https://localhost:8080/", "height": 197}
train.source.value_counts()
# dropping source_type since source has more unique values
# also waterpoint_type_group since waterpoint_type has 1 unique value more
# + id="mtBELoaCWltg" colab_type="code" colab={}
def replacing_dates(df):
    """Parse the 'date_recorded' column in place and add year/month/day columns.

    Mutates `df`: 'date_recorded' becomes datetime64, and 'year_recorded',
    'month_recorded', 'day_recorded' integer columns are added.

    Fix: dropped the `infer_datetime_format=True` argument -- it is deprecated
    since pandas 2.0 (format inference is the default behaviour) and parsing
    of these ISO-style dates is unchanged without it.
    """
    df['date_recorded'] = pd.to_datetime(df['date_recorded'])
    df['year_recorded'] = df['date_recorded'].dt.year
    df['month_recorded'] = df['date_recorded'].dt.month
    df['day_recorded'] = df['date_recorded'].dt.day
replacing_dates(train)
replacing_dates(test)
# + id="KU9sAmMtcfFr" colab_type="code" outputId="30456593-0ec1-4529-bfb7-0851f1d5270f" colab={"base_uri": "https://localhost:8080/", "height": 422}
train.head()
# + id="eWbtovmDSLkG" colab_type="code" colab={}
columns_drop = ['payment', 'extraction_type', 'waterpoint_type_group', 'quantity_group', 'source_type', 'date_recorded']
target = 'status_group'
features = train.columns.drop(columns_drop + [target])
# + id="nyhDzvolhfRl" colab_type="code" outputId="861c20ec-bc68-404b-e0d3-27ee2bb0d11b" colab={"base_uri": "https://localhost:8080/", "height": 181}
features
# + id="GHBvLzsKXnRz" colab_type="code" colab={}
# replace 'none' with np.nan, impute later. no need to reduce cardinality (ordinal encoder will be used)
import numpy as np
train['wpt_name'] = train['wpt_name'].replace('none', np.nan)
# replacing_nulls_with_nulls(train)
# + id="JhxppTOZYtjG" colab_type="code" outputId="72ae90d6-9b5c-4804-9cb3-7d6acb23b067" colab={"base_uri": "https://localhost:8080/", "height": 131}
def replacing_nulls_with_nulls(df):
    """Replace 0 with NaN, but only in columns that already contain nulls.

    Mutates `df` in place and returns the list of column names that were
    modified (i.e. the columns that had at least one null to begin with).
    """
    cols_with_nulls = [c for c in df.columns if df[c].isnull().any()]
    for c in cols_with_nulls:
        df[c] = df[c].replace(0, np.nan)
    return cols_with_nulls
replacing_nulls_with_nulls(train)
replacing_nulls_with_nulls(test)
# + id="E-vpIl5aDp3E" colab_type="code" colab={}
# + id="7WzHLaS1TUY4" colab_type="code" colab={}
x_train = train[features]
y_train = train[target]
x_test = test[features]
target = 'status_group'
features = train.columns.drop(columns_drop + [target])
# + [markdown] id="do2Gu8sChtzA" colab_type="text"
# ### pipeline, etc
# + id="kD_IQtMriGgQ" colab_type="code" outputId="3187d996-b83d-4cc6-ed18-1a4ee62403db" colab={"base_uri": "https://localhost:8080/", "height": 115}
from sklearn.impute import SimpleImputer
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
import category_encoders as ce
from sklearn.pipeline import make_pipeline
from scipy.stats import uniform, randint
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(random_state=10))
param_distributions = {
'simpleimputer__strategy': ['mean', 'median'],
'randomforestclassifier__n_estimators': randint(50, 300), # range(1, len(X_train.columns)+1)
'randomforestclassifier__max_depth': [5, 10, 15, 20],
'randomforestclassifier__max_features': uniform(0, 1),
}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=5,
cv=3,
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(x_train, y_train);
# + id="x8lgIK81ioCW" colab_type="code" outputId="96d3dab8-f978-4c0a-9306-1789e8d4603d" colab={"base_uri": "https://localhost:8080/", "height": 69}
print('best hyperparameters', search.best_params_)
print('best accuracy score: ', search.best_score_)
y_pred = search.predict(x_test)
# + id="-yw4R5ITn8Vq" colab_type="code" colab={}
# Bug fix: these two lines were commented out, so the to_csv cell below
# raised NameError on `submission`. Build the submission from
# sample_submission so the id column and row order match the expected format.
submission = sample_submission.copy()
submission['status_group'] = y_pred
# + id="4JRmVni3o09w" colab_type="code" outputId="718194b4-4897-4354-cf6a-1591910fc967" colab={"base_uri": "https://localhost:8080/", "height": 158}
submission.to_csv('medinadiegokaggle_4.csv', index=False)
# + id="rR5iOviVo2G1" colab_type="code" colab={}
from google.colab import files
files.download('medinadiegokaggle_4.csv')
# + id="TjG85aR_KubY" colab_type="code" colab={}
test.shape
# + id="QX_oYPemKwBz" colab_type="code" colab={}
train.shape
# + [markdown] id="NGsGE9dyJpag" colab_type="text"
# ### Random Forest Classifier
# + id="it-44CH8KHt5" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
train, validation = train_test_split(train, random_state=10, train_size=.8)
# + id="UCoQmbTYMBAz" colab_type="code" colab={}
columns_drop = ['payment', 'extraction_type', 'waterpoint_type_group', 'quantity_group', 'source_type', 'date_recorded']
target = 'status_group'
features = train.columns.drop(columns_drop + [target])
# + id="QIX3-JXGLxvz" colab_type="code" colab={}
replacing_dates(train)
replacing_dates(validation)
# replace 'none' with np.nan, impute later. no need to reduce cardinality (ordinal encoder will be used)
train['wpt_name'] = train['wpt_name'].replace('none', np.nan)
replacing_nulls_with_nulls(train)
replacing_nulls_with_nulls(validation)
# + id="9v811dAhMPbX" colab_type="code" colab={}
xx_train = train[features]
yy_train = train[target]
xx_val = validation[features]
yy_val = validation[target]
xx_test = test[features]
# + id="vkthcqwUKWk5" colab_type="code" colab={}
xx_train.head()
# + id="IPiihRBvJoxR" colab_type="code" colab={}
# Refit with the best hyperparameters found by the randomized search.
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(random_state=10, max_depth=20,
                           max_features=0.0287, n_estimators=238))
# + id="hhbr2P5XLW68" colab_type="code" colab={}
from sklearn.metrics import accuracy_score
# Fit on train, score on val
pipeline.fit(xx_train, yy_train)
y_pred = pipeline.predict(xx_val)
# Bug fix: the original line had an extra ')' (SyntaxError) and called
# pipeline.score(yy_val, y_pred) -- score() expects (X, y), not
# (y_true, y_pred). accuracy_score gives the intended validation accuracy.
print('Validation Accuracy', accuracy_score(yy_val, y_pred))
# + [markdown] id="Q7uAco2Co8S0" colab_type="text"
# ### confusion matrix
# + id="A13WmAgIll2X" colab_type="code" colab={}
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import matplotlib.pyplot as plt
import seaborn as sns
confusion_matrix(yy_val, y_pred)
# + id="XWjBKSQUEYL6" colab_type="code" colab={}
from sklearn.preprocessing import Normalizer
to_normalize = confusion_matrix(yy_val, y_pred)
norm = Normalizer().transform(to_normalize)
labels = unique_labels(y_pred)
columns = [f'predicted {label}' for label in labels]
index = [f'actual {label}' for label in labels]
table_2 = pd.DataFrame(norm, columns=columns, index=index)
table_2
# + id="XPP5XjecFqHk" colab_type="code" colab={}
# same results as normalizer
from sklearn.preprocessing import MinMaxScaler
to_minmax = confusion_matrix(yy_val, y_pred)
minmax = MinMaxScaler().fit_transform(to_minmax)
labels_2 = unique_labels(y_pred)
columns_2 = [f'predicted {label}' for label in labels_2]
index_2 = [f'actual {label}' for label in labels_2]
table_3 = pd.DataFrame(norm, columns=columns_2, index=index_2)
table_3
# + id="C0bzA5t8HJfL" colab_type="code" colab={}
labels_2 = unique_labels(y_pred)
columns_2 = [f'predicted {label}' for label in labels_2]
index_2 = [f'actual {label}' for label in labels_2]
table_3 = pd.DataFrame(norm, columns=columns_2, index=index_2)
sns.heatmap(table_3, cmap='BuPu_r', fmt='.2%', annot=True) # .1f, d
# + id="yaPjugfqE6lj" colab_type="code" colab={}
def con_array(y_true, y_pred):
  """Return (columns, index) header lists for labelling a confusion matrix.

  NOTE(review): `y_true` is accepted but never used -- the label set is
  derived from `y_pred` only, so any class present in `y_true` but never
  predicted would be missing from the headers. Confirm this is intended.
  """
  labels = unique_labels(y_pred)
  columns = [f'predicted {label}' for label in labels]
  index = [f'actual {label}' for label in labels]
  return columns, index
# Header lists for the validation confusion matrix.
con_array(yy_val, y_pred)
# + id="SktFh9Qul7EI" colab_type="code" colab={}
def convert_array_list(y_true, y_pred):
  """Return the confusion matrix of (y_true, y_pred) as a labelled DataFrame.

  Rows are 'actual <label>', columns are 'predicted <label>'.
  NOTE(review): labels come from `y_pred` only; a class present in `y_true`
  but never predicted could misalign the row/column labels.
  """
  labels = unique_labels(y_pred)
  columns = [f'predicted {label}' for label in labels]
  index = [f'actual {label}' for label in labels]
  table = pd.DataFrame(confusion_matrix(y_true, y_pred),
                      columns=columns, index=index)
  return table
convert_array_list(yy_val, y_pred)
# + id="dR_55_GiAGJe" colab_type="code" colab={}
def convert_array_list(y_true, y_pred):
  """Plot the confusion matrix of (y_true, y_pred) as an annotated heatmap.

  Redefines the tabular version defined earlier in the notebook; this one
  returns the matplotlib Axes produced by seaborn instead of the DataFrame.
  """
  labels = unique_labels(y_pred)
  columns = [f'predicted {label}' for label in labels]
  index = [f'actual {label}' for label in labels]
  table = pd.DataFrame(confusion_matrix(y_true, y_pred),
                      columns=columns, index=index)
  return sns.heatmap(table, annot=True, cmap='CMRmap_r', fmt='d') # fmt='d' changes numerical notation
convert_array_list(yy_val, y_pred);
# + id="tDwMPObLDETq" colab_type="code" colab={}
correct_pred = 5957+201+3386
total_pred = 61+466+537+112+1126+34+5957+201+3386
correct_pred / total_pred
# + id="v7MDmO07KJky" colab_type="code" colab={}
from sklearn.metrics import accuracy_score
print('best accuracy score: ', search.best_score_)
print(accuracy_score(y_train, y_pred))
# + id="NSyVrb27KXxZ" colab_type="code" colab={}
sum(y_pred == y_train) / len(y_pred) # what
# + id="gL0_bocYLBXf" colab_type="code" colab={}
from sklearn.metrics import classification_report
print(classification_report(y_train, y_pred))
# + id="wEX5I3l4LWcE" colab_type="code" colab={}
convert_array_list(y_train, y_pred);
# + id="aY5Czl7EMajd" colab_type="code" colab={}
total_non_func_pred = 21761+72+79
correct_non_funct = 21761
# + id="ZDwB1nzyMgcn" colab_type="code" colab={}
# precision
correct_non_funct / total_non_func_pred
# + id="2XWNK06TM447" colab_type="code" colab={}
# recall
actual_non_func = 1060+3+21761
correct_non_funct / actual_non_func
# + id="KEyM3TP5N48L" colab_type="code" colab={}
# + [markdown] id="13ogGBY6N5fn" colab_type="text"
# ### precision, recall, thresholds, and predicted probabilities
# + id="cBpW0sSGN-fY" colab_type="code" colab={}
len(test)
# + id="qOiQd5WvN_yP" colab_type="code" colab={}
len(x_train)
# + id="nnKOmq6nOBL1" colab_type="code" colab={}
y_train.value_counts(normalize=True)
# + id="vqTFmK_4OH0-" colab_type="code" colab={}
# based on historical data, if you randomly chose waterpumps to inspect, then
# about 46% of the waterpumps would need repairs, and 54% would not need repairs
trips = 2000
print(f'Baseline: {trips * 0.46} waterpumps repairs in {trips} trips')
# + id="MQU6XuNYOVaH" colab_type="code" colab={}
# REDEFINING our target. Identify which waterpumps are non-functional or are functional but needs repair
y_train = y_train != 'functional' # give me those that != functional
y_train.value_counts(normalize=True)
# + id="zauxb8v9Or6J" colab_type="code" colab={}
y_train.head()
# + id="ceNVekN7Oyke" colab_type="code" colab={}
len(x_test) == len(test)
# + id="lppWRO4MPCdB" colab_type="code" colab={}
pipeline.fit(x_train, y_train)
y_pred = search.predict(x_test)
y_pred
# + id="vWe9FGMOQTto" colab_type="code" colab={}
convert_array_list(y_train, y_pred);
# + id="w-Ayp5w6QoEj" colab_type="code" colab={}
| module4/medinadiego_4_assignment_kaggle_challenge_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1806554 Python Assignment 3 Classwork
# +
def avg(a, b, c):
    """Return the arithmetic mean of three numbers."""
    return sum((a, b, c)) / 3
x = int(input())
y = int(input())
z = int(input())
avg(x,y,z)
# -
def fact():
    """Read an integer from stdin and print its factorial."""
    n = int(input())
    result = 1
    for k in range(2, n + 1):
        result *= k
    print(result)
fact()
import math
def hl(a, b):
    """Return (lcm, gcd) of two positive integers.

    Bug fix: the original used true division (`/`), so the LCM came back as
    a float (e.g. 12.0). Integer division keeps it an exact int; it is safe
    because gcd(a, b) always divides a * b.
    """
    gcd = math.gcd(a, b)
    lcm = (a * b) // gcd
    return lcm, gcd
a = int(input())
b = int(input())
lcm,hcf = hl(a,b)
print(f'lcm : {lcm} hcf : {hcf}')
def addSum(a):
    """Return the sum 0 + 1 + ... + a for a non-negative integer a.

    Improvement: uses the closed form a*(a+1)/2 instead of recursion, so
    large inputs no longer blow the recursion limit (the original raised
    RecursionError for a around 1000).
    """
    return a * (a + 1) // 2
a = int(input())
addSum(a)
def reverse(n):
    """Return the decimal digit-reversal of a non-negative integer n.

    Bug fix: the original accumulated into module-level globals (`rev`, `p`),
    so every call after the first returned a wrong, compounded value. This
    version keeps all state local and is safe to call repeatedly.
    """
    result = 0
    while n > 0:
        result = result * 10 + n % 10
        n //= 10
    return result
n = int(input())
reverse(n)
| T&T/python scripts/.ipynb_checkpoints/Classwork 1806554 Python Assignment 3-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import glob
import pathlib
import pickle
import requests
import tarfile
import time
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import ensemble, model_selection, preprocessing
import torch
import torchinfo
import torchvision
# -
# # Introduction to Deep Learning with PyTorch
# # Data
#
# ## CIFAR-10 Dataset
#
# The original [CIFAR-10](http://www.cs.toronto.edu/~kriz/cifar.html) dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class.
# CIFAR-10 class index -> human-readable label (order is fixed by the dataset).
CLASS_LABELS = dict(enumerate((
    "airplane",
    "automobile",
    "bird",
    "cat",
    "deer",
    "dog",
    "frog",
    "horse",
    "ship",
    "truck",
)))
# ### Download and extract the data
# +
# Download the CIFAR-10 archive and unpack it under ../data/cifar-10.
DATA_DIR = pathlib.Path("../data/")
RAW_DATA_DIR = DATA_DIR / "cifar-10"
URL = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
RAW_DATA_DIR.mkdir(parents=True, exist_ok=True)
# NOTE(review): the archive is re-downloaded on every run — no cached-file check.
with open(RAW_DATA_DIR / "cifar-10-python.tar.gz", "wb") as f:
    response = requests.get(URL)
    f.write(response.content)
with tarfile.open(RAW_DATA_DIR / "cifar-10-python.tar.gz", "r:gz") as f:
    f.extractall(RAW_DATA_DIR)
# -
# ### Load the data
#
# We will load the data using the [Pandas](https://pandas.pydata.org/) library. Highly recommend the most recent edition of [*Python for Data Analysis*](https://learning.oreilly.com/library/view/python-for-data/9781491957653/) by Pandas creator <NAME> for anyone interested in learning how to use Pandas.
# +
# Accumulate pixel data and labels from every pickled batch file.
_data = []
_labels = []
# glob matches data_batch_1..5 and test_batch
filepaths = glob.glob("../data/cifar-10/cifar-10-batches-py/*_batch*")
for filepath in sorted(filepaths):
    with open(filepath, "rb") as f:
        # latin1 is required to unpickle the Python-2-era batch files
        _batch = pickle.load(f, encoding="latin1")
        _data.append(_batch["data"])
        _labels.extend(_batch["labels"])
# each image has 3 channels with height and width of 32 pixels
features = pd.DataFrame(
    np.vstack(_data),
    columns=[f"p{i}" for i in range(3 * 32 * 32)],
    dtype="uint8",
)
target = pd.Series(_labels, dtype="uint8", name="labels")
# -
# ### Explore the data
# dtype/memory overview, then a peek at the first rows of features and labels
features.info()
features.head()
target.head()
# ### Visualize the data
# +
# 10x10 grid of randomly sampled training images with their class labels.
fig, axes = plt.subplots(10, 10, sharex=True, sharey=True, figsize=(15, 15))
for i in range(10):
    for j in range(10):
        m, _ = features.shape
        k = np.random.randint(m)
        # stored row-major as (channels, height, width); imshow expects (H, W, C)
        img = (features.loc[k, :]
                       .to_numpy()
                       .reshape((3, 32, 32))
                       .transpose(1, 2, 0))
        _ = axes[i, j].imshow(img)
        _ = axes[i, j].set_title(CLASS_LABELS[target[k]])
fig.suptitle("Random CIFAR-10 images", x=0.5, y=1.0, fontsize=25)
fig.tight_layout()
# -
# # Creating a Test Dataset
#
# Before we look at the data any further, we need to create a test set, put it aside, and never look at it (until we are ready to test our trained machine learning model!). Why? We don't want our machine learning model to memorize our dataset (this is called overfitting). Instead we want a model that will generalize well (i.e., make good predictions) for inputs that it didn't see during training. To do this we split our dataset into training and testing datasets. The training dataset will be used to train our machine learning model(s) and the testing dataset will be used to make a final evaluation of our machine learning model(s).
#
# ## If you might refresh data in the future...
#
# ...then you want to use some particular hashing function to compute the hash of a unique identifier for each observation of data and include the observation in the test set if the resulting hash value is less than some fixed percentage of the maximum possible hash value for your algorithm. This way, even if you fetch more data, your test set will never include data that was previously included in the training data.
# +
import zlib
def in_testing_data(identifier, test_size):
    """Return True when `identifier` hashes into the test fraction `test_size`.

    NOTE(review): ``bytes(identifier)`` for an int produces that many zero
    bytes, so the CRC depends only on the integer's magnitude — confirm this
    encoding of the identifier is intended.
    """
    checksum = zlib.crc32(bytes(identifier)) & 0xffffffff
    return checksum < test_size * 2**32

def split_train_test_by_id(data, test_size, id_column):
    """Split a DataFrame into (train, test) using a stable hash of its ids."""
    is_test = data[id_column].apply(lambda identifier: in_testing_data(identifier, test_size))
    return data.loc[~is_test], data.loc[is_test]
# -
# ## If this is all the data you will ever have...
#
# ...then you can just set a seed for the random number generator and then randomly split the data. Scikit-Learn has a [`model_selection`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) module that contains tools for splitting datasets into training and testing sets.
# +
# One fixed master seed feeds a shared generator, so every derived seed
# (and therefore the whole notebook) is reproducible run-to-run.
SEED = 42
SEED_GENERATOR = np.random.RandomState(SEED)

def generate_seed():
    """Return the next reproducible seed (uint16 range) from the shared generator."""
    upper_bound = np.iinfo("uint16").max
    return SEED_GENERATOR.randint(upper_bound)
# -
# split the dataset into training and testing data
# Hold out 10% of the data as the final test set, seeded reproducibly.
_seed = generate_seed()
_random_state = np.random.RandomState(_seed)
train_features, test_features, train_target, test_target = model_selection.train_test_split(
    features,
    target,
    test_size=1e-1,
    random_state=_random_state
)
train_features.info()
test_features.info()
# ## Feature scaling
#
# Data for individual pixels is stored as integers between 0 and 255. Neural network models work best when numerical features are scaled. To rescale the raw features we can use tools from the [Scikit-Learn preprocessing module](https://scikit-learn.org/stable/modules/preprocessing.html).
# +
# hyper-parameters
# hyper-parameters
_min_max_scaler_hyperparameters = {
    "feature_range": (0, 1),
}
# rescales each uint8 pixel column into [0, 1]
preprocessor = preprocessing.MinMaxScaler(**_min_max_scaler_hyperparameters)
# -
# scale pixels to [0, 1] as float32; targets become int64 class indices
# (the dtypes PyTorch expects for inputs and cross-entropy targets)
preprocessed_train_features = (preprocessor.fit_transform(train_features)
                               .astype("float32"))
preprocessed_train_target = (train_target.to_numpy()
                             .astype("int64"))
# # Classical ML Benchmark Model
#
# We have several of these from yesterday!
_seed = generate_seed()
# (sic) "hyperpararmeters" — existing variable name left untouched
_estimator_hyperpararmeters = {
    "bootstrap": True,
    "oob_score": True,
    "max_samples": 0.9,
    "random_state": np.random.RandomState(_seed),
}
estimator = ensemble.RandomForestClassifier(**_estimator_hyperpararmeters)
_ = estimator.fit(preprocessed_train_features, preprocessed_train_target)
# out-of-bag accuracy: the classical-ML benchmark for the networks below
estimator.oob_score_
# # Neural network from scratch
#
# ## Split the training data into training and validation sets
# Carve a 10% validation split out of the training data (names are rebound
# in place, so the "train" arrays shrink accordingly).
_seed = generate_seed()
_random_state = np.random.RandomState(_seed)
preprocessed_train_features, preprocessed_val_features, preprocessed_train_target, preprocessed_val_target = (
    model_selection.train_test_split(preprocessed_train_features,
                                     preprocessed_train_target,
                                     test_size=1e-1,
                                     random_state=_random_state)
)
preprocessed_train_features.shape
preprocessed_val_features.shape
#
# Next let's create a simple model using nothing but [PyTorch tensor operations](https://pytorch.org/docs/stable/tensors.html). PyTorch uses `torch.tensor` rather than `numpy.ndarray` so we need to convert data.
# +
# Wrap the numpy arrays as torch tensors (from_numpy shares the underlying memory).
preprocessed_train_target = torch.from_numpy(preprocessed_train_target)
preprocessed_train_features = torch.from_numpy(preprocessed_train_features)
preprocessed_val_target = torch.from_numpy(preprocessed_val_target)
preprocessed_val_features = torch.from_numpy(preprocessed_val_features)
# -
preprocessed_train_features
preprocessed_train_target
# PyTorch provides methods to create random or zero-filled tensors, which we will use to create our weights and bias for a simple linear model. These are just regular tensors, with one very special addition: we tell PyTorch that they require a gradient. This causes PyTorch to record all of the operations done on the tensor, so that it can calculate the gradient during back-propagation automatically!
#
# For the weights, we set `requires_grad` after the initialization, since we don’t want that step included in the gradient. (Note that a trailling `_` in PyTorch signifies that the operation is performed _in-place_.)
# +
number_samples, number_features = preprocessed_train_features.shape
# using Xavier initialization (divide weights by sqrt(number_features))
# one weight column per CIFAR-10 class
weights = torch.randn(number_features, 10) / number_features**0.5
weights.requires_grad_() # trailing underscore indicates in-place operation
bias = torch.zeros(10, requires_grad=True)
# -
# Thanks to PyTorch’s ability to calculate gradients automatically, we can use any standard Python function (or callable object) in a model! So we will start by writing a function to peform matrix multiplication and broadcasted addition called `linear_transformation`. We will also need an activation function, so we’ll write a function called `log_softmax_activation` and use it.
#
# **N.B.** Although PyTorch provides lots of pre-written loss functions, activation functions, and so forth, you can easily write your own using plain python. PyTorch will even create fast GPU or vectorized CPU code for your function automatically.
# +
def linear_transformation(X):
    """Affine map ``X @ weights + bias`` using the module-level parameters."""
    return X @ weights + bias
def log_softmax_activation(X):
    """Log-softmax over the last dimension: x_i - log(sum_j exp(x_j))."""
    normalizer = X.exp().sum(dim=-1, keepdim=True).log()
    return X - normalizer
def logistic_regression(X):
    """Per-class log-probabilities: log-softmax over an affine map of X."""
    Z = linear_transformation(X)
    return log_softmax_activation(Z)
# -
# In the above, the `@` stands for the dot product operation. We will call our function on one batch of data (in this case, 64 images). Note that our predictions won’t be any better than random at this stage, since we start with random weights.
batch_size = 64
# predictions from randomly initialised weights — no better than chance yet
output = logistic_regression(preprocessed_train_features[:batch_size])
output[1]
# As you see, the `output` tensor contains not only the tensor values, but also a gradient function, `grad_fn`. We’ll use this later to do back propagation to update the model parameters.
#
# Let’s implement `negative_log_likelihood` to use as the loss function. Again, we can just use standard Python code.
def negative_log_likelihood(output, target):
    """Mean NLL given per-class log-probabilities and integer class targets."""
    rows = torch.arange(output.shape[0])
    # pick each row's log-probability of its true class, then average
    picked = output[rows, target]
    return -picked.mean()
# loss of the randomly initialised model on the first mini-batch
negative_log_likelihood(output, preprocessed_train_target[:batch_size])
# Let’s also implement a function to calculate the `accuracy` of our model: for each prediction, if the index with the largest value matches the target value, then the prediction was correct.
def accuracy(output, target):
    """Fraction of rows whose argmax prediction equals the target class."""
    predicted_classes = output.argmax(dim=1)
    correct = predicted_classes.eq(target)
    return correct.float().mean()
# For comparison purposes we can compute the accuracy of our model with randomly initialized parameters.
# chance-level accuracy (~0.1 for 10 classes) before any training
accuracy(output, preprocessed_train_target[:batch_size])
# We can now run a training loop. For each iteration, we will:
#
# * select a mini-batch of data (of size `batch_size`)
# * use the model to make predictions
# * calculate the loss
# * `loss.backward()` updates the gradients of the model.
#
# We now use these gradients to update the weights and bias (i.e., model parameters). We do this within the `torch.no_grad()` context manager, because we do not want these actions to be recorded for our next calculation of the gradient. You can read more about how PyTorch’s Autograd records operations [here](https://pytorch.org/docs/stable/notes/autograd.html).
#
# We then set the gradients to zero, so that we are ready for the next loop. Otherwise, our gradients would record a running tally of all the operations that had happened (i.e. loss.backward() adds the gradients to whatever is already stored, rather than replacing them).
# + tags=[]
model_fn = logistic_regression
loss_fn = negative_log_likelihood
number_epochs = 15
# ceiling division: the last batch may be smaller than batch_size
number_batches = (number_samples - 1) // batch_size + 1
learning_rate = 1e-2
for epoch in range(number_epochs):
    for batch in range(number_batches):
        # forward pass
        start = batch * batch_size
        X = preprocessed_train_features[start:(start + batch_size)]
        y = preprocessed_train_target[start:(start + batch_size)]
        loss = loss_fn(model_fn(X), y)
        # back propagation
        loss.backward()
        # update parameters outside autograd tracking, then clear the
        # accumulated gradients so the next backward() starts from zero
        with torch.no_grad():
            weights -= learning_rate * weights.grad
            bias -= learning_rate * bias.grad
            weights.grad.zero_()
            bias.grad.zero_()
# -
# That’s it: we’ve created and trained a minimal neural network (in this case, a logistic regression, since we have no hidden layers) entirely from scratch! Let’s check the loss and accuracy and compare those to what we got earlier. We expect that the loss will have decreased and accuracy to have increased, and they have.
# +
# Report loss/accuracy on the full training set.
# NOTE(review): no torch.no_grad() here, so autograd graphs are built for
# these forward passes — harmless for a one-off report, but wasteful.
training_loss = loss_fn(model_fn(preprocessed_train_features), preprocessed_train_target)
training_accuracy = accuracy(model_fn(preprocessed_train_features), preprocessed_train_target)
print(f"Training loss: {training_loss}")
print(f"Training accuracy: {training_accuracy}")
# -
# # Refactor using `torch.nn.functional`
#
# We will now refactor our code using [torch.nn](https://pytorch.org/docs/stable/nn.html) modules to make it more concise and flexible. The first and easiest step is to make our code shorter by replacing our hand-written activation and loss functions with those from [torch.nn.functional](https://pytorch.org/docs/stable/nn.html#torch-nn-functional).
#
# Since we are using negative log likelihood loss and log softmax activation in this tutorial, we can use [torch.nn.functional.cross_entropy](https://pytorch.org/docs/stable/nn.html#cross-entropy) which combines the two.
import torch.nn.functional as F
# cross_entropy fuses log-softmax and NLL, so it takes the raw affine output
Z = linear_transformation(preprocessed_train_features)
F.cross_entropy(Z, preprocessed_train_target)
# # Refactor using `torch.nn.Module`
#
# Next up, we’ll use [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#module) and [torch.nn.Parameter](https://pytorch.org/docs/stable/nn.html#parameters), for a clearer and more concise training loop. In this case, we want to create a class that holds our weights, bias, and method for the forward step. `torch.nn.Module` has a number of attributes and methods (such as `parameters()` and `zero_grad()`) which we will be using.
# +
from torch import nn
class LogisticRegression(nn.Module):
    """Multinomial logistic regression over flattened 32x32 RGB images.

    Parameters are registered via nn.Parameter so nn.Module can track them.
    """

    def __init__(self):
        super().__init__()
        n_features = 3 * 32 * 32
        n_classes = 10
        # Xavier-style init: scale random weights by 1/sqrt(n_features)
        self._weights = nn.Parameter(torch.randn(n_features, n_classes) / n_features**0.5)
        self._bias = nn.Parameter(torch.zeros(n_classes))

    def forward(self, X):
        """Return raw class logits for a batch of flattened images."""
        return X @ self._weights + self._bias
# -
# Since we’re now using an object instead of just using a function, we first have to instantiate our model.
# instantiate the nn.Module-based model (parameters are created in __init__)
model_fn = LogisticRegression()
# Now we can calculate the loss in the same way as before. Note that `torch.nn.Module` objects are used as if they are functions (i.e they are callable), but behind the scenes Pytorch will call the `forward` method.
# calling the module invokes its forward() under the hood
F.cross_entropy(model_fn(preprocessed_train_features), preprocessed_train_target)
# Previously in our training loop we had to update the values for each parameter by name and manually zero out the grads for each parameter separately. With our refactoring we can take advantage of `model_fn.parameters()` and `model_fn.zero_grad()` (which are both defined by PyTorch for `torch.nn.Module` base class!) to make those steps more concise and less prone to the error of forgetting some of our parameters, particularly if we had a more complicated model.
#
# In order to facilitate re-use and continued refactoring, we can encapsulate the logic of our deep learning pipeline in the following functions.
# +
def partial_fit(model_fn, loss_fn, learning_rate, X_batch, y_batch):
    """Run one manual SGD step on a single mini-batch."""
    # forward pass
    loss = loss_fn(model_fn(X_batch), y_batch)
    # back propagation
    loss.backward()
    with torch.no_grad():
        for param in model_fn.parameters():
            param -= learning_rate * param.grad
    # reset gradients so the next backward() starts from zero
    model_fn.zero_grad()

def fit(model_fn, loss_fn, X, y, learning_rate=1e-2, number_epochs=2, batch_size=64):
    """Train model_fn on (X, y) with plain mini-batch gradient descent."""
    total, _ = X.shape
    for _ in range(number_epochs):
        # slice the tensors directly; the final batch may be short
        for start in range(0, total, batch_size):
            stop = start + batch_size
            partial_fit(model_fn, loss_fn, learning_rate, X[start:stop], y[start:stop])
# -
model_fn = LogisticRegression()
loss_fn = F.cross_entropy
# train for 2 epochs with the hand-rolled SGD loop
fit(model_fn, loss_fn, preprocessed_train_features, preprocessed_train_target, number_epochs=2)
# +
# Report loss/accuracy on the full training set.
# NOTE(review): no torch.no_grad() here — graphs are built but discarded.
training_loss = loss_fn(model_fn(preprocessed_train_features), preprocessed_train_target)
training_accuracy = accuracy(model_fn(preprocessed_train_features), preprocessed_train_target)
print(f"Training loss: {training_loss}")
print(f"Training accuracy: {training_accuracy}")
# -
# # Refactoring using `torch.nn.Linear`
#
# Instead of defining and initializing `self._weights` and `self._bias`, and calculating `X @ self._weights + self._bias`, we will instead use the Pytorch class [torch.nn.Linear](https://pytorch.org/docs/stable/nn.html#linear) to define a linear layer which does all that for us. Pytorch has many types of predefined layers that can greatly simplify our code, and since the library code is highly optimized using PyTorch's predefined layers often makes our code faster too.
# +
from torch import nn
class LogisticRegression(nn.Module):
    """Logistic regression expressed as a single ``nn.Linear`` layer.

    nn.Linear owns and initialises the weight matrix and bias for us.
    """

    def __init__(self):
        super().__init__()
        in_features, out_features = 3 * 32 * 32, 10
        self._linear_layer = nn.Linear(in_features, out_features)

    def forward(self, X):
        """Return raw class logits for a batch of flattened images."""
        logits = self._linear_layer(X)
        return logits
# -
model_fn = LogisticRegression()
loss_fn = F.cross_entropy
# same manual-SGD fit, now driving the nn.Linear-based model for 15 epochs
fit(model_fn, loss_fn, preprocessed_train_features, preprocessed_train_target, number_epochs=15)
# +
# Report loss/accuracy on the full training set.
# NOTE(review): no torch.no_grad() here — graphs are built but discarded.
training_loss = loss_fn(model_fn(preprocessed_train_features), preprocessed_train_target)
training_accuracy = accuracy(model_fn(preprocessed_train_features), preprocessed_train_target)
print(f"Training loss: {training_loss}")
print(f"Training accuracy: {training_accuracy}")
# -
# # Refactoring using `torch.optim`
#
# Pytorch also has a package with various optimization algorithms, [torch.optim](https://pytorch.org/docs/stable/optim.html). We can use the step method from our optimizer to take a forward step, instead of manually updating each parameter. Also note that now the `learning_rate` is a parameter of the optimizer and we do not need to manually pass it as an argument to the `fit` and `partial_fit` functions.
from torch import optim
# +
def partial_fit(model_fn, loss_fn, X_batch, y_batch, opt):
    """Run one optimizer step on a single mini-batch."""
    # forward pass
    loss = loss_fn(model_fn(X_batch), y_batch)
    # back propagation
    loss.backward()
    opt.step()
    opt.zero_grad()  # don't forget to reset the gradient after each batch!

def fit(model_fn, loss_fn, X, y, opt, number_epochs=2, batch_size=64):
    """Train model_fn on (X, y); the optimizer owns the learning rate."""
    total, _ = X.shape
    for _ in range(number_epochs):
        # slice mini-batches directly; the final batch may be short
        for start in range(0, total, batch_size):
            stop = start + batch_size
            partial_fit(model_fn, loss_fn, X[start:stop], y[start:stop], opt)
# -
model_fn = LogisticRegression()
loss_fn = F.cross_entropy
# SGD now owns the learning rate instead of fit()/partial_fit()
opt = optim.SGD(model_fn.parameters(), lr=1e-2)
fit(model_fn, loss_fn, preprocessed_train_features, preprocessed_train_target, opt, number_epochs=15)
# +
# Report loss/accuracy on the full training set.
# NOTE(review): no torch.no_grad() here — graphs are built but discarded.
training_loss = loss_fn(model_fn(preprocessed_train_features), preprocessed_train_target)
training_accuracy = accuracy(model_fn(preprocessed_train_features), preprocessed_train_target)
print(f"Training loss: {training_loss}")
print(f"Training accuracy: {training_accuracy}")
# -
# # Refactor using `torch.utils.data.TensorDataSet`
#
# The [torch.utils.data](https://pytorch.org/docs/stable/data.html#module-torch.utils.data) module contains a number of useful classes that we can use to further simplify our code. PyTorch has an abstract `Dataset` class. A Dataset can be anything that has a `__len__` function (called by Python’s standard `len` function) and a `__getitem__` function as a way of indexing into it.
#
# PyTorch’s `TensorDataset` is a `Dataset` wrapping tensors. By defining a length and way of indexing, this also gives us a way to iterate, index, and slice along the first dimension of a tensor. This will make it easier to access both the independent and dependent variables in the same line as we train.
#
from torch.utils import data
def fit(model_fn, loss_fn, data_set, number_samples, opt, number_epochs=2, batch_size=64):
    """Train by slicing (X, y) mini-batches out of a TensorDataset.

    Slicing a TensorDataset yields an (X_batch, y_batch) pair in one go;
    the final slice may be shorter than batch_size.
    """
    for _ in range(number_epochs):
        for start in range(0, number_samples, batch_size):
            X_batch, y_batch = data_set[start:start + batch_size]
            partial_fit(model_fn, loss_fn, X_batch, y_batch, opt)
model_fn = LogisticRegression()
torchinfo.summary(model_fn)
loss_fn = F.cross_entropy
# TensorDataset pairs the feature and target tensors for joint slicing
train_dataset = data.TensorDataset(preprocessed_train_features, preprocessed_train_target)
opt = optim.SGD(model_fn.parameters(), lr=1e-2)
# note the annoying dependence on number of samples!
# number_samples is the module-level count taken from the training features
fit(model_fn, loss_fn, train_dataset, number_samples, opt, number_epochs=15)
# +
# Report loss/accuracy on the full training set.
# NOTE(review): no torch.no_grad() here — graphs are built but discarded.
training_loss = loss_fn(model_fn(preprocessed_train_features), preprocessed_train_target)
training_accuracy = accuracy(model_fn(preprocessed_train_features), preprocessed_train_target)
print(f"Training loss: {training_loss}")
print(f"Training accuracy: {training_accuracy}")
# -
# # Refactor using `torch.utils.data.DataLoader`
#
# Pytorch’s `DataLoader` is responsible for managing batches. You can create a `DataLoader` from any `Dataset`. `DataLoader` makes it easier to iterate over batches. Rather than having to use `data_set[start:(start + batch_size)]`, the `DataLoader` gives us each minibatch automatically.
# + tags=[]
# data.DataLoader?
# -
def fit(model_fn, loss_fn, data_loader, opt, number_epochs=2):
    """Train for number_epochs, letting the DataLoader handle batching."""
    for _ in range(number_epochs):
        for features_batch, labels_batch in data_loader:
            partial_fit(model_fn, loss_fn, features_batch, labels_batch, opt)
model_fn = LogisticRegression()
loss_fn = F.cross_entropy
# shuffle each epoch; num_workers=4 loads batches in worker processes
train_data_loader = data.DataLoader(train_dataset, batch_size=batch_size, num_workers=4, shuffle=True)
opt = optim.SGD(model_fn.parameters(), lr=1e-2)
# now we no longer have the annoying dependency on number of samples!
fit(model_fn, loss_fn, train_data_loader, opt)
# +
# Report loss/accuracy on the full training set.
# NOTE(review): no torch.no_grad() here — graphs are built but discarded.
training_loss = loss_fn(model_fn(preprocessed_train_features), preprocessed_train_target)
training_accuracy = accuracy(model_fn(preprocessed_train_features), preprocessed_train_target)
print(f"Training loss: {training_loss}")
print(f"Training accuracy: {training_accuracy}")
# -
# Thanks to Pytorch’s `torch.nn.Module`, `torch.nn.Parameter`, `Dataset`, and `DataLoader`, our training loop is now dramatically smaller and easier to understand. Let’s now try to add the basic features necessary to create effective models in practice.
# # Adding Validation
#
# In the first part of this tutorial, we were just trying to get a reasonable training loop set up for use on our training data. In reality, you always should also have a validation set, in order to identify if you are overfitting.
#
# Shuffling the training data is important to prevent correlation between batches and overfitting. On the other hand, the validation loss will be identical whether we shuffle the validation set or not. Since shuffling takes extra time, it makes no sense to shuffle the validation data.
#
# We’ll use a batch size for the validation set that is twice as large as that for the training set. This is because the validation set does not need backpropagation and thus takes less memory (it doesn’t need to store the gradients). We take advantage of this to use a larger batch size and compute the loss more quickly.
# +
def checkpoint(filepath, lr_scheduler, model_fn, optimizer):
    """Serialize training state (model, optimizer, optional scheduler) to filepath.

    Bug fix: tolerate ``lr_scheduler=None`` so callers that train without a
    scheduler (the first version of ``fit`` below) can still checkpoint —
    the original unconditionally called ``lr_scheduler.state_dict()``.
    The saved keys are unchanged when a scheduler is supplied.
    """
    state = {
        "model_state_dict": model_fn.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
    }
    if lr_scheduler is not None:
        state["lr_scheduler"] = lr_scheduler.state_dict()
    torch.save(state, filepath)
def validate(model_fn, loss_fn, data_loader):
    """Return (mean accuracy, mean loss) of model_fn over a data loader.

    Runs under no_grad: validation needs no gradients, saving memory/time.
    """
    with torch.no_grad():
        batch_metrics = [
            (accuracy(model_fn(features), labels), loss_fn(model_fn(features), labels))
            for features, labels in data_loader
        ]
        accs, losses = zip(*batch_metrics)
        avg_accuracy = torch.stack(accs).mean()
        avg_loss = torch.stack(losses).mean()
    return avg_accuracy, avg_loss
def fit(model_fn, loss_fn, train_data_loader, opt, checkpoint_filepath=None, val_data_loader=None, number_epochs=2):
    """Train for number_epochs, optionally validating and checkpointing each epoch.

    Parameters
    ----------
    model_fn : nn.Module toggled between .train() and .eval() each epoch.
    loss_fn : loss callable taking (predictions, targets).
    train_data_loader : iterable of (X, y) training batches.
    opt : optimizer driven by partial_fit.
    checkpoint_filepath : where to save state after each epoch; skipped when None.
    val_data_loader : optional iterable of validation batches.
    number_epochs : passes over the training data.

    Bug fixes: the original checkpoint line referenced a module-level
    ``lr_scheduler`` that this version of fit never receives (NameError at
    runtime), and it ran even when no checkpoint path was supplied. This
    version saves model/optimizer state directly and only when asked to.
    """
    for epoch in range(number_epochs):
        # train the model
        model_fn.train()
        for X_batch, y_batch in train_data_loader:
            partial_fit(model_fn, loss_fn, X_batch, y_batch, opt)
        # compute validation loss after each training epoch
        model_fn.eval()
        if val_data_loader is not None:
            val_acc, val_loss = validate(model_fn, loss_fn, val_data_loader)
            print(f"Training epoch: {epoch}, Validation accuracy: {val_acc}, Validation loss: {val_loss}")
        # checkpoint key model state (this fit variant has no LR scheduler,
        # so save model/optimizer state directly)
        if checkpoint_filepath is not None:
            torch.save({
                "model_state_dict": model_fn.state_dict(),
                "optimizer_state_dict": opt.state_dict(),
            }, checkpoint_filepath)
# +
model_fn = LogisticRegression()
loss_fn = F.cross_entropy
train_data_loader = data.DataLoader(train_dataset, batch_size=64, shuffle=True)
opt = optim.SGD(model_fn.parameters(), lr=1e-2)
val_dataset = data.TensorDataset(preprocessed_val_features, preprocessed_val_target)
# NOTE(review): the prose above says the validation batch size should be
# double the training one, but 64 is used for both — confirm which is intended.
val_data_loader = data.DataLoader(val_dataset, batch_size=64)
# -
# Bug fix: `val_data_loader` was passed positionally and bound to the
# `checkpoint_filepath` parameter, so validation never ran; pass it by keyword.
fit(model_fn, loss_fn, train_data_loader, opt, val_data_loader=val_data_loader, number_epochs=2)
# +
# Report loss/accuracy on the full training set.
# NOTE(review): no torch.no_grad() here — graphs are built but discarded.
training_loss = loss_fn(model_fn(preprocessed_train_features), preprocessed_train_target)
training_accuracy = accuracy(model_fn(preprocessed_train_features), preprocessed_train_target)
print(f"Training loss: {training_loss}")
print(f"Training accuracy: {training_accuracy}")
# -
# ### Exercise: Logging Accuracy during Validation
#
# Make the necessary changes to the `validation` function so that you log out your model's accuracy on the validation data after every epoch.
# +
# insert code here!
# -
# ### Exercise: Underfitting or Overfitting?
#
# Train your model for 15-20 epochs. Do you think the model is underfitting or overfitting? Why?
# +
# insert code here!
# -
# ## Adding Checkpointing
# # Switching to CNN
#
# We are now going to build our neural network with three convolutional-subsampling layers. Because none of the functions in the previous section assume anything about the model form, we’ll be able to use them to train a CNN without any modification!
#
# The first architecture that we will implement is the classic [LeNet-5](https://www.datasciencecentral.com/lenet-5-a-classic-cnn-architecture/) architecture. We will use Pytorch’s predefined [torch.nn.Conv2d](https://pytorch.org/docs/stable/nn.html#conv2d) class as our convolutional layer. We define a CNN with 3 convolutional layers. Each convolution is followed by a [hyperbolic tangent](https://pytorch.org/docs/stable/generated/torch.nn.Tanh.html#torch.nn.Tanh) non-linear activation function and [average pooling](https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html#torch.nn.AvgPool2d). After the three convolutional-subsampling layers, we add a couple of densely connected linear layers.
class LeNet5(nn.Module):
    """LeNet-5 for CIFAR-10: two conv/tanh/avg-pool stages, then dense layers.

    Fixes vs the original:
    * adds the missing final ``Linear(84, 10)`` output layer — the original
      returned 84-dimensional activations instead of 10 class logits, unlike
      the equivalent ``nn.Sequential`` version later in this notebook;
    * uses ``torch.tanh`` instead of the deprecated ``F.tanh``.
    """

    def __init__(self):
        super().__init__()
        self._conv1 = nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0)
        self._conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0)
        self._dense1 = nn.Linear(400, 120)  # 16 channels * 5 * 5 spatial = 400
        self._dense2 = nn.Linear(120, 84)
        self._dense3 = nn.Linear(84, 10)  # one logit per CIFAR-10 class

    def forward(self, X):
        X = X.view(-1, 3, 32, 32)  # implicit knowledge of CIFAR-10 data shape!
        X = F.avg_pool2d(torch.tanh(self._conv1(X)), 2)  # 32 -> 28 -> 14
        X = F.avg_pool2d(torch.tanh(self._conv2(X)), 2)  # 14 -> 10 -> 5
        X = X.view(X.size(0), -1)
        X = torch.tanh(self._dense1(X))
        X = torch.tanh(self._dense2(X))
        return self._dense3(X)
model_fn = LeNet5()
torchinfo.summary(model_fn)
opt = optim.SGD(model_fn.parameters(), lr=1e-2, momentum=0.9)
# note that we can re-use the loss function as well as training and validation data loaders
# Bug fix: pass the validation loader by keyword — positionally it bound to
# the `checkpoint_filepath` parameter, silently disabling validation.
fit(model_fn, loss_fn, train_data_loader, opt, val_data_loader=val_data_loader)
# # Refactor using `torch.nn.Sequential`
#
# PyTorch has another handy class we can use to simplify our code: [torch.nn.Sequential](https://pytorch.org/docs/stable/nn.html#sequential). A `Sequential` object runs each of the modules contained within it, in a sequential manner. This is a simpler way of writing our neural network.
#
# To take advantage of this, we need to be able to easily define a custom layer from a given function. For instance, PyTorch doesn’t have a view layer, and we need to create one for our network. `LambdaLayer` will create a layer that we can then use when defining a network with `Sequential`.
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable so it can be used as a layer in nn.Sequential."""

    def __init__(self, f):
        super().__init__()
        self._f = f

    def forward(self, inputs):
        """Apply the wrapped callable to the batch."""
        return self._f(inputs)
# +
# LeNet-5 expressed as a Sequential pipeline; LambdaLayer supplies the
# reshapes PyTorch has no built-in layer for.
model_fn = nn.Sequential(
    LambdaLayer(lambda X: X.view(-1, 3, 32, 32)),  # flat pixels -> NCHW
    nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),
    nn.Tanh(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
    nn.Tanh(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    LambdaLayer(lambda X: X.view(X.size(0), -1)),  # NCHW -> flat features
    nn.Linear(400, 120),
    nn.Tanh(),
    nn.Linear(120, 84),
    nn.Tanh(),
    nn.Linear(84, 10)
)
opt = optim.SGD(model_fn.parameters(), lr=1e-2, momentum=0.9)
# -
# Bug fix: pass the validation loader by keyword — positionally it bound to
# the `checkpoint_filepath` parameter, silently disabling validation.
fit(model_fn,
    loss_fn,
    train_data_loader,
    opt,
    val_data_loader=val_data_loader,
    number_epochs=2)
# # Generalize our pipeline by wrapping our DataLoader
#
# Our CNN is fairly concise, but it only works with CIFAR-10, because it assumes the input is a 3 * 32 * 32 long vector. Let’s get rid of this assumption, so our model works with any three channel image. First, we can remove the initial Lambda layer by moving the data preprocessing into a generator.
class WrappedDataLoader:
    """Apply a preprocessing function to every batch yielded by a loader."""

    def __init__(self, data_loader, f):
        self._data_loader = data_loader
        self._f = f

    def __len__(self):
        """Number of batches, delegated to the wrapped loader."""
        return len(self._data_loader)

    def __iter__(self):
        """Yield each wrapped batch transformed by f(*batch)."""
        for batch in self._data_loader:
            yield self._f(*batch)
# Same architecture without the leading reshape layer: this model now
# expects pre-shaped NCHW input, supplied by the wrapped data loaders below.
model_fn = nn.Sequential(
    nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),
    nn.Tanh(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
    nn.Tanh(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    LambdaLayer(lambda X: X.view(X.size(0), -1)),
    nn.Linear(400, 120),
    nn.Tanh(),
    nn.Linear(120, 84),
    nn.Tanh(),
    nn.Linear(84, 10)
)
torchinfo.summary(model_fn, input_size=(64, 3, 32, 32))
# +
opt = optim.SGD(model_fn.parameters(), lr=1e-2, momentum=0.9)
_preprocess = lambda X, y: (X.view(-1, 3, 32, 32), y)
# rebind the loader names to wrapped versions that reshape each batch
# NOTE(review): re-running this cell wraps the already-wrapped loaders again;
# harmless here because view() to the same shape is idempotent, but worth knowing.
train_data_loader = WrappedDataLoader(train_data_loader, _preprocess)
val_data_loader = WrappedDataLoader(val_data_loader, _preprocess)
# -
# Bug fix: pass the validation loader by keyword — positionally it bound to
# the `checkpoint_filepath` parameter, silently disabling validation.
fit(model_fn,
    loss_fn,
    train_data_loader,
    opt,
    val_data_loader=val_data_loader)
# # Add a learning rate scheduler
#
# Adjusting the learning rate is often critical to achieving good convergence to a local optimum. Fortunately, adjusting the learning rate using PyTorch requires only minor modifications to our training loop. While the "best" way to adjust the learning rate is nearly always problem specific, starting with larger values and then decaying the learning rate each epoch is often a good strategy to try first. See the official PyTorch documentation for more on [tuning learning rates](https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate).
# +
# optim.lr_scheduler?
# -
def fit(model_fn, loss_fn, train_data_loader, opt, lr_scheduler, checkpoint_filepath=None, val_data_loader=None, number_epochs=2):
    """Train with per-epoch LR scheduling, optional validation, and checkpointing.

    Parameters
    ----------
    model_fn : nn.Module toggled between .train() and .eval() each epoch.
    loss_fn : loss callable taking (predictions, targets).
    train_data_loader : iterable of (X, y) training batches.
    opt : optimizer driven by partial_fit.
    lr_scheduler : scheduler stepped once per epoch.
    checkpoint_filepath : where to save state each epoch; skipped when None.
    val_data_loader : optional iterable of validation batches.
    number_epochs : passes over the training data.

    Bug fix: checkpointing is now skipped when no filepath is given — the
    original called ``checkpoint(None, ...)`` every epoch, handing
    ``torch.save`` a None path.
    """
    for epoch in range(number_epochs):
        # train the model
        model_fn.train()
        for X_batch, y_batch in train_data_loader:
            partial_fit(model_fn, loss_fn, X_batch, y_batch, opt)
        # compute validation loss after each training epoch
        model_fn.eval()
        if val_data_loader is not None:
            val_acc, val_loss = validate(model_fn, loss_fn, val_data_loader)
            print(f"Training epoch: {epoch}, Validation accuracy: {val_acc}, Validation loss: {val_loss}")
        # update the learning rate
        lr_scheduler.step()
        # checkpoint key model state
        if checkpoint_filepath is not None:
            checkpoint(checkpoint_filepath, lr_scheduler, model_fn, opt)
# +
opt = optim.SGD(model_fn.parameters(), lr=1e-2, momentum=0.9)
# multiply the learning rate by 0.9 after every epoch
lr_scheduler = optim.lr_scheduler.ExponentialLR(opt, gamma=0.9, verbose=True)
_preprocess = lambda X, y: (X.view(-1, 3, 32, 32), y)
# NOTE(review): these loaders were already wrapped in the previous section;
# wrapping twice is harmless (view() is idempotent) but likely unintended.
train_data_loader = WrappedDataLoader(train_data_loader, _preprocess)
val_data_loader = WrappedDataLoader(val_data_loader, _preprocess)
# -
# Bug fix: pass the validation loader by keyword — positionally it bound to
# the `checkpoint_filepath` parameter, silently disabling validation.
fit(model_fn,
    loss_fn,
    train_data_loader,
    opt,
    lr_scheduler,
    val_data_loader=val_data_loader)
# # Experimenting with different architectures
#
# In practice, it is unlikely that you will be designing your own neural network architectures from scratch. Instead you will be starting from some pre-existing neural network architecture. The [torchvision](https://pytorch.org/vision/stable/) project contains a number of neural network architectures that have found widespread use in computer vision applications.
from torchvision import models
# FIX: the bare ``models.`` below was left over from interactive tab
# completion and is a syntax error when this notebook runs as a script.
# models.
# Many computer vision models were originally designed and trained on the [ImageNet](https://www.image-net.org/) dataset which has more classes and larger training images compared with the CIFAR-10 dataset. So we will need to make some adjustments to the architecture to accommodate for this.
model_fn = models.resnet18(num_classes=10)
torchinfo.summary(model_fn, input_size=(64, 3, 32, 32))
# Training this model with just a few CPUs would be impossible. In the next section we will see how to train large models like this using a GPU.
| notebooks/introduction-to-pytorch-part-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Rasch Model #
#
# This notebook demonstrates implementation of the Rasch model in TensorFlow. All theoretical parts are taken from an excellent textbook "Bayesian Reasoning and Machine Learning" by <NAME>. The Rasch Model is covered in chapter 22 of the book.
# Consider an exam in which student $s$ answers question $q$ either correctly $x_{qs} = 1$ or incorrectly $x_{qs} = 0$.
# For a set of $N$ students and $Q$ questions, the performance of all students is given in the $Q \times N$ binary
# matrix $X$. Based on this data alone we wish to evaluate the ability of each student, and at the same time estimate difficulty of each question. To learn both, we assign the probability that a student $s$ gets a question $q$ correct based on the student's latent ability $\alpha_s$ and the latent difficulty of the question $\delta_q$:
#
# $$p(x_{qs} = 1|\alpha, \delta) = \sigma(\alpha_s -\delta_q)$$
# Where $\sigma$ is sigmoid function.
#
# Making the i.i.d. assumption, the likelihood of the data $X$ under this model is:
#
# $$p(X|\alpha, \delta) = \prod_{s=1}^S\prod_{q=1}^Q \sigma(\alpha_s-\delta_q)^{x_{qs}} (1-\sigma(\alpha_s-\delta_q))^{1-x_{qs}}$$
# The log likelihood is then:
#
# $$L \equiv log(X|\alpha, \beta) = \sum_{q,s} { x_{qs} log \sigma(\alpha_s - \delta_q) +
# (1 - x_{qs}) log (1 - \sigma(\alpha_s - \delta_q))}$$
#
# And the partial derivatives are:
#
# $$\frac{\partial L}{\partial \alpha_s} = \sum_{q=1}^Q(x_{qs} - \sigma(\alpha_s - \delta_q))$$
#
# $$\frac{\partial L}{\partial \delta_q} = - \sum_{s=1}^S(x_{qs} - \sigma(\alpha_s - \delta_q))$$
#
# But since we are going to use TensorFlow, it will calculate the derivatives automatically, so these are just for the information
#Import
import numpy as np
import pandas as pd
import itertools
import tensorflow as tf
# Fix the RNG seed so the synthetic exam data generated below is reproducible.
# NOTE(review): pandas and itertools appear unused in this notebook.
np.random.seed(1239)
def sigmoid(x):
    """Element-wise logistic function, mapping real values into (0, 1)."""
    z = np.exp(-x)
    return 1 / (1 + z)
# +
#First we generate the test data
#The synthetic question:
# Question difficulties and student abilities on a grid; under the Rasch
# model P(correct) = sigmoid(ability - difficulty).
synthetic_questions = np.arange(-1.9, 3.1, 1)
synthetic_students = np.arange(0,2,0.1)
# Broadcasting yields a (students x questions) matrix of logits.
synthetic_logits = synthetic_students.reshape(-1,1) - synthetic_questions.reshape(1,-1)
synthetic_probs = sigmoid(synthetic_logits)
# Sample a binary correct/incorrect matrix from the Bernoulli probabilities.
synthetic_data = (synthetic_probs > np.random.rand(synthetic_probs.shape[0],synthetic_probs.shape[1])).astype('float')
# +
synthetic_data
# -
# Build the computation graph. tf.placeholder / tf.Session are TensorFlow
# 1.x APIs; this notebook predates TF 2.x eager execution.
data_shape = synthetic_data.shape
learning_rate = 0.1
tf.reset_default_graph()
X = tf.placeholder(dtype='float' ,shape=data_shape, name="X")
# alpha: per-student ability (column vector); delta: per-question difficulty (row vector).
alpha = tf.Variable(initial_value=np.zeros((data_shape[0],1)), name="alpha", dtype='float')
delta = tf.Variable(initial_value=np.zeros((1,data_shape[1])), name="delta", dtype='float')
# Bernoulli log-likelihood of the observed answers under the Rasch model.
# NOTE(review): tf.log(0) can occur if a sigmoid saturates; the
# sigmoid-cross-entropy-with-logits form would be numerically safer.
log_likelihood = tf.reduce_sum(X * tf.log(tf.sigmoid(alpha-delta)) + (1-X) * tf.log(1-tf.sigmoid(alpha-delta)))
cost = -log_likelihood
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(cost)
# +
init = tf.global_variables_initializer()
n_epochs = 4000
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        # Report the current cost every 1000 epochs.
        if epoch % 1000 == 0:
            print("Epoch", epoch, "Cost =", cost.eval(feed_dict={X: synthetic_data}))
        sess.run(training_op, feed_dict={X: synthetic_data})
    # Read the fitted parameters back out of the session.
    best_alpha = alpha.eval()
    best_delta = delta.eval()
# -
best_alpha
best_delta
# It got the questions in the right order, and the students are also roughly in the right order, but are affected by chance.
#
# One of the improvements of this model would be to add priors for $\alpha$ and $\delta$, which will cause regularization and the smoothing of both student ability scores and the question difficulty score.
| rasch_model/Rasch_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# https://www.tensorflow.org/extend/estimators
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# tensorflow
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
import tensorflow.contrib.learn as tflearn
import tensorflow.contrib.layers as tflayers
# keras
from tensorflow.contrib.keras.python.keras.layers import Dense, LSTM, GRU, Activation
from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file
# input data
from tensorflow.examples.tutorials.mnist import input_data
# estimators
from tensorflow.contrib import learn
# estimator "builder"
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
# helpers
import numpy as np
import random
import sys
# enable logs
# TF1-era logging API; prints INFO-level training progress to stderr.
tf.logging.set_verbosity(tf.logging.INFO)
def sample(preds, temperature=1.0):
    """Return the index of the largest entry of ``preds``.

    NOTE(review): ``temperature`` is accepted for API compatibility but is
    currently ignored -- this is greedy argmax, not temperature sampling.
    """
    best_index = np.argmax(preds)
    return best_index
# THE MODEL
def model_fn(features, targets, mode, params):
    """Model function for a tf.contrib.learn Estimator.

    Builds a single-layer GRU character model: ``features`` is a batch of
    one-hot encoded character windows of length ``params["maxlen"]`` over a
    vocabulary of size ``params["vocab_size"]``, and ``targets`` is the
    one-hot next character.  Returns a ModelFnOps bundling predictions,
    loss, train op and eval metrics according to ``mode``.
    """
    # 1. Configure the model via TensorFlow operations
    # First, build all the model, a good idea is using Keras or tf.layers
    # since these are high-level API's
    #lstm = GRU(128, input_shape=(params["maxlen"], params["vocab_size"]))(features)
    #preds = Dense(params["vocab_size"], activation='sigmoid')(lstm)
    # 0. Reformat input shape to become a sequence
    lstm1 = GRU(128, input_shape=(params["maxlen"], params["vocab_size"]),
                return_sequences=False)(features)
    #lstm2 = GRU(128)(lstm1)
    # Unscaled logits over the vocabulary, plus a softmax for predictions.
    preds = Dense(params["vocab_size"])(lstm1)
    preds_softmax = Activation("softmax")(preds)
    # 2. Define the loss function for training/evaluation
    loss = None
    train_op = None
    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.PREDICT:
        # softmax_cross_entropy expects the raw logits, not the softmax output.
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=targets, logits=preds)
    # 3. Define the training operation/optimizer
    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer="RMSProp",
        )
    # 4. Generate predictions
    predictions_dict = {
        "preds": preds_softmax
    }
    # 5. Define how you want to evaluate the model
    metrics = {
        "accuracy": tf.metrics.accuracy(tf.argmax(input=preds_softmax, axis=1), tf.argmax(input=targets, axis=1))
    }
    # 6. Return predictions/loss/train_op/eval_metric_ops in ModelFnOps object
    return model_fn_lib.ModelFnOps(
        mode=mode,
        predictions=predictions_dict,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=metrics)
# +
print('Getting data')
#path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
path = 'shakespeare.txt'
text = open(path).read().lower()
print('corpus length:', len(text))
# Character-level vocabulary plus lookup tables in both directions.
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 1
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
# One-hot encode: X is (n_sequences, maxlen, vocab); y is the next character.
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.float32)
y = np.zeros((len(sentences), len(chars)), dtype=np.float32)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1
print(X[0])
# +
# PARAMETERS
LEARNING_RATE = 0.01
BATCH_SIZE = 64
STEPS = 50
NUM_OUTPUTS_PRED = 40
# Set model params
model_params = {"learning_rate": LEARNING_RATE, "vocab_size": len(chars), "maxlen": maxlen}
# Instantiate Estimator
nn = tf.contrib.learn.Estimator(model_fn=model_fn, params=model_params)
# Score accuracy
# Alternate short training bursts with text generation so progress is visible.
for iteration in range(1, 600):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    # Fit
    print('-' * 40)
    print("Training")
    print('-' * 40)
    nn.fit(x=X, y=y, steps=STEPS, batch_size=BATCH_SIZE)
    # choose a random sentence
    start_index = random.randint(0, len(text) - maxlen - 1)
    sentence = text[start_index: start_index + maxlen]
    # generate output using the RNN model
    original_sentence = sentence
    generated = sentence
    for i in range(NUM_OUTPUTS_PRED):
        # One-hot encode the current window of maxlen characters.
        x = np.zeros((1, maxlen, len(chars)), dtype=np.float32)
        for t, char in enumerate(sentence):
            x[0, t, char_indices[char]] = 1.
        p = None
        # predict() yields one dict per example; keep the first prediction.
        for e in nn.predict(x):
            if p is None: p = e["preds"]
        next_index = sample(p)
        next_char = indices_char[next_index]
        # Slide the window one character forward and append the prediction.
        generated += next_char
        sentence = sentence[1:] + next_char
    print('\n' * 10, '-' * 100)
    print('HERE')
    print(generated)
    print(original_sentence)
    print('-' * 100, '\n' * 10)
# -
| code_samples/RNN/shakespeare/.ipynb_checkpoints/RNN_Nietzsche-Generating-50-batch-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''base'': conda)'
# name: python37464bitbaseconda889e2a20be874e85ba6bccbdfb8985e1
# ---
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.naive_bayes import GaussianNB
# -
# Load the diabetes dataset and inspect its schema and summary statistics.
df = pd.read_csv('datasets/diabetes.csv')
df.describe(include='all')
# + tags=[]
df.info()
# -
df.head()
# Feature matrix (clinical measurements) and binary outcome target.
X = df[['Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age']]
# NOTE(review): the scaler is fit on the full dataset before the split, which
# leaks test-set statistics into training; fitting on xtrain only is stricter.
xscale = StandardScaler()
x = xscale.fit_transform(X)
y = df['Outcome']
xtrain, xtest, ytrain, ytest = train_test_split(x,y, test_size =.2, random_state=0)
# Fit a Gaussian naive Bayes classifier and report test accuracy in percent.
clf = GaussianNB()
clf.fit(xtrain,ytrain)
clf.score(xtest, ytest) * 100
# NOTE(review): this confusion matrix mixes train and test rows; use
# (ytest, clf.predict(xtest)) for a held-out estimate.
confusion_matrix(y, clf.predict(x))
# Compare the distribution of predictions against the true labels.
sns.distplot(y, hist=False, bins=50)
sns.distplot(clf.predict(x),bins=50,hist=False,)
plt.title('prediction vs original')  # FIX: typo "orignal"
plt.show()
| supervised_ml/classfication/naive_bayes/naive_bayes_diabetes_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Домашнее задание 2 (20 + 15 + 25 = 60 pts )
# ## Задача 1 (20 pts)
#
# - (5 pts) Докажите, что $\mathrm{vec}(AXB) = (B^\top \otimes A)\, \mathrm{vec}(X)$, если $\mathrm{vec}(X)$ векторизация матрицы по столбцам. Что и почему изменится, если использовать векторизацию по строкам?
#
# - (2 pts) Какова сложность наивного вычисления $(A \otimes B) x$ и почему она такова? Покажите, как она может быть снижена.
#
# - (3 pts) Пусть матрицы $A$ и $B$ имеют спектральные разложения $A = S_A\Lambda_A S_A^{-1}$ и $B = S_B\Lambda_B S^{-1}_B$. Найдите собственные векторы и собственные значения у матрицы $A\otimes I + I \otimes B$.
#
# - (10 pts) Пусть $A = \mathrm{diag}\left(\frac{1}{1000},\frac{2}{1000},\dots \frac{999}{1000}, 1, 1000 \right)$. Оцените аналитически число итераций необходимых для решения линейной системы с матрицей $A$ с относительной точностью $10^{-4}$ используя
# - метод Ричардсона с оптимальным параметром (используйте евклидову норму)
# - метод Чебышёва (используйте евклидову норму)
# - метод сопряжённых градиентов (используйте $A$-норму).
#
# и сравните вашу оценку с экспериментальными результатами.
# +
# Место для Вашего решения
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Задача 2 (15 pts)
#
# ### Спектральное разбиение графа и обратная итерация
#
#
# Дан связный граф $G$ и соответствующий ему лапласиан $L = D - A$ с собственными значениями $0=\lambda_1, \lambda_2, ..., \lambda_n$, где $D$ диагональная матрица со степенями вершин на диагонали, а $A$ – матрица смежности.
# Вектор Фидлера – это собственный вектор, соответствующий собственному значению $\lambda_2$ матрицы $L$. Этот вектор может быть использован для разбиения графа: положительные значения соответствуют одной части графа, отрицательные – другой.
# -
# ### Метод обратной итерации
#
# Для поиска вектора Фидлера мы будем использовать метод Релея.
#
# * (5 pts) Выпишите матрицу ортопроектора на подпространство ортогональное собственному вектору матрице $L$, соответствующему собственному значению $0$ и докажите (аналитически), что это действительно ортопроектор.
#
# * (5 pts) Реализуйте функцию спектрального разделения графа ```partition```:
# INPUT:
# A - adjacency matrix (scipy.sparse.csr_matrix)
# num_iter_fix - number of iterations with fixed shift (float)
# shift - (float number)
# num_iter_adapt - number of iterations with adaptive shift (int) -- Rayleigh quotient iteration steps
# x0 - initial guess (1D numpy.ndarray)
# OUTPUT:
# x - normalized Fiedler vector (1D numpy.ndarray)
# eigs - eigenvalue estimations at each step (1D numpy.ndarray)
# eps - relative tolerance (float)
def partition(A, shift, num_iter_fix, num_iter_adapt, x0, eps):
    """Spectral graph bisection via shifted inverse / Rayleigh-quotient iteration.

    Student stub -- not implemented yet.  Intended contract (see the task
    description above): run ``num_iter_fix`` iterations with a fixed shift,
    then ``num_iter_adapt`` Rayleigh-quotient steps, stopping early once the
    relative eigenvalue change drops below ``eps``; return the normalized
    Fiedler vector and per-step eigenvalue estimates.
    NOTE(review): the argument order here (``shift`` before ``num_iter_fix``)
    differs from the docstring in the task cell above -- confirm before use.
    """
    raise NotImplementedError()
# Алгоритм должен останавливаться спустя `num_iter_fix + num_iter_adapt` итераций или если выполнено следующее соотношение
#
# $$ \boxed{\|\lambda_k - \lambda_{k-1}\|_2 / \|\lambda_k\|_2 \leq \varepsilon} \text{ на некотором шаге } k.$$
#
# Не забудьте использовать ортогональную проекцию из задачи выше в итерационном процессе, чтобы получить верный собственный вектор.
# Также хорошей идеей могло быть использование ```shift=0``` до запуска адаптивного режима. Однако это невозможно в силу вырожденности матрицы $L$, а разреженное разложение в ```scipy``` не работает в этом случае. Поэтому вместо нулевого сдвига предлагается использовать некоторое малое значение.
#
# * (3 pts) Сгенерируйте случайный `lollipop_graph`, используя библиотеку `networkx`, и найдите его разбиение. [Нарисуйте](https://networkx.github.io/documentation/networkx-1.9/examples/drawing/labels_and_colors.html) этот граф с вершинами, окрашенными в соответствии с разбиением.
#
# * (2 pts) Запустите метод со случайного начального приближение ```x0```, установите ```num_iter_fix=0``` и объясните, почему метод может сойтись к неверному собственному значению?
# +
# Место для Вашего решения
# + [markdown] id="ru70NGUlOGIy" slideshow={"slide_type": "fragment"}
# ## Задача 3 (25 pts)
#
# ## PageRank для DBLP
#
#
# #### Введение дэмпфирующего фактора
#
# * (5 pts) Напишите функцию ```pagerank_matrix(G)```, которая из поданной на вход матрицы смежности $G$ (в плотном или разреженном форматах) создаёт и возвращает матрицу PageRank'a $A$.
# -
# INPUT: G - np.ndarray or sparse matrix
# OUTPUT: A - np.ndarray (of size G.shape) or sparse matrix
def pagerank_matrix(G):
    """Build the PageRank transition matrix from adjacency matrix ``G``.

    Student stub: ``A`` is undefined until the implementation is filled in,
    so calling this as-is raises NameError.
    """
    # enter your code here
    return A
# + [markdown] id="ru70NGUlOGIy" slideshow={"slide_type": "fragment"}
# * (5 pts) Реализуйте степенной метод для заданной матрицы $A$, начального вектора $x_0$ и числа итераций```num_iter``` в соответствии с прототипом ниже. Функция возвращает оценку собственного вектора, собственного значения и нормы невязки при сходимости. Проверьте, что ваша реализация сходится для матрицы $\begin{bmatrix} 2 & -1 \\ -1 & 2 \end{bmatrix}$.
# + id="ru70NGUlOGIy" slideshow={"slide_type": "fragment"}
# INPUT: A - np.ndarray (2D), x0 - np.ndarray (1D), num_iter - integer (positive)
# OUTPUT: x - np.ndarray (of size x0), l - float, res - np.ndarray (of size num_iter + 1 [include initial guess])
def power_method(A, x0, num_iter): # 5 pts
    """Power iteration: estimate the dominant eigenpair of ``A``.

    Student stub: ``x``, ``l`` and ``res`` are undefined until the
    implementation is filled in, so calling this as-is raises NameError.
    """
    # enter your code here
    return x, l, res
# + [markdown] id="ru70NGUlOGIy" slideshow={"slide_type": "fragment"}
# Для избежания проблем со сходимостью степенного метода, обсуждённых на семинаре, можно ввести дэмпфирующий фактор и преобразовать матрицу PageRank'a следующим образом:
#
# $$
# A_d = dA + \frac{1-d}{N} \begin{pmatrix} 1 & \dots & 1 \\ \vdots & & \vdots \\ 1 & \dots & 1 \end{pmatrix},
# $$
#
# где $d$ малый параметр из отрезка $[0,1]$ (обычно $d=0.85$), который называется **демпфирующим фактором**, $A$ размера $N\times N$. После такого преобразования максимальное собственное значение матрицы $A_d$ обладает кратностью равной 1.
#
# Именно это [предложили](http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf) сделать <NAME> и <NAME> для получения своего алгоритма ранжирования веб-страниц.
#
#
# * (4 pts) Реализуйте функцию быстрого умножения матрицы $A_d$ на вектор в соответствии с прототипом ниже ```pagerank_matvec(A, d, x)```. На вход функция принимает матрицу PageRank'a $A$ (в разреженном формате, например ```csr_matrix```), демпфирующий фактор $d$ и начальный вектор $x$, а возвращает $A_dx$.
# -
# INPUT: A - np.ndarray (2D), d - float (from 0.0 to 1.0), x - np.ndarray (1D, size of A.shape[0/1])
# OUTPUT: y - np.ndarray (1D, size of x)
def pagerank_matvec(A, d, x): # 2 pts
    """Compute the damped PageRank product A_d @ x without forming A_d densely.

    Student stub: ``y`` is undefined until the implementation is filled in,
    so calling this as-is raises NameError.
    """
    # enter your code here
    return y
# + [markdown] id="ru70NGUlOGIy" slideshow={"slide_type": "fragment"}
# * (2 pts) Сгенерируйте случайную матрицу смежности $1000 \times 1000$ (если помещается в память, то увеличьте размер до $10000 \times 10000$) со 100 ненулевыми элементами и сравните время вычисления произведения матрицы и случайного вектора с помощью ```pagerank_matvec``` и явным вычислением $A_dx$.
#
# Загрузите данные [отсюда](https://goo.gl/oZVxEa), разархивируйте и поместите файлы `dblp_authors.npz` и `dblp_graph.npz` в папку рядом с этим ноутбуком. Каждое значение (имя автора) из `dblp_authors.npz` соответствует строке/столбцу в матрице из `dblp_graph.npz`. Значения в строке `i` и столбце `j` матрицы `dblp_graph.npz` соответствуют числу раз, которое автор `i` цитировал статьи автора `j`. Давайте теперь найдём наиболее влиятельных авторов по величине Pagerank'a на основе данных DBLP.
#
# * (5 pts) Загрузите взвешенную матрицу смежности и список авторов с помощью функции ```load_dblp(...)```. Выведите её плотность, то есть отношение числа ненулевых элементов к общему числу элементов. Найдите top-10 наиболее цитируемых авторов по значениям элементов этой матрицы. Далее сделайте все элементы равными 1 для простоты. Получите матрицу Pagerank'a из матрицы смежности и проверьте её стохастичность (сумма элементов по строкам/столбцам равна 1).
#
# * (1 pts) Для того чтобы передать функцию ```pagerank_matvec``` в функцию ```power_method``` для быстрого вычисления матрично-векторного произведения $A_dx$, вы можете сделать ```LinearOperator```:
# ```python
# L = scipy.sparse.linalg.LinearOperator(A.shape, matvec=lambda x, A=A, d=d: pagerank_matvec(A, d, x))
# ```
# Вызов ```L@x``` или ```L.dot(x)``` будет реализован путём вызова функции ```pagerank_matvec(A, d, x)```, таким образом, вы можете передать $L$ вместо матрицы $A$ в функцию ```power_method``` напрямую.
#
#
# * (2 pts) Запустите степенной метод начиная с вектора из всех 1 и нарисуйте норму невязок $\|A_dx_k - \lambda_k x_k\|_2$ как функцию от $k$ для $d=0.85$.
#
#
# * (1 pts) Выведите имена top-10 авторов по величине PageRank'a на основании графа DBLP с $d=0.85$. Прокомментируйте результат и сравните его со списком наиболее цитируемых авторов.
# + id="L5ZnW7m-OGIz"
from scipy.sparse import load_npz
import numpy as np
def load_dblp(path_auth, path_graph):
    """Load the DBLP citation graph (sparse, float) and the author-name array."""
    with np.load(path_auth) as npz:
        authors = npz['authors']
    graph = load_npz(path_graph).astype(float)
    return graph, authors
# Requires dblp_authors.npz and dblp_graph.npz next to this notebook (download link above).
G, authors = load_dblp('dblp_authors.npz', 'dblp_graph.npz')
# +
# Место для Вашего решения
| hw/hw2/hw2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Description:
# Explore the data inside pdf_files.json
#
#
#
# # TODO:
# -
import os
import json
from zipfile import ZipFile
# from pikepdf import Pdf
# from PyPDF2 import PdfFileReader
from io import BytesIO
import unicodedata
# Move to the project root so the relative data path below resolves.
# NOTE(review): assumes the notebook runs from one level below the root.
os.chdir("..")
INTER_PATH = os.path.join("data", "interim")
with open(os.path.join(INTER_PATH, "pdf_files.json")) as json_file:
    data = json.load(json_file)
# + jupyter={"outputs_hidden": true}
data.keys()
# -
# No Text for this key
data['Sembrando Vida Operations_Mexico']
# + jupyter={"outputs_hidden": true}
# Some decoding problems here?
data['Sembrando Vida_Mexico']
# + jupyter={"outputs_hidden": true}
# No spaces in this one. By looking at the pdf it seems the document was physically scanned and that might be causing problems.
data["Sembrando Vida Report"]
# -
| tasks/extract_text/notebooks/pdf_text_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3 jocas
# language: python
# name: jocas
# ---
# ## I. Load programs and data
# In this part we load:
# * Some functions from *job_title_processing* package + Python packages
# * Dataset of annotated job offers. Data comes from [Pôle emploi API](https://www.emploi-store-dev.fr/portail-developpeur-cms/home/catalogue-des-api/documentation-des-api/api/api-offres-demploi-v2.html). They were collected in 2019 and are not open data for now.
# * Data from the French occupation nomenclatura (ROME) wich provides for each occupation code a list of possible job titles (available [here](https://www.pole-emploi.org/opendata/repertoire-operationnel-des-meti.html?type=article))
# ### a. Load useful functions
# Python packages
import pickle, os, math
import pandas as pd
import numpy as np
import seaborn as sns
from collections import Counter
from sklearn import metrics
# Local job titles functions
from job_title_processing import JobOffersTitleLemmatizer, JobOffersTitleCleaner
from job_title_processing.tools.svm_classification import *
from job_title_processing.tools.utils import *
# Global params
# CSV dialect shared by all input files, and the 14 first-letter ROME domains.
ENCODING, SEP = "utf-8-sig", ";"
DOMAINS = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N']
# +
# Graphical tools
# %matplotlib inline
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib import gridspec
plt.rcParams['font.family'] = "DejaVu Sans"
plt.rcParams['font.size'] = 23
# One fixed colour per ROME domain so every figure uses the same palette.
DOMAINS_PALETTE = [
    '#1F497D', '#C0504D', '#9BBB59', '#8064A2', '#6BB1C9' , '#CC6600', '#0066CC',
    '#A50021', '#62C400', '#5F37AF', '#4BACC6', '#F79646','#B7C0EF' ,'#E3A5A5'
]
# -
# ### b. Load French data from a csv file
# +
ROOT = load_root_path() # Package root
fr_folder = os.path.join(ROOT, "data", "FR")
# Load French data (API Pole emploi 2019 - NOT OPEN DATA)
file = os.path.join(fr_folder, "raw_data-poleemploi_2019-01-01_2019-12-31.csv")
columns = {'intitule':'title', 'romeCode':'ROME'}
pole_emploi = pd.read_csv(file, encoding=ENCODING, sep=SEP, usecols=columns.keys())
pole_emploi.rename(columns=columns, inplace=True)
# The ROME domain is the first letter of the occupation code.
pole_emploi['domain'] = pole_emploi.ROME.str[0]
# Load titles from official nomenclature
file = os.path.join(fr_folder, "ROME_label.csv")
columns = {'titre':'title', 'ROME':'ROME'}
nomenclature = pd.read_csv(file, encoding=ENCODING, sep=SEP)
nomenclature.rename(columns=columns, inplace=True)
nomenclature['domain'] = nomenclature.ROME.str[0]
# Merge data
# NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
# pd.concat([pole_emploi, nomenclature], ignore_index=True) is the
# forward-compatible equivalent.
data = pole_emploi.append(nomenclature, ignore_index=True)
# -
# ## II. Explore data
# Basic label statistics over the merged dataset.
print("Dataset size: " + str(len(data)))
label_count = Counter(data.ROME)
print("Most represented label : " + str(label_count.most_common()[0]))
print("Less represented label : " + str(label_count.most_common()[-1]))
print("Mean samples per label : " + str(np.mean(list(label_count.values()))))
# +
# Nomenclatura distribution
# Share of occupation codes per domain in the official nomenclature.
# NOTE(review): "nomanclature" below is a variable-name typo kept as-is.
unique_nomanclature = nomenclature.drop_duplicates(subset=['ROME'])
pct_nomenclature = unique_nomanclature.domain.value_counts().div(len(unique_nomanclature)/100)
pct_nomenclature_T = pd.DataFrame(pct_nomenclature).T[DOMAINS].rename(index={"domain": "Nomenclature"})
# Pole emploi data distribution
pct_pole_emploi = pole_emploi.domain.value_counts().div(len(pole_emploi)/100)
pct_pole_emploi_T = pd.DataFrame(pct_pole_emploi).T[DOMAINS].rename(index={"domain": "Pole emploi"})
# Merge
df = pct_pole_emploi_T.append(pct_nomenclature_T)
# %matplotlib inline
# Init figure and grid
fig = plt.figure(2, figsize=(12, 3))
gs = gridspec.GridSpec(2, 1, height_ratios =[3, 1])
# Figure
ax = plt.subplot(gs[0])
ax = df.plot(kind='barh', stacked=True, color=DOMAINS_PALETTE, ax=ax, width=0.38, legend=None)
ax.set(title='ROME occupation code distribution in dataset vs nomenclature')
fig.patch.set_visible(False)
# Y Axis
ax.set_yticklabels(['Dataset', 'ROME nomenclature'])
# X axis
plt.axis([ 0, 100.1, None, None]) # X-axis limits
fmt = '%.0f%%' # Add percent
xticks = mtick.FormatStrFormatter(fmt)
ax.xaxis.set_major_formatter(xticks)
ax.xaxis.grid(color='k'), ax.set_axisbelow(True) # Add vertical lines
# Frame
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Legend
# Prepend an empty pseudo-handle so the legend gets a bold title-like entry.
handles, labels = ax.get_legend_handles_labels()
ax_leg = plt.subplot(gs[1])
ax_leg.axis("off")
ax_leg.legend(
    [plt.plot([],marker="", ls="")[0]] + handles, ["$\\bf{ROME\ first\ letter}$"] + labels, loc='best',
    bbox_to_anchor=(0.42, 0., 0.5, 0.5), borderaxespad=0., ncol=len(labels) + 1, markerscale=1, prop={'size': 10},
    handlelength=1, handleheight=1, frameon=False
)
plt.show()
# -
# +
# Data distribution: one point per ROME code, grouped by first-letter domain.
df_samples = pd.DataFrame.from_dict(label_count, orient='index', columns=['n_samples'])
df_samples['ROME'] = df_samples.index
df_samples = df_samples.reset_index(drop=True)
df_samples['domain'] = df_samples.ROME.str[0]
# Modify scale
df_samples['n_log'] = df_samples.n_samples.apply(math.log10)
# Init figure
f, ax = plt.subplots(figsize=(10, 5))
sns.stripplot(
    x="domain", y="n_samples", data=df_samples, alpha=.6,
    order=DOMAINS, palette=DOMAINS_PALETTE, linewidth=1, size=10
)
# Param axis
ax.set(yscale="log", ylim=(1, 100000))
ax.minorticks_off()
ax.set(xlabel='ROME first letter', ylabel='Sample size')
ax.set(title='Distribution of ROME occupation codes samples\' sizes (in dataset)')
# Add mean number of samples per domain
for i, domain in enumerate(DOMAINS):
    # BUG FIX: the column is named 'domain'; `df_samples.dom` raised an
    # AttributeError, so the per-domain mean markers never rendered.
    df_domain = df_samples.loc[df_samples.domain == domain]
    mean_domain = np.mean(df_domain.n_samples)
    plt.plot(i, mean_domain, marker='*', markersize=15, color='black', zorder=i+3)
# Legend
star = mlines.Line2D([], [], color='black', marker='*', linestyle='None', markersize=15, label='Mean')
plt.legend(handles=[star],loc=4)
# Results
plt.show()
# -
# -
# ## III. Clean data and train linear SVM model
# ### a. Clean data
# Data are cleaned following this process:
# * lower case, delete digits and special char
# * delete stopwords
# * delete *jobwords* (eg. contract type, schedule, ..)
# * remove location such as country, departement, region
# Cleaner: lower-cases, strips digits/special characters, stopwords,
# job-ad boilerplate (contract type, schedule) and location names.
cleaner = JobOffersTitleCleaner(language='FR', jobword=True, location=True)
cleaner.clean_str("Ingénieur à mi-temps en CDD")
cleaner.clean_str("Ingénieur à mi-temps en CDD en rhone alpes")
cleaner.clean_str("Ingénieur à mi-temps en CDD à Toulouse")
cleaner.clean_str("Maçon/Maçonne")
# ### b. Lemmatize data
#
# Lemmatize job titles and keep only one occurrence of each word in a title. **In the end, word order is mixed, since it doesn't matter here.**
# The lemmatizer reuses the cleaner, then lemmatizes and de-duplicates words.
lemmatizer = JobOffersTitleLemmatizer(language='FR', cleaner=cleaner)
lemmatizer.lemmatize_str("Maçon/Maçonne")
lemmatizer.lemmatize_str("Ingénieur à mi-temps en CDD en rhone alpes")
lemmatizer.lemmatize_str("Aide comptable à mi-temps en CDD à Angers")
# +
# TO UNCOMMENT
#
# X = lemmatizer.lemmatize_series(data.title)
# X_train, X_test, Y_train, Y_test = split(X, Y_pole_emploi, folder=fr_folder)
# -
# Load lemmatized dataset
# The `with` statement already closes the file on exit; the explicit
# f.close() that followed it was redundant and has been removed.
filename = os.path.join(fr_folder, "train_test.pickle")
with open(filename, 'rb') as f:
    X_train, X_test, Y_train, Y_test = pickle.load(f)
# ### c. Train linear SVM model
# +
# TO UNCOMMENT
#
# Train model and save it
# svm = train_svm(X_train, Y_train, folder=fr_folder)
# -
# Load model
# The `with` statement already closes the file on exit; the explicit
# f.close() that followed it was redundant and has been removed.
filename = os.path.join(fr_folder, "svm_C-1_mindf-1.pickle")
with open(filename, 'rb') as f:
    svm = pickle.load(f)
# ## IV. Evaluate model
# +
# Get global metrics at each level on test set
# Levels: 1 = full ROME code, 2 = occupation group, 3 = domain (first letter).
Y_pred = predict_svm(svm, X_test)
print("****** Level 1: ROME occupation code ******")
global_metrics_svm(Y_test, Y_pred, level=1)
print("\n ****** Level 2: occupation group ******\n")
global_metrics_svm(Y_test, Y_pred, level=2)
print("\n ****** Level 3: occupation domain ******\n")
global_metrics_svm(Y_test, Y_pred, level=3)
# +
from job_title_processing.tools.svm_classification import group_by_accuracy
# Get accuracy by domains
domain = [code[0] for code in Y_test]
df = pd.DataFrame(list(zip(Y_test, Y_pred, domain)), columns=['Y_test', 'Y_pred', 'domain'])
# Get accuracy scores by domains
ROME_acc_by_domaine, domaine_accuracy, mean_accuracy = group_by_accuracy(df, group_by="domain", values=DOMAINS)
# Get f1 by domains
# NOTE(review): group_by_f1 is not imported here explicitly; it comes from the
# wildcard import of svm_classification at the top of the notebook.
ROME_f1_by_domaine, macro_f1, micro_f1 = group_by_f1(df, group_by="domain", values=DOMAINS)
# +
# Graphics
# Box plot of per-ROME-code accuracy, grouped by domain, with per-domain
# weighted and unweighted averages overlaid as markers.
# %matplotlib inline
f, ax = plt.subplots(figsize=(8, 5))
f.subplots_adjust(bottom=0.2)
# Aggregated boxplots
ROME_acc_by_domaine.boxplot()
plt.xticks(rotation=25, fontsize=14)
# Add mean accuracy
for i, accuracy in enumerate(domaine_accuracy):
    plt.plot(i+1, accuracy, marker='*', markersize=10, color='maroon')
for i, accuracy in enumerate(mean_accuracy):
    plt.plot(i+1, accuracy, marker='.', markersize=10, color='forestgreen')
# NOTE(review): "Weigthed"/"Unweigthed" are typos in the rendered legend text.
star = mlines.Line2D([], [], color='maroon', marker='*', linestyle='None', markersize=10, label='Weigthed avg accuracy')
dot = mlines.Line2D([], [], color='forestgreen', marker='.', linestyle='None', markersize=10, label='Unweigthed avg accuracy')
# Legend and axis
ax.legend(handles=[star, dot])
ax.set_xlabel('ROME first letter', fontsize = 15)
ax.set_ylabel('Accuracy', fontsize = 15)
ax.set_title('SVM Linear accuracy per ROME (on test set)',fontsize = 17)
file = os.path.join(ROOT, 'acc.png')
plt.savefig(file)
plt.show()
# +
# Graphics
# Box plot of per-ROME-code F1, grouped by domain, with per-domain micro and
# macro averages overlaid as markers.
# %matplotlib inline
f, ax = plt.subplots(figsize=(8, 5))
f.subplots_adjust(bottom=0.2)
ROME_f1_by_domaine.boxplot()
plt.xticks(rotation=25,fontsize=14)
# Add micro and macro F1
for i, f1 in enumerate(micro_f1):
    plt.plot(i+1, f1, marker='*', markersize=10, color='orange')
for i, f1 in enumerate(macro_f1):
    plt.plot(i+1, f1, marker='.', markersize=10, color='r')
star = mlines.Line2D([], [], color='orange', marker='*', linestyle='None', markersize=10, label='Micro average F1-score')
dot = mlines.Line2D([], [], color='r', marker='.', linestyle='None', markersize=10, label='Macro average F1-score')
# Legend and axis
ax.legend(handles=[star, dot])
ax.set_xlabel('ROME first letter', fontsize = 15)
ax.set_ylabel('F1-score', fontsize = 15)
ax.set_title('SVM Linear F1-score per ROME (on test set)',fontsize = 17)
file = os.path.join(ROOT, 'f1.png')
plt.savefig(file)
plt.show()
# +
from job_title_processing.tools.svm_classification import plot_cm
# Confusion matrix
# Confusion at domain level: compare only the first letter of each code.
y_pred = [code[0] for code in Y_pred]
y_true = [code[0] for code in Y_test]
# Get confusion matrix
cm = metrics.confusion_matrix(y_true, y_pred, labels = sorted(list(set(y_true+y_pred))))
# Plot confusion matrix (re-evaluate it)
fig = plot_cm(y_true, y_pred,figsize=(9,9), normalize=True)
# file = os.path.join(ROOT, 'confusion.png')
# fig.savefig(file,dpi=fig.dpi, bbox_inches = "tight")
| notebooks/FR_classifier_results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Classificador Multi-layer Perceptron - Implantação
#
# Este componente realiza predições usando um modelo Multi-layer Perceptron para classificação usando [Scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html). <br>
# Scikit-learn é uma biblioteca open source de machine learning que suporta apredizado supervisionado e não supervisionado. Também provê várias ferramentas para ajustes de modelos, pré-processamento de dados, seleção e avaliação de modelos, além de outras funcionalidades.
#
# Este notebook apresenta:
# - como carregar modelos e outros resultados do treinamento.
# - como utilizar um modelo para fornecer predições em tempo real.
# ## Declaração de Classe para Predições em Tempo Real
#
# A tarefa de implantação cria um serviço REST para predições em tempo real.<br>
# Para isso você deve criar uma classe `Model` que implementa o método `predict`.
# +
# %%writefile Model.py
import joblib
import numpy as np
import pandas as pd
class Model(object):
    """Real-time REST prediction wrapper around a persisted MLP classification pipeline."""

    def __init__(self):
        # Load the training artifacts: fitted pipeline, feature metadata and encoders.
        bundle = joblib.load("/tmp/data/mlp-classifier.joblib")
        self.pipeline = bundle["pipeline"]
        self.features_names_training = bundle["columns"]
        self.columns_to_filter = bundle["columns_to_filter"]
        self.label_encoder = bundle["label_encoder"]
        self.method = bundle["method"]
        self.new_columns = bundle["new_columns"]
        self.features_after_pipeline = bundle["features_after_pipeline"]

    def class_names(self):
        """Column names of the array returned by predict(): inputs then prediction columns."""
        return np.concatenate((self.columns_to_filter, self.new_columns)).tolist()

    def predict(self, X, feature_names, meta=None):
        """Predict on rows X; returns the filtered inputs plus per-class probabilities and label."""
        df = pd.DataFrame(X)
        if feature_names:
            # Reorder the incoming features to the order used at training time.
            df = pd.DataFrame(X, columns=feature_names)[self.columns_to_filter]
            X = df.to_numpy()
        # Append class probabilities and the decoded predicted label to the frame.
        probabilities = self.pipeline.predict_proba(X)
        decoded = self.label_encoder.inverse_transform(self.pipeline.predict(X))
        df.loc[:, self.new_columns[:-1]] = probabilities
        df.loc[:, self.new_columns[-1]] = decoded
        return df.to_numpy()
| tasks/mlp-classifier/Deployment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Erasmus+ ICCT project (2018-1-SI01-KA203-047081)
# %matplotlib notebook
import control as c
import ipywidgets as w
import numpy as np
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Toggle cell visibility
from IPython.display import HTML
tag = HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide()
} else {
$('div.input').show()
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
Toggle cell visibility <a href="javascript:code_toggle()">here</a>.''')
display(tag)
# Hide the code completely
# from IPython.display import HTML
# tag = HTML('''<style>
# div.input {
# display:none;
# }
# </style>''')
# display(tag)
# -
# ## PID krmiljenje sistemov prvega reda s časovnim zadržkom
#
# V tem primeru je prikazana uporaba PID krmilnika (oziroma enega izmed njegovih podtipov) za krmiljenje LTI sistema prvega reda s časovnim zadržkom.
#
# Časovni zadržek je simuliran z uporabo Padéjeve aproksimacije, ki omogoča, da sistem ostane linearen. Aproksimant je ulomek dveh polinomov visoke stopnje, ki ju lahko vključimo v prenosno funkcijo kot povratno zvezo.
#
# <br>
# $$G_{Padé}(s)=\frac{\sum_{i=0}^{m}a_ix^i}{1+\sum_{j=1}^{n}b_jx^j}$$
# <img src="Images/first_order_time_delay.png" width="40%" />
# <b>Izberi časovno konstanto sistema skupaj s časovnim zadržkom in stopnjo polinoma imenovalca v aproksimantu!</b>
#
# <!-- In the following example, we'll be using a PID controller (or one of its subtypes) to control an LTI (linear time-invariant system) that is approximating a first-order system with a time delay.
#
# The time delay is simulated using the Padé approximation, allowing the system to remain linear. The approximant is a fraction of two large polynomials that can be included in the transfer function as the feedback system.
#
# <br>
# $$G_{Padé}(s)=\frac{\sum_{i=0}^{m}a_ix^i}{1+\sum_{j=1}^{n}b_jx^j}$$
# <img src="Images/first_order_time_delay.png" width="40%" />
# <b>Choose a time constant for the system, along with the desired time delay and the degree of the approximating polynomial!</b> -->
# +
# Figure definition
fig1, ((f1_ax1), (f1_ax2)) = plt.subplots(2, 1, num='Slika 1')
fig1.set_size_inches((9.8, 5))
fig1.set_tight_layout(True)
f1_line1, = f1_ax1.plot([], [])
f1_line2, = f1_ax2.plot([], [])
f1_ax1.grid(which='both', axis='both', color='lightgray')
f1_ax2.grid(which='both', axis='both', color='lightgray')
f1_ax1.autoscale(enable=True, axis='both', tight=True)
f1_ax2.autoscale(enable=True, axis='both', tight=True)
f1_ax1.set_title('Amplitudno-frekvenčni del Bodejevega diagrama', fontsize=11)
f1_ax1.set_xscale('log')
f1_ax1.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=10)
f1_ax1.set_ylabel(r'$A\/$[dB]', labelpad=0, fontsize=10)
f1_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
f1_ax2.set_title('Fazno-frekvenčni del Bodejevega diagrama', fontsize=11)
f1_ax2.set_xscale('log')
f1_ax2.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=10)
f1_ax2.set_ylabel(r'$\phi\/[°]$', labelpad=0, fontsize=10)
f1_ax2.tick_params(axis='both', which='both', pad=0, labelsize=8)
# System model
def system_model(T1, Tdelay, pade_deg):
    """Print the plant transfer function and poles, and redraw its Bode plot.

    T1       -- plant time constant [s]
    Tdelay   -- time delay [s], simulated via a Padé approximation
    pade_deg -- degree of the Padé denominator polynomial
    """
    del_num, del_den = c.pade(Tdelay, pade_deg, -1)
    W_sysnodel = c.tf([1], [T1, 1])
    W_sys = c.series(W_sysnodel, c.tf(del_num, del_den))
    print('Prenosna funkcija sistema brez časovnega zadržka:')
    print(W_sysnodel)
    # System analysis: poles of the delay-free part only
    poles = c.pole(W_sysnodel)
    print('Poli sistema brez časovnega zadržka:\n')
    print(poles)
    global f1_line1, f1_line2
    # Remove the previous curves via Artist.remove(): mutating Axes.lines
    # directly was deprecated in Matplotlib 3.5 and removed in 3.7.
    f1_line1.remove()
    f1_line2.remove()
    mag, phase, omega = c.bode_plot(W_sys, Plot=False)  # Bode data only, no figure
    f1_line1, = f1_ax1.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')
    f1_line2, = f1_ax2.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')
    f1_ax1.relim()
    f1_ax2.relim()
    f1_ax1.autoscale_view()
    f1_ax2.autoscale_view()
# GUI widgets
style = {'description_width': 'initial','button_width':'180px'}
T1_slider = w.FloatLogSlider(value=0.1, base=10, min=-4, max=1, description='T1 [s] :', continuous_update=False,
layout=w.Layout(width='75%'))
Tdelay_slider = w.FloatLogSlider(value=0.1, base=10, min=-2, max=0, description='zadržek [s] :', continuous_update=False,
layout=w.Layout(width='75%'))
deg_slider = w.IntSlider(value=1, min=1, max=10, step=1, description='Stopnja Padéjevega imenovalca:', continuous_update=False,
layout=w.Layout(width='75%'), style=style)
input_data = w.interactive_output(system_model, {'T1':T1_slider, 'Tdelay':Tdelay_slider, 'pade_deg':deg_slider})
display(w.HBox([T1_slider, Tdelay_slider, deg_slider]), input_data)
# -
# Po končani seznanitvi s karakteristiko sistema <b>izberi tip krmilnika!</b>
#
# <!-- After observing the system's characteristics, <b>select a controller type!</b> -->
# +
#Controller type select
typeSelect = w.ToggleButtons(
options=[('P', 0), ('PI', 1), ('PD', 2), ('PID', 3), ('realni PID', 4)],
description='Tip krmilnika: ', style={'description_width':'15%'})
display(typeSelect)
# -
# <b>Nastavi izbran PID krmilnik tako, da bodo čas prevzpona, čas umiritve, prenihaj in odstopek v stacionarnem stanju minimalni.</b><br>
# Optimalnih vrednosti naštetih parametrov ni možno doseči z eno samo nastavitvijo PID krmilnika. Ustvari več rešitev, od katerih vsako prilagodi za posamezen parameter.
#
# <!-- <b>Tune the selected controller so that rising/settling time, overshoot, or remaining error is minimized!</b><br>
# It is not possible to get the best results for each parameter in a single setup. Create multiple solutions, one for each type! -->
# +
# PID control
# Figure definition
fig2, ((f2_ax1, f2_ax2, f2_ax3), (f2_ax4, f2_ax5, f2_ax6)) = plt.subplots(2, 3, num='Slika 2')
fig2.set_size_inches((9.8, 5))
fig2.set_tight_layout(True)
f2_line1, = f2_ax1.plot([], [])
f2_line2, = f2_ax2.plot([], [])
f2_line3, = f2_ax3.plot([], [])
f2_line4, = f2_ax4.plot([], [])
f2_line5, = f2_ax5.plot([], [])
f2_line6, = f2_ax6.plot([], [])
f2_ax1.grid(which='both', axis='both', color='lightgray')
f2_ax2.grid(which='both', axis='both', color='lightgray')
f2_ax3.grid(which='both', axis='both', color='lightgray')
f2_ax4.grid(which='both', axis='both', color='lightgray')
f2_ax5.grid(which='both', axis='both', color='lightgray')
f2_ax6.grid(which='both', axis='both', color='lightgray')
f2_ax1.autoscale(enable=True, axis='both', tight=True)
f2_ax2.autoscale(enable=True, axis='both', tight=True)
f2_ax3.autoscale(enable=True, axis='both', tight=True)
f2_ax4.autoscale(enable=True, axis='both', tight=True)
f2_ax5.autoscale(enable=True, axis='both', tight=True)
f2_ax6.autoscale(enable=True, axis='both', tight=True)
f2_ax1.set_title('Odziv zaprtozančnega sistema \n na enotsko skočno funkcijo', fontsize=9)
f2_ax1.set_xlabel(r'$t\/[s]$', labelpad=0, fontsize=8)
f2_ax1.set_ylabel(r'$x\/[m]$', labelpad=0, fontsize=8)
f2_ax1.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax2.set_title('Nyquistov diagram', fontsize=9)
f2_ax2.set_xlabel(r'$Re$', labelpad=0, fontsize=8)
f2_ax2.set_ylabel(r'$Im$', labelpad=0, fontsize=8)
f2_ax2.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax3.set_title('Amplitudno-frekvenčni del \n Bodejevega diagrama', fontsize=9)
f2_ax3.set_xscale('log')
f2_ax3.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=8)
f2_ax3.set_ylabel(r'$A\/$[dB]', labelpad=0, fontsize=8)
f2_ax3.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax4.set_title('Odziv zaprtozančnega sistema \n na enotsko impulzno funkcijo', fontsize=9)
f2_ax4.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax4.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax4.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax5.set_title('Odziv odprtozančnega sistema \n na enotsko skočno funkcijo', fontsize=9)
f2_ax5.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax5.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax5.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax6.set_title('Fazno-frekvenčni del \n Bodejevega diagrama', fontsize=9)
f2_ax6.set_xscale('log')
f2_ax6.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=8)
f2_ax6.set_ylabel(r'$\phi\/$[°]', labelpad=0, fontsize=8)
f2_ax6.tick_params(axis='both', which='both', pad=0, labelsize=6)
def pid_control(Kp, Ti, Td, Fd, type_select, T1, Tdelay, pade_deg):
    """Redraw the analysis plots (step/impulse responses, Nyquist, Bode) for
    the selected controller acting on the first-order plant with delay.

    Kp, Ti, Td, Fd -- PID gain, integral/derivative time constants, filter ratio
    type_select    -- 0 P, 1 PI, 2 PD, 3 PID, 4 real (filtered) PID
    T1, Tdelay     -- plant time constant and time delay [s]
    pade_deg       -- degree of the Padé denominator polynomial
    """
    del_num, del_den = c.pade(Tdelay, pade_deg, -1)
    W_sys = c.series(c.tf([1], [T1, 1]), c.tf(del_num, del_den))
    # Term-enable flags derived from the controller type
    Ti0 = 1 if type_select in (1, 3, 4) else 0  # integral term active
    Td0 = 1 if type_select in (2, 3, 4) else 0  # derivative term active
    Fd0 = 1 if type_select == 4 else 0          # derivative filter active
    # PID controller in time-constant form
    P = Kp         # Proportional term
    I = Kp / Ti    # Integral term
    D = Kp * Td    # Derivative term
    Td_f = Td / Fd  # Derivative term filter time constant
    W_PID = c.parallel(c.tf([P], [1]),
                       c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),
                       c.tf([D * Td0, 0], [Td_f * Td0 * Fd0, 1]))
    W_open = c.series(W_PID, W_sys)       # Open loop
    W_closed = c.feedback(W_open, 1, -1)  # Closed loop with negative feedback
    # Remove the previous curves via Artist.remove(): mutating Axes.lines
    # directly was deprecated in Matplotlib 3.5 and removed in 3.7.
    global f2_line1, f2_line2, f2_line3, f2_line4, f2_line5, f2_line6
    f2_line1.remove()
    f2_line2.remove()
    f2_line3.remove()
    f2_line4.remove()
    f2_line5.remove()
    f2_line6.remove()
    tout, yout = c.step_response(W_closed)
    f2_line1, = f2_ax1.plot(tout, yout, lw=1, color='blue')
    _, _, ob = c.nyquist_plot(W_open, Plot=False)  # Coarse run to determine frequency bounds
    real, imag, freq = c.nyquist_plot(W_open, omega=np.logspace(np.log10(ob[0]), np.log10(ob[-1]), 1000), Plot=False)
    f2_line2, = f2_ax2.plot(real, imag, lw=1, color='blue')
    mag, phase, omega = c.bode_plot(W_open, Plot=False)
    f2_line3, = f2_ax3.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')
    f2_line6, = f2_ax6.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')
    tout, yout = c.impulse_response(W_closed)
    f2_line4, = f2_ax4.plot(tout, yout, lw=1, color='blue')
    tout, yout = c.step_response(W_open)
    f2_line5, = f2_ax5.plot(tout, yout, lw=1, color='blue')
    f2_ax1.relim()
    f2_ax2.relim()
    f2_ax3.relim()
    f2_ax4.relim()
    f2_ax5.relim()
    f2_ax6.relim()
    f2_ax1.autoscale_view()
    f2_ax2.autoscale_view()
    f2_ax3.autoscale_view()
    f2_ax4.autoscale_view()
    f2_ax5.autoscale_view()
    f2_ax6.autoscale_view()
# GUI widgets
def draw_controllers(type_select):
    """(Re)create the PID gain sliders and wire them to pid_control.

    type_select -- 0 P, 1 PI, 2 PD, 3 PID, 4 real (filtered) PID; a slider is
    greyed out when its term is absent from the selected controller type.
    """
    global Kp_slider
    global Ti_slider
    global Td_slider
    global Fd_slider
    Kp_slider = w.FloatLogSlider(value=0.5, base=10, min=-1, max=4, description='Kp:', continuous_update=False,
                                 layout=w.Layout(width='auto', flex='5 5 auto'))
    # Each slider below is created once; only its `disabled` flag depends on the type.
    Ti_slider = w.FloatLogSlider(value=0.0035, base=10, min=-4, max=1, description='Ti:', continuous_update=False,
                                 layout=w.Layout(width='auto', flex='5 5 auto'),
                                 disabled=type_select not in (1, 3, 4))
    Td_slider = w.FloatLogSlider(value=1, base=10, min=-4, max=1, description='Td:', continuous_update=False,
                                 layout=w.Layout(width='auto', flex='5 5 auto'),
                                 disabled=type_select not in (2, 3, 4))
    Fd_slider = w.FloatLogSlider(value=1, base=10, min=0, max=3, description='Fd:', continuous_update=False,
                                 layout=w.Layout(width='auto', flex='5 5 auto'),
                                 disabled=type_select != 4)
    input_data = w.interactive_output(pid_control, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,
                                                    'Fd': Fd_slider, 'type_select':typeSelect, 'T1':T1_slider, 'Tdelay':Tdelay_slider,
                                                    'pade_deg':deg_slider})
    display(w.HBox([Kp_slider, Ti_slider, Td_slider, Fd_slider]), input_data)
w.interactive_output(draw_controllers, {'type_select':typeSelect})
# -
# Z uporabo spodnje simulacije testiraj, kako dobro krmiljeni sistem sledi izbranemu vstopnemu signalu.<br>
# <b>Prilagodi nastavitve krmilnika tako, da bo sistem sprejemljivo sledil sinusni funkciji!</b>
# <br><br>
# <i>(Animacija je prilagojena na način, da se znotraj enega simulacijskega cikla (prikazan na diagramu) odziv sistema ne spreminja. Zaradi tega se, ob spremembi nastavitev PID krmilnika, nestabilne rešitve pokažejo šele v zadnji sekundi simulacijskega cikla.)</i>
#
# <!-- You can test the controlled system's signal following capabilities using the simulated scope.<br>
# <b>Readjust your controller so that it can follow a sine wave acceptably!</b>
# <br><br>
# <i>(The animations are scaled to fit the frame through the whole simulation. Because of this, unstable solutions might not seem to move until the very last second.)</i> -->
# +
# Simulation data
anim_fig = plt.figure(num='Animacija 1')
anim_fig.set_size_inches((9.8, 4))
anim_fig.set_tight_layout(True)
anim_ax1 = anim_fig.add_subplot(111)
frame_count=1000
scope_rounds=4
l1 = anim_ax1.plot([], [], lw=1, color='blue')
l2 = anim_ax1.plot([], [], lw=2, color='red')
line1 = l1[0]
line2 = l2[0]
anim_ax1.legend(l1+l2, ['referenčni signal', 'izstopni signal'], loc=1)
anim_ax1.set_title('Simulacija', fontsize=12)
anim_ax1.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=10)
anim_ax1.set_ylabel(r'$y\/$[/]', labelpad=0, fontsize=10)
anim_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
anim_ax1.grid(which='both', axis='both', color='lightgray')
T_plot = []
X_plot = []
R_plot = []
#Simulation function
def simulation(Kp, Ti, Td, Fd, type_select, T1, Tdelay, pade_deg, T, dt, X, Xf, Xa):
    """Run a closed-loop reference-tracking simulation and refresh the animation buffers.

    Kp, Ti, Td, Fd       -- controller parameters (same meaning as in pid_control)
    type_select          -- 0 P, 1 PI, 2 PD, 3 PID, 4 real (filtered) PID
    T1, Tdelay, pade_deg -- plant time constant [s], delay [s], Padé degree
    T, dt                -- simulated duration and sampling step [s]
    X, Xf, Xa            -- reference type (0 sine, 1 square), frequency [Hz], amplitude
    """
    # Use the same Padé approximant (numerator degree pade_deg - 1) as the
    # analysis cells above, so the simulated plant matches the tuned one.
    del_num, del_den = c.pade(Tdelay, pade_deg, -1)
    W_sys = c.series(c.tf([1], [T1, 1]), c.tf(del_num, del_den))
    # Term-enable flags derived from the controller type
    Ti0 = 1 if type_select in (1, 3, 4) else 0
    Td0 = 1 if type_select in (2, 3, 4) else 0
    Fd0 = 1 if type_select == 4 else 0
    # Controller -- identical construction to pid_control above
    P = Kp         # Proportional term
    I = Kp / Ti    # Integral term
    D = Kp * Td    # Derivative term
    Td_f = Td / Fd  # filter time constant; was Td * Fd, inconsistent with pid_control
    W_PID = c.parallel(c.tf([P], [1]),
                       c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),
                       c.tf([D * Td0, 0], [Td_f * Td0 * Fd0, 1]))
    # Model
    W_open = c.series(W_PID, W_sys)       # Open loop
    W_closed = c.feedback(W_open, 1, -1)  # Closed loop with negative feedback
    # Reference signal sampled on the simulation grid
    T_sim = np.arange(0, T, dt, dtype=np.float64)
    if X == 0:    # Sine wave reference
        X_sim = np.sin(2 * np.pi * Xf * T_sim) * Xa
    elif X == 1:  # Square wave reference
        X_sim = np.sign(np.sin(2 * np.pi * Xf * T_sim)) * Xa
    # System response
    Tx, youtx, xoutx = c.forced_response(W_closed, T_sim, X_sim)
    R_sim = youtx
    # Scale the y-axis to the largest excursion (skipped if the loop diverged to NaN)
    XR_max = max(np.amax(np.absolute(np.concatenate((X_sim, R_sim)))), Xa)
    if not np.isnan(XR_max):
        anim_ax1.set_ylim((-1.2 * XR_max, 1.2 * XR_max))
    # Resample onto the fixed-length buffers consumed by the animation callbacks
    global T_plot, X_plot, R_plot
    T_plot = np.linspace(0, T, frame_count*(scope_rounds+1), dtype=np.float32)
    X_plot = np.interp(T_plot, T_sim, X_sim)
    R_plot = np.interp(T_plot, T_sim, R_sim)
def anim_init():
    """Blitting init: blank both curves and reset the visible time window."""
    for curve in (line1, line2):
        curve.set_data([], [])
    anim_ax1.set_xlim((0, T_plot[frame_count-1]))
    return (line1, line2, anim_ax1,)
def animate(i):
    """Frame i: slide a frame_count-sample window across the plot buffers."""
    start = scope_rounds * i
    stop = start + frame_count - 1
    line1.set_data(T_plot[start:stop], X_plot[start:stop])
    line2.set_data(T_plot[start:stop], R_plot[start:stop])
    anim_ax1.set_xlim((T_plot[start], T_plot[stop]))
    return (line1, line2, anim_ax1,)
anim = animation.FuncAnimation(anim_fig, animate, init_func=anim_init,
frames=frame_count, interval=10, blit=True,
repeat=True)
# Controllers
T_slider = w.FloatLogSlider(value=10, base=10, min=-0.7, max=1, step=0.01,
description='trajanje [s]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
dt_slider = w.FloatLogSlider(value=0.1, base=10, min=-3, max=-1, step=0.01,
description='časovni korak [s]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
style = {'description_width': 'initial'}
X_type = w.Dropdown(options=[('sinusni signal', 0), ('kvadratni val', 1)], value=1,
description='Referenčni signal: ', continuous_update=False, style=style, layout=w.Layout(width='auto', flex='3 3 auto'))
Xf_slider = w.FloatLogSlider(value=0.5, base=10, min=-2, max=2, step=0.01,
description='frekvenca [Hz]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Xa_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=2, step=0.01,
description='amplituda [/]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
input_data = w.interactive_output(simulation, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,'Fd': Fd_slider,
'type_select': typeSelect, 'T1': T1_slider,
'Tdelay':Tdelay_slider, 'pade_deg':deg_slider,
'T': T_slider, 'dt': dt_slider,
'X': X_type, 'Xf': Xf_slider, 'Xa': Xa_slider})
display(w.HBox([w.HBox([T_slider, dt_slider], layout=w.Layout(width='25%')),
w.Box([], layout=w.Layout(width='5%')),
w.VBox([X_type, w.HBox([Xf_slider, Xa_slider])], layout=w.Layout(width='30%')),
w.Box([], layout=w.Layout(width='5%'))],
layout=w.Layout(width='100%', justify_content='center')), input_data)
# -
# Parameter *trajanje* vpliva na prikaz simulacije, parameter *časovni korak* pa vpliva na frekvenco vzorčenja in s tem izboljša rezultat, a na račun povečane računske moči.
#
# <!-- The duration parameter controls the simulated timeframe and does not affect the runtime of the animation. In contrast, the timestep controls the model sampling and can refine the results in exchange for higher computational resources. -->
| ICCT_si/examples/03/.ipynb_checkpoints/FD-09_PID_krmiljenje_sistemov_prvega_reda_s_casovnim_zadrzkom-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Read in the data and create objects to pass onto Gurobi constraints. Second column in parcel file is area and third column in parcel file is price
import numpy as np
import pandas as pd
parcels_data = np.array(pd.read_csv("Parcels.txt", header = None))
import gurobipy as gp
from gurobipy import GRB
# Column 0: parcel ids; column 1: area; column 2: price.
# (The original indexed columns with round(1, 5) / round(2, 8), which are
# just obfuscated spellings of the constants 1 and 2.)
P = np.array(parcels_data[:, 0].astype(int))
A = parcels_data[:, 1]  # area
C = parcels_data[:, 2]  # price (cost)
# gurobipy multidict input: parcel id -> [area, cost]
multidict_input = {p: [A[p - 1], C[p - 1]] for p in P}
# +
adjacency_data = pd.read_csv("Adjacency.txt", header = None)
subset = adjacency_data[[0,1]]
adjacency = [tuple(x) for x in subset.values]
# +
# what gurobi py uses when formulating constraints/objective function
parcels, areas, costs = gp.multidict(multidict_input)
# this is where we would specify our budget
budget = 1000000
# also add M variable
M = len(parcels) + 1
# -
# Now that the data is in and the budget is specified, I can write out the model
# create model object
# Build the Gurobi model with a 10-minute time limit and an on-disk log.
# NOTE(review): the model name 'ParcelSelct' looks like a typo for
# 'ParcelSelect' (cf. the log file name below) -- confirm before renaming,
# since the name appears in Gurobi output.
m = gp.Model('ParcelSelct')
m.Params.timeLimit = 600.0
m.Params.LogFile = 'ParcelSelect_log'
# create decision variables for each of the parcels and flow variables
# x[p]: binary, parcel p is selected; y[i,j]: binary, defined per adjacency pair
x = m.addVars(parcels, vtype = GRB.BINARY, name = "parcel")
y = m.addVars(adjacency, vtype = GRB.BINARY, name = "flow")
# +
# create budget constraint: total cost of selected parcels within the budget.
# Bound to a distinct name so the scalar `budget` is not clobbered by the
# returned Constr object (the original rebinding broke re-running this cell).
budget_constr = m.addConstr((x.prod(costs) <= budget), name = 'budget')
# create core parcel constraint: parcel 23 must always be selected
core = m.addConstr((x[23] == 1), name = 'core_p')
# -
# Constraint 3 from paper
# Constraint 3: flow may enter parcel p only if p is selected.
for p in parcels:
    # Parcels with an arc into p
    preds = [a[0] for a in adjacency if a[1] == p]
    if preds:  # skip parcels with no incoming arcs
        m.addConstr(sum(y[i, p] for i in preds) <= len(preds) * x[p], name = "no_flow" + str(p))
# Constraint 4 from paper
# Constraint 4: at most one outgoing flow arc from p, and only if p is selected.
for p in parcels:
    # Parcels reachable by an arc out of p
    succs = [a[1] for a in adjacency if a[0] == p]
    if succs:  # skip parcels with no outgoing arcs
        m.addConstr(sum(y[p, j] for j in succs) <= x[p], name = "one_flow_to" + str(p))
# Constraint 5 from paper
connected = m.addConstr(y.sum()== x.sum() - 1, name = "arcs_to_nodes")
# +
# create tail length contribution variables (based on # of parcels)
z = m.addVars(adjacency, vtype = GRB.INTEGER, name = "Z")
# Create tail length variables (based on # of parcels)
w = m.addVars(parcels, vtype = GRB.INTEGER, name = "W")
# -
# Constraint 6 from paper
# Constraint 6: big-M link between tail-length contributions z and tail lengths w.
for p in parcels:
    # Parcels reachable by an arc out of p; an empty list simply skips the loop,
    # so no explicit emptiness guard is needed.
    succs = [a[1] for a in adjacency if a[0] == p]
    for j in succs:
        if j != p:
            m.addConstr(z[p, j] >= w[p] + 1 - M*(1 - y[p, j]), name = "tail_length" + str(p) + str(j))
# Constraint 7 from paper
# Constraint 7: each parcel's tail length equals the sum of its incoming contributions.
for p in parcels:
    # Parcels with an arc into p
    preds = [a[0] for a in adjacency if a[1] == p]
    if preds:  # skip parcels with no incoming arcs
        m.addConstr(w[p] == z.sum('*', p), name = "wtail" + str(p))
# +
# Create objective function
m.setObjective(x.prod(areas), GRB.MAXIMIZE)
# -
# then we can write out the model (extra code required to optimize model and print out output)
m.write('ParcelSelect.lp')
m.optimize()
| Gurobi Model Formulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import scipy.io as sio
import numpy as np
from bresenham import bresenham
import matplotlib.pyplot as plt
from numpy import matmul as mm
import math
# Demo: rasterize the ray from `orig` to `occ` on a 30x30 grid with Bresenham.
Map = np.zeros((30,30))
# orig: ray start cell (row, col); occ: ray end cell marked as occupied
orig,occ = (9,4),(19,19)
# All grid cells crossed by the ray, endpoints included
free = np.array(list(bresenham(orig[0],orig[1],occ[0],occ[1])))
for i in range(len(free)):
    Map[tuple(free[i])] = 1  # mark each traversed cell with value 1
Map[occ[0],occ[1]] = 3  # overwrite the end cell with value 3 (occupied)
plt.imshow(Map)
# plt.plot takes (x, y) = (col, row), hence the swapped indices
plt.plot(orig[1],orig[0],'rx',linewidth=3)
plt.axis('equal')
| Robotics/EstimationAndLearning/Week3/.ipynb_checkpoints/example_bresenham-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/hvarS/CS60075-Team28-Task-1/blob/main/attention_multi.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="tYlRJ3N1qe9R" outputId="20f4102a-1bc2-4188-8f17-62cf6db20880"
# # get extra dependencies
# !pip install transformers
# + colab={"base_uri": "https://localhost:8080/"} id="ws2cXaqgv6fs" outputId="689a9654-b81f-4ce7-ecf9-4b55d0538889"
# !wget http://nlp.stanford.edu/data/glove.6B.zip
# !unzip glove.6B.zip -d embeddings
# !rm glove.6B.zip
# !rm embeddings/glove.6B.50d.txt
# !rm embeddings/glove.6B.100d.txt
# !rm embeddings/glove.6B.200d.txt
# + id="dQQFuKBHCMH4" colab={"base_uri": "https://localhost:8080/"} outputId="c1a4d671-1330-46ef-ee5d-b5debd575207"
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + [markdown] id="x_41OyiRoURO"
# ## Importing
# + id="PBqqN39ZoRcA"
import os
import pandas as pd
import time
import tensorflow as tf
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from transformers import BertTokenizer, TFBertModel
from collections import namedtuple
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.preprocessing.text import Tokenizer
from keras.layers.embeddings import Embedding
# + id="Vr3O0KwxUfLR"
FOLDER_PATH = "/content/drive/MyDrive/CS60075-Team28-Task-1/"
DATA_FOLDER = os.path.join(FOLDER_PATH,"data/preprocessed")
# + id="Zf-AbjWQP2sP"
# import evaluate function
import sys
sys.path.append(FOLDER_PATH)
from eval import evaluate
# + id="LeyCMxKPtcOv" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="1bef92ff-c754-42e5-ac80-475bf3ba2149"
data = pd.read_csv(os.path.join(DATA_FOLDER, "lcp_single_train_preprocessed.csv"), index_col=0)
data['token'] = data['token'].astype(str)
data['sentence'] = data['sentence'].astype(str)
data.head()
# + id="yQ_QxnnMp6eV" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="f170cb95-6f3e-49ae-d700-61c67fc048c5"
data_multi = pd.read_csv(os.path.join(DATA_FOLDER, "lcp_multi_train_preprocessed.csv"), index_col=0)
data_multi['token'] = data_multi['token'].astype(str)
data_multi['sentence'] = data_multi['sentence'].astype(str)
data_multi.head()
# + id="HjvHnw5BqDZ2" colab={"base_uri": "https://localhost:8080/"} outputId="7b4d1618-ef12-47da-f14f-04da469bfb78"
data = pd.concat([data, data_multi])
print(len(data))
# + [markdown] id="_1wXzYgmV2Mx"
# ## Features - load data
# + colab={"base_uri": "https://localhost:8080/"} id="TmAPF6kVbdX8" outputId="054d5e59-0666-4d1f-b419-dae939914133"
f1 = pd.read_csv(os.path.join(FOLDER_PATH, "data/extra_features/lcp_single_train_features.csv"), index_col=0)
f1['token'] = f1['token'].astype(str)
f1['sentence'] = f1['sentence'].astype(str)
f1.set_index("id", inplace=True)
# drop unwanted features
f1.drop(['parse', 'lemma'], axis=1, inplace=True)
print(f1.columns)
f2 = pd.read_csv(os.path.join(FOLDER_PATH, "data/added_corpus_presence/lcp_single_train_preprocessed.csv"), index_col=0)
f2['token'] = f2['token'].astype(str)
f2['sentence'] = f2['sentence'].astype(str)
print(f2.columns)
features = f1.merge(f2, on=['id','sentence', 'corpus', 'token', 'complexity'])
# + colab={"base_uri": "https://localhost:8080/"} id="-G3zFmbnhPqB" outputId="f7bdb383-df8d-4c50-f58c-fa0ba68abca1"
# fill pos nan by NN, as they are in majority
features['pos'] = features['pos'].fillna('NN')
features['token_length'] = features['token_length'].fillna(0)
# categorical encoding
labels = dict(features['pos'].value_counts())
labels = { k:i for i,k in enumerate(labels)}
labels['POS'] = len(labels)
print(labels)
features['pos'] = features['pos'].apply((lambda x: labels[x]))
def get_vowels(word):
    """Return the number of vowel characters (a, e, i, o, u, either case) in *word*."""
    vowels = 'AaEeIiOoUu'
    return sum(1 for ch in word if ch in vowels)
features['token_vowels'] = features['token'].apply(lambda x: get_vowels(x) )
# scaler = preprocessing.StandardScaler()
# features[['token_length', 'syllables', 'pos', 'dep num', 'synonyms', 'hypernyms', 'hyponyms', 'google frequency', 'familarity', 'token_vowels']] = \
# scaler.fit_transform(features[['token_length', 'syllables', 'pos', 'dep num', 'synonyms', 'hypernyms', 'hyponyms', 'google frequency', 'familarity', 'token_vowels']])
features.head()
# + id="ocBaxNJ1uPD-" colab={"base_uri": "https://localhost:8080/"} outputId="5a6f465c-2952-4c61-a0b6-9f023d8d371f"
multi_f1 = pd.read_csv(os.path.join(FOLDER_PATH, "data/extra_features/lcp_multi_train_split_features.csv"), index_col=0)
multi_f1['token'] = multi_f1['token'].astype(str)
multi_f1['sentence'] = multi_f1['sentence'].astype(str)
multi_f1.set_index("id", inplace=True)
# drop unwanted features
multi_f1.drop(['parse', 'token1', 'token2', 'lemma1', 'lemma2', 'Unnamed: 0.1'], axis=1, inplace=True)
multi_f2 = pd.read_csv(os.path.join(FOLDER_PATH, "data/added_corpus_presence/lcp_multi_train_preprocessed.csv"), index_col=0)
multi_f2['token'] = multi_f2['token'].astype(str)
multi_f2['sentence'] = multi_f2['sentence'].astype(str)
multi_features = multi_f1.merge(multi_f2, on=['id','sentence', 'corpus', 'token', 'complexity'])
multi_features.head(2)
# + id="6oRx6fW5uxx1" colab={"base_uri": "https://localhost:8080/"} outputId="6e7110df-7813-435d-a337-b6083aca3c8c"
# fill pos nan by NN, as they are in majority
multi_features['pos2'] = multi_features['pos2'].fillna('NN')
multi_features['pos1'] = multi_features['pos1'].apply((lambda x: labels[x]))
multi_features['pos2'] = multi_features['pos2'].apply((lambda x: labels[x]))
# scaler = preprocessing.StandardScaler()
# multi_features[['token_length', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2', 'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2', 'google frequency1', 'google frequency2', 'familarity', 'token_vowels']] = \
# scaler.fit_transform(multi_features[['token_length', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2', 'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2', 'google frequency1', 'google frequency2', 'familarity', 'token_vowels']])
multi_features.head()
# + id="Tk7iGSWEKFas" colab={"base_uri": "https://localhost:8080/"} outputId="1890a9e2-a652-47c8-dc47-f4e33d39dd16"
# merge both single and multi features: duplicate each single-token column
# into the *1/*2 slots used by the multi-token frame, then stack the frames.
features['pos1'] = features['pos'].copy()
features['pos2'] = features['pos']
features['dep num1'] = features['dep num']
features['dep num2'] = features['dep num']
features['synonyms1'] = features['synonyms']
features['synonyms2'] = features['synonyms']
features['hypernyms1'] = features['hypernyms']
features['hypernyms2'] = features['hypernyms']
features['hyponyms1'] = features['hyponyms']
features['hyponyms2'] = features['hyponyms']
features['google frequency1'] = features['google frequency']
features['google frequency2'] = features['google frequency1']
features.drop(['pos','dep num', 'synonyms', 'hyponyms', 'hypernyms', 'google frequency'], axis=1, inplace=True)
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent.
features = pd.concat([features, multi_features])
print(len(features))
# Standardize all numeric feature columns in place (mean 0, std 1).
scaler = preprocessing.StandardScaler()
features[['token_length', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2', 'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2', 'google frequency1', 'google frequency2', 'familarity', 'token_vowels']] = \
scaler.fit_transform(features[['token_length', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2', 'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2', 'google frequency1', 'google frequency2', 'familarity', 'token_vowels']])
features.head()
# + [markdown] id="9CvxpP0osP9j"
# ## Splitting the Dataset into Test/Train
# + colab={"base_uri": "https://localhost:8080/", "height": 544} id="U7e_rarpxhKI" outputId="2debd7fd-0613-4838-b9dc-d07dee5c4134"
train, test = train_test_split(features, test_size=0.1, random_state=12)
train.head()
# + id="DYam3dV5tnve"
sentences_train_list = list(train['sentence'])
complexity_train_list = list(train['complexity'])
tokens_train_list = list(train['token'])
sentences_test_list = list(test['sentence'])
complexity_test_list = list(test['complexity'])
tokens_test_list = list(test['token'])
# + [markdown] id="h5D2L7r-oYX6"
# ## Getting embeddings (300 dimensional GloVe) and Preprocessing to I/P in Model
# + id="MaQXDhoVy4fp"
def read_glove_vector(glove_vec):
    """Parse a GloVe text file into a dict mapping word -> embedding vector.

    glove_vec -- path to a GloVe file with one token followed by its float
                 components per whitespace-separated line.
    """
    # (The original also built an unused `words` set; dropped.)
    word_to_vec_map = {}
    with open(glove_vec, 'r', encoding='UTF-8') as f:
        for line in f:
            parts = line.split()
            # First field is the word, the remainder its vector components.
            word_to_vec_map[parts[0]] = np.array(parts[1:], dtype=np.float64)
    return word_to_vec_map
word_to_vec_map = read_glove_vector('embeddings/glove.6B.300d.txt')
# get embeddings, and pad till max_len
# get embeddings, and pad till max_len
def get_embeddings(sentences, max_len=0):
    """Map whitespace-tokenized sentences to (max_len, 300) GloVe matrices.

    Unknown words are first marked with NaN placeholders, then replaced by
    the mean of the sentence's known-word embeddings; finally the sequence
    is zero-padded up to ``max_len``.
    NOTE(review): assumes each sentence has at least one in-vocabulary word
    (else the mean is all-NaN) and at most max_len words (else the padding
    length goes negative) - TODO confirm for this dataset.
    """
    sent_emb = []
    for s in sentences:
        # fill unk by nan
        # calculate mean over non nan embeddings
        # fill unk by the mean embedding of sentence
        # pad 0 vectors till max_len
        temp_sent_emb = [ word_to_vec_map[x] if x in word_to_vec_map else np.full((300,), np.nan) for x in s.split() ]
        mean_emb = np.nanmean(np.array(temp_sent_emb), axis=0)
        # x[0] is NaN exactly when the whole row is the NaN placeholder
        temp_sent_emb = np.array([ mean_emb if np.isnan(x[0]) else x for x in temp_sent_emb ])
        temp_sent_emb = np.concatenate((temp_sent_emb, np.zeros((max_len-temp_sent_emb.shape[0],300))))
        sent_emb.append(temp_sent_emb)
    return np.array(sent_emb)
# + colab={"base_uri": "https://localhost:8080/"} id="NivSh02ZIomT" outputId="2c5b1c4c-43c1-40a1-a8ca-87c6c9f2f4bb"
# max len for the sentence used for padding
# (computed over train AND test so both splits are padded to one width)
max_len_sent = max(max([len(s.split()) for s in sentences_train_list]) , max([len(s.split()) for s in sentences_test_list]))
print(max_len_sent)
# longest target token (in words) across both splits
max_len_token = max(max([len(s.split()) for s in tokens_train_list]) , max([len(s.split()) for s in tokens_test_list]))
print(max_len_token)
# + colab={"base_uri": "https://localhost:8080/"} id="lczjIv2hX1Wn" outputId="af3a4670-c330-48f2-a211-eb6ecef7735c"
# build padded (n, max_len_sent, 300) GloVe matrices for train/test sentences
train_sent_emb = get_embeddings(sentences_train_list, max_len_sent)
test_sent_emb = get_embeddings(sentences_test_list, max_len_sent)
print("sent emb shape train : {}".format(train_sent_emb.shape))
print("sent emb shape test : {}".format(test_sent_emb.shape))
# token_position_data
# for each token we should have [start index, length of token]
# NOTE(review): s.split(t)[0] assumes the token string occurs verbatim in the
# sentence; the word count of that prefix is the token's start index.
train_token_data = [
    [
        len(s.split(t)[0].split()),
        len(t.split())
    ]
    for s,t in zip(sentences_train_list, tokens_train_list)
]
test_token_data = [
    [
        len(s.split(t)[0].split()),
        len(t.split())
    ]
    for s,t in zip(sentences_test_list, tokens_test_list)
]
# (older single-token feature column set, kept for reference)
# features_train = train[['token_length', 'token_vowels', 'syllables', 'pos', 'dep num',
#                         'synonyms', 'hypernyms', 'hyponyms',
#                         'google frequency',
#                         'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']].values
# features_test = test[['token_length', 'token_vowels', 'syllables', 'pos', 'dep num',
#                       'synonyms', 'hypernyms', 'hyponyms',
#                       'google frequency',
#                       'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']].values
# handcrafted feature matrices fed to the model's Dense projection
features_train = train[['token_length', 'token_vowels', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2',
                        'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2',
                        'google frequency1', 'google frequency2',
                        'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']].values
features_test = test[['token_length', 'token_vowels', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2',
                      'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2',
                      'google frequency1', 'google frequency2',
                      'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']].values
print("feature emb shape train : {}".format(features_train.shape))
print("feature emb shape test : {}".format(features_test.shape))
# + [markdown] id="F_v2CxU9nj1d"
#
#
# ## Model Class - BiLSTM
# + id="4nMSHIttBdp5"
# -------------------------------------------------------------------------------------
# feed fwd neural network :
# 1 hidden size layer, 1 output_size layer and a dropout between them
# used for calculating weights as well as final complexity prediction
#-------------------------------------------------------------------------------------
class FFN(tf.keras.layers.Layer):
    """Two-layer feed-forward head: Dense(relu) -> Dropout -> Dense.

    Used both to produce the attention weights over the embedding sources
    (softmax=True) and as the final complexity regression head (softmax=False).
    """
    def __init__(
        self,
        hidden_size,
        output_size,
        rate,
        softmax=False,
    ):
        super(FFN, self).__init__()
        out_activation = "softmax" if softmax is True else None
        self.layer1 = tf.keras.layers.Dense(hidden_size, activation="relu")   # (batch_size, hidden_size)
        self.dropout = tf.keras.layers.Dropout(rate)
        self.layer2 = tf.keras.layers.Dense(output_size, activation=out_activation)  # (batch_size, output_size)

    def call(self, x, training):
        # (batch, in) -> dense/relu -> dropout (train only) -> dense
        hidden = self.layer1(x)
        hidden = self.dropout(hidden, training=training)
        return self.layer2(hidden)
# -------------------------------------------------------------------------------------
# Custom Model for predicting complexity
#-------------------------------------------------------------------------------------
class OurModelBiLSTM(tf.keras.Model):
    """BiLSTM complexity regressor with learned attention over three embeddings.

    A BiLSTM encodes the padded GloVe sentence matrix; the target-token
    embedding, the mean sentence embedding, and a dense projection of the
    handcrafted features are combined with softmax weights produced by an
    FFN, and a second FFN regresses the scalar complexity.
    """
    def __init__(
        self,
        lstm_units,
        hidden_size,
        random_seed,
        seq_len,
        embedding_size,
        rate=0.25
    ):
        """
        lstm_units - units per LSTM direction (outputs are 2*lstm_units wide)
        hidden_size - hidden layer size for the FFN heads
        random_seed - seed for weight init and dataset shuffling
        seq_len - padded sentence length
        embedding_size - input word-embedding dimension (300 here)
        rate - dropout rate
        """
        super(OurModelBiLSTM, self).__init__()
        tf.random.set_seed(random_seed)
        self.random_seed = random_seed
        self.lstm_units = lstm_units
        # self.dense1 = tf.keras.layers.Dense(2*self.lstm_units)
        self.bilstm = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(self.lstm_units, return_sequences=True), input_shape=(seq_len,embedding_size)
        )
        # self.bilstm = tf.keras.layers.LSTM(self.lstm_units, return_sequences=True)
        self.dropout = tf.keras.layers.Dropout(rate=rate)
        self.layernorm = tf.keras.layers.LayerNormalization()
        # projects the handcrafted feature vector to the BiLSTM output width
        self.dense = tf.keras.layers.Dense(2*self.lstm_units)
        # FFN producing the 3 softmax attention weights
        self.getWeights = FFN(hidden_size, 3, rate, softmax=True)
        # FFN regression head producing the scalar complexity
        self.final_layer = FFN(hidden_size, 1, rate)
    # one forward pass of the model
    def call(self, input_seq, token_position, feature_emb, training=False):
        """
        input_seq : [batch_size, seq_len, embedding_size]
        token_position : [batch_size, 2] - [start, length] of the target token
        feature_emb : [batch_size, n_features] handcrafted feature vectors
        """
        # bi-lstm pass
        bilstm_output = self.bilstm(input_seq)
        # bilstm_output = self.bilstm(input_seq) #(batch_size, seq_len, 2*lstm_units)
        bilstm_output = self.dropout(bilstm_output, training=training)
        # extract token embeddings:
        # for each example take the mean of the BiLSTM outputs over the
        # token's positions [start, start+length)
        token_emb = tf.stack(
            [tf.reduce_mean(tf.gather(i, tf.range(j[0],j[0]+j[1]), axis=0), axis=0) for i,j in zip(bilstm_output,token_position) ]
        ) # (batch_size, 2*lstm_units)
        # whole-sentence embedding - mean over all time steps
        added_emb = tf.reduce_mean(bilstm_output, axis=1) # (batch_size, 2*lstm_units)
        # feature_emb - (batch_size, features) projected to the BiLSTM width
        feature_emb = self.dense(feature_emb) # (batch_size, 2*lstm_units)
        # feature_emb = tf.cast(feature_emb, tf.float32)
        # get attention weights over the three embeddings
        # weights = self.getWeights(self.layernorm(tf.add_n([token_emb,added_emb, feature_emb])), training) # (batch_size, 3)
        weights = self.getWeights(self.layernorm(tf.concat([token_emb,added_emb, feature_emb], axis=1)), training) # (batch_size, 3)
        # generate attention-based final embedding
        # final_emb = weights[0]*token_emb + weights[1]*added_emb + weights[2]*feature_emb
        final_emb = tf.zeros(shape=(), dtype=tf.dtypes.float32) # (batch_size, 2*lstm_units)
        final_emb += tf.expand_dims(weights[:, 0], axis=1) * token_emb
        final_emb += tf.expand_dims(weights[:, 1], axis=1) * added_emb
        final_emb += tf.expand_dims(weights[:, 2], axis=1) * feature_emb
        # output complexity
        final_output = self.final_layer(final_emb, training)
        return final_output
    # loss function: mean MSE over the batch
    def loss_function(self, real, pred):
        loss_ = tf.keras.losses.MSE(real, pred)
        l = tf.reduce_mean(loss_)
        return l
    # set optimizer (overrides keras compile; only the optimizer is stored)
    def compile(self, optimizer):
        super(OurModelBiLSTM, self).compile()
        self.optimizer = optimizer
    # train step - forward pass + backprop + model weight update
    # @tf.function() - may not work because zip is used in call()
    def train_step(self, input_seq, token_position, y, feature_emb):
        training=True
        with tf.GradientTape() as tape:
            # Forward pass
            y_pred = self(input_seq, token_position, feature_emb, training)
            loss = self.loss_function(tf.reshape(y,(-1,1)), y_pred)
        # calculate gradients
        gradients = tape.gradient(loss, self.trainable_variables)
        # update model weights using gradients
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return loss
    # fit function of the model
    def _fit(
        self,
        input_seq_data,
        token_position_data,
        feature_emb_data,
        complexity,
        epochs,
        batch_size,
        val_input_seq_data=None,
        val_token_position_data=None,
        val_feature_emb_data=None,
        val_complexity=None
    ):
        """Custom training loop; returns per-epoch MSE history (train[, val])."""
        # create tensorflow dataset
        tf_data = tf.data.Dataset.from_tensor_slices((input_seq_data, token_position_data, feature_emb_data, complexity))
        # shuffle and batch
        tf_data = tf_data.shuffle(100000, seed=self.random_seed).batch(batch_size)
        train_loss = tf.keras.metrics.Mean(name="train_loss")
        loss_train = {"MSE": []}
        loss_val = {"MSE": []}
        # training starts
        for epoch in range(epochs):
            start = time.time()
            # reset state for mean loss
            train_loss.reset_states()
            # loop over batches
            for step, x in tf_data.enumerate():
                # get inputs for model
                input_seq = x[0]
                token_position = x[1]
                feature_emb = x[2]
                y = x[3]
                # call trainstep
                loss = self.train_step(input_seq, token_position, y, feature_emb)
                train_loss(loss)
            loss_train["MSE"].append(train_loss.result().numpy())
            print("Epoch {} loss MSE: {}, time taken: {:.2f}s".format(epoch + 1, loss_train["MSE"][-1], time.time() - start))
            # validation if provided
            if (val_input_seq_data is not None):
                assert val_token_position_data is not None and val_complexity is not None, "incorrect validation data"
                val_pred = self._predict(val_input_seq_data, val_token_position_data, val_feature_emb_data)
                loss_val["MSE"].append(self.loss_function(tf.reshape(val_complexity, (-1,1)), val_pred).numpy())
                print("Validation loss MSE : {}".format(loss_val["MSE"][-1]))
        if (val_input_seq_data is not None):
            return loss_train, loss_val
        return loss_train
    # predict function; batched at 512 to limit memory use
    def _predict(self, input_seq_data, token_position_data, feature_emb_data):
        # create tensorflow dataset
        tf_data = tf.data.Dataset.from_tensor_slices((input_seq_data, token_position_data, feature_emb_data))
        # batch for memory constraints
        tf_data = tf_data.batch(512)
        pred_list = []
        for step, x in tf_data.enumerate():
            # get inputs for model
            input_seq = x[0]
            token_position = x[1]
            feature_emb = x[2]
            pred_list.append(self(input_seq, token_position, feature_emb))
        return tf.concat(pred_list, axis=0)
# + [markdown] id="sTnNsZJJMqiL"
# ## Prepare test data features - only multi
#
# + colab={"base_uri": "https://localhost:8080/"} id="cWiuZXrJM2ZF" outputId="520a4978-15b6-4a22-9510-494adcdad096"
# load handcrafted features for the single-token TEST set
test_f1 = pd.read_csv(os.path.join(FOLDER_PATH, "data/extra_features/lcp_single_test_features.csv"), index_col=0)
test_f1['token'] = test_f1['token'].astype(str)
test_f1['sentence'] = test_f1['sentence'].astype(str)
test_f1.set_index("id", inplace=True)
# drop unwanted features
test_f1.drop(['parse', 'lemma'], axis=1, inplace=True)
print(test_f1.columns)
# corpus-presence features for the same rows
test_f2 = pd.read_csv(os.path.join(FOLDER_PATH, "data/added_corpus_presence/lcp_single_test_preprocessed.csv"), index_col=0)
test_f2['token'] = test_f2['token'].astype(str)
test_f2['sentence'] = test_f2['sentence'].astype(str)
print(test_f2.columns)
# join the two feature tables on the shared key columns
test_features = test_f1.merge(test_f2, on=['id','sentence', 'corpus', 'token'])
# + colab={"base_uri": "https://localhost:8080/", "height": 714} id="a3fXkp14M2Zi" outputId="21ed43d0-28d1-4e5c-82b9-4ff3163f81d3"
# fill pos nan by NN, as they are in majority
test_features['pos'] = test_features['pos'].fillna('NN')
test_features['token_length'] = test_features['token_length'].fillna(0)
# map POS tag strings to integer ids via the `labels` dict (defined earlier in the notebook)
test_features['pos'] = test_features['pos'].apply((lambda x: labels[x]))
def get_vowels(word):
    """Return the number of vowel characters (a/e/i/o/u, either case) in *word*."""
    return sum(ch in 'AaEeIiOoUu' for ch in word)
# vowel count per target token
test_features['token_vowels'] = test_features['token'].apply(lambda x: get_vowels(x) )
# (scaling of the single-token test features is disabled here - kept for reference)
# test_features[['token_length', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2', 'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2', 'google frequency1', 'google frequency2', 'familarity', 'token_vowels']] = \
# scaler.transform(test_features[['token_length', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2', 'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2', 'google frequency1', 'google frequency2', 'familarity', 'token_vowels']])
# test_features[['token_length', 'syllables', 'pos', 'dep num', 'synonyms', 'hypernyms', 'hyponyms', 'google frequency', 'familarity', 'token_vowels']] = \
# scaler.transform(test_features[['token_length', 'syllables', 'pos', 'dep num', 'synonyms', 'hypernyms', 'hyponyms', 'google frequency', 'familarity', 'token_vowels']])
test_features.head()
# + id="RW0BqIfuM2Zj" colab={"base_uri": "https://localhost:8080/", "height": 332} outputId="<PASSWORD>"
# load handcrafted features for the multi-token TEST set
test_multi_f1 = pd.read_csv(os.path.join(FOLDER_PATH, "data/extra_features/lcp_multi_test_split_features.csv"), index_col=0)
test_multi_f1['token'] = test_multi_f1['token'].astype(str)
test_multi_f1['sentence'] = test_multi_f1['sentence'].astype(str)
test_multi_f1.set_index("id", inplace=True)
# drop unwanted features
test_multi_f1.drop(['parse', 'token1', 'token2', 'lemma1', 'lemma2', 'Unnamed: 0.1'], axis=1, inplace=True)
# corpus-presence features for the same rows
test_multi_f2 = pd.read_csv(os.path.join(FOLDER_PATH, "data/added_corpus_presence/lcp_multi_test_preprocessed.csv"), index_col=0)
test_multi_f2['token'] = test_multi_f2['token'].astype(str)
test_multi_f2['sentence'] = test_multi_f2['sentence'].astype(str)
test_multi_features = test_multi_f1.merge(test_multi_f2, on=['id','sentence', 'corpus', 'token'])
test_multi_features['token'] = test_multi_f2['token'].astype(str)
test_multi_features.head(2)
# + id="9ElllpTLM2Zk" colab={"base_uri": "https://localhost:8080/", "height": 680} outputId="8f2cbdfc-0dc0-49a1-9d7f-7ffea4c14b19"
# fill pos nan by NN, as they are in majority
# NOTE(review): unlike pos2, pos1 is NOT fillna'd before the labels[] lookup;
# a NaN in pos1 would raise KeyError here - presumably the multi test set has
# no missing pos1 values. TODO confirm.
test_multi_features['pos2'] = test_multi_features['pos2'].fillna('NN')
test_multi_features['pos1'] = test_multi_features['pos1'].apply((lambda x: labels[x]))
test_multi_features['pos2'] = test_multi_features['pos2'].apply((lambda x: labels[x]))
# scale with the scaler fitted on the training features (transform only)
test_multi_features[['token_length', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2', 'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2', 'google frequency1', 'google frequency2', 'familarity', 'token_vowels']] = \
scaler.transform(test_multi_features[['token_length', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2', 'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2', 'google frequency1', 'google frequency2', 'familarity', 'token_vowels']])
test_multi_features.head()
# + id="dMgQISU-Oikz" colab={"base_uri": "https://localhost:8080/"} outputId="614cbeac-1b65-493c-971c-445b837df7c1"
# single_sent_emb = get_embeddings(list(test_features['sentence']), max_len_sent)
# padded GloVe matrices for the multi-token test sentences
multi_sent_emb = get_embeddings(list(test_multi_features['sentence']), max_len_sent)
# print("single emb shape train : {}".format(single_sent_emb.shape))
print("multi emb shape test : {}".format(multi_sent_emb.shape))
# token_position_data
# for each token we should have [start index, length of token]
# single_token_data = [
#     [
#         len(s.split(t)[0].split()),
#         len(t.split())
#     ]
#     for s,t in zip(list(test_features['sentence']), list(test_features['token']))
# ]
multi_token_data = [
    [
        len(s.split(t)[0].split()),
        len(t.split())
    ]
    for s,t in zip(list(test_multi_features['sentence']), list(test_multi_features['token']))
]
# features_single = test_features[['token_length', 'token_vowels', 'syllables', 'pos', 'dep num', 'synonyms', 'hypernyms', 'hyponyms', 'google frequency', 'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']]
# features_single = test_features[['token_length', 'token_vowels', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2',
#                                  'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2',
#                                  'google frequency1', 'google frequency2',
#                                  'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']].values
# handcrafted feature matrix for the multi-token test set (same column order as training)
features_multi = test_multi_features[['token_length', 'token_vowels', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2',
                                      'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2',
                                      'google frequency1', 'google frequency2',
                                      'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']].values
# print("feature emb shape single : {}".format(features_single.shape))
print("feature emb shape multi : {}".format(features_multi.shape))
# + [markdown] id="7lCFd4QrWFUO"
# ## Run model
#
# + colab={"base_uri": "https://localhost:8080/"} id="MK-Tg5heCVkA" outputId="40df92f3-86b4-41ee-85df-d02680d8498e"
# instantiate and train the BiLSTM model on the combined single+multi features
model = OurModelBiLSTM(lstm_units=15, hidden_size=10, random_seed=12, seq_len=max_len_sent, embedding_size=300, rate=0.4)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.005))
loss = model._fit ( train_sent_emb,
                    train_token_data,
                    feature_emb_data = features_train,
                    complexity = complexity_train_list,
                    epochs=14,
                    batch_size=32,
                    val_input_seq_data=test_sent_emb,
                    val_token_position_data = test_token_data,
                    val_feature_emb_data = features_test,
                    val_complexity = complexity_test_list
                    )
# + colab={"base_uri": "https://localhost:8080/"} id="xqbLPDisCXyl" outputId="28e1b711-9149-4ccc-a5a3-b9af54fbd2e7"
SUBMISSION_FOLDER = os.path.join(FOLDER_PATH,"predictions/attn_app/multi")
if( not os.path.exists(SUBMISSION_FOLDER)):
    os.makedirs(SUBMISSION_FOLDER)
# predict on the multi-token test set and write a headerless (ID, complexity) CSV
pred = model._predict(multi_sent_emb, multi_token_data, features_multi)
pred = pd.DataFrame({"ID":test_multi_features.index, "complexity":pred.numpy().reshape(-1,)})
pred.to_csv(SUBMISSION_FOLDER+"/app1.csv", index=False, header=False)
# score the written predictions against the labelled references
evaluate(SUBMISSION_FOLDER, FOLDER_PATH+"/references/lcp_multi_test_labelled_preprocessed.csv")
# + id="3LQn0AjsW8Sm" colab={"base_uri": "https://localhost:8080/"} outputId="9d66f71e-ae80-4dc8-da63-f023b163394c"
model.summary()
# + [markdown] id="xdLdsaUGXEbN"
# ## sample test bilstm code
# + colab={"base_uri": "https://localhost:8080/", "height": 120} id="2OfWGLwpFZpP" outputId="f6de6b1c-3517-46de-b518-a1526100b683"
# ----------- test model output shape
sample_model = OurModelBiLSTM(lstm_units=2, hidden_size=2001, random_seed=12, seq_len=10, embedding_size=10)
# (batch_size, seq_len, embedding_size)
sample_input = tf.random.uniform((32, 10, 10), dtype=tf.float32, minval=0, maxval=200)
# (start, len) per example
token_position_data = tf.constant([(3, 2) for _ in range(32)])
# BUGFIX: OurModelBiLSTM.call requires a feature embedding as its third
# positional argument; the original smoke test omitted it (TypeError).
# A dummy (batch, n_features) tensor is enough for a shape test.
feature_data = tf.random.uniform((32, 14), dtype=tf.float32)
output = sample_model(sample_input, token_position_data, feature_data)
print(output.shape)
# ----------- test fit and predict
sample_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))
input_seq_data = tf.random.uniform((100, 10, 10), dtype=tf.float32, minval=0, maxval=200)
token_position_data = [(3, 5) for _ in range(100)]
feature_emb_data = tf.random.uniform((100, 14), dtype=tf.float32)
y = tf.random.uniform((100, 1), dtype=tf.float32, minval=0, maxval=1)
val_input_seq_data = tf.random.uniform((100, 10, 10), dtype=tf.float32, minval=0, maxval=200)
val_token_position_data = [(3, 5) for _ in range(100)]
val_feature_emb_data = tf.random.uniform((100, 14), dtype=tf.float32)
val_y = tf.random.uniform((100, 1), dtype=tf.float32, minval=0, maxval=1)
# BUGFIX: _fit also requires feature_emb_data (and val_feature_emb_data for
# the validation path); the original call was missing both.
loss = sample_model._fit(
    input_seq_data = input_seq_data,
    token_position_data = token_position_data,
    feature_emb_data = feature_emb_data,
    complexity = y,
    epochs = 10,
    batch_size = 32,
    val_input_seq_data = val_input_seq_data,
    val_token_position_data = val_token_position_data,
    val_feature_emb_data = val_feature_emb_data,
    val_complexity = val_y
)
# + [markdown] id="UzykQcaxHgUF"
# # Code for other experiment - with BERT
# + [markdown] id="CJmrNc09kQdo"
# ## BERT CODE
# + id="zZ_77ooQkS-W"
# -------------------------------------------------------------------------------------
# feed fwd neural network :
# 1 hidden size layer, 1 output_size layer and a dropout between them
# used for calculating weights as well as final complexity prediction
#-------------------------------------------------------------------------------------
class FFN(tf.keras.layers.Layer):
    """Two-layer feed-forward head: Dense(relu) -> Dropout -> Dense.

    NOTE(review): identical redefinition of the FFN class declared earlier in
    this notebook (it shadows the previous definition when this cell runs).
    """
    def __init__(
        self,
        hidden_size,
        output_size,
        rate,
        softmax=False,
    ):
        super(FFN, self).__init__()
        self.layer1 = tf.keras.layers.Dense(hidden_size, activation="relu") # (batch_size, hidden_size)
        self.dropout = tf.keras.layers.Dropout(rate)
        self.layer2 = tf.keras.layers.Dense(output_size, activation= "softmax" if softmax is True else None) # (batch_size, output_size)
    def call(self, x, training):
        # dense/relu -> dropout (active only while training) -> dense
        return self.layer2(self.dropout(self.layer1(x), training=training) )
        # return self.layer2(self.dropout(x, training=training))
# -------------------------------------------------------------------------------------
# Custom Model for predicting complexity
#-------------------------------------------------------------------------------------
class OurModelBert(tf.keras.Model):
    """Complexity regressor over precomputed BERT embeddings.

    Combines the [CLS] sentence embedding, the target-token embedding, and a
    dense projection of the handcrafted features with learned softmax
    attention weights, then regresses complexity with an FFN head.
    """
    def __init__(
        self,
        hidden_size,
        random_seed,
        rate=0.25
    ):
        """
        hidden_size - hidden layer size for the FFN heads
        random_seed - seed for weight init and dataset shuffling
        rate - dropout rate
        """
        super(OurModelBert, self).__init__()
        tf.random.set_seed(random_seed)
        self.random_seed = random_seed
        # projects the handcrafted feature vector to the 768-d BERT space
        self.dense = tf.keras.layers.Dense(768, activation="relu")
        self.dropout = tf.keras.layers.Dropout(rate=rate)
        self.getWeights = FFN(hidden_size, 3, rate, softmax=True)
        self.final_layer = FFN(hidden_size, 1, rate)
        self.layernorm = tf.keras.layers.LayerNormalization()
    # one forward pass of the model
    def call(self, cls_emb, token_position, feature_emb, token_emb, training=False):
        """
        cls_emb : (batch_size, 768) [CLS] sentence embeddings
        token_position : (batch_size, 2) - unused in this forward pass (kept
            for interface parity with the data pipeline)
        feature_emb : (batch_size, n_features) handcrafted feature vectors
        token_emb : (batch_size, 768) target-token embeddings
        """
        # feature_emb = tf.concat([feature_emb, token_position], axis=1)
        feature_emb = self.dropout(self.dense(feature_emb), training=training)
        # attention weights over the three embedding sources
        weights = self.getWeights(self.layernorm(tf.add_n([cls_emb, token_emb, feature_emb])), training) # (batch_size, 3)
        # attention-weighted combination of the three embeddings
        final_emb = tf.zeros(shape=(), dtype=tf.dtypes.float32) # (batch_size, 768)
        final_emb += tf.expand_dims(weights[:, 0], axis=1) * cls_emb
        final_emb += tf.expand_dims(weights[:, 1], axis=1) * token_emb
        final_emb += tf.expand_dims(weights[:, 2], axis=1) * feature_emb
        # output complexity
        final_output = self.final_layer(final_emb, training)
        return final_output
    # loss function: mean MSE over the batch
    def loss_function(self, real, pred):
        loss_ = tf.keras.losses.MSE(real, pred)
        l = tf.reduce_mean(loss_)
        return l
    # set optimizer (overrides keras compile; only the optimizer is stored)
    def compile(self, optimizer):
        super(OurModelBert, self).compile()
        self.optimizer = optimizer
    # train step - forward pass + backprop + model weight update
    # @tf.function() - may not work because zip is used
    def train_step(self, cls_emb, token_position, feature_emb, token_emb, y):
        training=True
        with tf.GradientTape() as tape:
            # Forward pass
            y_pred = self(cls_emb, token_position, feature_emb, token_emb, training)
            loss = self.loss_function(tf.reshape(y,(-1,1)), y_pred)
        # calculate gradients
        gradients = tape.gradient(loss, self.trainable_variables)
        # update model weights using gradients
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return loss
    # fit function of the model
    def _fit(
        self,
        cls_emb,
        token_emb,
        token_position_data,
        feature_emb,
        complexity,
        epochs,
        batch_size,
        val_cls_emb=None,
        val_token_emb=None,
        val_token_position_data=None,
        val_feature_emb=None,
        val_complexity=None,
    ):
        """Custom training loop; returns per-epoch MSE history (train[, val])."""
        # create tensorflow dataset
        tf_data = tf.data.Dataset.from_tensor_slices((cls_emb, token_emb, token_position_data, feature_emb, complexity))
        # shuffle and batch
        tf_data = tf_data.shuffle(100000, seed=self.random_seed).batch(batch_size)
        train_loss = tf.keras.metrics.Mean(name="train_loss")
        loss_train = {"MSE": []}
        loss_val = {"MSE": []}
        # training starts
        for epoch in range(epochs):
            start = time.time()
            # reset state for mean loss
            train_loss.reset_states()
            # loop over batches
            for step, x in tf_data.enumerate():
                # get inputs for model
                cls_emb = x[0]
                token_emb = x[1]
                token_position = x[2]
                feature_emb = x[3]
                complexity = x[4]
                # call trainstep
                loss = self.train_step(cls_emb, token_position, feature_emb, token_emb, complexity)
                train_loss(loss)
            loss_train["MSE"].append(train_loss.result().numpy())
            print("Epoch {} loss MSE: {}, time taken: {:.2f}s".format(epoch + 1, loss_train["MSE"][-1], time.time() - start))
            # validation if provided
            # if (val_input_seq_data is not None):
            #     assert val_token_position_data is not None and val_complexity is not None, "incorrect validation data"
            #     val_pred = self._predict(val_input_seq_data, val_token_position_data, val_feature_emb_data)
            #     loss_val["MSE"].append(self.loss_function(tf.reshape(val_complexity, (-1,1)), val_pred).numpy())
            #     print("Validation loss MSE : {}".format(loss_val["MSE"][-1]))
        # BUGFIX: this previously tested `val_input_seq_data`, a name that does
        # not exist in this method's scope (it belongs to OurModelBiLSTM._fit)
        # and would raise NameError - or silently pick up a stale global from
        # another notebook cell. The flag matching this signature is val_cls_emb.
        if (val_cls_emb is not None):
            return loss_train, loss_val
        return loss_train
    # predict function; batched at 512 to limit memory use
    # FIXME(review): the self(...) call below passes only three arguments, but
    # call() also requires token_emb - invoking this method raises TypeError.
    # It appears copied from OurModelBiLSTM._predict and is never used for this
    # class in the visible code; left as-is pending a decision on its inputs.
    def _predict(self, input_seq_data, token_position_data, feature_emb_data):
        # create tensorflow dataset
        tf_data = tf.data.Dataset.from_tensor_slices((input_seq_data, token_position_data, feature_emb_data))
        # batch for memory constraints
        tf_data = tf_data.batch(512)
        pred_list = []
        for step, x in tf_data.enumerate():
            # get inputs for model
            input_seq = x[0]
            token_position = x[1]
            feature_emb = x[2]
            pred_list.append(self(input_seq, token_position, feature_emb))
        return tf.concat(pred_list, axis=0)
# + id="uYfLF4rJnejD"
model = OurModelBert(hidden_size=256, random_seed=12, rate=0.3)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.005))
# + colab={"base_uri": "https://localhost:8080/"} id="Os-TGOk1n9KL" outputId="41347576-be19-4081-9d3b-ff28088a8f72"
# re-select the handcrafted feature matrix from the combined frame
features_train = features[['token_length', 'token_vowels', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2',
                           'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2',
                           'google frequency1', 'google frequency2',
                           'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']].values
features_train.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 493} id="12kYjajZnejg" outputId="0e15297f-51c7-4baf-b390-8a8d273d1945"
# NOTE(review): cls_emb, token_emb and token_position_data are loaded in a
# later cell ("Load saved bert embeddings") - this cell relies on that cell
# having been executed first (out-of-order notebook execution).
loss = model._fit ( cls_emb,
                    token_emb,
                    token_position_data,
                    feature_emb=features_train,
                    complexity=features['complexity'],
                    epochs=50,
                    batch_size=32,
                    )
# + colab={"base_uri": "https://localhost:8080/"} id="NYIK1fh3o2ZF" outputId="263ac648-34cb-43c5-ea8d-ed418afafb60"
model.summary()
# + [markdown] id="OWcDjs8N4KQp"
# ## Get Bert Embeddings
# + colab={"base_uri": "https://localhost:8080/", "height": 171, "referenced_widgets": ["20f8fcdbe0ca4901b1dd21ef35c49d0c", "92a8c0e8459442f9840a91ef6919b08e", "<KEY>", "8256d8c9be8f465191d13ea776a4cea3", "cc065ecea73d49d2b018772fdde4d54d", "be8ccebea254416580c767141adb8044", "92ca6d1e4405470ba44dc9e2952cdbcf", "a95c1043552f4194a54876148dd8eac6"]} id="se6Mm76240CV" outputId="88257ec3-8c11-48f2-cdee-82f847075ef1"
# # Generated here and saved once, because it gives an OOM error when not enough memory is available
# # Thus token_position_data and bert_emb are saved once
# bert = TFBertModel.from_pretrained("bert-base-cased")
# tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
# # get maximum padding length
# tokenized_output_data = tokenizer.batch_encode_plus(
# list(features['sentence']),
# add_special_tokens=True,
# padding='longest',
# return_tensors="tf"
# )
# max_len = tokenized_output_data['input_ids'].shape[1]
# sentences, tokens = list(features['sentence']), list(features['token'])
# cls_emb = []
# token_position_data = []
# token_emb = []
# # get embeddings one by one
# for i in tqdm(range(len(sentences))):
# tok = tokenizer.encode_plus(
# sentences[i],
# add_special_tokens=True,
# padding='max_length',
# max_length = max_len,
# return_tensors="tf"
# )
# entire = bert(**tok, return_dict=False)[0]
# cls_emb.append(entire[:,0,:])
# token_position_data.append( [
# len(tokenizer.encode(sentences[i].split(tokens[i])[0], add_special_tokens=False)) + 1,
# len(tokenizer.encode(tokens[i], add_special_tokens=False))
# ]
# )
# token_emb.append(tf.reduce_mean(tf.gather(entire, tf.range(token_position_data[-1][0],token_position_data[-1][0]+token_position_data[-1][1]), axis=1), axis=1))
# cls_emb = np.squeeze(np.array(cls_emb), axis=1)
# token_emb = np.squeeze(np.array(token_emb), axis=1)
# token_position_data = np.array(token_position_data)
# print(cls_emb.shape)
# print(token_emb.shape)
# print(token_position_data.shape)
# np.save(FOLDER_PATH+"data/cls_emb.npy", cls_emb)
# np.save(FOLDER_PATH+"data/token_position_data_bert.npy", np.array(token_position_data))
# np.save(FOLDER_PATH+"data/token_emb_bert.npy", token_emb)
# + [markdown] id="ahz4CNaBjsp6"
# ## Load saved bert embeddings
# + id="K8uhdr0Djwwb"
# load the BERT embeddings precomputed and saved by the (commented-out) cell above
cls_emb = np.load(FOLDER_PATH+"data/cls_emb.npy")
token_position_data = np.load(FOLDER_PATH+"data/token_position_data_bert.npy")
token_emb = np.load(FOLDER_PATH+"data/token_emb_bert.npy")
# + colab={"base_uri": "https://localhost:8080/"} id="QeGCa15-VglA" outputId="619ff91c-acb6-4185-af22-71cbcc7d0e7c"
# inspect the 15 largest token start offsets (sanity check against truncation)
sorted(token_position_data[:,0])[-15:]
# + [markdown] id="N_4ZYXkgXHfK"
# ## Bert code - 2
# + id="0dsSV1UXRVfe"
# -------------------------------------------------------------------------------------
# feed fwd neural network :
# 1 hidden size layer, 1 output_size layer and a dropout between them
# used for calculating weights as well as final complexity prediction
#-------------------------------------------------------------------------------------
class FFN(tf.keras.layers.Layer):
    """Two-layer feed-forward head: Dense(relu) -> Dropout -> Dense.

    NOTE(review): third identical (re)definition of FFN in this notebook;
    running this cell shadows the earlier definitions.
    """
    def __init__(
        self,
        hidden_size,
        output_size,
        rate,
        softmax=False,
    ):
        super(FFN, self).__init__()
        self.layer1 = tf.keras.layers.Dense(hidden_size, activation="relu") # (batch_size, hidden_size)
        self.dropout = tf.keras.layers.Dropout(rate)
        self.layer2 = tf.keras.layers.Dense(output_size, activation= "softmax" if softmax is True else None) # (batch_size, output_size)
    def call(self, x, training):
        # dense/relu -> dropout (active only while training) -> dense
        return self.layer2(self.dropout(self.layer1(x), training=training) )
        # return self.layer2(self.dropout(x, training=training))
# -------------------------------------------------------------------------------------
# Custom Model for predicting complexity, using BERT
# warning - for pooler gradients not being updated
# because we're not using the pooler, therefore it doesnt get updated
#-------------------------------------------------------------------------------------
class OurModelBert(tf.keras.Model):
def __init__(
self,
bert_model,
hidden_size,
random_seed,
rate=0.25
):
"""
bert_model - model name or a path to saved model
hidden_size - for FFN
"""
super(OurModelBert, self).__init__()
tf.random.set_seed(random_seed)
self.random_seed = random_seed
self.tokenizer = BertTokenizer.from_pretrained(bert_model)
self.bert = TFBertModel.from_pretrained(bert_model)
# make it non trainable
self.bert.trainable = False
# self.dense = tf.keras.layers.Dense(768, activation='relu')
self.getWeights = FFN(hidden_size, 2, rate, True)
self.final_layer = FFN(hidden_size, 1, rate)
# one fwd pass on the model
def call(self, inputs, token_position, feature_emb, training=False):
"""
inputs : a dict of paramsters to Bert model with keys (input_ids, token_type_ids, attention_mask)
token_position = [batch_size, 2] - [start,end] for tokens for which complexity is to be predicted
"""
# bert pass : returns last hidden state, pooler output
# sentence_output = tf.concat(sentence_output,axis=0)
sequence_output, _ = self.bert(**inputs, return_dict=False) #(batch_size, seq_len, 768)
# overall sentence embedding is CLS emb representation
cls_emb = sequence_output[:,0,:] # (batch_size, 768)
# extract token embeddings
# for each example we have start and end token position
# we add these embeddings position
temp = []
for s,t in zip(sequence_output, token_position):
start,end = t[0],t[1]
if (t[0]>self.max_length):
start,end = 1,self.max_length
elif (t[0]+t[1]>self.max_length):
end = self.max_length
temp.append(tf.reduce_sum(tf.gather(s, tf.range(start,start+end), axis=0), axis=0))
token_emb = tf.stack(temp)
# # OOM FOR LARGE INPUT
# token_emb = tf.stack(
# [tf.reduce_sum(tf.gather(s, tf.range(t[0],t[0]+t[1]), axis=0), axis=0) for s,t in zip(sequence_output,token_position) ]
# ) # (batch_size, 768)
# feature_emb = self.dense(feature_emb)
# get weights
weights = self.getWeights(tf.concat([token_emb, cls_emb], axis=1), training) # (batch_size, 3)
# generate attenton-based final embeddings
# final_emb = weights[0]*token_emb + weights[1]*added_emb
final_emb = tf.zeros(shape=(), dtype=tf.dtypes.float32) # (bacth_size, 768)
final_emb += tf.expand_dims(weights[:, 0], axis=1) * token_emb
final_emb += tf.expand_dims(weights[:, 1], axis=1) * cls_emb
# final_emb += tf.expand_dims(weights[:, 2], axis=1) * feature_emb
# output complexity
final_output = self.final_layer(final_emb)
return final_output
# loss function
def loss_function(self, real, pred):
loss_ = tf.keras.losses.MSE(real, pred)
l = tf.reduce_mean(loss_)
return l
# set optimizer
def compile(self, optimizer):
super(OurModelBert, self).compile()
self.optimizer = optimizer
# train step - fwd pass + back prop + update model weights
# @tf.function() - may not work because zip() is used inside call()
def train_step(self, inputs, token_position, y, feature_emb):
    """Run one optimization step and return the scalar batch loss.

    inputs : dict of BERT inputs (input_ids, token_type_ids, attention_mask)
    token_position : [batch_size, 2] - [start, length] of the target token(s)
    y : ground-truth complexity scores for the batch
    feature_emb : hand-crafted feature vectors for the batch
    """
    with tf.GradientTape() as tape:
        # forward pass in training mode (enables dropout etc.)
        predictions = self(inputs, token_position, feature_emb, True)
        batch_loss = self.loss_function(y, predictions)
    # back-propagate and apply the update
    grads = tape.gradient(batch_loss, self.trainable_variables)
    self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
    return batch_loss
# fit function of model
def _fit(
    self,
    sentences,
    tokens,
    feature_emb,
    complexity,
    epochs,
    batch_size,
    val_sentences=None,
    val_tokens=None,
    val_feature_emb=None,
    val_complexity=None
):
    """Tokenize the data, build a tf.data pipeline and run the training loop.

    sentences, tokens, complexity - from dataset - a list is expected
    feature_emb - hand-crafted feature matrix aligned with `sentences`
    val_* - optional validation split; when given, validation MSE is
            computed after every epoch and a (loss_train, loss_val) pair
            is returned instead of loss_train alone.
    """
    # tokenize + pad data
    # tokenized_output = dict of input_ids, token_type_ids, attention_mask
    tokenized_output_data = self.tokenizer.batch_encode_plus(
        sentences,
        add_special_tokens=True,
        padding='max_length',
        max_length=50,
        return_tensors="tf",
        truncation=True,
    )
    # fixed padded length; call() uses this to clamp token positions
    self.max_length = 50
    # identify token position
    # token_position_data is a list of [start, length] for all sentences
    # [start, end] = [start, start+length-1] => will be done in call function
    # find the token in the sentence; 1 is added to accommodate the CLS token
    token_position_data = [
        [
            len(self.tokenizer.encode(s.split(t)[0], add_special_tokens=False)) + 1,
            len(self.tokenizer.encode(t, add_special_tokens=False))
        ]
        for s, t in zip(sentences, tokens)
    ]
    # create tensorflow dataset, then shuffle and batch
    tf_data = tf.data.Dataset.from_tensor_slices(
        (tokenized_output_data, token_position_data, feature_emb, complexity)
    )
    tf_data = tf_data.shuffle(100000, seed=self.random_seed).batch(batch_size)
    train_loss = tf.keras.metrics.Mean(name="train_loss")
    loss_train = {"MSE": []}
    loss_val = {"MSE": []}
    # training starts
    for epoch in range(epochs):
        start = time.time()
        # reset state for mean loss
        train_loss.reset_states()
        # loop over batches; batch-local names avoid shadowing the
        # `feature_emb` argument (the original reused the parameter name)
        for step, (batch_inputs, batch_positions, batch_features, batch_y) in tqdm(tf_data.enumerate()):
            loss = self.train_step(batch_inputs, batch_positions, batch_y, batch_features)
            train_loss(loss)
        loss_train["MSE"].append(train_loss.result().numpy())
        print("Epoch {} loss MSE: {}, time taken: {:.2f}s".format(epoch + 1, loss_train["MSE"][-1], time.time() - start))
        # validation if provided (`is not None` instead of `!= None`)
        if val_sentences is not None:
            val_pred = self._predict(val_sentences, val_tokens, val_feature_emb)
            loss_val["MSE"].append(self.loss_function(val_complexity, val_pred).numpy())
            print("Validation loss MSE : {}".format(loss_val["MSE"][-1]))
    if val_sentences is not None:
        return loss_train, loss_val
    return loss_train
# predict function
def _predict(self, sentences, tokens, feature_emb):
    """Predict complexity scores for (sentence, token) pairs.

    Tokenizes with the max length fixed during _fit, locates each target
    token, and runs the model batch-by-batch to bound memory use.
    Returns a (num_examples, 1) tensor of predictions.
    """
    # tokenize + pad data - only up to the training-time max length
    # encoded = dict of input_ids, token_type_ids, attention_mask
    encoded = self.tokenizer.batch_encode_plus(
        sentences,
        add_special_tokens=True,
        padding='max_length',
        max_length=self.max_length,
        truncation=True,
        return_tensors="tf"
    )
    # [start, length] per sentence; +1 accounts for the leading CLS token
    positions = [
        [
            len(self.tokenizer.encode(s.split(t)[0], add_special_tokens=False)) + 1,
            len(self.tokenizer.encode(t, add_special_tokens=False))
        ]
        for s, t in zip(sentences, tokens)
    ]
    # create a tensorflow dataset, batched for memory constraints
    dataset = tf.data.Dataset.from_tensor_slices((encoded, positions, feature_emb)).batch(32)
    predictions = []
    for batch_inputs, batch_positions, batch_features in dataset:
        predictions.append(self(batch_inputs, batch_positions, batch_features))
    return tf.concat(predictions, axis=0)
# + colab={"base_uri": "https://localhost:8080/"} id="YdMZ1Vy0AlYT" outputId="17ded789-0c06-4bb2-ce3b-b3b27a9f2715"
print(len(sentences_train_list))
print(len(sentences_test_list))
print(len(tokens_train_list))
print(len(tokens_test_list))
# Hand-crafted feature columns, defined ONCE so the train and test
# selections cannot drift apart (the original repeated the 20-name list).
# NOTE: 'familarity' is the actual (misspelled) column name in the data.
FEATURE_COLUMNS = ['token_length', 'token_vowels', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2',
                   'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2',
                   'google frequency1', 'google frequency2',
                   'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']
features_train = train[FEATURE_COLUMNS].values
features_test = test[FEATURE_COLUMNS].values
print("feature emb shape train : {}".format(features_train.shape))
print("feature emb shape test : {}".format(features_test.shape))
# + colab={"base_uri": "https://localhost:8080/"} id="yRjjp7XUAkec" outputId="474a4290-1170-4b78-a8a7-9c14f9b8ea91"
# build and compile the attention-based BERT complexity regressor
PRETRAINED_BERT_MODEL = "bert-base-cased"
model = OurModelBert(bert_model=PRETRAINED_BERT_MODEL, hidden_size=10, random_seed=42, rate=0.3)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))
# + colab={"base_uri": "https://localhost:8080/", "height": 381, "referenced_widgets": ["5d65491a422c427885e44034094613ea", "0855f32c1b404a769c00cee002ca0eda", "ffc87e90f6a84b99b63d3e0b412fa522", "6d36caaf2bf64c39894213456095c97d", "cadafb75c1bf4d50b03e4a7e8d736e3a", "e0af6b07fbf84e1e8b0ed83a7ba02579", "5efc1e850ded41939ca5102f19f57675", "4a6ba5de1e4744e0983740ba1bdffb0c", "c9ec30c72a5f4ebfb400cc57c5966ad8", "20d1e737ad7243a6a67ded5e8afe6591", "92b3e15e3fe14c55b6928c0756c95031", "8c7101a22df74fe4977c626f2cad0a3f", "6d721e36ddb6454a9adf9d71137a5102", "f53c777c03aa4cca98d3402b65abfc13", "3b3e434e8c0c49659fb842a069e519fc", "1d970ce537db45abb8e8e462ce09b3bc", "253294b844824e76a65b0ad59e9526aa", "<KEY>", "945cbafe6fee4a55b1ea5d37f211be58", "3b3ab4c1ccd24001925845e8438e7702", "a9c074ce2773436296e1985f931079a4", "e4fc761c71744c4e9efeb25e8b808172", "43303003106346a4928e36fa01ccceba", "<KEY>", "<KEY>", "<KEY>", "d0483176cd4e4a309b1ee4852ca9c0d1", "e451883838004f07a5dabd27b763703d", "<KEY>", "0af085f978694264b5bdc8e4b93a9502", "6c9b69228f6e42298d6a76cf8a2d72a2", "<KEY>", "<KEY>", "96b0aced6d1b4c208877e9236d73ae73", "<KEY>", "<KEY>", "92a90f628b4b4da58d26312f89a9b2e7", "8ee540a3f8b34e42a8379bae6c199b0b", "<KEY>", "56b0c028029341e3987a6f39f8fac4e9"]} id="_AE5vmI4BD2K" outputId="e725f6ba-0d05-4b8c-f50b-5c936e6d36df"
# train for 20 epochs with batch size 32; the test split doubles as validation
model._fit(sentences_train_list, tokens_train_list, features_train, complexity_train_list, 20, 32, sentences_test_list, tokens_test_list, features_test, complexity_test_list)
# + id="iOS6iukCgfKf"
model.summary()  # print the layer / parameter overview after building
# + [markdown] id="62ys0rGAf9-F"
# ## Evaluate
# + colab={"base_uri": "https://localhost:8080/"} id="OIm5_pTxf9-V" outputId="f18e32c2-1129-4176-a504-1c61f139752d"
print(len(list(test_features['sentence'])))
print(len(list(test_features['token'])))
# hand-crafted features for the single-token test split
features = test_features[['token_length', 'token_vowels', 'syllables', 'pos1', 'pos2', 'dep num1', 'dep num2',
                          'synonyms1', 'synonyms2', 'hypernyms1', 'hypernyms2', 'hyponyms1', 'hyponyms2',
                          'google frequency1', 'google frequency2',
                          'biomedical', 'bible', 'subtitles', 'wiki', 'familarity']].values
print("feature shape : {}".format(features.shape))
SUBMISSION_FOLDER = os.path.join(FOLDER_PATH,"predictions/approach1/bert/single")
os.makedirs(SUBMISSION_FOLDER, exist_ok=True)  # idempotent directory creation
# predict on the TEST split.
# BUG FIX: the original called model._predict(sentences, tokens, feature_emb)
# where `feature_emb` is undefined in this notebook and `sentences`/`tokens`
# appear to hold TRAINING lists defined elsewhere — use the test_features
# frame this cell already reads from instead.
test_sentences = list(test_features['sentence'])
test_tokens = list(test_features['token'])
pred = model._predict(test_sentences, test_tokens, features)
pred = pd.DataFrame({"ID":test_features.index, "complexity":pred.numpy().reshape(-1,)})
pred.to_csv(SUBMISSION_FOLDER+"/app_bert1.csv", index=False, header=False)
evaluate(SUBMISSION_FOLDER, FOLDER_PATH+"/references/lcp_single_test_labelled_preprocessed.csv")
# + colab={"base_uri": "https://localhost:8080/"} id="3J0nHqAQf9-W" outputId="6b1b31ae-8c88-4a25-d605-80c4b746f2c8"
SUBMISSION_FOLDER = os.path.join(FOLDER_PATH,"predictions/approach1/multi")
if( not os.path.exists(SUBMISSION_FOLDER)):
    os.makedirs(SUBMISSION_FOLDER)
# predict
# NOTE(review): `multi_sent_emb`, `multi_token_data` and `features_multi` are
# not defined anywhere in this file — presumably produced by a removed
# multi-token preprocessing cell; confirm before running this cell.
pred = model._predict(multi_sent_emb, multi_token_data, features_multi)
pred = pd.DataFrame({"ID":test_multi_features.index, "complexity":pred.numpy().reshape(-1,)})
pred.to_csv(SUBMISSION_FOLDER+"/app1.csv", index=False, header=False)
evaluate(SUBMISSION_FOLDER, FOLDER_PATH+"/references/lcp_multi_test_labelled_preprocessed.csv")
# + [markdown] id="kRjOEyqpBVVz"
# ## test bert code
# + colab={"base_uri": "https://localhost:8080/", "height": 986, "referenced_widgets": ["ae8de82b5945472a91856590d1adf643", "61ba8e6c0419473d93b45633003e5423", "ad956a9eaa9f4e23a67587369a84fba9", "4f296caf3d054b9d992ee70252edef10", "<KEY>", "d686366b43e949358e30f9302b95c84b", "d9b9b3da7e1a4e31bb1261970b3acd18", "<KEY>", "2ec8b0324db242dbb272f4e083b0f76f", "9839616591934659a07384fa8df4b269", "3caeedb5ee8c4955aaed5f3d7ef7ccb5", "91e302e28d25445eb244339f7f210f1d", "28c78d1e7b8643989a40afc0da73dd2e", "<KEY>", "<KEY>", "6025869f24c44019adca723c827d18e6", "c1210a68dd074af493ff46c952a036c5", "<KEY>", "<KEY>", "88f297aca34e40b69cec0ae069405309", "1e467475d46b4b5b96e71fa1262f27a8", "e59f11e9d83241a39dbe58d5a50a630e", "<KEY>", "<KEY>", "486017b7bed347b4b8dac53e35011c18", "<KEY>", "5f450f076934428190fc1326ee0f4b90", "<KEY>", "9b74a2e519254818bd8ecf876eff63e0", "<KEY>", "acfb98d75ee04f93a5af9632db28e68e", "d9e2f7ae8c624aac84aa93450882dc57", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9cc53c4a55704a98be5c9ffec3dba128", "8b56b1be734940c986f3f8d8e49f5865", "<KEY>", "<KEY>", "<KEY>", "e5694d7ea0164181ac2faf1cd0e0dad2", "<KEY>", "3c8ec4e8938e4ff9a6941c7e0aaaff38", "ef9753c849c84002867c4c3374eb420d", "54dd95c9769b41acbe024e69da30ddbc", "71ec3a626c3d48fda98314f67f0cec4f", "e8850d55196942a39ee8f36486c50492", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "f09bbe9c0c194ecab12b33fcc320be8b", "<KEY>", "<KEY>", "<KEY>", "d42f3639eb8a48edac7917d510e53d75", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "cf23df9d9d3b4df3adf6dab18ead0d23", "61a33f497b0b4e808c2a7e653eb4378b", "<KEY>", "<KEY>", "cd75affd3a74429887ec9d4d416a70da", "b9fe9fe76413419aba9c81830616c4b4", "f0f8dc1279ef4e84bf2e720da5f5a65a", "6950c66ac1d0421fa9306b839403e3cb", "<KEY>", "e7f85419574e4a52bbda8cc9afe451b4", "0e1f861be3d941c5a36a294728174247", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "690c9cf90cff4afd8752ff4e7366ca7f"]} id="Zzei128CfWWa" outputId="38a114a3-03a2-4eff-883c-9869ed1aa236"
# ----------- test model output shape
sample_model = OurModelBert( bert_model="bert-base-cased",
                             hidden_size=100,
                             random_seed=12,
                             )
inputs = sample_model.tokenizer.batch_encode_plus(["this is a senten ce", "dada af"], add_special_tokens=True, padding='max_length', max_length=20, return_tensors="tf")
token_position_data = tf.constant([(3,5) for _ in range(2)])
# NOTE(review): no feature embeddings are passed here, yet call()/train_step
# take a `feature_emb` argument — this smoke-test cell looks stale relative
# to the current model signature; confirm it still runs.
output = sample_model(inputs, token_position_data)
print(output.shape)
# ----------- test fit and predict
sample_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))
sentences = ["this is a senten ce", "dada af", "third sentence is this ok"]
tokens = ["senten", "a", "this ok"]
y = tf.random.uniform((3, 1), dtype=tf.float32, minval=0, maxval=1)
# NOTE(review): _fit() requires `feature_emb` before `complexity`; this call
# omits it and would raise TypeError against the signature defined above.
loss = sample_model._fit(
    sentences=sentences,
    tokens=tokens,
    complexity=y,
    epochs = 10,
    batch_size = 32,
)
# + id="wCK6HIYqNZdT"
# project root on the mounted Google Drive
FOLDER_PATH = "/content/drive/MyDrive/CS60075-Team28-Task-1"
DATA_FOLDER = FOLDER_PATH+"/data/preprocessed"
# + colab={"base_uri": "https://localhost:8080/", "height": 235} id="GbDgSUIONZPo" outputId="7c71fb04-115c-478a-bc47-d81f3d76caed"
# load the preprocessed single-token training split
df = pd.read_csv(os.path.join(DATA_FOLDER, "lcp_single_train_preprocessed.csv"), index_col=0)
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="6UKrJ6TjCRXr" outputId="16a35002-018a-4c2c-a8e8-2e22a4f05867"
# from google.colab import drive
# drive.mount('/content/drive')
# + id="Ni7OUTHClaGh"
# coerce text columns to str (guards against NaN rows becoming floats)
df['sentence'] = df['sentence'].astype(str)
df['token'] = df['token'].astype(str)
sentences = list(df["sentence"])
tokens = list(df["token"])
complexity = list(df["complexity"])
# + colab={"base_uri": "https://localhost:8080/", "height": 409, "referenced_widgets": ["4740168a46c64e0181774b2b3f278370", "f4cf937a77d9404cb438f8e7c20ce86d", "7a90608aafaf46e2a34ffec6b164f3f4", "<KEY>", "55e766ca941d45cf8d9d59da259e1a83", "<KEY>", "c53f1344f1fc4e44ab05d040c052b0d8", "be80c067a1b6459e82b109830e0e6d9a"]} id="biYJCX9Gl-tn" outputId="e6c05fc4-c0a6-4546-fac0-39bfa18dbc4f"
model = OurModelBert(bert_model=PRETRAINED_BERT_MODEL, hidden_size=10, random_seed=42)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001))
# NOTE(review): _fit() takes (sentences, tokens, feature_emb, complexity,
# epochs, batch_size); this call passes only 5 positional arguments, so
# `complexity` lands in `feature_emb` and the call would raise TypeError —
# likely a cell left over from an earlier, feature-less version of _fit.
model._fit(sentences, tokens, complexity, 10, 64)
# + colab={"base_uri": "https://localhost:8080/", "height": 408} id="wnbh29tUBkuS" outputId="a4273ebd-47dc-4a7b-d5fe-ef39514962dc"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="kAbuVC8qBpX5" outputId="1b9564d6-6a55-4f78-9cca-30f1bbb3ce09"
# exploratory cell: inspect what the BERT tokenizer produces for a sample
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
tok = tokenizer.encode_plus("this is a senten ce", add_special_tokens=True, padding='max_length', max_length=20, return_tensors="tf")
print(tok)
# model = TFBertModel.from_pretrained(PRETRAINED_BERT_MODEL)
# last_hidden_state,pooler_output = model(**tok, return_dict=False)
# cls = last_hidden_state[:,0,:]
# + id="G-xqYJrPFems"
# + id="MnKAO-03E3y3"
# tiny fixture for checking the token-position logic by hand
sentences = ["this is a senten ce"]
tokens = ["senten"]
# + colab={"base_uri": "https://localhost:8080/"} id="c4M9lqZUFRx8" outputId="3ffdd6d6-6334-4f08-c920-538e065b9e7c"
("his is a senten ce").split(tokens[0])
# + colab={"base_uri": "https://localhost:8080/"} id="4YgVK-tsF64u" outputId="3447c8d7-b730-4107-a11c-7a111242871c"
# subword ids for the prefix before the target token (used to locate it)
tokenizer.encode(sentences[0].split(tokens[0])[0], add_special_tokens=False)
# + id="7MYnl50lXYCn"
# whitespace-word variant of the position computation (not subword-based,
# unlike the version used inside _fit/_predict)
token_position_data = [
    [
        len(s.split(t)[0].split()),
        len(t.split())
    ]
    for s,t in zip(sentences, tokens)
]
# + colab={"base_uri": "https://localhost:8080/"} id="bPI4S9SDFCTm" outputId="5c0f74c0-50d6-48dd-89ea-356cd4c4dda5"
token_position_data
# + id="jTZoTE7TFGZi"
# Dataset = namedtuple('Dataset', 'name, train, test')
# Model = namedtuple('Model', 'type, name, dimension, corpus, model')
# from gensim.test.utils import datapath, get_tmpfile
# from gensim.models import KeyedVectors
# from gensim.scripts.glove2word2vec import glove2word2vec
# MAIN_PATH = 'embeddings/'
# glove_models = []
# glove_defs = [ Model('glove', 'glove.6B.300d.txt', 300, 'wikipedia+gigaword5', None)]
# for model in glove_defs:
# glove_file = MAIN_PATH + model.name
# tmp_file = get_tmpfile(model.name + '-temp')
# glove2word2vec(glove_file, tmp_file)
# vecs = KeyedVectors.load_word2vec_format(tmp_file)
# glove_models.append(Model(model.type, model.name, model.dimension, model.corpus, vecs))
# print('load model : {}'.format(model.name))
# print(glove_models)
# tokenizer = Tokenizer()
# tokenizer.fit_on_texts(sentences_train_list)
# words_to_index = tokenizer.word_index
# len(words_to_index)
# vocab_len = len(words_to_index)
# embed_vector_len = word_to_vec_map['hello'].shape[0]
# emb_matrix = np.zeros((vocab_len+1, embed_vector_len))
# for word, index in words_to_index.items():
# embedding_vector = word_to_vec_map.get(word)
# if embedding_vector is not None:
# emb_matrix[index, :] = embedding_vector
# embedding_layer = Embedding(input_dim=vocab_len, output_dim=embed_vector_len, input_length=256, weights = [emb_matrix], trainable=False)
# emb_matrix.shape
# + id="PRHy3paSAvJW"
# + id="YyLghUFPAyAr"
| attention_multi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GMaina716/WEEK-3-IP/blob/main/Moringa_Data_Science_Prep_W3_Independent_Project_2021_05_George_Maina_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="vH74O0kczRzY"
# we start by uploading the necssary packages that we will need
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] id="_e8_rJDCF9uC"
# **Data Understanding and Data Preparation**
# + [markdown] id="KMjvyYafGRN9"
# On this section, we will upload the data sets,
# then combine the datasets with daily records into one dataset called telcom.
# We will look for any anomalies in the data, such as missing values and outliers
# Missing values will be filled or dropped depending on the extent of their numbers
# Columns with values that will not be needed in the analysis will be dropped.
# We will analyse the two datasets separately by looking at the statistical attributes of the data.
# We will also look at the shape, size and dimensions of the individual datasets.
# We also will check for the number of missing values and uniques values in the columns
# Where there are outliers, we will drop the rows with them.
# We will then combine the cells dataset and the telcom dataset
# + id="-FlufRy4zwnd"
#importing the datasets to be used
#four datasets have been provided
cell=pd.read_csv('/content/cellsgeo.csv',sep = ';')
td1=pd.read_csv('/content/Telcom_dataset.csv')
td2=pd.read_csv('/content/Telcom_dataset2.csv')
td3=pd.read_csv('/content/Telcom_dataset3.csv')
# + id="i1ejij-N1KO6" colab={"base_uri": "https://localhost:8080/", "height": 133} outputId="01ac9989-ef17-4f33-cd17-87d33a5bc603"
#tocreate a copy of the first dataset
cells=cell
cells.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="HU4OHZM31PDm" outputId="c24ae89a-f988-4892-83da-37dd68777100"
# we start by lookingat how our dataset is
cells.info()
# + colab={"base_uri": "https://localhost:8080/"} id="1XLKbbM61PE1" outputId="d533b9a5-9660-4927-adb9-bc87142d58ae"
#from the cell above,w e see that there are columns with missing values
# we will get the missing values and the columns they are in
column_missing=cells.isna().sum()
column_missing=column_missing[column_missing!=0]
print('columns wit missing values:',len(column_missing))
column_missing.sort_values(ascending=False)
# + id="IQUMhAgp1PIR"
# from the cell above, We observe that there are 3 columns with missing data
# the columns are STATUS, ZONENAME, AREA
# NOTE(review): the next sentence says the rows could be dropped, but the
# code below actually FILLS the missing values — text and code disagree
# The 3 columns have little missing data and thus we can drop the rows with missing data in those columns
#status has 67 missing data points
#zonename has 6 missing data points
#area has 23 missing data points
# we will fill the missing data points using different techniques
# + colab={"base_uri": "https://localhost:8080/", "height": 133} id="mAf_me1O1m3v" outputId="d43ade9e-fa96-4e64-9cf3-b9d68fc9a759"
# to find the percentage of missing value points relative to their totals
column_missing2=cells.isna().sum()/cells.isna().count().sort_values(ascending=False)
column_missing2=column_missing2[column_missing2!=0]
column_missing2.sort_values(ascending=False)
# we then combine the missing counts and percentages into one table using concat
missing_table=pd.concat([column_missing,column_missing2],axis=1,sort=False,keys=["total","Percentage"])
missing_table
# + id="1V7HNLE-1m42"
#We fill the columns with missing values
cells['STATUS'].fillna(method='bfill',inplace=True)   # backfill from the next valid row
cells['ZONENAME'].fillna(method='bfill',inplace=True)
cells['AREA'].fillna("unknown",inplace=True)          # sentinel category for unknown areas
# we drop columns that we will not be using for our analysis
# (dropping via `cell` works because `cells` aliases the same DataFrame)
cell.drop('Unnamed: 0',axis=1,inplace=True)
cell.drop(['LONGITUDE','LATITUDE'],axis=1,inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 133} id="9hLbkKRn1m8G" outputId="1f7553bd-a9f0-4282-b50b-bd2e4bbf77c6"
# a glimpse of how our dataset looks like
cells.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="aEowLWf93qUo" outputId="09b345b4-5867-4822-a944-fee716c0da5f"
# to get the shape, size and dimensions of the dataset
print('cells table has shape:',cells.shape)
print('cells table is of size:',cells.size)
print('cells table is of dimension:',cells.ndim)
# + colab={"base_uri": "https://localhost:8080/"} id="i4N0Bzmu2RP8" outputId="7e4d1431-10ed-4847-880a-8241bf8de254"
# to look for the number of unique values in the columns
for i in cells:
    print(i,cells[i].nunique())
# Interpretation of this is that
# in the column ville, there are 504 unique cities/towns
# + colab={"base_uri": "https://localhost:8080/", "height": 162} id="jQigeaz12RRD" outputId="811febe4-4001-4e28-f02f-32a779ee4f0f"
# the describe method gives summary statistics of the numerical columns
cells.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="ywiVkNfm2RVg" outputId="c0823e20-df06-43fc-ad68-f86625efeee7"
# to look at the most repeated values per column
# (`cell` is the same object as `cells`, so either name works here)
for i in cells:
    print(i,cell[i].mode())
# + colab={"base_uri": "https://localhost:8080/"} id="gPuSg3NS1m9o" outputId="ea8402e1-6d74-4ab7-8522-8f40371280d4"
#to check the data type of each column
cells.dtypes
# + id="AAlUJRkgIG5J"
# + colab={"base_uri": "https://localhost:8080/", "height": 133} id="k9pabp-e1nBS" outputId="8fa178e0-e30c-4918-84ec-19b350f1d03c"
# stack the three daily datasets row-wise into one big table
telcom = pd.concat([td1,td2,td3],axis=0,sort=False)
telcom.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="GGuEA9Qm26YH" outputId="a63189cd-3e38-435f-edb8-173e74e30a6f"
#look at its shape,size and dimensions
print('telcom table has shape:',telcom.shape)
print('telcom table is of size:',telcom.size)
print('telcom table is of dimension:',telcom.ndim)
# + colab={"base_uri": "https://localhost:8080/"} id="_k-p5ULD26Zd" outputId="fcee50a7-583d-461b-91a9-ae6da2d30e64"
#getting a quick view of how our new dataset is
telcom.info()
# + colab={"base_uri": "https://localhost:8080/"} id="Xs69x02y26dM" outputId="c60254b9-1414-4dbe-fa58-dd33885282c7"
#looking for missing values in the new data frame
missing3=telcom.isna().sum()
missing3=missing3[missing3!=0]  # only report columns that have NaNs
print('Number of missing variables per column', len(missing3))
missing3.sort_values(ascending=False)
# + id="7QlxB28wRLeG"
#from the output above we can see that site_ID was the only column with missing values
# + colab={"base_uri": "https://localhost:8080/", "height": 277} id="cHRDMnUv26e4" outputId="c0cd4141-27df-45bf-eb01-3837cf7d915d"
telcom.describe()  # numeric summary; used to spot implausible VALUE entries
# + colab={"base_uri": "https://localhost:8080/", "height": 607} id="J-l9Q0Zc26iO" outputId="56a15128-8876-414e-c019-c9c47b83b637"
#we observe that the records in the VALUE column contain an outlier
#the maximum value is 12900 — this is illogical
#we treat the value as an outlier.
#outliers are dropped (in the following cell)
plt.figure(figsize=(8,10))
plt.title('Box Plot of Value to Look For Outliers',color='blue')
plt.boxplot(telcom['VALUE'])
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="pJ7xoyO_4ViP" outputId="2e08c38e-f56c-46a5-ec31-344122715eda"
#from the boxplot we can observe that there are other 7 outlier values
#most of the data lies between 0 and 2400
#we will drop rows with these outlier values
#threshold=2400
print(np.where(telcom['VALUE']>2400))
# BUG FIX: the original chained `&` over equality tests against several
# different values; those conditions can never all hold at once, so the
# resulting index was always empty and no outliers were ever dropped.
# Select every row above the stated threshold instead.
todrop = telcom[telcom['VALUE'] > 2400].index
telcom.drop(todrop, inplace=True)
# + [markdown] id="FDL-7btel5qh"
# To merge the two tables so as to have one dataset that we will use our analysis on
# + id="6p5mu4jLjpTJ"
# to merge the two tables
# table cells and telecom
# + id="UJiwpA2WRbep"
# inner join: keep only records whose SITE_ID has a matching cell-site entry
merged_telecom=cells.merge(telcom,how='inner',left_on=['SITE_CODE'],right_on=['SITE_ID'])
# + colab={"base_uri": "https://localhost:8080/", "height": 680} id="yj_Nwd69SATg" outputId="b49a71dd-2b37-4196-d831-06a33386cbcb"
merged_telecom.tail(10)
# + colab={"base_uri": "https://localhost:8080/"} id="Gy6YbyqtplLP" outputId="d9c897af-00ca-4b48-f4aa-7172f6649d0d"
#to drop duplicates
merged_telecom.drop_duplicates(inplace=True)
merged_telecom.shape
# + colab={"base_uri": "https://localhost:8080/"} id="eWzZKOPE4VkM" outputId="d2a50098-a2ca-47e1-e89e-dd478497480a"
merged_telecom.info()
# + colab={"base_uri": "https://localhost:8080/"} id="eXWb6qEtSYRN" outputId="6cf8eaf7-9a6e-4cd3-9a08-98f2161ac614"
# recheck missing values after the merge
merged_null=merged_telecom.isna().sum()
merged_null=merged_null[merged_null!=0]
print('number of columns with missing values are',len(merged_null))
merged_null.sort_values(ascending=False)
# + colab={"base_uri": "https://localhost:8080/"} id="7KoED64d996B" outputId="932a5c3c-76fb-4dba-f009-2f7a10fdf013"
#to get a look at the attributes of the table
print('shape of the combined dataset: ',merged_telecom.shape)
print('size of the combined dataset: ',merged_telecom.size)
print('dimensions of the merged dataset: ',merged_telecom.ndim)
# + id="fZRBbEbhj-HO"
#to drop columns that will not be needed in this analysis
merged_telecom.drop(['LOCALISATION','DECOUPZONE','REGION','SITE_CODE','CELL_ON_SITE','DW_A_NUMBER_INT','DW_B_NUMBER_INT','COUNTRY_B','COUNTRY_A'],axis=1,inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="jKFx-Wcclkx4" outputId="503943a0-4cd6-4477-842b-69b99290bf6f"
#to check that the columns have been dropped
merged_telecom.columns
# + colab={"base_uri": "https://localhost:8080/"} id="HvYmBFG3b9sI" outputId="3e40651a-d414-4455-b96f-14bbf755f310"
merged_telecom.dtypes
# + [markdown] id="9asKg-psI6nE"
# **ANALYSIS**
# + [markdown] id="qwcb2dAZa8Ow"
# where most users are from
# + colab={"base_uri": "https://localhost:8080/"} id="6T9355wd1PJS" outputId="3a4601c1-f40b-4f21-cec1-15f6694ba120"
#to get the city with the most users (record count per ville)
merged_telecom['VILLES'].value_counts()
# + id="i4m8yVh-JWkc"
#most calls were made by people in the ville Yopougon
# + [markdown] id="pmGrPffObIOw"
# Ville where most revenue was generated from
# + colab={"base_uri": "https://localhost:8080/"} id="bzCk2OuDbQ71" outputId="ef50d4d9-dafc-4469-bcde-9e32c4e1c42a"
# total revenue (VALUE) per ville, largest first
merged_telecom.groupby('VILLES')['VALUE'].agg('sum').sort_values(ascending=False)
# + id="-xl_YsIkiSvy"
# MTN generated most revenue from the ville Yopougon
#this is consistent with it having the most users
# + [markdown] id="kWCtX0eQiTzJ"
# which was the most used product
# + colab={"base_uri": "https://localhost:8080/"} id="mwya9I0hh7wb" outputId="193cd8ac-5705-4f5c-aef9-7c46cf829679"
print('number unique values in product: ',merged_telecom['PRODUCT'].nunique())
print('unique products:', merged_telecom['PRODUCT'].unique())
print('\n\n')
merged_telecom['PRODUCT'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 492} id="83gcMRlDiCnr" outputId="268f20a4-c245-4ba8-e177-015d0c939730"
# create a plot to see this
plt.figure(figsize=(8,10))
plt.title('MTN PRODUCT USAGE',fontdict={'fontsize':12},color='blue')
# Derive labels from the data itself so slices and labels cannot get out of
# sync (the original hard-coded 'sms'/'Voice' and relied on value_counts
# order). The second-largest slice is still the one pulled out, as before.
product_counts = merged_telecom['PRODUCT'].value_counts()
explode = [0.1 if i == 1 else 0 for i in range(len(product_counts))]
plt.pie(product_counts, labels=product_counts.index, explode=explode, autopct='%1.3f%%')
plt.show()
# + id="_l-_eoQ5bUpg"
# sms services were the most used
# + [markdown] id="3Ay94POIfCyR"
# From which Villes were the most calls from
# + colab={"base_uri": "https://localhost:8080/", "height": 391} id="BMIMSFU4fDLX" outputId="810ca532-79df-4d6b-b414-f538ec53c23a"
# keep only the voice-call records
# (a boolean mask over PRODUCT selects them)
voice_mask = merged_telecom['PRODUCT'].map(lambda product: 'Voice' in product)
rows_voice = merged_telecom[voice_mask]
rows_voice
# + colab={"base_uri": "https://localhost:8080/"} id="5Q6ml-tzfNTK" outputId="988b32ca-8442-4627-bd23-5043c362e767"
# count voice records per ville, busiest first
rows_voice.groupby('VILLES')['PRODUCT'].value_counts().sort_values(ascending=False)
# + id="CRallyyffVEk"
# from the output above, most voice calls came from the ville Yopougon
# + [markdown] id="IZQ8cGI1ipjk"
# From which Villes were the most sms from
# + colab={"base_uri": "https://localhost:8080/", "height": 391} id="fdALcHHkiuYT" outputId="a6f353e7-736f-4471-f68b-5d829cdfaf20"
# to get the rows with only sms records
#we put those rows in a variable called rows_sms
rows_sms=merged_telecom[merged_telecom['PRODUCT'].map(lambda PRODUCT: 'sms' in PRODUCT)]
# BUG FIX: the original displayed and grouped `rows_voice` in this section
# (copy-paste slip), so the "sms" analysis actually re-reported the voice
# numbers; use the rows_sms frame computed above.
rows_sms
# + colab={"base_uri": "https://localhost:8080/"} id="HdK2ORTei5qM" outputId="e914e8d7-92b5-4c90-8e93-fa4af8f34696"
rows_sms.groupby('VILLES')['PRODUCT'].value_counts().sort_values(ascending=False)
# + id="8pg7-icgi8PZ"
#we can see that the ville Yopougnon had the most sms
# + colab={"base_uri": "https://localhost:8080/"} id="YKDha2_Tq4TI" outputId="9c31ab0c-5eb2-4c53-fa39-f9b0837d077a"
merged_telecom['CELL_ID_x'].value_counts()  # activity count per cell id
# + id="FJIDP0tAx-x3"
#most calls were made by the owner of the cell id 122a33b94a,1ce5a37ea9,9e8e5e6e80,d6a6e27eeb
# + [markdown] id="p0haEhlGKdmh"
# Which product brought the most revenue to MTN
# + colab={"base_uri": "https://localhost:8080/"} id="7k1201psKnhC" outputId="7626ac1c-68e1-45ed-edfc-101d17b8b364"
# average revenue per record, split by product
product_revenue_mean=merged_telecom.groupby('PRODUCT')['VALUE'].mean()
product_revenue_mean
# + colab={"base_uri": "https://localhost:8080/", "height": 583} id="vQVsDJZWKj3w" outputId="ec4f4e52-1da5-4495-97aa-ff9df987aae3"
plt.figure(figsize=(9,9))
plt.title('Revenue against Product',fontdict={'fontsize':12},color='blue')
plt.xlabel('product')
plt.ylabel('average price')
product_revenue_mean.plot(kind='bar')
plt.show()
# + id="5Y8dy78VMIhH"
#voice products brought the most revenue for MTN
#revenue from voice products is about eight times that of sms
# + [markdown] id="Zl9jy5FoISUS"
# which area had the most users
# + colab={"base_uri": "https://localhost:8080/"} id="h3Q44BtTFym2" outputId="d62f7181-5e4b-4f02-c4aa-f59f37e6d9bb"
# record count per area
a=merged_telecom['AREA'].value_counts()
a
# + colab={"base_uri": "https://localhost:8080/", "height": 596} id="bGUezSgOFypy" outputId="9fbc0f05-7fa9-40a5-ebc0-e1ef8fd735de"
plt.figure(figsize=(8,8))
plt.title('Area Populatiy of MTN Products',fontdict={'fontsize':10},color='blue')
plt.xlabel('Area')
plt.ylabel('Count')
a.plot(kind='bar')
# BUG FIX: the original had `plt.show` without parentheses, which only
# references the function and never renders the figure.
plt.show()
# + id="0N4gtOPRMTI0"
# Abidjan North had the most users
# + id="0EYNCwRaFywG"
# on average, voice products generated more revenue to MTN. revenue generated from calls is almost 8 times that generated from sms
# + [markdown] id="8Xym0J3pJz8w"
# which area generated most revenue
# + colab={"base_uri": "https://localhost:8080/"} id="AuCtkt7tQY4Z" outputId="aa82aae5-5444-48f0-80a7-de1aba26796e"
# total revenue per area
area_value=merged_telecom.groupby('AREA')['VALUE'].agg('sum')
area_value
# + colab={"base_uri": "https://localhost:8080/", "height": 536} id="NoEA7pcLSUdB" outputId="e4b2e7af-7c8c-4370-e3c7-02e2b9f2f017"
plt.figure(figsize=(9,9))
plt.title('Revenue per Area',fontdict={'fontsize':12},color='blue')
# Label the slices from the groupby index itself: the original hard-coded
# seven label strings and seven explode entries, which raises if the set of
# areas ever changes. The first-listed area is still pulled out slightly.
explode = [0.1] + [0] * (len(area_value) - 1)
plt.pie(area_value, labels=area_value.index, explode=explode, autopct='%1.3f%%')
plt.show()
# + id="KPjohjasTkzX"
#users from Abidjan North generated the most revenue for MTN in the three days
| Moringa_Data_Science_Prep_W3_Independent_Project_2021_05_George_Maina_Python.ipynb |
# # 📝 Exercise M2.01
#
# The aim of this exercise is to make the following experiments:
#
# * train and test a support vector machine classifier through
# cross-validation;
# * study the effect of the parameter gamma of this classifier using a
# validation curve;
# * use a learning curve to determine the usefulness of adding new
# samples in the dataset when building a classifier.
#
# To make these experiments we will first load the blood transfusion dataset.
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.</p>
# </div>
# +
import pandas as pd

# Load the blood transfusion dataset shipped with the course material.
blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv")
# Split the supervised-learning target ("Class") from the input features.
target = blood_transfusion["Class"]
data = blood_transfusion.drop(columns="Class")
# -
# We will use a support vector machine classifier (SVM). In its most simple
# form, a SVM classifier is a linear classifier behaving similarly to a
# logistic regression. Indeed, the optimization used to find the optimal
# weights of the linear model are different but we don't need to know these
# details for the exercise.
#
# Also, this classifier can become more flexible/expressive by using a
# so-called kernel that makes the model become non-linear. Again, no requirement
# regarding the mathematics is required to accomplish this exercise.
#
# We will use an RBF kernel where a parameter `gamma` allows to tune the
# flexibility of the model.
#
# First let's create a predictive pipeline made of:
#
# * a [`sklearn.preprocessing.StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
# with default parameter;
# * a [`sklearn.svm.SVC`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)
# where the parameter `kernel` could be set to `"rbf"`. Note that this is the
# default.
# +
# Write your code here.
# -
# Evaluate the generalization performance of your model by cross-validation with a
# `ShuffleSplit` scheme. Thus, you can use
# [`sklearn.model_selection.cross_validate`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html)
# and pass a [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html)
# to the `cv` parameter. Only fix the `random_state=0` in the `ShuffleSplit`
# and let the other parameters to the default.
# +
# Write your code here.
# -
# As previously mentioned, the parameter `gamma` is one of the parameters
# controlling under/over-fitting in support vector machine with an RBF kernel.
#
# Evaluate the effect of the parameter `gamma` by using the
# [`sklearn.model_selection.validation_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html) function.
# You can leave the default `scoring=None` which is equivalent to
# `scoring="accuracy"` for classification problems. You can vary `gamma`
# between `10e-3` and `10e2` by generating samples on a logarithmic scale
# with the help of `np.logspace(-3, 2, num=30)`.
#
# Since we are manipulating a `Pipeline` the parameter name will be set to
# `svc__gamma` instead of only `gamma`. You can retrieve the parameter name
# using `model.get_params().keys()`. We will go more into detail regarding
# accessing and setting hyperparameter in the next section.
# +
# Write your code here.
# -
# Plot the validation curve for the train and test scores.
# +
# Write your code here.
# -
# Now, you can perform an analysis to check whether adding new samples to the
# dataset could help our model to better generalize. Compute the learning curve
# (using [`sklearn.model_selection.learning_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html))
# by computing the train and test scores for different training dataset size.
# Plot the train and test scores with respect to the number of samples.
# +
# Write your code here.
| notebooks/cross_validation_ex_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="3NyDwK_fuDFF"
# 0. Imports
# + id="5RQ9JSq9rCLv" executionInfo={"status": "ok", "timestamp": 1616621592780, "user_tz": -60, "elapsed": 895, "user": {"displayName": "\u013dudov\u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AO<KEY>0VUNQNyApN5rCT7UJZW89w=s64", "userId": "17075019361640864910"}}
import pandas as pd
import numpy as np
# + [markdown] id="HCMhoFvgujmL"
# 1. Load csv
# + colab={"base_uri": "https://localhost:8080/", "height": 380} id="_xGwxuKMubhg" executionInfo={"status": "ok", "timestamp": 1616621593238, "user_tz": -60, "elapsed": 1332, "user": {"displayName": "\u013dudov\u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AO<KEY>w=s64", "userId": "17075019361640864910"}} outputId="00cc807c-2901-4e04-a475-a2ffda362fad"
# Read the one-day server log.  Fields are semicolon-separated; usecols skips
# the first column (the exported row index).
# FIX: `sep` is passed by keyword -- passing it positionally after the path
# was deprecated in pandas 1.3 and removed in pandas 2.0.
df = pd.read_csv('/content/drive/MyDrive/Škola/WM/UNIXTIME/log_jeden_den.csv',
                 sep=';', usecols=range(1, 10))
df.head()
# + [markdown] id="lOYJG1Jw3Vk8"
# 2. Create new column by converting datetime to unix timestamp
# + colab={"base_uri": "https://localhost:8080/", "height": 432} id="FJhbsTXVvlRX" executionInfo={"status": "ok", "timestamp": 1616621593560, "user_tz": -60, "elapsed": 1619, "user": {"displayName": "\u013dudov\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg_8RtneW7PgTTuiCnx0VUNQNyApN5rCT7UJZW89w=s64", "userId": "17075019361640864910"}} outputId="7ef364ce-e60d-4605-9ded-289112980063"
# Parse the bracketed Apache-style timestamp column (e.g.
# "[25/Mar/2021:10:05:00 +0100]") into timezone-aware datetimes.
df['unixtime'] = pd.to_datetime(df['datetime'], format="[%d/%b/%Y:%H:%M:%S %z]")
# datetime64 values are nanoseconds since the epoch; integer-divide by 1e9
# to obtain whole-second Unix timestamps.
df['unixtime'] = df['unixtime'].values.astype(np.int64) // 10 ** 9
df.head()
# + [markdown] id="vf3Bj2kE5fRW"
# 3. Save to csv
# + id="_Mi8aXdY4jST" executionInfo={"status": "ok", "timestamp": 1616621593945, "user_tz": -60, "elapsed": 1997, "user": {"displayName": "\u013dudov\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg_8RtneW7PgTT<KEY>", "userId": "17075019361640864910"}}
df.to_csv('Laca_unixtime.csv', sep=';')
| UNIXTIME/Unixtime.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Science/NatureOfThermalEnergy/nature-of-thermal-energy.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# + hidden=false
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib import rc
from IPython.display import display, Math, Latex, HTML, clear_output, Markdown, IFrame, Javascript
import ipywidgets as widgets
from ipywidgets import interact, FloatSlider, IntSlider, interactive, Layout
from traitlets import traitlets
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
# %matplotlib notebook
#Convenience function that evaluates a certain cell range
def evaluate_cells(de, a):
    """Ask the classic-Notebook front-end to execute the cells offset by
    [de, a) relative to the currently selected cell."""
    js_call = ('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+%s,'
               'IPython.notebook.get_selected_index()+%s)' % (de, a))
    display(Javascript(js_call))
# + hidden=false
HTML('''<script>
function code_toggle() {
if (code_shown){
$('div.input').hide('500');
$('#toggleButton').val('Show Code')
} else {
$('div.input').show('500');
$('#toggleButton').val('Hide Code')
}
code_shown = !code_shown
}
$( document ).ready(function(){
code_shown=false;
$('div.input').hide()
});
</script>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>''')
# + [markdown] hidden=false
# # <center>Heat and Temperature: The Nature of Thermal Energy </center>
#
# In this Jupyter Notebook you will explore the nature of thermal energy and its effects on different forms of matter using informal observations, experimental evidence, and models. By the end of the notebook you will have gained an understanding on:
#
# - comparing heat transmission in different materials
# - explaining how heat is transmitted by conduction, convection and radiation in solids, liquids and gases
# - describing the effect of heat on the motion of particles; and explain changes of state, using the particle model of matter
# - distinguishing between heat and temperature; and explain temperature, using the concept of kinetic energy and the particle model of matter
# - investigating and describing the effects of heating and cooling on the volume of different materials, and identify applications of these effects
#
# Necessary Background: Basic understanding of kinetic energy and states of matter.
#
# Thermal energy applications are found throughout our daily lives; the means by which a refrigerator functions, the ways we power and heat our homes, the combustion which drives our engines. These are just a few of the many ways we use thermal energy to improve our lives. Consequently, having a better understanding of the nature of thermal energy means having a better understanding of the world around us, natural and manufactured.
# + [markdown] hidden=false
#
# ## Heat vs. Temperature
#
# The difference between temperature and heat is quite subtle. We'd better state the definitions formally:
#
# **Heat** is a type of energy transfer in which energy flow is caused by a difference in temperature.
#
# **Temperature** is a measure of the average heat or thermal energy of the particles in a substance.
#
# These are simple explanations for the time being and will become clearer over the course of this notebook. heat is a *microscopic* quantity which gives rise to the *macroscopic* quantity of temperature. We often think of heat and temperature interchangeably but they have fundamentally different meanings. Heat can be exchanged between matter and temperature can be possessed by matter. Over the course of this notebook the differences will become clearer.
#
# ---
#
# *Side note*: **Macroscopic vs Microscopic**
#
# Macroscopic quantities can be thought of as the things that you can *see* and quantify, whereas microscopic quantities are the opposite of this; they need some more detailed measuring tools in order to observe. For example, consider the picture of a forest below. Many trees come together to make up the entire forest. In the picture, these trees are comprised of many different shades of green. As we zoom in further, we see that these shades are actually just individual square pixels, each with one colour and brightness. The brightness, colour, and size of each pixel can be seen as the microscopic properties. The macroscopic properties would be the individual trees and the forest that they form. Microscopic properties can only be seen as we zoom into the image. Similarly, macroscopic properties can only be seen as we zoom out.
#
# <img src = "images/macroscopic_forest.png" height = 550 width = 550 style = "border-radius: 8px">
#
# + [markdown] hidden=false
# # <center> Particle Model of Matter </center>
# In order to help you understand heat, it will be useful to think of the *Particle Model of Matter*. This model was first proposed by the Greek philosopher Democritus over 2000 years ago. He thought of being able to cut up matter, the stuff that makes up everything in the world, into smaller and smaller parts. Eventually, one would reach its basic components; the particles that make up all matter. The term particle is still used everywhere in the physical sciences. They can be simply thought of as small objects with certain properties such as size, speed, or colour. Typically they interact with one another in terms of these properties, creating some macroscopic property. Using the concept of a particle has helped us model how different microscopic quantities combine to create macroscopic ones. The particle model states that:
#
# ### 1. All matter is made up of tiny particles
#
# Place your mouse over the image below to interact.
# + hidden=false language="html"
#
# <div style="text-align: center">
# <iframe width="500px" height="420px" src="animations/cube_grid.html" frameborder="0"></iframe>
# </div>
#
# + [markdown] hidden=false
# ### 2. The particles are constantly moving
#
# Since the particles are not at absolute zero (0 Kelvin), they still contain a bit of thermal energy. When the particles are bound in a solid structure, like above, this causes them to vibrate slightly. Particles in any state will always vibrate slightly unless they are at absolute zero temperature. Below is the same solid as above, but depicted more realistically.
# + hidden=false language="html"
#
# <div style="text-align: center">
# <iframe width="450px" height="420px" src="animations/vibrating_cube.html" frameborder="0"></iframe>
# </div>
# + [markdown] hidden=false
# *Side Note*: **absolute Zero**
#
# The Kelvin scale is an absolute temperature scale using 0 as the point where all motion due to heat stops. The Celsius scale on the other hand uses the freezing point of water as its zero, which corresponds to 273.15 Kelvin. This can be inconvenient for many scientists, especially when quantities have to be multiplied by temperature, thus the Kelvin scale is the international standard and is recognized as the base unit of temperature. Absolute zero is an *absolute* since all motion for any type of matter will stop at this temperature. That being said, absolute zero is actually physically impossible to reach <a href = "https://www.sciencealert.com/after-a-century-of-debate-cooling-to-absolute-zero-has-been-declared-mathematically-impossible">Click here if you're interested</a>. There is ongoing research on cooling matter to extremely low temperatures in order to observe its behaviour when there is nearly zero thermal motion.
# + [markdown] hidden=false
# ### 3. The particles move faster as you heat them up
# + hidden=false language="html"
#
# <div style="text-align: center">
# <iframe width="450px" height="420px" src="animations/particle_in_a_box.html" frameborder="0"></iframe>
# </div>
#
# + [markdown] hidden=false
# Statement 3 of the particle model offers the clearest description for the difference between temperature and heat. As the temperature inside the square increases, the particles move faster. This is because the particles have more heat energy, but what we actually measure is temperature. The particles collide with the sides of the box more frequently. The more frequent the collisions, the higher the temperature we measure. Above is a graphic describing how the 'particles' that make up water might behave as they are heated.
# + [markdown] hidden=false
# ---
# ## <center>Changes of State</center>
#
# Heat transfer plays a deciding role in the changes of state of matter (Solid, Liquid or Gas). Let's break down how heat energy changes the state of matter and what these processes are called. Matter will be thought of as a collection of particles. The behaviour of these particles is determined by the state of matter in which they are in:
#
# - Particles in a **solid** are tightly packed. They have very little motion other than vibrating in the location that they are in.
#
# - Particles in a **liquid** are able to move past one another. This is what gives liquids the property of being able to "flow".
#
# - Particles in a **gas** can essentially move around freely, occasionally colliding into one another. This is why gases have so much free space.
#
# Note the use of a macroscopic quantity to used to describe microscopic ones (state $\rightarrow$ motion).
#
# ### Why do the changes happen?
#
# Matter changes state due to heat energy. Think of the particles in a solid. As they gain more heat energy, the particles themselves gain more kinetic energy. More kinetic energy means they are moving faster, making them collide into one another more often. Eventually the bonds that hold the solid together cannot maintain the amount of collisions and the solid begins to break apart, into a liquid. This is known as melting.
#
# Once the matter has melted, the particles begin to flow, yet they still maintain a bit of their structure; they are still "connected". If you were to give them even more heat energy, the structure begins to fall apart, and eventually the matter becomes a gas where the particles can move around freely and disperse from one another. This process is called evaporation.
#
# Some of the main characteristics of a substance are the temperatures at which these transitions happen. These are known as the *melting point* and *boiling point* of the substance. For example, water has a melting point of 0$^{\circ}$C and a boiling point of 100$^{\circ}$C (SATP) and these temperatures are a fundamental part of what it means to be "water". The acronym SATP stands for Standard Ambient Temperature and Pressure, and is used as a standard ruler for experimental measurements. The melting and boiling points of water change at different pressures and therefore the pressure at which these quantities were measured must be specified as well. This allows for accurate comparisons between different experiments and laboratories.
#
# The different changes that occur in matter are pictured below. Try to memorize the names of the different changes of state. Hover your mouse over each black box to see the names of them. Once you press the "Play memory game" button, you won't be able to see their names anymore and will have to match the numbers to the name of change of state. See how many times you can get them correct! You'll have them memorized in no time :-)
# + hidden=false
# %matplotlib notebook
#Memory game for state transitions
import random

# Pixel coordinates of the six black answer boxes on the diagram image.
x = [470,550,810,810,1100,1180]
y = [550,550,835,905,550,550]
img = plt.imread("images/changes_of_state.png")
fig, ax = plt.subplots(figsize = (8,8), num = 'Changes of State of Water')
ax.imshow(img)
# Names of the changes of state, ordered to match the (x, y) box positions.
names = ['Melting','Freezing','Condensation','Vaporization','Deposition','Sublimation']
# NOTE(review): norm and cmap are not referenced in this cell -- kept in case
# a later cell relies on them; confirm before deleting.
norm = plt.Normalize(1,4)
cmap = plt.cm.RdYlGn
sc = plt.scatter(x,y,c = 'k', marker = 's', alpha = 1, s = 250)
# Hover tooltip, hidden until the mouse enters a box.
# FIX: the original cell created this annotation twice and immediately
# discarded the first copy; the redundant creation was removed.
annot = ax.annotate("", xy=(0,0), xytext=(30,30),textcoords="offset points",
                    bbox=dict(boxstyle="round", fc="w"),
                    arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
def update_annot(ind):
    """Reposition the hover tooltip on the hovered marker and set its label."""
    hovered = ind["ind"]
    # Anchor the tooltip at the first hovered point.
    annot.xy = sc.get_offsets()[hovered[0]]
    annot.set_text(" ".join(names[n] for n in hovered))
    annot.get_bbox_patch().set_alpha(1)
def hover(event):
    """Mouse-motion handler: show the tooltip over a marker, hide it otherwise."""
    if event.inaxes != ax:
        # Pointer is outside the plot axes; leave the tooltip as-is.
        return
    contains, ind = sc.contains(event)
    if contains:
        update_annot(ind)
        annot.set_visible(True)
        fig.canvas.draw_idle()
    elif annot.get_visible():
        annot.set_visible(False)
        fig.canvas.draw_idle()
# Wire the hover tooltip to mouse movement and strip plot chrome.
fig.canvas.mpl_connect("motion_notify_event", hover)
fig.patch.set_visible(False)
ax.axis('off')
plt.show()
# Button that starts the memory game (handler attached further below).
memgame = widgets.Button(description= "Play Memory Game",
                        layout = Layout(width='30%', height='60px'),
                        button_style = 'info'
                        )
# NOTE(review): `reset` is created but never displayed or wired to a handler
# in this cell -- looks like dead code; confirm before removing.
reset = widgets.Button(description="Reset Game",
                      layout = Layout(width='30%', height='60px'),
                      button_style = 'info'
                      )
display(memgame)
# The six possible answers, shared by every selector below.
STATE_CHANGE_OPTIONS = ['Melting','Freezing','Condensation','Vaporization','Deposition','Sublimation']

def _make_state_radio(description):
    """Build one answer selector offering the six changes of state."""
    return widgets.RadioButtons(
        options=STATE_CHANGE_OPTIONS,
        description=description,
        value=None,
        disabled=False)

# One selector per numbered box in the memory game (the six identical
# hand-written constructions were factored into the helper above).
a1 = _make_state_radio('1?')
a2 = _make_state_radio('2?')
a3 = _make_state_radio('3?')
a4 = _make_state_radio('4?')
a5 = _make_state_radio('5?')
a6 = _make_state_radio('6?')
# Lay the selectors out in two rows of three.
Qs1to3 = widgets.HBox([a1,a2,a3])
Qs4to6 = widgets.HBox([a4,a5,a6])
# Grading button; its click handler is attached in a later cell.
button_check = widgets.Button(description="Check your answers",
                              layout = Layout(width='30%', height='60px'),
                              button_style = 'info'
                              )
def button_onclick(b):
    """Start the memory game: hide the tooltip, number the boxes randomly,
    and display the quiz widgets."""
    # BUG FIX: `a` must be module-level -- the check_button handler defined
    # after this function grades answers via `names[a.index(...)]` and would
    # otherwise hit a NameError, because `a` was only a local here.
    global a
    annot.remove()
    x = [470,550,810,810,1100,1180]
    y = [550,550,835,905,550,550]
    ax.axis('off')
    plt.scatter(x,y,c = 'k', marker = 's', alpha = 1, s = 250)
    a = [1,2,3,4,5,6]
    corr = 10  # small offset so each number sits inside its box
    random.shuffle(a)
    # Stamp the shuffled numbers onto the six boxes.
    for i,j,k in zip(x,y,a):
        ax.annotate(str(k), xy = (i-corr,j+corr), color = 'w')
    display(Latex("What is the change of state corresponding to box number..."))
    display(Qs1to3)
    display(Qs4to6)
    display(button_check)
# Output area for feedback from the answer checker below.
output0 = widgets.Output()
def check_button(x):
    # Grade the six answers against the shuffled box numbering.
    # NOTE(review): `a` is only assigned inside button_onclick, where it is a
    # local variable -- this lookup appears to raise NameError at click time
    # unless `a` is made global somewhere; confirm.
    output0.clear_output()
    if a1.value == names[a.index(1)] and a2.value == names[a.index(2)] and a3.value == names[a.index(3)] \
    and a4.value == names[a.index(4)] and a5.value == names[a.index(5)] and a6.value == names[a.index(6)]:
        display(Markdown("Correct!"))
        display(play_again)  # play_again is defined in a later cell, before any click can occur
    else:
        # Build per-question Correct/Incorrect feedback.
        inc = []
        answers = [a1.value,a2.value,a3.value,a4.value,a5.value,a6.value]
        for i in range(len(answers)):
            if answers[i] != names[a.index(i+1)]:
                inc.append("Incorrect")
            else:
                inc.append("Correct")
        output0.append_display_data(Latex("Those aren't quite correct, please try again."))
        output0.append_display_data(Latex("1. %s, 2. %s, 3. %s, 4. %s, 5. %s, 6. %s" %tuple(inc[:])))
        display(output0)
# Wire the handlers to their buttons.
button_check.on_click(check_button)
memgame.on_click(button_onclick)
# Button offered after a correct answer to restart the game.
play_again = widgets.Button(description = "Play Again",
                           layout = Layout(width='30%', height='60px'),
                           button_style = 'info'
                           )
def joue_encore(x):
    """Restart the memory game: redraw the diagram, reshuffle the numbering,
    and show a fresh quiz with its own local grader."""
    clear_output()
    img = plt.imread("images/changes_of_state.png")
    fig, ax = plt.subplots(figsize = (8,8))
    ax.imshow(img)
    # Pixel coordinates of the six answer boxes (same layout as the first round).
    x = [470,550,810,810,1100,1180]
    y = [550,550,835,905,550,550]
    ax.axis('off')
    plt.scatter(x,y,c = 'k', marker = 's', alpha = 0, s = 250 )
    a = [1,2,3,4,5,6]
    corr = 10  # small offset so each number sits inside its box
    random.shuffle(a)
    for i,j,k in zip(x,y,a):
        ax.annotate(str(k), xy = (i-corr,j+corr), color = 'w')
    display(Latex("What is the change of state corresponding to box number:"))
    display(Qs1to3)
    display(Qs4to6)
    button_check = widgets.Button(description="Check your answers",
                                  layout = Layout(width='30%', height='60px'),
                                  button_style = 'info'
                                  )
    display(button_check)
    output= widgets.Output()
    def check_button(x):
        # Grade the six answers against this round's local shuffled numbering `a`.
        output.clear_output()
        if a1.value == names[a.index(1)] and a2.value == names[a.index(2)] and a3.value == names[a.index(3)] \
        and a4.value == names[a.index(4)] and a5.value == names[a.index(5)] and a6.value == names[a.index(6)]:
            display(Markdown("Correct!"))
            display(play_again)
        else:
            inc = []
            answers = [a1.value,a2.value,a3.value,a4.value,a5.value,a6.value]
            for i in range(len(answers)):
                if answers[i] != names[a.index(i+1)]:
                    inc.append("Incorrect")
                else:
                    inc.append("Correct")
            output.append_display_data(Latex("Those aren't quite correct, please try again."))
            output.append_display_data(Latex("1. %s, 2. %s, 3. %s, 4. %s, 5. %s, 6. %s" %tuple(inc[:])))
            # BUG FIX: was `dipslay(output)` -- a NameError at runtime; the
            # feedback output was never shown.
            display(output)
    button_check.on_click(check_button)
play_again.on_click(joue_encore)
# + [markdown] hidden=false
# ### <center>Visualizing the changes of state</center>
#
# Below is an interactive animation on the changes of state for *water*. Move the slider to change the temperature of the particles in the box below. Observe where the changes of state occur and how the particles gain more kinetic energy as the temperature increases.
# + hidden=false language="html"
#
# <div style="text-align: center">
# <iframe width="500px" height="420px" src="animations/changes_of_state.html" frameborder="0"></iframe>
# </div>
#
# + [markdown] hidden=false
# # <center> How is Heat Transmitted?</center>
# There are three main methods by which heat is transmitted: conduction, convection and radiation. Below we present the basic theory behind these methods of heat transfer and some applications of how we use them to improve our lives. Examples will be provided where we analyze how the three different methods of heat transmission occur in natural processes.
# + [markdown] hidden=false
# ## 1. Conduction
# Heat transfer via **conduction** occurs when two bodies, with different temperatures, are in physical contact with each other. Conduction is how a stove top heats a pan or why a hot cup feels warm on your hand. Different materials conduct heat faster or slower. The conductivity of material also depends on what state it is in. These differences are expressed by a material property known as the ***thermal conductivity***.
#
# <img src = "images/thermal_conductivity.jpg" width ="480" height="480" style = "border-radius: 8px;">
#
#
#
#
# This graphic provides a visual interpretation of the heat flow, denoted as $\rm Q$ between two bodies in contact at surface $\rm A$ with temperatures $\rm T_1$ and $\rm T_2$. Let's explore how thermal conductivity changes in different materials.
#
# ### Thermal Conductivity
#
# Materials with low thermal conductivity transfer heat slowly and materials with high conductivity transfer heat quickly. Can you think of some benefits of using materials with a different thermal conductivity for commercial use?
#
# For example, consider a to-go mug. The whole purpose of this product is to keep the contained drink warm or cold. If the drink is warm, the contents will be warmer than the mug itself. They will then exchange heat to the body of the mug via conduction. If the drink is cold then the mug will transfer heat to the contents. In each case the mug is in contact with the external environment; the air outside or your hand. The environment itself, depending on its temperature, will either transfer heat to or gain heat from the mug. Since this additional heat transfer is on going, eventually the mug and the drink inside will reach the same temperature as the outside environment.
#
# In either case, one would want the mug to have a low *thermal conductivity* so that it does not easily exchange heat with its environment. This will preserve the temperature of its contents. Below is an interesting video from Veritasium illustrating how conduction occurs differently in different materials.
#
# If you haven't heard of Veritasium before, I strongly encourage you to check the YouTube channel out. He explains a lot of topics in science and there's always something new to learn from each video.
# + hidden=false slideshow={"slide_type": "fragment"} language="html"
# <div style="text-align: center">
# <iframe width="560" height="315" src="https://www.youtube.com/embed/vqDbMEdLiCs" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
# </div>
# + [markdown] hidden=false
# ## 2. Convection
#
# Heat transfer via **convection** occurs due to the movement of molecules in a fluid (gas or liquid). Convection is perhaps the most intuitive method of heat transfer. Since it involves the flow of a gas or liquid from a warmer to a colder temperature within the fluid, it is easy to visualize. Convection does not occur in solid materials because the molecules that make up a solid do not have the ability to flow; this is what makes a solid a solid. Not unlike conduction, convection occurs from the flow of heat energy due a temperature difference within a fluid. The hotter fluid flows to regions where the fluid is cooler. It is the process which governs the currents in our oceans and the movement of air in our atmosphere. Our understanding of convection has helped us forecast the weather and even explain how energy is transported within the sun!
#
# ---
# **Why does the movement occur?**
#
# If we consider the particle model of matter, the fluid moves due to changes in **kinetic energy** of the particles in the fluid. The hotter regions in the fluid, the ones with more heat energy, have more "excited" particles that are colliding into one another more frequently. These regions are less **dense** than the colder regions. In the colder regions, the particles are colliding into one another less frequently therefore they do not spread out as much. Since there are less collisions, the particles are more concentrated in the colder region. These more dense and concentrated regions in the fluid are heavier since they have more particles in them. The cold, heavier regions will sink; allowing for the hotter regions to rise. Convection is the reason why "heat rises" in a room.
#
# ---
# **Examples of convective processes**
#
#
# - In a pot filled with water on the stove, we observe convective heat transfer in the bubbles rising to the top of the pot. These are hotter, *less dense* regions rising to the surface of the liquid. When these regions reach the point the that they are less dense than the surrounding air, they will evaporate and undergo a transition of state from liquid to gas.
#
# <img src="images/pot_boiling.png" width = "400" height = "400" style = "border-radius: 8px;">
#
#
# - In stars, there is a zone in which energy is transported from the core of the star, where energy is produced by nuclear fusion, to the surface. This is known as the *convective zone*. Due to the complicated nature of energy transportation within stars, this process is difficult to model and the subject of ongoing research within the field of stellar astrophysics. Understanding convection has helped astrophysicists know more about our sun and other stars like it!
#
# <img src = "images/convectioninstarsdata.jpg" width="400" height="400" style = "border-radius: 8px;">
#
#
# - In the atmosphere, convection plays a major role in the creation of a thunderstorm. Clouds form as hotter air rises, carrying moisture with them. The air cools on its ascent, causing the moisture to condense. If the air is unstable enough, this process can occur for a longer time. This results in the formation of "cumulonimbus clouds", which are those very tall pillowy clouds that often produce thunder and lightning. Given enough moisture, instability in the air and convection (heat transfer), a thunderstorm is likely to occur.
#
# <img src = "images/convectioninthunderstorms.jpg" width = "500" height = "500" style = "border-radius: 8px;">
#
# ---
# + [markdown] hidden=false
# ## 3. Radiation
#
# Radiation is our third and final form of heat transfer to discuss. Heat transfer via radiation occurs in all matter that has a temperature above absolute 0 (0 Kelvin). It occurs from a conversion of thermal energy into *electromagnetic energy*. Energy is "radiated" away from a body in the form of "light". This light varies in wavelength and is dependent on the temperature of the body; hotter objects emit radiation at shorter wavelengths that carry higher energy. For example, in a fire, you may have heard how the blue part of a fire is hotter than the red part. This is true and is due to the fact that blue light has a shorter wavelength than red light, and since the wavelength is dependent on the temperature of the material emitting it, the blue portion of a fire is hotter.
#
# An interesting consequence of this fact comes from the analysis of the wavelengths of light radiated from the sun. Below is graphic showing the types of radiation emitted from the sun.
#
#
# <img src = "images/earth_sun_plank.jpg" width = "400" height ="400" style = "border-radius: 8px;">
#
#
# The most intensely radiated wavelength from the sun comes from the green part of visible light. Plants, trees, and even our eyes have become fine tuned to absorbing and perceiving more variations of the colour green because this is the most prominent radiation from the sun. Understanding thermal radiation has offered a scientific explanation for why our planet is so green.
#
# ---
# Exercises
# ---
# Below a list of processes in which heat transfer occurs in different forms. You will be asked to check the box for which type of heat transfer you think is correct to match the description. Press "Check Answer" to see if you were correct!
#
# ## <center>Boiling Water </center>
#
# <img src="images/Boiling_Water_pic.jpg" height = "400" width = "400" style = "border-radius: 8px;"/>
#
#
# ---
# + hidden=false
def _make_transfer_radio():
    """Build one answer selector offering the three heat-transfer mechanisms."""
    return widgets.RadioButtons(
        options=['Conduction', 'Convection', 'Radiation'],
        disabled=False,
        value=None,
        continuous_update=True
    )

# One selector per question below (the four identical hand-written
# constructions were factored into the helper above).
a1 = _make_transfer_radio()
a2 = _make_transfer_radio()
a3 = _make_transfer_radio()
a4 = _make_transfer_radio()
#create a button widget to check answers, again calling the button to display
button_check = widgets.Button(description="Check your answers",
                              layout = Layout(width='30%', height='60px'),
                              button_style = 'info'
                              )
#compare answers to responses
#compare answers to responses
def compare(l1, l2):
    """Element-wise equality flags for two lists: 1 where they match, 0 where not.

    Comparison stops at the shorter list, mirroring the original
    range(min(len, len)) loop.
    """
    return [1 if left == right else 0 for left, right in zip(l1, l2)]
# Answer key, one entry per question in display order.
ans1 = ['Radiation', 'Conduction', 'Convection', 'Conduction']
# Output area the grading feedback is rendered into.
out = widgets.Output()
def check_button(event):
    """Grade the four radio-button answers and render feedback into `out`.

    Registered as the click handler of `button_check`; `event` is the Button
    instance ipywidgets passes to the callback (unused).
    """
    out.clear_output()
    rs1 = [a1.value, a2.value, a3.value, a4.value]  # the reader's selections
    if rs1 == ans1:
        # All correct: explain the physics behind each answer.
        # (Fixed typo: "what you feels" -> "what you feel".)
        out.append_display_data(Markdown("Correct! Here's why : </br></br> \
1. The stove top heats up and radiates energy away from it. This is what you feel in your hand as heat. </br> \
2. The stove top conducts heat to the pot since they are in physical contact with one another. </br> \
3. Heat is transferred by fluid motion within the water via convection. </br> \
4. The gas particles carry heat energy, in the form of kinetic energy, which gets transferred via physical contact with your hand and the steam. </br> \
Since the heat transfer occurs via physical contact, the process responsible for the heat transfer is conduction. "))
    else:
        # Mark each answer individually so the reader knows which to retry.
        bien = compare(rs1, ans1)
        h = ['Correct' if flag else 'Incorrect' for flag in bien]
        s = "Those aren't quite correct, please try again."
        corrections = "1. %s, 2. %s, 3. %s, 4. %s" % (h[0], h[1], h[2], h[3])
        out.append_display_data(Latex(s))
        out.append_display_data(Latex(corrections))
# Wire up the grading callback, then lay out the quiz top to bottom:
# each question immediately followed by its selector.
button_check.on_click(check_button)
for question, selector in [
    ('1) After you turn on the stove, you hold your hand above the stove and it feels hot.', a1),
    ('2) The pot starts to heat up when placed on the stove.', a2),
    ('3) Hot water rises to the surface of the pot.', a3),
    ('4) Steam from the pot feels hot on your hand. (Somewhat tricky)', a4),
]:
    display(Markdown(question))
    display(selector)
display(button_check)
display(out)
# + [markdown] hidden=false
# ## <center>Refrigerator</center>
#
# <img src="images/fridge.jpg" height = "300" width="300">
#
#
# + hidden=false
# Pose each refrigerator question and render a fresh selector beneath it.
_questions = [
    '1) Hot air is pumped out of the food compartment',
    '2) You grab a cold popsicle out of the refrigerator. It feels cold on your hand.',
    '3) How did the popsicle get cold? How did it transfer heat energy while sitting in the refrigerator?',
    '4) You open the refrigerator door and it feels cold inside.',
]
_selectors = []
for _question in _questions:
    display(Markdown(_question))
    _selector = widgets.RadioButtons(
        options=['Conduction', 'Convection', 'Radiation'],
        value=None,
        disabled=False
    )
    display(_selector)
    _selectors.append(_selector)
a1, a2, a3, a4 = _selectors
# Grading button shown below the questions.
button_check = widgets.Button(description="Check your answers",
                              layout=Layout(width='30%', height='60px'),
                              button_style='info'
                              )
display(button_check)
#Answers to be checked against, third entry should be != Conduction, fourth is != 'Radiation'
ans = ['Convection', 'Conduction', 'Conduction', 'Radiation']
#compare answers to responses
def compare(l1, l2):
    """Return 1/0 flags marking where the two lists agree (up to the shorter one)."""
    return [int(u == v) for u, v in zip(l1, l2)]
# Display area for the grading feedback.
outtie = widgets.Output()
def check_button(x):
    """Grade the refrigerator quiz and render feedback into `outtie`.

    Per the answer-key comment above: Q1 and Q2 must match the key exactly,
    while Q3 must NOT be Conduction and Q4 must NOT be Radiation (several
    answers are acceptable there). `x` is the Button instance passed by
    ipywidgets (unused).
    """
    outtie.clear_output()
    rs = [a1.value, a2.value, a3.value, a4.value]  # responses
    if rs[0] == ans[0] and rs[1] == ans[1] and rs[2] != ans[2] and rs[3] != ans[3]:
        # Fixed: explanation 2 previously said "via convection" although the
        # answer key requires Conduction, and had the typo "your hand it hotter".
        outtie.append_display_data(Markdown("Correct! Here's why : </br></br> \
1. Hot air flows out of the fridge via convection. </br> \
2. Heat is transferred via conduction since your hand is hotter than the popsicle. </br> \
3. Heat is transferred by radiation or conduction. The food sitting in the refrigerator radiates heat energy and also conducts heat to the colder air within the fridge. </br> \
4. Heat is transferred via conduction from your warmer self to the cold air within the refrigerator."))
    else:
        # Grade each response, then compare the resulting match pattern with
        # the expected pattern (Q1/Q2 equal, Q3/Q4 different).
        a = compare(rs, ans)
        bien = [1, 1, 0, 0]  # This is what "a" should be
        xx = compare(a, bien)
        h = ['Correct' if flag else 'Incorrect' for flag in xx]
        outtie.append_display_data(Latex("Those aren't quite correct, please try again."))
        outtie.append_display_data(Latex("1. %s, 2. %s, 3. %s, 4. %s" % (h[0], h[1], h[2], h[3])))
button_check.on_click(check_button)
display(outtie)
# + [markdown] hidden=false
# # Conclusion
# ---
# In this Jupyter Notebook you learned about some aspects of the nature of thermal energy. We differentiated between heat and temperature. The particle model was used to describe the effect of heat on matter. You learned about the various modes of heat transfer:
# - **Conduction**
# - **Convection**
# - **Radiation**
#
# You also saw some examples of how these processes appear in everyday life.
# You also learned the different changes of state of matter. If you don't feel like you have these memorized, I encourage you to play the memory game a few more times.
#
# We exploit thermal energy to improve our daily lives. It is through our understanding of the processes that govern heat that we have been able to do so.
# + [markdown] hidden=false
# **Image References**
#
# - conduction :<font size = "0.5"> https://www.khanacademy.org/science/physics/thermodynamics/specific-heat-and-heat-transfer/a/what-is-thermal-conductivity </font>
# - sun: <font size="0.5"> https://phys.org/news/2016-07-astronomers-gain-insight-magnetic-field.html</font>
# - thunderstorm: <font size = "0.5">https://www.britannica.com/science/thunderstorm </font>
# - boiling pot: <font size = "0.5">https://dr282zn36sxxg.cloudfront.net/datastreams/f-d%3A5f69d56e0b81661f4564d03405f4cd0be18c94afbe66c30f9eb9a17b%2BIMAGE_THUMB_POSTCARD_TINY%2BIMAGE_THUMB_POSTCARD_TINY.</font>
# - radiation diagram: <font size = "0.5"> http://apollo.lsc.vsc.edu/classes/met130/notes/chapter2/plank_e_sun.html </font>
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| Science/NatureOfThermalEnergy/nature-of-thermal-energy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Spark OCR GPU image processing example
# ## Install spark-ocr python package
# Need specify path to `spark-ocr-assembly-[version].jar` or `secret`
secret = ""
license = ""
version = secret.split("-")[0]
spark_ocr_jar_path = "../../target/scala-2.12"
# + language="bash"
# if python -c 'import google.colab' &> /dev/null; then
# echo "Run on Google Colab!"
# echo "Install Open JDK"
# apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
# java -version
# fi
# +
import sys
import os

# On Google Colab, point JAVA_HOME/PATH at the JDK installed by the cell above.
if 'google.colab' in sys.modules:
    _java_home = "/usr/lib/jvm/java-8-openjdk-amd64"
    os.environ["JAVA_HOME"] = _java_home
    os.environ["PATH"] = _java_home + "/bin:" + os.environ["PATH"]
# -
# install from PYPI using secret
# %pip install spark-ocr==$version\.spark30 --extra-index-url=https://pypi.johnsnowlabs.com/$secret --upgrade
# +
# or install from local path
# #%pip install --upgrade ../../python/dist/spark-ocr-3.1.0.spark30.tar.gz
# -
# ## Initialization of spark session
# +
from pyspark.sql import SparkSession
from sparkocr import start
import os

# The licensed Spark OCR build reads its key from this environment variable.
if license:
    os.environ['JSL_OCR_LICENSE'] = license

# Create (or fetch) the Spark session with the Spark OCR jar on the classpath.
spark = start(jar_path=spark_ocr_jar_path)
spark
# -
# ## Imports
# +
from pyspark.ml import PipelineModel
from pyspark.sql import functions as F
from sparkocr.transformers import *
from sparkocr.enums import *
from sparkocr.utils import display_image, display_images
from sparkocr.metrics import score
# -
# ## Read image
import pkg_resources

# Sample check image bundled inside the sparkocr package.
imagePath = pkg_resources.resource_filename('sparkocr', 'resources/ocr/images/check.jpg')
# Load the raw bytes, then decode them into Spark OCR's image struct column.
image_raw = spark.read.format("binaryFile").load(imagePath)
image_df = BinaryToImage().transform(image_raw)
display_images(image_df)
# ## Scaling
# +
# Queue an erosion then a 2x scaling; the chained GPU transforms execute
# when transform() is called on the DataFrame.
scaled_image_df = GPUImageTransformer() \
    .addErodeTransform(1,1)\
    .addScalingTransform(2) \
    .setInputCol("image") \
    .setOutputCol("scaled_image") \
    .transform(image_df)
display_images(scaled_image_df, "scaled_image")
# -
# ## Image Adaptive Thresholding on GPU
# +
# Binarize the original image using Otsu's method.
thresholded_image = GPUImageTransformer() \
    .addOtsuTransform()\
    .setInputCol("image") \
    .setOutputCol("thresholded_image") \
    .transform(image_df)
display_images(thresholded_image, "thresholded_image")
# -
# ## Erosion
# +
# Morphological erosion with a 1x1 kernel on the original image.
eroded_image = GPUImageTransformer() \
    .addErodeTransform(1,1)\
    .setInputCol("image") \
    .setOutputCol("eroded_image") \
    .transform(image_df)
display_images(eroded_image, "eroded_image")
# -
# ## Dilation
# +
# Dilate the eroded output from the previous cell.
dilated_image = GPUImageTransformer() \
    .addDilateTransform(1,1)\
    .setInputCol("eroded_image") \
    .setOutputCol("dilated_image") \
    .transform(eroded_image)
display_images(dilated_image, "dilated_image")
# -
# ## Multiple chained transforms
# +
# 8x scale, Otsu binarization and a 3x3 erosion in one GPU pass; this
# output also feeds the timing comparison and OCR cells below.
multiple_image = GPUImageTransformer() \
    .addScalingTransform(8) \
    .addOtsuTransform() \
    .addErodeTransform(3, 3) \
    .setInputCol("image") \
    .setOutputCol("multiple_image") \
    .transform(image_df)
display_images(multiple_image, "multiple_image")
# -
# -
# ## Metrics
# Compare performance of GPU transforms vs. regular Transformer based pipeline.
# +
from sparkocr.enums import *  # re-imported for this cell when run standalone

# CPU Transformer pipeline equivalent to the chained GPU transforms above:
# 8x scale -> Otsu binarization -> erosion with a square kernel of size 1.
scaler = ImageScaler()\
    .setInputCol("image")\
    .setOutputCol("scaled_image")\
    .setScaleFactor(8.0)

adaptive_thresholding = ImageAdaptiveBinarizer() \
    .setInputCol("scaled_image") \
    .setOutputCol("corrected_image") \
    .setMethod(TresholdingMethod.OTSU)

erosion = ImageMorphologyOperation() \
    .setKernelShape(KernelShape.SQUARE) \
    .setKernelSize(1) \
    .setOperation("erosion") \
    .setInputCol("corrected_image") \
    .setOutputCol("eroded_image")

# Stages run in order: scale, threshold, erode.
pipeline = PipelineModel(stages=[
    scaler,
    adaptive_thresholding,
    erosion
])
# -
# ## Common Spark-OCR transforms
# %%time
# Time 10 runs of the CPU Transformer pipeline (collect() forces execution).
for _ in range(10):
    pipeline.transform(image_df).select("eroded_image").collect()
# ## GPU transforms
# %%time
# Time 10 collects of the GPU-transformed DataFrame for comparison.
for _ in range(10):
    multiple_image.select("multiple_image").collect()
# ## OCR results
# +
# Run OCR on the GPU-preprocessed image, keeping text with confidence >= 35.
result = ImageToText() \
    .setInputCol("multiple_image") \
    .setOutputCol("text") \
    .setConfidenceThreshold(35) \
    .transform(multiple_image)
print("\n".join([row.text for row in result.select("text").collect()]))
| jupyter/SparkOCRGPUOperations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Work
# 1. 請比較使用 l1, l1_l2 及不同比例下的訓練結果
# +
import os
import keras
from keras.datasets import cifar10
from keras.models import Model
from keras.utils import to_categorical
from keras.layers import Input, Conv2D, MaxPool2D, Flatten, Dense
from keras.regularizers import l1, l2, l1_l2
# Disable GPU
# An empty CUDA_VISIBLE_DEVICES hides every GPU from the backend, forcing CPU execution.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# -
# Download (on first run) and load CIFAR-10; each split is an (images, labels) tuple.
train, test = cifar10.load_data()
# +
## Data pre-processing
def preproc_x(x, flatten=True):
    """Scale pixel values into [0, 1]; optionally flatten each sample to 1-D."""
    rescaled = x / 255.
    if not flatten:
        return rescaled
    return rescaled.reshape((len(rescaled), -1))
def preproc_y(y, num_classes=10):
    """One-hot encode integer class labels; already-encoded targets pass through."""
    if y.shape[-1] != 1:
        return y
    return keras.utils.to_categorical(y, num_classes)
# +
x_train, y_train = train
x_test, y_test = test

# Preproc the inputs
# flatten=False keeps the (32, 32, 3) image shape required by the Conv2D layers.
x_train = preproc_x(x_train, flatten=False)
x_test = preproc_x(x_test, flatten=False)
# Per-sample input shape passed to build_cnn below.
img_size = x_train.shape[1:]

# Preproc the outputs: one-hot encode the integer class labels.
y_train = preproc_y(y_train)
y_test = preproc_y(y_test)
# -
def build_cnn(input_shape, regular=l1):
    """Assemble a small CNN whose dense layers use the given weight regularizer.

    `regular` is a regularizer factory (l1, l2 or l1_l2) invoked with its
    default strength for both 256-unit dense layers.
    """
    net_in = Input(shape=input_shape)
    x = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(net_in)
    x = MaxPool2D()(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), padding='same')(x)
    x = MaxPool2D()(x)
    x = Flatten()(x)
    x = Dense(256, kernel_regularizer=regular(), activation='relu')(x)
    x = Dense(256, kernel_regularizer=regular(), activation='relu')(x)
    net_out = Dense(10, activation='softmax')(x)
    return Model(inputs=[net_in], outputs=[net_out])
## Hyperparameter settings
# NOTE(review): LEARNING_RATE is defined but never used — compile() below
# passes the string 'adam', which uses Adam's default learning rate.
LEARNING_RATE = 1e-4
EPOCHS = 5
BATCHSIZE = 256

# Regularizers to compare: pure L1, pure L2 and their combination (elastic net).
regular_dict = {'L1': l1, 'L2': l2, 'ElasticNet': l1_l2}

results = {}
# Train one model per regularizer and record its learning curves.
for name, regular in regular_dict.items():
    keras.backend.clear_session()  # drop the previous model's graph/memory
    model = build_cnn(img_size, regular)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=BATCHSIZE, epochs=EPOCHS,
              validation_data=(x_test, y_test), shuffle=True)
    # Collect results
    # NOTE(review): the "acc"/"val_acc" keys are the old Keras names; newer
    # Keras/TF uses "accuracy"/"val_accuracy" — confirm against the installed version.
    train_loss = model.history.history["loss"]
    valid_loss = model.history.history["val_loss"]
    train_acc = model.history.history["acc"]
    valid_acc = model.history.history["val_acc"]
    exp_name_tag = "{}".format(str(name))
    results[exp_name_tag] = {'train-loss': train_loss,
                             'valid-loss': valid_loss,
                             'train-acc': train_acc,
                             'valid-acc': valid_acc}
# +
import matplotlib.pyplot as plt
# %matplotlib inline

# One colour per experiment; solid lines are training curves, dashed are validation.
color_bar = ["r", "g", "b", "y", "m", "k"]

plt.figure(figsize=(8,6))
for i, cond in enumerate(results.keys()):
    plt.plot(range(len(results[cond]['train-loss'])),results[cond]['train-loss'], '-', label=cond, color=color_bar[i])
    plt.plot(range(len(results[cond]['valid-loss'])),results[cond]['valid-loss'], '--', label=cond, color=color_bar[i])
plt.title("Loss")
plt.ylim([0, 5])  # fix the y-axis range so the three runs stay comparable
plt.legend()
plt.show()

plt.figure(figsize=(8,6))
for i, cond in enumerate(results.keys()):
    plt.plot(range(len(results[cond]['train-acc'])),results[cond]['train-acc'], '-', label=cond, color=color_bar[i])
    plt.plot(range(len(results[cond]['valid-acc'])),results[cond]['valid-acc'], '--', label=cond, color=color_bar[i])
plt.title("Accuracy")
plt.legend()
plt.show()
| Day081_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [1] About
#
# <img style="float: right;" src="../BCYadav_about.png">
#
# - This notebook is a part of tutorial series prepared by <NAME>, Research Scholar @ IIT Roorkee.
# - ORCID iD: https://orcid.org/0000-0001-7288-0551
# - Google Scholar: https://scholar.google.com/citations?user=6fJpxxQAAAAJ&hl=en&authuser=1
# - Github: https://github.com/Bankimchandrayadav/PythonInGeomatics
# - Twitter: https://twitter.com/DrBCY
# - **Recent Publication:** https://rmets.onlinelibrary.wiley.com/doi/10.1002/joc.6562
# - This notebook demonstrates the [clipping the data to study area's resolution]
# ---
# # [2] First time usage for conda users
# +
# # !conda install -c conda-forge gdal -y
# -
# # [3] First time usage for pip users
# +
# # !pip install "../03_wheels/GDAL-3.1.4-cp38-cp38-win_amd64.whl"
# -
# # [4] Importing libraries
import gdal
import os
import shutil
import time
import numpy as np
from tqdm.notebook import tqdm as td
# Record the wall-clock start time; section [8] reports the total duration.
start = time.time() # will be used to measure the effectiveness of automation
# # [5] Creating routine functions
def fresh(where):
    """Recreate *where* as an empty directory.

    Deletes the directory tree first if it already exists, then makes a new
    empty one. (The original duplicated os.mkdir in both branches; the mkdir
    is unconditional, so it is hoisted out of the if/else.)
    """
    if os.path.exists(where):
        shutil.rmtree(where)
    os.mkdir(where)
# # [6] Read files
# ## [6.1] Specify input directory
rootDir = "../02_data/05_resampled"
# ## [6.2] Read files from input directory
# +
# Walk the whole input tree and collect the full path of every file found.
rasters = []
for dirname, subdirnames, filenames in os.walk(rootDir):
    # search message
    print('Searched in directory: {}\n'.format(dirname))
    for name in filenames:
        rasters.append(os.path.join(dirname, name))
# print success message
print('Files read')
# -
# ## [6.3] Check the input data
# Guard against an empty directory: the original indexed rasters[0]/rasters[-1]
# unconditionally and raised IndexError when nothing was found.
if rasters:
    print('First file in sequence:', rasters[0])
    print('Last file in sequence:', rasters[-1])
else:
    print('No files found under:', rootDir)
# # [7] Clipping
# ## [7.1] Specify output directory:
outDir = "../02_data/06_clipped/"  # directory the clipped rasters are written to
# ## [7.2] Delete any existing or old files
fresh(where=outDir)  # recreate the output directory empty so stale results never linger
# ## [7.3] Check output directory [optional]
# +
# os.startfile(os.path.realpath(outDir))
# -
# ## [7.4] Clipping all files
# Clip every input raster to the study-area bounding box.
for i in td(range(0, len(rasters)), desc='Clipping'):  # desc fixed: this notebook clips, not reprojects
    # Derive the bare file name (no directory, no extension) portably; the
    # original split('\\') only worked for single-backslash Windows paths.
    fileName = os.path.splitext(os.path.basename(rasters[i]))[0]
    # specify outfile name
    outName = outDir + "{}_clipped.tif".format(fileName)
    # clipping to study area
    # NOTE(review): bounds are assumed to be in the rasters' projected CRS — confirm.
    gdal.Warp(
        destNameOrDestDS=outName,      # out file name
        srcDSOrSrcDSTab=rasters[i],    # source file name
        outputBounds=[
            159236.23230056558,        # left [min X]
            3170068.6251568096,        # bottom [min Y]
            509236.2323005656,         # right [max X]
            3500068.6251568096         # top [max Y]
        ]
    )
# # [8] Time elapsed
end = time.time()
# Total wall-clock duration of the whole clipping run.
print('Time elapsed:', np.round(end-start ,2), 'secs')
# # [9] See results [clipped files, optional]
# NOTE(review): os.startfile exists only on Windows — this cell fails on macOS/Linux.
os.startfile(os.path.realpath(outDir))
# ---
# # End of last tutorial and series
# ---
# # *If you would like to see more, please contact me at:*
#
# *Twitter: https://twitter.com/DrBCY*
# *Outlook: <EMAIL>*
#
# *Add me to whatsapp groups:*
# *+91 9068263621*
| 01_notebooks/05_clipping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img alt="QuantRocket logo" src="https://www.quantrocket.com/assets/img/notebook-header-logo.png">
#
# © Copyright Quantopian Inc.<br>
# © Modifications Copyright QuantRocket LLC<br>
# Licensed under the [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/legalcode).
#
# <a href="https://www.quantrocket.com/disclaimer/">Disclaimer</a>
# + [markdown] deletable=true editable=true
# # Overfitting
#
# By <NAME>" Nitishinskaya and <NAME>. Algorithms by <NAME>.
# + [markdown] deletable=true editable=true
# ## What is overfitting?
#
# When constructing a model, we tune both the parameters and the model by fitting to sample data. We then use the model and parameters to predict data we have not yet observed. We say a model is overfit when it is overly sensitive to noise and idiosyncracies in the sample data, and therefore does not reflect the underlying data-generating process.
#
# To understand why this happens, one has to consider the amount of noise present in any dataset. One can consider a set of data as $D_{T}$, the true underlying data that came from whatever process we are trying to model, and $\epsilon$, some random noise. Because what we see is $D = D_{T} + \epsilon$, we might fit our model to very perfectly predict for the given $\epsilon$, but not for $D_{T}$.
#
# This is problematic because we only care about fitting to the sample insofar as that gives an accurate fit to future data. The two broad causes of overfitting are:
# * small sample size, so that noise and trend are not distinguishable
# * choosing an overly complex model, so that it ends up contorting to fit the noise in the sample
# + [markdown] deletable=true editable=true
# ### Verbal Example: Too Many Rules (Complexity)
#
# Let's say you have the following dataset:
#
# | TV Channel | Room Lighting Intensity | Enjoyment |
# |------------|-------------------------|-----------|
# | 1 | 2 | 1 |
# | 2 | 3 | 2 |
# | 3 | 1 | 3 |
#
# You are trying to predict enjoyment, so you create the following rules:
#
# 1. If TV Channel is 1 and Lighting Intensity is 2, then Enjoyment will be 1.
# 2. If TV Channel is 2 and Lighting Intensity is 3, then Enjoyment will be 2.
# 3. If TV Channel is 3 and Lighting Intensity is 1, then Enjoyment will be 3.
# 4. In all other cases predict an average enjoyment of 2.
#
# This is a well-defined model for future data; however, in this case let's say your enjoyment is purely dependent on the TV channel and not on the lighting. Because we have a rule for each row in our dataset, our model is perfectly predictive in our historical data, but it would perform poorly in real trials because we are overfitting to random noise in the lighting intensity data.
#
# Generalizing this to stocks, if your model starts developing many specific rules based on specific past events, it is almost definitely overfitting. This is why black-box machine learning (neural networks, etc.) is so dangerous when not done correctly.
# + [markdown] deletable=true editable=true
# ### Example: Curve fitting
#
# Overfitting is most easily seen when we look at polynomial regression. Below we construct a dataset which noisily follows a quadratic. The linear model is underfit: simple linear models aren't suitable for all situations, especially when we have reason to believe that the data is nonlinear. The quadratic curve has some error but fits the data well.
#
# When we fit a ninth-degree polynomial to the data, the error is zero - a ninth-degree polynomial can be constructed to go through any 10 points - but, looking at the tails of the curve, we know that we can't expect it to accurately predict other samples from the same distribution. It fits the data perfectly, but that is because it also fits the noise perfectly, and the noise is not what we want to model. In this case we have selected a model that is too complex.
# + deletable=true editable=true
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
from statsmodels import regression
from scipy import poly1d
# + deletable=true editable=true jupyter={"outputs_hidden": false}
x = np.arange(10)
# Quadratic ground truth plus Gaussian noise (no seed, so the plot varies per run).
y = 2*np.random.randn(10) + x**2
xs = np.linspace(-0.25, 9.25, 200)

# Least-squares polynomial fits of degree 1 (underfit), 2 (matches the true
# process) and 9 (passes through all 10 points, i.e. fits the noise).
lin = np.polyfit(x, y, 1)
quad = np.polyfit(x, y, 2)
many = np.polyfit(x, y, 9)

plt.scatter(x, y)
plt.plot(xs, poly1d(lin)(xs))
plt.plot(xs, poly1d(quad)(xs))
plt.plot(xs, poly1d(many)(xs))
plt.ylabel('Y')
plt.xlabel('X')
plt.legend(['Underfit', 'Good fit', 'Overfit']);
# + [markdown] deletable=true editable=true
# When working with real data, there is unlikely to ever be a situation where a ninth-degree polynomial is appropriate: our choice of function should reflect a belief about the underlying process, and real-world processes generally do not follow high-degree polynomial curves. This example is contrived, but it can be tempting to use a quadratic or cubic model just to decrease sample error.
# + [markdown] deletable=true editable=true
# ### Note: Model/Parameter Parsimony
#
# Just as the most elegant physics models describe a tremendous amount of our world through a few equations, a good trading model should explain most of the data through a few rules. Any time you start to have a number of rules even close to the number of points in your data set, you can be sure you are overfitting. Since parameters can be thought of as rules as they equivalently constrain a model, the same is true of parameters. Fewer parameters is better, and it is better to explain 60% of the data with 2-3 parameters than 90% with 10.
# + [markdown] deletable=true editable=true
# ### Beware of the perfect fit
#
# Because there is almost always noise present in real data, a perfect fit is almost always indicative of overfitting. It is almost impossible to know the percentage noise/signal in a given data set while you are developing the model, but use your common sense. Are the predictions surprisingly good? Then you're probably overfitting.
# + [markdown] deletable=true editable=true
# ### Example: Regression parameters
#
# How do we know which variables to include in a model? If we're afraid of omitting something important, we might try different ones and include all the variables we can find that improve the fit. Below we regress one asset that is in the same sector as the asset whose price we're trying to predict, and three other unrelated ones. In our initial timeframe, we are able to fit the model more closely to the data when using multiple variables than when using just one.
# + deletable=true editable=true jupyter={"outputs_hidden": false}
# Load one year's worth of pricing data for five different assets
from quantrocket.master import get_securities
from quantrocket import get_prices

securities = get_securities(symbols=["AAPL", "MSFT", "JNJ", "XOM", "MON"], vendors='usstock')

start = '2017-01-01'
end = '2018-01-01'
# Daily close prices: rows are dates, columns are security ids (sids).
closes = get_prices("usstock-free-1min", data_frequency="daily", sids=securities.index.tolist(), fields='Close', start_date=start, end_date=end).loc['Close']
# Relabel the sid columns with their ticker symbols for readability.
sids_to_symbols = securities.Symbol.to_dict()
closes = closes.rename(columns=sids_to_symbols)

x1 = closes['MSFT']
x2 = closes['JNJ']
x3 = closes['MON']
x4 = closes['XOM']
y = closes['AAPL']

# Build a linear model using only x1 to explain y
slr = regression.linear_model.OLS(y, sm.add_constant(x1)).fit()
slr_prediction = slr.params[0] + slr.params[1]*x1

# Run multiple linear regression using x1, x2, x3, x4 to explain y
mlr = regression.linear_model.OLS(y, sm.add_constant(np.column_stack((x1,x2,x3,x4)))).fit()
mlr_prediction = mlr.params[0] + mlr.params[1]*x1 + mlr.params[2]*x2 + mlr.params[3]*x3 + mlr.params[4]*x4

# Compute adjusted R-squared for the two different models
print('SLR R-squared:', slr.rsquared_adj)
print('SLR p-value:', slr.f_pvalue)
print('MLR R-squared:', mlr.rsquared_adj)
print('MLR p-value:', mlr.f_pvalue)

# Plot y along with the two different predictions
y.plot()
slr_prediction.plot()
mlr_prediction.plot()
plt.ylabel('Price')
plt.xlabel('Date')
plt.legend(['AAPL', 'SLR', 'MLR']);
# + [markdown] deletable=true editable=true
# However, when we use the same estimated parameters to model a different time period, we find that the single-variable model fits worse, while the multiple-variable model is entirely useless. It seems that the relationships we found are not consistent and are particular to the original sample period.
# + deletable=true editable=true jupyter={"outputs_hidden": false}
# Load the next 6 months of pricing data
start = '2018-01-01'
end = '2018-06-01'
closes = get_prices("usstock-free-1min", data_frequency="daily", sids=securities.index.tolist(), fields='Close', start_date=start, end_date=end).loc['Close']
closes = closes.rename(columns=sids_to_symbols)

x1 = closes['MSFT']
x2 = closes['JNJ']
x3 = closes['MON']
x4 = closes['XOM']
y = closes['AAPL']

# Extend our model from before to the new time period
# (coefficients estimated on the 2017 sample are reused unchanged — this is
# an out-of-sample evaluation of the fitted models)
slr_prediction2 = slr.params[0] + slr.params[1]*x1
mlr_prediction2 = mlr.params[0] + mlr.params[1]*x1 + mlr.params[2]*x2 + mlr.params[3]*x3 + mlr.params[4]*x4

# Manually compute adjusted R-squared over the new time period
# The factor (N-1)/(N-p-1) penalizes the statistic for the number of predictors p.
# Adjustment 1 is for the SLR model
p = 1
N = len(y)
adj1 = float(N - 1)/(N - p - 1)
# Now for MLR
p = 4
N = len(y)
adj2 = float(N - 1)/(N - p - 1)
SST = sum((y - np.mean(y))**2)         # total sum of squares
SSRs = sum((slr_prediction2 - y)**2)   # SLR residual sum of squares
print('SLR R-squared:', 1 - adj1*SSRs/SST)
SSRm = sum((mlr_prediction2 - y)**2)   # MLR residual sum of squares
print('MLR R-squared:', 1 - adj2*SSRm/SST)

# Plot y along with the two different predictions
y.plot()
slr_prediction2.plot()
mlr_prediction2.plot()
plt.ylabel('Price')
plt.xlabel('Date')
plt.legend(['AAPL', 'SLR', 'MLR']);
# + [markdown] deletable=true editable=true
# If we wanted, we could scan our universe for variables that were correlated with the dependent variable, and construct an extremely overfitted model. However, in most cases the correlation will be spurious, and the relationship will not continue into the future.
# + [markdown] deletable=true editable=true
# ### Example: Rolling windows
#
# One of the challenges in building a model that uses rolling parameter estimates, such as rolling mean or rolling beta, is choosing a window length. A longer window will take into account long-term trends and be less volatile, but it will also lag more when taking into account new observations. The choice of window length strongly affects the rolling parameter estimate and can change how we see and treat the data. Below we calculate the rolling averages of a stock price for different window lengths:
# + deletable=true editable=true jupyter={"outputs_hidden": false}
# Load the pricing data for a stock
exxon_sid = get_securities(symbols='XOM', vendors='usstock').index[0]
start = '2011-01-01'
end = '2013-01-01'
prices = get_prices('usstock-free-1min', data_frequency="daily", sids=exxon_sid, fields='Close', start_date=start, end_date=end)
# Reduce the multi-field frame to a single Series of daily closes.
prices = prices.loc['Close'][exxon_sid]

# Compute rolling averages for various window lengths
mu_30d = prices.rolling(window=30).mean()
mu_60d = prices.rolling(window=60).mean()
mu_100d = prices.rolling(window=100).mean()

# Plot asset pricing data with rolling means from the 100th day, when all the means become available
plt.plot(prices[100:], label='Asset')
plt.plot(mu_30d[100:], label='30d MA')
plt.plot(mu_60d[100:], label='60d MA')
plt.plot(mu_100d[100:], label='100d MA')
plt.xlabel('Day')
plt.ylabel('Price')
plt.legend();
# + [markdown] deletable=true editable=true
# If we pick the length based on which seems best - say, on how well our model or algorithm performs - we are overfitting. Below we have a simple trading algorithm which bets on the stock price reverting to the rolling mean (for more details, check out the mean reversion notebook). We use the performance of this algorithm to score window lengths and find the best one. However, when we consider a different timeframe, this window length is far from optimal. This is because our original choice was overfitted to the sample data.
# + deletable=true editable=true
# Trade using a simple mean-reversion strategy
def trade(stock, length):
    """Simulate a naive mean-reversion strategy on a price Series.

    Shorts one share when the rolling z-score exceeds 1, buys one when it is
    below -1, and flattens the position when |z| < 0.5. Returns the final
    cash balance (open positions are not marked to market).
    """
    # If window length is 0, algorithm doesn't make sense, so exit
    if length == 0:
        return 0

    # Rolling z-score of each day's price against its trailing window.
    window = stock.rolling(window=length)
    zscores = (stock - window.mean()) / window.std()

    # Simulate trading, starting with no money and no positions.
    cash = 0
    position = 0
    for day in range(len(stock)):
        z = zscores[day]
        price = stock[day]
        if z > 1:              # sell short
            cash += price
            position -= 1
        elif z < -1:           # buy long
            cash -= price
            position += 1
        elif abs(z) < 0.5:     # close out all positions
            cash += position * price
            position = 0
    return cash
# + deletable=true editable=true jupyter={"outputs_hidden": false}
# Find the window length 0-254 that gives the highest returns using this strategy
from IPython.display import clear_output

length_scores = []
# Brute-force every window length; this in-sample parameter sweep is exactly
# the kind of procedure the notebook warns leads to overfitting.
for l in range(255):
    print(f'testing window length: {l}')
    score = trade(prices, l)
    length_scores.append(score)
    clear_output(wait=True)  # keep the notebook output to a single progress line

best_length = np.argmax(length_scores)
print('Best window length:', best_length)
# + deletable=true editable=true jupyter={"outputs_hidden": false}
# Get pricing data for a different timeframe
start2 = '2013-01-01'
end2 = '2015-01-01'
prices2 = get_prices('usstock-free-1min', data_frequency='daily', sids=exxon_sid, fields='Close', start_date=start2, end_date=end2)
prices2 = prices2.loc['Close'][exxon_sid]

# Score every window length on the new timeframe with the same strategy.
length_scores2 = []
for window_length in range(255):
    print(f'testing window length {window_length}')
    length_scores2.append(trade(prices2, window_length))
    clear_output(wait=True)

# Returns achieved by the window length chosen on the original timeframe
print(best_length, 'day window:', length_scores2[best_length])
# Find the best window length based on this dataset, and the returns using this window length
best_length2 = np.argmax(length_scores2)
print(best_length2, 'day window:', length_scores2[best_length2])
# + [markdown] deletable=true editable=true
# Clearly fitting to our sample data doesn't always give good results in the future. Just for fun, let's plot the length scores computed from the two different timeframes:
# + deletable=true editable=true jupyter={"outputs_hidden": false}
# Overlay the window-length score curves from the two timeframes.
for scores in (length_scores, length_scores2):
    plt.plot(scores)
plt.xlabel('Window length')
plt.ylabel('Score')
plt.legend(['2011-2013', '2013-2015']);
# + [markdown] deletable=true editable=true
# To avoid overfitting, we can use economic reasoning or the nature of our algorithm to pick our window length. We can also use Kalman filters, which do not require us to specify a length; this method is covered in another notebook.
# + [markdown] deletable=true editable=true
# ## Avoiding overfitting
#
# We can try to avoid overfitting by taking large samples, choosing reasonable and simple models, and not cherry-picking parameters to fit the data; but just running two backtests is already overfitting.
#
# ### Out of Sample Testing
#
# To make sure we haven't broken our model with overfitting, we have to test out of sample data. That is, we need to gather data that we did not use in constructing the model, and test whether our model continues to work. If we cannot gather large amounts of additional data at will, we should split the sample we have into two parts, of which one is reserved for testing only.
#
# ### Common Mistake: Abusing Out of Sample Data
#
# Sometimes people will construct a model on in-sample data, test on out-of-sample data, and conclude it doesn't work. They will then repeat this process until they find a model that works. This is still overfitting, as you have now overfit the model to the out-of-sample data by using it many times, and when you actually test on true out-of-sample data your model will likely break down.
#
# ### Cross Validation
#
# Cross validation is the process of splitting your data into $n$ parts, then estimating optimal parameters for $n-1$ parts combined and testing on the final part. By doing this $n$ times, once for each part held out, we can establish how stable our parameter estimates are and how well they predict data outside the original set.
#
# ### Information Criterion
#
# Information criteria are a rigorous statistical way to test whether the amount of complexity in your model is worth the extra predictive power. These tests favor simpler models and will tell you if you are introducing a large amount of complexity without much return. One of the most common methods is the [Akaike Information Criterion.](https://en.wikipedia.org/wiki/Akaike_information_criterion)
# -
# ---
#
# **Next Lecture:** [Hypothesis Testing](Lecture20-Hypothesis-Testing.ipynb)
#
# [Back to Introduction](Introduction.ipynb)
# ---
#
# *This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian") or QuantRocket LLC ("QuantRocket"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, neither Quantopian nor QuantRocket has taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information believed to be reliable at the time of publication. Neither Quantopian nor QuantRocket makes any guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
| quant_finance_lectures/Lecture19-Dangers-of-Overfitting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import glob
from IPython.display import display
import sys
# %load_ext autoreload
# %autoreload 2
sys.path.insert(0, '../../scripts')
from plot_helpers import nice_name_map, plot_scatter_with_lines, save_plot, format_input_size
# +
# Load every per-run CSV from the scaling-benchmark report into a single frame.
benchmark_results = pd.concat(
    [pd.read_csv(path)
     for path in glob.glob('../../benchmarks/2021-01-07-report-scaling/*.csv')]
)
benchmark_results.sample(5)
# +
non_simd_diff_programs = [
    'mpi_no_master_frontier',
    'mpi_priority_frontier',
]
# SIMD variants use the same program names with an "_simd" suffix.
simd_diff_programs = [p + '_simd' for p in non_simd_diff_programs]

# Sanity check: every program variant actually appears in the results.
for program in non_simd_diff_programs + simd_diff_programs:
    assert (benchmark_results['diff_program'] == program).any()
# -
# Color per MPI process count — presumably for later plots; not referenced in
# the visible code below (TODO confirm it is still needed).
palette = {mpi_procs: color for mpi_procs, color in zip(sorted(benchmark_results.mpi_procs.dropna().unique()), sns.color_palette())}
# 2x2 grid of scaling plots: rows index the diff program, columns index the
# non-SIMD vs SIMD variant group.
fig, ax = plt.subplots(2, len(non_simd_diff_programs), figsize=(16, 12), sharex=True, sharey=True)
for group_i, (group, group_is_simd) in enumerate([(non_simd_diff_programs, False), (simd_diff_programs, True)]):
    for diff_program_i, diff_program in enumerate(group):
        temp = benchmark_results.copy()
        # Convert runtime to seconds and process counts to node counts
        # (128 MPI processes per node).
        temp['seconds_until_len'] = temp['micros_until_len'] * 1e-6
        temp['mpi_nodes'] = temp['mpi_procs'] / 128
        temp = temp[(temp['diff_program'] == diff_program) & (temp['input_strategy'] == 'independent')]
        # Median runtime per (input length, node count) pair, for the trend lines.
        temp_2 = temp.groupby(['input_length_1', 'mpi_nodes'])['seconds_until_len'].median().unstack('input_length_1')
        this_ax = ax[diff_program_i][group_i]
        plot_scatter_with_lines(
            ax=this_ax,
            scatter_data=temp,
            line_data=temp_2,
            x_key='mpi_nodes',
            y_key='seconds_until_len',
            hue_key='input_length_1',
            sort_hue_keys=True,
            hue_to_label=format_input_size,
        )
        # One tick per whole node count, up to the largest observed value.
        this_ax.set_xticks(np.arange(np.ceil(temp['mpi_nodes'].max()) + 1))
        this_ax.set_title(f'{diff_program} ({"SIMD" if group_is_simd else "no SIMD"})')
        this_ax.set_ylim(0, 160)
# One histogram per program: distribution of the SIMD speedup ratio.
fig, ax = plt.subplots(1, len(non_simd_diff_programs), figsize=(12, 4), sharex=True, sharey=True)
for diff_program_i, (non_simd_diff_program, simd_diff_program) in enumerate(zip(non_simd_diff_programs, simd_diff_programs)):
    temp = benchmark_results.copy()
    temp = temp[(temp['diff_program'].isin([non_simd_diff_program, simd_diff_program])) & (temp['input_strategy'] == 'independent')]
    # Median runtime per configuration, one column per program variant.
    temp = temp.groupby(['generation_config_i', 'mpi_procs', 'diff_program'])['micros_until_len'].median().unstack('diff_program')
    # Keep only configurations where both variants ran for at least one second.
    temp = temp[(temp >= 1e6).all(axis=1)]
    # Speedup ratio non-SIMD / SIMD: values > 1 mean the SIMD build is faster.
    temp = temp[non_simd_diff_program] / temp[simd_diff_program]
    temp = temp.reset_index(drop=True)
    this_ax = ax[diff_program_i]
    temp.hist(ax=this_ax, bins=10);
    this_ax.set_title(nice_name_map[simd_diff_program])
# +
# Same SIMD-speedup computation as above, collected into one frame so all
# programs can share a single KDE figure for the report.
temp_2 = {}
for diff_program_i, (non_simd_diff_program, simd_diff_program) in enumerate(zip(non_simd_diff_programs, simd_diff_programs)):
    temp = benchmark_results.copy()
    temp = temp[(temp['diff_program'].isin([non_simd_diff_program, simd_diff_program])) & (temp['input_strategy'] == 'independent')]
    temp = temp.groupby(['generation_config_i', 'mpi_procs', 'diff_program'])['micros_until_len'].median().unstack('diff_program')
    # Only configurations where both variants ran for at least one second.
    temp = temp[(temp >= 1e6).all(axis=1)]
    temp = temp[non_simd_diff_program] / temp[simd_diff_program]
    temp_2[nice_name_map[simd_diff_program]] = temp
temp_2 = pd.DataFrame(temp_2)
# Target figure shape in inches: (width, height).
shape = (5.5, 2)
g = sns.displot(
    temp_2,
    kind='kde',
    legend=False,
    rug=True,
    rug_kws={'height': 0.1},
    height=shape[1],
    aspect=shape[0] / shape[1],
);
g.ax.legend(sorted(temp_2.columns), title='Program'); # HACK make the legend text and colors match
g.ax.set_xlabel('Speedup due to SIMD')
sns.despine(top=False, right=False)
save_plot(g.fig, 'simd-comparison')
| notebooks/benchmark-analysis-report/simd_comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/1carlosd1/daa_2021_1/blob/master/Tarea5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="dAFaBMl0G1Pk" outputId="d5c0978c-e1d1-4222-e6d9-32a3a1ba1c24" colab={"base_uri": "https://localhost:8080/", "height": 357}
from time import time
def ejemplo1(n):
    """O(1) example: a fixed number of arithmetic operations, timed."""
    start_time = time()
    successor = n + 1
    product = successor * n
    square = n * n
    # (n + 1) + n^2 - n*(n + 1) simplifies to 1 for every n.
    result = successor + square - product
    print(f"total={result}")
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
# Time ejemplo1 for input sizes 100, 200, ..., 1000.
for input_size in range(100, 1100, 100):
    ejemplo1(input_size)
# + id="TP3qYxdNJeud" outputId="6e36f0ce-396f-40cb-c5fe-7f6174263077" colab={"base_uri": "https://localhost:8080/", "height": 187}
from time import time
def ejemplo2(n):
    """O(n^2) example: count the iterations of two nested n-loops."""
    start_time = time()
    counter = 0
    for _ in range(n):
        for _ in range(n):
            counter += 1
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return counter
# Time ejemplo2 for input sizes 100, 200, ..., 1000.
for input_size in range(100, 1100, 100):
    ejemplo2(input_size)
# + id="Bm86b9e0MSFW" outputId="cff594e1-13c9-4264-c5f6-748eaa9d06d9" colab={"base_uri": "https://localhost:8080/", "height": 187}
from time import time
def ejemplo3(n):
    """O(1) example: the loop always runs a fixed 100 iterations."""
    start_time = time()
    doubled = n * 2
    result = 0
    # The iteration count does not depend on n, so this is constant time.
    for _ in range(100):
        result = doubled - n
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return result
# Time ejemplo3 for input sizes 100, 200, ..., 1000.
for input_size in range(100, 1100, 100):
    ejemplo3(input_size)
# + id="PYUiIB75NSR-" outputId="64d743ee-8eef-4ff0-c895-bf9b2c52aaae" colab={"base_uri": "https://localhost:8080/", "height": 187}
from time import time
def ejemplo4(n):
    """O(1) example: three straight-line arithmetic assignments, timed."""
    start_time = time()
    first = 3 * 3.1416 + n
    second = first + 3 * 3 - n
    combined = first + second
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return combined
# Time ejemplo4 for input sizes 100, 200, ..., 1000.
for input_size in range(100, 1100, 100):
    ejemplo4(input_size)
# + id="B6x4HCKqOMwU" outputId="0028eec9-cf04-4d9b-b97f-75aa9ffc118d" colab={"base_uri": "https://localhost:8080/", "height": 187}
from time import time
def ejemplo5(x):
    """O(n) example: accumulate 10 + 0 + 1 + ... + (x - 1)."""
    start_time = time()
    total = 10
    for step in range(0, x, 1):
        total = step + total
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return total
# Time ejemplo5 for input sizes 100, 200, ..., 1000.
for input_size in range(100, 1100, 100):
    ejemplo5(input_size)
# + id="66OCW4DaO3vo" outputId="a0665feb-245a-44a9-cf91-26b468f87091" colab={"base_uri": "https://localhost:8080/", "height": 187}
def ejemplo6(n):
    """O(n^3) example: build an n*n*n cube of ones and sum every entry."""
    start_time = time()
    cube = [[[1 for _ in range(n)] for _ in range(n)] for _ in range(n)]
    total = 0
    for depth in range(n):
        for row in range(n):
            for col in range(n):
                total += cube[depth][row][col]
    elapsed_time = time() - start_time
    print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
    return total
# Time ejemplo6 for input sizes 100, 200, ..., 1000.
for input_size in range(100, 1100, 100):
    ejemplo6(input_size)
| Tarea5.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,jl:light
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
# ## Data exchange between neighbor arrays
#
# The implementation of `exchange` functions is a central concept of `MeshArrays.jl` illustrated here through a basic example. It is further used in `03_smoothing.ipynb` that applies diffusion over the surface of a sphere.
#
# #### First, Let's load the `MeshArrays.jl` and `Plots.jl` package modules
using MeshArrays, Plots
# Download the pre-defined cube-sphere grid from GitHub if it is not already present
if !isdir("GRID_CS32")
run(`git clone https://github.com/gaelforget/GRID_CS32`)
end
# Select the `cube sphere` grid and read the `ocean depth` variable into a MeshArray
mygrid=GridSpec("CS32")
D=mygrid.read(mygrid.path*"Depth.data",MeshArray(mygrid,Float32))
show(D)
# #### Use the `exchange` function
#
# It pads each face with neighboring points from adjacent faces (a halo of
# width 4 here), as seen in the enlarged face dimensions printed below
Dexch=exchange(D,4)
show(Dexch)
# Compare one face before and after the exchange using `Plots.jl`
P=heatmap(D.f[6],title="Ocean Depth (D, Face 6)",lims=(0,40))
Pexch=heatmap(Dexch.f[6],title="...(Dexch, Face 6)",lims=(0,40))
plot(P,Pexch)
| 02_exchanges.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: JuliaPro_v1.5.3-1 1.5.3
# language: julia
# name: juliapro_v1.5.3-1-1.5
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Einrichtung von Julia JuMP
# ---
# Überprüfen Sie die bereits installierten Pakete. In der JuliaBox sind möglicherweise alle notwendigen Pakete bereits vorhanden.
# + slideshow={"slide_type": "fragment"}
# List the packages already installed in the active Julia environment
using Pkg;
Pkg.status()
# + [markdown] slideshow={"slide_type": "slide"}
# Installieren Sie ``JuMP`` , mit dem Sie einfach Optimierungsprogramme formulieren können, sowie ``Cbc``, einen open-source Solver zum Lösen des Problems, und ``StatsPlots`` zur Visualisierung der Lösung.
#
# + slideshow={"slide_type": "fragment"}
# Install JuMP (optimization modeling) and Cbc (open-source MILP solver)
Pkg.add("JuMP");
Pkg.add("Cbc");
# + [markdown] slideshow={"slide_type": "fragment"}
# Richten Sie die installierten Pakete so ein, dass sie im folgenden Code verwendet werden können.
# + slideshow={"slide_type": "fragment"}
using JuMP, Cbc;
# + [markdown] slideshow={"slide_type": "slide"}
# # Planspiel Schritt 2: Capacitated Lot Sizing Problem
#
#
# -
# ### Ergebnisse aus Teil 1
# +
F_Aggr = [0.0 0.0 0.0 0.0 0.0 0.0
          0.0 0.0 0.0 0.0 0.0 0.0
          0.0 0.0 0.0 0.0 0.0 0.0];
# Long-term subcontracting solution from part 1:
# rows are the products, columns are the periods
# +
ZAggr = [21.0 6.0 0.0 0.0 38.0 40.0];
# Extra capacity used per period, taken from part 1
# +
LAggr = [0.0 0.0 0.0 823.0 902.0 647.0 0.0
         0.0 0.0 0.0 0.0 1.0 0.0 0.0
         0.0 0.0 0.0 9.0 3.0 1.0 0.0 ];
# Inventory levels of the products per period, taken from part 1.
# Why are there seven columns even though only six periods are considered?
# Hint: at which point in time is the inventory level measured?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Erstellen Sie das Modell namens ``m`` und geben Sie als zu verwendenden Solver Cbc an.
# ---
# + slideshow={"slide_type": "fragment"}
# Create the model and select Cbc as the solver
m = Model(Cbc.Optimizer);
set_optimizer_attribute(m, "seconds", 60);
# limits the solver's run time to 60 seconds
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Mengen und Parameter
# + [markdown] slideshow={"slide_type": "fragment"}
# Fügen Sie die Mengen ein.
#
# + slideshow={"slide_type": "fragment"}
# Sets
Produkte = ["HIER CODE EINGEBEN"];
Perioden = ["HIER CODE EINGEBEN"];
# Note: build a separate model for each month (with 4 weeks each)
# --> you therefore need six separate files, one per month
Ressourcen = ["HIER CODE EINGEBEN"];
# Set cardinalities
I = length(Produkte);
T = length(Perioden);
J = length(Ressourcen);
# + [markdown] slideshow={"slide_type": "slide"}
# Fügen Sie die Parameter ein.
# + slideshow={"slide_type": "fragment"}
# Note: consider which parameters have to be scaled down from monthly to weekly values
kl = ["HIER CODE EINGEBEN"]; # inventory holding costs of the products in euros per week
ks = ["HIER CODE EINGEBEN"]; # setup cost rate of the products
ts = ["HIER CODE EINGEBEN"]; # setup times of the products
tp = [0.105, 0.095, 0.065]; # unit processing times of the products
M = 100000; # big-M constant
d = ["HIER CODE EINGEBEN"];
# Demand of product i in period t
#= Example for three periods and four products: d = [900 200 100
                                                     300 0 69
                                                     500 30 210
                                                     10 25 60 ]; =#
kf = [320, 432, 240]; # subcontracting costs 1 of the products in euros (Tucher)
kv = [160, 216, 120]; # variable production costs of the products
# +
# These values depend on the aggregate planning (part 1)
l_start = ["HIER CODE EINGEBEN"]; #= opening inventory of the month ( = closing inventory of the
previous period, see 'results from part 1' above) =#
# Example with four products: l_start = [20, 50, 30, 10];
l_end = ["HIER CODE EINGEBEN"]; #= the closing inventory of the month is always the corresponding
inventory level computed in step 1 =#
# Example with four products: l_end = [30, 60, 40, 5];
c = ["HIER CODE EINGEBEN"]; # capacity per period
#= Note: in addition to the regular capacity, also include the extra capacity determined in the
aggregate planning, split across the 4 weeks =#
faggr = ["HIER CODE EINGEBEN"]
# Weekly delivery of the products ordered from Cheapress
#= Example for three periods and four products: faggr = [900 200 100
                                                         300 0 69
                                                         500 30 210
                                                         10 25 60 ]; =#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Entscheidungsvariablen
# ---
# Definieren Sie die Entscheidungsvariablen. Achten Sie auf die Definitionsbereiche.
# + [markdown] slideshow={"slide_type": "fragment"}
# **Nichtnegativitätsbedingung**: Die Entscheidungsvariablen, also die Produktionsmengen, Lagerbestände, Fremdbezugsmengen und die in Anspruch genommenen Zusatzkapazitäten dürfen nur positive Werte annehmen, wir setzen deren Nichtnegativität voraus.
#
# $ \qquad X_{it}, L_{it}, F_{it} \geq 0 \qquad \qquad \forall i \in I, t \in T$
#
# -
@variables m begin
    X[1:I,1:T]>=0,Int # production quantity of product i in period t
    F[1:I,1:T]>=0,Int # subcontracted quantity of product i in period t
    L[1:I,0:T]>=0, Int; # inventory of product i at the end of period t
end
# + [markdown] slideshow={"slide_type": "fragment"}
# **Binaerbedingung**: Rüstvorgänge werden immer ganz ($\gamma_{it} = 1$) oder gar nicht ($\gamma_{it} = 0$) ausgeführt. Die binäre Rüstvariable nimmt also entweder den Wert 1 oder 0 an.
#
# $ \qquad \gamma_{it} \in \{0,1\} \qquad\qquad\qquad\qquad\qquad\qquad \forall t \in T, \quad i \in I $
# + slideshow={"slide_type": "fragment"}
@variable(m,gamma[1:I,1:T],Bin); #Die binäre Rüstvariable von Produkt i in Periode t
# + [markdown] slideshow={"slide_type": "slide"}
# ## Zielfunktion
#
# **Kostenminimierung:** Der Produktionskosten K sollen minimiert werden. Diese berechnen sich hier aus der Summe der variablen Kosten, der Lagerkosten, der Fremdbezugskosten und Rüstkosten.
#
# $$ \qquad \min K = \displaystyle\sum_{i=1}^I \sum_{t=1}^T (k_i^vX_{it}+k_i^l L_{it}+k_i^fF_{it}+ k^s_i\gamma_{i,t}) $$
# + slideshow={"slide_type": "fragment"}
# Minimize total cost: variable production + holding + subcontracting + setup costs
@objective(m, Min, sum(kv[i] * X[i,t] + kl[i] * L[i,t] + kf[i] * F[i,t] + ks[i]*gamma[i,t]
                    for i=1:I for t=1:T));
# + [markdown] slideshow={"slide_type": "slide"}
# ## Nebenbedingungen
# + [markdown] slideshow={"slide_type": "fragment"}
# **Kapazitaetsrestriktion:** Die Summe aus Produktions- und Rüstzeit aller Produkte darf in jeder Periode die vorhandene Kapazität der Ressource j nicht überschreiten.
#
# $$ \qquad \displaystyle\sum_{i=1}^I(t_i^p\cdot X_{it}+t_i^s\cdot\gamma_{it} ) \leq c_t \hspace{40mm} \forall t \in T, j \in J\qquad $$
#
#
# + slideshow={"slide_type": "fragment"}
@constraint(m, KapRes[t=1:T, j=1:J], sum(ts[i] * gamma[i,t] + tp[i] * X[i,t] for i=1:I) <= c[t] );
# + [markdown] slideshow={"slide_type": "fragment"}
# **Lagerbilanzgleichung**: Der Lagerbestand eines Produktes am Ende einer Periode berechnet sich aus der Summe der eingelagerten Menge in der Vorperiode, der Produktionsmenge und der kurzfristigen und langfristigen Fremdbezugsmenge, abzüglich der abgesetzen Menge des Produktes.
#
# $$ \qquad L_{it}=L_{i,t-1}+X_{it}-d_{it}+F_{it}+f_{it}^{aggr} \qquad \forall i \in I , t \in T$$
# + slideshow={"slide_type": "fragment"}
# Insert the long-term subcontracting parameter into this constraint!
@constraint(m, Lager[i=1:I,t=1:T], L[i,t] == L[i,t-1] + X[i,t] + F[i,t] - d[i,t]"HIER CODE EINFUEGEN");
# + [markdown] slideshow={"slide_type": "slide"}
# **Anfangslagerbestand**: Der Anfangslagerbestand aller Produkte entspricht dem initial gesetzen $l_{start}$.
#
# **Endlagerbestand**: Der Endlagerbestand aller Produkte entspricht dem initial gesetzen $l_{end}$.
#
# $$ \qquad L_{i,0} = l_{start} \hspace{40mm} \forall i \in I$$
#
# $$ \qquad L_{i,T} = l_{end} \hspace{40mm} \forall i \in I$$
#
# + slideshow={"slide_type": "fragment"}
# Opening inventory is fixed to the value from the aggregate planning
@constraint(m, AnfLager[i=1:I], L[i,0] == l_start[i]);
# Add a constraint for the closing inventory here
@constraint("HIER CODE EINFUEGEN");
# + [markdown] slideshow={"slide_type": "fragment"}
# **Rüstbedingung**: Wenn für ein Produkt in einer Periode nicht gerüstet wird, ist die produzierte Menge dieses Produkts in dieser Periode 0. Wenn für ein Produkt in einer Periode gerüstet wird, wird die produzierte Menge durch die Rüstbedingung nicht eingeschränkt.
#
# $$ \qquad X_{it} \leq M \cdot \gamma_{it} \hspace{40mm} \forall t \in T, \quad i \in I $$
# + slideshow={"slide_type": "fragment"}
@constraint(m, Ruestbed[i=1:I,t=1:T], X[i,t] <= M * gamma[i,t]);
# + [markdown] slideshow={"slide_type": "slide"}
# ## Lösen Sie das Modell.
# ---
# + slideshow={"slide_type": "fragment"}
"HIER CODE EINFUEGEN"
# Insert the command that solves the model
# Hint: this command was already used in the lecture's model
# + [markdown] slideshow={"slide_type": "fragment"}
# Lassen Sie sich den Zielfunktionswert Z anzeigen.
# + slideshow={"slide_type": "fragment"}
cost = "HIER CODE EINFUEGEN"
# Insert the command that reads the objective value
# Hint: this command was already used in the lecture's model
println("Objective value cost: ", round(Int64,cost)) # prints the rounded value of the variable cost
# -
# Platz für weitere Berechnungen:
| Basismodelle Planspiel/PlanspielTeil2leer_v1.5.3-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="figures/k2_pix_small.png">
# *This notebook contains an excerpt instructional material from [gully](https://twitter.com/gully_) and the [K2 Guest Observer Office](https://keplerscience.arc.nasa.gov/); the content is available [on GitHub](https://github.com/gully/k2-metadata).*
#
# # Gaia crossmatch
#
# Things to say:
# - Need to read in Gaia TGAS catalog
# - Need to compare to targeted K2 objects
# <!--NAVIGATION-->
# < [Other K2 metadata sources](03.00-Other-metadata-sources.ipynb) | [Contents](Index.ipynb) | [Advanced visualizations](04.00-Advanced-visualizations.ipynb) >
import pandas as pd
import glob
TGAS_df = pd.DataFrame()  # empty accumulator for the Gaia TGAS catalog — presumably filled from the files below; TODO confirm
fns = glob.glob('../../adrasteia/data/TgasSource_000-000-*.csv')  # chunked TGAS source CSV files
# <!--NAVIGATION-->
# < [Other K2 metadata sources](03.00-Other-metadata-sources.ipynb) | [Contents](Index.ipynb) | [Advanced visualizations](04.00-Advanced-visualizations.ipynb) >
| notebooks/03.01-Gaia_crossmatch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.4 64-bit
# name: python394jvsc74a57bd0b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e
# ---
# + executionInfo={"elapsed": 4434, "status": "ok", "timestamp": 1607146642164, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiIWFHtan62vtW1gz2Bv2bxL3rppefcadxzEVxRKQ=s64", "userId": "03254185060287524023"}, "user_tz": -330} id="WapU423CpeT5"
import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transfroms
from torch.utils.data import DataLoader
# -
device = "cuda" if torch.cuda.is_available() else "cpu"
# Per-architecture convolutional configurations: integers are conv output
# channel counts, "M" marks a 2x2 max-pool layer.
VGG_types = {
    "VGG11": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
    "VGG13": [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M",
              512, 512, "M"],
    "VGG16": [64, 64, "M", 128, 128, "M", 256, 256, 256, "M",
              512, 512, 512, "M", 512, 512, 512, "M"],
    "VGG19": [64, 64, "M", 128, 128, "M", 256, 256, 256, 256, "M",
              512, 512, 512, 512, "M", 512, 512, 512, 512, "M"],
}
class VGG_net(nn.Module):
    """VGG-style convolutional network built from a VGG_types configuration.

    The classifier head hard-codes 512 * 7 * 7 input features, so spatial
    inputs must be 224x224 (the standard VGG input size).
    """

    def __init__(self, in_channels=3, num_classes=1000, vgg_type="VGG19"):
        """
        Args:
            in_channels: number of channels of the input image (3 for RGB).
            num_classes: size of the final classification layer.
            vgg_type: key into VGG_types selecting the conv configuration.
                Defaults to "VGG19", matching the previous hard-coded choice.
        """
        super(VGG_net, self).__init__()
        self.in_channels = in_channels
        self.conv_layers = self.create_conv_layers(VGG_types[vgg_type])
        # Standard VGG classifier head: two dropout-regularized 4096-wide
        # hidden layers followed by the class logits.
        self.fcs = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        """Return class logits for a batch x of shape (N, in_channels, 224, 224)."""
        x = self.conv_layers(x)
        x = x.reshape(x.shape[0], -1)  # flatten all feature maps per sample
        x = self.fcs(x)
        return x

    def create_conv_layers(self, architecture):
        """Translate a configuration list into an nn.Sequential feature extractor.

        Integers become Conv2d(3x3, stride 1, pad 1) + BatchNorm + ReLU blocks;
        "M" becomes a 2x2 max-pool that halves the spatial resolution.
        """
        layers = []
        in_channels = self.in_channels
        for x in architecture:
            if isinstance(x, int):
                out_channels = x
                layers += [
                    nn.Conv2d(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=(3, 3),
                        stride=(1, 1),
                        padding=(1, 1),
                    ),
                    nn.BatchNorm2d(x),
                    nn.ReLU(),
                ]
                in_channels = x
            elif x == "M":
                layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))]
        return nn.Sequential(*layers)
model = VGG_net(in_channels=3,num_classes=1000).to(device)
x = torch.randn(1,3,224,224).to(device)  # one random image at VGG's expected 224x224 size
print(model(x).shape)  # expect torch.Size([1, 1000]) — one logit per class
| VGG_Arch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yohanesnuwara/bsc-thesis-carbon-capture-storage/blob/master/main/04_fluid_property_modelling.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="u1JSlUOxcJ-m" colab_type="text"
# # **Fluid Property Modelling**
#
# In this notebook, we would like to understand how fluid property behaves with CO2 injection (in a system of mixed brine-CO2). Modelling fluid properties is very important for rock physics.
#
# The fluid properties are:
# * Incompressibility / bulk modulus (`Kf`)
# * Density (`rhof`)
#
# Another property, shear modulus (`Gf`) is zero, because of fluid. From these properties, seismic properties are calculated:
#
# * P-velocity (`Vp`)
# * S-velocity (`Vs`)
# + id="OIbIvbHy1gV9" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="4X3mI8gR0BK6" colab_type="text"
# We will use our own `Batzle_and_Wang` function from our own Github repo to model the brine. Then, we use `CoolProp` library from `CoolProp` Github repo to model the CO2.
# + [markdown] id="85Mwo1Th0zxU" colab_type="text"
# First, clone our repo and import the function `Batzle_and_Wang`
# + id="6sRTTicB03AH" colab_type="code" outputId="954a7e73-85ef-4310-8cbc-b08edd063c75" colab={"base_uri": "https://localhost:8080/", "height": 140}
# !git clone https://github.com/yohanesnuwara/bsc-thesis-carbon-capture-storage
# + id="BY0qvwGQ17T4" colab_type="code" colab={}
import sys
sys.path.append('/content/bsc-thesis-carbon-capture-storage/lib')
from Batzle_and_Wang import *
# + [markdown] id="tcM8MFx81J3p" colab_type="text"
# Then, install `CoolProp`.
# + id="vsM56l1nEjKQ" colab_type="code" outputId="08f3418e-cf92-4adb-888d-0981f0b1a638" colab={"base_uri": "https://localhost:8080/", "height": 105}
# !pip install CoolProp
# + [markdown] id="askNyh4I1OaO" colab_type="text"
# # Step 1. Modelling of Brine Properties
#
# Let's start with the brine. Inputs for brine calculation are:
# * Pressure (`Pp`)
# * Temperature (`temp`)
# * Salinity (`salinity`)
#
# We will vary each of these inputs to see how the properties of brine (K, rho) and its seismic properties (Vp, Vs) change.
#
# We will calculate both density and bulk modulus with varying pressure from 10 to 100 MPa. Temperature is constant 50<sup>o</sup>C and salinity is 0.05%.
#
# + id="c9UuJhRY3IPd" colab_type="code" colab={}
# Sweep pressure from 10 to 100 MPa at fixed temperature (50 C) and salinity (0.05).
temp1 = 50
salinity1 = 0.05
Pp1 = np.arange(10, 110, 10)
rhobrine1 = BW_brine_density(temp1, Pp1, salinity1)
Kbrine1 = BW_brine_bulk(temp1, Pp1, salinity1, rhobrine1)
# + [markdown] id="QFQ2R3M-4lPT" colab_type="text"
# The curve of density change over pressure is visualized.
# + id="TR2BCDJy4koR" colab_type="code" outputId="d78c4558-77dc-47fb-9c2f-f83221c337e3" colab={"base_uri": "https://localhost:8080/", "height": 368}
# Side-by-side panels: density (left, blue) and bulk modulus (right, red) vs pressure.
plt.figure(figsize=(15, 5))
for position, title, ylabel, ydata, color in [
        (1, 'Brine Density vs Pressure', 'Density (g/cc)', rhobrine1, 'blue'),
        (2, 'Brine Bulk Modulus vs Pressure', 'Bulk Modulus (GPa)', Kbrine1, 'red')]:
    plt.subplot(1, 2, position)
    plt.title(title)
    plt.xlabel('Pressure (MPa)')
    plt.ylabel(ylabel)
    plt.plot(Pp1, ydata, '-', color=color)
# + [markdown] id="UfcOdVzX50p5" colab_type="text"
# **Both density and bulk modulus of brine increase as pressure increases**. Let's vary the temperature from 10<sup>o</sup>C to 200<sup>o</sup>C, with constant pressure of 50 MPa and salinity of 0.01
# + id="ZzzhaVoN6YSG" colab_type="code" outputId="32ba4d64-f39b-4061-cd38-70e54101f37b" colab={"base_uri": "https://localhost:8080/", "height": 368}
# Sweep temperature from 10 to 200 C at fixed pressure (50 MPa) and salinity (0.01).
temp2 = np.linspace(10, 200, 200)
Pp2 = 50
salinity2 = 0.01
rhobrine2 = BW_brine_density(temp2, Pp2, salinity2)
Kbrine2 = BW_brine_bulk(temp2, Pp2, salinity2, rhobrine2)

plt.figure(figsize=(15, 5))
for position, title, ylabel, ydata, color in [
        (1, 'Brine Density vs Temperature', 'Density (g/cc)', rhobrine2, 'blue'),
        (2, 'Brine Bulk Modulus vs Temperature', 'Bulk Modulus (GPa)', Kbrine2, 'red')]:
    plt.subplot(1, 2, position)
    plt.title(title)
    plt.xlabel('Temperature (C)')
    plt.ylabel(ylabel)
    plt.plot(temp2, ydata, '-', color=color)
# + [markdown] id="B823_PMM__GL" colab_type="text"
# Opposite to the trend of brine density with increasing pressure, **brine density decreases as temperature increases**. However, brine **bulk modulus follows a unique trend (rising, then falling)**. Now, we vary salinity from 0 (pure water) to 1 (very saline), with constant pressure of 50 MPa and constant temperature of 50<sup>o</sup>C.
# + id="6QuvVxLZA2G6" colab_type="code" outputId="9c30ba24-b0af-4d0e-ec01-7dd83e64f3db" colab={"base_uri": "https://localhost:8080/", "height": 368}
# Sweep salinity from 0 to 1 at fixed temperature (50 C) and pressure (50 MPa).
temp3 = 50
Pp3 = 50
salinity3 = np.linspace(0, 1, 1000)
rhobrine3 = BW_brine_density(temp3, Pp3, salinity3)
Kbrine3 = BW_brine_bulk(temp3, Pp3, salinity3, rhobrine3)

plt.figure(figsize=(15, 5))
for position, title, ylabel, ydata, color in [
        (1, 'Brine Density vs Salinity', 'Density (g/cc)', rhobrine3, 'blue'),
        (2, 'Brine Bulk Modulus vs Salinity', 'Bulk Modulus (GPa)', Kbrine3, 'red')]:
    plt.subplot(1, 2, position)
    plt.title(title)
    plt.xlabel('Salinity (%)')
    plt.ylabel(ylabel)
    plt.plot(salinity3, ydata, '-', color=color)
| main/04_fluid_property_modelling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hall strip
# **Background**
#
# Motion in a rotating frame can be decomposed into a fast 'cyclotron' coordinate motion and a slower motion of the centers (X,Y) of cyclotron motion. It can be shown that X and Y do not commute, even though absolute spatial coordinates commute. As a result, a force generates motion in a perpendicular direction (the Hall effect). As a special case, a saddle potential enacts a squeezing operator on (X, Y), resulting in the exponential growth and decay of orthogonal sizes of a collection of particles. Since a BEC rotating sufficiently fast in an elliptical trap experiences a saddle potential in the rotating frame, this results in the extension of the cloud into a long strip. The strip has an unbounded length and a minimum width defined by the zero-point cyclotron motion of the bosons.
#
# For more, see https://arxiv.org/abs/1911.12347
#
# +
import sys, os
sys.path.append(os.path.abspath('..'))
import numpy as np
import matplotlib.pyplot as plt
from copy import copy, deepcopy
from tqdm.notebook import tqdm
from scipy.ndimage import rotate as rotate_image
from scipy.optimize import curve_fit
import time
import h5py
import pandas as pd
from PIL import Image
from condensate import Wavefunction, Environment, hbar
# %matplotlib inline
# -
# ## Prepare groundstate
# +
# Trap / integration parameters for the 10 Hz elliptical trap.
omega = 2*np.pi*10    # trap frequency [rad/s]
epsilon = 0.225       # trap ellipticity
dt = 1e-5             # integrator time step [s]
fov = 300e-6          # simulation field of view [m]
e = Environment(DIM=512, fov=fov, N=0.5e6)
e.harmonic_potential(omega=omega, epsilon=epsilon)
# -
groundstate = Wavefunction(e)
groundstate.initialize_Psi(width=100)
groundstate.relax(vmax=3e8, dt=dt, steps=10000)           # relax toward the ground state
groundstate.env.rotating_frame(omegaR=[0.001]*10000)      # essentially non-rotating frame
groundstate.evolve(dt=dt, steps=10000, cooling=0.1)       # evolve with cooling to settle
groundstate.show_density()
groundstate.show_phase()
plt.imshow(np.angle(groundstate.Psi))
# ## Spin up
def rotation_freq_simple(timestep, whirrTime=30000, whirrMax=omega):
    """Single-tanh spin-up ramp: rotation rate at `timestep`, capped at `whirrMax`.

    Uses the module-level trap frequency `omega`; the 1.002 overshoot factor
    lets the tanh actually reach the cap in finite time.
    """
    ramp = 1.002 * omega * np.tanh(3.46 * timestep / whirrTime)
    return np.min([whirrMax, ramp])
def rotation_freq(timestep, whirrTime=30000, whirrMax=omega):
    """Two-stage spin-up ramp.

    A gentle initial tanh brings the rotation to ~5% of omega during the
    first tenth of `whirrTime`; the main tanh ramp then takes over, floored
    at 5% of omega and capped at `whirrMax`.
    """
    slow_stage = whirrTime / 10
    if timestep < slow_stage:
        # initial gentle ramp toward 0.05*omega
        return 0.05 * omega * (0.97 + 1.001 * np.tanh(3.3 * (timestep - slow_stage) / slow_stage))
    # main ramp: floor at 5% of omega, cap at whirrMax
    main = omega * 1.001 * np.tanh(3.3 * (timestep - 0.86 * slow_stage) / whirrTime)
    return np.min([whirrMax, np.max([0.05 * omega, main])])
# +
# Build and plot the rotation ramp, then spin the condensate up with it.
whirrMax = omega
steps = 50000
whirrtime = 50000
times = np.arange(steps)
Omega = [rotation_freq_simple(t, whirrTime=whirrtime) for t in times]
plt.figure(figsize=(6,5))
plt.plot(omega*dt*times, np.array(Omega)/omega)
plt.axhline(Omega[-1]/omega,ls='--',c='k')
plt.xlabel(r'$\omega t$')
plt.ylabel(r'$\Omega/\omega$')
plt.savefig('figures/rampup.png')
plt.show()
# -
hallstrip = deepcopy(groundstate)
# +
hallenv = hallstrip.env
hallenv.rotating_frame(omegaR=Omega)
hallenv.absorbing_boundaries(strength=100, radius=e.fov/2)  # soak up ejected atoms at the edge
hallstrip.evolve(dt=dt, steps=steps, cooling=0.001)
# -
hallstrip.show_density()
hallstrip.show_phase()
widestrip = deepcopy(hallstrip)
# widestrip.env.g *= 1.17
def geometric_squeezing(stripPsi, steps=60000, frames=300, datafile='data/geometricsqueezing.hdf5'):
    """Evolve `stripPsi` at constant rotation omega, periodically saving density.

    Parameters
    ----------
    stripPsi : Wavefunction
        State to evolve; a copy is made so the input is left untouched.
    steps : int
        Total evolution steps (rounded down to a multiple of `frames`).
    frames : int
        Number of snapshots recorded after the initial one.
    datafile : str
        HDF5 file opened in append mode; dataset name is "geosqueeze".

    Notes
    -----
    - `out` holds frames+1 snapshots (initial + one per frame) while the
      'time' attribute has only `frames` entries; downstream loops iterate
      range(len(dset)-1) to compensate.
    - `density *= density > 1` zeroes near-vacuum pixels in place; this
      assumes psi.density returns a fresh array -- TODO confirm.
    - Fix: removed an unused `times = np.arange(steps)` local.
    """
    steps = frames * (steps // frames)   # make steps an exact multiple of frames
    runtime = steps // frames            # evolution steps per recorded frame
    Omega = omega * np.ones(runtime)     # constant rotation at the trap frequency
    dt = 1e-5
    psi = copy(stripPsi)
    psi.env.rotating_frame(omegaR=Omega)
    out = []
    density = psi.density
    density *= density > 1               # threshold away numerical vacuum noise
    out.append(density)
    for i in tqdm(range(frames), leave=False):
        psi.evolve(dt=dt, steps=runtime, cooling=0.0)
        density = psi.density
        density *= density > 1
        out.append(density)
    with h5py.File(datafile, 'a') as f:
        dsname = f"geosqueeze"
        dset = f.create_dataset(dsname, data=np.array(out))
        dset.attrs['time'] = dt * runtime * np.arange(frames)
        dset.attrs['dt'] = dt
geometric_squeezing(widestrip, steps=90000, frames=300)
# ## Process Data
# +
# Set some processing parameters
viewx = 350
viewy = 350
fov = 300e-6
dx = fov/512                       # grid spacing [m/pixel]
mass = 3.8e-26                     # atomic mass [kg] -- presumably 23Na; TODO confirm
lb = np.sqrt(hbar / (2*mass *omega))   # cyclotron/"magnetic" length analog
x = dx * (np.arange(viewx) - viewx//2) # centred spatial axes for cropped views
y = dx * (np.arange(viewy) - viewy//2)
dt = 1e-5
times = np.arange(51)*dt*5e3
def gauss(x, x0, a, s):
    """Unnormalised Gaussian with centre `x0`, amplitude `a` and width `s`."""
    return a * np.exp(-((x - x0) ** 2) / (2 * s ** 2))
def satexp(t, tau, a):
    """Saturating exponential: decays from 3.31 + a toward the 3.31 baseline."""
    return 3.31 + a * np.exp(-t / tau)
def rotate_crop(array, viewx=200, viewy=350, angle=0):
    """Rotate a 2-D array by `angle` degrees (no reshape) and return a
    centred crop of shape (viewy, viewx)."""
    ny, nx = np.shape(array)
    rotated = rotate_image(array, angle, reshape=False)
    row0 = (ny - viewy) // 2
    col0 = (nx - viewx) // 2
    return rotated[row0 : row0 + viewy, col0 : col0 + viewx]
def find_angle(ncrop):
    """Orientation (degrees) of the long axis of a 2-D density image.

    Builds the moment-of-inertia tensor of the density on a [-10, 10]^2
    grid and returns the angle of the eigenvector belonging to the smallest
    eigenvalue (the axis of least inertia, i.e. the strip's long axis).
    """
    n = len(ncrop)
    coords = np.linspace(-10, 10, n)
    xx, yy = np.meshgrid(coords, coords)
    # Moment-of-inertia tensor of the density distribution
    cross = np.sum(ncrop * xx * yy)
    inertia = np.array([
        [np.sum(ncrop * yy * yy), cross],
        [cross, np.sum(ncrop * xx * xx)],
    ])
    evals, evecs = np.linalg.eig(inertia)
    principal = evecs[np.argmin(evals)]  # row of evecs, exactly as the original indexed it
    return 180 * np.arctan(principal[1] / principal[0]) / np.pi
def process_r1d(dset):
    """Process one HDF5 dataset (a single squeeze run) into a one-row DataFrame.

    Rotates/crops every frame by 42 degrees (aligning the strip with the
    axes), Gaussian-fits the x-profile of the first frame, and packs
    [clouds, times, xprofile, gaussfit] under the module-level `columns`.

    NOTE(review): the module-level `columns` defined below has 5 names while
    only 4 values are supplied here -- calling this as-is would raise;
    confirm the intended column set before use.
    """
    clouds = np.array([rotate_crop(a, viewx, viewy, 42) for a in dset[()]])
    times = np.array(dset.attrs['time'])
    # x-profile of the first frame, integrated over a 150-pixel band
    xprofile = np.sum(rotate_crop(clouds[0],viewy=150), axis=0)
    xprofilemax = np.max(xprofile)
    # initial guess: centred, amplitude = peak, width 6 um
    gaussfit,_ = curve_fit(gauss, x, xprofile, [0, xprofilemax, 6e-6])
    newresults = pd.DataFrame([[clouds, times, xprofile, gaussfit]],
                              columns=columns)
    return newresults
# +
# Load every frame of the straight-ramp run into a tidy DataFrame `gs`.
columns = ['cloud', 'time', 'xprofile', 'yprofile', 'gaussfit']
gs = pd.DataFrame(columns=columns)
with h5py.File('data/geometricsqueezing_withramp_straight.hdf5', 'r') as f:
    for name in tqdm(f):
        dset=f[name]
        alltimes = np.array(dset.attrs['time'])
        # len(dset)-1: the dataset stores one more snapshot than time entries
        for i in tqdm(range(len(dset)-1), leave=False):
            cloud = rotate_crop(np.fliplr(dset[i]), viewx, viewy, 43.5)
            xprofile = np.sum(rotate_crop(cloud, viewx=350, viewy=150), axis=0)
            xprofilemax = np.max(xprofile)
            gaussfit,_ = curve_fit(gauss, x, xprofile, [0, xprofilemax, 6e-6])
            yprofile = np.sum(rotate_crop(cloud, viewx=150, viewy=350), axis=1)
            # NOTE(review): DataFrame.append is removed in pandas 2.0; pin pandas or use pd.concat
            newresults = pd.DataFrame([[cloud, alltimes[i], xprofile, yprofile, gaussfit]], columns=columns)
            gs = gs.append(newresults)
gs = gs.reset_index()
# -
# ### Hall drift velocity
yprofiles = []
for i,r in gs.iterrows():
    if i>150:
        yprofiles.append(r.yprofile)
yprofiles = np.array(yprofiles)
R = 25e-6                          # half-width of the central integration region [m]
mask = abs(y) < R
Rj = np.argmax(np.diff(mask))      # index of the region edge
plt.plot(1e6*y, yprofiles[0,:])
plt.plot(1e6*y, yprofiles[70,:])
plt.axvspan(-1e6*y[Rj],1e6*y[Rj], color='r', alpha=0.2)
# +
def linear(x, m, b):
    """Straight line y = m*x + b (curve_fit model)."""
    return m * x + b
# For a range of region half-widths R, relate the integrated edge current
# to the atom loss from the region; the fitted slope is the drift velocity.
Rs = np.linspace(5e-6,100e-6,100)
columns = ['R', 'xint', 'yint', 'fit', 'slope']
drift = pd.DataFrame(columns=columns)
deltat = (gs.iloc[1].time - gs.iloc[0].time)   # frame spacing [s]
for R in tqdm(Rs, leave=False):
    mask = abs(y) < R
    Rindex = np.argmax(np.diff(mask))          # pixel index of the region edge
    N0 = np.sum(yprofiles[0,mask])             # initial atom number inside the region
    xint = []
    yint = []
    for i,yp in enumerate(yprofiles):
        Nt = np.sum(yp[mask])
        # time-integrated density at the edge up to frame i
        integral = np.trapz(yprofiles[:i,Rindex] / dx, dx=deltat)
        xint.append( 2*omega * lb * integral / N0)
        yint.append(1-(Nt/N0))                 # fractional atom loss
    f,_ = curve_fit(linear, xint,yint, [0.1,0])
    newresults = pd.DataFrame([[R, xint, yint, f, f[0]]], columns=columns)
    drift = drift.append(newresults)
# -
# Drift velocity vs. applied force for both trap shapes.
# NOTE(review): `driftstraight` / `driftquartic` are only created by the
# commented-out cells below (drift.copy()); run those manually first.
Forcestraight = 0.5*epsilon*driftstraight.R/lb
Forcequartic = 0.5*epsilon*driftquartic.R/lb
testx = np.linspace(0,2.5)
plt.plot(testx, linear(testx,1,0),'r-', label='expected')   # unit-slope Hall prediction
plt.plot(Forcestraight, driftstraight.slope,'g.-', label=r'GP $x^2-y^2$')
plt.plot(Forcequartic, driftquartic.slope,'b.-', label=r'GP $x^2-y^2 + r^4$')
plt.legend()
plt.xlabel(r'$F(2m\omega^2\ell_B)$')
plt.ylabel(r'$v_d (\omega\ell_B)$')
plt.xlim([0,2.5])
plt.tight_layout()
plt.savefig('figures/rotini1d/Fig2.png', dpi=120)
gpexport = pd.DataFrame(columns=['F', 'saddle','quartic'])
gpexport.F = Forcestraight
gpexport.saddle = driftstraight.slope
gpexport.quartic = driftquartic.slope
gpexport.to_csv('data/gp_drift.csv')
# +
# driftquartic = drift.copy()
# +
# driftstraight = drift.copy()
# -
# Example fit for a single region size (index 50 of Rs).
plt.plot(drift.iloc[50].xint,drift.iloc[50].yint,'b.')
testx = np.linspace(0,0.8,100)
plt.plot(testx, linear(testx, *drift.iloc[50].fit),'r-')
plt.ylabel(r'$1-N(t)/N(0)$')
plt.xlabel(r'$\omega\ell_B\int_0^t dt \,n(t)\,/\,\,N(0)$')
plt.tight_layout()
plt.savefig('figures/rotini1d/Fig2inset.png', dpi=120)
# ### squeezing
cloud = gs.iloc[202].cloud
plt.imshow(cloud)
len(gs)
# NOTE(review): `i` here relies on a previous notebook execution; the
# assignment below (i=2) comes after this line in file order.
gs.iloc[i].gaussfit
i=2
# Early-time (large chemical potential) profile vs. its Gaussian fit.
plotx = (x-gs.iloc[i].gaussfit[0])/lb
plt.plot(plotx, 1e-16 * gs.iloc[i].xprofile)
plt.plot(plotx, 1e-16 * gauss(x,*gs.iloc[i].gaussfit))
plt.xlim([-10,10])
plt.xlabel(r'r ($\ell_B$)')
plt.ylabel(r'n (a.u.)')
plt.savefig('figures/rotini1d/largemu.png', dpi=120)
plt.show()
# i=50
# plotx = (x-gs.iloc[i].gaussfit[0])/lb
# plt.plot(plotx, 1e-16 * gs.iloc[i].xprofile)
# plt.plot(plotx, 1e-16 * gauss(x,*gs.iloc[i].gaussfit))
# plt.xlim([-5,5])
# plt.xlabel(r'r ($\ell_B$)')
# plt.ylabel(r'n (a.u.)')
# plt.show()
i=250
# Late-time (small chemical potential) profile vs. its Gaussian fit.
plotx = (x-gs.iloc[i].gaussfit[0])/lb
plt.plot(plotx, 1e-16 * gs.iloc[i].xprofile)
plt.plot(plotx, 1e-16 * gauss(x,*gs.iloc[i].gaussfit))
plt.xlim([-5,5])
plt.xlabel(r'r ($\ell_B$)')
plt.ylabel(r'n (a.u.)')
plt.savefig('figures/rotini1d/smallmu.png', dpi=120)
i=250
gpexport = pd.DataFrame(columns=['x', 'n'])
gpexport.x, gpexport.n = (x-gs.iloc[i].gaussfit[0])/lb, 1e-16 * gs.iloc[i].xprofile
gpexport.to_csv('data/gp_smallmu.csv')
gs.head()
# Fitted strip width over time; r[6] indexes the gaussfit column of itertuples.
widths = np.array([abs(r[6][2]) for r in gs.itertuples()])
plt.plot(gs['time'], 1e6*widths)
plt.axhline(1e6*lb/np.sqrt(2),c='k')   # zero-point width limit l_B/sqrt(2)
plt.xlabel('time [s]')
plt.ylabel(r'$\sigma$ [$\mu m$]')
# plt.ylim([2,8.5])
# plt.savefig('figures/rotini1d/widths_nog.png')
# Compare the simulated width decay against the measured lab data.
labdata = pd.read_csv('data/widthData.csv', names=['zt', 's'])
labdata.sort_values(by='zt', inplace=True, ignore_index=True)
zeta = 1.2*epsilon * omega / 2   # squeezing rate (1.2 fudge factor -- TODO confirm origin)
plt.figure(figsize=(7,4))
# plt.figure(figsize=(17,14))
widths = np.array([1e-6 * abs(r[6][2]) / lb for r in gs.itertuples()])
plt.plot(labdata.zt[labdata.s<3], labdata.s[labdata.s<3],'k.-' ,alpha=0.4, label='data')
plt.plot((zeta * gs['time'] )-0.2, 1e6*widths, 'r-', label='GP')   # -0.2: manual time offset
plt.axhline(1/np.sqrt(2),c='k')   # zero-point width in units of l_B
plt.xlabel(r'$\zeta t$')
plt.ylabel(r'$\sigma$ [$\ell_B$]')
plt.legend()
plt.ylim([0,2.5])
plt.xlim([0,7.5])
# plt.savefig('figures/rotini1d/widths_much_less_wiggles.png', dpi=200)
gpexport = pd.DataFrame(columns=['zt', 's'])
gpexport.zt = (zeta * gs['time'] )-0.2
gpexport.s = 1e6*widths
# +
# gpexport.to_csv('data/gp_squeezing_slowramp.csv')
# -
widths = np.array([abs(r[6][2]) for r in gs.itertuples()])
plt.plot(gs['time'], 1e6*widths)
plt.axhline(1e6*lb/np.sqrt(2),c='k')
plt.xlabel('time [s]')
plt.ylabel(r'$\sigma$ [$\mu m$]')
# plt.savefig('figures/rotini1d/widths.png')
# +
# Track the strip's orientation over time; -44.97 references it to the diagonal.
angles = []
for i,r in gs.iterrows():
    angles.append(find_angle(r.cloud))
plt.plot(gs['time'], -44.97+np.array(angles))
# plt.axhline(90,c='k')
plt.xlabel('time [s]')
plt.ylabel(r'$\theta$ [deg]')
# plt.savefig('figures/rotini1d/angles.png')
# -
# # Reproducing lab parameters
# +
# Same pipeline at the experimental trap frequency (88.6 Hz); the smaller
# dt keeps the faster dynamics resolved.
omega = 2*np.pi*88.6
epsilon = 0.2
dt = 1e-6
e = Environment(DIM=512, fov=120e-6, N=8e5)
e.harmonic_potential(omega=omega, epsilon=epsilon)
groundstate = Wavefunction(e)
groundstate.initialize_Psi(width=100)
groundstate.relax(vmax=1e9, dt=2*dt, steps=4000)
groundstate.evolve(dt=dt, cooling=0.01, steps=4000)
whirrMax = omega
steps = 100000
whirrtime = 100000
times = np.arange(steps)
# Inline single-tanh ramp (same shape as rotation_freq_simple)
Omega = [np.min([whirrMax, omega * 1.001 * np.tanh(3.3 * t / whirrtime)]) for t in times]
plt.plot(dt*times, Omega)
plt.axhline(Omega[-1],ls='--',c='k')
plt.show()
# +
# Run the sim - takes longer than the 10Hz sim above
hallstrip = deepcopy(groundstate)
hallenv = hallstrip.env
hallenv.rotating_frame(omegaR=Omega)
hallenv.absorbing_boundaries(strength=1, radius=e.fov/2)
hallstrip.evolve(dt=dt, steps=steps, cooling=0)
hallstrip.show_density()
hallstrip.show_phase()
# -
finalstrip = deepcopy(hallstrip)
# ## Crop and find the angle to the diagonal after the ramp
width = 512//2
plotrange=140
ncrop = finalstrip.density[(width-plotrange):(width+plotrange),(width-plotrange):(width+plotrange)]
plt.imshow(ncrop)
# +
# Inline copy of the moment-of-inertia analysis (duplicates find_angle above).
xsize = len(ncrop)
xx = np.linspace(-10, 10, xsize)
yy = np.linspace(-10, 10, xsize)
xx, yy = np.meshgrid(xx, yy)
# Calculate the moment of inertia tensor
Ixx = np.sum(ncrop*yy*yy)
Iyy = np.sum(ncrop*xx*xx)
Ixy = np.sum(ncrop*xx*yy)
Iyx = Ixy
I =np.array( [[Ixx, Ixy], [Iyx, Iyy]])
evals, evecs = np.linalg.eig(I)
# Angle of the least-inertia eigenvector (strip long axis), in degrees
iangle = (180*np.arctan(evecs[np.argmin(evals)][1]/evecs[np.argmin(evals)][0])/np.pi)
# -
print(f"angle from diagonal equipotential: {(iangle-45):.2f} degrees")
# ## Quartic potential
# +
# Elliptical harmonic trap plus a strong r^4 term, built pixel-by-pixel
# and installed as a custom potential (in units of hbar).
omega = 2*np.pi*10
epsilon = 0.225
dt = 1e-5
fov = 300e-6
DIM = 512
e = Environment(DIM=512, fov=fov, N=1e5)
V = np.zeros((DIM,DIM))
for i in range(DIM):
    for j in range(DIM):
        x = (i-DIM//2)*fov / DIM
        y = (j-DIM//2)*fov / DIM
        rsq = x**2 + y**2
        harmonic = 0.5 * e.mass * ( ((1-epsilon) * (omega * x) **2) +
                                    ((1+epsilon) * (omega * y) **2))
        quartic = 3e6* 0.5 * e.mass * (omega**2) * (rsq**2)   # 3e6: quartic strength factor
        V[i,j] = (harmonic + quartic)/hbar
e.custom_potential(V)
e.show_potential()
# -
groundstate = Wavefunction(e)
groundstate.initialize_Psi(width=100)
groundstate.relax(vmax=3e8, dt=dt, steps=4000)
# Effective rotating-frame potential: subtract the centrifugal term at omega.
V = e.V.copy()
for i in range(DIM):
    for j in range(DIM):
        x = (i-DIM//2)*fov / DIM
        y = (j-DIM//2)*fov / DIM
        rsq = x**2 + y**2
        centrif = 0.5 * e.mass * (omega**2) * rsq
        V[i,j] -= centrif/hbar
a = plt.contour(V)
plt.colorbar()
plt.gca().set_aspect('equal', 'box')
plt.show()
def rotation_freq(timestep, whirrTime=30000, whirrMax=omega):
    """Capped single-tanh spin-up ramp (quartic-trap variant)."""
    ramp = omega * 1.001 * np.tanh(3.3 * timestep / whirrTime)
    return np.min([whirrMax, ramp])
# +
# Plot the quartic-trap spin-up ramp in trap units.
steps = 100000
times = np.arange(steps)
Omega = [ rotation_freq(t) for t in times]
plt.plot(omega*dt*times, np.array(Omega)/omega)
plt.axhline(Omega[-1]/omega,ls='--',c='k')
plt.xlabel(r'$\omega t$')
plt.ylabel(r'$\Omega/\omega$')
# plt.savefig('figures/rampup.png')
plt.show()
# -
def geometric_squeezing_withramp(stripPsi, steps=60000, frames=300, datafile='data/geometricsqueezing_withramp.hdf5'):
    """Like geometric_squeezing, but the rotation rate follows rotation_freq
    frame-by-frame instead of being held constant.

    Appends a "geosqueeze" dataset of thresholded density snapshots to
    `datafile`, with per-frame times and dt stored as attributes.
    """
    steps = frames * (steps // frames)   # round down to a whole number of frames
    chunk = steps // frames              # evolution steps per snapshot
    dt = 1e-5
    snapshots = []
    psi = copy(stripPsi)
    for frame in tqdm(range(frames), leave=False):
        step_range = np.arange(frame * chunk, (frame + 1) * chunk)
        psi.env.rotating_frame(omegaR=[rotation_freq(t) for t in step_range])
        psi.evolve(dt=dt, steps=chunk, cooling=0.0)
        density = psi.density
        density *= density > 1           # zero out near-vacuum pixels
        snapshots.append(density)
    with h5py.File(datafile, 'a') as f:
        dsname = f"geosqueeze"
        dset = f.create_dataset(dsname, data=np.array(snapshots))
        dset.attrs['time'] = dt * chunk * np.arange(frames)
        dset.attrs['dt'] = dt
hallstrip = deepcopy(groundstate)
hallstrip.env.absorbing_boundaries(strength=1, radius=e.fov/2)
geometric_squeezing_withramp(hallstrip, steps=60000, datafile='data/geometricsqueezing_withramp_quartic.hdf5')
# +
# Reload the quartic run into a DataFrame (cloud + time only).
columns = ['cloud', 'time']
gs = pd.DataFrame(columns=columns)
with h5py.File('data/geometricsqueezing_withramp_quartic.hdf5', 'r') as f:
    for name in tqdm(f):
        dset=f[name]
        alltimes = np.array(dset.attrs['time'])
        for i in tqdm(range(len(dset)-1), leave=False):
            cloud = np.fliplr(dset[i])
            newresults = pd.DataFrame([[cloud, alltimes[i]]], columns=columns)
            gs = gs.append(newresults)
# -
plt.imshow(gs.iloc[0].cloud)
# Rebuild the ramp used during the run, for frame-by-frame plotting.
steps = 60000
times = np.arange(steps)
Omega = [ rotation_freq(t) for t in times]
frames= 300
def plot_frame(frame, savefig=False):
    """Render one movie frame: density + rotating-frame equipotentials on the
    left, ramp progress marker on the right.

    Relies on module-level state: hallstrip, Omega, steps, frames, DIM, fov,
    e, hbar, gs, omega, dt, times. Saves a JPEG when `savefig` is True.
    """
    # Rotating-frame potential at this frame's rotation rate
    V_frame = hallstrip.env.V.copy()
    Omega_frame = Omega[frame*(steps//frames)]
    for i in range(DIM):
        for j in range(DIM):
            x = (i-DIM//2)*fov / DIM
            y = (j-DIM//2)*fov / DIM
            rsq = x**2 + y**2
            centrif = 0.5 * e.mass * (Omega_frame**2) * rsq
            V_frame[i,j] -= centrif/hbar
    f, axarr = plt.subplots(ncols=2, figsize=(8,4))
    # Left panel: density with equipotential contours overlaid
    axarr[0].imshow(gs.iloc[frame].cloud, vmax=5e13, extent=1e6*np.array([-fov/2,fov/2,-fov/2,fov/2]), cmap='inferno')
    axarr[0].contour(V_frame, 25, alpha=0.7,
                     vmin=-1e3, vmax=1e3, extent=1e6*np.array([-fov/2,fov/2,-fov/2,fov/2]), cmap='RdBu_r')
    axarr[0].set_aspect('equal', 'box')
    view = 213/2
    axarr[0].set(xlim=[-view,view], ylim=[-view,view], xlabel=r'x [$\mu m$]', ylabel=r'y [$\mu m$]')
    # Right panel: ramp curve with the current time marked
    axarr[1].plot(omega*dt*times, np.array(Omega)/omega)
    plt.axhline(Omega[-1]/omega,ls='--',c='k')
    xnow = omega*dt*times[frame * steps//frames]
    ynow = Omega_frame/omega
    axarr[1].set(xlim=[0,np.max(omega*dt*times)], ylim=[0,1.1], xlabel=r'$\omega t$', ylabel=r'$\Omega/\omega$')
    axarr[1].plot([xnow], [ynow], 'ro')
    plt.axvline(xnow, c='k', alpha=0.1)
    plt.axhline(ynow, c='k', alpha=0.1)
    plt.tight_layout()
    if savefig:
        plt.savefig(f'figures/rotini1d/quartic_frames/geosqueeze_{frame}.jpg', dpi=190)
        plt.close()
plot_frame(140, savefig=False)
# Sanity check: physical view size converted back to pixels
2*1e-6*(213/2) * DIM / fov
# Export one frame as an 8-bit TIFF-ready image
img = gs.iloc[140].cloud
img = np.uint8(255*img/np.max(img))
size=363
img = rotate_crop(img, viewx=size,viewy=size)
im = Image.fromarray(img)
im
# im.save('figures/rotini1d/GPquarticframe.tiff')
plt.imshow(gs.iloc[140].cloud, vmax=5e13, extent=1e6*np.array([-fov/2,fov/2,-fov/2,fov/2]), cmap='inferno')
# Render the whole movie
for frame in tqdm(range(frames-1)):
    plot_frame(frame, savefig=True)
# Final rotating-frame potential at full rotation, over the final density
V = hallenv.V.copy()
for i in range(DIM):
    for j in range(DIM):
        x = (i-DIM//2)*fov / DIM
        y = (j-DIM//2)*fov / DIM
        rsq = x**2 + y**2
        centrif = 0.5 * e.mass * (omega**2) * rsq
        V[i,j] -= centrif/hbar
a = plt.contour(V)
plt.gca().set_aspect('equal', 'box')
plt.imshow(hallstrip.density)
plt.show()
hallenv.rotating_frame(omegaR = [omega]*40000)
hallstrip.evolve(dt=dt, steps=40000)
# # Noninteracting
# +
# Repeat with essentially zero atom number (negligible interactions).
omega = 2*np.pi*10
epsilon = 0.225
dt = 1e-5
fov = 300e-6
e = Environment(DIM=512, fov=fov, N=0.00001)
e.harmonic_potential(omega=omega, epsilon=epsilon)
groundstate = Wavefunction(e)
groundstate.initialize_Psi(width=100)
groundstate.relax(vmax=3e8, dt=dt, steps=4000)
# -
hallstrip = deepcopy(groundstate)
# +
hallenv = hallstrip.env
hallenv.rotating_frame(omegaR=[omega]*100)
hallenv.absorbing_boundaries(strength=1, radius=e.fov/2)
hallstrip.evolve(dt=dt, steps=100, cooling=0)
# -
widestrip = deepcopy(hallstrip)
geometric_squeezing(widestrip, steps=70000, frames=300, datafile='data/geometricsqueezing_nonint.hdf5')
# +
columns = ['cloud', 'time', 'xprofile', 'gaussfit']
gsnonint = pd.DataFrame(columns=columns)
with h5py.File('data/geometricsqueezing_nonint.hdf5', 'r') as f:
    for name in tqdm(f):
        dset=f[name]
        alltimes = np.array(dset.attrs['time'])
        for i in tqdm(range(len(dset)-1), leave=False):
            cloud = rotate_crop(np.fliplr(dset[i]), viewx, viewy, 42.2)
            xprofile = np.sum(rotate_crop(cloud, viewx=350, viewy=150), axis=0)
            xprofilemax = np.max(xprofile)
            gaussfit,_ = curve_fit(gauss, x, xprofile, [0, xprofilemax, 6e-6])
            newresults = pd.DataFrame([[cloud, alltimes[i], xprofile, gaussfit]], columns=columns)
            gsnonint = gsnonint.append(newresults)
# -
# NOTE(review): r[4] indexing assumes gs still has the 4-column layout from
# an earlier section; the most recent gs here has only ['cloud','time'].
widths = np.array([abs(r[4][2]) for r in gs.itertuples()])
plt.plot(gs['time'], 1e6*widths)
plt.axhline(1e6*lb/np.sqrt(2),c='k')
plt.xlabel('time [s]')
plt.ylabel(r'$\sigma$ [$\mu m$]')
plt.ylim([2,8.5])
# plt.savefig('figures/rotini1d/widths_nog.png')
zeta = 0.222 * omega / 2
plt.figure(figsize=(7,4))
# plt.figure(figsize=(17,14))
widths = np.array([1e-6 * abs(r[4][2]) / lb for r in gs.itertuples()])
widthsnonint = np.array([1e-6 * abs(r[4][2]) / lb for r in gsnonint.itertuples()])
plt.plot(labdata.zt, labdata.s,'k.-' ,alpha=0.05, label='data')
plt.plot((zeta * gs['time'] )+1.54, 1e6*widths, 'r-', label='GP')
plt.plot((zeta * gsnonint['time'] )+3, 1e6*widthsnonint, 'b-', label='GP g=0')
plt.axhline(1/np.sqrt(2),c='k')
plt.xlabel(r'$\zeta t$')
plt.ylabel(r'$\sigma$ [$\ell_B$]')
plt.legend()
plt.ylim([0,2])
plt.xlim([0,8.5])
# plt.savefig('figures/rotini1d/widths_vsnonint.png', dpi=200)
| notebooks/Hall strip.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (HAE)
# language: python
# name: hae
# ---
# # Procesamiento de imagen y vídeo
# ## Croma Key
# ###### _<NAME>_
# %matplotlib inline
import cv2
import numpy as np
import matplotlib.pyplot as plt
# +
# Load foreground and background; OpenCV reads BGR, so convert to RGB.
img = cv2.imread('images/imcl27.jpg',1)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
fondo = cv2.imread('images/imcl28.jpg',1)
fondo = cv2.cvtColor(fondo,cv2.COLOR_BGR2RGB)
# -
# ### Distancia de color
# +
# Color-distance keying: alpha = L1 distance from the reference pixel (0,0).
x,y = [0,0]
R,G,B = cv2.split(img)
R = R.astype('float')
G = G.astype('float')
B = B.astype('float')
r = R[x,y]
b = B[x,y]
g = G[x,y]
alfaDS = np.abs(R-r)+np.abs(B-b)+np.abs(G-g)
alfaDS[alfaDS>255] = 255   # clamp to 8-bit range
alfaDS[alfaDS<0] = 0
alfaDS = alfaDS.astype('uint8')
plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(alfaDS,cmap='gray')
plt.title('alfa')
# Otsu threshold turns the distance map into a binary matte
l,alfaDS = cv2.threshold(alfaDS,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
plt.subplot(1,2,2)
plt.imshow(alfaDS.astype('uint8'),cmap='gray')
plt.title('alfa binaria')
alfaDS = alfaDS.astype('bool')          # foreground mask
alfaDSF = (1-alfaDS).astype('bool')     # complementary background mask
# -
# ### Diferencia de color
# +
# Color-difference keying: green-screen strength M = G - max(R, B).
## Normalizador
#img = cv2.normalize(rgb.astype('float'),None,0.0,255.0,cv2.NORM_MINMAX)
#plt.imshow(img)
imgF = img.astype('float')
R,G,B = cv2.split(imgF)
M = G - np.maximum(R,B)
M[M<0]=0
M[M>255]=255
M = M.astype('uint8')
# Binarization with Otsu threshold
l,alfa = cv2.threshold(M,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
alfaDF = (255-alfa).astype('bool')    # foreground mask (inverted matte)
alfaDFF = (1-alfaDF).astype('bool')   # complementary background mask
plt.figure(figsize=(16,8))
plt.subplot(1,3,1)
plt.imshow(M,cmap='gray')
plt.title('alfa')
plt.subplot(1,3,2)
plt.imshow(alfaDFF,cmap='gray')
plt.title('alfa binaria')
plt.subplot(1,3,3)
plt.imshow(alfaDF,cmap='gray')
plt.title('alfa binaria correcta')
# -
# ### Adición
# +
# Composite: masked background + masked foreground, for both keying methods.
dsF = np.stack((alfaDSF,alfaDSF,alfaDSF),axis=2)*fondo
dsI = np.stack((alfaDS,alfaDS,alfaDS),axis=2)*img
dsFinal = dsF+dsI
dfF = np.stack((alfaDFF,alfaDFF,alfaDFF),axis=2)*fondo
dfI = np.stack((alfaDF,alfaDF,alfaDF),axis=2)*img
dfFinal = dfF+dfI
plt.figure(figsize=(16,8))
images = [dsI,dsF,dsFinal,dfI,dfF,dfFinal]
titles = ['Imagen distancia','Fondo distancia','Final distancia','Imagen diferencia','Fondo diferencia','Final diferencia']
for i in range(len(images)):
    plt.subplot(2,3,i+1)
    plt.imshow(images[i],cmap='gray')
    plt.title(titles[i])
# -
| src/4 - Chroma Key.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Day 18 Snailfish
DEBUG = False  # set True to print each explode/split while reducing
def parse(f):
    """Read the puzzle input at path `f` and return its lines (no newlines).

    Fixes: no longer re-binds the path parameter to the file contents, and
    reads with an explicit UTF-8 encoding instead of the platform default.
    """
    with open(f, encoding="utf-8") as fp:
        text = fp.read()
    return text.splitlines()
# Snailfish numbers must always be reduced, and the process of adding two snailfish numbers can result in snailfish numbers that need to be reduced. To reduce a snailfish number, you must repeatedly do the first action in this list that applies to the snailfish number:
#
# If any pair is nested inside four pairs, the leftmost such pair explodes.
# If any regular number is 10 or greater, the leftmost such regular number splits.
#
# +
def reduce(L):
    """Reduce the snailfish number `L` in place.

    Repeatedly applies the first action that fires: explode (preferred),
    then split; stops when neither applies. Shadows builtins.reduce, which
    the rest of this notebook does not use.
    """
    while True:
        if explode(L):
            if DEBUG:
                print("Explode")
        elif split(L):
            if DEBUG:
                print("Split")
        else:
            return
def addition(x, y, direction="right"):
    """Add `y` to the outermost regular number on one side of pair `x`, in place.

    direction="right" (the default) targets x[0], "left" targets x[1],
    matching the original call convention; recursion descends along the
    same side until a regular number is found.

    Bug fix: the original bound the child to a local (`t = x[...]; t += y`),
    so integer leaves were never written back into the pair and the
    addition silently did nothing. Assigning through the index fixes it,
    mirroring add_left/add_right below.
    """
    idx = 1 if direction == "left" else 0
    if isinstance(x[idx], int):
        x[idx] += y
    else:
        addition(x[idx], y, direction)
def add_left(x, y):
    """Add `y` to the rightmost regular number inside pair `x`, in place."""
    if isinstance(x[1], int):
        x[1] += y
        return
    add_left(x[1], y)
def add_right(x, y):
    """Add `y` to the leftmost regular number inside pair `x`, in place."""
    if isinstance(x[0], int):
        x[0] += y
        return
    add_right(x[0], y)
def split(L):
    """Split the leftmost regular number >= 10 into a [floor, ceil] pair.

    Mutates `L` in place. Returns True if a split happened, None otherwise
    (None is falsy, matching the original's implicit return).
    """
    if not isinstance(L, list):
        return None
    for side in (0, 1):
        child = L[side]
        if isinstance(child, int):
            if child >= 10:
                half = child // 2
                L[side] = [half, child - half]
                return True
        elif split(child):
            return True
    return None
# -
def explode(x, nested=0):
    """Explode the leftmost pair nested four deep, mutating `x` in place.

    Returns a truthy (zeroed, left_carry, right_carry) tuple while an
    explosion is being propagated upward, or None if nothing exploded.
    The immediate parent replaces the exploded pair with 0 and pushes the
    appropriate carry into its other child; remaining carries bubble up
    until an ancestor can absorb them (or fall off the edge).
    """
    if not isinstance(x, list):
        return None
    if nested == 4:
        # This pair explodes: hand both regular numbers to the parent.
        return True, x[0], x[1]
    left_result = explode(x[0], nested + 1)
    if left_result:
        zeroed, carry_left, carry_right = left_result
        if zeroed:
            x[0] = 0
            if isinstance(x[1], int):
                x[1] += carry_right
            else:
                add_right(x[1], carry_right)
        return False, carry_left, 0
    right_result = explode(x[1], nested + 1)
    if right_result:
        zeroed, carry_left, carry_right = right_result
        if zeroed:
            x[1] = 0
            if isinstance(x[0], int):
                x[0] += carry_left
            else:
                add_left(x[0], carry_left)
        return False, 0, carry_right
    return None
# To check whether it's the right answer, the snailfish teacher only checks the magnitude of the final sum. The magnitude of a pair is 3 times the magnitude of its left element plus 2 times the magnitude of its right element. The magnitude of a regular number is just that number.
def magnitude(x):
    """Snailfish magnitude: 3*left + 2*right recursively; an int is itself."""
    return x if isinstance(x, int) else 3 * magnitude(x[0]) + 2 * magnitude(x[1])
# +
# Test case
# eval() is used on trusted puzzle input only -- each line is a nested list literal.
lines = parse("input_files/day18.test.txt")
# First number
result = eval(lines[0])
reduce(result)
for L in lines[1:]:
    result = [result, eval(L)]   # snailfish addition: wrap in a new pair
    reduce(result)
    if DEBUG:
        print(result)
# 4140
print(magnitude(result))
# +
# Real case
lines = parse("input_files/day18.txt")
# First number
result = eval(lines[0])
reduce(result)
for i in lines[1:]:
    result = [result, eval(i)]   # snailfish addition: wrap in a new pair
    reduce(result)
    if DEBUG:
        print(result)
# 3816
print(magnitude(result))
# +
# Part 2: largest magnitude of any ordered sum of two distinct numbers
# (re-eval each line so reduce() never mutates a shared structure).
largest_magnitude = 0
for i in lines:
    for j in lines:
        if i == j:
            continue
        result = [eval(i), eval(j)]
        reduce(result)
        largest_magnitude = max(largest_magnitude, magnitude(result))
# 4819
print(largest_magnitude)
# -
| day18.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Stochastic Processes: <br>Data Analysis and Computer Simulation
# <br>
#
#
# # Distribution function and random number
# <br>
#
#
# # 2. Generating random numbers with Gaussian/binomial/Poisson distributions
# <br>
# + [markdown] slideshow={"slide_type": "slide"}
# # 2.1. Preparations
# + [markdown] slideshow={"slide_type": "-"}
# ## Python built-in functions for random numbers (numpy.random)
# -
# 1. `seed(seed)`: Initialize the generator with an integer "seed".
# 1. `rand(d0,d1,...,dn)`: Return a multi-dimensional array of uniform random numbers of shape (d0, d1, ... , dn).
# 1. `randn(d0,d1,...,dn)`: The same as above but from the standard normal distribution.
# 1. `binomial(M,p,size)`: Draw samples from a binomial distribution with "M" and "p".
# 1. `poisson(a,size)`: Draw samples from a Poisson distribution with "a".
# 1. `choice([-1,1],size)`: Generates random samples from the two choices, -1 or 1 in this case.
# 1. `normal(ave,std,size)`: Draw random samples from a normal distribution.
# 1. `uniform([low,high,size])`: Draw samples from a uniform distribution.
#
#
# - See the Scipy website for details <br>
# https://docs.scipy.org/doc/numpy-dev/reference/routines.random.html
# + [markdown] slideshow={"slide_type": "slide"}
# ## Import common libraries
# -
% matplotlib inline
import numpy as np # import numpy library as np
import math # use mathematical functions defined by the C standard
import matplotlib.pyplot as plt # import pyplot library as plt
plt.style.use('ggplot') # use "ggplot" style for graphs
# + [markdown] slideshow={"slide_type": "slide"}
# # 2.2. Normal / Gaussian distribution
# + [markdown] slideshow={"slide_type": "-"}
# ## Generate random numbers, $x_0, x_1, \cdots, x_N$
# -
# Draw N standard-normal samples (scaled to ave/std) and plot the sequence.
ave = 0.0 # set average
std = 1.0 # set standard deviation
N = 100000 # number of generated random numbers
np.random.seed(0) # initialize the random number generator with seed=0
X = ave+std*np.random.randn(N) # generate random sequence and store it as X
plt.ylim(-10,10) # set y-range
plt.xlabel(r'$i$',fontsize=16) # set x-label
plt.ylabel(r'$x_i$',fontsize=16) # set y-label
plt.plot(X,',') # plot x_i vs. i (i=1,2,...,N) with dots
plt.show() # draw plots
# + [markdown] slideshow={"slide_type": "slide"}
# ## Compare the distribution with the normal distribution function
# -
# \begin{equation}
# P(x)=\frac{1}{\sqrt{2\pi\sigma^2}}\exp\left[-\frac{(x-\langle X \rangle)^2}{2\sigma^2}\right]\tag{D1}
# \end{equation}
# + format="row" slideshow={"slide_type": "-"}
# Histogram of X against the normal pdf, Eq. (D1).
# Fix: `normed` was removed in Matplotlib 3.1; `density=True` is the replacement.
plt.hist(X,bins=25,density=True) # plot normalized histogram of X using 25 bins
x = np.arange(-10,10,0.01) # create array of x from -10 to 10 with increment 0.01
y = np.exp(-(x-ave)**2/(2*std**2))/np.sqrt(2*np.pi*std**2) # normal distribution, Eq. (D1)
plt.xlim(-10,10) # set x-range
plt.plot(x,y,color='b') # plot y vs. x with blue line
plt.xlabel(r'$x$',fontsize=16) # set x-label
plt.ylabel(r'$P(x)$',fontsize=16) # set y-label
plt.legend([r'Gaussian',r'histogram'], fontsize=16) # set legends
plt.show() # display plots
# + [markdown] slideshow={"slide_type": "slide"}
# ## Calculate the auto-correlation function $\varphi(i)$
# -
# ### The definition
#
# \begin{equation}
# \varphi(i)=\frac{1}{N}\sum_{j=1}^{N} \left(x_j - \langle X\rangle\right)\left(x_{i+j}-\langle X\rangle\right) \tag{D2}
# \end{equation}
#
# \begin{equation}
# \varphi(i=0)=\frac{1}{N}\sum_{j=1}^{N} \left(x_j - \langle X\rangle\right)^2=\langle x_j - \langle X\rangle\rangle^2=\sigma^2\tag{D3}
# \end{equation}
#
# \begin{equation}
# \varphi(i\ne 0)= \langle x_j - \langle X\rangle\rangle\langle x_{i\ne j} - \langle X\rangle\rangle=0\hspace{5mm}
# (\rightarrow{\rm White\ noise})\hspace{-12mm}
# \tag{D4}
# \end{equation}
# + [markdown] slideshow={"slide_type": "slide"}
# ### A code example to calculate auto-correlation
# + slideshow={"slide_type": "-"}
def auto_correlate(x):
    """Full autocorrelation of `x`, keeping only non-negative lags.

    Relies on the module-level sample count N for the slice offset.
    """
    full = np.correlate(x, x, mode="full")
    return full[N - 1:]
# Autocorrelation of the Gaussian sequence: sigma^2 at lag 0, ~0 elsewhere (white noise).
# Fixes: removed the dead `c = np.zeros(N)` pre-allocation (immediately
# overwritten), and made the print string raw ('\s' is an invalid escape).
c = auto_correlate(X-ave)/N
plt.plot(c)
plt.xlim(-1000,10000)
plt.xlabel(r'$i$',fontsize=16)
plt.ylabel(r'$\varphi(i)$',fontsize=16)
print(r'\sigma^2 =',std**2)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # 2.3. Binomial distribution
# + [markdown] slideshow={"slide_type": "-"}
# ## Generate random numbers, $n_0, n_1, \cdots, n_N$
# -
# Draw N binomial samples (heads out of M fair-coin tosses) and plot them.
p = 0.5 # set p, propability to obtain "head" from a coin toss
M = 100 # set M, number of tosses in one experiment
N = 100000 # number of experiments
np.random.seed(0) # initialize the random number generator with seed=0
X = np.random.binomial(M,p,N) # generate the number of heads after M tosses, N times, and store it as X
plt.xlabel(r'$i$',fontsize=16) # set x-label
plt.ylabel(r'$n_i$',fontsize=16) # set y-label
plt.plot(X,',') # plot n_i vs. i (i=1,2,...,N) with dots
plt.show() # draw plots
# + [markdown] slideshow={"slide_type": "slide"}
# ## Compare the distribution with the Binomial distribution function
# -
# \begin{equation}
# P(n)=\frac{M!}{n!(M-n)!}p^{n}(1-p)^{M-n}\tag{D5}
# \end{equation}
#
# \begin{equation}
# \langle n \rangle=Mp\tag{D6}
# \end{equation}
#
# \begin{equation}
# \sigma^2=Mp(1-p)\tag{D7}
# \end{equation}
# + slideshow={"slide_type": "-"}
def binomial(n, m, p):
    """Binomial probability of n successes in m trials with success
    probability p, Eq. (D5)."""
    n_choose = math.factorial(m) / (math.factorial(n) * math.factorial(m - n))
    return n_choose * p ** n * (1 - p) ** (m - n)
# Histogram of X against the binomial pmf, Eq. (D5).
# Fixes: `normed` was removed in Matplotlib 3.1 (`density=True` replaces it);
# the inline comment said 22 bins while the code uses 20.
plt.hist(X,bins=20,density=True) # plot normalized histogram of X using 20 bins
x = np.arange(M) # generate array of x values from 0 to M-1, in intervals of 1
y = np.zeros(M) # generate array of y values, initialized to 0
for i in range(M):
    y[i]=binomial(i,M,p) # compute binomial distribution P(n), Eq. (D5)
plt.plot(x,y,color='b') # plot y vs. x with blue line
plt.xlabel(r'$n$',fontsize=16) # set x-label
plt.ylabel(r'$P(n)$',fontsize=16) # set y-label
plt.legend([r'Binomial',r'histogram'], fontsize=16) # set legends
plt.show() # display plots
# + [markdown] slideshow={"slide_type": "slide"}
# ## Calculate the auto-correlation function $\varphi(i)$
# -
# ### The definition
#
# \begin{equation}
# \varphi(i)=\frac{1}{N}\sum_{j=1}^{N} \left(n_j - \langle n\rangle\right)\left(n_{i+j}-\langle n\rangle\right) \tag{D8}
# \end{equation}
#
# \begin{equation}
# \varphi(i=0)=\frac{1}{N}\sum_{j=1}^{N} \left(n_j - \langle n\rangle\right)^2=\langle n_j - \langle n\rangle\rangle^2=\sigma^2=Mp(1-p)\tag{D9}
# \end{equation}
#
# \begin{equation}
# \varphi(i\ne 0)= \langle n_j - \langle n\rangle\rangle\langle n_{i\ne j} - \langle n\rangle\rangle=0\hspace{8mm}
# (\rightarrow{\rm White\ noise})\hspace{10mm}
# \tag{D10}
# \end{equation}
# + [markdown] slideshow={"slide_type": "slide"}
# ### A code example to calculate auto-correlation
# + slideshow={"slide_type": "-"}
def auto_correlate(x):
    """Full autocorrelation of `x`, keeping only non-negative lags
    (redefinition; uses the module-level sample count N)."""
    full = np.correlate(x, x, mode="full")
    return full[N - 1:]
# Autocorrelation of the binomial sequence: sigma^2 = Mp(1-p) at lag 0.
# Fixes: removed the dead `c = np.zeros(N)` pre-allocation, and made the
# print string raw ('\s' is an invalid escape sequence).
c = auto_correlate(X-M*p)/N
plt.plot(c)
plt.xlim(-1000,10000)
plt.xlabel(r'$i$',fontsize=16)
plt.ylabel(r'$\varphi(i)$',fontsize=16)
print(r'\sigma^2 =',M*p*(1-p))
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# # 2.4. Poisson distribution
# + [markdown] slideshow={"slide_type": "-"}
# ## Generate random numbers, $n_0, n_1, \cdots, n_N$
# -
# Draw N Poisson samples with mean a and plot the sequence.
a = 10.0 # set a, the expected value
N = 100000 # number of generated random numbers
np.random.seed(0) # initialize the random number generator with seed=0
X = np.random.poisson(a,N) # generate randon numbers from poisson distribution
plt.xlabel(r'$i$',fontsize=16) # set x-label
plt.ylabel(r'$n_i$',fontsize=16) # set y-label
plt.plot(X,',') # plot n_i vs. i (i=1,2,...,N) with dots
plt.show() # draw plots
# + [markdown] slideshow={"slide_type": "slide"}
# ## Compare the distribution with the Poisson distribution function
# -
# \begin{equation}
# P(n)=\frac{a^n e^{-a}}{n!}\tag{D11}
# \end{equation}
#
# \begin{equation}
# \langle n \rangle=a\tag{D12}
# \end{equation}
#
# \begin{equation}
# \sigma^2=a\tag{D13}
# \end{equation}
#
# + slideshow={"slide_type": "-"}
def poisson(n,a):
    """Poisson probability P(n) = a**n * exp(-a) / n!, Eq. (D11).

    Evaluated in log-space so it does not overflow for large n or a, where
    a**n and math.factorial(n) become astronomically large; the result for
    moderate n agrees with the direct formula to float precision.
    """
    if n == 0:
        return np.exp(-a)  # avoid 0*log(a) when a could be 0
    prob = np.exp(n*np.log(a) - a - math.lgamma(n+1))
    return prob
# Compare the sampled histogram with the Poisson pmf, Eq. (D11).
# Fix: 'normed' was removed from plt.hist in Matplotlib 3.6; 'density'
# is the equivalent keyword and produces the same normalized histogram.
plt.hist(X,bins=25,density=True) # plot normalized histogram of X using 25 bins
x = np.arange(M)                 # n values 0..M-1 (M is assumed set by an earlier cell -- TODO confirm)
y = np.zeros(M)                  # array for the theoretical pmf, initialized to zero
for i in range(M):
    y[i]=poisson(i,a)            # Compute Poisson distribution for n, Eq. (D11)
plt.plot(x,y,color='b')          # plot y vs. x with blue line
plt.xlabel(r'$n$',fontsize=16)   # set x-label
plt.ylabel(r'$P(n)$',fontsize=16) # set y-label
# NOTE(review): with bare label lists, matplotlib assigns labels to artists
# in creation order -- confirm 'Poisson'/'histogram' match the right artists.
plt.legend([r'Poisson',r'histogram'], fontsize=16) # set legends
plt.show()                       # display plots
| edx-stochastic-data-analysis/downloaded_files/02/009x_22.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''chimera'': conda)'
# language: python
# name: python3
# ---
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sentencepiece as spm
import torch as th
# Word-boundary timestamps (seconds) predicted by CIF with alignment loss...
cif_seg = [0.0000, 0.1429, 0.4694, 0.8571, 1.0816, 1.8980, 3.1020, 3.4490, 3.6735,
           3.8163, 3.9388, 4.0816, 4.2245, 4.2857, 4.3878, 4.4898]
# ...and without alignment loss.
cif_without_align_seg = [0.0000, 0.2653, 0.5510, 0.6531, 0.9592, 1.9796, 3.1429, 3.3469, 3.5306,
                         3.7143, 3.8367, 3.9592, 4.0612, 4.2449, 4.3265, 4.4082, 4.4898]
# Ground-truth (start, end) spans of each word, paired with the words below.
real_seg = [(0.05, 0.17), (0.17, 0.45), (0.51, 1.21), (1.75, 2.43), (2.91, 3.09), (3.17, 3.37), (3.37, 3.63), (3.63, 3.73), (3.73, 3.77), (3.77, 3.99), (4.03, 4.11), (4.11, 4.35)]
sentence = ['It', 'was', 'dinnertime,', 'and', 'we', 'started', 'looking', 'for', 'a', 'place', 'to', 'eat.']
mid = [(l + r) / 2 for l, r in real_seg]    # bar centers for the reference words
width = [r - l for l, r in real_seg]        # bar widths = word durations
# Figure 1: stems for both segmentations stacked above the reference word bars.
plt.figure(dpi=500, figsize=(30, 10))
plt.stem(cif_seg, np.ones_like(cif_seg) + 2, bottom=2, markerfmt='none', linefmt='blue', label='w/ align')
plt.stem(cif_without_align_seg, np.ones_like(cif_without_align_seg) + 1, bottom=1, markerfmt='none', linefmt='green', label='w/o align')
bar = plt.bar(mid, height=np.ones_like(mid), width=width, fc='white', ec='orange', tick_label=sentence)
plt.yticks([], [])
plt.legend()
plt.savefig('segmentation.pdf')
# Figure 2: the no-alignment segmentation alone.
plt.figure(dpi=100)
plt.stem(cif_without_align_seg, np.ones_like(cif_without_align_seg), markerfmt='none', linefmt='blue')
plt.bar(mid, height=np.ones_like(mid) * 2, width=width, color='orange')
# NOTE(review): `bar` belongs to the *previous* figure; calling bar_label on it
# after plt.figure() likely annotates the wrong figure -- confirm intent.
plt.bar_label(bar, labels=sentence)
plt.savefig('/tmp/cif_noalign.png')
# Load the shared BPE model and the dumped speech (ST) / text (MT) features.
bpe = spm.SentencePieceProcessor()
bpe.Load('/home/ubuntu/work/datasets/must-c/en-de/spm_unigram10000_wave_joint.model')
transcripts = th.load('/home/ubuntu/work/experiments/tmp/mt_input.pt')      # token id batch
st_feature = th.load('/home/ubuntu/work/experiments/tmp/st_full_feature.pt')
mt_feature = th.load('/home/ubuntu/work/experiments/tmp/mt_feature.pt')
# Effective lengths: ST frames are non-padding while any channel exceeds the
# threshold; MT length counts tokens that are not pad id 1.
st_lengths = (st_feature.max(dim=-1)[1] > 1e-5).sum(dim=-1)
mt_lengths = (transcripts != 1).sum(dim=-1)
st_lengths, mt_lengths  # notebook cell display
# Pairwise L2 distance between the speech (ST) and text (MT) features of the
# first batch element; rows = speech frames, columns = text tokens.
pairwise_dist = th.zeros(st_lengths[0], mt_lengths[0]).to('cuda')
for i in range(st_lengths[0]):
    for j in range(mt_lengths[0]):
        pairwise_dist[i, j] = th.norm(st_feature[0, i] - mt_feature[0, j])
sns.heatmap(data=pairwise_dist.transpose(0, 1).cpu().detach().numpy(), cmap=sns.diverging_palette(10, 220, sep=40, n=7))
# Larger alignment figure: tokens on the y-axis, speech frames on the x-axis.
plt.figure(figsize=(30, 10))
plt.yticks(np.arange(mt_lengths[0].item()), [bpe.Decode(t.item()) for t in transcripts[0, :mt_lengths[0]]])
plt.xticks([], [])
# Fix: the original line ended in a bare `cmap=` (SyntaxError). Use the same
# diverging palette as the heatmap above, converted to a colormap.
plt.imshow(pairwise_dist.transpose(0, 1).cpu().detach().numpy(), aspect='auto',
           cmap=sns.diverging_palette(10, 220, sep=40, as_cmap=True))
plt.savefig('alignment.pdf', format='pdf')
# NOTE(review): `alpha` is not defined anywhere in this file -- presumably the
# CIF firing weights loaded in an interactive session; confirm its source.
alpha0 = alpha[0, :st_lengths[0]].cpu().detach().numpy().reshape(1, -1)
plt.figure(dpi=500, figsize=(40, 40))
plt.imshow(alpha0)
bpe.DecodeIds(transcripts[0].tolist())  # notebook display of the decoded transcript
# Cosine similarity between ST features and text embeddings.
# NOTE(review): `text_embeddings` is also undefined in this file -- confirm.
pairwise_cosdist = th.zeros(st_feature.size(0), text_embeddings.size(0)).to('cuda')
for i in range(st_feature.size(0)):
    for j in range(text_embeddings.size(0)):
        pairwise_cosdist[i, j] = (st_feature[i] * text_embeddings[j]).sum() / st_feature[i].norm() / text_embeddings[j].norm()
# Diagonal mean (matched pairs) vs. overall mean (all pairs).
print(pairwise_cosdist.diag().mean(), pairwise_cosdist.mean())
# Decode two hard-coded id sequences (trailing 1s are padding) and inspect
# how the BPE model re-segments them.
sentence = bpe.Decode([ 294,  227, 1301,   33,   20,    7,   13,  476,  100,    8, 3589,   12,
         331, 3921,  693,   55,    2,    1,    1,    1,    1,    1,    1,    1,
           1,    1,    1,    1,    1,    1,    1])
print(sentence, bpe.EncodeAsPieces(sentence), sep='\n')
sentence = bpe.Decode([4914,  691,   35,  704, 2731,   10,  189,   33, 4649,    5,  939,   15,
         243,  191,   69,   89,  785,  793,   51,   32,   74,  109,    3,   21,
         758,  436,  679,   55,    2,    1,    1])
print(*bpe.EncodeAsPieces(sentence), sep=' ')
# Plot a hard-coded per-position quantity (pasted from a training run --
# presumably CIF weights or losses; TODO confirm what it is).
plt.plot([3.7129e+00, 9.1016e+00, 7.5469e+00, 8.2500e+00, 5.6289e+00, 1.0918e+00,
        2.0703e+00, 7.0234e+00, 6.9336e+00, 6.4414e+00, 2.4316e+00, 2.9102e+00,
        2.4238e+00, 2.9648e+00, 2.0508e+00, 4.0430e+00, 7.8594e+00, 8.2656e+00,
        3.7969e+00, 2.7578e+00, 2.3770e+00, 2.9902e+00, 2.7891e+00, 2.8203e+00,
        7.0117e+00, 3.4707e+00, 6.1680e+00, 2.5273e+00, 2.6621e+00, 6.7949e-05,
        6.7949e-05])
plt.savefig('/tmp/align.png')
| cs291k/analysis/alignment/align.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the Lorenz-96 simulation and keep the first two variables.
df = pd.read_csv('C:/Users/chanyoung/Desktop/Neural-GC-master/lorenz_96_10_10_1000.csv')
X = df[['a','b']].values
# -
def data_split(X, timelag = 10):
    """Split a series into sliding windows and one-step-ahead targets.

    For each position i the input window is X[i:i+timelag] and the target is
    X[i+timelag+1] (note: one sample is skipped between window and target,
    matching the original hard-coded behaviour).

    Fix: the original body ignored the `timelag` argument and hard-coded
    10/11; it now honours `timelag` (the default of 10 keeps old behaviour).
    """
    data = []
    Y = []
    for i in range(len(X) - timelag - 1):
        data.append(X[i:i+timelag])
        Y.append(X[i+timelag+1])
    return data, Y
# +
import numpy as np
class RBFtimenet2d(object):
    """One-layer RBF network for a 2-D time series.

    Each training window i gets its own centre (per-dimension mean),
    covariance, 2x2 weight matrix and bias; fit() runs a gradient-style
    update with early stopping on the mean absolute loss.
    """
    def __init__(self, timelag = 10, lr = 0.01, epochs = 100, inferStds = True, seed = 1234):
        self.lr = lr                 # learning rate
        self.epochs = epochs         # maximum number of training epochs
        self.inferStds = inferStds   # kept for API compatibility; unused below
        self.seed = seed             # RNG seed for reproducible weight init
        self.timelag = timelag       # window length of each training sample
        np.random.seed(self.seed)
    def cluster(self, X):
        # Per-window RBF parameters: the centre is the per-dimension mean of
        # the window, the spread is the window's 2x2 sample covariance.
        clusters = []
        stds = []                    # unused (see commented-out std code below)
        cov = []
        for x in X:
            clu1, clu2 = np.mean(x[:, 0]), np.mean(x[:, 1])
            clusters.append([clu1, clu2])
            '''
            std1, std2 = np.std(x[:, 0]), np.std(x[:, 1])
            stds.append([std1, std2])
            '''
            cov.append(np.cov((x.T)))
        return clusters, cov
    # verified (translated from the original Korean comment)
    def rbf(self, x, clusters, cov):
        # NOTE(review): this divides the scalar squared distance elementwise by
        # inv(cov)*inv(cov) (a 2x2 matrix), so it returns a 2x2 array rather
        # than a scalar Gaussian -- confirm this is the intended kernel.
        return np.exp(-1*((x[0] -clusters[0])**2 + (x[1] - clusters[1])**2)/
                      (np.linalg.inv(cov) * np.linalg.inv(cov)))
    def fit(self, X, y):
        """Train per-window weights on (window, target) pairs; returns
        (mean loss per epoch, predictions per epoch, per-sample losses)."""
        self.c, self.cov = self.cluster(X)
        self.w = np.random.randn(len(X),2,2)   # one 2x2 weight matrix per window
        self.b= np.random.randn(len(X),1)      # one bias per window
        # training
        loss_list = []
        loss_mean_list = []
        F_list_epoch = []
        for epoch in range(self.epochs):
            loss_list2 = []
            print('{} epoch train'.format(epoch))
            pred_list = []
            for i in range(len(X)):
                rbf_x = np.array([self.rbf(x, self.c[i], self.cov[i]) for x in X[i]])
                rbf_w = sum([rbf_.dot(self.w[i]) for rbf_ in rbf_x])
                # Scalar prediction from the diagonal of the summed product.
                F = rbf_w[0][0] + rbf_w[1][1] + self.b[i]
                loss = -(y[i][0] - F)          # signed error (prediction - target)
                # loss / predicted value bookkeeping
                loss_list2.append(np.abs(loss))
                pred_list.append(F)
                # weight, bias, center, sigma update
                # NOTE(review): weights/centres/cov are updated with '+=' while
                # the equivalent 1-D class below uses '-=' for w and b --
                # confirm the intended gradient sign.
                self.w[i][0][0] += self.lr * rbf_w[0][0] *loss
                self.w[i][1][0] += self.lr * rbf_w[1][0] *loss
                self.w[i][0][1] += self.lr * rbf_w[0][1] *loss
                self.w[i][1][1] += self.lr * rbf_w[1][1] *loss
                self.b[i] += self.lr * loss
                self.c[i] += self.lr * loss
                self.cov[i] += self.lr * loss
            F_list_epoch.append(pred_list)
            loss_list.append(loss_list2)
            loss_mean_list.append(np.mean(loss_list2))
            print("{} epoch loss:".format(epoch), np.mean(loss_list2))
            print('---------------------------------------')
            print()
            # Early stopping: bail out if the epoch loss exceeds the best of
            # the previous few epochs.
            if epoch >= 5 and (loss_mean_list[epoch] > min(loss_mean_list[epoch - 5:epoch - 1])):
                print("early stopping at {} epoch".format(epoch))
                return loss_mean_list, F_list_epoch, loss_list
            else:
                continue
        return loss_mean_list, F_list_epoch, loss_list
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# data load: Lorenz-96 simulation, first two variables
df = pd.read_csv('C:/Users/chanyoung/Desktop/Neural-GC-master/lorenz_96_10_10_1000.csv')
X2d = df[['a','b']].values

# data fit: windowed samples with lag 10, then train the 2-D RBF net
lag = 10
X_2d, Y = data_split(X2d,timelag = lag)
rbfnet2d = RBFtimenet2d(timelag = lag,lr=1e-3,epochs = 100)
loss_mean_list, F_list_epoch, loss_list = rbfnet2d.fit(X_2d, Y)
# -
rbfnet2d.w  # notebook display of the trained per-window weights
# +
# Plot the epoch-3 predictions against the ground truth.
fig, axarr = plt.subplots(1, 2, figsize=(16, 5))
axarr[0].plot(F_list_epoch[3],'-o', label='RBF-Net')
axarr[0].plot(Y,'-o', label='True')
axarr[0].set_xlabel('T')
axarr[0].set_title('Entire time series')
# Fix: both traces of the right subplot were labelled 'true'; the first
# trace is the network prediction.
axarr[1].plot(F_list_epoch[3][:50],'-o', label='RBF-Net')
axarr[1].plot(Y[:50],'-o', label='True')
axarr[1].set_xlabel('T')
axarr[1].set_title('First 50 time points')
for ax in axarr:
    ax.legend()   # the labels were defined but never displayed
plt.tight_layout()
plt.show()
# -
class RBFtimenet(object):
    """One-layer RBF network for a 1-D time series.

    Each training window i gets its own scalar centre (window mean), spread
    (window std), weight vector of length `timelag` and bias; fit() runs a
    gradient-style update with early stopping on the mean absolute loss.
    """
    def __init__(self, timelag = 10, lr = 0.01, epochs = 100, inferStds = True, seed = 1234):
        self.lr = lr                 # learning rate
        self.epochs = epochs         # maximum number of training epochs
        self.inferStds = inferStds   # kept for API compatibility; unused below
        self.seed = seed             # RNG seed for reproducible weight init
        self.timelag = timelag       # window length of each training sample
        np.random.seed(self.seed)
    def cluster(self, X):
        # Per-window centre (mean) and spread (std) of the time-series window.
        clusters = [np.mean(x) for x in X]
        stds = [np.std(x) for x in X]
        return clusters, stds
    def rbf(self, x, clusters, stds):
        # Scalar Gaussian kernel centred at `clusters` with width `stds`.
        return np.exp(-1 / (2 * stds**2) * (x-clusters)**2)
    def fit(self, X, y):
        """Train per-window weights on (window, target) pairs; returns
        (mean loss per epoch, predictions per epoch, per-sample losses)."""
        self.c, self.s = self.cluster(X)
        self.w = np.random.randn(len(X), self.timelag)  # one weight vector per window
        self.b = np.random.randn(len(X),1)              # one bias per window
        # training
        loss_list = []
        loss_mean_list = []
        F_list_epoch = []
        for epoch in range(self.epochs):
            loss_list2 = []
            print('{} epoch train'.format(epoch))
            pred_list = []
            for i in range(len(X)):
                rbf_x = np.array([self.rbf(x, self.c[i], self.s[i]) for x in X[i]])
                F = rbf_x.T.dot(self.w[i]) + self.b[i]  # linear readout of the kernel values
                loss = -(y[i] - F)                      # signed error (prediction - target)
                # loss / predicted value bookkeeping
                loss_list2.append(np.abs(loss))
                pred_list.append(F)
                # weight, bias, center, sigma update
                # NOTE(review): reshape(10,) hard-codes the default timelag and
                # breaks for other values -- should be reshape(self.timelag,).
                self.w[i] -= self.lr * rbf_x.reshape(10,) * loss
                self.b[i] -= self.lr * loss
                self.c[i] += self.lr * loss
                self.s[i] += self.lr * loss
            F_list_epoch.append(pred_list)
            loss_list.append(loss_list2)
            loss_mean_list.append(np.mean(loss_list2))
            print("{} epoch loss:".format(epoch), np.mean(loss_list2))
            print('---------------------------------------')
            # Early stopping: bail out if the epoch loss exceeds the best of
            # the previous few epochs.
            if epoch >= 5 and (loss_mean_list[epoch] > min(loss_mean_list[epoch - 5:epoch - 1])):
                print("early stopping at {} epoch".format(epoch))
                return loss_mean_list, F_list_epoch, loss_list
            else:
                continue
        return loss_mean_list, F_list_epoch, loss_list
# Fit the 1-D RBF net on variable 'a' alone (restricted model).
X = df['a'].values
X_, Y = data_split(X)
rbfnet = RBFtimenet(timelag = 10,lr=1e-3,epochs = 1000)
loss_mean_list, F_list_epoch, loss_list = rbfnet.fit(X_, Y)
# Granger-style causal measure: log ratio of bias variances of the
# unrestricted (2-D) vs. restricted (1-D) models -- TODO confirm this is the
# intended statistic (variance of residuals is more common).
CD = np.log(np.var(rbfnet2d.b)/np.var(rbfnet.b))
CD
from scipy.stats import f_oneway
# NOTE(review): RBFtimenet2d never defines an attribute `w_x2`; this line
# raises AttributeError as written -- probably stale code from an older class.
f_oneway(rbfnet2d.w_x2,np.zeros((989,10)))
rbfnet2d.w_x2.T.shape
# Conclusion noted by the author: causality direction b -> a
| cs224w/RBFtimenet2d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Swap two values without a temporary variable, using only add/subtract.
A = 10
B = 20


def swap(a, b):
    """Return (b, a), computed arithmetically -- no temp variable."""
    a += b        # a now holds the sum of both values
    b = a - b     # sum - original b == original a
    a -= b        # sum - original a == original b
    return a, b


print("Before Swap: A is {} and B is {}".format(A, B))
A, B = swap(A, B)
print("After Swap: A is {} and B is {}".format(A, B))
| AlgPractices/swapwithoutpara.py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (SageMaker JumpStart Data Science 1.0)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:eu-west-1:724265385110:image/sagemaker-jumpstart-data-science-1.0
# ---
# This notebook is used to show some exploratory data examples:
#
# We begin by setting up our data using the table technique
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

# Load the labelled housing data (columns include num_rooms, price, expensive).
house = pd.read_csv('1_houses.csv')
house  # notebook cell display
# Filter to get only the expensive houses
# Split the data set into the two price classes via boolean masks.
is_expensive = house["expensive"] == True
house_expensive = house[is_expensive]
num_rooms_expensive = house_expensive["num_rooms"]
price_expensive = house_expensive["price"]

is_good_price = house["expensive"] == False
house_not_expensive = house[is_good_price]
num_rooms_not_expensive = house_not_expensive["num_rooms"]
price_not_expensive = house_not_expensive["price"]

num_rooms_not_expensive  # notebook cell display
# +
# Scatter of price vs. room count, coloured by the expensive/good-price label.
tick_spacing = 1
fig, ax = plt.subplots(1,1)
ax.scatter(num_rooms_expensive, price_expensive, c='r',marker='x', label='Expensive')
ax.scatter(num_rooms_not_expensive, price_not_expensive, c='g',marker='o', label='Good price')
ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))  # integer room ticks
plt.title('Plot Housing prices versus number of rooms')
plt.ylabel('Price')
plt.xlabel('Num rooms')
# NOTE(review): rcParams is set *after* the figure was created, so this size
# only applies to figures created later -- confirm intent.
plt.rcParams["figure.figsize"] = (8,6)
plt.legend()
plt.grid()
plt.show()
# -
# Same scatter for the unlabelled data (no expensive flag, single colour).
houses_unlabelled = pd.read_csv('1_houses_unlabelled.csv')
houses_unlabelled  # notebook cell display
# +
tick_spacing = 1
fig, ax = plt.subplots(1,1)
ax.scatter(houses_unlabelled['num_rooms'], houses_unlabelled['price'], c='r',marker='o')
ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))  # integer room ticks
plt.title('Plot Housing prices versus number of rooms')
plt.ylabel('Price')
plt.xlabel('Num rooms')
# NOTE(review): set after figure creation -- only affects later figures.
plt.rcParams["figure.figsize"] = (2,4)
plt.grid()
plt.show()
# -
| 1_explore_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
#
#
from PyDSTool import *
from PyDSTool.Toolbox import phaseplane as pp
from matplotlib import pyplot as plt
from IPython.display import display, Math
dir = './figures/'           # output directory for figures (NB: shadows the builtin dir())
plt.rc('text', usetex=True)  # render all figure text with LaTeX
# # Saddle-node bifurcation after transforming one parameters into a variable
# ## One-dimensional case
# ### Example 1:
# $$f(x,a;b)=-a-b-x^2$$
# Two-parameter (a, b) plane for f(x,a;b) = -a - b - x^2: the saddle-node
# line b = -a plus the a-nullclines used in the cases below.
aa = np.arange(-2, 2, 0.01)
plt.plot(aa, -aa, lw=6, ls='-.', color='r')  # b = -a: saddle-node set (dashed red underlay)
plt.plot(aa, -aa, lw=3, ls='-', color='k')   # same line redrawn solid black on top
plt.plot(aa, aa, lw=3, ls='-', color='g')    # b = a (nullcline of a' = -a + b, case 2)
plt.plot(aa, aa**2, lw=3, ls='-', color='b') # b = a^2 (nullcline of a' = a^2 - b, case 3)
plt.xlabel('$a$', fontsize=16)
plt.ylabel('$b$', fontsize=16)
plt.axis([-2, 2, -2, 2])
plt.savefig(dir + 'Example1TwoParsBif.pdf', bbox_inches='tight')
plt.show()
# **Second case**
# $$x'=f(x,a;b)=-a-b-x^2\\
# a'=g(a;b)=a-b$$
# Example 1, case 2: treat a as a dynamic variable and continue equilibria in b.
# NOTE(review): the markdown above states a' = a - b, but astr is '-a+b'
# (the opposite sign); confirm which is intended.
xstr = '-a-b-x**2'
astr = '-a+b'
params = {'b': -1}
ics = {'x': sqrt(2), 'a': -1}           # start on an equilibrium: x**2 = -a-b = 2
DSargs = args(name='EX12')
DSargs.pars = params
DSargs.tdata = [0, 10]
DSargs.pdomain = {'b': [-2, 2]}         # continuation range of the free parameter
DSargs.xdomain = {'x': [-1e4, 1e4], 'a': [-2, 2]}
DSargs.varspecs = {'x': xstr, 'a': astr}
DSargs.ics = ics
DS = Generator.Vode_ODEsystem(DSargs)
# +
# Equilibrium-curve ('EP-C') continuation in b.
PC = ContClass(DS)
name1 = 'EQ1'
PCargs = args(name=name1)
PCargs.type = 'EP-C'
PCargs.freepars = ['b']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 800
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'             # detect all supported bifurcation types
PCargs.SaveEigen = True                 # keep eigenvalues so stability can be drawn
PCargs.StopAtPoints = ['B']             # stop at domain-boundary points
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name1].backward()             # trace the branch in both directions
PC.curves[name1].forward()
# -
# Bifurcation diagram (b, x); strip labels/titles for the paper figure.
PC['EQ1'].display(('b', 'x'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
PC.plot.fig1.axes1.axes.set_xlim((-2, 0.5))
PC.plot.fig1.axes1.axes.set_xlabel('$b$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylabel('$x$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.savefig(dir + 'Example1Case2.pdf', bbox_inches='tight')
plt.show()
# **Third case**
# $$x'=f(x,a;b)=-a-b-x^2\\
# a'=g(a;b)=a^2-b$$
# Example 1, case 3: same x-equation, but a' = a^2 - b.
xstr = '-a-b-x**2'
astr = 'a**2-b'
params = {'b': 1/4}
ics = {'x': -1/2, 'a': -1/2}            # equilibrium of the extended system at b = 1/4
DSargs = args(name='EX13')
DSargs.pars = params
DSargs.tdata = [0, 10]
DSargs.pdomain = {'b': [-2, 2]}         # continuation range of the free parameter
DSargs.xdomain = {'x': [-1e4, 1e4], 'a': [-2, 2]}
DSargs.varspecs = {'x': xstr, 'a': astr}
DSargs.ics = ics
DS = Generator.Vode_ODEsystem(DSargs)
# +
# Equilibrium-curve continuation in b (shorter branch than case 2).
PC = ContClass(DS)
name1 = 'EQ1'
PCargs = args(name=name1)
PCargs.type = 'EP-C'
PCargs.freepars = ['b']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 60
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name1].backward()
PC.curves[name1].forward()
# -
# Bifurcation diagram (b, x) for case 3.
PC['EQ1'].display(('b', 'x'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
PC.plot.fig1.axes1.axes.set_xlim((-0.5, 1.5))
PC.plot.fig1.axes1.axes.set_xlabel('$b$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylim((-0.75, 0.75))
PC.plot.fig1.axes1.axes.set_ylabel('$x$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.savefig(dir + 'Example1Case3.pdf', bbox_inches='tight')
plt.show()
# ### Example 2
# $$f(x,a;b)=b^2+1-a-x^2$$
# Two-parameter (a, b) plane for f(x,a;b) = b^2 + 1 - a - x^2: saddle-node
# parabola b^2 = a - 1 plus the a-nullclines used in the cases below.
aa = np.arange(1, 3, 0.01)
plt.plot(aa, sqrt(aa-1), lw=6, ls='-.', color='r')   # upper saddle-node branch
plt.plot(aa, -sqrt(aa-1), lw=6, ls='-.', color='r')  # lower saddle-node branch
plt.axvline(x=2, lw=3, color='g')                    # a = 2 (case 1 nullcline)
plt.axvline(x=1, lw=3, color='b')                    # a = 1 (case 2 nullcline)
aa = np.arange(0, 3, 0.01)
plt.plot(aa, aa-1, lw=3, color='k')                  # b = a - 1 (cases 3/4 nullcline)
plt.xlabel('$a$', fontsize=16)
plt.ylabel('$b$', fontsize=16)
plt.axis([0, 3, -2, 2])
plt.savefig(dir + 'Example2TwoParsBif.pdf', bbox_inches='tight')
plt.show()
# **Case 1**
# $$g(a;b)=-a+2$$
# Example 2, case 1: a' = -a + 2, continued in b from both b = 2 and b = -2.
# NOTE(review): xstr uses '^' for the power while every other spec uses '**';
# confirm the PyDSTool spec parser accepts '^' here.
xstr = 'b^2+1-a-x**2'
astr = '-a+2'
params = {'b': 2}
ics = {'x': sqrt(3), 'a': 2}            # equilibrium: x**2 = b**2 + 1 - a = 3
DSargs = args(name='EX22')
DSargs.pars = params
DSargs.tdata = [0, 10]
DSargs.pdomain = {'b': [-4, 4]}
DSargs.xdomain = {'x': [-1e4, 1e4], 'a': [-4, 4]}
DSargs.varspecs = {'x': xstr, 'a': astr}
DSargs.ics = ics
DS = Generator.Vode_ODEsystem(DSargs)
# +
# First branch, started at b = 2.
PC = ContClass(DS)
name1 = 'EQ1'
PCargs = args(name=name1)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': sqrt(3), 'a': 2, 'b': 2}
PCargs.freepars = ['b']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name1].backward()
PC.curves[name1].forward()
# +
# Second branch, started at b = -2 (symmetric counterpart).
name2 = 'EQ2'
PCargs = args(name=name2)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': sqrt(3), 'a': 2, 'b': -2}
PCargs.freepars = ['b']
PCargs.values  # NOTE(review): no-op attribute access -- leftover, has no effect
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name2].backward()
PC.curves[name2].forward()
# -
# Overlay both branches in the (b, x) plane.
PC['EQ1'].display(('b', 'x'), stability=True, linewidth=3)
PC['EQ2'].display(('b', 'x'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
# PC.plot.fig1.axes1.axes.set_yscale('log')
PC.plot.fig1.axes1.axes.set_xlim((-3, 3))
PC.plot.fig1.axes1.axes.set_xlabel('$b$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylim((-3, 3))
PC.plot.fig1.axes1.axes.set_ylabel('$x$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.savefig(dir + 'Example2Case1.pdf', bbox_inches='tight')
plt.show()
# **Case 2**
# $$g(a;b)=-a+1$$
# Example 2, case 2: a' = -a + 1, two equilibrium branches (x = +-2 at a = 1).
# NOTE(review): '^' vs '**' as in case 1 -- confirm the spec parser accepts it.
xstr = 'b^2+1-a-x**2'
astr = '-a+1'
params = {'b': -2}
ics = {'x': -2, 'a': 1}                 # lower equilibrium branch
DSargs = args(name='EX22')
DSargs.pars = params
DSargs.tdata = [0, 10]
DSargs.pdomain = {'b': [-4, 4]}
DSargs.xdomain = {'x': [-1e4, 1e4], 'a': [-4, 4]}
DSargs.varspecs = {'x': xstr, 'a': astr}
DSargs.ics = ics
DS = Generator.Vode_ODEsystem(DSargs)
# +
# Upper branch (x = +2).
PC = ContClass(DS)
name1 = 'EQ1'
PCargs = args(name=name1)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': 2, 'a': 1, 'b': -2}
PCargs.freepars = ['b']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name1].backward()
PC.curves[name1].forward()
# +
# Lower branch (x = -2).
name2 = 'EQ2'
PCargs = args(name=name2)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': -2, 'a': 1, 'b': -2}
PCargs.freepars = ['b']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name2].backward()
PC.curves[name2].forward()
# -
# Overlay both branches in the (b, x) plane.
PC['EQ1'].display(('b', 'x'), stability=True, linewidth=3)
PC['EQ2'].display(('b', 'x'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
PC.plot.fig1.axes1.axes.set_xlim((-3, 3))
PC.plot.fig1.axes1.axes.set_xlabel('$b$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylim((-3, 3))
PC.plot.fig1.axes1.axes.set_ylabel('$x$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.savefig(dir + 'Example2Case2.pdf', bbox_inches='tight')
plt.show()
# **Case 3**
# $$g(a;b)=b-a+1$$
xstr = 'b^2+1-a-x**2'
astr = 'b-a+1'
params = {'b': 2}
ics = {'x': sqrt(2), 'a': 3}
DSargs = args(name='EX23')
DSargs.pars = params
DSargs.tdata = [0, 10]
DSargs.pdomain = {'b': [-4, 4]}
DSargs.xdomain = {'x': [-1e4, 1e4], 'a': [-6, 6]}
DSargs.varspecs = {'x': xstr, 'a': astr}
DSargs.ics = ics
DS = Generator.Vode_ODEsystem(DSargs)
# +
PC = ContClass(DS)
name1 = 'EQ1'
PCargs = args(name=name1)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': sqrt(2), 'a': 3, 'b': 2}
PCargs.freepars = ['b']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name1].backward()
PC.curves[name1].forward()
# +
name2 = 'EQ2'
PCargs = args(name=name2)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': sqrt(2), 'a': 0, 'b': -1}
PCargs.freepars = ['b']
PCargs.values
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name2].backward()
PC.curves[name2].forward()
# -
PC['EQ1'].display(('b', 'x'), stability=True, linewidth=3)
PC['EQ2'].display(('b', 'x'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
PC.plot.fig1.axes1.axes.set_xlim((-2.5, 3.5))
PC.plot.fig1.axes1.axes.set_xlabel('$b$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylim((-3, 3))
PC.plot.fig1.axes1.axes.set_ylabel('$x$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.savefig(dir + 'Example2Case3.pdf', bbox_inches='tight')
plt.show()
# **Case 4**
# $$\dot b = g(b;a)=b-a+1$$
# Example 2, case 4: now b is the dynamic variable (b' = b - a + 1) and the
# continuation parameter is a.
# NOTE(review): '^' vs '**' as in case 1 -- confirm the spec parser accepts it.
xstr = 'b^2+1-a-x**2'
bstr = 'b-a+1'
params = {'a': 3}
ics = {'x': sqrt(2), 'b': 2}
DSargs = args(name='EX24')
DSargs.pars = params
DSargs.tdata = [0, 10]
DSargs.pdomain = {'a': [-6, 6]}
DSargs.xdomain = {'x': [-1e4, 1e4], 'b': [-6, 6]}
DSargs.varspecs = {'x': xstr, 'b': bstr}
DSargs.ics = ics
DS = Generator.Vode_ODEsystem(DSargs)
# +
# Branch started at a = 3.
PC = ContClass(DS)
name1 = 'EQ1'
PCargs = args(name=name1)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': sqrt(2), 'b': 2, 'a': 3}
PCargs.freepars = ['a']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name1].backward()
PC.curves[name1].forward()
# +
# Branch started at a = 0.
name2 = 'EQ2'
PCargs = args(name=name2)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': sqrt(2), 'b': -1, 'a': 0}
PCargs.freepars = ['a']
PCargs.values  # NOTE(review): no-op attribute access -- leftover, has no effect
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 20
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name2].backward()
PC.curves[name2].forward()
# -
# Overlay both branches in the (a, x) plane.
PC['EQ1'].display(('a', 'x'), stability=True, linewidth=3)
PC['EQ2'].display(('a', 'x'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
PC.plot.fig1.axes1.axes.set_xlim((-1.5, 4.5))
PC.plot.fig1.axes1.axes.set_xlabel('$a$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylim((-3, 3))
PC.plot.fig1.axes1.axes.set_ylabel('$x$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.savefig(dir + 'Example2Case4.pdf', bbox_inches='tight')
plt.show()
# ## $N$-dimensional case
# ### Example 3
# $$x'=\mu-x^2+xy-xy^2$$
# $$y'=\lambda-y-x^2+yx^2$$
# Example 3 (N-dimensional case): planar system with parameters mu and lambda.
xstr = 'mu - x**2 + x*y - x*y**2'
ystr = 'lambda - y - x**2 + y*x**2'
params = {'mu': 0.1, 'lambda':0}        # 'lambda' is fine here: it is a spec string, not a Python name
ics = {'x': 0.1, 'y': 0.1}
DSargs = args(name='EX3')
DSargs.pars = params
DSargs.tdata = [0, 10]
DSargs.pdomain = {'mu': [-4, 4], 'lambda': [-4, 4]}
DSargs.xdomain = {'x': [-1e4, 1e4], 'y': [-1e4, 1e4]}
DSargs.varspecs = {'x': xstr, 'y': ystr}
DSargs.ics = ics
DS = Generator.Vode_ODEsystem(DSargs)
# Locate fixed points numerically to seed the continuation below.
fps = pp.find_fixedpoints(DS, n=4, eps=1e-8)
fps  # notebook cell display
# +
# Equilibrium continuation in mu, seeded with one of the fixed points above.
PC = ContClass(DS)
name1 = 'EQ1'
PCargs = args(name=name1)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': 0.27504269322489472, 'y': -0.081839518530518768}
PCargs.freepars = ['mu']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 800
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 40
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name1].backward()
PC.curves[name1].forward()
# -
# Bifurcation diagram (mu, x) for the equilibrium curve.
PC['EQ1'].display(('mu', 'x'), stability=True, linewidth=3)
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
PC.plot.fig1.axes1.axes.set_xlim((-4, 4))
# Raw string: '\m' is an invalid escape sequence (SyntaxWarning on
# Python >= 3.12); the rendered LaTeX label is unchanged.
PC.plot.fig1.axes1.axes.set_xlabel(r'$\mu$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylabel('$x$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
# plt.savefig(dir + 'Example1Case2.pdf', bbox_inches='tight')
plt.show()
# +
# Two-parameter bifurcation: continue the fold (limit point) found on EQ1
# in the (lambda, mu) plane ('LP-C' curve).
namefold1 = 'F01'
PCargs = args(name=namefold1)
PCargs.type = 'LP-C'
PCargs.initpoint = 'EQ1:LP2'            # start from the detected limit point LP2
PCargs.freepars = ['lambda', 'mu']      # two free parameters for a codim-1 fold curve
PCargs.StepSize = 1e-2
PCargs.MaxNumPoints = 200
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-4
PCargs.SaveEigen = True
PCargs.SaveJacobian = True
PCargs.LocBifPoints = 'all'
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[namefold1].forward()
# -
# Fold curve in the (mu, lambda) parameter plane.
PC[namefold1].display(('mu', 'lambda'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','BT','CP'])
PC.plot.fig1.axes1.axes.set_xlim((-0.1, 1.4))
# Raw strings: '\m' and '\l' are invalid escape sequences (SyntaxWarning on
# Python >= 3.12); the rendered LaTeX labels are unchanged.
PC.plot.fig1.axes1.axes.set_xlabel(r'$\mu$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylim((-0.3, 1.5))
PC.plot.fig1.axes1.axes.set_ylabel(r'$\lambda$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.axvline(x=0.5, lw=3, color='g')   # the mu = 0.5 slice used by the extended system below
plt.savefig(dir + 'Example3TwoParBif.pdf', bbox_inches='tight')
plt.show()
# **Extended system**
# Extended system: promote mu to a dynamic variable (mu' = mu - 0.5) and
# continue in lambda only.
xstr = 'mu - x**2 + x*y - x*y**2'
ystr = 'lambda - y - x**2 + y*x**2'
mustr = 'mu - 0.5'
params = {'lambda':0.5}
ics = {'x': 0.1, 'y': 0.1, 'mu': 0.6}
DSargs = args(name='EX32')
DSargs.pars = params
DSargs.tdata = [0, 10]
DSargs.pdomain = {'lambda': [0, 1.5]}
DSargs.xdomain = {'x': [-1e4, 1e4], 'y': [-1e4, 1e4], 'mu': [-4, 4]}
DSargs.varspecs = {'x': xstr, 'y': ystr, 'mu': mustr}
DSargs.ics = ics
DS = Generator.Vode_ODEsystem(DSargs)
# Fixed points of the extended system (mu is pinned to 0.5 at equilibrium).
fps = pp.find_fixedpoints(DS, n=4, eps=1e-8)
fps  # notebook cell display
# +
# Equilibrium continuation of the extended system in lambda.
PC = ContClass(DS)
name1 = 'EQ1'
PCargs = args(name=name1)
PCargs.type = 'EP-C'
PCargs.initpoint = {'x': -0.70710678118647341, 'y': 0.0, 'mu': 0.5}  # ~(-1/sqrt(2), 0, 0.5)
PCargs.freepars = ['lambda']
PCargs.StepSize = 1e-3
PCargs.MaxNumPoints = 800
PCargs.MaxStepSize = 5e-2
PCargs.MinStepSize = 1e-6
PCargs.MaxTestIters = 40
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.StopAtPoints = ['B']
PCargs.verbosity = 2
PC.newCurve(PCargs)
PC.curves[name1].backward()
PC.curves[name1].forward()
# -
# Bifurcation diagrams of the extended system: (lambda, x) and (lambda, y).
# Raw strings below: '\l' is an invalid escape sequence (SyntaxWarning on
# Python >= 3.12); the rendered LaTeX labels are unchanged.
PC['EQ1'].display(('lambda', 'x'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
PC.plot.fig1.axes1.axes.set_xlim((0, 1.5))
PC.plot.fig1.axes1.axes.set_xlabel(r'$\lambda$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylabel('$x$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.savefig(dir + 'Example3ExtSysBifX.pdf', bbox_inches='tight')
plt.show()
PC['EQ1'].display(('lambda', 'y'), stability=True, linewidth=3)
PC.plot.toggleLabels('off')
PC.plot.fig1.toggleAll('off', bytype=['P','B'])
PC.plot.fig1.axes1.axes.set_xlim((0, 1.5))
PC.plot.fig1.axes1.axes.set_xlabel(r'$\lambda$', fontsize=16)
PC.plot.fig1.axes1.axes.set_ylabel('$y$', fontsize=16)
PC.plot.fig1.axes1.axes.set_title('')
plt.savefig(dir + 'Example3ExtSysBifY.pdf', bbox_inches='tight')
plt.show()
| notebooks/Examples.ipynb |