code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def vector_size_check(*vector_variables):
    """Return True when every supplied vector has the same length.

    Vacuously True for zero or one vector (the previous version raised
    IndexError when called with no arguments).
    """
    if not vector_variables:
        return True
    first_len = len(vector_variables[0])
    return all(len(vector) == first_len for vector in vector_variables[1:])
vector_size_check([1,2,3], [4,5,6], [7,8,9])
def vector_addition(*vector_variables):
    """Return the element-wise sum of the given vectors as a list.

    Raises:
        ArithmeticError: if the vectors do not all share the same length.
    """
    # `not ...` instead of the non-idiomatic `== False` comparison.
    if not vector_size_check(*vector_variables):
        raise ArithmeticError("vectors must all have the same length")
    return [sum(items) for items in zip(*vector_variables)]
vector_addition([1,2], [3,4], [5,6])
vector_addition([1,2,3], [4], [5,6])
def scalar_vector_product(alpha, vector_variable):
    """Scale every component of the vector by alpha, returning a new list."""
    scaled = []
    for component in vector_variable:
        scaled.append(alpha * component)
    return scaled
scalar_vector_product(3, [1,2,3])
def matrix_size_check(*matrix_variables):
    """Return True when all matrices share the same dimensions.

    Row count is len(matrix); column count is taken from each matrix's first
    row (matching the original behaviour for ragged input). The previous
    version wrapped each single boolean in a pointless `all([...])`.
    """
    same_cols = len({len(matrix[0]) for matrix in matrix_variables}) == 1
    same_rows = len({len(matrix) for matrix in matrix_variables}) == 1
    return same_cols and same_rows
mat1 = [[1,2], [3,4], [5,6]]
mat2 = [[2,5], [4,6], [7,8]]
mat3 = [[1,3], [5,7]]
matrix_size_check(mat1, mat2)
matrix_size_check(mat1, mat2, mat3)
def is_matrix_equal(*matrix_variables):
    """Return True when every supplied matrix has identical entries throughout."""
    for corresponding_rows in zip(*matrix_variables):
        for entries in zip(*corresponding_rows):
            if len(set(entries)) != 1:
                return False
    return True
is_matrix_equal(mat1, [[1,2], [3,4], [5,6]])
is_matrix_equal(mat1, mat2)
def matrix_addition(*matrix_variables):
    """Return the element-wise sum of equally sized matrices.

    Raises:
        ArithmeticError: if the matrices differ in dimensions.
    """
    # `not ...` instead of the non-idiomatic `== False` comparison.
    if not matrix_size_check(*matrix_variables):
        raise ArithmeticError("matrices must all have the same dimensions")
    return [[sum(entries) for entries in zip(*rows)]
            for rows in zip(*matrix_variables)]
matrix_addition(mat1, mat2)
def matrix_transpose(matrix_variable):
    """Return the transpose of the matrix as a list of lists."""
    return [list(column) for column in zip(*matrix_variable)]
matrix_transpose(mat1)
def scalar_matrix_product(alpha, matrix_variable):
    """Multiply every entry of the matrix by the scalar alpha."""
    # Per-row scaling inlined instead of delegating to scalar_vector_product.
    return [[alpha * entry for entry in row] for row in matrix_variable]
scalar_matrix_product(2, mat1)
def is_product_availability_matrix(matrix_a, matrix_b):
    """Return True when A's column count equals B's row count (A @ B defined)."""
    column_count = sum(1 for _ in zip(*matrix_a))
    return column_count == len(matrix_b)
is_product_availability_matrix(mat1, mat2)
is_product_availability_matrix(mat1, mat3)
def matrix_product(matrix_a, matrix_b):
    """Return the matrix product A @ B as nested lists.

    Raises:
        ArithmeticError: if the inner dimensions do not agree.
    """
    # `not ...` instead of the non-idiomatic `== False` comparison.
    if not is_product_availability_matrix(matrix_a, matrix_b):
        raise ArithmeticError("inner matrix dimensions must agree")
    return [[sum(a * b for a, b in zip(row_a, col_b)) for col_b in zip(*matrix_b)]
            for row_a in matrix_a]
matrix_product(mat1, mat3)
|
Edwith_Python4ML/assignment_basic_linear_algebra.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7 (tensorflow)
# language: python
# name: tensorflow
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kwler123/Deep-Reinforcement-Learning-Hands-On-Second-Edition/blob/master/t81_558_class_12_01_ai_gym.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="N58_HAIBIeX6"
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_12_01_ai_gym.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="dnID4yguIeX7"
# # T81-558: Applications of Deep Neural Networks
# **Module 12: Reinforcement Learning**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# + [markdown] id="w7l5rd2MIeX8"
# # Module 12 Video Material
#
# * **Part 12.1: Introduction to the OpenAI Gym** [[Video]](https://www.youtube.com/watch?v=_KbUxgyisjM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_01_ai_gym.ipynb)
# * Part 12.2: Introduction to Q-Learning [[Video]](https://www.youtube.com/watch?v=A3sYFcJY3lA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_02_qlearningreinforcement.ipynb)
# * Part 12.3: Keras Q-Learning in the OpenAI Gym [[Video]](https://www.youtube.com/watch?v=qy1SJmsRhvM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_03_keras_reinforce.ipynb)
# * Part 12.4: Atari Games with Keras Neural Networks [[Video]](https://www.youtube.com/watch?v=co0SwPWoZh0&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_04_atari.ipynb)
# * Part 12.5: Application of Reinforcement Learning [[Video]](https://www.youtube.com/watch?v=1jQPP3RfwMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_05_apply_rl.ipynb)
#
# + [markdown] id="UImTzmGTIeX9"
# # Part 12.1: Introduction to the OpenAI Gym
#
# [OpenAI Gym](https://gym.openai.com/) aims to provide an easy-to-setup general-intelligence benchmark with a wide variety of different environments. The goal is to standardize how environments are defined in AI research publications so that published research becomes more easily reproducible. The project claims to provide the user with a simple interface. As of June 2017, developers can only use Gym with Python.
#
# OpenAI gym is pip-installed onto your local machine. There are a few significant limitations to be aware of:
#
# * OpenAI Gym Atari only **directly** supports Linux and Macintosh
# * OpenAI Gym Atari can be used with Windows; however, it requires a particular [installation procedure](https://towardsdatascience.com/how-to-install-openai-gym-in-a-windows-environment-338969e24d30)
# * OpenAI Gym can not directly render animated games in Google CoLab.
#
# Because OpenAI Gym requires a graphics display, the only way to display Gym in Google CoLab is an embedded video. The presentation of OpenAI Gym game animations in Google CoLab is discussed later in this module.
#
# ### OpenAI Gym Leaderboard
#
# The OpenAI Gym does have a leaderboard, similar to Kaggle; however, the OpenAI Gym's leaderboard is much more informal compared to Kaggle. The user's local machine performs all scoring. As a result, the OpenAI gym's leaderboard is strictly an "honor's system." The leaderboard is maintained the following GitHub repository:
#
# * [OpenAI Gym Leaderboard](https://github.com/openai/gym/wiki/Leaderboard)
#
# If you submit a score, you are required to provide a writeup with sufficient instructions to reproduce your result. A video of your results is suggested, but not required.
#
# ### Looking at Gym Environments
#
# The centerpiece of Gym is the environment, which defines the "game" in which your reinforcement algorithm will compete. An environment does not need to be a game; however, it describes the following game-like features:
# * **action space**: What actions can we take on the environment, at each step/episode, to alter the environment.
# * **observation space**: What is the current state of the portion of the environment that we can observe. Usually, we can see the entire environment.
#
# Before we begin to look at Gym, it is essential to understand some of the terminology used by this library.
#
# * **Agent** - The machine learning program or model that controls the actions.
# Step - One round of issuing actions that affect the observation space.
# * **Episode** - A collection of steps that terminates when the agent fails to meet the environment's objective, or the episode reaches the maximum number of allowed steps.
# * **Render** - Gym can render one frame for display after each episode.
# * **Reward** - A positive reinforcement that can occur at the end of each episode, after the agent acts.
# * **Nondeterministic** - For some environments, randomness is a factor in deciding what effects actions have on reward and changes to the observation space.
#
# It is important to note that many of the gym environments specify that they are not nondeterministic even though they make use of random numbers to process actions. It is generally agreed upon (based on the gym GitHub issue tracker) that nondeterministic property means that a deterministic environment will still behave randomly even when given consistent seed value. The seed method of an environment can be used by the program to seed the random number generator for the environment.
#
# The Gym library allows us to query some of these attributes from environments. I created the following function to query gym environments.
#
# + id="cciTuR2MIeX-"
import gym
def query_environment(name):
    """Instantiate the named gym environment and print its key attributes."""
    env = gym.make(name)
    spec = gym.spec(name)
    # Label/value pairs printed in the same order as before.
    attributes = [
        ("Action Space", env.action_space),
        ("Observation Space", env.observation_space),
        ("Max Episode Steps", spec.max_episode_steps),
        ("Nondeterministic", spec.nondeterministic),
        ("Reward Range", env.reward_range),
        ("Reward Threshold", spec.reward_threshold),
    ]
    for label, value in attributes:
        print(f"{label}: {value}")
# + [markdown] id="kRXfAdwFDwUm"
# We will begin by looking at the MountainCar-v0 environment, which challenges an underpowered car to escape the valley between two mountains. The following code describes the Mountain Car environment.
# + colab={"base_uri": "https://localhost:8080/"} id="XYwy9cjlJjEH" outputId="3b0cdd22-832a-45ce-acdd-68907cb40a4f"
query_environment("MountainCar-v0")
# + [markdown] id="1TsQiaGJE3UA"
# There are three distinct actions that can be taken: accelerate forward, decelerate, or accelerate backwards. The observation space contains two continuous (floating point) values, as evident by the box object. The observation space is simply the position and velocity of the car. The car has 200 steps to escape for each episode. You would have to look at the code to know, but the mountain car receives no incremental reward. The only reward for the car is given when it escapes the valley.
# + colab={"base_uri": "https://localhost:8080/"} id="RF4n5cYEMyru" outputId="1d8672b4-ad18-408c-8b0f-cc5eaa9df836"
query_environment("CartPole-v1")
# + [markdown] id="QwvVKrNebUHJ"
# The CartPole-v1 environment challenges the agent to move a cart while keeping a pole balanced. The environment has an observation space of 4 continuous numbers:
#
# * Cart Position
# * Cart Velocity
# * Pole Angle
# * Pole Velocity At Tip
#
# To achieve this goal, the agent can take the following actions:
#
# * Push cart to the left
# * Push cart to the right
#
# There is also a continuous variant of the mountain car. This version does not simply have the motor on or off. For the continuous car the action space is a single floating point number that specifies how much forward or backward force is being applied.
# + colab={"base_uri": "https://localhost:8080/", "height": 176} id="UAlaMcJmNSY0" outputId="456d2977-9939-40c1-ad13-4576eeff0b38"
query_environment("MountainCarContinuous-v0")
# + [markdown] id="JBrlG1t6ceIa"
# Note: ignore the warning above, it is a relatively inconsequential bug in OpenAI Gym.
#
# Atari games, like breakout, can use an observation space that is either equal to the size of the Atari screen (210x160) or even use the RAM memory of the Atari (128 bytes) to determine the state of the game. Yes, that's bytes, not kilobytes!
# + colab={"base_uri": "https://localhost:8080/", "height": 121} id="ndTb-9pgJizW" outputId="8d1bf60a-ace3-4a6c-ac5b-c2f0c70cc319"
query_environment("Breakout-v0")
# + colab={"base_uri": "https://localhost:8080/", "height": 121} id="Ni1rxzmLKAdH" outputId="4066d9e0-882b-49b4-d399-e3a82c3a2a84"
query_environment("Breakout-ram-v0")
# + [markdown] id="3E253PBGPRuw"
# ### Render OpenAI Gym Environments from CoLab
#
# It is possible to visualize the game your agent is playing, even on CoLab. This section provides information on how to generate a video in CoLab that shows you an episode of the game your agent is playing. This video process is based on suggestions found [here](https://colab.research.google.com/drive/1flu31ulJlgiRL1dnN2ir8wGh9p7Zij2t).
#
# Begin by installing **pyvirtualdisplay** and **python-opengl**.
# + id="uF92FCzZMWPn"
# !pip install gym pyvirtualdisplay > /dev/null 2>&1
# !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1
# + [markdown] id="hS7L8kFMLkjN"
# Next, we install needed requirements to display an Atari game.
# + colab={"base_uri": "https://localhost:8080/"} id="78BfQoQKOq7z" outputId="90ca9b32-279a-49d8-e7f8-be9819651371"
# !apt-get update > /dev/null 2>&1
# !apt-get install cmake > /dev/null 2>&1
# !pip install --upgrade setuptools 2>&1
# !pip install ez_setup > /dev/null 2>&1
# !pip install gym[atari] > /dev/null 2>&1
# + [markdown] id="NjTHm2SpLz10"
# Next we define functions used to show the video by adding it to the CoLab notebook.
# + id="T9RpF49oOsZj"
import gym
from gym.wrappers import Monitor
import glob
import io
import base64
from IPython.display import HTML
from pyvirtualdisplay import Display
from IPython import display as ipythondisplay
display = Display(visible=0, size=(1400, 900))
display.start()
"""
Utility functions to enable video recording of gym environment
and displaying it.
To enable video, just do "env = wrap_env(env)""
"""
def show_video():
    """Embed the first recorded ./video/*.mp4 into the notebook output.

    Prints a message instead when no recording exists. Fixes a file-handle
    leak: the previous version never closed the handle from io.open().
    """
    mp4list = glob.glob('video/*.mp4')
    if not mp4list:
        print("Could not find video")
        return
    with io.open(mp4list[0], 'r+b') as handle:
        video = handle.read()
    encoded = base64.b64encode(video)
    ipythondisplay.display(HTML(data='''<video alt="test" autoplay
loop controls style="height: 400px;">
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii'))))
def wrap_env(env):
    """Wrap a gym environment in a Monitor that records episodes to ./video.

    force=True clears any recordings left over from a previous run.
    """
    env = Monitor(env, './video', force=True)
    return env
# + [markdown] id="K6NATj-kNADT"
# Now we are ready to play the game. We use a simple random agent.
# + colab={"base_uri": "https://localhost:8080/", "height": 421} id="XDKGJ9A3O8fT" outputId="fd00fc75-4054-49f1-fe87-2fec6c05b640"
# Build a monitored environment (alternative games left commented out).
#env = wrap_env(gym.make("MountainCar-v0"))
env = wrap_env(gym.make("CartPole-v1"))
#env = wrap_env(gym.make("Atlantis-v0"))
observation = env.reset()
# Random agent: sample an arbitrary action each step until the episode ends.
while True:
    env.render()
    #your agent goes here
    action = env.action_space.sample()
    observation, reward, done, info = env.step(action)
    if done:
        break;
# Close the environment (flushes the Monitor recording), then embed the video.
env.close()
show_video()
|
t81_558_class_12_01_ai_gym.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf2
# language: python
# name: tf2
# ---
# # Handwritten
# %load_ext autoreload
# %autoreload 2
# %load_ext nb_black
# import pandas as pd
import tensorflow as tf
import dataduit as dd
import yeahml as yml
# ## Create Datasets
# +
# only use 2 of the features
mnist_dataset_config = {
"meta": {
"name": "mnist",
"logging": {"log_stream_level": "INFO"},
"in": {"from": "online", "type": "tfd"},
},
"read": {
"split_percents": [75, 15, 10],
"split_names": ["train", "val", "test"],
"from_stage": "preprocessed",
},
}
mnist_dict_raw = dd.read(mnist_dataset_config)
# +
# only use 2 of the features
fashion_dataset_config = {
"meta": {
"name": "fashion_mnist",
"logging": {"log_stream_level": "INFO"},
"in": {"from": "online", "type": "tfd"},
},
"read": {
"split_percents": [75, 15, 10],
"split_names": ["train", "val", "test"],
"from_stage": "preprocessed",
},
}
fashion_dict_raw = dd.read(fashion_dataset_config)
# +
# only use 2 of the features
kmnist_dataset_config = {
"meta": {
"name": "kmnist",
"logging": {"log_stream_level": "INFO"},
"in": {"from": "online", "type": "tfd"},
},
"read": {
"split_percents": [75, 15, 10],
"split_names": ["train", "val", "test"],
"from_stage": "preprocessed",
},
}
kmnist_dict_raw = dd.read(kmnist_dataset_config)
# -
raw_datadict = {
"mnist": mnist_dict_raw[0],
"fashion_mnist": fashion_dict_raw[0],
"kmnist": kmnist_dict_raw[0],
}
# ## alter dataset
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]; label passes through."""
    scaled = tf.cast(image, tf.float32) / 255.0
    return scaled, label
def load_image(datapoint):
    """Map a TFDS example dict {'image', 'label'} to a (normalized_image, label) tuple.

    NOTE(review): `input_mask` is a misnomer carried over from a segmentation
    example -- here it holds the integer class label, not a mask.
    """
    input_image, input_mask = normalize_img(datapoint["image"], datapoint["label"])
    return input_image, input_mask
ds_dict = {}
# Apply the normalization map to every split of every dataset, in parallel.
for ds_name, raw_dict in raw_datadict.items():
    ds_dict[ds_name] = {}
    for split_name, split_ds in raw_dict.items():
        split_ds = split_ds.map(
            load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE
        )
        ds_dict[ds_name][split_name] = split_ds
# ## subset
# Keep only the first `sample_size` examples of each split to speed up runs.
sample_size = 5000
for ds_name, cur_dict in ds_dict.items():
    for split_name, split_ds in cur_dict.items():
        ds_dict[ds_name][split_name] = split_ds.take(sample_size)
ds_dict
# ## Specify the Model
example = "./handwritten_config.yml"
yml_dict = yml.create_configs(example)
# ## Build the model
# If you receive an error:
# AttributeError: 'google.protobuf.pyext._message.RepeatedCompositeCo' object has no attribute 'append'
# I personally used `pip install -U protobuf=3.8.0` to resolve
# per https://github.com/tensorflow/tensorflow/issues/33348
model = yml.build_model(yml_dict)
model.summary()
# ## Train the Model
#
# Notice here that we're using the created training and validation sets from `ds_dict`
ds_dict
# %%time
train_dict = yml.train_model(model, yml_dict, ds_dict)
yml.basic_plot_tracker(
train_dict["tracker"],
metrics=True,
local=False,
training=True,
validation=False,
loss=True,
size=(16, 8),
)
# ## Evaluate the Model
# +
# eval_dict = yml.eval_model(
# model,
# yml_dict,
# dataset=ds_dict["test"]
# )
# print(eval_dict)
# -
# ## Inspect model in Tensorflow
#
# In the command line you can navigate to the `albalone` directory and run: (provided tensorboard is installed in your environment)
#
# ```bash
# tensorboard --logdir model_a/
# ```
|
examples/handwritten/handwritten.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Packages
# +
import numpy as np
import matplotlib.pyplot as plt
import lightkurve as lk
from scipy import optimize
from scipy.signal import find_peaks
from astropy.stats import LombScargle
from matplotlib import rcParams
rcParams["figure.dpi"] = 150
from lmfit import Model
import scipy.special
import math
from astropy.io import fits
import random
from astropy.time import Time
import os
from scipy.ndimage import gaussian_filter
from astropy.stats import sigma_clip
from matplotlib import rcParams
rcParams["figure.dpi"] = 150
plt.rc('font', family='serif', size=11)
plt.tight_layout()
# -
from matplotlib import rcParams
rcParams["figure.dpi"] = 150
# # Functions
# +
#Expected units:
#t ... Time in days
#a1sini ... in seconds
#Porb ... in days
#star_planet_ratio ... how many times heavier is the star?
def tau(t, star_planet_ratio, a2sini, e, varpi, Porb, tp, nobessel):
    """Light-travel time delay (days) of the host star due to one companion.

    t: time in days; a2sini: companion semi-major axis (AU, projected);
    Porb: orbital period in days; tp: time of periastron; the true-anomaly
    terms are evaluated from a Bessel series truncated at `nobessel`.
    """
    Omega = 2 * np.pi / Porb
    # Bessel-series expansions of cos(f) and sin(f) around the mean anomaly.
    cos_series = sum(scipy.special.jv(k, k * e) * np.cos(k * Omega * (t - tp))
                     for k in range(1, nobessel))
    sin_series = sum(scipy.special.jvp(k, k * e, n=1) * np.sin(k * Omega * (t - tp))
                     for k in range(1, nobessel))
    cosf = - e + 2 * (1 - e**2) / e * cos_series
    sinf = 2 * math.sqrt(1 - e**2) * sin_series
    # 499 s is the light-travel time per AU; /86400 converts to days.
    amplitude = -a2sini / star_planet_ratio * 499 / 86400
    geometry = (sinf * np.cos(varpi) + cosf * np.sin(varpi)) / (1. + e * cosf)
    return amplitude * (1 - e**2) * geometry
def vrad(t, star_planet_ratio, a2sini, e, varpi, Porb, tp, nobessel):
    """Stellar radial velocity (m/s) induced by one companion.

    Same Bessel-series true-anomaly expansion as tau(); the 1.736e6 factor
    converts AU/day to m/s.
    """
    Omega = 2 * np.pi / Porb
    cos_series = sum(scipy.special.jv(k, k * e) * np.cos(k * Omega * (t - tp))
                     for k in range(1, nobessel))
    sin_series = sum(scipy.special.jvp(k, k * e, n=1) * np.sin(k * Omega * (t - tp))
                     for k in range(1, nobessel))
    cosf = - e + 2 * (1 - e**2) / e * cos_series
    sinf = 2 * math.sqrt(1 - e**2) * sin_series
    amplitude = - Omega * a2sini / star_planet_ratio * 1.736 * 10**6
    eccentricity_factor = math.sqrt(1 - e**2)
    geometry = (cosf * np.cos(varpi) - sinf * np.sin(varpi)) + e * np.cos(varpi)
    return amplitude * eccentricity_factor * geometry
def vrad2(t, star_planet_ratio, a2sini, e, varpi, Porb, tp, nobessel):
    """Radial-velocity variant of vrad(): sign of the sin(f) term flipped and
    the sqrt(1-e^2) factor dividing instead of multiplying."""
    Omega = 2 * np.pi / Porb
    cos_series = sum(scipy.special.jv(k, k * e) * np.cos(k * Omega * (t - tp))
                     for k in range(1, nobessel))
    sin_series = sum(scipy.special.jvp(k, k * e, n=1) * np.sin(k * Omega * (t - tp))
                     for k in range(1, nobessel))
    cosf = - e + 2 * (1 - e**2) / e * cos_series
    sinf = 2 * math.sqrt(1 - e**2) * sin_series
    amplitude = - Omega * a2sini / star_planet_ratio * 1.736 * 10**6
    eccentricity_factor = math.sqrt(1 - e**2)
    geometry = (cosf * np.cos(varpi) + sinf * np.sin(varpi)) + e * np.cos(varpi)
    return amplitude / eccentricity_factor * geometry
#_b
star_planet_ratio_b = 1.8 / 11 * 1048
a2sini_b = 9.66 * np.sin(np.deg2rad(88.81))
e_b = 0.08
varpi_b = np.deg2rad(205.8)
Porb_b = 22.47*365.25
tp_b = 2455992
#_c
star_planet_ratio_c = 1.8 / 8.9 * 1048
a2sini_c = 2.7 * np.sin(np.deg2rad(88.81))
e_c = 0.24
varpi_c = np.deg2rad(-95)
Porb_c = 1220
tp_c = 2454117
# -
#Amplitude Spectrum Function
def AS(times, flux, oversample):
    """Amplitude spectrum of a time series via a Lomb-Scargle periodogram.

    Parameters:
        times: observation times.
        flux: observed values at each time.
        oversample: frequency-grid oversampling factor.

    Returns:
        (freq, amp): frequencies from 1/T up to 120 in steps of df/oversample,
        and the corresponding amplitudes.

    Removed dead locals (`timebase`, `dt`, `ny`) and a no-op
    `oversample = oversample` self-assignment from the original.
    """
    tmin = min(times)
    tmax = max(times)
    df = 1.0 / (tmax - tmin)
    freq = np.arange(df, 120, df / oversample)
    model = LombScargle(times, flux)
    power = model.power(freq, method='fast', normalization='psd')
    # Convert PSD-normalised power to amplitude (standard sqrt(4/N) scaling).
    fct = np.sqrt(4 / len(times))
    amp = np.sqrt(np.abs(power)) * fct
    return (freq, amp)
def dft_phase(x, y, freq):
    """
    Discrete Fourier transform to calculate the ASTC phase
    given x, y, and an array of frequencies.

    Parameters
    ----------
    x : `array`
        Time values of the series.
    y : `array`
        Observed values at each time in x.
    freq : `array`
        Frequencies at which to evaluate the phase.

    Returns
    -------
    phase : `np.ndarray`
        Wrapped phase in [0, 2*pi) for each frequency.
    """
    # Original docstring listed `x` twice and omitted `y`/`freq`.
    freq = np.asarray(freq)
    x = np.array(x)
    y = np.array(y)
    phase = []
    for f in freq:
        expo = 2.0 * np.pi * f * x
        ft_real = np.sum(y * np.cos(expo))
        ft_imag = np.sum(y * np.sin(expo))
        phase.append(np.arctan2(ft_imag, ft_real))
    # Phase-wrapping patch, vectorized (same float ops as the former loop):
    # fold negatives into [0, 2*pi), then shift/reflect into sine convention.
    p = np.asarray(phase)
    p = np.where(p < 0, p + 2 * np.pi, p)
    p = (- p + np.pi/2) % (2 * np.pi)
    return p
# +
#Fourier Function
def fourier_sum(freqs, amps, phase, y_noise, x):
    """Evaluate a multi-sine model plus per-point noise at each x.

    y[j] = y_noise[j] + sum_i amps[i] * sin(2*pi*freqs[i]*x[j] + phase[i])

    Returns a plain list, as before. Summation order (noise first, then each
    frequency's term) is preserved so results are bit-identical.
    """
    y = []
    for j, xj in enumerate(x):
        contributions = [y_noise[j]]
        contributions.extend(
            amps[i] * np.sin(2. * np.pi * freqs[i] * xj + phase[i])
            for i in range(len(freqs))
        )
        y.append(sum(contributions))
    return y
def lc_sim(freqs, amps, phs, time_start, time_end, cadence, precision):
    """Simulate a noisy multi-sine light curve on a regular time grid.

    Returns [freqs, amps, phs, time, multisine] with the inputs coerced to
    numpy arrays; noise is Gaussian with standard deviation `precision`.
    """
    freqs, amps, phs = np.array(freqs), np.array(amps), np.array(phs)
    time = np.arange(time_start, time_end, cadence)
    noise = np.random.normal(0, precision, len(time))
    multisine = np.array(fourier_sum(freqs, amps, phs, noise, time))
    return [freqs, amps, phs, time, multisine]
def lc_model(freqs, amps, phs, time_start, time_end, cadence_model):
    """Noise-free multi-sine model curve on a regular time grid."""
    freqs, amps, phs = np.array(freqs), np.array(amps), np.array(phs)
    time_model = np.arange(time_start, time_end, cadence_model)
    # All-zero "noise" yields the pure model.
    noise_model = [0 for _ in time_model]
    multisine_model = np.array(fourier_sum(freqs, amps, phs, noise_model, time_model))
    return [freqs, amps, phs, time_model, multisine_model]
def lc_model_times(freqs, amps, phs, times):
    """Noise-free multi-sine model evaluated at the supplied times.

    NOTE(review): dead code -- this function is immediately redefined below
    with an extra `precision` parameter; the later definition wins at import
    time, so this body is never reachable.
    """
    freqs = np.array(freqs)
    amps = np.array(amps)
    phs = np.array(phs)
    time_model = times
    noise_model = [0 for i in time_model]
    multisine_model = np.array(fourier_sum(freqs, amps, phs, noise_model, time_model))
    return [freqs, amps, phs, time_model, multisine_model]
def lc_model_times(freqs, amps, phs, times, precision=0):
    """Multi-sine model at arbitrary times, with optional Gaussian noise.

    This definition shadows the 4-argument version defined just above;
    `precision` now defaults to 0 so 4-argument callers keep working
    (np.random.normal(0, 0, n) yields exact zeros, i.e. a noise-free model).
    """
    freqs = np.array(freqs)
    amps = np.array(amps)
    phs = np.array(phs)
    time_model = times
    noise_model = np.random.normal(0, precision, len(time_model))
    #noise_model = [0 for i in time_model]
    multisine_model = np.array(fourier_sum(freqs, amps, phs, noise_model, time_model))
    return [freqs, amps, phs, time_model, multisine_model]
# -
def jd_to_datetime(jd, returniso=False):
    """Convert a Julian date to a datetime, or an ISO string when returniso."""
    moment = Time(jd, format='jd', scale='utc')
    return moment.iso if returniso else moment.datetime
#https://stackoverflow.com/questions/14581358/getting-standard-errors-on-fitted-parameters-using-the-optimize-leastsq-method-i
def fit_leastsq_my(times, mags, freqs, amps, phs):
    """Least-squares refine amplitude and phase for each fixed frequency.

    For every frequency in `freqs` (held fixed), fits the two-parameter model
    |p1| * sin(2*pi*(f*t + p2 % 1)) to (times, mags), starting from the
    supplied amplitude and phase guesses.

    Returns:
        (freqs, fitted_amps, fitted_phases, phase_errors); phases are in
        cycles (p2 is taken modulo 1), errors come from the scaled covariance.
    """
    pfit_all = []
    perr_all = []
    for i in range(len(freqs)):
        # Factory that binds the current frequency into the model.
        def model_new(freq):
            def f(x, p1, p2):
                return abs(p1) * np.sin(2. * np.pi * (freq * x + (p2)%1))
            return f
        # Adapter: leastsq passes a parameter vector, the model wants scalars.
        def ff(x, p):
            return model_new(freqs[i])(x, *p)
        # These are initial guesses for fits:
        pstart = [amps[i], phs[i]]
        def fit_leastsq(p0, datax, datay, function):
            errfunc = lambda p, x, y: function(x,p) - y
            pfit, pcov, infodict, errmsg, success = optimize.leastsq(errfunc, p0, args=(datax, datay), full_output=1, epsfcn=0.0001)
            # Scale the covariance by the reduced chi-square to obtain
            # parameter errors (see the Stack Overflow link above).
            if (len(datay) > len(p0)) and pcov is not None:
                s_sq = (errfunc(pfit, datax, datay)**2).sum()/(len(datay)-len(p0))
                pcov = pcov * s_sq
            else:
                pcov = np.inf
            error = []
            # NOTE(review): this `i` shadows the outer loop variable (harmless
            # -- it lives in fit_leastsq's own scope), and the bare `except`
            # silently maps any failure (e.g. pcov == inf) to a 0.0 error bar.
            for i in range(len(pfit)):
                try:
                    error.append(np.absolute(pcov[i][i])**0.5)
                except:
                    error.append( 0.00 )
            pfit_leastsq = pfit
            perr_leastsq = np.array(error)
            return pfit_leastsq, perr_leastsq
        pfit, perr = fit_leastsq(pstart, times, mags, ff)
        pfit_all.append(pfit)
        perr_all.append(perr)
    # Column-extract fitted amplitudes, phases, and phase errors per frequency.
    my_amps = np.array([pfit_all[i][0] for i in range(len(freqs))])
    my_phs = np.array([pfit_all[i][1] for i in range(len(freqs))])
    my_phs_err = np.array([perr_all[i][1] for i in range(len(freqs))])
    return(freqs, my_amps, my_phs, my_phs_err)
# # Extract data
for name in ['<NAME>']:
sr = lk.search_lightcurvefile(name)
print(sr)
if len(sr) > 0:
#Download data
lcfs = sr.download_all()
# Use the normalized PDCSAP_FLUX
lc0 = lcfs[0].SAP_FLUX.normalize()
lc1 = lcfs[0].PDCSAP_FLUX.normalize()
# Loop through the rest of the LCFS, appending to the first light curve
for lcf in lcfs[1:]:
lc0 = lc0.append(lcf.SAP_FLUX.normalize())
lc1 = lc1.append(lcf.PDCSAP_FLUX.normalize())
#Remove bad quality and nans
q0 = lc0.quality == 0
lc0 = lc0[q0].remove_nans()
q1 = lc1.quality == 0
lc1 = lc1[q1].remove_nans()
#Plot data
ax = lc0.scatter(label = 'SAP_FLUX')
lc1.scatter(ax=ax, label = 'PDCSAP_FLUX')
#Times & Mags
times = lc1.time + 2457000
mags = -2.5 * np.log10(lc1.flux)
mags = mags - np.median(mags)
jds = []
for i in np.arange(2018, 2023, 1):
times_utc = ['{0}-01-01T12:00:00'.format(i)]
t = Time(times_utc, format='isot', scale='utc')
print(t)
print(t.jd)
jds.append(t.jd[0])
# +
t_18_22 = np.linspace(jds[0], jds[-1], 300)
TESS_mm_tmid = min(times) + (max(times) - min(times))/2
y_tau_b_offset = tau(TESS_mm_tmid, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c_offset = tau(TESS_mm_tmid, star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
y_tau_b = tau(t_18_22, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c = tau(t_18_22, star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
time_offset = 2457000
fig, ax1 = plt.subplots(1,1, figsize=(6, 3.5))
ax1.set_xlim(jds[0] - time_offset, jds[-1] - time_offset)
lns11 = ax1.plot(t_18_22 - time_offset, (y_tau_b + y_tau_c-y_tau_b_offset-y_tau_c_offset) * 86400, 'b', label = r"TD $\beta$ Pic b&c")
lns12 = ax1.plot(t_18_22 - time_offset, (y_tau_b-y_tau_b_offset) * 86400, 'b', ls = '--', label = r"TD $\beta$ Pic b")
lns13 = ax1.plot(t_18_22 - time_offset, (y_tau_c-y_tau_c_offset) * 86400, 'b', ls = '-.', label = r"TD $\beta$ Pic c")
lns31 = ax1.axvspan(min(times) - time_offset, max(times) - time_offset, alpha=0.5, label='TESS (mm)', color='C0')
lns32 = ax1.axvspan(min(times)+704 - time_offset, max(times)+704 - time_offset, alpha=0.5, label='TESS (em)', color='C1')
ax1.set_xlabel('Time [JD - 2457000]')
ax1.set_ylabel('TD [s]', color='b')
ax1.tick_params('y', colors='b')
ax1.set_ylim(-25,20)
ax2 = ax1.twiny()
ax2.tick_params(axis='x', rotation=0)
t_2018 = jds[0] - time_offset
t_2019 = jds[1] - time_offset
t_2020 = jds[2] - time_offset
t_2021 = jds[3] - time_offset
t_2022 = jds[4] - time_offset
ax2.set_xticks([t_2018,t_2019,t_2020,t_2021,t_2022])
xmin1, xmax1 = ax1.get_xlim()
ax2.set_xlim(xmin1,xmax1)
ax2.set_xticklabels(['2018', '2019', '2020', '2021', '2022'], fontsize=10)
ax2.set_xlabel('Time [yr]')
ax2.xaxis.set_label_coords(0.5,1.125)
ax1.axhline((tau(TESS_mm_tmid+704, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)-y_tau_b_offset)*86400)
ax1.axhline((tau(TESS_mm_tmid+704, star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)-y_tau_c_offset+tau(TESS_mm_tmid+704, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)-y_tau_b_offset)*86400)
# +
freq, amp = AS(times, mags, 15)
freq=freq[5000:]
amp=amp[5000:]
max_power = amp.max()
peaks, _ = find_peaks(amp, height=max_power*0.07, distance=200)
sort_idx = np.argsort(amp[peaks])[::-1]
f_TESS = freq[peaks][sort_idx]
a_TESS = amp[peaks][sort_idx]
print(len(f_TESS))
p_TESS = dft_phase(times, mags, f_TESS)/(2*np.pi)
# +
t_18_22 = np.linspace(jds[0], jds[-1], 300)
y_tau_b = tau(t_18_22, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c = tau(t_18_22, star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
# +
#times_pm_em = np.concatenate((times, times+704))
# -
times_pm_em = np.concatenate((times, times+2000*1/10, times+2000*2/10, times+2000*3/10, times+2000*4/10, times+2000*5/10, times+2000*6/10, times+2000*7/10, times+2000*8/10, times+2000*9/10, times+2000*10/10, times+2000*(-1/10), times+2000*(-2/10), times+2000*(-3/10), times+2000*(-4/10), times+2000*(-5/10)))
p_TESS[0]
# +
model_times = lc_model_times(f_TESS, a_TESS, p_TESS*2*np.pi, times_pm_em, 0)#20*np.sqrt(30)*1e-6)
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
ax.scatter(times_pm_em, model_times[4], s=3, c='b', label = 'sim.')
ax.set_xlabel('Time')
ax.set_ylabel('Normalized Flux')
ax.legend()
ax.invert_yaxis()
# -
a = times_pm_em[:len(times)]
b = model_times[4][:len(times)]
c = []
d = []
for i in range(10):
aaa = np.mean(times_pm_em[len(times)*i:len(times)*(i+1)])
aa = times_pm_em[len(times)*i:len(times)*(i+1)] - aaa
a = times_pm_em[len(times)*i:len(times)*(i+1)]
b = model_times[4][len(times)*i:len(times)*(i+1)]
c.append(dft_phase(a,b, [f_TESS[0]]))
d.append(((dft_phase(aa,b, [f_TESS[0]])-2*np.pi*[f_TESS[0]][0]*aaa))%(2*np.pi))
c[0]/6.28
plt.scatter(range(10), c)
plt.scatter(range(10), d, marker = 'x')
plt.ylim(0, 6.28)
# +
y_tau_b = tau(times_pm_em, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c = tau(times_pm_em, star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
plt.scatter(times_pm_em, (y_tau_b-y_tau_b_offset)*86400, s=1)
plt.scatter(times_pm_em, (y_tau_c-y_tau_c_offset)*86400, s=1)
plt.scatter(times_pm_em, (y_tau_b + y_tau_c-y_tau_b_offset-y_tau_c_offset)*86400, s=1)
# -
np.arange(0, len(times_pm_em)+len(times), len(times))
f_TESS
# +
no_freqs_run = 4
no_freqs_run_weighted = 4
print('CALCULATE TIMES')
# times_run = tAT
# magnitudes_run = mAT
# segment_borders = np.array([0, len(tA17), len(tA17)+len(tA18), len(tA17)+len(tA18)+len(tTESS)])
# times_run = tRT
# magnitudes_run = mRT
# segment_borders = np.array([0, len(tR1), len(tR1)+len(tR2), len(tR1)+len(tR2)+len(tTESS)])
times_run = tBT
magnitudes_run = mBT
segment_borders = np.array([0, len(tB15), len(tB15)+len(tB16), len(tB15)+len(tB16)+len(tB17), len(tB15)+len(tB16)+len(tB17)+len(tTESS)])
print('CALCULATE TIME DELAYS (DATA)')
times_TD = times_run# - y_tau_b - y_tau_c
magnitudes_TD = magnitudes_run
time_0 = times_TD[0]
time_slice, mag_slice, phase, phase_err = [], [], [], []
time_delays, time_delays_err, time_midpoints = [], [], []
t_edge = []
iteration = 0
# Iterate over lightcurve
for t, y, idx in zip(times_TD, magnitudes_TD, range(len(times_run))):
time_slice.append(t)
mag_slice.append(y)
# In each segment
if idx == segment_borders[iteration+1]-1:
print(iteration)
print(idx)
#print('{0}/{1}'.format(counter, size))
# Append the time midpoint
time_slice = np.array(time_slice)
mag_slice = np.array(mag_slice)
time_midpoints.append(np.mean(time_slice))
# And the phases for each frequency
phase_guess = dft_phase(time_slice, mag_slice, f_TESS)
sol = fit_leastsq_my(time_slice, mag_slice, f_TESS, a_TESS, phase_guess/(2*np.pi))
#print(phase_guess)
phase.append(phase_guess)
phase_err.append(sol[3]*2*np.pi)
time_slice, mag_slice = [], []
t_edge.append(t)
iteration += 1
#print(phase)
phase = np.unwrap(phase)
phase -= np.mean(phase)
phase = np.array(phase).T
phase_err = np.array(phase_err).T
print(phase)
print('Calc TD')
# Phase wrapping patch
for ph, ph_err, f in zip(phase, phase_err, f_TESS):
td = ph / (2*np.pi*f)
time_delays.append(td-np.mean(td))
time_delays_err.append(ph_err / (2*np.pi*f))
print('CALCULATE WEIGHTED VALUES')
time_delays_tmp = time_delays[:no_freqs_run_weighted]
time_delays_err_tmp = time_delays_err[:no_freqs_run_weighted]
td_weighted = []
td_weighted_err = []
for i in range(len(time_midpoints)):
#print(i)
tds, tds_err = [], []
for j in range(len(time_delays_tmp)):
#print(j)
tds.append(time_delays_tmp[j][i])
tds_err.append(time_delays_err_tmp[j][i])
tds = np.array(tds)
tds_err = np.array(tds_err)
td_weighted.append(sum(tds/tds_err**2)/sum(1/tds_err**2))
td_weighted_err.append((1/sum(1/tds_err**2))**(1/2))
td_weighted = np.array(td_weighted)
td_weighted_err = np.array(td_weighted_err)
print('PLOT EVERYTHING')
times_sim = np.linspace(jds[0], jds[-1], 1000)
y_tau_b_sim_curve = tau(times_sim, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c_sim_curve = tau(times_sim, star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
fig, ax1 = plt.subplots(1,1, figsize=(6, 3.5))
time_offset = 2457000
t_2018 = jds[0] - time_offset
t_2019 = jds[1] - time_offset
t_2020 = jds[2] - time_offset
t_2021 = jds[3] - time_offset
ax1.set_xlabel('Time [JD - 2457000]')
ax1.set_ylabel('TD [s]', color='b')
ax1.tick_params('y', colors='b')
ax2 = ax1.twiny()
ax2.tick_params(axis='x', rotation=0)
xmin1, xmax1 = ax1.get_xlim()
ax2.set_xlim(xmin1,xmax1)
ax2.set_xlabel('Time [yr]', labelpad=-200)
ax2.xaxis.set_label_coords(0.5,1.09)
y_tau_b_sim = tau(np.array(time_midpoints), star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c_sim = tau(np.array(time_midpoints), star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
lns11 = ax1.plot(times_sim - time_offset, (y_tau_b_sim_curve + y_tau_c_sim_curve - y_tau_b_sim[-1] - y_tau_c_sim[-1]) *86400, 'b', label = r"TD $\beta$ Pic b&c")
lns12 = ax1.plot(times_sim - time_offset, (y_tau_b_sim_curve - y_tau_b_sim[-1]) *86400, 'b', ls = '--', label = r"TD $\beta$ Pic b")
lns13 = ax1.plot(times_sim - time_offset, (y_tau_c_sim_curve - y_tau_c_sim[-1]) *86400, 'b', ls = '-.', label = r"TD $\beta$ Pic c")
lns2 = ax1.errorbar(np.array(time_midpoints) - time_offset, (td_weighted-td_weighted[-1])*86400, yerr = td_weighted_err*86400, fmt='.', marker='x', c='k', label = 'weighted')
leg_errbar = []
for i in range(no_freqs_run):
lns3 = ax1.errorbar(np.array(time_midpoints) - time_offset, (time_delays[i]-time_delays[i][-1])*86400, yerr = time_delays_err[i]*86400, fmt='.', alpha = ((a_TESS[i]/a_TESS[0])**2)/2)
leg_errbar.append(lns3)
lns = [lns11[0], lns12[0], lns13[0], lns2]#lns2[0], lns4, lns5, lns6, lns3]
labs = [l.get_label() for l in lns]
fig.tight_layout()
plt.show()
t_td_BT = np.array(time_midpoints)
tau_td_BT = []
for j in range(len(f_TESS)):
tau_td_BT.append(np.array([i for i in time_delays][j]-[i for i in time_delays][j][0]))
tau_td_BT = np.array(tau_td_BT)*86400
tau_err_td_BT = np.array(time_delays_err)*86400
tau_weighted_td_BT = td_weighted*86400-td_weighted[0]*86400
tau_err_weighted_td_BT = td_weighted_err*86400
for i in range(no_freqs_run):
plt.errorbar(np.array(time_midpoints) - time_offset, (phase[i]-phase[i][-1]), yerr = phase_err[i], fmt='.', alpha = ((a_TESS[i]/a_TESS[0])**2)/2)
# -
# +
no_freqs_run = 4
no_freqs_run_weighted = 4
print('CALCULATE TIMES')
times_run = tAT
magnitudes_run = mAT
segment_borders = np.array([0, len(tA17), len(tA17)+len(tA18), len(tA17)+len(tA18)+len(tTESS)])
# times_run = tRT
# magnitudes_run = mRT
# segment_borders = np.array([0, len(tR1), len(tR1)+len(tR2), len(tR1)+len(tR2)+len(tTESS)])
# times_run = tBT
# magnitudes_run = mBT
# segment_borders = np.array([0, len(tB15), len(tB15)+len(tB16), len(tB15)+len(tB16)+len(tB17)+len(tTESS)])
#AS
freq, amp = AS(times, mags, 15)
freq=freq[5000:]
amp=amp[5000:]
# PEAKS
max_power = amp.max()
peaks, _ = find_peaks(amp, height=max_power*0.1, distance=200)
# FREQS SORTED BY AMPLITUDE
sort_idx = np.argsort(amp[peaks])[::-1]
f_TESS = freq[peaks][sort_idx]
a_TESS = amp[peaks][sort_idx]
print(len(f_TESS))
print('CREATE MULTISINE MODEL')
#Phases
f_TESS = f_TESS[:no_freqs_run]
a_TESS = a_TESS[:no_freqs_run]
p_TESS = dft_phase(times, mags, f_TESS)/(2*np.pi)
# #model_times = lc_model_times(f_TESS, a_TESS, p_TESS*2*np.pi, times_run, noise_run)
print('CALCULATE TIME DELAYS (DATA)')
times_TD = times_run# - y_tau_b - y_tau_c
magnitudes_TD = magnitudes_run
time_0 = times_TD[0]
time_slice, mag_slice, phase, phase_err = [], [], [], []
time_delays, time_delays_err, time_midpoints = [], [], []
t_edge = []
iteration = 0
# Iterate over lightcurve
# Walk the light curve sample by sample, accumulating a slice per observing
# segment; at each segment boundary, measure the pulsation phases of f_TESS
# in that slice.
for t, y, idx in zip(times_TD, magnitudes_TD, range(len(times_run))):
    time_slice.append(t)
    mag_slice.append(y)
    # In each segment
    if idx == segment_borders[iteration+1]-1:
        print(iteration)
        print(idx)
        #print('{0}/{1}'.format(counter, size))
        # Append the time midpoint
        time_slice = np.array(time_slice)
        mag_slice = np.array(mag_slice)
        time_midpoints.append(np.mean(time_slice))
        # And the phases for each frequency
        phase_guess = dft_phase(time_slice, mag_slice, f_TESS)
        # BUG FIX: `sol` is consumed below (phase_err) but was never computed
        # in this cell -- a stale fit from a previously executed cell was used.
        # Recompute the least-squares fit here, as the sibling cells do.
        sol = fit_leastsq_my(time_slice, mag_slice, f_TESS, a_TESS, phase_guess/(2*np.pi))
        #print(phase_guess)
        phase.append(phase_guess)
        phase_err.append(sol[3]*2*np.pi)
        time_slice, mag_slice = [], []
        t_edge.append(t)
        iteration += 1
#print(phase)
phase = np.unwrap(phase)
phase -= np.mean(phase)
phase = np.array(phase).T
phase_err = np.array(phase_err).T
print(phase)
print('Calc TD')
# Phase wrapping patch
for ph, ph_err, f in zip(phase, phase_err, f_TESS):
td = ph / (2*np.pi*f)
time_delays.append(td-np.mean(td))
time_delays_err.append(ph_err / (2*np.pi*f))
print('CALCULATE WEIGHTED VALUES')
time_delays_tmp = time_delays[:no_freqs_run_weighted]
time_delays_err_tmp = time_delays_err[:no_freqs_run_weighted]
td_weighted = []
td_weighted_err = []
for i in range(len(time_midpoints)):
#print(i)
tds, tds_err = [], []
for j in range(len(time_delays_tmp)):
#print(j)
tds.append(time_delays_tmp[j][i])
tds_err.append(time_delays_err_tmp[j][i])
tds = np.array(tds)
tds_err = np.array(tds_err)
td_weighted.append(sum(tds/tds_err**2)/sum(1/tds_err**2))
td_weighted_err.append((1/sum(1/tds_err**2))**(1/2))
td_weighted = np.array(td_weighted)
td_weighted_err = np.array(td_weighted_err)
print('PLOT EVERYTHING')
times_sim = np.linspace(jds[0], jds[-1], 1000)
y_tau_b_sim_curve = tau(times_sim, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c_sim_curve = tau(times_sim, star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
fig, ax1 = plt.subplots(1,1, figsize=(6, 3.5))
time_offset = 2457000
t_2018 = jds[0] - time_offset
t_2019 = jds[1] - time_offset
t_2020 = jds[2] - time_offset
t_2021 = jds[3] - time_offset
ax1.set_xlabel('Time [JD - 2457000]')
ax1.set_ylabel('TD [s]', color='b')
ax1.tick_params('y', colors='b')
ax2 = ax1.twiny()
ax2.tick_params(axis='x', rotation=0)
xmin1, xmax1 = ax1.get_xlim()
ax2.set_xlim(xmin1,xmax1)
ax2.set_xlabel('Time [yr]', labelpad=-200)
ax2.xaxis.set_label_coords(0.5,1.09)
y_tau_b_sim = tau(np.array(time_midpoints), star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c_sim = tau(np.array(time_midpoints), star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
lns11 = ax1.plot(times_sim - time_offset, (y_tau_b_sim_curve + y_tau_c_sim_curve - y_tau_b_sim[-1] - y_tau_c_sim[-1]) *86400, 'b', label = r"TD $\beta$ Pic b&c")
lns12 = ax1.plot(times_sim - time_offset, (y_tau_b_sim_curve - y_tau_b_sim[-1]) *86400, 'b', ls = '--', label = r"TD $\beta$ Pic b")
lns13 = ax1.plot(times_sim - time_offset, (y_tau_c_sim_curve - y_tau_c_sim[-1]) *86400, 'b', ls = '-.', label = r"TD $\beta$ Pic c")
lns2 = ax1.errorbar(np.array(time_midpoints) - time_offset, (td_weighted-td_weighted[-1])*86400, yerr = td_weighted_err*86400, fmt='.', marker='x', c='k', label = 'weighted')
leg_errbar = []
for i in range(no_freqs_run):
lns3 = ax1.errorbar(np.array(time_midpoints) - time_offset, (time_delays[i]-time_delays[i][-1])*86400, yerr = time_delays_err[i]*86400, fmt='.', alpha = ((a_TESS[i]/a_TESS[0])**1)/2)
leg_errbar.append(lns3)
lns = [lns11[0], lns12[0], lns13[0], lns2]#lns2[0], lns4, lns5, lns6, lns3]
labs = [l.get_label() for l in lns]
fig.tight_layout()
plt.show()
t_td_BT = np.array(time_midpoints)
tau_td_BT = []
for j in range(len(f_TESS)):
tau_td_BT.append(np.array([i for i in time_delays][j]-[i for i in time_delays][j][0]))
tau_td_BT = np.array(tau_td_BT)*86400
tau_err_td_BT = np.array(time_delays_err)*86400
tau_weighted_td_BT = td_weighted*86400-td_weighted[0]*86400
tau_err_weighted_td_BT = td_weighted_err*86400
for i in range(no_freqs_run):
plt.errorbar(np.array(time_midpoints) - time_offset, (phase[i]-phase[i][-1]), yerr = phase_err[i], fmt='.', alpha = ((a_TESS[i]/a_TESS[0])**2)/2)
# -
# +
import os
# Directory holding the photometric light curves; each file is a two-column
# (time, magnitude) text table, transposed on load into separate 1-D arrays.
datadir = os.getcwd() + '/data/'
tTESS, mTESS = np.loadtxt(datadir + 'TESS.txt').T  # TESS run
tB15, mB15 = np.loadtxt(datadir + 'BRITE15.txt').T  # BRITE seasons 2015-2017
tB16, mB16 = np.loadtxt(datadir + 'BRITE16.txt').T
tB17, mB17 = np.loadtxt(datadir + 'BRITE17.txt').T
tA17, mA17 = np.loadtxt(datadir + 'ASTEP17_2048.txt').T  # ASTEP seasons
tA18, mA18 = np.loadtxt(datadir + 'ASTEP18_2048.txt').T
tR1, mR1 = np.loadtxt(datadir + 'bRing1.txt').T  # bRing seasons
tR2, mR2 = np.loadtxt(datadir + 'bRing2.txt').T
# +
# Concatenate each ground-based instrument with TESS into the combined
# (time, magnitude) series consumed by the analysis cells (tAT/tRT/tBT).
tAT = np.concatenate((tA17, tA18, tTESS))
mAT = np.concatenate((mA17, mA18, mTESS))
tRT = np.concatenate((tR1, tR2, tTESS))
mRT = np.concatenate((mR1, mR2, mTESS))
tBT = np.concatenate((tB15, tB16, tB17, tTESS))
mBT = np.concatenate((mB15, mB16, mB17, mTESS))
# -
# Quick-look scatter plots of the three concatenated curves.
plt.scatter(tAT, mAT)
plt.scatter(tRT, mRT)
plt.scatter(tBT, mBT)
#https://stackoverflow.com/questions/14581358/getting-standard-errors-on-fitted-parameters-using-the-optimize-leastsq-method-i
def fit_leastsq_my(times, mags, freqs, amps, phs):
    """Fit one fixed-frequency sinusoid per entry of *freqs* to (times, mags).

    Model per frequency f: |A| * sin(2*pi*(f*t + frac(phi))), with initial
    guesses taken from *amps* and *phs*. Phase uncertainties come from the
    leastsq covariance scaled by the reduced chi-square.

    Returns (freqs, fitted_amplitudes, fitted_phases, phase_errors).
    """
    fitted_params = []
    fitted_errors = []
    for k in range(len(freqs)):
        freq_k = freqs[k]

        def sine_model(x, p1, p2):
            # Same model as before: amplitude forced positive, phase wrapped to [0, 1).
            return abs(p1) * np.sin(2. * np.pi * (freq_k * x + (p2) % 1))

        def residuals(p, x, y):
            return sine_model(x, *p) - y

        p0 = [amps[k], phs[k]]
        pfit, pcov, infodict, errmsg, success = optimize.leastsq(
            residuals, p0, args=(times, mags), full_output=1, epsfcn=0.0001)
        # Scale the covariance by the reduced chi-square so the diagonal
        # yields parameter standard errors (see linked SO answer).
        if (len(mags) > len(p0)) and pcov is not None:
            s_sq = (residuals(pfit, times, mags) ** 2).sum() / (len(mags) - len(p0))
            pcov = pcov * s_sq
        else:
            pcov = np.inf
        errors = []
        for j in range(len(pfit)):
            try:
                errors.append(np.absolute(pcov[j][j]) ** 0.5)
            except Exception:
                # pcov degenerated to inf (scalar) -- report zero error.
                errors.append(0.00)
        fitted_params.append(pfit)
        fitted_errors.append(np.array(errors))
    my_amps = np.array([fitted_params[k][0] for k in range(len(freqs))])
    my_phs = np.array([fitted_params[k][1] for k in range(len(freqs))])
    my_phs_err = np.array([fitted_errors[k][1] for k in range(len(freqs))])
    return(freqs, my_amps, my_phs, my_phs_err)
# +
freq, amp = AS(times, mags, 15)
freq=freq[5000:]
amp=amp[5000:]
max_power = amp.max()
peaks, _ = find_peaks(amp, height=max_power*0.07, distance=200)
sort_idx = np.argsort(amp[peaks])[::-1]
f_TESS = freq[peaks][sort_idx]
a_TESS = amp[peaks][sort_idx]
print(len(f_TESS))
p_TESS = dft_phase(times, mags, f_TESS)/(2*np.pi)
# -
f_TESS += 0.00003
# +
no_freqs_run = 4
no_freqs_run_weighted = 4
print('CALCULATE TIMES')
# times_run = tAT
# magnitudes_run = mAT
# segment_borders = np.array([0, len(tA17), len(tA17)+len(tA18), len(tA17)+len(tA18)+len(tTESS)])
# times_run = tRT
# magnitudes_run = mRT
# segment_borders = np.array([0, len(tR1), len(tR1)+len(tR2), len(tR1)+len(tR2)+len(tTESS)])
times_run = times_pm_em
magnitudes_run = model_times[4]
segment_borders = np.arange(0, len(times_pm_em)+len(times), len(times))
print('CALCULATE TIME DELAYS (DATA)')
times_TD = times_run - y_tau_b - y_tau_c
magnitudes_TD = magnitudes_run
time_0 = times_TD[0]
time_slice, mag_slice, phase, phase_err = [], [], [], []
time_delays, time_delays_err, time_midpoints = [], [], []
t_edge = []
iteration = 0
# Iterate over lightcurve
for t, y, idx in zip(times_TD, magnitudes_TD, range(len(times_run))):
time_slice.append(t)
mag_slice.append(y)
# In each segment
if idx == segment_borders[iteration+1]-1:
print(iteration)
#print(idx)
#print('{0}/{1}'.format(counter, size))
# Append the time midpoint
time_slice = np.array(time_slice)
mag_slice = np.array(mag_slice)
time_midpoints.append(np.mean(time_slice))
# And the phases for each frequency
phase_guess = dft_phase(time_slice, mag_slice, f_TESS)
sol = fit_leastsq_my(time_slice, mag_slice, f_TESS, a_TESS, phase_guess/(2*np.pi))
#print(phase_guess)
phase.append(phase_guess)
phase_err.append(sol[3]*2*np.pi)
time_slice, mag_slice = [], []
t_edge.append(t)
iteration += 1
print(phase)
phase = np.array(phase)
mean_phase = np.mean(phase)
phase[np.where(phase - mean_phase > np.pi/2)] -= np.pi
phase[np.where(phase - mean_phase < -np.pi/2)] += np.pi
phase = np.unwrap(phase)
phase -= np.mean(phase)
phase = np.array(phase).T
phase_err = np.array(phase_err).T
#print(phase)
print('Calc TD')
# Phase wrapping patch
for ph, ph_err, f in zip(phase, phase_err, f_TESS):
td = ph / (2*np.pi*f)
time_delays.append(td-np.mean(td))
time_delays_err.append(ph_err / (2*np.pi*f))
print('CALCULATE WEIGHTED VALUES')
time_delays_tmp = time_delays[:no_freqs_run_weighted]
time_delays_err_tmp = time_delays_err[:no_freqs_run_weighted]
td_weighted = []
td_weighted_err = []
for i in range(len(time_midpoints)):
#print(i)
tds, tds_err = [], []
for j in range(len(time_delays_tmp)):
#print(j)
tds.append(time_delays_tmp[j][i])
tds_err.append(time_delays_err_tmp[j][i])
tds = np.array(tds)
tds_err = np.array(tds_err)
td_weighted.append(sum(tds/tds_err**2)/sum(1/tds_err**2))
td_weighted_err.append((1/sum(1/tds_err**2))**(1/2))
td_weighted = np.array(td_weighted)
td_weighted_err = np.array(td_weighted_err)
print('PLOT EVERYTHING')
times_sim = np.linspace(jds[0], jds[-1], 1000)
y_tau_b_sim_curve = tau(times_sim, star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c_sim_curve = tau(times_sim, star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
fig, ax1 = plt.subplots(1,1, figsize=(6, 3.5))
time_offset = 2457000
t_2018 = jds[0] - time_offset
t_2019 = jds[1] - time_offset
t_2020 = jds[2] - time_offset
t_2021 = jds[3] - time_offset
ax1.set_xlabel('Time [JD - 2457000]')
ax1.set_ylabel('TD [s]', color='b')
ax1.tick_params('y', colors='b')
ax2 = ax1.twiny()
ax2.tick_params(axis='x', rotation=0)
xmin1, xmax1 = ax1.get_xlim()
ax2.set_xlim(xmin1,xmax1)
ax2.set_xlabel('Time [yr]', labelpad=-200)
ax2.xaxis.set_label_coords(0.5,1.09)
y_tau_b_sim = tau(np.array(time_midpoints), star_planet_ratio_b, a2sini_b, e_b, varpi_b, Porb_b, tp_b, 100)
y_tau_c_sim = tau(np.array(time_midpoints), star_planet_ratio_c, a2sini_c, e_c, varpi_c, Porb_c, tp_c, 100)
lns11 = ax1.plot(times_sim - time_offset, (y_tau_b_sim_curve + y_tau_c_sim_curve - y_tau_b_sim[-1] - y_tau_c_sim[-1]) *86400, 'b', label = r"TD $\beta$ Pic b&c")
lns12 = ax1.plot(times_sim - time_offset, (y_tau_b_sim_curve - y_tau_b_sim[-1]) *86400, 'b', ls = '--', label = r"TD $\beta$ Pic b")
lns13 = ax1.plot(times_sim - time_offset, (y_tau_c_sim_curve - y_tau_c_sim[-1]) *86400, 'b', ls = '-.', label = r"TD $\beta$ Pic c")
lns2 = ax1.errorbar(np.array(time_midpoints) - time_offset, (td_weighted-td_weighted[-1])*86400, yerr = td_weighted_err*86400, fmt='.', marker='x', c='k', label = 'weighted')
leg_errbar = []
for i in range(no_freqs_run):
lns3 = ax1.errorbar(np.array(time_midpoints) - time_offset, (time_delays[i]-time_delays[i][-1])*86400, yerr = time_delays_err[i]*86400, fmt='.', alpha = ((a_TESS[i]/a_TESS[0])**1)/2)
leg_errbar.append(lns3)
#ax1.set_ylim(-50, 30)
lns = [lns11[0], lns12[0], lns13[0], lns2]#lns2[0], lns4, lns5, lns6, lns3]
labs = [l.get_label() for l in lns]
fig.tight_layout()
plt.show()
t_td_BT = np.array(time_midpoints)
tau_td_BT = []
for j in range(len(f_TESS)):
tau_td_BT.append(np.array([i for i in time_delays][j]-[i for i in time_delays][j][0]))
tau_td_BT = np.array(tau_td_BT)*86400
tau_err_td_BT = np.array(time_delays_err)*86400
tau_weighted_td_BT = td_weighted*86400-td_weighted[0]*86400
tau_err_weighted_td_BT = td_weighted_err*86400
for i in range(no_freqs_run):
plt.errorbar(np.array(time_midpoints) - time_offset, (phase[i]-phase[i][-1]), yerr = phase_err[i], fmt='.', alpha = ((a_TESS[i]/a_TESS[0]))/2)
#plt.ylim(-2, 2)
# -
|
debugging4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from IPython.display import display
from twitpol import config
# -
date_gap_dir = (config.DATA / 'queries' / 'date_gaps')
harris_csv = list(date_gap_dir.glob('*Harris*.csv'))[0]
buttigieg_csv = list(date_gap_dir.glob('*Buttigieg*.csv'))[0]
print(harris_csv)
print(buttigieg_csv)
df_harris = pd.read_csv(harris_csv, index_col=0)
df_buttigieg = pd.read_csv(buttigieg_csv, index_col=0)
df_buttigieg.head(10)
# +
def streak_start_or_end(row):
    """Mark whether *row* begins and/or ends a run of consecutive days.

    Expects integer day-gap fields 'dt_prev'/'dt_next'; a gap other than
    exactly 1 breaks the run on that side. Mutates and returns *row*.
    """
    begins_run = row['dt_prev'] != 1
    ends_run = row['dt_next'] != 1
    row['start'] = begins_run
    row['end'] = ends_run
    return row
def find_streaks(df):
    """Collapse a sorted, day-resolution 'Date' column into consecutive-day
    streaks.

    Returns a DataFrame with one row per streak and columns
    ['start', 'end'] holding the first and last date of each run.
    """
    df = df.copy()
    df['Date'] = pd.to_datetime(df['Date'])
    # Day gaps to the neighbouring rows; shift() yields NaT at the edges,
    # which is mapped to 0 so the first/last rows always open/close a streak.
    df['next'] = df['Date'].shift(-1)
    df['prev'] = df['Date'].shift(1)
    df['dt_next'] = (df['next'] - df['Date']).apply(lambda x: int(x.days) if not pd.isnull(x) else 0)
    df['dt_prev'] = (df['Date'] - df['prev']).apply(lambda x: int(x.days) if not pd.isnull(x) else 0)
    # Vectorized replacement for the former row-wise apply of
    # streak_start_or_end: a streak starts where the previous row is not
    # exactly one day back, and ends where the next row is not one day ahead.
    df['start'] = df['dt_prev'] != 1
    df['end'] = df['dt_next'] != 1
    # Start and end markers alternate in order (every start is eventually
    # matched by the next end), so positional pairing rebuilds the intervals.
    starts = df.loc[df['start'], 'Date'].tolist()
    ends = df.loc[df['end'], 'Date'].tolist()
    df_streaks = pd.DataFrame({'start': starts, 'end': ends})
    return df_streaks
# +
harris_streaks = find_streaks(df_harris)
buttigieg_streaks = find_streaks(df_buttigieg)
harris_streaks['name'] = 'HARRIS'
buttigieg_streaks['name'] = 'BUTTIGIEG'
df_all = pd.concat((harris_streaks, buttigieg_streaks), axis=0, sort=True)
df_all = df_all[['start', 'end', 'name']]
df_all.to_csv(date_gap_dir / 'harris_buttigieg_date_gaps.csv')
# -
|
notebooks/missing_harris_buttigieg.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import hann
from scipy import fftpack as fft
from scipy.signal import hann
from scipy.signal import chebwin
from scipy.signal import blackman
from scipy import signal
from scipy.signal import butter
from scipy.signal import filtfilt
# +
# WINDOW-COMPARISON FUNCTION
def windows(s):
    """Plot the raw FFT magnitude of *s* against Hann-, Chebyshev- and
    Blackman-windowed versions (one figure per window; a 1000 Hz sampling
    rate is assumed for the frequency axis)."""
    N=s.shape[0]
    F = fft.fft(s, axis=0)
    F = 20* np.log(np.abs(F))
    # Hann window
    w=hann(N)
    Fw= fft.fft(s*w)
    # NOTE(review): this branch uses log10 while the raw spectrum and the
    # other windows use the natural log -- looks unintended, but kept as-is
    # to preserve the existing plots; confirm which scale is wanted.
    Fw=20*np.log10(np.abs(Fw))
    f = np.linspace(0, 1000/2, N//2)
    plt.figure(figsize=(10,7))
    plt.subplot(3,1,1)
    plt.plot(f,F[0:N//2],color='black')
    plt.plot(f,Fw[0:N//2],color='red')
    plt.title("Señal con ventana: Hann")
    plt.grid(True)
    # Chebyshev window (100 dB sidelobe attenuation)
    w1 = chebwin(N, 100)
    Fw1 = fft.fft(s*w1)
    Fw1 = 20* np.log(np.abs(Fw1))
    plt.figure(figsize = (10,7))
    plt.subplot(3,1,2)
    plt.plot(f,F[0:N//2],color='black')
    plt.plot(f, Fw1[0:N//2], color='red')
    plt.title("Señal con ventana: chebwin")
    plt.grid(True)
    # Blackman window
    w2 = blackman(N)
    # BUG FIX: previously transformed the *global* SO2 instead of the
    # argument s, so this panel ignored the signal passed in.
    Fw2 = fft.fft(s*w2)
    Fw2 = 20* np.log(np.abs(Fw2))
    plt.figure(figsize = (10,7))
    # BUG FIX: copy-pasted subplot index (3,1,2) -> (3,1,3).
    plt.subplot(3,1,3)
    plt.plot(f,F[0:N//2],color='black')
    plt.plot(f, Fw2[0:N//2], color='red')
    plt.title("Señal con ventana: blackman")
    plt.grid(True)
    return ()
# -
#PSD
def PSD (signal, l=1024, T=0.5, sr=1000):
    """Plot and return an averaged (Welch-style) power spectrum of *signal*.

    Segments of length *l* with fractional overlap *T* are FFT'd, converted
    to 20*log(|.|), averaged across segments, and the positive-frequency
    half is plotted against a 0..sr/2 Hz axis.

    NOTE(review): the parameter name shadows the scipy `signal` module
    imported at file level (unused inside this function, so harmless here).
    """
    N=signal.shape[0]
    psd=[]
    i = 0;
    cont = 0;
    # Slide a window of length l with hop (1-T)*l until it runs past the end.
    while cont <= N:
        ps=fft.fft(signal[i:i+l])
        psd.append(20*np.log(np.abs(ps)))
        i = i+int((1-T)*l)
        cont = i+l
    # Drop the final segment (generally shorter than l, so its spectrum has
    # a different length) before stacking and averaging.
    psd=np.array(psd[:-1])
    psd = np.mean(psd,0)
    n = np.size(psd,0)//2
    plt.plot(np.linspace(0,sr/2,n), psd[0:n])
    plt.grid(True)
    return [psd[0:n]]
# FUNCTION THAT PLOTS A FILTER'S FREQUENCY RESPONSE (TRANSFER FUNCTION)
def Respuesta(w,h):
    """Plot magnitude (dB, blue) and unwrapped phase (green, radians) of the
    digital filter with numerator *w* and denominator *h* coefficients.

    The parameters are rebound to freqz's (frequencies, response) outputs
    below, mirroring the original implementation.
    """
    w,h=signal.freqz(w, h)
    # (removed two no-op `w.shape` / `h.shape` expression statements)
    angles = np.unwrap(np.angle(h))
    plt.title('Digital filter frequency response')
    plt.plot(w, 20 * np.log10(abs(h)), 'b')
    plt.plot(w, angles, 'g')
    plt.ylabel('Angle (radians)', color='g')
    plt.xlabel('Frecuencia [rad/muestra]')
    plt.grid(True)
    return ()
def m_movil_t (x, N=1024):
    """Remove the per-block mean from *x* in consecutive blocks of length *N*.

    Any trailing remainder shorter than N is discarded; the detrended blocks
    are returned re-joined as one flat array.
    """
    n_blocks = len(x) // N
    detrended = [x[k*N:(k+1)*N] - np.mean(x[k*N:(k+1)*N]) for k in range(n_blocks)]
    return (np.array(detrended).flatten())
def ten_lin (s, t, N=1024):
    """Blockwise linear detrend: for each consecutive block of length *N*,
    fit s = a*t + b by ordinary least squares and subtract the fitted line.

    Any trailing remainder shorter than N is discarded; the residual blocks
    are returned re-joined as one flat array.
    """
    residual_blocks = []
    for k in range(len(s) // N):
        t_blk = t[k*N:k*N+N]
        s_blk = s[k*N:k*N+N]
        # Design matrix [t, 1] and column target, solved via the normal
        # equations exactly as before: w = (X^T X)^-1 X^T y.
        X = np.array([t_blk, np.ones(len(t_blk))]).T
        y_col = np.array([s_blk]).T
        w = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, y_col))
        trend = w[0]*t_blk + w[1]
        residual_blocks.append(s_blk - trend)
    return (np.array(residual_blocks).flatten())
# ## Actividad II
#
# ### Ejercicio 1
path = '/Users/<NAME>/Desktop/MCIB/MCIB-19-P/data/Señales adquirid/'
fname = 'Med4.txt'
# +
data = np.loadtxt(path+fname,delimiter='\t',dtype='str')
t=data[ : , 0]
SO2=data[:,1]
RESP=data[:,2]
EMG=data[:,3]
PUL=data[:,4]
ECG=data[:,5]
t = t.astype('float')*60
SO2 = SO2.astype('float')
RESP = RESP.astype('float')
EMG = EMG.astype('float')
PUL = PUL.astype('float')
ECG = ECG.astype('float')
# -
# ###### IMPLEMENTACIÓN
# # Eleccion de la mejor ventana
# +
W1=windows(SO2)
W1=windows(ECG)
W1=windows(EMG)
W1=windows(PUL)
W1=windows(RESP)
# -
path = '/Users/<NAME>/Desktop/MCIB/MCIB-19-P/data/Señales adquirid/'
fname = 'Lect1.txt'
# +
# Load the 'Lect1' (reading) recording: tab-separated columns are
# time(min), SO2, respiration, EMG, pulse, ECG -- read as strings, then
# converted to float (time additionally scaled from minutes to seconds).
data1 = np.loadtxt(path+fname,delimiter='\t',dtype='str')
t1=data1[:,0]
SO21=data1[:,1]
RESP1=data1[:,2]
EMG1=data1[:,3]
PUL1=data1[:,4]
ECG1=data1[:,5]
t1 = t1.astype('float')*60
# BUG FIX: these conversions previously read the *unsuffixed* globals
# (SO2, RESP, EMG, PUL, ECG) from the other recording, so the "Lectura"
# variables silently duplicated the "Ejercicio" data.
SO21 = SO21.astype('float')
RESP1 = RESP1.astype('float')
EMG1 = EMG1.astype('float')
PUL1 = PUL1.astype('float')
ECG1 = ECG1.astype('float')
# +
plt.figure(figsize = (15, 3))
plt.subplot(1,2,1)
plt.title("SO2, Ejercicio")
PS1= PSD (SO2)
plt.subplot(1,2,2)
PS11= PSD (SO21)
plt.title("SO2, Lectura")
plt.figure(figsize = (15, 3))
plt.subplot(1,2,1)
PS2= PSD (EMG)
plt.title("EMG, Ejercicio")
plt.subplot(1,2,2)
PS21= PSD (EMG1)
plt.title("EMG, Lectura")
plt.figure(figsize = (15, 3))
plt.subplot(1,2,1)
PS3= PSD (PUL)
plt.title("PUL, Ejercicio")
plt.subplot(1,2,2)
PS31= PSD (PUL1)
plt.title("PUL, Lectura")
plt.figure(figsize = (15, 3))
plt.subplot(1,2,1)
PS4= PSD (RESP)
plt.title("RESP, Ejercicio")
plt.subplot(1,2,2)
PS41= PSD (RESP1)
plt.title("RESP, Lectura")
plt.figure(figsize = (15, 3))
plt.subplot(1,2,1)
PS5= PSD (ECG)
plt.title("ECG, Ejercicio")
plt.subplot(1,2,2)
PS51= PSD (ECG1)
plt.title("ECG, Lectura")
plt.show()
# -
# ### Ejercicio 2
def filt_but(s, w, ord = 5):
    """Zero-phase low-pass Butterworth filter.

    *w* is the cutoff in Hz, normalized against a 500 Hz Nyquist frequency
    (i.e. a 1000 Hz sampling rate). Returns (filtered_signal, b, a).
    NOTE(review): `ord` shadows the builtin; name kept for call compatibility.
    """
    normalized_cutoff = w / 500
    b, a = butter(ord, normalized_cutoff, 'low')
    filtered = filtfilt(b, a, s)
    return (filtered, b, a)
# +
# Filtro notch para quitar el ruido de 60 Hz en algunas señales
[b_n, a_n] = signal.iirnotch(60.0, 70.0, 1000)
plt.figure(figsize = (25, 4))
Respuesta(b_n,a_n)
#SO2
[SO2_f, b, a]= filt_but(SO2, 10)
plt.figure(figsize = (25, 4))
plt.subplot(1,2,1)
plt.plot(t[40000:55000], SO2[40000:55000],color = 'red', label="Señal sin filtrar")
plt.plot(t[40000:55000], SO2_f[40000:55000],color = 'green', label="Señal filtrada")
plt.legend(loc='best')
plt.title("Señal de saturación de O2, Ejercicio")
plt.grid(True)
plt.subplot(1,4,3)
Respuesta(b,a)
#RESP
[RESP_f, b, a]= filt_but(RESP, 10)
plt.figure(figsize = (25, 4))
plt.subplot(1,2,1)
plt.plot(t[40000:55000], RESP[40000:55000],color = 'red', label="Señal sin filtrar")
plt.plot(t[40000:55000], RESP_f[40000:55000],color = 'green', label="Señal filtrada")
plt.legend(loc='best')
plt.title("Señal de respiración, Ejercicio")
plt.grid(True)
plt.subplot(1,4,3)
Respuesta(b,a)
#EMG
EMG_n= filtfilt(b_n, a_n, EMG)
[EMG_f, b, a]= filt_but(EMG_n, 150)
plt.figure(figsize = (25, 4))
plt.subplot(1,2,1)
plt.plot(t[40000:55000], EMG[40000:55000],color = 'red', label="Señal sin filtrar")
plt.plot(t[40000:55000], EMG_f[40000:55000],color = 'green', label="Señal filtrada")
plt.legend(loc='best')
plt.title("Señal de EMG, Ejercicio")
plt.grid(True)
plt.subplot(1,4,3)
Respuesta(b,a)
#PUL
[PUL_f, b, a]= filt_but(PUL, 5)
plt.figure(figsize = (25, 4))
plt.subplot(1,2,1)
plt.plot(t[40000:55000], PUL[40000:55000],color = 'red', label="Señal sin filtrar")
plt.plot(t[40000:55000], PUL_f[40000:55000],color = 'green', label="Señal filtrada")
plt.legend(loc='best')
plt.title("Señal de pulso, Ejercicio")
plt.grid(True)
plt.subplot(1,4,3)
Respuesta(b,a)
#ECG
ECG_n= filtfilt(b_n, a_n, ECG)
[ECG_f, b, a]= filt_but(ECG_n, 100)
plt.figure(figsize = (25, 4))
plt.subplot(1,2,1)
plt.plot(t[40000:55000], ECG[40000:55000],color = 'red', label="Señal sin filtrar")
plt.plot(t[40000:55000], ECG_f[40000:55000],color = 'green', label="Señal filtrada")
plt.legend(loc='best')
plt.title("Señal de ECG, Ejercicio")
plt.grid(True)
plt.subplot(1,4,3)
Respuesta(b,a)
plt.show()
# -
# ### Ejercicio 3
# +
#Tendencia ---- Filtrado
from scipy import signal
ECG_ten = ten_lin (ECG, t,1000)
ECG_ten_n= filtfilt(b_n, a_n, ECG_ten)
[ECG_ten_fil, b, a]= filt_but(ECG_ten_n, 100)
# Filtrado ---- Tendencia
ECG_f_n= filtfilt(b_n, a_n, ECG)
[ECG_fil, b1, a1]= filt_but(ECG_f_n, 100)
ECG_fil_ten = ten_lin (ECG_fil, t,1000)
plt.figure(figsize = (15, 4))
plt.plot(t[45000:60000], ECG[45000:60000]+30,color = 'red', label="Señal sin procesar")
plt.plot(t[45000:60000], ECG_ten_fil[45000:60000],color = 'blue', label="Tendencia -> Filtrado")
plt.plot(t[45000:60000], ECG_fil_ten[45000:60000],color = 'green', label="Filtrado -> Tendencia")
plt.legend(loc='best')
plt.title("Señal de EEG, Ejercicio")
plt.ylim(-5,60)
plt.xlim(45,60)
plt.grid(True)
# -
|
SRC/Practica2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (data-modelling-with-cassandra)
# language: python
# name: pycharm-8b572134
# ---
# + [markdown] pycharm={}
# # Part I. ETL Pipeline for Pre-Processing the Files
# + [markdown] pycharm={}
# ## PLEASE RUN THE FOLLOWING CODE FOR PRE-PROCESSING THE FILES
# + [markdown] pycharm={}
# #### Import Python packages
# + pycharm={"is_executing": false}
# Import Python packages
from decimal import Decimal
import pandas as pd
import cassandra
import re
import os
import glob
import numpy as np
import json
import csv
# + [markdown] pycharm={}
# #### Creating list of filepaths to process original event csv data files
# + pycharm={"is_executing": false}
# checking your current working directory
print(os.getcwd())
# Get your current folder and subfolder event data
filepath = os.getcwd() + '/event_data'
# Create a for loop to create a list of files and collect each filepath
for root, dirs, files in os.walk(filepath):
# join the file path and roots with the subdirectories using glob
file_path_list = glob.glob(os.path.join(root,'*'))
#print(file_path_list)
# + [markdown] pycharm={}
# #### Processing the files to create the data file csv that will be used for Apache Casssandra tables
# + pycharm={"is_executing": false}
# initiating an empty list of rows that will be generated from each file
full_data_rows_list = []
# for every filepath in the file path list
for f in file_path_list:
    # reading csv file
    with open(f, 'r', encoding = 'utf8', newline='') as csvfile:
        # creating a csv reader object
        csvreader = csv.reader(csvfile)
        # skip the header row of each per-day event file
        next(csvreader)
        # extracting each data row one by one and append it
        for line in csvreader:
            #print(line)
            full_data_rows_list.append(line)
# uncomment the code below if you would like to get total number of rows
#print(len(full_data_rows_list))
# uncomment the code below if you would like to check to see what the list of event data rows will look like
#print(full_data_rows_list)
# creating a smaller event data csv file called event_datafile_full csv that will be used to insert data into the \
# Apache Cassandra tables
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)
with open('event_datafile_new.csv', 'w', encoding = 'utf8', newline='') as f:
    writer = csv.writer(f, dialect='myDialect')
    writer.writerow(['artist','firstName','gender','itemInSession','lastName','length',\
                'level','location','sessionId','song','userId'])
    for row in full_data_rows_list:
        # rows with an empty artist field are events without a song play; skip them
        if (row[0] == ''):
            continue
        # map the raw event columns onto the reduced 11-column schema written above
        writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[12], row[13], row[16]))
# + pycharm={"is_executing": false}
# check the number of rows in your csv file
with open('event_datafile_new.csv', 'r', encoding = 'utf8') as f:
print(sum(1 for line in f))
# + [markdown] pycharm={}
# # Part II. Complete the Apache Cassandra coding portion of your project.
#
# + [markdown] pycharm={}
#
# #### Creating a Cluster
# + pycharm={"is_executing": false}
from cassandra.cluster import Cluster
try:
    # This should make a connection to a Cassandra instance on the local machine;
    # the address below is the default Docker Machine IP -- adjust it if your
    # cluster listens elsewhere (e.g. 127.0.0.1)
    cluster = Cluster(['192.168.99.100'])
    # To establish connection and begin executing queries, we need a session
    session = cluster.connect()
except Exception as e:
    print(e)
# + [markdown] pycharm={}
#
# #### Create Keyspace
# + pycharm={"is_executing": false}
try:
    # SimpleStrategy with replication_factor 1 is appropriate for a single-node
    # development cluster only; production would use NetworkTopologyStrategy
    session.execute("""CREATE KEYSPACE IF NOT EXISTS udacity
                    WITH REPLICATION =
                    { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }
                    """)
except Exception as e:
    print(e)
# + [markdown] pycharm={}
#
# #### Set Keyspace
# + pycharm={"is_executing": false}
try:
    # bind the session to the 'udacity' keyspace so queries need not prefix table names
    session.set_keyspace('udacity')
except Exception as e:
    print(e)
# + [markdown] pycharm={}
# ### Now we need to create tables to run the following queries. Remember, with Apache Cassandra you model the database tables on the queries you want to run.
# + [markdown] pycharm={}
# ## Create queries to ask the following three questions of the data
#
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# #### Query 1: Give me the artist, song title and song's length in the music app history that was heard during
#
# + pycharm={"metadata": false, "name": "#%%\n", "is_executing": false}
## sessionId = 338, and itemInSession = 4
try:
    # Query 1 filters by sessionId and itemInSession, so session_id is the
    # partition key and item_in_session the clustering column; together they
    # uniquely identify one play event within a session.
    session.execute("""CREATE TABLE IF NOT EXISTS history_by_session (
                    session_id bigint, item_in_session int, artist text, song_title text, length decimal,
                    PRIMARY KEY ((session_id), item_in_session))
                    """)
except Exception as e:
    print(e)
# + [markdown] pycharm={"metadata": false}
# #### Insert data into table `history_by_session`
# + pycharm={"is_executing": false}
file = 'event_datafile_new.csv'

# The INSERT statement is loop-invariant: build it once instead of
# re-concatenating the same string for every CSV row.
query = ("INSERT INTO history_by_session "
         "(session_id, item_in_session, artist, song_title, length) "
         "VALUES(%s, %s, %s, %s, %s)")

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip the header row
    for line in csvreader:
        # CSV columns: 0=artist, 3=itemInSession, 5=length, 8=sessionId, 9=song
        session.execute(query, (int(line[8]), int(line[3]), line[0], line[9], Decimal(line[5])))
# + [markdown] pycharm={}
# #### Do a SELECT to verify that the data have been inserted into `history_by_session`
#
# + pycharm={"is_executing": false}
try:
    # Equality on both the partition key (session_id) and the clustering
    # column (item_in_session) returns at most one row.
    rows = session.execute("""SELECT artist, song_title, length
                              FROM history_by_session
                              WHERE session_id = 338
                              AND item_in_session = 4""")
    # column labels follow the SELECT order
    df = pd.DataFrame(list(rows), columns=['artist', 'song_title', 'length'])
    print(df.to_string())
except Exception as e:
    print(e)
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# #### Query 2: Give me only the following: name of artist, song (sorted by itemInSession) and user (first and last name)\
# + pycharm={"is_executing": false}
## for userid = 10, sessionid = 182
try:
    # Query 2 filters by userid and sessionid, so (user_id, session_id) forms a
    # composite partition key; item_in_session is the clustering column so the
    # songs of a session come back ordered.
    # NOTE(review): the clustering order is DESC while the requirement only says
    # "sorted by itemInSession" -- confirm descending order is intended.
    session.execute("""CREATE TABLE IF NOT EXISTS history_by_user_session (
                    user_id bigint, session_id bigint, item_in_session int, artist text, song_title text,
                    user_first_name text, user_last_name text,
                    PRIMARY KEY ((user_id, session_id), item_in_session))
                    WITH CLUSTERING ORDER BY (item_in_session DESC)
                    """)
except Exception as e:
    print(e)
# + [markdown] pycharm={"metadata": false}
# #### Insert data into table `history_by_user_session`
# + pycharm={"metadata": false, "name": "#%%\n", "is_executing": false}
# We have provided part of the code to set up the CSV file. Please complete the Apache Cassandra code below#
file = 'event_datafile_new.csv'

# Loop-invariant INSERT statement, built once outside the row loop instead of
# being re-concatenated for every CSV row.
query = ("INSERT INTO history_by_user_session "
         "(user_id, session_id, item_in_session, artist, song_title, "
         "user_first_name, user_last_name) "
         "VALUES(%s, %s, %s, %s, %s, %s, %s)")

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip the header row
    for line in csvreader:
        # CSV columns: 0=artist, 1=firstName, 3=itemInSession, 4=lastName,
        # 8=sessionId, 9=song, 10=userId
        session.execute(query, (int(line[10]), int(line[8]), int(line[3]), line[0], line[9], line[1], line[4]))
# + [markdown] pycharm={"metadata": false}
# #### Do a SELECT to verify that the data have been inserted into `history_by_user_session`
# + pycharm={"metadata": false, "name": "#%%\n", "is_executing": false}
try:
    # Both partition-key columns must be constrained; rows come back ordered by
    # the clustering column item_in_session (descending, per the table DDL).
    rows = session.execute("""SELECT artist, song_title, user_first_name, user_last_name
                              FROM history_by_user_session
                              WHERE user_id = 10
                              AND session_id = 182""")
    # column labels follow the SELECT order
    df = pd.DataFrame(list(rows), columns=['artist', 'song_title', 'user_first_name', 'user_last_name'])
    print(df.to_string())
except Exception as e:
    print(e)
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# #### Query 3: Give me every user name (first and last) in my music app history who listened to the song 'All Hands Against His Own'
# + pycharm={"is_executing": false}
try:
    # Query 3 filters by song title, so song_title is the partition key; adding
    # user_id as clustering column makes each (song, user) pair unique, so every
    # listener of a song appears exactly once.
    session.execute("""CREATE TABLE IF NOT EXISTS history_by_song_title (
                    user_id bigint, song_title text, user_first_name text, user_last_name text,
                    PRIMARY KEY ((song_title), user_id))
                    """)
except Exception as e:
    print(e)
# + [markdown] pycharm={"metadata": false, "name": "#%% md\n"}
# #### Insert data into table `history_by_song_title`
# + pycharm={"metadata": false, "name": "#%%\n", "is_executing": false}
# We have provided part of the code to set up the CSV file. Please complete the Apache Cassandra code below#
file = 'event_datafile_new.csv'

# Loop-invariant INSERT statement, built once outside the row loop instead of
# being re-concatenated for every CSV row.
query = ("INSERT INTO history_by_song_title "
         "(user_id, song_title, user_first_name, user_last_name) "
         "VALUES(%s, %s, %s, %s)")

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip the header row
    for line in csvreader:
        # CSV columns: 1=firstName, 4=lastName, 9=song, 10=userId
        session.execute(query, (int(line[10]), line[9], line[1], line[4]))
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Do a SELECT to verify that the data have been inserted into each table
#
# + pycharm={"metadata": false, "name": "#%%\n", "is_executing": false}
try:
    # Partition-key equality on song_title; one row per listening user.
    rows = session.execute("""SELECT user_id, song_title, user_first_name, user_last_name
                              FROM history_by_song_title
                              WHERE song_title='All Hands Against His Own'
                              """)
    # BUGFIX: the column labels must follow the SELECT order. The original list
    # was ['user_id', 'user_first_name', 'user_last_name', 'song_title'], which
    # mislabelled the song/name columns in the resulting DataFrame.
    df = pd.DataFrame(list(rows), columns=['user_id', 'song_title', 'user_first_name', 'user_last_name'])
    print(df.to_string())
except Exception as e:
    print(e)
# + [markdown] pycharm={}
# ### Drop the tables before closing out the sessions
# + pycharm={"is_executing": false}
# Remove the three query-specific tables; any failing DROP is reported once.
try:
    for table in ("history_by_session", "history_by_user_session", "history_by_song_title"):
        session.execute("DROP TABLE " + table)
except Exception as e:
    print(e)
# + [markdown] pycharm={}
# ### Close the session and cluster connection¶
# + pycharm={}
# release the session, then all cluster connections and resources
session.shutdown()
cluster.shutdown()
# + pycharm={}
|
Project_1B_ Project_Template.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # sMRI Quality Control
#
# ---
#
# Questions:
# * How do we identify image preprocessing failures?
#
# Objectives:
# * Visualize processing failures
# * Familiarize with automatic QC tools
#
# ## Things that can go wrong
#
# ### Acquisition
#
# Due to MR physics (e.g. Field of view (FOV), ghosting, aliasing), incorrect parameters can truncate and/or duplicate brain anatomy.
#
# 
#
# Due to participant (e.g. Motion artifacts), participant specific issues such as motion artifacts in Parkinson’s patients can manifest in the scan (e.g. ringing effect showing ripples or curved lines).
#
# 
#
# ### Quantification
#
# Existing image processing pipelines (e.g. FreeSurfer, CIVET) will have a few QC tools and examples that can help with failure detection and quality control of volumetric segmentations and surface parcellations.
#
# 
#
# Usage of a new method will require your own QC protocols. Highly specific segmentation methods especially require visual inspection from a neuroanatomy expert. Even for qualitative visual inspection, it is important to create a QC protocol and share it with the results.
#
# 
#
# ## Automatic QC tools
#
# ### Using reports from [existing pipelines](https://fmriprep.org/en/stable/_static/sample_report.html).
#
# 
#
# ### Using QC tools
#
# [MRIQC](https://github.com/poldracklab/mriqc): extracts no-reference IQMs (image quality metrics) from structural (T1w and T2w) and functional MRI data.
#
# Individual report | Group report
# :-------------------------:|:-------------------------:
#  | 
#
# [VisualQC](https://github.com/raamana/visualqc): assistive tool to improve the quality control workflow of neuroimaging data.
#
# T1w acquisition | Alignment| Cortical Parcellation
# :-------------------------:|:-------------------------:|:----:|
#  | | 
|
sMRI/5-sMRI_Quality_Control.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Custom regression models
#
# Like for univariate models, it is possible to create your own custom parametric survival models. Why might you want to do this?
#
# - Create new / extend AFT models using known probability distributions
# - Create a piecewise model using domain knowledge about subjects
# - Iterate and fit a more accurate parametric model
#
# *lifelines* has a very simple API to create custom parametric regression models. You only need to define the cumulative hazard function. For example, the cumulative hazard for the constant-hazard regression model looks like:
#
# $$
# H(t, x) = \frac{t}{\lambda(x)}\\ \lambda(x) = \exp{(\vec{\beta} \cdot \vec{x}^{\,T})}
# $$
#
# where $\beta$ are the unknowns we will optimize over.
#
#
# Below are some example custom models.
# +
from lifelines.fitters import ParametricRegressionFitter
from autograd import numpy as np
from lifelines.datasets import load_rossi
class ExponentialAFTFitter(ParametricRegressionFitter):
    """Constant-hazard (exponential) AFT model: H(t, x) = t / lambda(x)."""

    # lifelines requires a non-empty list naming every fitted parameter
    _fitted_parameter_names = ['lambda_']

    def _cumulative_hazard(self, params, t, Xs):
        # params: dict mapping parameter name -> coefficient vector
        # Xs: dict mapping parameter name -> 2d design matrix
        scale = np.exp(np.dot(Xs['lambda_'], params['lambda_']))
        return t / scale
rossi = load_rossi()
# constant column so the linear predictor has an intercept term
rossi['intercept'] = 1.0

# the dict below maps dataframe columns to each fitted parameter
regressors = {
    'lambda_': rossi.columns
}

# fit on duration column 'week' with event indicator 'arrest'
eaf = ExponentialAFTFitter().fit(rossi, 'week', 'arrest', regressors=regressors)
eaf.print_summary()
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
class DependentCompetingRisksHazard(ParametricRegressionFitter):
    """
    Two dependent Weibull competing risks combined through a copula-style
    power term: H(t) = ((t/l1)^r1 + (t/l2)^r2)^alpha.

    Reference
    --------------
    <NAME> Valdez, UNDERSTANDING RELATIONSHIPS USING COPULAS
    """

    _fitted_parameter_names = ["lambda1", "rho1", "lambda2", "rho2", "alpha"]

    def _cumulative_hazard(self, params, T, Xs):
        # every parameter is kept strictly positive through an exp link
        def _link(name):
            return np.exp(np.dot(Xs[name], params[name]))

        scale1, shape1 = _link("lambda1"), _link("rho1")
        scale2, shape2 = _link("lambda2"), _link("rho2")
        dependence = _link("alpha")
        return ((T / scale1) ** shape1 + (T / scale2) ** shape2) ** dependence
swf = DependentCompetingRisksHazard(penalizer=0.1)

rossi = load_rossi()
rossi["intercept"] = 1.0
rossi["week"] = rossi["week"] / rossi["week"].max()  # scaling often helps with convergence

# map dataframe columns onto each parameter: both Weibull scales get the full
# covariate set, while the shapes and the dependence term are intercept-only
covariates = {
    "lambda1": rossi.columns,
    "lambda2": rossi.columns,
    "rho1": ["intercept"],
    "rho2": ["intercept"],
    "alpha": ["intercept"],
}

swf.fit(rossi, "week", event_col="arrest", regressors=covariates, timeline=np.linspace(0, 2))
swf.print_summary(2)

ax = swf.plot()
# survival curves for every 100th subject
ax = swf.predict_survival_function(rossi.loc[::100]).plot()
ax.set_title("Predicted survival functions for selected subjects")
# -
# ### Cure models
#
# Suppose in our population we have a subpopulation that will never experience the event of interest. Or, for some subjects the event will occur so far in the future that it's essentially at time infinity. In this case, the survival function for an individual should not asymptotically approach zero, but _some positive value_. Models that describe this are sometimes called cure models (i.e. the subject is "cured" of death and hence no longer susceptible) or time-lagged conversion models.
#
# It would be nice to be able to use common survival models _and_ have some "cure" component. Let's suppose that for individuals that will experience the event of interest, their survival distribution is a Weibull, denoted $S_W(t)$. For a randomly selected individual in the population, their survival curve, $S(t)$, is:
#
# $$
# \begin{align*}
# S(t) = P(T > t) &= P(\text{cured}) P(T > t\;|\;\text{cured}) + P(\text{not cured}) P(T > t\;|\;\text{not cured}) \\
# &= p + (1-p) S_W(t)
# \end{align*}
# $$
#
# Even though it's in an unconventional form, we can still determine the cumulative hazard (which is the negative logarithm of the survival function):
#
# $$ H(t) = -\log{\left(p + (1-p) S_W(t)\right)} $$
# +
from autograd.scipy.special import expit
class CureModel(ParametricRegressionFitter):
    """Weibull mixture-cure model: a fraction of subjects never experiences
    the event, so S(t) = p + (1 - p) * S_Weibull(t)."""

    _scipy_fit_method = "SLSQP"
    _scipy_fit_options = {"ftol": 1e-10, "maxiter": 200}

    _fitted_parameter_names = ["lambda_", "beta_", "rho_"]

    def _cumulative_hazard(self, params, T, Xs):
        # probability of being susceptible, squashed into (0, 1) via the logistic
        susceptible = expit(np.dot(Xs["beta_"], params["beta_"]))
        # Weibull survival for the susceptible sub-population
        scale = np.exp(np.dot(Xs["lambda_"], params["lambda_"]))
        shape = np.exp(np.dot(Xs["rho_"], params["rho_"]))
        weibull_sf = np.exp(-(T / scale) ** shape)
        # H(t) = -log(S(t)) with S(t) = (1 - c) + c * S_W(t)
        return -np.log((1 - susceptible) + susceptible * weibull_sf)
cm = CureModel(penalizer=0.0)

rossi = load_rossi()
rossi["intercept"] = 1.0

# lambda_ uses every covariate; the Weibull shape is intercept-only and the
# cure component (beta_) depends on the intercept plus financial aid ('fin')
covariates = {"lambda_": rossi.columns, "rho_": ["intercept"], "beta_": ['intercept', 'fin']}

cm.fit(rossi, "week", event_col="arrest", regressors=covariates, timeline=np.arange(250))
cm.print_summary(2)
# -
# predicted cumulative hazards for every 100th subject
cm.predict_cumulative_hazard(rossi.loc[::100]).plot(figsize=(12,6))
# +
# what's the effect on the survival curve if I vary "age"
# NOTE(review): `plt` is not imported anywhere in this notebook chunk --
# presumably matplotlib.pyplot is imported earlier; verify before running standalone.
fig, ax = plt.subplots(figsize=(12, 6))
cm.plot_covariate_groups(['age'], values=np.arange(20, 50, 5), cmap='coolwarm', ax=ax)
# -
# ### Spline models
#
# See `royston_parmar_splines.py` in the examples folder: https://github.com/CamDavidsonPilon/lifelines/tree/master/examples
|
docs/jupyter_notebooks/Custom Regression Models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Apresentação
#
# O objetivo deste notebook é apresentar a solução para o trabalho individual proposto no módulo de **Análise Preditiva Avançada** do curso de MBA em Business Analytics e Big Data da FGV (turma Berrini).
#
# **Professores**
# * <NAME>
# * <NAME>
#
# | Item | Dados do Aluno |
# |-----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------|
# | Nome | <NAME> |
# | Matrícula | A57622988 |
# | E-mail | <a href="mailto:<EMAIL>"><EMAIL></a>|
# | <a href="https://github.com/ldaniel"><img src="https://avatars2.githubusercontent.com/u/205534?s=460&v=4" title="ldaniel" width="50" height="50"></a> | GitHub: https://github.com/ldaniel |
# | <a href="https://www.kaggle.com/leandrodaniel"><img src="https://avatars2.githubusercontent.com/u/205534?s=460&v=4" title="ldaniel" width="50" height="50"></a> | Kaggle: https://www.kaggle.com/leandrodaniel |
# ## Enunciado do Trabalho
#
# ### Questão 1
# Tomando por base o conjunto de dados MNIST, padrão do pacote Keras, defina e elabore uma rede neural sequencial, apresentando os formatos dos tensores de entrada e saída, bem como os tensores intermediários na rede. Treine e teste o modelo de rede neural sequencial desenvolvido, de forma que ele consiga atingir uma precisão de pelo menos 97% no teste, no reconhecimento das imagens de números escritos à mão livre. Compare o desempenho da rede neural no treino utilizando dados normalizados e não normalizados após 50 épocas de treino. Aumente o número de camadas internas da rede neural e determine se isto melhora ou não a qualidade dos resultados no teste. Qual o número de camadas que você consideraria ideal?
#
# ### Questão 2
# Resolva o mesmo problema da Questão 1 utilizando o algoritmo Random Forests. Tentem trabalhar a hiperparametrização para aumento do desempenho. Compare o seu melhor resultado com o resultado obtido na Questão 1 e comente.
# # Configuração inicial
# ## Instalando o TensorFlow
#
# Criado pela equipe do Google Brain, o TensorFlow é uma biblioteca de código aberto para computação numérica e machine learning em larga escala. O TensorFlow reúne uma série de modelos e algoritmos de *machine learning* e *deep learning* (também conhecido como redes neurais) e os torna úteis por meio de uma metáfora comum. Ele usa o Python para fornecer uma API de front-end (o que aparece na tela) conveniente para criar aplicativos com a estrutura, enquanto executa esses aplicativos em C ++ de alto desempenho.
# + _kg_hide-output=false
# !pip install tensorflow-datasets
# -
# ## Definindo os pacotes requeridos
#
# Abaixo temos todos os pacotes requeridos para executar este trabalho.
# +
# basic libraries
import os
import random
import numpy as np
import matplotlib.pyplot as plt
# to perform Neural Networki analysis
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.layers import Input, Dense, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
# to perform Random Forest analysis
from sklearn.ensemble import RandomForestClassifier
# to perform Hyperopt otimization
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
# -
# ## Garantindo reprodutibilidade
#
# Como o objetivo de garantir que os resultados sejam reproduzíveis a qualquer tempo, todos os seeds serão inicializados com o mesmo número.
# +
# the answer to life the universe and everything
seed = 42

# Seed every source of randomness in use (stdlib random, NumPy, TensorFlow)
# with the same value so results are reproducible across runs.
for seeder in (random.seed, np.random.seed, tf.random.set_seed):
    seeder(seed)
# -
# # MNIST Datasets
#
# O banco de dados MNIST (do inglês, Modified National Institute of Standards and Technology database) é um grande banco de dados de dígitos manuscritos que é comumente usado para o treinamento de vários sistemas de processamento de imagens. Este banco de dados também é amplamente utilizado para treinamento e testes no campo da *machine learning*. Foi criado misturando as amostras dos conjuntos de dados originais do NIST.
#
# Os criadores acharam que, como o conjunto de dados de treinamento do NIST foi obtido de funcionários do American Census Bureau, enquanto o conjunto de dados de teste foi coletado de estudantes americanos do ensino médio, não era adequado para experimentos de aprendizado de máquina. Além disso, as imagens em preto e branco do NIST foram normalizadas para caber em uma caixa delimitadora de 28x28 pixels e anti-alias, o que introduziu níveis de escala de cinza.
#
# O MNIST contém 60.000 imagens de treinamento e 10.000 imagens de teste. Metade do conjunto de treinamento e metade do conjunto de testes foram retirados do conjunto de dados de treinamento do NIST, enquanto a outra metade do conjunto de treinamento e a outra metade do conjunto de testes foram retirados do conjunto de dados de teste do NIST.
# +
# defining train dataset
# defining train dataset
# batch_size=-1 loads the whole split into memory as one batch and
# as_supervised=True yields (image, label) pairs
x_train, y_train = tfds.as_numpy(
    tfds.load('mnist',
              batch_size = -1,
              split = 'train',
              as_supervised = True))

# defining test dataset
x_test, y_test = tfds.as_numpy(
    tfds.load('mnist',
              batch_size = -1,
              split = 'test',
              as_supervised = True))
# -
# ## Checando o dataset do MNIST
#
# Para averiguar se o dataset foi carregado, será utilizado o código abaixo para checar algumas imagens do dataset de treino.
# +
plt.rcParams.update({'font.size': 16})

fig = plt.figure(figsize = (6, 6))
columns = 6
rows = 4

# draw a 6x4 grid of randomly chosen training digits, titled with their labels
for i in range(1, columns * rows + 1):
    rnd = np.random.randint(0, len(x_train))
    # reshape to (28, 28), dropping the trailing channel axis for imshow
    img = np.reshape(x_train[rnd], (28, 28))
    fig.add_subplot(rows, columns, i)
    plt.title(y_train[rnd])
    plt.axis('off')
    plt.imshow(img, cmap = 'gray')
plt.show()
# -
# ## Definindo os datasets de treino e teste
#
# Os datasets de treino e teste serão criados visando a transformação do shape dos tensores de labels **(60000,)** para **(60000, 1)**, bem como a transformação dos lables para dummies utilizando a função **to_categorical()** e, por fim, a normalização dos dados entre 0 e 1.
# +
# reshape the label tensors from (n,) to (n, 1)
y_train = np.expand_dims(y_train, axis = 1)
y_test = np.expand_dims(y_test, axis = 1)

# one-hot encode the labels over the 10 digit classes
y_train = to_categorical(y_train, num_classes = 10)
y_test = to_categorical(y_test, num_classes = 10)

# normalise pixel intensities from [0, 255] to [0, 1]
x_train = x_train / 255
x_test = x_test / 255
# -
# # Redes Neurais
#
# ## Iniciando com uma rede neural simples
#
# Como parte da **Questão 1**, será definida uma rede neural sequencial simples, com as seguintes características:
#
# * **Hiden layers:**
# - **Total**: 1
# - **Neurons:** 512
# - **Activation function:** RELU
# - **Dropout:** 30%
# * **Output layer:**
# - **Neurons:** 10 neurons
# - **Activation function:** SOFTMAX
# +
# Simple network: flatten -> one dense hidden layer (512 units, ReLU, 30%
# dropout) -> 10-way softmax output.
i = Input(x_train.shape[1:])
a = Flatten()(i)
a = Dense(512, activation = 'relu')(a)
a = Dropout(0.3)(a)
a = Dense(10, activation = 'softmax')(a)
model_NNS = Model(i ,a)
# -
# Na sequência, o tensor é compilado e configurado para o treino e seus resultados exibidos para conferência.
# +
# Compile with RMSprop; categorical cross-entropy matches the one-hot labels.
model_NNS.compile(optimizer = 'rmsprop',
                  loss = "categorical_crossentropy",
                  metrics = ["accuracy"])
# print the layer-by-layer summary (tensor shapes and parameter counts)
model_NNS.summary(line_length = None,
                  positions = None,
                  print_fn = None)
# -
# ## Criando uma nova rede neural
#
# Uma nova rede será criada com as seguintes características:
#
# * **Hiden layers:**
# - **Total**: 3
# - **Neurons:** 512, 256, 128, 64
# - **Activation function:** RELU
# - **Dropout:** 30%, 20%, 20%, 20%
# * **Output layer:**
# - **Neurons:** 10 neurons
# - **Activation function:** SOFTMAX
# +
# Deeper network: four dense hidden layers (512/256/128/64, ReLU) with dropout
# after each, ending in a 10-way softmax.
i = Input(x_train.shape[1:])
b = Flatten()(i)
b = Dense(512, activation = 'relu')(b)
b = Dropout(0.3)(b)
b = Dense(256, activation = 'relu')(b)
b = Dropout(0.2)(b)
b = Dense(128, activation = 'relu')(b)
b = Dropout(0.2)(b)
b = Dense(64, activation = 'relu')(b)
b = Dropout(0.2)(b)
b = Dense(10, activation = 'softmax')(b)
model_NNC = Model(i ,b)
# -
# Novamente, o tensor é compilado e configurado para o treino e seus resultados exibidos para conferência.
# +
# Same compilation settings as the simple network, for a fair comparison.
model_NNC.compile(optimizer = 'rmsprop',
                  loss = "categorical_crossentropy",
                  metrics = ["accuracy"])
model_NNC.summary(line_length = None,
                  positions = None,
                  print_fn = None)
# -
# ## Criando uma nova rede neural convolucional (CNN)
#
# Uma Rede Neural Convolucional (do inglês, Convolutional Neural Network ou **CNN**) é um algoritmo de Deep Learning que pode captar uma imagem de entrada, atribuir importância (pesos e vieses que podem ser aprendidos) a vários aspectos / objetos da imagem e ser capaz de diferenciar um do outro. O pré-processamento exigido em uma **CNN** é muito menor em comparação com outros algoritmos de classificação. Enquanto nos métodos primitivos os filtros são feitos à mão, com treinamento suficiente, as CNNs têm a capacidade de aprender esses filtros / características.
#
# A arquitetura de uma **CNN** é análoga àquela do padrão de conectividade de neurônios no cérebro humano e foi inspirada na organização do Visual Cortex. Os neurônios individuais respondem a estímulos apenas em uma região restrita do campo visual conhecida como Campo Receptivo. Uma coleção desses campos se sobrepõe para cobrir toda a área visual.
#
# Uma nova rede convolucional será criada com as características definidas, conforme código a seguir.
# +
# Convolutional network: four conv stages (32 -> 64 -> 128 -> 256 filters),
# each stage = two 3x3 convolutions with batch normalisation followed by 2x2
# max-pooling, then the same dense head as the deeper dense network.
i = Input(x_train.shape[1:])

# stage 1: 32 filters
c = Conv2D(32, (3,3), activation ='relu', padding = 'same')(i)
c = BatchNormalization()(c)
c = Conv2D(32, (3,3), activation ='relu', padding = 'same')(c)
c = BatchNormalization()(c)
c = MaxPooling2D(2,2)(c)

# stage 2: 64 filters
c = Conv2D(64, (3,3), activation ='relu', padding = 'same')(c)
c = BatchNormalization()(c)
c = Conv2D(64, (3,3), activation ='relu', padding = 'same')(c)
c = BatchNormalization()(c)
c = MaxPooling2D(2,2)(c)

# stage 3: 128 filters
c = Conv2D(128, (3,3), activation ='relu', padding = 'same')(c)
c = BatchNormalization()(c)
c = Conv2D(128, (3,3), activation ='relu', padding = 'same')(c)
c = BatchNormalization()(c)
c = MaxPooling2D(2,2)(c)

# stage 4: 256 filters
c = Conv2D(256, (3,3), activation ='relu', padding = 'same')(c)
c = BatchNormalization()(c)
c = Conv2D(256, (3,3), activation ='relu', padding = 'same')(c)
c = BatchNormalization()(c)
c = MaxPooling2D(2,2)(c)

# dense classification head
c = Flatten()(c)
c = Dense(512, activation = 'relu')(c)
c = Dropout(0.3)(c)
c = Dense(256, activation = 'relu')(c)
c = Dropout(0.2)(c)
c = Dense(128, activation = 'relu')(c)
c = Dropout(0.2)(c)
c = Dense(64, activation = 'relu')(c)
c = Dropout(0.2)(c)
c = Dense(10, activation = 'softmax')(c)
model_CNN = Model(i, c)
# -
# A rede convolucional será verificada, posteriormente, quanto a acurácia de classificação das imagens.
# +
# Same compilation settings as the dense networks, for a fair comparison.
model_CNN.compile(optimizer = 'rmsprop',
                  loss = "categorical_crossentropy",
                  metrics = ["accuracy"])
model_CNN.summary(line_length = None,
                  positions = None,
                  print_fn = None)
# -
# ## Treinando os modelos de redes neurais
#
# Os modelos serão treinados e testados de forma que ele consiga atingir, conforme solicitado pelo enunciado do trabalho:
#
# `"(...) uma precisão de pelo menos 97% no teste, no reconhecimento das imagens de números escritos à mão livre (...)"`
#
# Na sequência, o modelo terá o desempenho comparado a rede neural no treino utilizando dados normalizados e não normalizados, após 50 épocas de treino.
# +
batch_size = 2048

# Light data augmentation: small rotations, shifts and zooms. Digits must not
# be flipped, since a flipped digit changes its meaning.
train_gen = ImageDataGenerator(rotation_range = 10,
                               horizontal_flip = False,
                               vertical_flip = False,
                               width_shift_range = 0.1,
                               height_shift_range = 0.1,
                               rescale = 1.,
                               zoom_range = 0.2,
                               fill_mode = 'nearest',
                               cval = 0)

train_generator_NNS = train_gen.flow(x_train, y_train, batch_size)
# used for training the network WITHOUT normalised data (pixels back to 0-255)
train_generator_NNS_N = train_gen.flow(x_train * 255, y_train, batch_size)
train_generator_NNC = train_gen.flow(x_train, y_train, batch_size)
train_generator_CNN = train_gen.flow(x_train, y_train, batch_size)

steps_per_epoch = x_train.shape[0] // batch_size

def _best_val_loss_checkpoint(file_name):
    """Build a ModelCheckpoint that keeps only the weights with the best
    (lowest) validation loss; factors out the configuration that was
    duplicated four times."""
    return ModelCheckpoint(file_name,
                           monitor = 'val_loss',
                           verbose = 0,
                           save_best_only = True,
                           mode = 'auto')

checkpoint_NNS = _best_val_loss_checkpoint('model_NNS.h5')
# checkpoint used for training the network without data normalisation
checkpoint_NNS_N = _best_val_loss_checkpoint('model_NNS_N.h5')
checkpoint_NNC = _best_val_loss_checkpoint('model_NNC.h5')
checkpoint_CNN = _best_val_loss_checkpoint('model_CNN.h5')
# -

# setting the number of epochs
epochs = 50
# + _kg_hide-output=true
# %%time
# Train the simple network on augmented, normalised data; the checkpoint keeps
# only the weights with the best validation loss.
history_NNS = model_NNS.fit(train_generator_NNS,
                            validation_data = (x_test, y_test),
                            steps_per_epoch = steps_per_epoch,
                            epochs = epochs,
                            callbacks = [checkpoint_NNS])
# -
# Seguindo a proposta da **Questão 1**, será realizada uma execução para avaliar o treino da rede com dados não normalizados.
# + _kg_hide-output=true
# %%time
# NOTE(review): this reuses `model_NNS`, whose weights were already trained on
# the normalised data above, so the "non-normalised" run does not start from
# scratch -- confirm this is intended for the normalised-vs-raw comparison.
history_NNS_N = model_NNS.fit(train_generator_NNS_N,
                              validation_data = (x_test * 255, y_test),
                              steps_per_epoch = steps_per_epoch,
                              epochs = epochs,
                              callbacks = [checkpoint_NNS_N])
# -
# Na sequência, será realizada uma execução para avaliar o treino da rede com mais camadas para determinarmos se isto melhora ou não a qualidade dos resultados no teste.
# + _kg_hide-output=true
# %%time
# Train the deeper dense network under the same regime as the simple one.
history_NNC = model_NNC.fit(train_generator_NNC,
                            validation_data = (x_test, y_test),
                            steps_per_epoch = steps_per_epoch,
                            epochs = epochs,
                            callbacks = [checkpoint_NNC])
# -
# Por fim, será realizada uma execução para avaliar o treino da rede neural convolucional.
# + _kg_hide-output=true
# %%time
# Train the convolutional network under the same regime as the dense models.
history_CNN = model_CNN.fit(train_generator_CNN,
                            validation_data = (x_test, y_test),
                            steps_per_epoch = steps_per_epoch,
                            epochs = epochs,
                            callbacks = [checkpoint_CNN])
# -

# Reload the best weights saved by each checkpoint, so the evaluation below
# uses the best epoch rather than the last one.
model_NNS = tf.keras.models.load_model('model_NNS.h5')
model_NNS_N = tf.keras.models.load_model('model_NNS_N.h5')
model_NNC = tf.keras.models.load_model('model_NNC.h5')
model_CNN = tf.keras.models.load_model('model_CNN.h5')
# ## Comparando o desempenho das 4 redes neurais
#
# Dos 4 modelos desenvolvidos é possível verificar que o modelo convolucional (CNN) é o que apresenta maior acurácia e maior velocidade de convergência dos parâmetros. Os dados normalizados não apresentaram ganhos ou perdas expressivas ao modelo.
# +
def pred(model, x_test):
    """Return the predicted class label (argmax over the predicted class
    probabilities) for every sample in *x_test*."""
    return np.argmax(model.predict(x_test), axis = 1)
# Per-model classification report and overall accuracy on the test set.
# The \033[..m sequences are ANSI escape codes that colour the headings.
print('\033[1;36m[ Rede Neural Simples (Normalizada) -----------------------] \033[0;0m')
print(classification_report(np.argmax(y_test, axis = 1), pred(model_NNS, x_test)))
print("Accuracy: %.4f" % accuracy_score(np.argmax(y_test, axis = 1), pred(model_NNS, x_test)))
print('\n\r')

print('\033[1;36m[ Rede Neural Simples (Não Normalizada) -------------------] \033[0;0m')
# the non-normalised model is evaluated on non-normalised inputs (x_test * 255)
print(classification_report(np.argmax(y_test, axis = 1), pred(model_NNS_N, x_test * 255)))
print("Accuracy: %.4f" % accuracy_score(np.argmax(y_test, axis = 1), pred(model_NNS_N, x_test * 255)))
print('\n\r')

print('\033[1;36m[ Rede Neural Complexa ------------------------------------] \033[0;0m')
print(classification_report(np.argmax(y_test, axis = 1), pred(model_NNC, x_test)))
print("Accuracy: %.4f" % accuracy_score(np.argmax(y_test, axis = 1), pred(model_NNC, x_test)))
print('\n\r')

print('\033[1;36m[ Rede Neural Convolucional (CNN) -------------------------] \033[0;0m')
print(classification_report(np.argmax(y_test, axis = 1), pred(model_CNN, x_test)))
print("Accuracy: %.4f" % accuracy_score(np.argmax(y_test, axis = 1), pred(model_CNN, x_test)))
print('\n\r')
# +
plt.rcParams.update({'font.size': 16})

def plot_confusion_mtx(model, x_test, plot_tittle):
    """Plot the 10x10 digit confusion matrix of *model* over *x_test*,
    titled *plot_tittle*; reads the one-hot `y_test` from notebook globals."""
    pred_prob = model.predict(x_test)
    pred = np.argmax(pred_prob, axis = 1)
    CM = confusion_matrix(np.argmax(y_test, axis = 1), pred)
    plot_confusion_matrix(conf_mat = CM, figsize = (16, 8))
    plt.title(plot_tittle)
    plt.xticks(range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    plt.yticks(range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    plt.show()

plot_confusion_mtx(model_NNS, x_test, 'Rede Neural Simples (Normalizada)')
# the non-normalised model gets non-normalised inputs
plot_confusion_mtx(model_NNS_N, x_test * 255, 'Rede Neural Simples (Não Normalizada)')
plot_confusion_mtx(model_NNC, x_test, 'Rede Neural Complexa')
plot_confusion_mtx(model_CNN, x_test, 'Rede Neural Convolucional (CNN)')
# +
# Split the CNN's test predictions into hits and misses via boolean masks.
y_true = np.argmax(y_test, axis = 1)
pred_prob = model_CNN.predict(x_test)
y_pred = np.argmax(pred_prob, axis = 1)
errors = (y_pred - y_true != 0)   # mask of misclassified samples
hits = (y_pred - y_true == 0)     # mask of correctly classified samples
y_test_erros = y_pred[errors]
y_true_errors = y_true[errors]
x_test_errors = x_test[errors]
y_test_hits = y_pred[hits]
y_true_hits = y_true[hits]
x_test_hits = x_test[hits]
print('\033[1;32mPerformance do modelo escolhido \033[0;0m')
print('\033[0;36mRede Neural Convolucional (CNN) \033[0;0m')
print('\033[0;36mTotal de erros: \t' + str(len(y_test_erros)) + "\033[0;36m")
print('\033[0;36mTotal de acertos: \t' + str(len(y_test_hits)) + "\033[0;36m")
# -
# Em se tratando de redes neurais não convolucionais, a aboardagem de utilização de uma única camada pode ser suficiente para resolver o problema com uma acurácia satisfatória e com um tempo de processamento não muito elevado. A adição de camadas convolucionais, tomando-se como exemplo os inúmeros testes realizados pela comunidade de cientistas de dados, mostra que resultado das redes neurais melhorar de forma expressiva quando aplicadas a imagens.
# # Random Forest
#
# Os chamados **métodos ensemble**, são construídos da mesma forma que algoritmos mais básicos, como regressão linear, árvore de decisão ou knn, por exemplo, mas possuem uma característica principal que os diferenciam, a combinação de diferentes modelos para se obter um único resultado. Essa característica torna esses algoritmos mais robustos e complexos, levando a um maior custo computacional que costuma ser acompanhando de melhores resultados.
#
# Normalmente na criação de um modelo, escolhemos o algoritmo que apresenta o melhor desempenho para os dados em questão. Podemos testar diferentes configurações deste algoritmo escolhido, gerando assim diferentes modelos, mas no fim do processo de machine learning, escolhemos apenas um. Com um método ensemble serão criados vários modelos diferentes a partir de um algoritmo, mas não escolheremos apenas um para utilização final, e sim todos.
#
# No algoritmo **Random Forest** serão criadas várias árvores de decisão,a “floresta” que ele cria é uma combinação (ensemble) de árvores de decisão, na maioria dos casos treinados com o método de bagging. A idéia principal do método de bagging é que a combinação dos modelos de aprendizado aumenta o resultado geral.
#
# Uma grande vantagem do algoritmo de florestas aleatórias é que ele pode ser utilizado tanto para tarefas de classificação quanto para regressão, o que representa a maioria dos sistemas de aprendizagem de máquina atuais.
# ## Reiniciando os datasets de treino e test
#
# Antes de iniciarmos a análise com o **Random Forest**, vamos reiniciar os datasets de treino e teste para sua condição inicial, a fim de termos os mesmos parâmetros de partida para analisarmos a performance, posteriormente. Também será realizados o *reshape* dos tensores para que cada pixel represente uma variável do modelo.
# +
# Reload MNIST from scratch (integer labels, un-normalised pixels) so the
# Random Forest experiments start from the same baseline as the networks.
x_train, y_train = tfds.as_numpy(
                        tfds.load('mnist',
                        batch_size = -1,
                        split = 'train',
                        as_supervised = True))
x_test, y_test = tfds.as_numpy(
                        tfds.load('mnist',
                        batch_size = -1,
                        split = 'test',
                        as_supervised = True))
# reshaping tensors to represent the pixels
# Flatten each 28x28x1 image into a 784-feature row (one column per pixel).
x_train = x_train.reshape([x_train.shape[0], -1]).astype('float32')
x_test = x_test.reshape([x_test.shape[0], -1]).astype('float32')
x_train.shape
# -
# ## Criando e executando o modelo Random Forest (não otimizado)
#
# Conforme proposta da **Questão 2**, será criado um modelo de *Random Forest* utilizando alguns hiperparâmetros para uma análise inicial.
# +
# %%time
# Baseline (un-tuned) Random Forest; n_jobs=-1 uses all CPU cores and
# random_state pins the run for reproducibility.
RF = RandomForestClassifier(n_estimators = 100,
                            criterion = 'gini',
                            max_depth = 256,
                            min_samples_split = 15,
                            min_samples_leaf = 10,
                            n_jobs = -1,
                            random_state = seed)
RF.fit(x_train, y_train)
# -
# A acurácia do modelo é verificada a seguir.
# +
# Classification report and accuracy of the un-tuned forest on the test set
# (integer labels here, so no argmax is needed).
pred = RF.predict(x_test)
print('\033[1;36m[ Random Forest (não otimizado) ---------------------------] \033[0;0m')
print(classification_report(y_test, pred))
print("Accuracy: %.4f" % accuracy_score(y_test, pred))
print('\n\r')
# +
plt.rcParams.update({'font.size': 16})
# Redefinition of plot_confusion_mtx for sklearn models: predict() already
# returns class labels, so no probability argmax / one-hot handling.
def plot_confusion_mtx(model, x_test, plot_tittle):
    """Plot the confusion matrix of a sklearn classifier on x_test.

    NOTE: reads the notebook-global ``y_test`` (integer labels here).
    """
    pred= model.predict(x_test)
    CM = confusion_matrix(y_test, pred)
    plot_confusion_matrix(conf_mat = CM, figsize = (16, 8))
    plt.title(plot_tittle)
    plt.xticks(range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    plt.yticks(range(10), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    plt.show()
plot_confusion_mtx(RF, x_test, 'Random Forest (não otimizado)')
# +
# Hit/miss breakdown for the un-tuned forest (same mask pattern as the CNN).
y_pred = RF.predict(x_test)
errors = (y_pred - y_test != 0)
hits = (y_pred - y_test == 0)
y_test_erros = y_pred[errors]
y_true_errors = y_test[errors]
x_test_errors = x_test[errors]
y_test_hits = y_pred[hits]
y_true_hits = y_test[hits]
x_test_hits = x_test[hits]
print('\033[1;32mPerformance do modelo \033[0;0m')
print('\033[0;36mRandom Forest (não otimizado) \033[0;0m')
print('\033[0;36mTotal de erros: \t' + str(len(y_test_erros)) + "\033[0;36m")
print('\033[0;36mTotal de acertos: \t' + str(len(y_test_hits)) + "\033[0;36m")
# -
# ## Otimizando o modelo Random Forest com hiperparametrização
#
# O Hyperopt é uma biblioteca Python para otimização serial e paralela em espaços de pesquisa "estranhos", que podem incluir dimensões com valor real, discretas e condicionais. O código a seguir inicializa um `space` de configuração de hiperparâmetros.
# Hyperopt search space.  hp.quniform yields floats (cast to int in
# hyper_RF); hp.choice yields an *index* into its option list.
hyper_space = {'n_estimators': hp.quniform('n_estimators', 25, 500, 5),
               'criterion': hp.choice('criterion', ["gini", "entropy"]),
               'max_depth': hp.quniform('max_depth', 1, 100, 1),
               'min_samples_split': hp.quniform('min_samples_split', 5, 50, 1),
               'min_samples_leaf': hp.quniform('min_samples_leaf', 2, 20, 1)}
# Em seguida, será definido o modelo em uma função que utiliza os valores presentes em nosso espaço de busca, configurado anteriormente.
def hyper_RF(hyper_space):
    """Hyperopt objective: fit a RandomForest with the sampled parameters
    and return its loss (1 - test accuracy) in hyperopt's result format.

    Reads the notebook globals x_train/y_train/x_test/y_test/seed and
    updates the global ``best_score``.  hp.quniform produces floats, hence
    the int() casts.  NOTE(review): the `acc < best_score` update only
    fires if best_score starts above the minimum achievable loss.
    """
    global best_score
    RF = RandomForestClassifier(n_estimators = int(hyper_space['n_estimators']),
                                criterion = hyper_space['criterion'],
                                max_depth = int(hyper_space['max_depth']),
                                min_samples_split = int(hyper_space['min_samples_split']),
                                min_samples_leaf = int(hyper_space['min_samples_leaf']),
                                n_jobs = -1,
                                random_state = seed)
    RF.fit(x_train, y_train)
    pred = RF.predict(x_test)
    acc = 1 - accuracy_score(y_test, pred)
    if (acc < best_score):
        best_score = acc
    return {'loss': acc, 'status': STATUS_OK }
# Na sequência, será iniciado o objeto `Trials` que irá armazenar o resultdado de cada uma das **50** interações do processo de otimização realizado pela função `fmin`.
# +
# %%time
trials = Trials()
neval = 50
# BUG FIX: best_score previously started at 0.  hyper_RF only records a new
# best via `acc < best_score`, and its loss (1 - accuracy) is never negative,
# so best_score could never be updated.  Start at the worst possible loss.
best_score = 1.0
# Run 50 TPE-guided evaluations of hyper_RF over hyper_space; each trial is
# recorded in `trials` and the best hyper-parameter assignment is returned.
best_hyper = fmin(fn = hyper_RF,
                  space = hyper_space,
                  algo = tpe.suggest,
                  max_evals = neval,
                  trials = trials,
                  rstate = np.random.RandomState(seed))
best_hyper
# -
# Por fim, será realizado o treinamento de um modelo **Random Forest** com os melhores hiperparâmetros encontrados no espaço de busca executado anteriormente.
# +
# Retrain a forest with the best hyper-parameters found.  hp.choice returns
# an index, hence the lookup into the ["gini", "entropy"] option list.
OPT_RF = RandomForestClassifier(n_estimators = int(best_hyper['n_estimators']),
                                criterion = ["gini", "entropy"][best_hyper['criterion']],
                                max_depth = int(best_hyper['max_depth']),
                                min_samples_split = int(best_hyper['min_samples_split']),
                                min_samples_leaf = int(best_hyper['min_samples_leaf']),
                                n_jobs = -1,
                                random_state = seed)
OPT_RF.fit(x_train, y_train)
# -
# A acurácia do modelo de **Random Forest** com hiperparâmetros é verificada, de forma semelhante aos demais modelos.
# +
# Report and confusion matrix for the tuned forest on the test set.
pred = OPT_RF.predict(x_test)
print('\033[1;36m[ Random Forest (otimizado) -------------------------------] \033[0;0m')
print(classification_report(y_test, pred))
print("Accuracy: %.4f" % accuracy_score(y_test, pred))
print('\n\r')
# -
plot_confusion_mtx(OPT_RF, x_test, 'Random Forest - Optimized')
# +
# Hit/miss breakdown for the tuned forest (same mask pattern as above).
y_pred = OPT_RF.predict(x_test)
errors = (y_pred - y_test != 0)
hits = (y_pred - y_test == 0)
y_test_erros = y_pred[errors]
y_true_errors = y_test[errors]
x_test_errors = x_test[errors]
y_test_hits = y_pred[hits]
y_true_hits = y_test[hits]
x_test_hits = x_test[hits]
print('\033[1;32mPerformance do modelo \033[0;0m')
print('\033[0;36mRandom Forest (otimizado) \033[0;0m')
print('\033[0;36mTotal de erros: \t' + str(len(y_test_erros)) + "\033[0;36m")
print('\033[0;36mTotal de acertos: \t' + str(len(y_test_hits)) + "\033[0;36m")
# -
# Para a etapa do trabalho baseada em **Random Forest** foram utiliadas duas abordagens, sendo a primeira com um modelo não otimizado e a segunda utilizando hiperparâmetros. Em comparação com a etapa baseada em Redes Neurais, podemos concluir que a performance ficou aquém nos modelos baseados em **Random Forest**, ademais, o tempo de processamento e hardware necessários para executar este tipo de modelo é muito superior aos das Redes Neurais.
# # Referências
#
# * https://www.tensorflow.org/api_docs/python/tf/keras
# * http://deeplearningbook.com.br/introducao-as-redes-neurais-convolucionais/
# * https://didatica.tech/o-que-e-e-como-funciona-o-algoritmo-randomforest/
# * https://medium.com/machina-sapiens/o-algoritmo-da-floresta-aleat%C3%B3ria-3545f6babdf8
|
notebooks/fgv_assignments/individual/leandro-fgv-mba-trabalho-de-analise-preditiva-avancada.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # EOSC 582 Assignment II (Petty, p128, 154, 168) - Due Feb 13
__author__ = 'Yingkai (<NAME>'
__email__ = '<EMAIL>'
# # Content
# 1. [**Problem 6.9**](#Problem-6.9)
# 1. [**Problem 6.28**](#Problem-6.28)
# 1. [**Problem 7.4**](#Problem-7.4)
import numpy as np
import matplotlib.pyplot as plt
from __future__ import print_function
% matplotlib inline
# # Problem 6.9
# **Q1: Calculate $\mathrm{T_B}$**
#
# We can use inverse Planck function for $\mathrm{T_B}$:
#
# \begin{equation*}
# T_B\left(\lambda, L_e\right) = \frac{2h/k_B}{\lambda \ln{\left( 2hc^2/\lambda^5 L_e+1 \right)}}
# \end{equation*}
#
# $h$: Planck constant; $k_B$: Boltzmann constant; $c$: Speed of light.
# Given conditions:
wave_length = 12*1e-6 # m
radiance = 6.2*1e6 # W/m^2/m/sr
# Constants:
c=2.99792458e+08 # m/s, Speed of light
h=6.62606876e-34 # Js, Planck constant
kb=1.3806503e-23 # J/K, Boltzman constant
# Calculate $\mathrm{T_B}$
# Inverse Planck function: solve B(lambda, T) = L_e for T.
TB = (h*c/kb)/(wave_length*np.log((2 * h * c**2)/(wave_length**5 * radiance) + 1))
print('Brightness Temperature: {}'.format(TB))
# ** Q2: Calculate real temperature by using emissivity**
#
# If the surface is in a thermal equilibrium state, we can use *Kirchhoff's law* and get the following relation:
#
# \begin{equation*}
# L_e = \epsilon \cdot L_b
# \end{equation*}
#
# $\mathrm{L_e}$: "true" radiance; $\mathrm{L_b}$: "equvilant" black body radiance.
#
# Thus we get the right black body radiance for inverse Planck function.
emiss = 0.9
# Kirchhoff's law: the observed radiance is emissivity times the equivalent
# black-body radiance, so divide it out before inverting Planck's function.
radiance = radiance / emiss
T_true = (h*c/kb)/(wave_length*np.log((2 * h * c**2)/(wave_length**5 * radiance) + 1))
# BUG FIX: this print previously reported TB (the brightness temperature
# computed above) instead of the true temperature just calculated.
print('True Temperature: {}'.format(T_true))
# ** Q3: Ratio of brightness temperature and true temperature **
TB/T_true
# The ratio is **not equal** to emissivity, and we can prove that by play with Planck function.
# # Problem 6.28
# Channel frequencies (GHz) and complex refractive indices of the surface
# at 10 degC (N_10) and 20 degC (N_20), one entry per channel.
Freq = np.array([ 6.93       , 10.65       , 18.70       , 23.80       , 36.50       , 89.00       ])
N_10 = np.array([8.095+2.371j, 7.431+2.708j, 6.164+2.980j, 5.566+2.958j, 4.575+2.721j, 3.115+1.842j])
N_20 = np.array([8.211+2.144j, 7.745+2.416j, 6.712+2.819j, 6.151+2.901j, 5.123+2.831j, 3.433+2.083j])
# ** Q1: Calculate brightness temperature for all cases**
#
# Since the surface is smooth, we can use *Fresnel relations* for reflectivity:
#
# * For vertical polarization:
# \begin{equation*}
# R_p = \left| \frac{cos \Phi_t - Ncos \Phi_i}{cos \Phi_t + Ncos \Phi_i} \right|^2
# \end{equation*}
# * For horizontal polarization:
# \begin{equation*}
# R_s = \left| \frac{cos \Phi_i - Ncos \Phi_t}{cos \Phi_i + Ncos \Phi_t} \right|^2
# \end{equation*}
#
# Here $\mathrm \Phi_i = 55 ^ \circ $. According to *Snell's Law*:
# \begin{equation*}
# \Phi_t = arcsin\left( \frac{sin\left( \Phi_i \right)}{N} \right)
# \end{equation*}
#
# Then we can calculate $\mathrm{T_B}$ as:
#
# * for horizontal channel:
#
# \begin{equation*}
# T_{B} = \epsilon_s T = \left( 1 - R_s \right) T
# \end{equation*}
#
# * for vertical channel:
#
# \begin{equation*}
# T_{B} = \epsilon_p T = \left( 1 - R_p \right) T
# \end{equation*}
def TBright_calc(theta_i, N, T):
    """Brightness temperatures (horizontal, vertical) of a smooth surface.

    Fresnel reflectivities are computed for incidence angle ``theta_i``
    (radians) and complex refractive index ``N``; Snell's law gives the
    transmitted angle.  Emissivity is 1 - reflectivity, so TB = (1 - R) * T.
    """
    theta_t = np.arcsin(np.sin(theta_i) / N)
    cos_i, cos_t = np.cos(theta_i), np.cos(theta_t)
    refl_p = np.abs((cos_t - N * cos_i) / (cos_t + N * cos_i)) ** 2
    refl_s = np.abs((cos_i - N * cos_t) / (cos_i + N * cos_t)) ** 2
    return T * (1 - refl_s), T * (1 - refl_p)
theta_i = 55./360*2*np.pi  # 55 degree incidence angle, converted to radians
# calculate all the cases in a for-loop.
TBH = np.empty([len(Freq), 2]) # horizontal channel for 10 and 20 degC cases
TBV = np.empty([len(Freq), 2])
# Column 0 holds the 10 degC case, column 1 the 20 degC case.
for i in range(len(Freq)):
    TBH10, TBV10 = TBright_calc(theta_i, N_10[i], 10+273.15)
    TBH20, TBV20 = TBright_calc(theta_i, N_20[i], 20+273.15)
    TBH[i, :] = [TBH10, TBH20]
    TBV[i, :] = [TBV10, TBV20]
# Show the result.
print('Brightness temperature: \n==============================\ni) Horizontal channels\n\t10degC\t\t20degC')
print(TBH)
print('ii) Vertical channels\n\t10degC\t\t20degC')
print(TBV)
print('==============================')
# ** Sensitivity, find the best channel **
#
# Here I define the sensitivity as:
#
# \begin{equation*}
# \frac{T_B(20^\circ C) - T_B(10^\circ C)}{10^\circ C}
# \end{equation*}
#
# And we believe sensible channel is good.
# Sensitivity = dTB/dT approximated over the 10 K span between the two cases.
Sen_TBH = (TBH[:, 1] - TBH[:, 0])/10
Sen_TBV = (TBV[:, 1] - TBV[:, 0])/10
# compare the results.
print('Frequency: \n{}'.format(Freq))
print('Sensitivity for horizontal channel: \n{}'.format(Sen_TBH))
print('Sensitivity for vertical channel: \n{}'.format(Sen_TBV))
# Channels with frequency higher than 23.8 GHz is not recomended, so we will use **6.93 GHz vertical** channel.
# # Problem 7.4
# We can fill the blank with the following equations:
#
# \begin{equation*}
# \beta_e = \rho k_e = \sigma_eN
# \end{equation*}
#
# \begin{equation*}
# \widetilde{\omega} = \frac{\beta_s}{\beta_e}
# \end{equation*}
#
# \begin{equation*}
# \sigma_e = k_em
# \end{equation*}
#
# \begin{equation*}
# Qe = \frac{\sigma_e}{A}
# \end{equation*}
#
# And here I just type the results:
# Answer columns for problem 7.4.  NOTE: .T on a 1-D array is a no-op; the
# arrays stay one-dimensional.
a = np.array([3.89e2, 6.6e21, 2.8e-19 , 1e-4, 0  , 7.3e-26 , 4.8e-4 , 2.83e-23, 0.187  , 0      ]).T
b = np.array([1e3   , 1e10  , 7.07e-14, 0.2 , 0.1, 1.41e-17, 1.41e-7, 1.41e-14, 1.41e-4, 1.41e-5]).T
c = np.array([0.45  , 80    , 3.14e-6 , 0.6 , 0.4, 4.19e-6 , 3.35e-4, 1.89e-6 , 1.51e-4, 6.03e-5]).T
d = np.array([150   , 1e9   , 3.14e-10, 2   , 0.9, 4.19e-12, 4.19e-3, 6.28e-10, 0.628  , 0.565  ]).T
import csv
import pandas
# NOTE: Python 2 only -- itertools.izip and binary-mode csv writing were
# removed/changed in Python 3 (use zip and open(..., 'w', newline='') there).
from itertools import izip
with open('eosc582_a2_p74.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerows(izip(a, b, c, d))
# Read the table back with labelled columns for display.
pd_table = pandas.read_csv('eosc582_a2_p74.csv', names=['(a)', '(b)', '(c)', '(d)'])
pd_table
|
EOSC_582/EOSC_582_Assignment_II_Petty_p128_p154_p168.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## The `Image` Mark
#
# `Image` is a `Mark` object, used to visualize images in standard format (png, jpg etc...), in a `bqplot` `Figure`
# It takes as input an [ipywidgets `Image` widget](https://github.com/jupyter-widgets/ipywidgets/blob/master/ipywidgets/widgets/widget_image.py)
# ### The ipywidgets Image
# +
import ipywidgets as widgets
import os
# Read the sample image as raw bytes; widgets.Image renders them directly.
image_path = os.path.abspath('../../data_files/trees.jpg')
with open(image_path, 'rb') as f:
    raw_image = f.read()
ipyimage = widgets.Image(value=raw_image, format='jpg')
ipyimage
# -
# ### Displaying the image inside a bqplot Figure
# +
from bqplot import *
# NOTE: the wildcard import brings in bqplot's Image *mark*, which is
# distinct from the ipywidgets Image widget created above.
# Create the scales for the image coordinates
scales={'x': LinearScale(), 'y': LinearScale()}
# Define the bqplot Image mark
image = Image(image=ipyimage, scales=scales)
# Create the bqplot Figure to display the mark
fig = Figure(title='Trees', marks=[image], padding_x=0, padding_y=0)
fig
# -
# ### Mixing with other marks
#
# `Image` is a mark like any other, so they can be mixed and matched together.
# Fixed scale domains so the image and the line overlay share coordinates.
scales = {'x': LinearScale(min=-1, max=2), 'y': LinearScale(min=-0.5, max=2)}
image = Image(image=ipyimage, scales=scales)
lines = Lines(x=[0, 1, 1, 0, 0], y=[0, 0, 1, 1, 0], scales=scales, colors=['red'])
fig = Figure(marks=[image, lines], padding_x=0, padding_y=0, animation_duration=1000)
fig.axes = [Axis(scale=scales['x']), Axis(scale=scales['y'], orientation='vertical')]
fig
# Its traits (attributes) will also respond dynamically to a change from the backend
# Full screen
# Stretch the image mark to cover the whole visible scale range.
image.x = [-1, 2]
image.y = [-.5, 2]
# ## Pyplot
#
# It may seem verbose to first open the image file, create an `ipywidgets` `Image`, then create the scales and so forth.
#
# The `pyplot` api does all of that for you, via the `imshow` function.
# +
import bqplot.pyplot as bqp
# pyplot shorthand: imshow builds the widget, scales and mark in one call;
# 'filename' tells it the first argument is a path to load.
bqp.figure()
bqp.imshow(image_path, 'filename')
bqp.show()
# -
#
# The signature is
#
# `bqp.imshow(image, format)`
#
# - `image` is the `Image` data, depending on the passed `format`, can be one of:
# - an instance of an ipywidgets Image
# - a file name
# - a raw byte string
# - `format`: {'widget', 'filename', ...}
# Type of the input argument.
# If not 'widget' or 'filename', must be a format supported by the
# `ipywidgets` `Image`.
#
|
examples/Marks/Object Model/Image.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# https://www.pythoncentral.io/how-to-create-a-python-package/
# -
# Package with only functions
# Import a module from a local function-only package and exercise it.
from packages import samplemodule as m
m.cube(40)
m.squared(20)
# %whos
# Inspect the imported module: its attributes and the file it came from.
dir(m)
m.__file__
# +
# Package with only classes
# -
from Animals import Mammals
myMammals = Mammals()
myMammals.printmembers()
|
14. Adv Python Packages/04. Importing module from a package.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from kafka import KafkaConsumer
import json
from influxdb import InfluxDBClient
# Kafka consumer that decodes each message's value as ASCII JSON and starts
# from the earliest available offset.
consumer = KafkaConsumer(
    bootstrap_servers='broker:9092',
    value_deserializer = lambda v: json.loads(v.decode('ascii')),
    auto_offset_reset='earliest'
)
# +
consumer.subscribe(topics='auth.log')
# client = InfluxDBClient('grafana.patodo-112924.edge-PG0.clemson.cloudlab.us', '8086', 'test')
client = InfluxDBClient('grafana', '8086', 'test')
# client.get_list_database()
# Blocking consume loop: prints each decoded message (InfluxDB write is
# left commented out).
for message in consumer:
    print(message.value)
#     client.write_points([message.value],'s','test')
# -
# Sample auth.log messages keyed by the web-server name they came from,
# used to prototype the InfluxDB point format offline.
data = [{'ws1': 'Dec 8 23:35:01 ws1 CRON[13617]: pam_unix(cron:session): session opened for user root by (uid=0)\n'},
 {'ws1': 'Dec 8 23:35:01 ws1 CRON[13617]: pam_unix(cron:session): session closed for user root\n'},
 {'ws1': 'Dec 8 23:37:14 ws1 sshd[13650]: Did not receive identification string from 192.168.3.11 port 35402\n'},
 {'ws1': 'Dec 8 23:37:44 ws1 sshd[13658]: Invalid user user from 192.168.3.11 port 45544\n'},
 {'ws1': 'Dec 8 23:37:48 ws1 sshd[13658]: Received disconnect from 192.168.3.11 port 45544:11: Normal Shutdown, Thank you for playing [preauth]\n'},
 {'ws1': 'Dec 8 23:37:48 ws1 sshd[13658]: Disconnected from invalid user user 192.168.3.11 port 45544 [preauth]\n'},
 {'ws1': 'Dec 8 23:40:01 ws1 CRON[13688]: pam_unix(cron:session): session opened for user root by (uid=0)\n'},
 {'ws1': 'Dec 8 23:40:01 ws1 CRON[13688]: pam_unix(cron:session): session closed for user root\n'}]
for message in data:
    # Each message is a single-entry dict: key = server name, value = log line.
    webserver = ''.join(list(message.keys()))
    values = ''.join(list(message.values()))
    # BUG FIX: this previously printed the undefined name `keys`
    # (NameError); the server name lives in `webserver`.
    print(webserver)
    print(values.split(" "))
    # Prototype InfluxDB point; the measurement is named after the server.
    json_body = {
        "measurement": webserver,
        "tags": {
            "host": "server01",
            "region": "us-west"
        },
        "time": "2021-11-11T23:00:00Z",
        "fields": {
            "Float_value": 0.7,
            "Int_value": 4,
            "String_value": "Text",
            "Bool_value": True
        }
    }
#     client.write_points([message.value],'s','test')
|
notebooks/consumer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# +
import tensorflow as tf
NUM_CLASSES = 228              # number of labels in the multi-label task
STARTER_LEARNING_RATE = 0.001  # initial learning rate for exponential decay
CUT_OFF = 0.184                # sigmoid threshold above which a label is predicted
DECAY_STEPS = 400000           # steps between learning-rate decays
DECAY_RATE = 0.5               # multiplicative decay factor
def alexnet_model_fn(features, labels, mode):
    """TF1 Estimator model_fn for a multi-label AlexNet-style classifier.

    Args:
        features: dict with key "x" holding the image batch.
        labels: multi-hot label matrix with NUM_CLASSES columns.
        mode: one of tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec for the requested mode.  Prediction is
        multi-label: a class is "on" when sigmoid(logit) >= CUT_OFF, and the
        loss is element-wise sigmoid cross-entropy over all classes.
    """
    # Input Layer
    # Reshape X to 4-D tensor: [batch_size, width, height, channels]
    input_layer = tf.convert_to_tensor(features["x"])
    #print("input_layer: {}".format(input_layer.shape))
    # Five convolution blocks following the classic AlexNet topology.
    conv1 = tf.layers.conv2d(inputs=input_layer,filters=96,kernel_size=[11, 11],strides=4,padding="valid",activation=tf.nn.relu)
    #print("conv1: {}".format(conv1.shape))
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[3, 3], strides=2, padding='valid')
    #print("pool1: {}".format(pool1.shape))
    conv2 = tf.layers.conv2d(inputs= pool1,filters=256,kernel_size=[5, 5],padding="same",activation=tf.nn.relu)
    #print("conv2: {}".format(conv2.shape))
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[3, 3], strides=2, padding='valid')
    #print("pool2: {}".format(pool2.shape))
    conv3 = tf.layers.conv2d(inputs=pool2,filters=384,kernel_size=[3, 3],padding="same",activation=tf.nn.relu)
    #print("conv3: {}".format(conv3.shape))
    conv4 = tf.layers.conv2d(inputs=conv3,filters=384,kernel_size=[3, 3],padding="same",activation=tf.nn.relu)
    #print("conv4: {}".format(conv4.shape))
    conv5 = tf.layers.conv2d(inputs=conv4,filters=256,kernel_size=[3, 3],padding="same",activation=tf.nn.relu)
    #print("conv5: {}".format(conv5.shape))
    pool5 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[3, 3], strides=2,padding='valid')
    #print("pool5: {}".format(pool2.shape))
    # NOTE(review): pool5 is computed but never used -- the flatten below
    # reads conv5, whose 12x12x256 size matches the reshape.  Confirm this
    # is intentional before "fixing" it: switching to pool5 would change
    # the flattened size.
    pool5_flat = tf.reshape(conv5, [-1, 12*12*256])
    #print("pool5_flat: {}".format(pool5_flat.shape))
    fc6 = tf.layers.dense(inputs=pool5_flat, units=4096, activation=tf.nn.relu)
    #print("dense1: {}".format(fc6.shape))
    dropout6 = tf.layers.dropout(inputs=fc6, rate=0.2, training=mode == tf.estimator.ModeKeys.TRAIN)
    #print("dropout6: {}".format(dropout6.shape))
    fc7 = tf.layers.dense(inputs=dropout6, units=4096, activation=tf.nn.relu)
    #print("fc7: {}".format(fc7.shape))
    dropout7 = tf.layers.dropout(inputs=fc7, rate=0.2, training=mode == tf.estimator.ModeKeys.TRAIN)
    #print("dropout7: {}".format(dropout7.shape))
    # Logits Layer
    # Input Tensor Shape: [batch_size, 4096]
    # Output Tensor Shape: [batch_size, 228]
    logits = tf.layers.dense(inputs=dropout7, units=NUM_CLASSES)
    #print("logits: {}".format(logits.shape))
    # Generate Predictions
    predictions = {
      # Generate predictions (for PREDICT and EVAL mode)
      # Multi-label: independent per-class threshold, not an argmax.
      "classes": tf.cast(tf.sigmoid(logits) >= CUT_OFF, tf.int8, name="class_tensor"),
      # Add `sigmoid_tensor` to the graph. It is used for PREDICT and by the
      # `logging_hook`.
      "probabilities": tf.nn.sigmoid(logits, name="prob_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    #w_tensor = tf.convert_to_tensor(w)
    #w_tensor = tf.reshape(w_tensor, [-1,228])
    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits)#, weights=w_tensor)
    #loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_global_step()
        learning_rate = tf.train.exponential_decay(
            learning_rate=STARTER_LEARNING_RATE, global_step=global_step,
            decay_steps=DECAY_STEPS, decay_rate=DECAY_RATE
        )
        # NOTE(review): global_step is a TF tensor here, so this Python `if`
        # evaluates a tensor's truth value -- in TF1 graph mode that normally
        # raises; confirm this logging branch behaves as intended.
        if global_step % DECAY_STEPS == 0:
            tf.logging.info('Learning rate at global step '+str(global_step)+': '+str(learning_rate))
        optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=global_step)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # Customize evaluation metric
    # Micro-averaged F1: flatten all class decisions and compute a single
    # precision/recall over them.
    def meanfscore(predictions, labels):
        predictions = tf.reshape(tf.transpose(predictions), [-1])
        labels = tf.convert_to_tensor(labels)
        labels = tf.reshape(tf.transpose(labels), [-1])
        precision_micro, update_op_p = tf.metrics.precision(labels, predictions)
        recall_micro, update_op_r = tf.metrics.recall(labels, predictions)
        f1_mircro = tf.div(tf.multiply(2., tf.multiply(precision_micro, recall_micro)), tf.add(precision_micro, recall_micro), name="eval_tensor")
        return f1_mircro, tf.group(update_op_p, update_op_r)
    # Micro-averaged precision over the flattened class decisions.
    def precision_micro(predictions, labels):
        predictions = tf.reshape(tf.transpose(predictions), [-1])
        labels = tf.convert_to_tensor(labels)
        labels = tf.reshape(tf.transpose(labels), [-1])
        precision_micro, update_op_p = tf.metrics.precision(labels, predictions)
        return precision_micro, update_op_p
    # Micro-averaged recall over the flattened class decisions.
    def recall_micro(predictions, labels):
        predictions = tf.reshape(tf.transpose(predictions), [-1])
        labels = tf.convert_to_tensor(labels)
        labels = tf.reshape(tf.transpose(labels), [-1])
        recall_micro, update_op_r = tf.metrics.recall(labels, predictions)
        return recall_micro, update_op_r
    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "meanfscore": meanfscore(predictions["classes"], labels),
        "precision_micro": precision_micro(predictions["classes"], labels),
        "recall_micro": recall_micro(predictions["classes"], labels)}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
# +
import numpy as np
import pandas as pd
import cv2
NUM_CLASSES = 228   # number of distinct labels in the multi-label task
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
def load_images(addrs_list):
    """Load and resize the images at ``addrs_list`` into one float32 tensor.

    Returns an array of shape (len(addrs_list), IMAGE_WIDTH, IMAGE_HEIGHT, 3)
    in cv2's default channel order.  Progress is printed every 1000 images.
    """
    images = np.empty((len(addrs_list), IMAGE_WIDTH, IMAGE_HEIGHT, 3), dtype=np.float32)
    for i, fpath in enumerate(addrs_list):
        img = cv2.imread(fpath, cv2.IMREAD_COLOR)
        # Resize to the same dimensions as the preallocated buffer (the
        # target was previously a hard-coded (224, 224) that had to be kept
        # in sync with the constants by hand).
        img = cv2.resize(img, (IMAGE_WIDTH, IMAGE_HEIGHT))
        images[i, ...] = img#.transpose(2, 0, 1)
        if i % 1000 == 0:
            print('Loading images: {}'.format(i))
    return images
def get_multi_hot_labels(df, index_list):
    """Build a multi-hot label matrix from the space-separated, 1-based
    label ids in ``df['labelId']`` for the rows selected by ``index_list``.

    Returns a uint8 array of shape (len(index_list), NUM_CLASSES).
    """
    label_id = [df['labelId'][i] for i in index_list]
    # dtype takes the *type* np.uint8; the previous np.uint8() created a
    # throwaway zero scalar whose .dtype happened to be picked up.
    labels_matrix = np.zeros([len(index_list), NUM_CLASSES], dtype=np.uint8)
    for row, ids in enumerate(label_id):
        for token in ids.split(' '):
            labels_matrix[row, int(token) - 1] = 1   # label ids are 1-based
    return labels_matrix
# -
import gc, datetime
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
tf.logging.set_verbosity(tf.logging.INFO)
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# +
# CSVs list one image path (and its labels) per row; only the evaluation
# split is materialised in memory up front -- training images are loaded
# chunk-by-chunk inside main().
train_df = pd.read_csv('train.csv')
validation_df = pd.read_csv('val.csv')
train_path_list = train_df['imagePath']
eval_path_list = validation_df['imagePath']
eval_data = load_images(eval_path_list)
eval_labels = get_multi_hot_labels(validation_df, list(range(validation_df.shape[0])))
# -
# Sanity-check the loaded shapes.
eval_data.shape, eval_labels.shape
validation_df.shape
train_df.shape
def main():
    """Train the AlexNet estimator in chunks and track evaluation metrics.

    Each of the 200 iterations samples 10k training images (np.random.randint
    samples indices with replacement), trains one epoch over them, evaluates
    on the pre-loaded eval set, appends the metrics to CSV logs and returns
    them as a dict of lists for plotting.  Reads the notebook globals
    train_df, train_path_list, eval_data and eval_labels.
    """
    train_iter_size = 10000
    num_iters = 200
    batch_size = 1
    eval_every_iters = 1
    np.random.seed(123)
    #train_steps = []
    #train_losses = []
    eval_steps = []
    eval_losses = []
    eval_precision = []
    eval_recall = []
    eval_meanfscore = []
    # Create the Estimator
    multilabel_classifier = tf.estimator.Estimator(
        model_fn=alexnet_model_fn, model_dir="model/multilabel_alexnet_model")
    # Set up logging for predictions
    #tensors_to_log = {"probabilities": "sigmoid_tensor"}
    #tensors_to_log = {"meanfscore": "eval_tensor"}
    # NOTE: the tensor list is empty, so the hook currently logs nothing.
    tensors_to_log = []
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=1000)
    for k in range(num_iters):
        print('Trained images so far: {}'.format(k * train_iter_size))
        # Randomly load training data and labels
        print('Loading train images..')
        random_indices = np.random.randint(0, train_df.shape[0], size=train_iter_size)
        train_paths = [train_path_list[i] for i in random_indices]
        train_data = load_images(train_paths)
        print('Loading train labels..')
        train_labels = get_multi_hot_labels(train_df, random_indices)
        # Train the model
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": train_data},
            y=train_labels,
            batch_size=batch_size,
            num_epochs=1,
            shuffle=True)
        multilabel_classifier.train(
            input_fn=train_input_fn,
            hooks=[logging_hook])
        if k % eval_every_iters == 0:
            # Evaluate the model and print results
            eval_input_fn = tf.estimator.inputs.numpy_input_fn(
                x={"x": eval_data},
                y=eval_labels,
                shuffle=False)
            eval_results = multilabel_classifier.evaluate(input_fn=eval_input_fn)
            print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
            print(eval_results)
            # Append metrics to persistent CSV logs so a crashed run keeps
            # its history.
            with open('/home/ec2-user/SageMaker/imat/model/loss_alexnet_lrdecay.csv', 'a') as loss_file:
                loss_file.write(str(eval_results['global_step'])+','+str(eval_results['loss'])+'\n')
            with open('/home/ec2-user/SageMaker/imat/model/score_alexnet_lrdecay.csv', 'a') as score_file:
                score_file.write(str(eval_results['global_step'])+','+str(eval_results['meanfscore'])+','+str(eval_results['precision_micro'])+','+str(eval_results['recall_micro'])+'\n')
            eval_steps.append(eval_results['global_step'])
            eval_losses.append(eval_results['loss'])
            eval_precision.append(eval_results['precision_micro'])
            eval_recall.append(eval_results['recall_micro'])
            eval_meanfscore.append(eval_results['meanfscore'])
        # Garbage collection
        # Drop the 10k-image chunk before loading the next one to bound
        # peak memory.
        train_data = None
        train_labels = None
        gc.collect()
    eval_track = {'eval_steps':eval_steps,
                  'eval_losses':eval_losses,
                  'eval_precision':eval_precision,
                  'eval_recall':eval_recall,
                  'eval_meanfscore':eval_meanfscore}
    return eval_track
eval_track = main()
# eval loss plot
# Validation loss versus global step across the chunked training run.
plt.figure(figsize=(12,8))
plt.plot(eval_track['eval_steps'], eval_track['eval_losses'])
plt.xlabel("Step")
plt.ylabel("Validation loss")
# eval score plot
# Micro-averaged F1 / precision / recall on the same step axis.
plt.figure(figsize=(12,8))
plt.plot(eval_track['eval_steps'], eval_track['eval_meanfscore'], label = 'meanfscore')
plt.plot(eval_track['eval_steps'], eval_track['eval_precision'], label = 'precision')
plt.plot(eval_track['eval_steps'], eval_track['eval_recall'], label = 'recall')
plt.legend()
plt.xlabel("Step")
plt.ylabel("Score")
|
notebooks/Alexnet_train.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PhysioNet/Computing in Cardiology Challenge 2020
# ## Classification of 12-lead ECGs
# ### 5. Tune Model
# # Setup Noteboook
# +
# 3rd party libraries
import os
import sys
import numpy as np
import tensorflow as tf
# Local Libraries
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))))
from kardioml.models.deepecg.utils.devices.device_check import print_device_counts
from kardioml.models.deepecg.train.train import train
from kardioml.models.deepecg.model.model import Model
from kardioml import DATA_PATH, OUTPUT_PATH, LABELS_COUNT, ECG_LEADS, FS
# Configure Notebook
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# # 1. Hyper-Parameter Search
# +
# Set data path
data_path = os.path.join(DATA_PATH, 'formatted')
# Set model name
training_run = 'hpt_15'
# Set sample length (seconds)
duration = 60
# Grid search over the hyper-parameter space: one training session per
# combination of residual depth, filter count, dropout, kernel size and fs.
for num_res_layers in [6, 7, 8, 9, 10, 11, 12]:
    for num_filts in [128, 256, 512, 1024]:
        for drop_rate in [0.3]:
            for kernel_size in [3]:
                for fs in [300]:
                    # Unique tag identifying this hyper-parameter combination.
                    experiment_name = 'num_res_layers-{}_num_filts-{}_drop_rate-{}_kernel_size-{}_fs-{}'.format(
                        num_res_layers, num_filts, drop_rate, kernel_size, fs)
                    print('Hyper-Parameter: {}'.format(experiment_name))
                    # Set data path
                    lookup_path = os.path.join(DATA_PATH, 'deepecg')
                    # Print devices
                    print_device_counts()
                    # Set save path for graphs, summaries, and checkpoints
                    output_path = os.path.join(OUTPUT_PATH, training_run)
                    os.makedirs(output_path, exist_ok=True)
                    # Set model name
                    model_name = 'sess_{}'.format(experiment_name)
                    # Maximum number of checkpoints to keep
                    max_to_keep = 1
                    # Set random states
                    seed = 0
                    # Get training dataset dimensions (samples per record, leads)
                    length, channels = (int(duration * fs), 13)
                    # Number of classes
                    classes = LABELS_COUNT
                    # Choose network
                    network_name = 'DeepECGV1'
                    # Set hyper-parameters
                    hyper_params = {'num_res_layers': num_res_layers, 'drop_rate': drop_rate, 'kernel_size': kernel_size,
                                    'conv_filts': num_filts, 'res_filts': num_filts, 'skip_filts': num_filts,
                                    'dilation': True, 'fs': fs}
                    # Set network inputs
                    network_parameters = {'length': length, 'channels': channels, 'classes': classes, 'seed': seed,
                                          'hyper_params': hyper_params}
                    try:
                        # Create model
                        print('Initializing Model')
                        model = Model(model_name=model_name,
                                      network_name=network_name,
                                      network_parameters=network_parameters,
                                      save_path=output_path,
                                      data_path=data_path,
                                      lookup_path=lookup_path,
                                      max_to_keep=max_to_keep)
                        # Set hyper-parameters
                        epochs = 50
                        batch_size = 16
                        # Train model
                        print('Training Start')
                        train(model=model, epochs=epochs, batch_size=batch_size)
                        print('Training End\n')
                    except Exception as exc:
                        # A bare `except:` previously swallowed every error
                        # (including KeyboardInterrupt) with no diagnostics.
                        # Catch Exception and report the cause so the sweep
                        # continues but failures remain debuggable.
                        print('Training Failure: {} ({})'.format(experiment_name, exc))
|
kardioml/models/deepecg/notebooks/5_hyper_parameter_tuning_test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Mosaicking
import os
from time import time
import xarray as xr
import rioxarray
import rasterio
# #### Function of mosaicking
# +
def xr_build_mosaic_ds(tifs, product='band'):
    """Merge a list of GeoTIFF tiles into a single mosaicked xarray Dataset.

    Parameters:
        tifs (iterable of str): paths to the GeoTIFF tiles to mosaic.
        product (str): name assigned to each tile's DataArray; becomes the
            variable name in the merged Dataset.

    Returns:
        xarray.Dataset: the merged (mosaicked) dataset.
    """
    start = time()
    tile_arrays = []
    for tif in tifs:
        # NOTE(review): xr.open_rasterio is deprecated in recent xarray
        # releases; rioxarray.open_rasterio is the recommended replacement —
        # confirm the pinned xarray/rioxarray versions before switching.
        da = xr.open_rasterio(tif)
        # Collapse the singleton 'band' dimension and drop its coordinate.
        da = da.squeeze().drop(labels='band')
        da.name = product
        tile_arrays.append(da)
        # Progress logging: tile path and elapsed seconds so far.
        elapsed = time() - start
        print(tif, elapsed)
    # Dead code removed: the original also initialized an unused
    # `topleft_affine` variable and an unused enumerate() index.
    ds = xr.merge(tile_arrays)
    return ds
def xr_write_geotiff_from_ds(ds, out_path):
    """Write a dataset to disk as a GeoTIFF via its rioxarray accessor."""
    # Echo the dataset and the destination path before writing.
    destination_banner = f'OUTPUT=={out_path}'
    print(ds)
    print(destination_banner)
    ds.rio.to_raster(out_path)
# -
|
postprocess-avgs/other_fun_schitt/mosaicking.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # IMDB
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# NOTE(review): this notebook targets the fastai v1 API (TextDataBunch,
# language_model_learner, random_split_by_pct); fastai v2 renamed or removed
# these — confirm the pinned fastai version before running.
from fastai.text import *
# ## Preparing the data
# First let's download the dataset we are going to study. The [dataset](http://ai.stanford.edu/~amaas/data/sentiment/) has been curated by <NAME> et al. and contains a total of 100,000 reviews on IMDB. 25,000 of them are labelled as positive and negative for training, another 25,000 are labelled for testing (in both cases they are highly polarized). The remaining 50,000 is an additional unlabelled data (but we will find a use for it nonetheless).
#
# We'll begin with a sample we've prepared for you, so that things run quickly before going over the full dataset.
path = untar_data(URLs.IMDB_SAMPLE)
path.ls()
# It only contains one csv file, let's have a look at it.
df = pd.read_csv(path/'texts.csv')
df.head()
df['text'][1]
# It contains one line per review, with the label ('negative' or 'positive'), the text and a flag to determine if it should be part of the validation set or the training set. If we ignore this flag, we can create a DataBunch containing this data in one line of code:
data_lm = TextDataBunch.from_csv(path, 'texts.csv')
# By executing this line a process was launched that took a bit of time. Let's dig a bit into it. Images could be fed (almost) directly into a model because they're just a big array of pixel values that are floats between 0 and 1. A text is composed of words, and we can't apply mathematical functions to them directly. We first have to convert them to numbers. This is done in two different steps: tokenization and numericalization. A `TextDataBunch` does all of that behind the scenes for you.
#
# Before we delve into the explanations, let's take the time to save the things that were calculated.
data_lm.save()
# Next time we launch this notebook, we can skip the cell above that took a bit of time (and that will take a lot more when you get to the full dataset) and load those results like this:
data = TextDataBunch.load(path)
# ### Tokenization
# The first step of processing we make the texts go through is to split the raw sentences into words, or more exactly tokens. The easiest way to do this would be to split the string on spaces, but we can be smarter:
#
# - we need to take care of punctuation
# - some words are contractions of two different words, like isn't or don't
# - we may need to clean some parts of our texts, if there's HTML code for instance
#
# To see what the tokenizer had done behind the scenes, let's have a look at a few texts in a batch.
data = TextClasDataBunch.load(path)
data.show_batch()
# The texts are truncated at 100 tokens for more readability. We can see that it did more than just split on space and punctuation symbols:
# - the "'s" are grouped together in one token
# - the contractions are separated like this: "did", "n't"
# - content has been cleaned for any HTML symbol and lower cased
# - there are several special tokens (all those that begin by xx), to replace unknown tokens (see below) or to introduce different text fields (here we only have one).
# ### Numericalization
# Once we have extracted tokens from our texts, we convert to integers by creating a list of all the words used. We only keep the ones that appear at least twice with a maximum vocabulary size of 60,000 (by default) and replace the ones that don't make the cut by the unknown token `UNK`.
#
# The correspondence from ids to tokens is stored in the `vocab` attribute of our datasets, in a dictionary called `itos` (for int to string).
data.vocab.itos[:10]
# And if we look at what's in our datasets, we'll see the tokenized text as a representation:
data.train_ds[0][0]
# But the underlying data is all numbers
data.train_ds[0][0].data[:10]
# ### With the data block API
# We can use the data block API with NLP and have a lot more flexibility than what the default factory methods offer. In the previous example for instance, the data was randomly split between train and validation instead of reading the third column of the csv.
#
# With the data block API though, we have to manually call the tokenize and numericalize steps. This allows more flexibility, and if you're not using the defaults from fastai, the various arguments to pass will appear in the step they're relevant, so it'll be more readable.
data = (TextList.from_csv(path, 'texts.csv', cols='text')
                .split_from_df(col=2)
                .label_from_df(cols=0)
                .databunch())
# ## Language model
# Note that language models can use a lot of GPU, so you may need to decrease batchsize here.
bs=48
# Now let's grab the full dataset for what follows.
path = untar_data(URLs.IMDB)
path.ls()
(path/'train').ls()
# The reviews are in a training and test set following an imagenet structure. The only difference is that there is an `unsup` folder on top of `train` and `test` that contains the unlabelled data.
#
# We're not going to train a model that classifies the reviews from scratch. Like in computer vision, we'll use a model pretrained on a bigger dataset (a cleaned subset of wikipedia called [wikitext-103](https://einstein.ai/research/blog/the-wikitext-long-term-dependency-language-modeling-dataset)). That model has been trained to guess what the next word, its input being all the previous words. It has a recurrent structure and a hidden state that is updated each time it sees a new word. This hidden state thus contains information about the sentence up to that point.
#
# We are going to use that 'knowledge' of the English language to build our classifier, but first, like for computer vision, we need to fine-tune the pretrained model to our particular dataset. Because the English of the reviews left by people on IMDB isn't the same as the English of wikipedia, we'll need to adjust the parameters of our model by a little bit. Plus there might be some words that would be extremely common in the reviews dataset but would be barely present in wikipedia, and therefore might not be part of the vocabulary the model was trained on.
# This is where the unlabelled data is going to be useful to us, as we can use it to fine-tune our model. Let's create our data object with the data block API (next line takes a few minutes).
data_lm = (TextList.from_folder(path)
           #Inputs: all the text files in path
            .filter_by_folder(include=['train', 'test', 'unsup'])
           #We may have other temp folders that contain text files so we only keep what's in train and test
            .random_split_by_pct(0.1)
           #We randomly split and keep 10% (10,000 reviews) for validation
            .label_for_lm()
           #We want to do a language model so we label accordingly
            .databunch(bs=bs))
data_lm.save('tmp_lm')
# We have to use a special kind of `TextDataBunch` for the language model, that ignores the labels (that's why we put 0 everywhere), will shuffle the texts at each epoch before concatenating them all together (only for training, we don't shuffle for the validation set) and will send batches that read that text in order with targets that are the next word in the sentence.
#
# The line before being a bit long, we want to load quickly the final ids by using the following cell.
data_lm = TextLMDataBunch.load(path, 'tmp_lm', bs=bs)
data_lm.show_batch()
# We can then put this in a learner object very easily with a model loaded with the pretrained weights. They'll be downloaded the first time you'll execute the following line and stored in `~/.fastai/models/` (or elsewhere if you specified different paths in your config file).
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
learn.lr_find()
learn.recorder.plot(skip_end=15)
learn.fit_one_cycle(1, 1e-2, moms=(0.8,0.7))
learn.save('fit_head')
learn.load('fit_head');
# To complete the fine-tuning, we can then unfreeze and launch a new training.
learn.unfreeze()
learn.fit_one_cycle(10, 1e-3, moms=(0.8,0.7))
learn.save('fine_tuned')
# How good is our model? Well let's try to see what it predicts after a few given words.
learn.load('fine_tuned');
TEXT = "I liked this movie because"
N_WORDS = 40
N_SENTENCES = 2
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
# We not only have to save the model, but also its encoder, the part that's responsible for creating and updating the hidden state. For the next part, we don't care about the part that tries to guess the next word.
learn.save_encoder('fine_tuned_enc')
# ## Classifier
# Now, we'll create a new data object that only grabs the labelled data and keeps those labels. Again, this line takes a bit of time.
path = untar_data(URLs.IMDB)
# +
data_clas = (TextList.from_folder(path, vocab=data_lm.vocab)
             #grab all the text files in path
             .split_by_folder(valid='test')
             #split by train and valid folder (that only keeps 'train' and 'test' so no need to filter)
             .label_from_folder(classes=['neg', 'pos'])
             #label them all with their folders
             .databunch(bs=bs))
data_clas.save('tmp_clas')
# -
data_clas = TextClasDataBunch.load(path, 'tmp_clas', bs=bs)
data_clas.show_batch()
# We can then create a model to classify those reviews and load the encoder we saved before.
learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn.load_encoder('fine_tuned_enc')
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(1, 2e-2, moms=(0.8,0.7))
learn.save('first')
learn.load('first');
# Gradual unfreezing: unfreeze the last layer groups one at a time, lowering
# the learning rate range at each stage, before a final full-model fine-tune.
learn.freeze_to(-2)
learn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
learn.save('second')
learn.load('second');
learn.freeze_to(-3)
learn.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
learn.save('third')
learn.load('third');
learn.unfreeze()
learn.fit_one_cycle(2, slice(1e-3/(2.6**4),1e-3), moms=(0.8,0.7))
learn.predict("I really loved that movie, it was awesome!")
|
nbs/dl1/lesson3-imdb.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression with TensorFlow
#
# ## Steps:
# - <a href="#Introduction"> Introduction </a>
# - <a href="#Importing-Libraries"> Importing Libraries </a>
# - <a href="#Preparing-Data"> Preparing Data </a>
# - <a href="#Building-Graph"> Building Graph </a>
# - <a href="#Training-Model"> Training Model </a>
# - <a href="#Visualizing-Graph"> Visualizing Graph </a>
# - <a href="#Conclusion"> Conclusion </a>
# ### Introduction
#
# In this notebook, we'll be using the TensorFlow deep learning framework for linear Regression over a small car $ CO_2 $ emissions dataset. Our goal is to build a model that can predict a car's $ CO_2 $ emissions before the car is manufactured.
#
# Here we've some pre-recorded real-time feature columns that have a relation with the label. We'll build a computation graph and feed it features and labeled data to build our model. Then we can use the model to predict a new car's $ CO_2 $ emissions before it is manufactured, even if it has never been seen before.
# ### Importing Libraries
#
# In this section, we'll import some necessary python libraries and TensorFlow.
# NOTE(review): this notebook uses the TensorFlow 1.x API (tf.placeholder,
# tf.Session); it will not run unchanged on TensorFlow 2.x — confirm the
# pinned TF version.
# +
import numpy as np
import pandas as pd
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
# -
# ### Preparing Data
#
# In this section, we'll
# - Read our dataset using pandas as dataframe
# - Visualize data using matplotlib scatter plot
# - Clean data (if necessary)
# - Select features and label
emissions = pd.read_csv('../Datasets/CO2EMISSIONS/FuelConsumptionCo2.csv')
emissions.head()
# +
# Feature: engine size; label: CO2 emissions.
ENGINESIZE = emissions['ENGINESIZE']
CO2EMISSIONS = emissions['CO2EMISSIONS']
train_X = np.array(ENGINESIZE)
train_Y = np.array(CO2EMISSIONS)
# Number of training samples.
number = train_X.shape[0]
# -
plt.scatter(train_X, train_Y)
plt.show()
# ### Building Graph
#
# In this section, we'll
#
# - Define placeholders for input X (engine size) and label Y (CO2 emissions)
# - Define weight and bias, randomly initialized
# - Use the square error as the loss function
# - Use the gradient descent with learning rate of 0.01 to minimize the loss
# +
X = tf.placeholder(tf.float32, name="X")
Y = tf.placeholder(tf.float32, name="Y")
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")
# -
# Linear model: predictions = X * W + b
predictions = tf.add(tf.multiply(X, W), b)
# Halved mean squared error over all samples.
loss_function = tf.reduce_sum(tf.pow(predictions - Y, 2))/(2 * number)
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(loss_function)
init = tf.global_variables_initializer()
# ### Training Model
#
# In this section, we'll
#
# - Train the model and iterate 1000 epochs.
# - Feed features and labeled data to the neural network.
# - Show performance and plot model.
with tf.Session() as sess:
    sess.run(init)
    print("Start Training...")
    # Stochastic gradient descent: one parameter update per (x, y) sample.
    for epoch in range(1000):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Log the full-dataset loss every 100 epochs.
        if (epoch+1) % 100 == 0:
            c = sess.run(loss_function, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", '%04d' % (epoch+1), "Loss =",
                  "{:.5f}".format(c), "Weight =", sess.run(W), "Bias =", sess.run(b))
    print("\nOptimization Finished!\n")
    training_loss = sess.run(loss_function, feed_dict={X: train_X, Y: train_Y})
    print("Final Training Loss=", training_loss, "Weight =",
          sess.run(W), "Bias =", sess.run(b), '\n')
    # Plot the data and the fitted regression line.
    plt.plot(train_X, train_Y, 'o', label='Training Data')
    plt.plot(train_X, sess.run(W) * train_X +
             sess.run(b), label='Fitted Model')
    plt.legend()
    plt.show()
# ### Visualizing Graph
#
# Here we just show the saved computation graph generated by the TensorFlow visualization tool TensorBoard.
#
# <img src="./Images/02_Linear_Regression_with_TF_Graph.jpg">
# ### Conclusion
#
# So, the example of this notebook is focused on solving Linear Regression using deep learning. Now we can solve almost every different problem by following this approach.
|
TensorFlow/02_Linear_Regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src='./img/jupyter_logo.png' align='right' width='30%'></img>
# # Functions
#
# * [generate_geographical_subset](#generate_geographical_subset)
# <hr>
# #### Load required libraries
import xarray as xr
# <hr>
# ### <a id='generate_geographical_subset'></a> `generate_geographical_subset`
def generate_geographical_subset(xarray, latmin, latmax, lonmin, lonmax):
    """
    Clip a xarray DataArray to a rectangular geographical bounding box.

    Parameters:
        xarray (xarray DataArray): a xarray DataArray with latitude and longitude coordinates
        latmin, latmax, lonmin, lonmax (int): bounding-box limits; points on
            the boundary itself are excluded (strict inequalities)

    Returns:
        Geographical subset of a xarray DataArray.
    """
    # Build the two axis masks separately, then combine them.
    inside_lat = (xarray.latitude > latmin) & (xarray.latitude < latmax)
    inside_lon = (xarray.longitude > lonmin) & (xarray.longitude < lonmax)
    return xarray.where(inside_lat & inside_lon, drop=True)
# <hr>
# © 2020 | <NAME>
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img style="float: right" alt="Creative Commons Lizenzvertrag" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a>
|
functions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
from keras.utils import to_categorical
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# Fashion-MNIST CSVs: first column is the class label, the remaining 784
# columns are the 28x28 pixel intensities.
data_train = pd.read_csv('../input/fashionmnist/fashion-mnist_train.csv')
data_test = pd.read_csv('../input/fashionmnist/fashion-mnist_test.csv')
# +
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
X = np.array(data_train.iloc[:, 1:])
y = to_categorical(np.array(data_train.iloc[:, 0]))
#Here we split off validation data to monitor the classifier during training
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=13)
#Test data
X_test = np.array(data_test.iloc[:, 1:])
y_test = to_categorical(np.array(data_test.iloc[:, 0]))
# Reshape flat pixel rows to (samples, 28, 28, 1) for the Conv2D layers.
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_val = X_val.astype('float32')
# Scale pixel intensities from [0, 255] to [0, 1].
X_train /= 255
X_test /= 255
X_val /= 255
# +
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization  # NOTE(review): imported but never used below
batch_size = 256
num_classes = 10
epochs = 100
#input image dimensions
img_rows, img_cols = 28, 28
# Three conv blocks with increasing filter counts and dropout, followed by a
# dense head with softmax over the 10 Fashion-MNIST classes.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 kernel_initializer='he_normal',
                 input_shape=input_shape))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))
# -
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(X_val, y_val))
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
Deep Learning using Tensorflow Keras/MNIST-Fashion/fashion-mnist.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# ***
# # NumPy
# ***
# In the class discussing **Python lists** we saw how lists can be used to store several different data types. We also looked at the flexibility of lists when we changed, added and removed elements. There will be many times in your data science careers when you will want to perform operations over entire collections of values which you cannot do in lists. For example if I have a list of integers, [1,2,3,4] the list data type does not allow me to sum all the elements of the list.
#
# In this class we are going to explore the [NumPy package](https://www.numpy.org). Let's kick off by revisiting our list of employees salaries.
# A list of employees salaries
salaries = [20000, 25000, 30000, 35000, 40000, 450000, 50000]
# How do we sum this list? We can't, because Python cannot do calculations over lists. Instead, we use NumPy package and its **array()**. NumPy arrays are an alternative to lists which allow us to perform calculations over an entire array.
#
# Before going any further you will need to install the NumPy package. We discussed how to do this in the previous class so go ahead now and use PIP to install NumPy.
# +
# Start by importing numpy as np
import numpy as np
# Create a new list of employees salaries
# NOTE(review): 450000 looks like a typo for 45000 (the 2D example further
# down uses 45000) — confirm before relying on these figures.
salaries2019 = [20000, 25000, 30000, 35000, 40000, 450000, 50000]
# Pass salaries2019 to numpy array
np_salaries = np.array(salaries2019)
# Output np_salaries
np_salaries
# +
# Use the built-in sum() to sum values in np_salaries
total_salaries = sum(np_salaries)
# Output total_salaries
total_salaries
# +
# Find the middle salary (np.median gives the median, not the arithmetic mean)
average_salary = np.median(np_salaries)
average_salary
# -
# As you can see the calculations were performed across the entire array. NumPy is an incredibly powerful tool to have at your disposal. To learn more about NumPy don't forget you can always check out the documentation.
help(np.median)
# With the NumPy arrays that we have been using so far they have been of only one data type, integers. You might also have noticed the speed at which Numpy can perform its calculations. Its speed comes from an assumption. NumPy assumes that your array is of only one type. An array of integers, floats and so on. In short, Numpy arrays can be of only one data type.
# Create a mixed NumPy array
np.array(["tony", 35000, True])
# As you can see numpy converted the entire array to strings.
# Output type of np_salaries
type(np_salaries)
# NumPy arrays are their own data type which means, as we discussed in the last chapter, that they can have their own methods. Lets look at an example:
# A simple list
python_list = [1,2,3]
# NumPy Array
numpy_array = np.array([1,2,3])
# Concatenate the simple list together
python_list + python_list
# Element-wise addition of the numpy array
numpy_array + numpy_array
# As you can see, Python performed an element-wise sum of the numpy array. Be careful when working with data types, the output can sometimes be not what you expected. Apart from these differences you can work with numpy arrays in almost the same way that you work with python lists. For example we can use indexing which we've already learned to select elements from an array.
# +
# Create a new salaries array
salaries2019 = [20000, 25000, 30000, 35000, 40000, 450000, 50000]
new_salaries = np.array(salaries2019)
new_salaries
# Select an element from the array using its index
new_salaries[1]
# -
# With NumPy arrays we can also use conditional statements such as greater than **(>)** and less than **(<)**. Imagine, you've just been asked to provide an output of all your employees salaries who earn more than 30,000. How can this be done? We can use a boolean. Use this boolean array in square brackets to do sub-setting: only elements that are true are selected for a new numpy array.
# +
# Create the big_salaries boolean mask
big_salaries = (new_salaries > 30000)
# Output array
print(big_salaries)
# Output all salaries greater than 30000
print(new_salaries[big_salaries])
# -
# ## 2D NumPy Arrays
# Using NumPy we can also create multi-dimensional arrays. Let's take a look by creating two new NumPy arrays, np_salaries and np_service.
# Create np_salaries
np_salaries = np.array([20000, 25000, 30000, 35000, 40000, 45000, 50000])
# Create np_service
np_service = np.array([1,2,3,4,5,6,7])
# Print out type
print(type(np_salaries))
print(type(np_service))
# In the output above, "ndarray" stands for N-dimensional. We can create arrays of several dimensions but for now lets just focus on 2 dimensional arrays.
# +
# Combine two lists together surrounded in square brackets
np_2d_array = np.array([[20000, 25000, 30000, 35000, 40000, 45000, 50000], [1,2,3,4,5,6,7]])
# Print np_2d_array
np_2d_array
# -
# As you can see the output is rectangular data structure. Each sublist of the list corresponds to a row in the 2D numpy array. We can examine the shape of the array with the following code:
# Shape attribute providing more info on the data structure
np_2d_array.shape
# Just like we did in previous example we can still perform calculations and use sub-setting. Let's say we want to select the entire first row and the third element from that row, how? We can use indexing as we have done before.
# Select first row of np_2d_array
np_2d_array[0]
# Select first row and the third element
np_2d_array[0][2]
# What we are doing here, is first selecting a row and then from that row performing another selection. We can obtain the same results by using single square brackets and a comma.
# Select first row and the third element (single-bracket form)
np_2d_array[0,2]
# Select every row, columns 1 and 2
np_2d_array[:, 1:3]
# +
# Sum first row of np_2d_array
total_salaries = sum(np_2d_array[0])
#Print total_salaries
total_salaries
|
Learn-python/Part 1 - Introduction to Python/04-NumPy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Definition Phase
#
# ### Build MemSQL Connection
# +
import pymysql
import pandas
# Connection settings for a local MemSQL/MySQL-compatible server.
# NOTE(review): root with an empty password is development-only — confirm
# these credentials are not reused outside a local sandbox.
HOST = "127.0.0.1"
PORT = 3306
USER = "root"
PASSWORD = ""
DATABASE = "nba"
# Shared connection used by every query helper below.
conn = pymysql.connect(
    host=HOST,
    port=PORT,
    user=USER,
    password=PASSWORD,
    database=DATABASE,
    charset='utf8mb4'
)
# -
# ### Define Methods for Retrieving Each Phase of a Season
def get_games_for_season(season_start_yr, season_phase):
    """Fetch all games in one phase of an NBA season.

    Selects the first season whose regular season starts after Jan 1 of
    `season_start_yr`, then returns every game played in the requested
    phase of that season, newest first.

    Parameters:
        season_start_yr (int): calendar year the season starts in.
        season_phase (str): "regular_season", "playoffs" or "finals".

    Returns:
        pandas.DataFrame: one row per game with game_date, broadcaster,
        home/away team names and their line-score points.

    Raises:
        ValueError: if season_phase is not one of the three known phases.
    """
    season_start_yr_fmt = "%d-01-01" % season_start_yr
    cte_alias = ""
    # Build the phase-specific CTE: each variant restricts game_header to the
    # date window of that phase in the selected season.
    if season_phase == "regular_season":
        cte_alias = "agirs"
        cte_name = "all_games_in_regular_season"
        cte = """
        all_games_in_regular_season AS (
            SELECT
                *
            FROM
                game_header gh,
                season_dates sd
            WHERE
                gh.game_date BETWEEN
                    sd.regular_season_start AND
                    sd.regular_season_end
        )
        """
    elif season_phase == "playoffs":
        cte_alias = "agip"
        cte_name = "all_games_in_playoffs"
        cte = """
        all_games_in_playoffs AS (
            SELECT
                *
            FROM
                game_header gh,
                season_dates sd
            WHERE
                gh.game_date BETWEEN
                    sd.playoffs_start AND
                    sd.playoffs_end
        )
        """
    elif season_phase == "finals":
        cte_alias = "agif"
        cte_name = "all_games_in_finals"
        cte = """
        all_games_in_finals AS (
            SELECT
                *
            FROM
                game_header gh,
                season_dates sd
            WHERE
                gh.game_date BETWEEN sd.finals_start AND sd.finals_end
        )
        """
    else:
        # Previously an unknown phase fell through and crashed later with an
        # UnboundLocalError on `cte`; fail fast with a clear message instead.
        raise ValueError("unknown season_phase: %r" % (season_phase,))
    # NOTE(review): values are interpolated via %-formatting rather than bound
    # parameters; safe only while inputs are trusted notebook constants.
    return pandas.read_sql_query("""
    WITH season_dates AS (
        SELECT
            *
        FROM season
        WHERE regular_season_start > "%(season_start_yr_fmt)s"
        ORDER BY regular_season_start ASC
        LIMIT 1
    ), %(cte)s
    SELECT
        %(cte_alias)s.game_date,
        %(cte_alias)s.natl_tv_broadcaster,
        ht.name home_team_name,
        at.name away_team_name,
        hls.pts home_team_pts,
        als.pts away_team_pts
    FROM
        %(cte_name)s %(cte_alias)s
        -- Get the home team information
        JOIN team ht ON
            %(cte_alias)s.home_team_id = ht.id
        JOIN line_score hls ON
            hls.game_id = %(cte_alias)s.game_id AND
            hls.team_id = ht.id
        -- Get the visiting team information
        JOIN team at ON
            %(cte_alias)s.away_team_id = at.id
        JOIN line_score als ON
            als.game_id = %(cte_alias)s.game_id AND
            als.team_id = at.id
    ORDER BY %(cte_alias)s.game_date DESC
    """ % {
        "season_start_yr_fmt": season_start_yr_fmt,
        "cte": cte,
        "cte_name": cte_name,
        "cte_alias": cte_alias
    }, conn)
# ### Select a Season To Retrieve Game Data For
# +
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
# TODO: Get the years from the database
NBA_MODERN_ERA_START_YEAR = 1979
NBA_SEASON_PHASES = ['Regular Season', 'Playoffs', 'Finals']
NOW_YEAR = 2018
selected_year = NBA_MODERN_ERA_START_YEAR
selected_phase = NBA_SEASON_PHASES[0]
def get_games(x):
    """Slider callback: load per-phase game data for the season starting in year x."""
    # Publish the results at module scope: the later notebook cells read
    # selected_year and regular_season_games_df directly. Previously these
    # assignments only created function locals, so those cells saw a stale
    # year and an undefined DataFrame (NameError).
    global selected_year, regular_season_games_df, playoff_games_df, finals_games_df
    selected_year = x
    regular_season_games_df = get_games_for_season(x, "regular_season")
    playoff_games_df = get_games_for_season(x, "playoffs")
    finals_games_df = get_games_for_season(x, "finals")
    print("Year:", selected_year)
    print("# Regular Season Games:", regular_season_games_df.size)
    print("# Playoff Games:", playoff_games_df.size)
    print("# Finals Games:", finals_games_df.size)
interact(get_games, x=widgets.IntSlider(
    min=NBA_MODERN_ERA_START_YEAR,
    max=2018,
    step=1,
    description="NBA Season",
    value=NBA_MODERN_ERA_START_YEAR
));
# interact(f, x=NBA_SEASON_PHASES);
# Teams
# -
# ## Visualize
# NOTE(review): selected_year is only reassigned inside the get_games
# callback's local scope as written, so this prints the initial value
# regardless of the slider position — verify.
print(selected_year)
# +
import plotly.plotly as ply
from plotly.graph_objs import *
# Build one hover label per game: "[date] Home (pts) @ Away (pts)".
game_names = []
# NOTE(review): regular_season_games_df is assigned inside the get_games
# callback; at module scope it is undefined as written (NameError) — verify.
for i, gdf in regular_season_games_df.iterrows():
    game_name = '[%s] %s (%s) @ %s (%s)' % (
        gdf['game_date'],
        gdf['home_team_name'],
        gdf['home_team_pts'],
        gdf['away_team_name'],
        gdf['away_team_pts']
    )
    game_names.append(game_name)
trace1 = Scatter(
    x=regular_season_games_df['home_team_pts'],
    y=regular_season_games_df['away_team_pts'],
    text=game_names,
    mode='markers'
)
layout = Layout(
    xaxis=XAxis( title='Home Team Points' ),
    yaxis=YAxis( type='log', title='Visitor Team Points' )
)
data = Data([trace1])
fig = Figure(data=data, layout=layout)
ply.iplot(fig, filename='Home Team Points v Away Team Points Comparison')
# -
# -
# ### Heatmap of point differentials for each team.
# +
import plotly.plotly as ply
from plotly.graph_objs import *
heatmap_matrix = []
# teams[A][B] accumulates the total points team A scored against team B.
teams = {}
# NOTE(review): games_df is not defined in this notebook; earlier cells
# produce regular_season_games_df / playoff_games_df / finals_games_df. As
# written this raises NameError — confirm which DataFrame is intended.
for i, gdf in games_df.iterrows():
    home_name = gdf['home_team_name']
    away_name = gdf['away_team_name']
    home_pts = gdf['home_team_pts']
    away_pts = gdf['away_team_pts']
    # Ensure both teams have nested dicts keyed by opponent.
    if home_name not in teams:
        teams[home_name] = {}
    if away_name not in teams:
        teams[away_name] = {}
    if away_name not in teams[home_name]:
        teams[home_name][away_name] = 0
    if home_name not in teams[away_name]:
        teams[away_name][home_name] = 0
    # Accumulate points scored against each opponent across all meetings.
    teams[away_name][home_name] += home_pts
    teams[home_name][away_name] += away_pts
    """
    if (away_name == "Warriors" or home_name == "Warriors") and \
       (away_name == "Cavaliers" or home_name == "Cavaliers"):
        print(away_name, away_pts, '@', home_name, home_pts)
    """
sorted_team_names = sorted(teams.keys())
layout = Layout(
    xaxis=XAxis(title='Home Team Name'),
    yaxis=YAxis(title='Away Team Name')
)
# Build the z-matrix of pairwise point differentials (zero on the diagonal).
point_differentials = []
for x_team in sorted_team_names:
    z = []
    for y_team in sorted_team_names:
        if x_team == y_team:
            z.append(0)
        else:
            delta = teams[x_team][y_team] - teams[y_team][x_team]
            z.append(delta)
    point_differentials.append(z)
trace = Heatmap(
    x=sorted_team_names,
    y=sorted_team_names,
    z=point_differentials,
    colorscale='Blackbody'
)
data=[trace]
ply.iplot(data, filename='basic-heatmap')
# +
def get_season_info_for_year(season_start_yr):
    """Return the first season whose regular season starts after Jan 1 of *season_start_yr*.

    Args:
        season_start_yr: four-digit calendar year, e.g. 1979.

    Returns:
        A one-row pandas DataFrame from the `season` table (empty if no
        matching season exists).
    """
    season_start_yr_fmt = "%d-01-01" % season_start_yr
    # The date literal must be quoted: unquoted, SQL parses 1979-01-01 as the
    # integer expression 1979 minus 1 minus 1, so the filter silently compares
    # against the number 1977 instead of the date string.
    return pandas.read_sql_query("""
    SELECT
        *
    FROM season
    WHERE regular_season_start > '%s'
    ORDER BY regular_season_start ASC
    LIMIT 1;
    """ % season_start_yr_fmt, conn)
# Smoke test: fetch the first season record following the 1979 start year.
season_info = get_season_info_for_year(1979)
print(season_info)
# -
|
.ipynb_checkpoints/2017-18 NBA Season <> MemSQL Analytics-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Car:
    # NOTE(review): empty placeholder; it is immediately shadowed by the full
    # Car definition below and is never used.
    pass
class Car:
    """A simple car record that can describe itself and be repainted."""

    def __init__(self, model, manufacturer, year, modeltype, color):
        """Store the five descriptive attributes on the instance."""
        attributes = ('model', 'manufacturer', 'year', 'modeltype', 'color')
        for name, value in zip(attributes, (model, manufacturer, year, modeltype, color)):
            setattr(self, name, value)

    def describe(self):
        """Print a one-line description of this car."""
        print(f'someone owns that {self.year} {self.color} {self.manufacturer} {self.model} {self.modeltype}')

    def repaint(self, new_color):
        """Update the car's colour and announce the repaint."""
        self.color = new_color
        print(f'The {self.year} {self.manufacturer} {self.model} {self.modeltype} has been repainted to {self.color} ')
# Demo: build a car, print a summary, then exercise describe() and repaint().
mycar = Car('Mondeo','ford', 2018, 'Hatchback', 'black')
# The original print spanned two source lines inside one single-quoted
# f-string, which is a SyntaxError; implicit string concatenation inside the
# call parentheses keeps the message on one logical line.
print(f'My car is {mycar.color} {mycar.manufacturer} {mycar.model} {mycar.modeltype} '
      f'parked on the street corner. it was made in {mycar.year}. Although I bought it 4 years ago as used car')
mycar.describe()
mycar.repaint('Yellow')
### Exercise 1:
import importlib
import subprocess
import sys

def _ensure(package):
    """Install *package* with pip if it cannot be imported."""
    try:
        importlib.import_module(package)
    except ImportError:
        # `pip.main` was removed in pip 10; invoking pip as a module via the
        # current interpreter is the supported programmatic install path.
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])

_ensure('numpy')
_ensure('pandas')

import numpy as np
import pandas as pd

np.random.seed(0)  # fixed seed so the sampled values are reproducible
random_number = np.random.random(10)
print(random_number)
s = pd.Series(random_number)
s
print(type(s))
print(type(random_number))
# Re-create the Series with explicit string labels instead of 0..9.
index_value = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
s = pd.Series(random_number, index=index_value)
print(s)
import pandas

# Four aligned five-element Series that become the DataFrame's columns.
s1 = pandas.Series([1, 2, 3, 4, 5])
s2 = pandas.Series([6, 7, 8, 9, 10])
s3 = pandas.Series([11, 12, 13, 14, 15])
s4 = pandas.Series([16, 17, 18, 19, 20])
df = pandas.DataFrame({'s1':s1, 's2':s2, 's3':s3, 's4':s4})
df
# Relabel the rows with letters. The original defined index_value but never
# applied it; note that passing index=index_value to the constructor would
# *align* on the Series' existing 0..4 integer labels and produce all-NaN
# rows, so assign the labels after construction instead.
index_value = ['a', 'b', 'c', 'd', 'e']
df = pandas.DataFrame({'s1':s1, 's2':s2, 's3':s3, 's4':s4})
df.index = index_value
df
|
Chapter01-Python_Overview_and_Main_Packages/.ipynb_checkpoints/Exercise1.1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# !pip install pywedge
import pywedge as pw
# !pip3 install pandas_profiling --upgrade
import pandas_profiling as pp
from pandas_profiling import ProfileReport
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
df_train=pd.read_csv('/kaggle/input/titanic/train.csv')
df_train.head(5)
df_train.shape
df_train.columns
df_train.dtypes
df_train.isna().sum().sort_values()
df_train.info()
df_train.describe()
df_train.describe().style.background_gradient(axis=1,cmap=sns.light_palette('green', as_cmap=True))
# # What is Pywedge?
# Pywedge is an open-source python library which is a complete package that helps you in Visualizing the data, Pre-process the data and also create some baseline models which can be further tuned to make the best machine learning model for the data.
#
# ***!pip install pywedge***
#
# ***import pywedge as pw***
dash = pw.Pywedge_Charts(df_train, c=None, y='Survived')
dashboard = dash.make_charts()
# ## Preprocessing the data
# ***loading the dataset***
df_train=pd.read_csv('../input/titanic/train.csv')
df_test=pd.read_csv('../input/titanic/test.csv')
# ***here we pass type='Regression' to build the baseline models; for a classification problem, pass type='Classification' instead***
blm = pw.baseline_model(df_train, df_test, c=None, y='Survived', type='Regression')
blm.Regression_summary()
# +
#blm.predictions_baseline
# -
# # pandas_profiling
# Generates profile reports from a pandas DataFrame. The pandas df.describe() function is great but a little basic for serious exploratory data analysis. pandas_profiling extends the pandas DataFrame with df.profile_report() for quick data analysis.
#
# ***For each column the following statistics - if relevant for the column type - are presented in an interactive HTML report:***
#
# *Type inference: detect the types of columns in a dataframe.*
#
# *Essentials: type, unique values, missing values*
#
# *Quantile statistics like minimum value, Q1, median, Q3, maximum, range, interquartile range*
#
# *Descriptive statistics like mean, mode, standard deviation, sum, median absolute deviation, coefficient of variation, kurtosis, skewness*
#
# ***Most frequent values***
#
# Histograms
#
# Correlations highlighting of highly correlated variables, Spearman, Pearson and Kendall matrices
#
# Missing values matrix, count, heatmap and dendrogram of missing values
#
# Duplicate rows Lists the most occurring duplicate rows
#
# Text analysis learn about categories (Uppercase, Space), scripts (Latin, Cyrillic) and blocks (ASCII) of text data
#import pandas_profiling as pp
#from pandas_profiling import ProfileReport
pp.ProfileReport(df_train)
|
Machine Learning/ML Projects/best-eda-using-pywedge-and-pandas-profiling (1).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''anaconda3'': virtualenv)'
# language: python
# name: python37464bitanaconda3virtualenv8d5eaf14266749f6b296cfba195766f3
# ---
# +
import requests
import json
import re
import pandas as pd
from datetime import datetime
import sqlite3
import concurrent.futures
import asyncio
from typing import List
from tqdm import tqdm
import time
import multiprocessing
def transform(response) -> pd.DataFrame:
    """Turn the raw NY-health JSON response into a tidy per-county DataFrame.

    Args:
        response: a requests.Response whose JSON payload follows Socrata's
            layout — column metadata under meta.view.columns, rows under data.

    Returns:
        DataFrame with county (lowercased, underscored, dots stripped),
        test_date (as string), the four count columns as int, and a
        load_date stamp for today.
    """
    # Decode the JSON payload once; the original re-parsed it on every access.
    payload = response.json()
    ## Parsing Column Names
    columns_meta_data = payload["meta"]["view"]["columns"]
    column_names = [re.sub(':', '', x["fieldName"]) for x in columns_meta_data]
    print("There are {} columns in this data set".format(str(len(column_names))))
    print("There are {} rows of data".format(str(len(payload["data"]))))
    ## Storing and cleaning data as a DataFrame
    df = pd.DataFrame(payload["data"], columns=column_names)
    df["test_date"] = pd.to_datetime(df["test_date"]).astype("str")
    # Normalize county names so they are valid SQLite table names.
    df["county"] = df["county"].apply(lambda x: re.sub(' ', '_', x.lower()).replace(".", ""))
    df[["new_positives", "cumulative_number_of_positives", "total_number_of_tests", "cumulative_number_of_tests"]] = \
        df[["new_positives", "cumulative_number_of_positives", "total_number_of_tests", "cumulative_number_of_tests"]].astype("int")
    ## Selecting Desired Columns
    df = df[["county", "test_date", "new_positives", "cumulative_number_of_positives", "total_number_of_tests", "cumulative_number_of_tests"]]
    df["load_date"] = datetime.today().strftime("%Y-%m-%d")
    print("Sample Data")
    display(df.sample(5))  # notebook-only preview (IPython display)
    return df
def load(df, county_names, db_name = "covid.db"):
    """Fan the per-county ingest out over a process pool and report timing.

    Each worker opens its own SQLite connection inside `ingest`, so no
    connection is created here (the original opened one and never used or
    closed it).

    Args:
        df: full DataFrame produced by `transform` (all counties).
        county_names: iterable of normalized county names / table names.
        db_name: SQLite database file path.
    """
    ## Since our program is CPU bound and not IO bound - using multi-processing instead of multi-threading
    t1 = time.perf_counter()
    with concurrent.futures.ProcessPoolExecutor() as executer:
        results = [executer.submit(ingest, df, county_name, db_name) for county_name in county_names]
        for f in concurrent.futures.as_completed(results):
            print(f.result())  # status string returned by ingest
    t2 = time.perf_counter()
    print(f'Finished in {t2-t1} seconds')
def create_table_cmd(county_name):
    """Build the CREATE TABLE statement for one county's test-results table."""
    column_types = {
        "test_date": "TEXT",
        "new_positives": "INTEGER",
        "cumulative_number_of_positives": "INTEGER",
        "total_number_of_tests": "INTEGER",
        "cumulative_number_of_tests": "INTEGER",
        "load_date": "TEXT",
    }
    # One " name TYPE" fragment per column, comma/newline separated.
    sql_cols = ',\n'.join(f" {name} {sql_type}" for name, sql_type in column_types.items())
    cmd = f"""CREATE TABLE if not exists {county_name} (
    {sql_cols}
    );"""
    return cmd
def ingest(df, county_name, db_name) -> str:
    """Append new rows for one county into its own SQLite table.

    Opens a private connection so it is safe to run in a worker process.
    Only rows newer than the table's current max(test_date) are appended,
    which makes repeated runs incremental.

    Args:
        df: full DataFrame from `transform` (all counties).
        county_name: normalized county name; also used as the table name.
        db_name: SQLite database file path.

    Returns:
        A human-readable status string including the number of rows added.
    """
    conn_temp = sqlite3.connect(db_name)
    c = conn_temp.cursor()
    # Create the county's table if it does not exist yet
    cmd = create_table_cmd(county_name)
    c.execute(cmd)
    # Keep only this county's rows; the county column is redundant per-table
    df_county = df[df["county"] == county_name].drop(["county"], axis = 1)
    # Incremental load: look up the newest date already stored (None if the
    # table is empty) and append only rows that are newer.
    max_date_in_table = pd.read_sql(f"select max(test_date) from {county_name}", conn_temp).values[0][0]
    if max_date_in_table is not None:
        df_county = df_county[pd.to_datetime(df_county.test_date) > pd.to_datetime(max_date_in_table)]
    df_county.to_sql(county_name, conn_temp, if_exists='append', index = False)
    conn_temp.commit()
    conn_temp.close()
    return f"{county_name} table is updated on {datetime.today().date()} at {datetime.today().time().strftime('%H:%M %p')}. {df_county.shape[0]} row(s) added."
## Main ETL Function
def main(url, db_name):
    """Run the extract / transform / load pipeline end to end.

    Args:
        url: Socrata JSON endpoint for the NY State testing data.
        db_name: SQLite database file to load into.

    Raises:
        requests.exceptions.HTTPError: if the download fails; the original
            printed the error and then fell through to transform a failed
            response, so the error is now re-raised instead.
    """
    try:
        response = requests.get(url)
        print(f"Loaded response from {url}")
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        print (e.response.text)
        raise  # cannot transform/load a failed download
    print(f"Response Code: {response.status_code}")
    df = transform(response)
    county_names = df.county.unique()
    # The NY data set covers all 62 counties; anything else means a bad pull.
    assert(len(county_names) == 62), "Mismatch in the number of counties"
    load(df, county_names, db_name)
# Script entry point: run the full ETL against the live NY State endpoint.
if __name__ == "__main__":
    # Specify the URL and DB Name
    url = "https://health.data.ny.gov/api/views/xdss-u53e/rows.json?accessType=DOWNLOAD"
    db_name = "covid.db"
    main(url, db_name)
|
Egen ETL Notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch.nn as nn
import torch.optim as optim
import sys
sys.path.append('../vanilla_densenet_small/')
from densenet import DenseNet
# -
# Small DenseNet: three dense blocks of 8/12/10 layers, growth rate 12,
# 48 initial features, 200 output classes.
# NOTE(review): parameter semantics are defined in
# ../vanilla_densenet_small/densenet.py — drop_rate/final_drop_rate are
# presumably dropout probabilities; confirm against that file.
model = DenseNet(
    growth_rate=12, block_config=(8, 12, 10),
    num_init_features=48, bn_size=4, drop_rate=0.25,
    final_drop_rate=0.25, num_classes=200
)
|
ttq_densenet_small/Parse DenseNet small.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="Ae82cWy0FMb8" outputId="3cb5e57e-27d2-4cea-8c66-b5bef132bdd4"
# !wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
# + id="I81k2jkrFxwa"
# !bunzip2 /content/shape_predictor_68_face_landmarks.dat.bz2
datFile = "/content/shape_predictor_68_face_landmarks.dat"
# + id="Lt_jQWB3F345"
import cv2
import numpy as np
import dlib
from imutils import face_utils
# + id="PMxxKD7xF8Gq"
from google.colab.patches import cv2_imshow
# + id="_RNh6IUYF_Mc"
def distance(ptA, ptB):
    """Return the Euclidean distance between two landmark coordinates."""
    return np.linalg.norm(ptA - ptB)

def eye_AR(a, b, c, d, e, f):
    """Score eye openness from six eye landmarks via the aspect ratio.

    `a` and `f` are the eye corners; (`b`, `d`) and (`c`, `e`) are the two
    top/bottom landmark pairs. Returns 2 (open) for a ratio above 0.25,
    1 (half-closed) for ratios in (0.21, 0.25], else 0 (blinked/closed).
    """
    vertical = distance(b, d) + distance(c, e)
    horizontal = distance(a, f)
    ratio = vertical / (2.0 * horizontal)
    if ratio > 0.25:
        return 2
    if 0.21 < ratio <= 0.25:
        return 1
    return 0
# + id="TE1Q43grGF7x"
frame = cv2.imread('image1.jpg')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(datFile)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector(gray)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="JQfp5CPoGMjf" outputId="a7f6501d-4ac9-4315-b6fa-85f87e01022a"
# For each detected face: draw its bounding box, score each eye with eye_AR,
# label the drowsiness status, and draw all 68 facial landmarks.
for face in faces:
    x1 = face.left()
    y1 = face.top()
    x2 = face.right()
    y2 = face.bottom()
    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
    landmarks = predictor(gray, face)
    landmarks = face_utils.shape_to_np(landmarks)
    # Landmarks 36-41 / 42-47 are the eye points of the 68-point predictor
    # loaded above; the argument order matches eye_AR's expectation of
    # (corner, top pair, bottom pair, corner).
    left_eye = eye_AR(landmarks[36],landmarks[37],landmarks[38], landmarks[41], landmarks[40], landmarks[39])
    right_eye = eye_AR(landmarks[42],landmarks[43],landmarks[44], landmarks[47], landmarks[46], landmarks[45])
    # eye_AR scores: 0 = closed, 1 = half-closed, 2 = open. Both eyes must
    # agree for the "closed"/"half-closed" labels; anything else is "awake".
    # NOTE(review): status strings appear to be Darija for sleeping /
    # drowsy / awake — confirm before changing them.
    if(left_eye==0 and right_eye==0):
        status="Na3ss"
        color = (0,0,255)
    elif (left_eye==1 and right_eye==1):
        status="Fih n3ass"
        color = (255,0,0)
    else:
        status="Fay9"
        color = (0,255,0)
    cv2.putText(frame, status, (x1, y1-2),cv2.FONT_HERSHEY_SIMPLEX, 10, color ,2)
    # Draw every landmark as a white dot.
    for n in range(0, 68):
        (x,y) = landmarks[n]
        cv2.circle(frame, (x, y), 5, (255, 255, 255), 6)
cv2_imshow(frame)
|
detect_sleeping_for_image.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Spectrum approximation experiment (Section 5.2)
#
# The script compares how close is the spectrum of a coarse graph to that of the original graph.
#
# The code accompanies paper [Graph reduction with spectral and cut guarantees](http://www.jmlr.org/papers/volume20/18-680/18-680.pdf) by <NAME> published at JMLR/2019 ([bibtex](http://www.jmlr.org/papers/v20/18-680.bib)).
#
# This work was kindly supported by the Swiss National Science Foundation (grant number PZ00P2 179981).
#
# 15 March 2019
#
# [<NAME>](https://andreasloukas.blog)
#
# [](https://zenodo.org/badge/latestdoi/175851068)
#
# Released under the Apache license 2.0
# !pip install networkx
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
# +
from graph_coarsening.coarsening_utils import *
import graph_coarsening.graph_lib as graph_lib
import graph_coarsening.graph_utils as graph_utils
import numpy as np
import scipy as sp
from scipy import io
from scipy.linalg import circulant
import time
import os
import matplotlib
import matplotlib.pylab as plt
import pygsp as gsp
gsp.plotting.BACKEND = 'matplotlib'
# -
# ### Parameters
# +
graphs = ['yeast', 'airfoil', 'minnesota', 'bunny']
methods = ['heavy_edge', 'variation_edges', 'variation_neighborhoods', 'algebraic_JC', 'affinity_GS', 'kron']
K_all = np.array([10,40], dtype=np.int32)
r_all = [0.3, 0.5, 0.7]
print('k: ', K_all, '\nr: ', r_all)
# -
# ### The actual experiment code (this will take long)
# If one needs to just see the results, skip running this part.
# +
rerun_all = False
rewrite_results = False
if rerun_all:
algorithm = 'greedy'
max_levels = 10
n_methods = len(methods)
n_graphs = len(graphs)
flag = (K_all[-1] == -1)
for graphIdx, graph in enumerate(graphs):
N = 4000
if graph == 'bunny':
G = graph_lib.real(N, 'bunny')
elif graph == 'swissroll':
G = graph_lib.knn(N, 'swissroll')
elif graph == 'barabasi-albert':
G = graph_lib.models(N, 'barabasi-albert')
elif graph == 'block':
G = graph_lib.clusterable(N, 'block', K=10, p = 10/N, q = 0.5/N) # works
elif graph == 'regular':
G = graph_lib.models(N, 'regular', k=10)
elif graph == 'grid':
N1 = int(np.sqrt(N))
G = graphs.Grid2d(N1=N1, N2=N1) # large r: edge-based better for moderate K, then heavy edge, small r: edge/neighborhood-based
else:
G = graph_lib.real(N, graph)
N = G.N
if flag:
kmax = int(np.floor(N*(1-max(r_all))))-1
else:
kmax = max(K_all)
# precompute spectrum needed for metrics
if kmax > N/2:
[Uk,lk] = eig(G.L)
else:
offset = 2*max(G.dw)
T = offset*sp.sparse.eye(G.N, format='csc') - G.L
lk, Uk = sp.sparse.linalg.eigsh(T, k=kmax, which='LM', tol=1e-6)
lk = (offset-lk)[::-1]
Uk = Uk[:,::-1]
G.estimate_lmax()
lambda_max = G.lmax
eigenvalue = np.zeros((n_methods, len(K_all), len(r_all)))
ratio = np.zeros((n_methods, len(K_all), len(r_all)))
for rIdx,r in enumerate(r_all):
n_target = int(np.floor(N*(1-r)))
if flag: K_all[-1] = int(np.floor(N*(1-r)))-1
for KIdx, K in enumerate(K_all):
print('{} {}| K:{:2.0f}'.format(graph, N, K))
if K > n_target:
print('Warning: K={}>n_target={}. skipping'.format(K, n_target))
continue
for methodIdx,method in enumerate(methods):
# algorithm is not deterministic: run a few times
if method == 'kron':
if KIdx == 0:
n_iterations = 2
n_failed = 0
r_min = 1.0
for iteration in range(n_iterations):
Gc, iG = kron_coarsening(G, r=r, m=None)
metrics = kron_quality(iG, Gc, kmax=K_all[-1], Uk=Uk[:,:K_all[-1]], lk=lk[:K_all[-1]])
if metrics['failed']: n_failed += 1
else:
r_min = min(r_min, metrics['r'])
for iKIdx, iK in enumerate(K_all):
eigenvalue[methodIdx, iKIdx, rIdx] += np.nanmean(metrics['error_eigenvalue'][:iK])
eigenvalue[methodIdx, :, rIdx] /= (n_iterations-n_failed)
ratio[ methodIdx, :, rIdx] = r_min
if np.abs(r_min - r) > 0.02: print('Warning: ratio={} instead of {} for {}'.format(r_min, r, method))
else:
C, Gc, Call, Gall = coarsen(G, K=K, r=r, max_levels=max_levels, method=method, algorithm=algorithm, Uk=Uk[:,:K], lk=lk[:K])
metrics = coarsening_quality(G, C, kmax=K, Uk=Uk[:,:K], lk=lk[:K])
eigenvalue[methodIdx, KIdx, rIdx] = np.nanmean(metrics['error_eigenvalue'])
ratio[methodIdx, KIdx, rIdx] = metrics['r']
if np.abs(metrics['r'] - r) > 0.02:
print('Warning: ratio={} instead of {} for {}'.format(metrics['r'], r, method))
if rewrite_results:
filepath = os.path.join('..', 'results', 'experiment_spectrum_'+ graph +'.npz')
print('.. saving to "' + filepath + '"')
np.savez(filepath, methods=methods, K_all=K_all, r_all=r_all, eigenvalue=eigenvalue, ratio=ratio)
print('done!')
# -
# ### General code for nice printing
# +
class color:
    """ANSI escape sequences for coloured/bold terminal output.

    Used by the table-printing cells below to bold the best (lowest-error)
    entry in each column; END resets all attributes.
    """
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
graphs = ['yeast', 'airfoil', 'minnesota', 'bunny']
# -
# ### Show all results as an ASCI table
# +
latex = False
sep = '&' if latex else ','
for KIdx,K in enumerate(K_all):
print('\n%--------------------------------------------------------------------')
print(f'% K: {K}:')
print('%--------------------------------------------------------------------')
if latex:
string = 'r'
for i in range(16): string += 'C{4mm}'
print('\\begin{table}[]\n\\scriptsize\\centering\n\\begin{tabular}{' + string + '}\n\\toprule')
# graph title line
line = ''
for graphIdx, graph in enumerate(graphs):
if latex :
line = '{}\\multicolumn{{3}}{{c}}{{{:}}}{}'.format(line, graph,sep)
else:
line = '{} {:21s} , '.format(line, graph)
line = line[:-1]
print('{0:18} {1} {2} \\\\'.format(' ', sep, line)) # \multicolumn{3}{c}{minesotta}
if latex: print('\\cmidrule(l){2-16} ')
# reduction title line
line = '{0:18} {1} '.format(' ', sep)
for graphIdx, graph in enumerate(graphs):
for rIdx, r in enumerate(r_all):
line = '{}{:4.0f}\\% {} '.format(line, 100*r,sep)
line = '{}{:1s}'.format(line, ' ')
line = line[:-3]
print('{}\\\\'.format(line))
for methodIdx,method in enumerate(methods):
method = method.replace('_', ' ')
if method == 'heavy edge':
method = 'heavy edge'
elif 'variation edges' in method:
method = 'local var. (edges)'
elif (method == 'variation neighborhoods') or (method == 'variation neighborhood'):
method = 'local var. (neigh)'
elif 'algebraic' in method:
method = 'algebraic dist.'
elif 'affinity' in method:
method = 'affinity'
elif method == 'kron':
method = 'kron'
else:
continue
# will hold one string per graph
strings = []
# for each graph
for graphIdx, graph in enumerate(graphs):
filepath = os.path.join('..', 'results', 'experiment_spectrum_'+ graph +'.npz')
data = np.load(filepath)
eigenvalue = data['eigenvalue']
# eigenvalue *= lmax[graphIdx]
# for each r
string = ''
for rIdx, r in enumerate(r_all):
if min(eigenvalue[:,KIdx,rIdx]) == eigenvalue[methodIdx,KIdx,rIdx]:
if latex:
string = '{} \\textbf{{{:0.3f}}} &'.format(string, eigenvalue[methodIdx,KIdx,rIdx])
else:
string = '{} {}{:0.4f}{} ,'.format(string, color.BOLD, eigenvalue[methodIdx,KIdx,rIdx], color.END)
else:
if latex:
string = '{} {:0.3f} {}'.format(string, eigenvalue[methodIdx,KIdx,rIdx], sep)
else:
string = '{} {:0.4f} {}'.format(string, eigenvalue[methodIdx,KIdx,rIdx], sep)
strings.append(string)
combined = ' '.join(s for s in strings)
print('{0:18s} {2}{1} \\\\'.format(method, combined[:-2], sep))
if latex: print('\\bottomrule\n\\end{tabular}\n\\end{table}')
# -
# ### Measure error improvement
# +
measure = np.zeros((len(graphs), len(K_all), 2))*np.NaN
print('===========================================================')
for KIdx, K in enumerate(K_all):
for graphIdx, graph in enumerate(graphs):
filepath = os.path.join('..', 'results', 'experiment_spectrum_'+ graph +'.npz')
data = np.load(filepath)
eigenvalue = data['eigenvalue']
measure[graphIdx,KIdx,0] = np.min(eigenvalue[[0,3,4,5],KIdx,-1]) / np.min(eigenvalue[:,KIdx,-1],0)
measure[graphIdx,KIdx,1] = np.min(eigenvalue[[0,3,4], KIdx,-1]) / np.min(eigenvalue[:,KIdx,-1],0)
print(' {:10} K:{}, with Kron:{:1.3f}, without Kron:{:1.3f}'.format(graph, K, measure[graphIdx,KIdx,0], measure[graphIdx,KIdx,1]))
print('For this k: ' + str(np.nanmean(measure[:,KIdx,0])) + '/' + str(np.nanmean(measure[:,KIdx,1])))
print('-----------------------------------------------------------')
print('===========================================================')
print('Overall:')
print(str(np.nanmean(measure[:,:,0])) + '/' + str(np.nanmean(measure[:,:,1])))
# -
# ### Generate a vertical latex table of the results (Table 1, 2)
for KIdx,K in enumerate(K_all):
print('\n%--------------------------------------------------------------------')
print(f'% K: {K}:')
print('%--------------------------------------------------------------------')
print('\\begin{table}[]\n\\footnotesize\\centering\n\\resizebox{0.75\\textwidth}{!}{\n\\begin{tabular}{@{}rccccccc@{}}\n\\toprule')
# headers
line = '{:27} & {:20}'.format('', '$r$')
for methodIdx, method in enumerate(methods):
method = method.replace('_', ' ')
if method == 'heavy edge':
method = '\\begin{tabular}[c]{@{}c@{}}heavy\\\\ edge\\end{tabular}'
elif 'variation edges' in method:
method = '\\begin{tabular}[c]{@{}c@{}}local var.\\\\ (edges)\\end{tabular}'
elif (method == 'variation neighborhoods') or (method == 'variation neighborhood'):
method = '\\begin{tabular}[c]{@{}c@{}}local var.\\\\ (neigh.)\\end{tabular}'
elif 'algebraic' in method:
method = '\\begin{tabular}[c]{@{}c@{}}algebraic\\\\ distance\\end{tabular}'
elif 'affinity' in method:
method = 'affinity'
elif method == 'kron':
method = '\\begin{tabular}[c]{@{}c@{}}Kron\\\\ reduction\\end{tabular}'
else: continue
line += ' & {:20}'.format(method)
line += '\\\\ \\midrule'
print(line)
for graphIdx, graph in enumerate(graphs):
filepath = os.path.join('..', 'results', 'experiment_spectrum_'+ graph +'.npz')
data = np.load(filepath)
eigenvalue = data['eigenvalue']#*lmax[graphIdx]
for rIdx, r in enumerate(r_all):
if rIdx == 0: line = '\\multirow{3}{*}{' + graph + '}'
else: line = ''
line = '{:27} & {:19}\%'.format(line, int(r*100))
for methodIdx, method in enumerate(methods):
if min(eigenvalue[:,KIdx,rIdx]) == eigenvalue[methodIdx,KIdx,rIdx]:
line += ' & \\textbf{{{:0.3f}}}{:6}'.format(eigenvalue[methodIdx,KIdx,rIdx],'')
else:
line += ' & {:0.3f}{:15}'.format(eigenvalue[methodIdx,KIdx,rIdx], '')
line += '\\\\'
if rIdx == len(r_all)-1 and graphIdx < len(graphs)-1: line += '\cmidrule(l){2-8}'
print(line)
print('\\bottomrule\n\\end{tabular}\n}\n\\caption{??}\n\\label{table:K=' + str(K) + '}\n\\end{table}')
|
examples/experiment_spectrum.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8
# language: python
# name: python3.8
# ---
# # Train with RAPIDS
#
# description: train with RAPIDS and cuML on a subset of the airlines dataset
# +
from azureml.core import Workspace
ws = Workspace.from_config()
ws
# +
import git
from pathlib import Path
# get root of git repo
prefix = Path(git.Repo(".", search_parent_directories=True).working_tree_dir)
# training script
script_dir = prefix.joinpath("code", "models", "rapids")
script_name = "train.py"
# environment file
environment_file = prefix.joinpath("environments", "rapids-example.dockerfile")
# azure ml settings
environment_name = "rapids-airline-example"
experiment_name = "rapids-airline-example"
compute_target = "gpu-V100-1"
# + tags=[]
print(open(script_dir.joinpath(script_name)).read())
# +
from azureml.core import ScriptRunConfig, Experiment, Environment, Dataset
ds = Dataset.File.from_files(
"https://airlinedataset.blob.core.windows.net/airline-20m/*"
)
arguments = [
"--data_dir",
ds.as_mount(),
"--n_bins",
32,
"--compute",
"single-GPU",
"--cv-folds",
1,
]
env = Environment(environment_name)
env.docker.enabled = True
env.docker.base_image = None
env.docker.base_dockerfile = environment_file
env.python.user_managed_dependencies = True
src = ScriptRunConfig(
source_directory=script_dir,
script=script_name,
arguments=arguments,
environment=env,
compute_target=compute_target,
)
run = Experiment(ws, experiment_name).submit(src)
run
# +
from azureml.widgets import RunDetails
RunDetails(run).show()
# -
run.wait_for_completion(show_output=True)
|
notebooks/rapids/train-airlines.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Instructions
#
# ### Roles
#
# - **Facilitator:** Ask questions, keep the conversation flowing.
# - **Recorder:** Keep notes in the chat on important answers / examples.
# - **Reporter:** Share the team's solutions when called on.
#
# First alphabetical by first name is the Facilitator, second is the Reporter, third is the Recorder.
#
#
# ### Question 1
#
# For each of the following statements, determine whether it is true or false.
# - **If the statement is false**, give a counterexample through a piece of working code.
# - **If the statement is true**, identify a reasonable mistake that could lead someone to believe that it was false.
#
# Spend about 3-4 minutes on each question, in any order that your team agrees on.
#
# **(A)**. Every class definition requires an `__init__()` method in order for the definition to run without an error.
#
# **(B)**. Both class and instance variables must be preceded by self when referenced inside the class definition.
#
# **(C)**. Class and instance variables can be accessed either by custom-designed functions or by direct reference.
#
# **(D)**. Every class definition requires an `__init__()` method in order to instantiate objects in that class without error.
# ### Question 2
#
# Same instructions as above: each of the following statements, determine whether it is true or false.
# - **If the statement is false**, give a counterexample through a piece of working code.
# - **If the statement is true**, identify a reasonable mistake that could lead someone to believe that it was false.
#
# Spend about 3-4 minutes on each question.
#
# **(A)**. If `classB` inherits from `classA`, and `classB` has method `foo()`, then `classA` **cannot** have method `foo()`.
#
# **(B)**. If `classB` inherits from `classA`, and `classB` has method `foo()`, then `classA` **must** have method `foo()`.
|
live_lectures/live-lecture-7.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
# Inspect a structured array written by the cnumpy firmware test.
# NOTE(review): the field names ('two', 'three') come from whatever wrote
# testout6710.npy — confirm against the producer.
arr=np.load('testout6710.npy')
arr
# -
plt.plot(arr['two'][0])
arr[0]['three']
|
firmware/components/cnumpy/Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 7.264609, "end_time": "2020-11-15T07:17:03.377169", "exception": false, "start_time": "2020-11-15T07:16:56.112560", "status": "completed"} tags=[]
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import layers
import tensorflow as tf
from keras.preprocessing.image import img_to_array
from tensorflow.keras import backend
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# + papermill={"duration": 0.020638, "end_time": "2020-11-15T07:17:03.420756", "exception": false, "start_time": "2020-11-15T07:17:03.400118", "status": "completed"} tags=[]
# + [markdown] _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 0.02081, "end_time": "2020-11-15T07:17:03.462240", "exception": false, "start_time": "2020-11-15T07:17:03.441430", "status": "completed"} tags=[]
# Most of the images in the given data set contain more than one person, but our model only needs the area covered by the face (with or without a mask), not the entire image. We use the CSV annotations to crop just the required region and then load it into the train, test and valid data frames.
# + papermill={"duration": 4.338439, "end_time": "2020-11-15T07:17:07.821520", "exception": false, "start_time": "2020-11-15T07:17:03.483081", "status": "completed"} tags=[]
def data_set(dir_data):
    """Load annotated face crops from a Kaggle face-mask-detection split.

    Reads ``<dir_data>_annotations.csv``, crops each annotated face region
    from its image, resizes it to 70x70, and collects image/label pairs.

    Args:
        dir_data: directory of one split (train/test/valid), ending in '/'.

    Returns:
        data: float array of shape (N, 70, 70, 3), pixel values in [0, 1].
        target: one-hot array of shape (N, 2); class 1 = with_mask,
            class 0 = without_mask.
    """
    data = []
    target = []
    data_map = {
        'with_mask': 1,
        'without_mask': 0
    }
    skipped = 0
    df1 = pd.read_csv(dir_data + '_annotations.csv')
    for i in range(len(df1)):
        label = 'without_mask'
        image = cv2.imread(dir_data + df1['filename'][i])
        xmin = int(df1['xmin'][i])
        ymin = int(df1['ymin'][i])
        xmax = int(df1['xmax'][i])
        ymax = int(df1['ymax'][i])
        try:
            # Crop the annotated face region, then resize to (70 x 70).
            # The crop is inside the try so that an unreadable image
            # (cv2.imread returns None) or a degenerate bounding box is
            # skipped instead of crashing the whole load.
            image = image[ymin:ymax, xmin:xmax]
            image = cv2.resize(image, (70, 70))
        except Exception as E:
            skipped += 1
            print(E)
            continue
        if df1['class'][i] == 'mask':
            label = 'with_mask'
        data.append(img_to_array(image))
        target.append(data_map[label])
    data = np.array(data, dtype="float") / 255.0
    target = tf.keras.utils.to_categorical(np.array(target), num_classes=2)
    return data, target
# Build the train/test/validation splits from the annotated Kaggle dataset.
training_data, training_target=data_set('/kaggle/input/face-mask-detection/train/')
testing_data, testing_target=data_set('/kaggle/input/face-mask-detection/test/')
valid_data, valid_target=data_set('/kaggle/input/face-mask-detection/valid/')
# + [markdown] papermill={"duration": 0.021873, "end_time": "2020-11-15T07:17:07.866511", "exception": false, "start_time": "2020-11-15T07:17:07.844638", "status": "completed"} tags=[]
#
#
# Let's take a quick look at what the data looks like:
#
# + papermill={"duration": 2.541406, "end_time": "2020-11-15T07:17:10.430515", "exception": false, "start_time": "2020-11-15T07:17:07.889109", "status": "completed"} tags=[]
# Preview a few of the cropped training faces on one figure.
plt.figure(0, figsize=(100, 100))
for sample_idx in range(1, 10):
    plt.subplot(10, 5, sample_idx)
    plt.imshow(training_data[sample_idx])
# + [markdown] papermill={"duration": 0.027137, "end_time": "2020-11-15T07:17:10.486398", "exception": false, "start_time": "2020-11-15T07:17:10.459261", "status": "completed"} tags=[]
# Now, we check the format of the images
# channels_last=(row,col,channels)
# channels_first=(channel,row,col)
# + papermill={"duration": 0.040368, "end_time": "2020-11-15T07:17:10.554069", "exception": false, "start_time": "2020-11-15T07:17:10.513701", "status": "completed"} tags=[]
# Determine the input tensor layout expected by the Keras backend:
# channels_last = (rows, cols, channels), channels_first = (channels, rows, cols).
img_shape=training_data[0].shape
depth, height, width=3, img_shape[0], img_shape[1]
img_shape=(height, width, depth)
chanDim=-1  # channel axis index for the channels_last layout
if backend.image_data_format() == "channels_first": #Returns a string, either 'channels_first' or 'channels_last'
    img_shape = (depth, height, width)
    chanDim = 1
# + [markdown] papermill={"duration": 0.028232, "end_time": "2020-11-15T07:17:10.610167", "exception": false, "start_time": "2020-11-15T07:17:10.581935", "status": "completed"} tags=[]
# Building Model
# + papermill={"duration": 0.305341, "end_time": "2020-11-15T07:17:10.943992", "exception": false, "start_time": "2020-11-15T07:17:10.638651", "status": "completed"} tags=[]
# CNN: four conv/max-pool stages followed by a small dense classifier head.
model=Sequential()
# Stage 1: 32 filters. NOTE(review): no explicit activation here, so this
# first conv layer is linear — presumably an oversight; confirm intent.
model.add(layers.Conv2D(32,(3,3),input_shape=img_shape))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
# Stages 2-4: 64 -> 128 -> 256 filters, each ReLU + 2x2 max-pool.
model.add(layers.Conv2D(64,(3,3)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Conv2D(128,(3,3)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Conv2D(256,(3,3)))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
# Classifier head: flatten, dropout-regularized dense layer, 2-way softmax.
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(64,activation='relu'))
model.add(layers.Dropout(0.4))
model.add(layers.Dense(2,activation='softmax'))
# One-hot targets -> categorical cross-entropy; Adam with lr=0.001.
adam =tf.keras.optimizers.Adam(0.001)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
# + [markdown] papermill={"duration": 0.02802, "end_time": "2020-11-15T07:17:10.999737", "exception": false, "start_time": "2020-11-15T07:17:10.971717", "status": "completed"} tags=[]
# check the summary of model
# + papermill={"duration": 0.04517, "end_time": "2020-11-15T07:17:11.076530", "exception": false, "start_time": "2020-11-15T07:17:11.031360", "status": "completed"} tags=[]
model.summary()
# + [markdown] papermill={"duration": 0.02851, "end_time": "2020-11-15T07:17:11.135693", "exception": false, "start_time": "2020-11-15T07:17:11.107183", "status": "completed"} tags=[]
# Now we apply data augmentation, training our model with additional samples produced by modifying the existing data set.
# + papermill={"duration": 0.037419, "end_time": "2020-11-15T07:17:11.201474", "exception": false, "start_time": "2020-11-15T07:17:11.164055", "status": "completed"} tags=[]
# augmenting dataset: random rotations, shifts, shear, zoom and horizontal
# flips applied on the fly during training to increase effective data size.
aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
                         horizontal_flip=True, fill_mode="nearest")
# + [markdown] papermill={"duration": 0.028241, "end_time": "2020-11-15T07:17:11.260477", "exception": false, "start_time": "2020-11-15T07:17:11.232236", "status": "completed"} tags=[]
# Finally, we are ready to train our model.
# + papermill={"duration": 307.153173, "end_time": "2020-11-15T07:22:18.443366", "exception": false, "start_time": "2020-11-15T07:17:11.290193", "status": "completed"} tags=[]
# Train on the augmented stream; validate on the untouched validation split.
history = model.fit(aug.flow(training_data, training_target, batch_size=10),
                    epochs=70,
                    validation_data=(valid_data, valid_target),
                    verbose=2,
                    shuffle=True)
# + [markdown] papermill={"duration": 0.085322, "end_time": "2020-11-15T07:22:18.615406", "exception": false, "start_time": "2020-11-15T07:22:18.530084", "status": "completed"} tags=[]
# let's plot a graph between accuracy of training and validation data set
# + papermill={"duration": 0.352114, "end_time": "2020-11-15T07:22:19.053437", "exception": false, "start_time": "2020-11-15T07:22:18.701323", "status": "completed"} tags=[]
# Training vs. validation accuracy per epoch.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
# Axis labels take plain strings; the original passed one-element lists,
# which render literally as "['accuracy']" on the axis.
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['accuracy', 'val_accuracy'])
# + [markdown] papermill={"duration": 0.097943, "end_time": "2020-11-15T07:22:19.239081", "exception": false, "start_time": "2020-11-15T07:22:19.141138", "status": "completed"} tags=[]
# let's plot a graph between loss of training and validation data set
# + papermill={"duration": 0.306744, "end_time": "2020-11-15T07:22:19.632221", "exception": false, "start_time": "2020-11-15T07:22:19.325477", "status": "completed"} tags=[]
# Training vs. validation loss per epoch.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
# Axis labels take plain strings; the original passed one-element lists,
# which render literally as "['loss']" on the axis.
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['loss', 'val_loss'])
# + [markdown] papermill={"duration": 0.087783, "end_time": "2020-11-15T07:22:19.810096", "exception": false, "start_time": "2020-11-15T07:22:19.722313", "status": "completed"} tags=[]
# Find loss and accuracy of our model
# + papermill={"duration": 0.406423, "end_time": "2020-11-15T07:22:20.305089", "exception": false, "start_time": "2020-11-15T07:22:19.898666", "status": "completed"} tags=[]
# Evaluate on the held-out test split. model.evaluate returns
# [loss, accuracy] given the 'accuracy' metric configured at compile time.
loss, accuracy = model.evaluate(testing_data, testing_target)
# Bug fix: the original printed `loss` for both fields.
print('accuracy= ', accuracy, " loss= ", loss)
# + [markdown] papermill={"duration": 0.091227, "end_time": "2020-11-15T07:22:20.486771", "exception": false, "start_time": "2020-11-15T07:22:20.395544", "status": "completed"} tags=[]
# let's use our model on the testing data and get the report of our model
# + papermill={"duration": 0.356652, "end_time": "2020-11-15T07:22:20.933945", "exception": false, "start_time": "2020-11-15T07:22:20.577293", "status": "completed"} tags=[]
# Predict class probabilities for the test set and collapse both
# predictions and one-hot targets to class indices for sklearn metrics.
yhat = model.predict(testing_data)
test_pred=np.argmax(yhat,axis=1)
# NOTE(review): this rebinds testing_target from one-hot to label indices,
# so this cell is not safely re-runnable before model.evaluate above.
testing_target=np.argmax(testing_target,axis=1)
# + papermill={"duration": 0.951922, "end_time": "2020-11-15T07:22:21.976626", "exception": false, "start_time": "2020-11-15T07:22:21.024704", "status": "completed"} tags=[]
from sklearn.metrics import classification_report
import sklearn.metrics as metrics
import itertools
# Per-class precision/recall/F1 on the test split.
report = classification_report(testing_target, test_pred)
print(report)
# + [markdown] papermill={"duration": 0.090268, "end_time": "2020-11-15T07:22:22.160020", "exception": false, "start_time": "2020-11-15T07:22:22.069752", "status": "completed"} tags=[]
# confusion matrix
# + papermill={"duration": 0.107263, "end_time": "2020-11-15T07:22:22.359409", "exception": false, "start_time": "2020-11-15T07:22:22.252146", "status": "completed"} tags=[]
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.RdYlGn):
    """Render a confusion matrix as a colour-mapped image with cell labels.

    Args:
        cm: square array of counts where cm[i, j] is the number of samples
            of true class i predicted as class j.
        classes: tick labels, one per class, in matrix order.
        normalize: if True, display row-normalized rates (each row divided
            by its total) instead of raw counts. Bug fix: this flag was
            previously accepted but ignored.
        title: figure title.
        cmap: matplotlib colormap for the image.
    """
    if normalize:
        # Convert counts to per-true-class rates.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Pick a readable text colour against each cell's background.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], '.2f') if normalize else cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# + papermill={"duration": 0.341381, "end_time": "2020-11-15T07:22:22.792762", "exception": false, "start_time": "2020-11-15T07:22:22.451381", "status": "completed"} tags=[]
# Compute and display the confusion matrix for the test predictions.
confusion = metrics.confusion_matrix(testing_target, test_pred)
plt.figure()
plot_confusion_matrix(confusion, classes=['without_mask','with_mask'], title='Confusion matrix')
|
Face-Mask-Detection/face-mask-detection-using-cnn/face-mask-detection-using-cnn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="uk9iuTbMcP3y" colab_type="text"
# # Desafio 1:
# + id="qiGKcip3cN-j" colab_type="code" outputId="897d6169-e884-4deb-91a2-51295a01fbf2" colab={"base_uri": "https://localhost:8080/", "height": 302}
sns.boxplot(x='color', y='imdb_score', data=color_or_bw)
# + [markdown] id="o8aQeTQ0cN-m" colab_type="text"
# # Desafio 2:
# + id="nA8W1IA5cN-n" colab_type="code" outputId="b301b8fa-310e-4f4e-d974-ecc1ec79c267" colab={"base_uri": "https://localhost:8080/", "height": 52}
imdb_usa.sort_values('lucro').head(1)['movie_title']
# + [markdown] id="VUAm2txQcN-s" colab_type="text"
# # Desafio 3:
# + id="HoWOvSm8cN-t" colab_type="code" outputId="0536a62c-b358-4034-d83c-7a7124f357ee" colab={"base_uri": "https://localhost:8080/", "height": 677}
imdb_usa.query('budget > 0 and gross > 0').sort_values('title_year', ascending=False).head(20)[['movie_title', 'lucro', 'title_year']]
# + id="t79ak040cN-x" colab_type="code" outputId="a8f34c58-26a8-4e49-e0be-52a84d104b0a" colab={"base_uri": "https://localhost:8080/", "height": 313}
lucro_year = imdb_usa[["title_year", "lucro"]].dropna()
sns.scatterplot(x="title_year", y="lucro", data=lucro_year)
# + [markdown] id="dm2That3cN-0" colab_type="text"
# # Desafio 4:
#
# + id="cwHnmoxdcN-0" colab_type="code" outputId="e7bb25fc-809a-4794-8fdc-993eacc50f18" colab={"base_uri": "https://localhost:8080/", "height": 111}
# Most profitable pre-1940 US films: top 2 by 'lucro' (profit).
pre_war = imdb_usa.query('title_year < 1940').sort_values('lucro', ascending=False).dropna()
pre_war[['movie_title', 'lucro']].head(2)
# + [markdown] id="5riNkCbTcN-3" colab_type="text"
# # Desafio 5:
# + id="ycMfroAdcN-4" colab_type="code" outputId="0451556e-d82f-42ef-b09f-21259653b646" colab={"base_uri": "https://localhost:8080/", "height": 81}
gross_director.drop_duplicates('director_name').query('filmes_irmaos == 18')
# + [markdown] id="ab5-l8YbcN-7" colab_type="text"
# # Desafio 5:
# + id="Shrb-akmcN-7" colab_type="code" outputId="124d2679-e663-4dce-cdac-3cecc8a70254" colab={"base_uri": "https://localhost:8080/", "height": 422}
plt.figure(figsize=(12, 6))
sns.scatterplot(x="filmes_irmaos", y="dindin", data=gross_director)
# + id="_imaZ5n-BxHU" colab_type="code" outputId="3b75dc26-9860-4456-89d2-71528e098bf3" colab={"base_uri": "https://localhost:8080/", "height": 614}
gross_director[(gross_director['filmes_irmaos'] > 16) & (gross_director['filmes_irmaos'] < 20)]
# + [markdown] id="8hpgIwR3cN_C" colab_type="text"
# # Desafio 6:
#
# + id="TGXmOBepcN_D" colab_type="code" outputId="eea8adc8-6799-4ad1-a9f3-48448c3c684e" colab={"base_uri": "https://localhost:8080/", "height": 173}
filmes_depois_de_2000 = imdb_usa.query('title_year >= 2000')
filmes_depois_de_2000[["gross", "budget", "lucro", "title_year"]].corr()
# + [markdown] id="vMYMNhjQcN_G" colab_type="text"
# # Desafio: interpretar essa correlação.
# + [markdown] id="-iCQDrMtcN_G" colab_type="text"
# Em comparação à correlação anterior vista, para os filmes de depois de 2000 a correlação entre gross-budget e budget-lucro aumentou enquanto entre gross-lucro diminuiu, todos com pequenas margens de diferenças.
#
# Olhando para budget-lucro, foi um aumento pequeno no valor da correlação desses filmes do imdb, mas que condiz com o esperado de que o budget é cada vez maior com o tempo assim como o lucro, os dois crescendo de forma bem semelhante.
# + [markdown] id="cvO4fxRMcN_H" colab_type="text"
# # Desafio 7:
# + id="szE0k90VcN_H" colab_type="code" outputId="1517f4d2-ecf4-45ce-b82b-20464836455a" colab={"base_uri": "https://localhost:8080/", "height": 487}
sns.pairplot(imdb_usa, y_vars='lucro', x_vars=['budget', 'gross'], height=6, kind='reg')
# + [markdown] id="Bia2B5P6cN_K" colab_type="text"
# # Desafio 8:
# + id="kd-Sphk_cN_K" colab_type="code" outputId="e3dc5b72-7ae8-494b-ce7e-00b201eeea83" colab={"base_uri": "https://localhost:8080/", "height": 142}
imdb_usa[["gross", "budget", "imdb_score"]].corr()
# + id="cUNULTQVcN_N" colab_type="code" outputId="a9953a30-8333-4ef0-a59c-745713991e5d" colab={"base_uri": "https://localhost:8080/", "height": 476}
sns.pairplot(imdb_usa, y_vars='imdb_score', x_vars=['budget', 'gross'], height=6, kind='reg')
# + id="SQ4yA8JlcN_Q" colab_type="code" outputId="06636b4e-7991-4645-9d32-aa07b3ff7d24" colab={"base_uri": "https://localhost:8080/", "height": 326}
# Mean number of votes per release year, joined back onto each film row
# under the column name 'votos_por_ano'.
votos_por_ano = imdb_usa.groupby('title_year')['num_voted_users'].mean()
imdb_usa_votos_por_ano = imdb_usa.join(votos_por_ano, on='title_year', rsuffix='rs')
imdb_usa_votos_por_ano.rename(columns={'num_voted_usersrs': 'votos_por_ano'}, inplace=True)
imdb_usa_votos_por_ano.head()
# + id="7akdxLXfcN_S" colab_type="code" outputId="77cdad01-a69c-40ed-aa71-9805936ce95c" colab={"base_uri": "https://localhost:8080/", "height": 111}
imdb_usa_votos_por_ano[['title_year', 'votos_por_ano']].corr()
# + id="BMQNUUtdcN_W" colab_type="code" outputId="70a6597a-8e29-4a91-a537-031e9370a4dd" colab={"base_uri": "https://localhost:8080/", "height": 411}
plt.figure(figsize=(12, 6))
sns.scatterplot(x='title_year', y='votos_por_ano', data=imdb_usa_votos_por_ano)
# + [markdown] id="dxDtVcS6Cnju" colab_type="text"
# # Desafio 9:
#
# O desafio nove era de interpretação, compartilhe sua solução com outros alunos e debata as soluções.
# + id="ao_4-B4GCc31" colab_type="code" colab={}
|
aula-03/Desafios_aula03.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
# Copyright 2021 Google LLC
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# Author(s): <NAME> (<EMAIL>) and <NAME> (<EMAIL>)
# -
# <a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a>
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/figures//chapter20_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Cloning the pyprobml repo
# !git clone https://github.com/probml/pyprobml
# %cd pyprobml/scripts
# # Installing required software (This may take few minutes)
# !apt install octave -qq > /dev/null
# !apt-get install liboctave-dev -qq > /dev/null
# ## Figure 20.1:
# An illustration of PCA where we project from 2d to 1d. Circles are the original data points, crosses are the reconstructions. The red star is the data mean.
# Figure(s) generated by [pcaDemo2d.py](https://github.com/probml/pyprobml/blob/master/scripts/pcaDemo2d.py)
# %run ./pcaDemo2d.py
# ## Figure 20.2:
# An illustration of PCA applied to MNIST digits from class 9. Grid points are at the 5, 25, 50, 75, 95 \% quantiles of the data distribution along each dimension. The circled points are the closest projected images to the vertices of the grid. Adapted from Figure 14.23 of \citep HastieBook .
# Figure(s) generated by [pca_digits.py](https://github.com/probml/pyprobml/blob/master/scripts/pca_digits.py)
# %run ./pca_digits.py
# ## Figure 20.3:
# a) Some randomly chosen $64 \times 64$ pixel images from the Olivetti face database. (b) The mean and the first three PCA components represented as images.
# Figure(s) generated by [pcaImageDemo.m](https://github.com/probml/pmtk3/blob/master/demos/pcaImageDemo.m)
# !octave -W pcaImageDemo.m >> _
# ## Figure 20.4:
# Illustration of the variance of the points projected onto different 1d vectors. $v_1$ is the first principal component, which maximizes the variance of the projection. $v_2$ is the second principal component which is direction orthogonal to $v_1$. Finally $v'$ is some other vector in between $v_1$ and $v_2$. Adapted from Figure 8.7 of \citep Geron2019 .
# Figure(s) generated by [pca_projected_variance.py](https://github.com/probml/pyprobml/blob/master/scripts/pca_projected_variance.py)
# %run ./pca_projected_variance.py
# ## Figure 20.5:
# Effect of standardization on PCA applied to the height/weight dataset. (Red=female, blue=male.) Left: PCA of raw data. Right: PCA of standardized data.
# Figure(s) generated by [pcaStandardization.py](https://github.com/probml/pyprobml/blob/master/scripts/pcaStandardization.py)
# %run ./pcaStandardization.py
# ## Figure 20.6:
# Reconstruction error on MNIST vs number of latent dimensions used by PCA. (a) Training set. (b) Test set.
# Figure(s) generated by [pcaOverfitDemo.m](https://github.com/probml/pmtk3/blob/master/demos/pcaOverfitDemo.m)
# !octave -W pcaOverfitDemo.m >> _
# ## Figure 20.7:
# (a) Scree plot for training set, corresponding to \cref fig:pcaErr (a). (b) Fraction of variance explained.
# Figure(s) generated by [pcaOverfitDemo.m](https://github.com/probml/pmtk3/blob/master/demos/pcaOverfitDemo.m)
# !octave -W pcaOverfitDemo.m >> _
# ## Figure 20.8:
# Profile likelihood corresponding to PCA model in \cref fig:pcaErr (a).
# Figure(s) generated by [pcaOverfitDemo.m](https://github.com/probml/pmtk3/blob/master/demos/pcaOverfitDemo.m)
# !octave -W pcaOverfitDemo.m >> _
# ## Figure 20.10:
# Illustration of EM for PCA when $D=2$ and $L=1$. Green stars are the original data points, black circles are their reconstructions. The weight vector $\mathbf w $ is represented by blue line. (a) We start with a random initial guess of $\mathbf w $. The E step is represented by the orthogonal projections. (b) We update the rod $\mathbf w $ in the M step, keeping the projections onto the rod (black circles) fixed. (c) Another E step. The black circles can 'slide' along the rod, but the rod stays fixed. (d) Another M step. Adapted from Figure 12.12 of \citep BishopBook .
# Figure(s) generated by [pcaEmStepByStep.m](https://github.com/probml/pmtk3/blob/master/demos/pcaEmStepByStep.m)
# !octave -W pcaEmStepByStep.m >> _
# ## Figure 20.12:
# Mixture of PPCA models fit to a 2d dataset, using $L=1$ latent dimensions and $K=1$ and $K=10$ mixture components.
# Figure(s) generated by [mixPpcaDemoNetlab.m](https://github.com/probml/pmtk3/blob/master/demos/mixPpcaDemoNetlab.m)
# !octave -W mixPpcaDemoNetlab.m >> _
# ## Figure 20.14:
# (a) 150 synthetic 16 dimensional bit vectors. (b) The 2d embedding learned by binary PCA, fit using variational EM. We have color coded points by the identity of the true ``prototype'' that generated them. (c) Predicted probability of being on. (d) Thresholded predictions.
# Figure(s) generated by [binaryFaDemoTipping.m](https://github.com/probml/pmtk3/blob/master/demos/binaryFaDemoTipping.m)
# !octave -W binaryFaDemoTipping.m >> _
# ## Figure 20.30:
# Illustration of some data generated from low-dimensional manifolds. (a) The 2d Swiss-roll manifold embedded into 3d.
# Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
# %run ./manifold_swiss_sklearn.py
# %run ./manifold_digits_sklearn.py
# ## Figure 20.31:
# Metric MDS applied to (a) Swiss roll.
# Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
# %run ./manifold_swiss_sklearn.py
# %run ./manifold_digits_sklearn.py
# ## Figure 20.33:
# Isomap applied to (a) Swiss roll.
# Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
# %run ./manifold_swiss_sklearn.py
# %run ./manifold_digits_sklearn.py
# ## Figure 20.34:
# (a) Noisy version of Swiss roll data. We perturb each point by adding $\mathcal N (0, 0.5^2)$ noise. (b) Results of Isomap applied to this data.
# Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py)
# %run ./manifold_swiss_sklearn.py
# ## Figure 20.35:
# Visualization of the first 8 kernel principal component basis functions derived from some 2d data. We use an RBF kernel with $\sigma ^2=0.1$.
# Figure(s) generated by [kpcaScholkopf.m](https://github.com/probml/pmtk3/blob/master/demos/kpcaScholkopf.m)
# !octave -W kpcaScholkopf.m >> _
# ## Figure 20.36:
# Kernel PCA applied to (a) Swiss roll.
# Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
# %run ./manifold_swiss_sklearn.py
# %run ./manifold_digits_sklearn.py
# ## Figure 20.37:
# LLE applied to (a) Swiss roll.
# Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
# %run ./manifold_swiss_sklearn.py
# %run ./manifold_digits_sklearn.py
# ## Figure 20.38:
# Laplacian eigenmaps applied to (a) Swiss roll.
# Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
# %run ./manifold_swiss_sklearn.py
# %run ./manifold_digits_sklearn.py
# ## Figure 20.41:
# tSNE applied to (a) Swiss roll.
# Figure(s) generated by [manifold_swiss_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_swiss_sklearn.py) [manifold_digits_sklearn.py](https://github.com/probml/pyprobml/blob/master/scripts/manifold_digits_sklearn.py)
# %run ./manifold_swiss_sklearn.py
# %run ./manifold_digits_sklearn.py
|
notebooks/figures/chapter20_figures.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # The Scalar Wave Equation: Creating an Einstein Toolkit thorn
#
# ## Author: <NAME>
# ### Formatting improvements courtesy <NAME>
#
# [comment]: <> (Abstract: TODO)
#
# [comment]: <> (Module Status and Validation Notes: TODO)
#
# ### NRPy+ Source Code for this module:
# * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py) [\[**tutorial**\]](Tutorial-ScalarWave.ipynb)
# * [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) [\[**tutorial**\]](Tutorial-ScalarWave.ipynb)
#
# ## Introduction:
# This tutorial module focuses on how to construct an Einstein Toolkit (ETK) thorn (module) that will set up the expressions for the right-hand sides of $\partial_t u$ and $\partial_t v$ for the scalar wave equation, as defined in the [Tutorial-ScalarWave.ipynb](Tutorial-ScalarWave.ipynb) NRPy+ tutorial module. In that module, we used NRPy+ to construct the SymPy expressions for these scalar wave "time-evolution equations". This thorn is largely based on and should function similarly to the $\text{WaveToyC}$ thorn included in the Einstein Toolkit (ETK) $\text{CactusWave}$ arrangement.
#
# When interfaced properly with the ETK, this module will propagate the initial data for $u$ and $v$ defined in [IDScalarWaveNRPy](Tutorial-ETK_thorn-IDScalarWaveNRPy.ipynb) forward in time by integrating the equations for $\partial_t u$ and $\partial_t v$ subject to spatial boundary conditions. The time evolution itself is handled by the $\text{MoL}$ (Method of Lines) thorn in the $\text{CactusNumerical}$ arrangement, and the boundary conditions by the $\text{Boundary}$ thorn in the $\text{CactusBase}$ arrangement.
#
# Similar to the [IDScalarWaveNRPy](Tutorial-ETK_thorn-IDScalarWaveNRPy.ipynb) module, we will construct the WaveToyNRPy module in two steps.
#
# 1. Call on NRPy+ to convert the SymPy expressions for the evolution equations into one C-code kernel.
# 1. Write the C code and linkages to the Einstein Toolkit infrastructure (i.e., the .ccl files) to complete this Einstein Toolkit module.
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This module is organized as follows
#
# 1. [Step 1](#initializenrpy): Call on NRPy+ to convert the SymPy expressions for the Scalar Wave evolution equations' RHSs into a C-code kernel
# 1. [Step 2](#etk): Interfacing with the Einstein Toolkit
# 1. [Step 2.a](#etkc): Constructing the Einstein Toolkit C-code calling functions that include the C code kernels
# 1. [Step 2.b](#cclfiles): CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure
# 1. [Step 2.c](#etk_list): Add the C file to Einstein Toolkit compilation list
# 1. [Step 3](#code_validation): Code Validation, Convergence Tests
# 1. [Step 4](#latex_pdf_output): Output this module to $\LaTeX$-formatted PDF file
# <a id='initializenrpy'></a>
#
# # Step 1: Call on NRPy+ to convert the SymPy expressions for the Scalar Wave evolution equations' RHSs into a C-code kernel \[Back to [top](#toc)\]
# $$\label{initializenrpy}$$
#
# After importing the core modules, since we are writing an ETK thorn, we'll need to set `"grid::GridFuncMemAccess"` to `"ETK"`. SymPy expressions for the scalar wave evolution equations' RHSs are written inside [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py), and we simply import them for use here.
#
# +
# Step 1a: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
import loop
# Step 1b: This is an Einstein Toolkit (ETK) thorn. Here we
# tell NRPy+ that gridfunction memory access will
# therefore be in the "ETK" style.
par.set_parval_from_str("grid::GridFuncMemAccess","ETK")
# Step 1c: Set the finite differencing order.
# IMPORTANT: If you set FD_CENTDERIVS_ORDER to 8, be sure to use
# the parameter files and scripts in the directory:
# WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTest
# If you set FD_CENTDERIVS_ORDER to 4, be sure to use
# the parameter files and scripts in the directory:
# WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4) # currently fourth order; see IMPORTANT note above for eighth order
# Step 1d: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
# This sets swrhs.uu_rhs and swrhs.vv_rhs.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# -
# Now, we need to output these expressions to C code kernel, which will be housed within a file named "ScalarWave_RHSs.h". This "header" file will be #include'd in the main C file below. Also, so that we can check that the number of ghost zones in the ETK numerical grid is set properly, we will also output the NRPy+ `finite_difference::FD_CENTDERIVS_ORDER parameter` to a `#`define statement within `NRPy_params.h`.
#
# +
# Step 2: Register uu_rhs and vv_rhs gridfunctions so
# they can be written to by NRPy.
uu_rhs,vv_rhs = gri.register_gridfunctions("AUX",["uu_rhs","vv_rhs"])
# Step 3: Create the C code output kernel: finite-difference C code for the
# two RHS expressions, wrapped in an OpenMP-parallelized loop over the
# grid interior (ghost zones excluded on every face).
scalar_RHSs_to_print = [\
    lhrh(lhs=gri.gfaccess("out_gfs","uu_rhs"),rhs=swrhs.uu_rhs),\
    lhrh(lhs=gri.gfaccess("out_gfs","vv_rhs"),rhs=swrhs.vv_rhs),]
scalar_RHSs_CcodeKernel = fin.FD_outputC("returnstring",scalar_RHSs_to_print)
scalar_RHSs_looped = loop.loop(["i2","i1","i0"],["cctk_nghostzones[2]","cctk_nghostzones[1]","cctk_nghostzones[0]"],\
                               ["cctk_lsh[2]-cctk_nghostzones[2]","cctk_lsh[1]-cctk_nghostzones[1]",\
                                "cctk_lsh[0]-cctk_nghostzones[0]"],\
                               ["1","1","1"],["#pragma omp parallel for","",""],"",scalar_RHSs_CcodeKernel)
# Step 4: Create directories for the thorn if they don't exist.
# !mkdir WaveToyNRPy 2>/dev/null # 2>/dev/null: Don't throw an error if the directory already exists.
# !mkdir WaveToyNRPy/src 2>/dev/null # 2>/dev/null: Don't throw an error if the directory already exists.
# Step 5: Write the C code kernel to file, plus the FD order as a #define
# so the thorn can verify the ETK ghost-zone count at runtime.
with open("WaveToyNRPy/src/ScalarWave_RHSs.h", "w") as file:
    file.write(str(scalar_RHSs_looped))
with open("WaveToyNRPy/src/NRPy_params.h", "w") as file:
    file.write("#define FD_CENTDERIVS_ORDER "+str(par.parval_from_str("finite_difference::FD_CENTDERIVS_ORDER"))+"\n")
# -
# <a id='etk'></a>
#
# # Step 2: Interfacing with the Einstein Toolkit \[Back to [top](#toc)\]
# $$\label{etk}$$
#
# <a id='etkc'></a>
#
# ## Step 2.a: Constructing the Einstein Toolkit C-code calling functions that include the C code kernels \[Back to [top](#toc)\]
# $$\label{etkc}$$
#
# Now that we have generated the C code kernel `ScalarWave_RHSs.h` and the parameters file `NRPy_params.h`, we will need to write C code to make use of these files. To do this, we can simply follow the example within the [IDScalarWaveNRPy tutorial module](Tutorial-ETK_thorn-IDScalarWaveNRPy.ipynb). Functions defined by these files will be called by the Einstein Toolkit scheduler (specified in `schedule.ccl` below).
# +
# %%writefile WaveToyNRPy/src/WaveToyNRPy.c
#include <math.h>
#include <stdio.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "Symmetry.h"
#include "NRPy_params.h"
/* Abort (CCTK_WARN level 0) unless the ETK ghost-zone width matches half
 * the NRPy+ finite-differencing order in every direction, since the
 * generated kernel assumes exactly that stencil halo. */
void WaveToyNRPy_check_params(CCTK_ARGUMENTS) {
  DECLARE_CCTK_ARGUMENTS;
  if( (cctk_nghostzones[0] != FD_CENTDERIVS_ORDER/2) ||
      (cctk_nghostzones[1] != FD_CENTDERIVS_ORDER/2) ||
      (cctk_nghostzones[2] != FD_CENTDERIVS_ORDER/2) ) {
    char error_string[200];
    sprintf(error_string,"WaveToyNRPy_check_params: expected ghost_size to be set to %d , but found ghostxyz = %d %d %d\n",
            FD_CENTDERIVS_ORDER/2,cctk_nghostzones[0],cctk_nghostzones[1],cctk_nghostzones[2]);
    CCTK_WARN (0, error_string);
  }
}
/* Evaluate the scalar-wave RHS gridfunctions (uu_rhs, vv_rhs) at interior
 * points. The NRPy+-generated finite-difference kernel is #include'd from
 * ScalarWave_RHSs.h and uses the inverse grid spacings defined here. */
void WaveToyNRPy_set_rhs(CCTK_ARGUMENTS) {
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  const CCTK_REAL invdx0 = 1.0 / (CCTK_DELTA_SPACE(0));
  const CCTK_REAL invdx1 = 1.0 / (CCTK_DELTA_SPACE(1));
  const CCTK_REAL invdx2 = 1.0 / (CCTK_DELTA_SPACE(2));
  #include "ScalarWave_RHSs.h"
}
/* Boundary Condition code adapted from WaveToyC thorn in ETK, implementing built-in
* ETK BC functionality
*/
/* Select the boundary condition named by the 'bound' parameter for the
 * evolved grid functions uuGF and vvGF, via the ETK Boundary thorn.
 * "zero" is translated to the built-in "scalar" BC; any other
 * unrecognized value leaves bctype NULL, so no BC is selected at all
 * (param.ccl restricts 'bound' to the values handled here). */
void WaveToyNRPy_SelectBCs(CCTK_ARGUMENTS)
{
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  const char *bctype;
  bctype = NULL;
  if (CCTK_EQUALS(bound,"flat") || CCTK_EQUALS(bound,"static") ||
      CCTK_EQUALS(bound,"radiation") || CCTK_EQUALS(bound,"robin") ||
      CCTK_EQUALS(bound,"none"))
  {
    bctype = bound;
  }
  else if (CCTK_EQUALS(bound,"zero"))
  {
    bctype = "scalar";
  }
  /* Uses all default arguments, so invalid table handle -1 can be passed */
  if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,
                          "WaveToyNRPy::uuGF", bctype) < 0)
  {
    CCTK_WARN (0, "ScalarWave_Boundaries: Error selecting boundary condition");
  }
  if (bctype && Boundary_SelectVarForBC (cctkGH, CCTK_ALL_FACES, 1, -1,
                          "WaveToyNRPy::vvGF", bctype) < 0)
  {
    CCTK_WARN (0, "ScalarWave_Boundaries: Error selecting boundary condition");
  }
}
/* Register reflection symmetries for the evolved grid functions with the
 * CartGrid3D symmetry interface: sym = +1 in all three directions marks
 * uuGF and vvGF as even under reflection across each coordinate plane. */
void WaveToyNRPy_InitSymBound(CCTK_ARGUMENTS)
{
  DECLARE_CCTK_ARGUMENTS;
  int sym[3];
  sym[0] = 1;
  sym[1] = 1;
  sym[2] = 1;
  SetCartSymVN(cctkGH, sym,"WaveToyNRPy::uuGF");
  SetCartSymVN(cctkGH, sym,"WaveToyNRPy::vvGF");
  return;
}
/* Register the evolved grid functions (uuGF, vvGF) and their right-hand
 * sides with the Method-of-Lines thorn, so MoL drives their time
 * integration.  NOTE(review): ierr accumulates the MoL return codes but
 * is never checked — failures are silently ignored here. */
void WaveToyNRPy_RegisterVars(CCTK_ARGUMENTS)
{
  DECLARE_CCTK_ARGUMENTS;
  DECLARE_CCTK_PARAMETERS;
  CCTK_INT ierr CCTK_ATTRIBUTE_UNUSED = 0;
  /* Register all the evolved grid functions with MoL */
  ierr += MoLRegisterEvolved(CCTK_VarIndex("WaveToyNRPy::uuGF"), CCTK_VarIndex("WaveToyNRPy::uu_rhsGF"));
  ierr += MoLRegisterEvolved(CCTK_VarIndex("WaveToyNRPy::vvGF"), CCTK_VarIndex("WaveToyNRPy::vv_rhsGF"));
  /* Register all the evolved Array functions with MoL */
  return;
}
# -
# <a id='cclfiles'></a>
#
# ## Step 2.b: CCL files - Define how this module interacts and interfaces with the larger Einstein Toolkit infrastructure \[Back to [top](#toc)\]
# $$\label{cclfiles}$$
#
# Writing a module ("thorn") within the Einstein Toolkit requires that three "ccl" files be constructed, all in the root directory of the thorn:
#
# 1. `interface.ccl`: defines the gridfunction groups needed, and provides keywords denoting what this thorn provides and what it should inherit from other thorns. This file governs the interaction between this thorn and others; more information can be found in the [official Einstein Toolkit documentation](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-260000C2.2).
# With "implements", we give our thorn its unique name. By "inheriting" other thorns, we tell the Toolkit that we will rely on variables that exist within those functions. Then, we tell the toolkit that we want the scalars `uuGF` and `vvGF` to be visible to other thorns by using the keyword "public".
# +
# %%writefile WaveToyNRPy/interface.ccl
implements: WaveToyNRPy
inherits: Boundary grid
USES INCLUDE: loopcontrol.h
USES INCLUDE: Symmetry.h
USES INCLUDE: Boundary.h
CCTK_INT FUNCTION MoLRegisterEvolved(CCTK_INT IN EvolvedIndex, CCTK_INT IN RHSIndex)
USES FUNCTION MoLRegisterEvolved
CCTK_INT FUNCTION GetBoundarySpecification(CCTK_INT IN size, CCTK_INT OUT ARRAY nboundaryzones, CCTK_INT OUT ARRAY is_internal, CCTK_INT OUT ARRAY is_staggered, CCTK_INT OUT ARRAY shiftout)
USES FUNCTION GetBoundarySpecification
CCTK_INT FUNCTION SymmetryTableHandleForGrid(CCTK_POINTER_TO_CONST IN cctkGH)
USES FUNCTION SymmetryTableHandleForGrid
CCTK_INT FUNCTION Boundary_SelectGroupForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN group_name, CCTK_STRING IN bc_name)
USES FUNCTION Boundary_SelectGroupForBC
CCTK_INT FUNCTION Boundary_SelectVarForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN var_name, CCTK_STRING IN bc_name)
USES FUNCTION Boundary_SelectVarForBC
public:
cctk_real scalar_fields_rhs type = GF Timelevels=3 tags='tensortypealias="Scalar"'
{
uu_rhsGF,vv_rhsGF
} "The evolved scalar fields"
public:
cctk_real scalar_fields type = GF Timelevels=3 tags='tensortypealias="Scalar"'
{
uuGF,vvGF
} "The evolved scalar fields"
# -
# 2. `param.ccl`: specifies free parameters within the thorn, enabling them to be set at runtime. It is required to provide allowed ranges and default values for each parameter. More information on this file's syntax can be found in the [official Einstein Toolkit documentation](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-265000C2.3). A number of parameters are defined, and more parameters can be easily added in later versions. We also set the number of timelevels we will store in memory.
# +
# %%writefile WaveToyNRPy/param.ccl
shares: MethodOfLines
USES CCTK_INT MoL_Num_Evolved_Vars
USES CCTK_INT MoL_Num_ArrayEvolved_Vars
restricted:
CCTK_INT SimpleWave_MaxNumEvolvedVars "Number of evolved variables used by this thorn" ACCUMULATOR-BASE=MethodofLines::MoL_Num_Evolved_Vars STEERABLE=RECOVER
{
2:2 :: "Number of evolved variables used by this thorn"
} 2
restricted:
CCTK_INT SimpleWave_MaxNumArrayEvolvedVars "Number of Array evolved variables used by this thorn" ACCUMULATOR-BASE=MethodofLines::MoL_Num_ArrayEvolved_Vars STEERABLE=RECOVER
{
0:0 :: "Number of Array evolved variables used by this thorn"
} 0
restricted:
KEYWORD bound "Type of boundary condition to use"
{
"flat" :: "Flat (von Neumann, n grad phi = 0) boundary condition"
"static" :: "Static (Dirichlet, dphi/dt=0) boundary condition"
"radiation" :: "Radiation boundary condition"
"robin" :: "Robin (phi(r) = C/r) boundary condition"
"zero" :: "Zero (Dirichlet, phi=0) boundary condition"
"none" :: "Apply no boundary condition"
} "static"
restricted:
CCTK_INT timelevels "Number of active timelevels" STEERABLE=RECOVER
{
0:3 :: ""
} 3
restricted:
CCTK_REAL wavespeed "The speed at which the wave propagates"
{
*:* :: "Wavespeed as a multiple of c"
} 1.0
# -
# 3. `schedule.ccl`: allocates storage for gridfunctions, defines how the thorn's functions should be scheduled in a broader simulation, and specifies the regions of memory written to or read from gridfunctions. `schedule.ccl`'s official documentation may be found [here](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-268000C2.4).
#
# We first assign storage for both scalar gridfunctions, and then specify the standardized ETK "scheduling bins" in which we want each of our thorn's functions to run.
# +
# %%writefile WaveToyNRPy/schedule.ccl
STORAGE: scalar_fields_rhs[timelevels]
STORAGE: scalar_fields[timelevels]
schedule WaveToyNRPy_InitSymBound at BASEGRID
{
LANG: C
OPTIONS: global
} "Schedule symmetries"
schedule WaveToyNRPy_check_params at CCTK_PARAMCHECK
{
LANG: C
OPTIONS: global
} "Check sanity of parameters"
schedule WaveToyNRPy_set_rhs as WaveToy_Evolution IN MoL_CalcRHS
{
LANG: C
READS: uuGF(Everywhere)
READS: vvGF(Everywhere)
WRITES: uu_rhsGF(Interior)
  WRITES: vv_rhsGF(Interior)
# FIXME: Add syncs
} "Evolution of 3D wave equation"
schedule WaveToyNRPy_SelectBCs in MoL_PostStep
{
LANG: C
OPTIONS: level
SYNC: scalar_fields
} "Boundaries of 3D wave equation"
schedule GROUP ApplyBCs as WaveToyNRPy_ApplyBCs in MoL_PostStep after WaveToyNRPy_SelectBCs
{
} "Apply boundary conditions"
schedule GROUP ApplyBCs as WaveToyNRPy_ApplyBCs at POSTRESTRICT
{
} "Apply boundary conditions"
schedule WaveToyNRPy_RegisterVars in MoL_Register
{
LANG: C
OPTIONS: meta
} "Register Variables for MoL"
# -
# <a id='etk_list'></a>
#
# ## Step 2.c: Add the C file to Einstein Toolkit compilation list \[Back to [top](#toc)\]
# $$\label{etk_list}$$
#
# We will also need `make.code.defn`, which indicates the list of files that need to be compiled. This thorn only has the one C file to compile.
# %%writefile WaveToyNRPy/src/make.code.defn
SRCS = WaveToyNRPy.c
# <a id='code_validation'></a>
#
# # Step 3: Code Validation, Convergence Tests \[Back to [top](#toc)\]
# $$\label{code_validation}$$
#
# We have performed a number of convergence tests with $\text{WaveToyNRPy}$ and $\text{IDScalarWaveNRPy}$ within the ETK, which are presented below.
#
# **One and three-dimensional scalar wave equation code tests, adopting fourth-order finite differencing, coupled to RK4 method-of-lines for time integration**
#
# Inside the directory *`WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/`* are the files used for this convergence test:
# 1. **1D-planewave-\*.par** : ETK parameter files needed for performing the 1D tests. These parameter files set up a sinusoidal wave propagating along the x-axis of a numerical grid that is of minimal extent in the y and z directions, but extends from -12 to +12 along the x-axis (in units of $\omega=k=c=1$). The parameter files are identical, except one has grid resolution that is twice as high (so the errors should drop in the higher resolution case by a factor of $2^4$, since we adopt fourth-order-convergent timestepping and spatial finite differencing.)
# 1. **planewave_along_3D_diagonal\*.par** : ETK parameter files needed for performing the 3D tests. These parameter files set up a sinusoidal plane wave propagating along the x=y=z diagonal of a 3D numerical grid that extends from -12 to +12 along the x-, y-, and z-axis (in units of $\omega=k=c=1$). The parameter files are identical, except one has grid resolution that is twice as high (so the errors should drop in the higher resolution case by a factor of $2^4$, since we adopt fourth-order-convergent timestepping and spatial finite differencing.)
# 1. **runscript.sh** : Runs the cactus executable (assumed to be named *cactus_etilgrmhd-FD4*) for all of the above parameter files.
# 1. **convert_IOASCII_1D_to_gnuplot.sh** : Used by **runscript.sh** to convert the 1D output from the execution into a format that [gnuplot](http://gnuplot.info/) can recognize.
# 1. **gnuplot_script** : Script for creating code validation convergence plots with [gnuplot](http://gnuplot.info/).
#
# **Fourth-order code validation test results:**
#
# We start with the 1D tests. The plot below shows the discrepancy between numerical and exact solutions to the scalar wave equation at two different resolutions: purple is low resolution ($\Delta x_{\rm low}=0.4$) and green is high resolution ($\Delta x_{\rm high}=0.2$). Since this test adopts fourth-order finite differencing for spatial derivatives and fourth-order Runge-Kutta (RK4) for timestepping, we would expect this error to drop by a factor of approximately $(\Delta x_{\rm low}/\Delta x_{\rm high})^4 = (0.4/0.2)^4 = 2^4=16$ when going from low to high resolution, and after rescaling the error in the high-resolution case by 16, we see that indeed it overlaps the low-resolution result quite nicely, confirming fourth-order convergence.
from IPython.display import Image
Image("./WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/out-RK4-FD4-1D.png", width=600, height=600)
# Next we perform the same test, but with a plane wave propagating along the x=y=z diagonal. This test verifies that errors in propagation along the y and z directions converge to zero as expected as well.
Image("./WaveToyNRPy/example_parfiles/4thOrder_ConvergenceTests/out-RK4-FD4-3D.png", width=600, height=600)
# #### Three-dimensional scalar wave equation code tests, adopting *eighth*-order finite differencing, coupled to *RK8* method-of-lines for time integration
#
# Inside the directory *`WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTests/`* are the files used for this convergence test:
# 1. **planewave_along_3D_diagonal\*.par** : ETK parameter files needed for performing the 3D tests. These parameter files set up a sinusoidal plane wave propagating along the x=y=z diagonal of a 3D numerical grid that extends from -12 to +12 along the x-, y-, and z-axis (in units of $\omega=k=c=1$). The parameter files are identical, except one has grid resolution that is twice as high (so the errors should drop in the higher resolution case by a factor of $2^8$, since we adopt eighth-order-convergent timestepping and spatial finite differencing.)
# 1. **runscript.sh** : Runs the cactus executable (assumed to be named *cactus_etilgrmhd-FD8*) for all of the above parameter files.
# 1. **convert_IOASCII_1D_to_gnuplot.sh** : Used by **runscript.sh** to convert the 1D output from the execution into a format that [gnuplot](http://gnuplot.info/) can recognize.
# 1. **gnuplot_script** : Script for creating code validation convergence plots with [gnuplot](http://gnuplot.info/).
#
# **Eighth-order code validation test results:**
#
# The plot below shows the discrepancy between numerical and exact solutions to the scalar wave equation at two different resolutions: purple is low resolution ($\Delta x_{\rm low}=0.4$) and green is high resolution ($\Delta x_{\rm high}=0.2$). Since this test adopts **eighth**-order finite differencing for spatial derivatives and **eighth**-order Runge-Kutta (RK8) for timestepping, we would expect this error to drop by a factor of approximately $(\Delta x_{\rm low}/\Delta x_{\rm high})^8 = (0.4/0.2)^8 = 2^8=256$ when going from low to high resolution, and after rescaling the error in the high-resolution case by 256, we see that indeed it overlaps the low-resolution result quite nicely, confirming eighth-order convergence.
Image("WaveToyNRPy/example_parfiles/8thOrder_ConvergenceTest/FD8-RK8__test_output_plot.png", width=600, height=600)
# <a id='latex_pdf_output'></a>
#
# # Step 4: Output this module to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-ETK_thorn-WaveToyNRPy.pdf](Tutorial-ETK_thorn-WaveToyNRPy.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-ETK_thorn-WaveToyNRPy.ipynb
# !pdflatex -interaction=batchmode Tutorial-ETK_thorn-WaveToyNRPy.tex
# !pdflatex -interaction=batchmode Tutorial-ETK_thorn-WaveToyNRPy.tex
# !pdflatex -interaction=batchmode Tutorial-ETK_thorn-WaveToyNRPy.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
|
Tutorial-ETK_thorn-WaveToyNRPy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # I have tested various regression models for car pricing prediction dataset, below code represents EDA, Feature Selection and Model training
#
# Do check it out as I embark on building machine learning models!
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy
import seaborn as sns
# %matplotlib inline
# -
# Load the BMW used-car listings and take a first look at the data.
df = pd.read_csv('bmw.csv')
df.head()
df.info()
# Levels of the categorical columns.
print(df.transmission.unique())
print(df.model.unique())
print(df.fuelType.unique())
# numeric_only=True: on pandas >= 2.0, corr() raises on non-numeric columns
# (model/transmission/fuelType here) instead of silently dropping them.
df.corr(numeric_only=True)
# Price against each candidate numeric predictor.
# Bug fix: the original wrote `plt.show` (bare attribute access) — the
# function was never actually called.
plt.scatter(x=df['year'], y=df['price'])
plt.show()
plt.scatter(x=df['mileage'], y=df['price'])
plt.show()
plt.scatter(x=df['engineSize'], y=df['price'])
plt.show()
# Price distribution per categorical level.
plt.figure(figsize=(16, 5))
sns.stripplot(x="model", y="price", data=df)
plt.show()
df.describe()
plt.figure(figsize=(16, 5))
sns.stripplot(x="transmission", y="price", data=df)
plt.show()
plt.figure(figsize=(16, 5))
sns.stripplot(x="fuelType", y="price", data=df)
plt.show()
# # Model 1
#
# +
# Columns kept for modelling (model/tax/mpg are dropped below).
features = ['year', 'transmission', 'mileage', 'fuelType', 'engineSize']
df2 = df.drop(['model', 'tax', 'mpg'], axis=1)
print(df2)
# Integer-encode the categorical columns in place.  The original rebuilt df2
# with an identical drop() before mapping; computing it once is equivalent.
# NOTE(review): these are arbitrary ordinal codes fed to a linear model —
# one-hot encoding (pd.get_dummies) would be more appropriate. TODO confirm.
fuel_map = {'Diesel': 0, 'Petrol': 1, 'Other': 2, 'Hybrid': 3, 'Electric': 4}
df2['fuelType'] = df2['fuelType'].map(fuel_map)
trans_map = {'Automatic': 0, 'Manual': 1, 'Semi-Auto': 2}
df2['transmission'] = df2['transmission'].map(trans_map)
print(df2)
# +
# Model 1: ordinary least squares on all five encoded features.
X = df2[['year', 'mileage', 'transmission', 'fuelType', 'engineSize']]
y = df2['price']
from sklearn import linear_model
regr = linear_model.LinearRegression()
regr.fit(X, y)
print(regr.coef_)
# Predict a 2021 car, 100k miles, semi-auto, diesel, 2.0-litre engine.
# Passing a DataFrame with the training column names (rather than a bare
# nested list) avoids sklearn's "X does not have valid feature names" warning.
predicted_price = regr.predict(
    pd.DataFrame([[2021, 100000, 2, 0, 2]], columns=X.columns))
print(predicted_price)
# +
# Evaluate model 1 with R^2 on rows 7000 onward.
# NOTE(review): these rows were part of the data the model was fitted on
# above, so this measures in-sample fit, not generalisation — hold the
# rows out *before* calling fit() for an honest test score.
test_df = df2.loc[7000:]
y_test = test_df['price']
X_test = test_df[['year','mileage','transmission','fuelType','engineSize']]
y_pred = regr.predict(X_test)
print(y_pred)
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_pred)
print(r2)
# -
# # Model 2
# +
# Model 2: the same linear regression via statsmodels' formula API —
# summary() additionally reports per-coefficient std errors, t-stats and
# p-values, which sklearn's LinearRegression does not expose.
import statsmodels.formula.api as smf
model = smf.ols('price ~ year + mileage + transmission + fuelType + engineSize', data=df2)
results =model.fit()
print(results.summary())
# -
# # Model 3
from scipy import stats
# +
# Model 3: simple (single-feature) linear regression of price on mileage,
# using scipy's linregress, plotted against the raw scatter.
X = df['mileage']
y = df['price']
slope, intercept, r, p, std_err = stats.linregress(X, y)
def myfunc(X):
    """Fitted line: predicted price for mileage X."""
    return slope*X + intercept
mymodel = list(map(myfunc, X))
fig, ax = plt.subplots()
ax.scatter(X, y)
ax.plot(X, mymodel)
fig.set_size_inches(15, 8)
# Bug fix: the original wrote `fig.show` (bare attribute access, never
# called); plt.show() actually renders the figure.
plt.show()
print("r value of given problem:", r)
print("p value of given problem:", p)
print(std_err)
print(slope, intercept)
# -
# # Model 4
# +
# Model 4: two-feature linear model, price ~ mileage + year.
from sklearn import linear_model
X = df[['mileage', 'year']]
y = df['price']
regr = linear_model.LinearRegression()
regr.fit(X, y)
print(regr.coef_)
# Predict with a DataFrame carrying the training column names, avoiding
# sklearn's "X does not have valid feature names" warning.
print(regr.predict(pd.DataFrame([[100000, 2021]], columns=['mileage', 'year'])))
# NOTE(review): rows 7000+ were seen during fitting, so R^2/MSE below are
# in-sample metrics, not a held-out evaluation.
test_df = df.loc[7000:, ['mileage', 'year']]
y_test = df.loc[7000:, 'price']
X_test = test_df[['mileage', 'year']]
y_pred = regr.predict(X_test)
print(y_pred)
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_pred)
print(r2)
from sklearn.metrics import mean_squared_error
MSE = mean_squared_error(y_test, y_pred)
print(MSE)
|
Regression models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
import numpy as np
import pandas as pd
import holoviews as hv
hv.extension('bokeh', 'matplotlib')
# In the [previous guide](3-Tabular_Datasets.ipynb) we discovered how to work with tabular datasets. Although tabular datasets are extremely common, many other datasets are best represented by regularly sampled n-dimensional arrays (such as images, volumetric data, or higher dimensional parameter spaces). On a 2D screen and using traditional plotting libraries, it is often difficult to visualize such parameter spaces quickly and succinctly, but HoloViews lets you quickly slice and dice such a dataset to explore the data and answer questions about it easily.
# ## Gridded
#
# Gridded datasets usually represent observations of some continuous variable across multiple dimensions---a monochrome image representing luminance values across a 2D surface, volumetric 3D data, an RGB image sequence over time, or any other multi-dimensional parameter space. This type of data is particularly common in research areas that make use of spatial imaging or modeling, such as climatology, biology, and astronomy but can also be used to represent any arbitrary data that varies over multiple dimensions.
#
# In HoloViews terminology the dimensions the data varies over are the so called key dimensions (**kdims**), which define the coordinates of the underlying array. The actual value arrays are described by the value dimensions (**vdims**). Libraries like ``xarray`` or ``iris`` allow you to store the coordinates with the array, but here we will declare the coordinate arrays ourselves so we can get a better understanding of how the gridded data interfaces work. We will therefore start by loading a very simple 3D array:
# Load the 2-photon calcium-imaging example; 'Calcium' is a 3D fluorescence
# array (50 time steps of a 2D imaging plane — see the Dataset below).
data = np.load('../assets/twophoton.npz')
calcium_array = data['Calcium']
calcium_array.shape
# This particular NumPy dataset contains data from a 2-photon calcium imaging experiment, which provides an indirect measure of neural activity encoded via changes in fluorescent light intensity. The 3D array represents the activity of a 2D imaging plane over time, forming a sequence of images with a shape of (62, 111) over 50 time steps. Just as we did in the [Tabular Dataset](../4-Tabular_Datasets.ipynb) getting-started guide we start by wrapping our data in a HoloViews ``Dataset``. However, for HoloViews to understand the raw NumPy array we need to pass coordinates for each of the dimensions (or axes) of the data. For simplicity, here we will simply use integer coordinates for the ``'Time'``, ``'x'`` and ``'y'`` dimensions:
# Wrap the raw array as a HoloViews Dataset: integer coordinate arrays for
# the key dimensions Time (50 steps) and the x (111) / y (62) pixel axes,
# with 'Fluorescence' as the value dimension.
ds = hv.Dataset((np.arange(50), np.arange(111), np.arange(62), calcium_array),
                ['Time', 'x', 'y'], 'Fluorescence')
ds
# As we should be used to by now the ``Dataset`` repr shows us the dimensions of the data. If we inspect the ``.data`` attribute we can see that by default HoloViews will store this data as a simple dictionary of our key dimension coordinates and value dimension arrays:
type(ds.data), list(ds.data.keys())
# ### Other datatypes
# Instead of defining the coordinates manually, we recommend using [xarray](http://xarray.pydata.org/en/stable/), which will flexibly work with labeled n-dimensional arrays. We can even make a clone of our dataset and set the datatype to xarray to convert to an ``xarray.Dataset``, which is the recommended format for gridded data in HoloViews:
ds.clone(datatype=['xarray']).data
# To see more details on working with different datatypes have a look at the [user guide](../user_guide/08-Gridded_Datasets.ipynb).
# ### Viewing the data
#
# Perhaps the most natural representation of this dataset is as an Image displaying the fluorescence at each point in time. Using the ``.to`` interface, we can map the dimensions of our ``Dataset`` onto the dimensions of an Element. To display an image, we will pick the ``Image`` element and specify the ``'x'`` and ``'y'`` as the key dimensions. Since we only have one value dimension, we won't have to declare it explicitly:
# %opts Image (cmap='viridis')
# One Image per time step (x/y as key dims); .hist() links a fluorescence
# histogram for interactive color-range selection.
ds.to(hv.Image, ['x', 'y']).hist()
# The slider widget allows you to scrub through the images for each time, and you can also play the frames as an animation in forward or reverse by pressing the ``P`` and ``R`` keys (respectively) after clicking on the slider.
#
# Once you have selected an individual plot, you can interact with it by zooming (which does not happen to give additional detail with this particular downsampled dataset), or by selecting the ``Box select`` tool in the plot toolbar and drawing a Fluorescence range on the Histogram to control the color mapping range.
#
# When using ``.to`` or ``.groupby`` on larger datasets with many key dimensions or many distinct key-dimension values, you can use the ``dynamic=True`` flag, letting you explore the parameter space dynamically (for more detail have a look at the [Live Data](../5-Live_Data.ipynb) and [Pipeline] sections).
# ### Selecting
#
# Often when working with multi-dimensional datasets, we are only interested in small regions of the parameter space. For instance, when working with neural imaging data like this, it is very common to focus on regions of interest (ROIs) within the larger image. Here we will fetch some bounding boxes from the data we loaded earlier. ROIs are often more complex polygons but for simplicity's sake we will use simple rectangular ROIs specified as the left, bottom, right and top coordinate of a bounding box:
# Rectangular regions of interest, one (left, bottom, right, top) bounding
# box per row; wrap each as a Bounds element so they can be overlaid.
ROIs = data['ROIs']
roi_bounds = hv.Path([hv.Bounds(tuple(roi)) for roi in ROIs])
print(ROIs.shape)
# Here we have 147 ROIs representing bounding boxes around 147 identified neurons in our data. To display them we have wrapped the data in ``Bounds`` elements, which we can overlay on top of our animation. Additionally we will create some ``Text`` elements to label each ROI. Finally we will use the regular Python indexing semantics to select along the Time dimension, which is the first key dimension and can therefore simply be specified like ``ds[21]``. Just like the ``select`` method, indexing like this indexes and slices by value, not the index (which are one and the same here):
# +
# %%opts Image [width=400 height=400 xaxis=None yaxis=None]
# %%opts Path (color='white') Text (text_color='white' text_font_size='8pt')
# Label each ROI with its index at the box's lower-left corner, then overlay
# the boxes and labels on the single frame at Time=21.
opts = dict(halign='left', valign='bottom')
roi_text = hv.NdOverlay({i: hv.Text(roi[0], roi[1], str(i), **opts) for i, roi in enumerate(ROIs)})
(ds[21].to(hv.Image, ['x', 'y']) * roi_bounds * roi_text).relabel('Time: 21')
# -
# Now we can use these bounding boxes to select some data, since they simply represent coordinates. Looking at ROI #60 for example, we can see the neuron activate quite strongly in the middle of our animation. Using the ``select`` method, we can select the x and y-coordinates of our ROI and the rough time period when we saw the neuron respond:
# Slice the dataset to ROI #60's bounding box and a time window.
# NOTE(review): the kwarg is lowercase `time` while the dimension was
# declared 'Time', and (250, 280) lies outside the 0-49 coordinate range
# built above — verify this select actually restricts the Time axis.
x0, y0, x1, y1 = ROIs[60]
roi = ds.select(x=(x0, x1), y=(y0, y1), time=(250, 280)).relabel('ROI #60')
roi.to(hv.Image, ['x', 'y'])
# ### Faceting
# Even though we have selected a very small region of the data, there is still quite a lot of data there. We can use the ``faceting`` methods to display the data in different ways. Since we have only a few pixels in our dataset now, we can for example plot how the fluorescence changes at each pixel in our ROI over time. We simply use the ``.to`` interface to display the data as ``Curve`` types, with time as the key dimension. If you recall from [Tabular Data](3-Tabular_Data.ipynb), the ``.to`` method will group by any remaining key dimensions (in this case ``'x'`` and ``'y'``) to display sliders. Here we will instead facet the ``Curve`` elements using ``.grid``, allowing us to see the evolution of the fluorescence signal over time and space:
# %%opts GridSpace [shared_xaxis=True shared_yaxis=True]
# One fluorescence-vs-Time Curve per pixel, faceted over (x, y) in a grid.
roi.to(hv.Curve, 'Time').grid()
# The above cell and the previous cell show the same data, but visualized in very different ways depending on how the data was mapped onto the screen.
# ### Aggregating
#
# Instead of generating a Curve for each pixel individually, we may instead want to average the data across x and y to get an aggregated estimate of that neuron's activity. For that purpose we can use the aggregate method to get the average signal within the ROI window. Using the ``spreadfn`` we can also compute the standard deviation between pixels, which helps us understand how variable the signal is across that window (to let us know what we have covered up when aggregating). We will display the mean and standard deviation data as a overlay of a ``Spread`` and ``Curve`` Element:
# %%opts Overlay [show_legend=False width=600]
# Mean fluorescence across the ROI at each time step, with the per-pixel
# standard deviation shown as a Spread band under the mean Curve.
agg = roi.aggregate('Time', np.mean, spreadfn=np.std)
hv.Spread(agg) * hv.Curve(agg)
# Of course, we could combine all of these approaches and aggregate each ROI, faceting the entire dataset by ROI to show how the activity of the various neurons differs.
#
# As you can see, HoloViews makes it simple for you to select and display data from a large gridded dataset, allowing you to focus on whatever aspects of the data are important to answer a given question. The final getting-started section covers how you can provide [Live Data](5-Live_Data.ipynb) visualizations to let users dynamically choose what to display interactively.
|
examples/sites/holoviews/examples/getting_started/4_Gridded_Datasets.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as pt
import pandas as pd
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.ensemble import ExtraTreesClassifier
from sklearn.naive_bayes import GaussianNB
# Kaggle MNIST: train.csv has the label in column 0 and 784 pixel values
# after it; test.csv has pixels only.
# Fix: DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy().
train_data = pd.read_csv("data/train.csv").to_numpy()
test_data = pd.read_csv("data/test.csv").to_numpy()
# Fix: the original instantiated GradientBoostingClassifier, which is never
# imported (NameError at runtime); GaussianNB is the classifier actually
# imported above (the tree/forest imports are commented out).
clf = GaussianNB()
total = train_data.shape[0]
total_test = test_data.shape[0]
# Training data: labels in column 0, pixel features in the rest.
xtrain = train_data[0:total, 1:]
train_label = train_data[0:total, 0]
clf.fit(xtrain, train_label)
xtest = test_data[0:total_test, 0:]
p = clf.predict(xtest)
# Kaggle submission format: 1-based ImageId plus predicted Label.
output = [[i + 1, p[i]] for i in range(total_test)]
df = pd.DataFrame(output, columns=['ImageId', 'Label'])
df.to_csv("output.csv", index=False)
|
.ipynb_checkpoints/Digit Recognizer-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time Series
#
# Based on:
#
# - https://onlinecourses.science.psu.edu/stat510/book/export/html/661
#
# Formally:
#
# > A **univariate time series** is a sequence of measurements of the same variable collected over time. Most often, the measurements are made at regular time intervals.
#
# Then **univariate time series**, or just a **time series** is a sequence taken at successive equally spaced points in time.
#
# $$ \{ S_{n} \} = \{ S \; (n \; \Delta t) \}$$
#
# $$ S_{0} = S \; (0 \; * \; \Delta t) $$
#
# $$ S_{1} = S \; (1 \; * \; \Delta t) $$
#
# $$ S_{2} = S \; (2 \; * \; \Delta t) $$
#
# $$ ... $$
#
#
# 
#
# The time interval $\Delta t$ at which measurements are taken is named **sampling interval**, and the total time $T$ at which measurements are taken is named **observation time**.
#
# ### Note:
#
# > A **time series** is a list of observations where the ordering matters.
#
# Ordering is very important because there is dependency and changing the order could change the meaning of the data.
# ## First remember ...
#
# **Linear methods** interpret all regular structure in a data set through linear correlations. This means, in brief, that the intrinsic dynamics of the system are governed by the following paradigm:
#
# > **Linear paradigm:** "*Small causes lead to small effects*"
#
# **Linear equations** can only lead to exponentially decaying (or growing) or (damped) periodically oscillating solutions.
#
# 
#
# So, if one has any irregular behaviour and we assume that the system behaves linearly, then it has to be attributed to some random external input to the system:
#
# $$ S_{n} = x_{n} + \eta_{n} $$
#
#
# > **Chaos paradigm**: "*Nonlinear chaotic systems can produce irregular data with purely deterministic equations of motion in an autonomous way, i.e. without time dependent inputs*"
#
#
# **Nonlinear chaotic systems** have **dependence on initial conditions**: *Tiny changes in the input lead to LARGE variations in the output*.
#
# 
#
# It is important to take into account that the system still being **deterministic** in the sense that the variables behave according to their physical rules, then it is not random, but it is **highly unpredictable** and subject to **vast variations**.
# ## Time series analysis goals
#
# > We look for signatures of **deterministic nature** of the system
#
# ### Important characteristics to consider first
#
# Some important questions to first consider when first looking at a time series are:
#
# - Is there a **trend**?
# > On average, the measurements tend to increase (or decrease) over time?
#
# - Is there **seasonality**?
# > There is a **regularly repeating pattern** of highs and lows related to calendar time such as seasons, quarters, months, days of the week, and so on?
#
# - Are there **outliers**?
# > In regression, outliers are far away from your model. With time series data, your outliers are far away from your other data.
#
# - Is there a **long-run cycle** or period unrelated to seasonality factors?
#
# - Is there **constant variance** over time, or is the **variance non-constant**?
#
# - Are there any **abrupt changes** to either the level of the series or the variance?
# # Stationarity
#
# We need to know that the numbers we measure correspond to properties of the studied object, up to some measurement error.
#
# > **Reproducibility** is closely connected to two different notions of **stationarity**.
#
# ## First concept of Stationarity (weakest form)
#
# > Stationarity requires that all parameters that are relevant for a system's dynamics have to be fixed and constant during the measurement period (and these parameters should be the same when the
# experiment is reproduced).
#
# - This is a requirement to be fulfilled not only by the experimental set-up but also by the process taking place in this fixed environment.
#
# - If the process under observation is a probabilistic one, it will be characterised by probability distributions for the variables involved. For a stationary process, these probabilities may not depend on time. The same holds if the process is specified by a set of transition probabilities between different states.
#
# - If there are deterministic rules governing the dynamics, these rules must not change during the time covered by a time series.
#
# ### Unfortunately ...
#
# ... In most cases we do not have direct access to the system which produces a signal and we cannot establish evidence that its parameters are indeed constant.
# ## Second concept of stationarity (which is based on the available data itself)
#
# > A signal is called stationary if all joint probabilities of finding the system at some time in one state and at some later time in another state are independent of time within the observation period, i.e. when calculated from the data.
#
# From [Stationary process - Wikipedia](https://en.wikipedia.org/wiki/Stationary_process)
#
# > Stationary process (a.k.a. a **strict/strictly stationary process** or **strong/strongly stationary process**) is a stochastic process whose unconditional joint probability distribution does not change when shifted in time. Consequently, parameters such as mean and variance also do not change over time.
#
# A time series, as any other measurement, has to provide enough information to determine the quantity of interest unambiguously.
#
# This includes the **constancy of relevant parameters**, but it also requires that **phenomena belonging to the dynamics are contained in the time series sufficiently frequently**, so that the probabilities or other rules can be inferred properly.
#
# ### Remarks
#
# > We deal with the problem of **how non-stationarity can be detected for a given data set**, but obviously stationarity is a property which can never be positively established.
#
# There are many processes which are formally stationary when the limit of infinitely long
# observation times can be taken, but which behave effectively like non-stationary
# processes when studied over finite times, for example: intermittency.
#
# - If the observed signal is quite regular almost all of the time, but contains one very irregular burst every so often, then the time series has to be considered to be non-stationary for our purposes, even in the case where all parameters remained exactly constant but the signal is intermittent.
#
# - Only if the rare events (e.g. the irregular bursts mentioned before) also appear several times in the time series can we speak of an effective independence of the observed joint probabilities and thus of stationarity.
#
# ### Note:
#
# Be aware that almost all the methods and results on time series analysis assume the validity of both conditions:
#
# - **The parameters of the system remain constant**.
# - **The phenomenon is sufficiently sampled:** the time series should cover a stretch of time which is much longer than the longest characteristic time scale that is relevant for the evolution of the system ... **Remember:** we are looking for **reproducibility**, and it requires that the probabilities or other rules must be inferred properly.
#
# > The concentration of sugar in the blood of a human is driven by the consumption of food and thus roughly follows a 24 hour cycle. If this quantity is recorded over 24 hours or less, the process must be considered
# non-stationary no matter how many data points have been taken during that time
#
# ### Why do we care about stationarity ?
#
# > **Fact:** we try to approach a dynamical phenomenon through a single finite time series, and hence is a **requirement** of almost all statistical tools for time series data, including the linear ones.
#
# Time series analysis methods can be applied to any sequence of data, including non-stationary data. However:
#
# > When data is not stationary: "**The results cannot be assumed to characterise the underlying system**".
#
# ### Methods to deal with non-stationary data
#
# - One way out can be segmentation of the time series into almost stationary segments
# # Testing stationarity - Practical methods
#
# A series $x_{n}$ is said to be stationary must fulfill the condition:
#
# > The **dynamical properties** of the system underlying a signal **must not change** during the observation period.
#
# Then, it must satisfy the following properties:
#
# - The mean is the same for all $n$.
# - The variance of $x_{n}$ is the same for all $n$.
# - The covariance (and also correlation) between $x_{n}$ and $x_{n-\tau}$ is the same for all $n$.
#
# Where $\tau$ is the **time lag**.
#
#
# ## Requirements:
#
# - As a first requirement, the time series should cover a stretch of time which is much longer than the longest characteristic time scale that is relevant for the evolution of the system.
#
# Quantitative information can be gained from the power spectrum. The longest relevant time scale can be estimated as the inverse of the lowest frequency which still contains a significant fraction of the total power of the signal.
#
# A time series can be considered stationary only on much larger time scales.
#
# ## Method 1
#
# > Check if dynamical properties of the system do change over time
#
# It can be checked simply by measuring such properties for several segments of the data set.
#
# > **Note:** Characteristics with known or negligible statistical fluctuations are preferable for this purpose.
#
# The **statistically most stable quantities** are:
#
# - The **mean**
# - The **variance**
#
# To detect less obvious non-stationarity, it may be needed more **subtle quantities** such as:
#
# - Spectral components
# - Correlations
# - Use nonlinear statistics.
#
# **Summary:**
#
# Try to compute:
#
# - Moving average (rolling average or running average)
# - Moving variance (rolling variance or running variance)
# - Transition probabilities
# - Correlations
# - ... Among others
#
# These quantities must not differ beyond their statistical fluctuations.
#
# ### What about chaotic systems?
#
# In **experimental chaotic systems**, it is not uncommon for **a parameter drift to result in no visible drift** in the mean or the distribution of values. Linear correlations and the spectrum may also be unaffected. **Only the nonlinear dynamical relations and transition probabilities change appreciably**.
#
# ### ... The problem of being sufficiently sampled
#
# > Test for convergence
#
# Whether the data set is a sufficient sample for a particular application, such as the estimate of a characteristic quantity, may be tested by **observing the convergence of that quantity when larger and larger fractions of the available data are used for its computation**:
#
# > An attractor dimension obtained from the first half of the data must not differ substantially from the value determined using the second half, and should agree with the value computed for the whole data set within the estimated statistical errors.
#
# This test is very crude since the convergence of nonlinear statistics can be very slow and, indeed, not much is known about its rate.
#
# ### Remarks
#
# - Unlike in the linear theory, the estimates of nonlinear quantities that we will make later cannot usually be studied sufficiently to rigorously prove their correctness and convergence.
#
# - It is relevant to distinguish between a quantity such as a mean value, and the way one derives a number for it from a finite sample. This is called an estimate, and there can exist different estimates for the same quantity in the same data set, depending on the assumptions made.
# ## Autocorrelation
#
# For **Mean** and **Variance** the time ordering of the measurements is irrelevant and
# thus **they cannot give any information about the time evolution of a system**.
#
# **Autocorrelation** gives this type of information.
#
# The estimation of the autocorrelations from a time series is straightforward as long
# as the lag $\tau$ is small compared to the total length of the time series. Therefore,
# estimates of autocorrelation, are only reasonable for $\tau << N$.
#
#
# If we plot values $S_{n}$ versus the corresponding values a fixed lag $\tau$ earlier, $S_{n-\tau}$, the autocorrelation $c_{\tau}$, quantifies how these points are distributed.
#
# Cases:
#
# - If they spread out evenly over the plane, then $c_{\tau} = 0$.
# - If they tend to crowd along the diagonal $S_{n} = S_{n-\tau}$, then $c_{\tau} > 0$.
# - If they are closer to the line $S_{n} = - S_{n - \tau}$, then $c_{\tau} < 0$.
#
# The latter two cases reflect some tendency of $S_{n}$ and $S_{n-\tau}$ to be proportional to each other, which makes it plausible that the autocorrelation function reflects only linear correlations.
#
# Obviously, if a signal is periodic in time, then the autocorrelation function is periodic in the lag $\tau$.
#
# ### Autocorrelations, noise and chaos
#
# Note:
#
# - Stochastic processes have decaying autocorrelations but the rate of decay depends on the properties of the process.
# - Autocorrelations of signals from deterministic chaotic systems typically also decay exponentially with increasing lag.
#
# > Autocorrelations are not characteristic enough to distinguish random from deterministic chaotic signals.
# ## Fourier transform: power spectrum, periodogram and spectrogram
#
# Instead of describing the statistical properties of a signal in real space one can ask
# about its properties in Fourier space.
#
# The Fourier transform establishes a one-to-one correspondence between the signal at certain times (time domain) and how certain frequencies contribute to the signal, and how the phases of the oscillations are
# related to the phases of other oscillations (frequency domain).
#
# The power spectrum is particularly useful for studying the oscillations of a system. There will be sharper or broader peaks at the dominant frequencies and at their integer multiples, the harmonics.
#
# - Purely periodic or quasi-periodic signals show sharp spectral lines.
# - Measurement noise adds a continuous floor to the spectrum.
#
# > Thus in the spectrum, purely periodic signal and noise are readily distinguished.
#
# - Deterministic chaotic signals may also have sharp spectral lines but even in the absence of noise there will be a continuous part of the spectrum.
#
#
# This is an immediate consequence of the exponentially decaying autocorrelation function.
#
# > Without additional information it is impossible to infer from the spectrum whether the continuous part is due to noise on top of a (quasi-)periodic signal or to chaoticity.
#
# **See:** T. S. Parker, L. O. Chua - Practical Numerical Algorithms for Chaotic Systems.
|
stationarity/stationarity.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import os
# Show where we are, then move into the working directory.
print(os.getcwd())
# NOTE(review): hard-coded absolute path — only works on the author's machine.
os.chdir("/Users/terminator/Desktop/Trading/")
# Weekly Nifty 50 history exported from investing.com (TODO confirm source).
nifty50_wk = pd.read_csv("Nifty 50 Historical Data Weekly Gain 2017-2021.csv")
nifty50_wk
# +
#https://stackoverflow.com/questions/65323421/python-investpy-package-to-get-data-of-nifty50-index-get-stock-historical-dat
# +
class European_Call_Payoff:
    """Terminal payoff of a European call option: max(S - K, 0)."""

    def __init__(self, strike):
        # Strike price K of the option.
        self.strike = strike

    def get_payoff(self, stock_price):
        """Return the call's intrinsic value at the given stock price."""
        intrinsic = stock_price - self.strike
        return intrinsic if intrinsic > 0 else 0
class GeometricBrownianMotion:
    """Simulate one price path with constant drift and Brownian noise.

    Euler steps of size dt are taken until the horizon T is used up; the
    path is stored in self.prices (the initial price is not included).
    """

    def simulate_paths(self):
        """Advance the price until the remaining horizon is exhausted."""
        while self.T - self.dt > 0:
            # Brownian increment ~ N(0, sqrt(dt)).
            dWt = np.random.normal(0, math.sqrt(self.dt))
            # Price change for this step: deterministic drift plus noise.
            self.current_price += self.drift * self.dt + self.volatility * dWt
            self.prices.append(self.current_price)
            self.T -= self.dt  # account for the step in time

    def __init__(self, initial_price, drift, volatility, dt, T):
        self.current_price = initial_price
        self.initial_price = initial_price
        self.drift = drift            # absolute drift per unit time
        self.volatility = volatility  # absolute volatility per sqrt(time)
        self.dt = dt                  # step size
        self.T = T                    # total horizon (consumed during simulation)
        self.prices = []
        self.simulate_paths()
# +
# Model Parameters
paths = 200                       # number of Monte Carlo sample paths
initial_price = 17816             # spot level (Nifty index points)
drift = -16                       # absolute drift per unit time
volatility = 0.08*initial_price   # absolute volatility (8% of spot)
dt = 1/(365*24)                   # one hour, expressed in years
T = dt*24*3                       # horizon: 3 days
price_paths = []
# Generate a set of sample paths
for i in range(0, paths):
    price_paths.append(GeometricBrownianMotion(initial_price, drift, volatility, dt, T).prices)
# +
# Price an at-the-money call by Monte Carlo: average the discounted
# terminal payoffs over all simulated paths.
call_payoffs = []
ec = European_Call_Payoff(initial_price)
risk_free_rate = .036*T  # simple (non-compounded) discount over the horizon T
for price_path in price_paths:
    # Last price in the GBM series determines the payoff; discount it over T.
    call_payoffs.append(ec.get_payoff(price_path[-1])/(1 + risk_free_rate))
# Plot the set of generated sample paths
for price_path in price_paths:
    plt.plot(price_path)
plt.show()
print(np.average(call_payoffs)) # Options are in blocks of 100
# +
import math
from scipy.stats import norm
class EuropeanCall:
    """Black-Scholes price of a European call option.

    Price = S*N(d1) - K*e^{-rT}*N(d2), computed on construction and
    stored in self.price.
    """

    def call_price(
        self, asset_price, asset_volatility, strike_price,
        time_to_expiration, risk_free_rate
    ):
        """Return the Black-Scholes call value for the given parameters."""
        discount = math.exp(-risk_free_rate*time_to_expiration)
        vol_sqrt_t = asset_volatility*(time_to_expiration**.5)
        log_moneyness = math.log(asset_price/(discount*strike_price))
        half_var_t = .5*(asset_volatility*asset_volatility)*time_to_expiration
        d1 = (log_moneyness + half_var_t)/vol_sqrt_t
        d2 = (log_moneyness - half_var_t)/vol_sqrt_t
        # S*N(d1) minus the discounted strike weighted by N(d2).
        return asset_price*norm.cdf(d1) - discount*strike_price*norm.cdf(d2)

    def __init__(
        self, asset_price, asset_volatility, strike_price,
        time_to_expiration, risk_free_rate
    ):
        self.asset_price = asset_price
        self.asset_volatility = asset_volatility
        self.strike_price = strike_price
        self.time_to_expiration = time_to_expiration
        self.risk_free_rate = risk_free_rate
        self.price = self.call_price(asset_price, asset_volatility, strike_price, time_to_expiration, risk_free_rate)
class EuropeanPut:
    """Black-Scholes price of a European put option.

    Price = K*e^{-rT}*N(-d2) - S*N(-d1), computed on construction and
    stored in self.price.
    """

    def put_price(
        self, asset_price, asset_volatility, strike_price,
        time_to_expiration, risk_free_rate
    ):
        """Return the Black-Scholes put value for the given parameters."""
        discount = math.exp(-risk_free_rate*time_to_expiration)
        vol_sqrt_t = asset_volatility*(time_to_expiration**.5)
        log_inv_moneyness = math.log((discount*strike_price)/asset_price)
        half_var_t = .5*(asset_volatility*asset_volatility)*time_to_expiration
        minus_d2 = (log_inv_moneyness + half_var_t)/vol_sqrt_t
        minus_d1 = (log_inv_moneyness - half_var_t)/vol_sqrt_t
        # Discounted strike weighted by N(-d2) minus S*N(-d1).
        return discount*strike_price*norm.cdf(minus_d2) - asset_price*norm.cdf(minus_d1)

    def __init__(
        self, asset_price, asset_volatility, strike_price,
        time_to_expiration, risk_free_rate
    ):
        self.asset_price = asset_price
        self.asset_volatility = asset_volatility
        self.strike_price = strike_price
        self.time_to_expiration = time_to_expiration
        self.risk_free_rate = risk_free_rate
        self.price = self.put_price(asset_price, asset_volatility, strike_price, time_to_expiration, risk_free_rate)
# Price a Nifty 17600 call: spot 17416, vol 17.5%, 1.8 days to expiry, r=3.6%.
ec = EuropeanCall(17416, 0.175, 17600, 1.8/365, 0.036)
print(ec.price)
# +
# importing nse from nse tools
from nsetools import Nse
# creating a Nse object
nse = Nse()
# getting quote of the sbin (State Bank of India) — live call to the NSE site
quote = nse.get_quote('sbin')
# printing company name
print(quote['companyName'])
# printing buy price
print("Buy Price : " + str(quote['buyPrice1']))
# + jupyter={"outputs_hidden": true} tags=[]
# #!pip install nsepy
# -
from nsepy import get_history
from datetime import date
# Daily NIFTY 50 index history (network call via nsepy).
data = get_history(symbol="NIFTY 50", start=date(2019,10,31), end=date(2021,11,30), index = True)
plt.plot(data[['Close']])
# Add the weekday (0=Monday) so day-of-week effects can be inspected.
data["DayOfWeek"] = data.index.to_series().dt.dayofweek
data
# Option-chain history: NIFTY 18000 CE expiring 2021-12-30.
nifty_opt = get_history(symbol="NIFTY",
                        start=date(2015,1,1),
                        end=date(2021,12,15),
                        index=True,
                        option_type='CE',
                        strike_price=18000,
                        expiry_date=date(2021,12,30))
# + jupyter={"outputs_hidden": true} tags=[]
# Keep only liquid rows (more than 100 traded contracts), last 50 of them.
nifty_opt[nifty_opt['Number of Contracts']>100][-50:]
#[["Open", "Low", "Close", "High"]].mean(axis=1)[-50:]#.hist(bins =100)
# +
# #!pip install forex-python
# -
import datetime
from forex_python.converter import CurrencyRates
# Historical USD/INR rate for a specific timestamp (network call).
c = CurrencyRates()
date_obj = datetime.datetime(2014, 5, 23, 18, 36, 28, 151012)
c.get_rate('USD', 'INR', date_obj)
# + tags=[]
# #!pip install yfinance
import yfinance as yf
import matplotlib.pyplot as plt
# Quick Yahoo Finance pulls for a few tickers over the same window.
data = yf.download('BTC-USD','2021-01-01','2021-09-30')   # Bitcoin in USD
data.head()
# -
data = yf.download('USDINR=X','2021-01-01','2021-09-30')  # USD/INR FX rate
data.head()
data = yf.download('BZ=F','2021-01-01','2021-09-30')      # Brent crude futures
data.head()
# + tags=[]
import yfinance as yf
# Fully parameterised yfinance download of USD/INR ("INR=X"), year to date.
data = yf.download(  # or pdr.get_data_yahoo(...
        # tickers list or string as well
        tickers = "INR=X",

        # use "period" instead of start/end
        # valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
        # (optional, default is '1mo')
        period = "ytd",

        # fetch data by interval (including intraday if period < 60 days)
        # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
        # (optional, default is '1d')
        interval = "1d",

        # group by ticker (to access via data['SPY'])
        # (optional, default is 'column')
        group_by = 'ticker',

        # adjust all OHLC automatically
        # (optional, default is False)
        auto_adjust = True,

        # download pre/post regular market hours data
        # (optional, default is False)
        prepost = False,

        # use threads for mass downloading? (True/False/Integer)
        # (optional, default is True)
        threads = True,

        # proxy URL scheme use use when downloading?
        # (optional, default is None)
        proxy = None
    )
print(data)
# + jupyter={"outputs_hidden": true} tags=[]
# #!pip install investpy
# + tags=[]
import investpy
# Economic calendar between two dates (dd/mm/yyyy strings, network call).
data = investpy.economic_calendar(
    from_date='12/09/2021',
    to_date ='15/12/2021'
)
print(data.head())
# -
# Keep only medium/high-importance events.
data[data.importance.isin(["medium", "high"])]
data.importance
# Scratch arithmetic: 20000 doubled 10 times (compounding thought experiment).
20000*2**10
# +
# If liquidity dries market falls - No chance Nifty will get to 19000 by Jan
# But Nifty can go to 18000 by March
# +
# What is the probability that from this point the index moves to what % atleast once
# Same USD/INR download as above, repeated to eyeball index-move probabilities.
data = yf.download(  # or pdr.get_data_yahoo(...
        # tickers list or string as well
        tickers = "INR=X",

        # use "period" instead of start/end
        # valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
        # (optional, default is '1mo')
        period = "ytd",

        # fetch data by interval (including intraday if period < 60 days)
        # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
        # (optional, default is '1d')
        interval = "1d",

        # group by ticker (to access via data['SPY'])
        # (optional, default is 'column')
        group_by = 'ticker',

        # adjust all OHLC automatically
        # (optional, default is False)
        auto_adjust = True,

        # download pre/post regular market hours data
        # (optional, default is False)
        prepost = False,

        # use threads for mass downloading? (True/False/Integer)
        # (optional, default is True)
        threads = True,

        # proxy URL scheme use use when downloading?
        # (optional, default is None)
        proxy = None
    )
data
# -
|
NSE Options.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import pandas as pd
import time
# Load the raw soccer CSV extracts (presumably the Kaggle "European Soccer
# Database" tables — TODO confirm).
Match=pd.read_csv('DataSets/Match.csv')
Match.index=Match.id  # index by match id for fast .loc lookups below
Players=pd.read_csv('DataSets/Player.csv')
PlayerAt=pd.read_csv('DataSets/Player_Attributes.csv')
Team=pd.read_csv('DataSets/Team.csv')
# Per-match statistics (shots, cards, corners, ...), ';'-separated export.
Total=pd.read_csv('DataSets/MatchData2.csv',sep=';')
Total.loc[:,'DateTime']=pd.to_datetime(Total['Date'],format='%d/%m/%Y')
# Build the feature table: one row per match.
Data=Match[['id']].copy()
Data.columns=['Match_ID']
Data.loc[:,'DateTime']=Match['date']
Data.loc[:,'HomeScore']=Match['home_team_goal']
Data.loc[:,'AwayScore']=Match['away_team_goal']
# Result label: 1 = home win, -1 = away win, 0 = draw.
Data.loc[:,'Result']=0
Data.loc[ Data['HomeScore'] > Data['AwayScore'] ,'Result']=1
Data.loc[ Data['HomeScore'] < Data['AwayScore'] ,'Result']=-1
Data.loc[:,'Home_ID']=Match['home_team_api_id']
Data.loc[:,'Away_ID']=Match['away_team_api_id']
# Copy the 11 home and 11 away player api ids onto the feature table.
for i in range(1,12):
    Data.loc[:,'HomePlayer_'+str(i)]=Match['home_player_'+str(i)]
for i in range(1,12):
    Data.loc[:,'AwayPlayer_'+str(i)]=Match['away_player_'+str(i)]
# All 22 player-slot column names, used to drive the rating lookup below.
ColumnNames=['HomePlayer_1', 'HomePlayer_2', 'HomePlayer_3', 'HomePlayer_4',
             'HomePlayer_5', 'HomePlayer_6', 'HomePlayer_7', 'HomePlayer_8',
             'HomePlayer_9', 'HomePlayer_10', 'HomePlayer_11', 'AwayPlayer_1',
             'AwayPlayer_2', 'AwayPlayer_3', 'AwayPlayer_4', 'AwayPlayer_5',
             'AwayPlayer_6', 'AwayPlayer_7', 'AwayPlayer_8', 'AwayPlayer_9',
             'AwayPlayer_10', 'AwayPlayer_11']
# For every player slot, look up that player's most recent overall rating
# snapshot dated strictly before the match.
for col in ColumnNames:
    Data.loc[:,col+'_Overall'] = 0
    startTime=time.time()
    for index in Data.index:
        PlayerID=Data[col].loc[index]
        MatchDate=Data['DateTime'].loc[index]
        PlayerTable = PlayerAt[PlayerAt['player_api_id'] == PlayerID]
        # NOTE(review): PlayerTable2 is computed but never used.
        PlayerTable2 = Players[Players['player_api_id'] == PlayerID]
        PlayerTable=PlayerTable.sort_values(by=['date'])
        try:
            # Latest attribute snapshot before the match date.
            PlayerTable=PlayerTable[PlayerTable['date']<MatchDate].iloc[-1]
            Data.at[index, col + '_Overall'] =PlayerTable['overall_rating']
        except:
            # NOTE(review): bare except — intended to catch the IndexError when
            # no snapshot predates the match, but it will hide any other error too.
            Data.at[index, col + '_Overall'] =None
    EndTime=time.time()
    print(col+' is completed in '+str(EndTime-startTime)+' seconds')
# Home Team Last Summary: goal difference and wins over the home team's
# previous 5 matches (home or away), strictly before the match date.
for index in Data.index:
    HomeID=Data.loc[index,'Home_ID']
    MatchDate=Data.loc[index,'DateTime']
    TeamMatch=Match[(Match['home_team_api_id']==HomeID) | (Match['away_team_api_id']==HomeID)]
    TeamMatch=TeamMatch.sort_values(by=['date'])
    TeamMatch=TeamMatch[TeamMatch['date']<MatchDate].iloc[-5:]
    GiveGoal=0   # goals scored by the home team over those matches
    TakeGoal=0   # goals conceded
    WinNumber=0  # wins
    for indexTeam in TeamMatch.index:
        # Pick the right goal columns depending on which side the team played.
        if TeamMatch.loc[indexTeam,'home_team_api_id']==HomeID:
            MatchGiveGoal = TeamMatch.loc[indexTeam,'home_team_goal']
            MatchTakeGoal = TeamMatch.loc[indexTeam,'away_team_goal']
            GiveGoal=GiveGoal+MatchGiveGoal
            TakeGoal= TakeGoal+MatchTakeGoal
            if MatchGiveGoal>MatchTakeGoal:
                WinNumber = WinNumber+1
        else:
            MatchGiveGoal = TeamMatch.loc[indexTeam, 'away_team_goal']
            MatchTakeGoal = TeamMatch.loc[indexTeam,'home_team_goal']
            GiveGoal = GiveGoal + MatchGiveGoal
            TakeGoal = TakeGoal + MatchTakeGoal
            if MatchGiveGoal > MatchTakeGoal:
                WinNumber = WinNumber + 1
    # NOTE(review): column names carry a trailing space — kept for consistency
    # with the rest of the pipeline.
    Data.at[index,'Last_5_match_goal_diff_home '] = GiveGoal-TakeGoal
    Data.at[index,'Last_5_match_win_home '] = WinNumber
#Away Team
# Goal difference and wins over the away team's previous 5 matches,
# mirroring the home-team loop above.
for index in Data.index:
    AwayID=Data.loc[index,'Away_ID']
    MatchDate=Data.loc[index,'DateTime']
    # BUG FIX: the original filter tested away_team_api_id twice, so every match
    # the away team played at home was silently dropped.
    TeamMatch=Match[(Match['home_team_api_id']==AwayID) | (Match['away_team_api_id']==AwayID)]
    TeamMatch=TeamMatch.sort_values(by=['date'])
    TeamMatch=TeamMatch[TeamMatch['date']<MatchDate].iloc[-5:]
    GiveGoal=0   # goals scored by the away team over those matches
    TakeGoal=0   # goals conceded
    WinNumber=0  # wins
    for indexTeam in TeamMatch.index:
        # BUG FIX: the original compared against HomeID, a stale variable left
        # over from the previous loop; the away team's own id must be used.
        if TeamMatch.loc[indexTeam,'home_team_api_id']==AwayID:
            MatchGiveGoal = TeamMatch.loc[indexTeam,'home_team_goal']
            MatchTakeGoal = TeamMatch.loc[indexTeam,'away_team_goal']
        else:
            MatchGiveGoal = TeamMatch.loc[indexTeam,'away_team_goal']
            MatchTakeGoal = TeamMatch.loc[indexTeam,'home_team_goal']
        GiveGoal = GiveGoal + MatchGiveGoal
        TakeGoal = TakeGoal + MatchTakeGoal
        if MatchGiveGoal > MatchTakeGoal:
            WinNumber = WinNumber + 1
    # Trailing space in column names kept for consistency with the home columns.
    Data.at[index,'Last_5_match_goal_diff_away '] = GiveGoal-TakeGoal
    Data.at[index,'Last_5_match_win_away '] = WinNumber
# Head-to-head score over (up to) the last 5 meetings between the two clubs:
# +1 per past win from the home team's perspective, -1 per past loss.
for index in Data.index:
    AwayID=Data.loc[index,'Away_ID']
    HomeID=Data.loc[index,'Home_ID']
    MatchDate=Data.loc[index,'DateTime']
    BTWMatch=Match[(Match['away_team_api_id']==AwayID) & (Match['home_team_api_id']==HomeID) | (Match['away_team_api_id']==HomeID) & (Match['home_team_api_id']==AwayID)]
    # NOTE(review): the date cut-off below is commented out, so meetings dated
    # AFTER the match can leak into this feature — confirm this is intended.
    # BTWMatch=BTWMatch[BTWMatch['date']<MatchDate]
    Last_5_match_btw=0
    if len(BTWMatch)==0:
        pass
    else:
        if len(BTWMatch)>5:
            BTWMatch=BTWMatch.iloc[-5:]
        for BTWMatchIndex in BTWMatch.index:
            HTG = BTWMatch.loc[BTWMatchIndex, 'home_team_goal']
            ATG = BTWMatch.loc[BTWMatchIndex, 'away_team_goal']
            # Was the current home team also at home in that past meeting?
            if HomeID == BTWMatch.loc[BTWMatchIndex,'home_team_api_id']:
                IsHomeHome=True
            else:
                IsHomeHome=False
            if IsHomeHome==True:
                if HTG>ATG:
                    Last_5_match_btw=Last_5_match_btw + 1
                elif HTG < ATG:
                    Last_5_match_btw = Last_5_match_btw - 1
            elif IsHomeHome==False:
                if HTG>ATG:
                    Last_5_match_btw=Last_5_match_btw -1
                elif HTG < ATG:
                    Last_5_match_btw = Last_5_match_btw + 1
    Data.at[index,'Last_5_match_btw'] = Last_5_match_btw
# Summarise each line-up's player ratings: mean (team strength) and
# variance (how uneven the squad is) per side.
for index in Data.index:
    MatchInfo=Data.loc[index]
    HomeTeamPlayers= ['HomePlayer_1_Overall', 'HomePlayer_2_Overall', 'HomePlayer_3_Overall',
                      'HomePlayer_4_Overall', 'HomePlayer_5_Overall', 'HomePlayer_6_Overall',
                      'HomePlayer_7_Overall', 'HomePlayer_8_Overall', 'HomePlayer_9_Overall',
                      'HomePlayer_10_Overall', 'HomePlayer_11_Overall']
    AwayTeamPlayers= ['AwayPlayer_1_Overall', 'AwayPlayer_2_Overall', 'AwayPlayer_3_Overall',
                      'AwayPlayer_4_Overall', 'AwayPlayer_5_Overall', 'AwayPlayer_6_Overall',
                      'AwayPlayer_7_Overall', 'AwayPlayer_8_Overall', 'AwayPlayer_9_Overall',
                      'AwayPlayer_10_Overall', 'AwayPlayer_11_Overall']
    HomePlayerSummary=MatchInfo[HomeTeamPlayers]
    AwayPlayerSummary=MatchInfo[AwayTeamPlayers]
    # Variance computed as std**2 (population std, ddof=0).
    Data.at[index,'Team_variance_home ']= (np.std(HomePlayerSummary))**2
    Data.at[index,'Team_Mean_home ']= (np.mean(HomePlayerSummary))
    Data.at[index,'Team_variance_away ']= (np.std(AwayPlayerSummary))**2
    Data.at[index,'Team_Mean_away ']= (np.mean(AwayPlayerSummary))
#Match Information like Card,Corner,and Shout statistics
# ("Shout" is the author's spelling of "shot"; the H/A column suffixes are
# S=shots, ST=shots on target, Y/R=yellow/red cards, F=fouls, C=corners.)
for index in Data.index:
    HomeID=Data.loc[index,'Home_ID']
    HomeTeamName=Team[Team['team_api_id']==HomeID]['team_long_name'].iloc[0]
    MatchDay=Data.loc[index,'DateTime']
    HomeMatchData=Total[(Total['HomeTeam']==HomeTeamName) | (Total['AwayTeam']==HomeTeamName)]
    HomeMatchDataLast5=HomeMatchData[HomeMatchData['DateTime']<MatchDay].iloc[-5:]
    ShoutHome=0
    ShoutOnHome=0
    CardHome=0
    FaulHome=0
    CornerHome=0
    for MatchIndex in HomeMatchDataLast5.index:
        # Column prefix depends on which side the team played that day.
        if HomeMatchDataLast5.loc[MatchIndex,'HomeTeam'] ==HomeTeamName :
            HomeOrAway='H'
        else:
            HomeOrAway='A'
        Shout=HomeMatchDataLast5.loc[MatchIndex,HomeOrAway+'S']
        ShoutHome=ShoutHome+Shout
        ShoutOn= HomeMatchDataLast5.loc[MatchIndex,HomeOrAway+'ST']
        ShoutOnHome=ShoutOnHome+ShoutOn
        YellowCard= HomeMatchDataLast5.loc[MatchIndex,HomeOrAway+'Y']
        RedCard= HomeMatchDataLast5.loc[MatchIndex,HomeOrAway+'R']
        # A red card is weighted as two yellows.
        CardHome=CardHome+ (2*RedCard)+ YellowCard
        Faul= HomeMatchDataLast5.loc[MatchIndex,HomeOrAway+'F']
        FaulHome=FaulHome+Faul
        Corner= HomeMatchDataLast5.loc[MatchIndex,HomeOrAway+'C']
        CornerHome=CornerHome+Corner
    Data.at[index,'Last_5_match_shout_total_home ']=ShoutHome
    Data.at[index,'Last_5_match_shout_on_home ']=ShoutOnHome
    Data.at[index,'Last_5_match_card_home ']=CardHome
    Data.at[index,'Last_5_match_faul_home ']=FaulHome
    Data.at[index,'Last_5_match_corner_home ']=CornerHome
# Same shot/card/foul/corner rollup as the home loop above, for the away team.
for index in Data.index:
    AwayID = Data.loc[index, 'Away_ID']
    AwayTeamName = Team[Team['team_api_id'] == AwayID]['team_long_name'].iloc[0]
    MatchDay = Data.loc[index, 'DateTime']
    # BUG FIX: the original filter tested Total['AwayTeam'] twice, so every
    # match the away team played at home was silently dropped.
    AwayMatchData = Total[(Total['HomeTeam'] == AwayTeamName) | (Total['AwayTeam'] == AwayTeamName)]
    AwayMatchDataLast5 = AwayMatchData[AwayMatchData['DateTime'] < MatchDay].iloc[-5:]
    ShoutAway = 0
    ShoutOnAway = 0
    CardAway = 0
    FaulAway = 0
    CornerAway = 0
    for MatchIndex in AwayMatchDataLast5.index:
        # Column prefix depends on which side the team played that day.
        if AwayMatchDataLast5.loc[MatchIndex, 'HomeTeam'] == AwayTeamName:
            HomeOrAway = 'H'
        else:
            HomeOrAway = 'A'
        Shout = AwayMatchDataLast5.loc[MatchIndex, HomeOrAway + 'S']
        ShoutAway = ShoutAway + Shout
        ShoutOn = AwayMatchDataLast5.loc[MatchIndex, HomeOrAway + 'ST']
        ShoutOnAway = ShoutOnAway + ShoutOn
        YellowCard = AwayMatchDataLast5.loc[MatchIndex, HomeOrAway + 'Y']
        RedCard = AwayMatchDataLast5.loc[MatchIndex, HomeOrAway + 'R']
        # A red card is weighted as two yellows.
        CardAway = CardAway + (2 * RedCard) + YellowCard
        Faul = AwayMatchDataLast5.loc[MatchIndex, HomeOrAway + 'F']
        FaulAway = FaulAway + Faul
        Corner = AwayMatchDataLast5.loc[MatchIndex, HomeOrAway + 'C']
        CornerAway = CornerAway + Corner
    Data.at[index, 'Last_5_match_shout_total_Away '] = ShoutAway
    Data.at[index, 'Last_5_match_shout_on_Away '] = ShoutOnAway
    Data.at[index, 'Last_5_match_card_Away '] = CardAway
    Data.at[index, 'Last_5_match_faul_Away '] = FaulAway
    Data.at[index, 'Last_5_match_corner_Away '] = CornerAway
# Home Team Last Match Result: 1 win / 0 draw / -1 loss in the home team's
# most recent match before this one, or None if there is no earlier match.
for index in Data.index:
    HomeID=Data.loc[index,'Home_ID']
    MatchDate=Data.loc[index,'DateTime']
    TeamMatch=Match[(Match['home_team_api_id']==HomeID) | (Match['away_team_api_id']==HomeID)]
    TeamMatch=TeamMatch.sort_values(by=['date'])
    LastMatchOrder=TeamMatch[TeamMatch['date']<MatchDate].iloc[-1:]
    if len(LastMatchOrder)>0:
        # Read goals from the side the team actually played ("Gaol" is a typo
        # for "Goal", kept to avoid touching code in a doc-only pass).
        if LastMatchOrder['home_team_api_id'].iloc[0] == HomeID:
            HomeTeamGaol = LastMatchOrder['home_team_goal'].iloc[0]
            RivalTeamGoal = LastMatchOrder['away_team_goal'].iloc[0]
            HomeResult=0
            if HomeTeamGaol<RivalTeamGoal:
                HomeResult = -1
            elif HomeTeamGaol>RivalTeamGoal:
                HomeResult = 1
        else:
            HomeTeamGaol = LastMatchOrder['away_team_goal'].iloc[0]
            RivalTeamGoal = LastMatchOrder['home_team_goal'].iloc[0]
            HomeResult = 0
            if HomeTeamGaol < RivalTeamGoal:
                HomeResult = -1
            elif HomeTeamGaol > RivalTeamGoal:
                HomeResult = 1
        Data.at[index,'home_team_last_result']=HomeResult
    else:
        Data.at[index,'home_team_last_result']=None
#away team last match result
for index in Data.index:
AwayID=Data.loc[index,'Away_ID']
MatchDate=Data.loc[index,'DateTime']
TeamMatch=Match[(Match['home_team_api_id']==AwayID) | (Match['away_team_api_id']==AwayID)]
TeamMatch=TeamMatch.sort_values(by=['date'])
LastMatchOrder=TeamMatch[TeamMatch['date']<MatchDate].iloc[-1:]
if len(LastMatchOrder)>0:
if LastMatchOrder['home_team_api_id'].iloc[0] == AwayID:
AwayTeamGaol = LastMatchOrder['home_team_goal'].iloc[0]
RivalTeamGoal = LastMatchOrder['away_team_goal'].iloc[0]
AwayResult=0
if AwayTeamGaol<RivalTeamGoal:
AwayResult = -1
elif AwayTeamGaol>RivalTeamGoal:
AwayResult = 1
else:
AwayTeamGaol = LastMatchOrder['away_team_goal'].iloc[0]
RivalTeamGoal = LastMatchOrder['home_team_goal'].iloc[0]
AwayResult = 0
if AwayTeamGaol < RivalTeamGoal:
AwayResult = -1
elif AwayTeamGaol > RivalTeamGoal:
AwayResult = 1
Data.at[index,'away_team_last_result']=AwayResult
else:
Data.at[index,'away_team_last_result']=None
# Drop the intermediate working columns plus the team-id join keys, keeping
# only the engineered features.
for col in ColumnNames:
    del Data[col]
del Data['Away_ID']
del Data['Home_ID']
# -
# Rows with any missing feature (e.g. teams without enough match history)
# are discarded before exporting the final training set.
Data=Data.dropna()
Data.to_excel('FinalDataSet.xlsx')
|
DataCollection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/timothylombard/RSB/blob/master/RSB.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="jX4JSH0qu0Vy" colab_type="text"
# # **-1-**
# ## **Analytic Approach to Project Management of PCB Designs**
#
#
# ---
#
#
# + [markdown] id="PTIyhR0A-9ML" colab_type="text"
#
# The document you are reading is a Jupyter notebook, hosted in [Colaboratory.](https://colab.research.google.com/) It is not a static page, but an interactive environment that runs real code! <br>
#
# The graphs in this notebook can be changed by updating variables in the agreement form.
#
# Use it, modify it, have fun! Note: the [usual and customary restrictions apply.](https://github.com/timothylombard/RSB/blob/master/LICENSE) <br>
#
# Comments or suggestions: <EMAIL>
#
# ##Note:
# If you are getting unexpected results try the following: <br>
#
#
# 1. Refresh notebook and re-run each cell in sequence.
# 2. If the previous step doesn't work, it could be because the notebook has been altered from its master source. [Reload a new copy to go back to the starting point](https://colab.research.google.com/github/timothylombard/RSB/blob/master/RSB.ipynb).
#
#
# + [markdown] id="ggbiCezdtHe6" colab_type="text"
# This is a Google Colab Notebook to explore a project for the Road Runner Service Bureau<br>
# 
#
# + [markdown] id="wi-y1or6Nu8h" colab_type="text"
# ##To Analyze, Data is required
# Here is a sample data set: https://raw.githubusercontent.com/timothylombard/RSB/master/RSBdata.csv
#
# Use this [template](https://docs.google.com/spreadsheets/d/1NUL4kl_ZZBQZSozCFONzH-7no0BX48sg7gEyaNE5ZG8/copy) to enter data for your design.
# + id="x0mr-QJiIcyW" colab_type="code" cellView="form" colab={}
import pandas as pd #import the pandas module and shorten to pd
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.pyplot import text
from matplotlib.ticker import FuncFormatter
import matplotlib.ticker as mtick
import seaborn; seaborn.set()
from dateutil import parser
from bokeh.io import show, output_notebook
from bokeh.palettes import PuBu4
from bokeh.plotting import figure
from bokeh.models import Label
from tabulate import tabulate
from datetime import date # Import
#Define Functions
def int2dollar(number):
    '''Format *number* (int or float) as a whole-dollar currency string.

    The value is rounded to the nearest dollar (Python's format-spec
    rounding, i.e. half-to-even) and grouped with thousands separators,
    e.g. 1234.4 -> '$1,234'.  The original docstring claimed "rounded up",
    which the implementation never did.'''
    return f'${number:,.0f}'
def change_report(dataframe, column2check):
    ''' Return a list of dicts describing every row-to-row change in *column2check*.

    For each row whose value differs from the previous row, the report pairs
    the changed row with its predecessor and summarizes the impact on
    connection count, component count, routing progress, and placement.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Must have a DatetimeIndex and the columns Connections,
        Total_Components, Connected, and Placed.
    column2check : str
        Name of the column to scan for changes.
    '''
    changes = []  # accumulates one dict per detected change
    series_to_check = dataframe[column2check]
    # Pair each changed row with the row before it.  Use positional access
    # (.iloc) because the frame is indexed by date, not by integer.
    change_pairs = [(i, i - 1) for i in range(1, len(series_to_check))
                    if series_to_check.iloc[i] != series_to_check.iloc[i - 1]]
    for cur_idx, prev_idx in change_pairs:
        # BUG FIX: the original read rows from the global `df`, silently
        # ignoring the `dataframe` argument.
        change = list(dataframe.iloc[[cur_idx]].itertuples())
        prior = list(dataframe.iloc[[prev_idx]].itertuples())
        conn_diff = change[0].Connections - prior[0].Connections
        pct_conn_diff = conn_diff / prior[0].Connections
        comp_diff = change[0].Total_Components - prior[0].Total_Components
        pct_comp_diff = comp_diff / prior[0].Total_Components
        change_dict = {"-0- Change Date":change[0].Index.date(),
                   "-1.0- Connections":change[0].Connections,
                   "-1.1- Previous Connections":prior[0].Connections,
                   "-1.2- Diff in Conns": conn_diff,
                   "-1.3- Pct Change Conns": f'{pct_conn_diff :.2%}',
                   "-2.0- Component Count": change[0].Total_Components,
                   "-2.1- Previous Comp Count":prior[0].Total_Components,
                   "-2.2- Diff in Comp Count":comp_diff,
                   "-2.3- Pct Change in Comp Count": f'{pct_comp_diff :.2%}',
                   "-3.0- Routed Connections":change[0].Connected,
                   "-3.1- Previous Routed Connections" :prior[0].Connected,
                   "-3.2- Impact to Connected Progress" : prior[0].Connected - change[0].Connected,
                   "-4.0- Parts Placed" : change[0].Placed,
                   "-4.1- Previous Parts Placed": prior[0].Placed,
                   "-4.2- Impact to Placement" : prior[0].Placed - change[0].Placed,
                  }
        changes.append(change_dict)
    return changes
def pprint_df(dframe):
    """Pretty-print *dframe* as a psql-style text table, hiding the index."""
    table = tabulate(dframe, headers='keys', tablefmt='psql', showindex=False)
    print(table)
#@title Enter the link to the CSV file
#assign variable link that points to copy of csv data
link = 'https://raw.githubusercontent.com/timothylombard/RSB/master/RSBdata.csv' #@param {type:"string"}
# + id="cO1JTOBVr6d-" colab_type="code" cellView="form" colab={}
#@title Print out the CSV data from link
# Load the project-tracking CSV into a DataFrame and show it as a text table.
df = pd.read_csv(link)
pprint_df(df)
# + id="bKmbupibuoFS" colab_type="code" cellView="form" colab={}
#@title Fill in Key Agreement Details then hit run button on the left
# Agreement parameters used by the charts below: target finish date, dollar
# budget, and regular/overtime billing rates (dollars per hour).
project_name = 'ABC_CPU_Main' #@param {type:"string"}
schedule = '2017-01-10' #@param {type:"date"}
budget = 9000 #@param {type:"number"}
regular_rate = 50.0 #@param {type:"number"}
overtime_rate = 75.0 #@param {type:"number"}
# + id="pruXXcYVjUmV" colab_type="code" cellView="form" colab={}
#@title Placement and Routing progress Chart
# Fraction of parts placed and of connections routed, per progress snapshot.
df['pctPlaced'] = df['Placed']/df['Total_Components']
df['pctConn'] = df['Connected']/df['Connections']
# Index the frame by snapshot date so the x-axis is time.
df['Date'] = pd.to_datetime(df['Date'])
df.set_index('Date', inplace=True)
ax = df[['pctConn','pctPlaced']].plot(title='Placement Routing Progress for '+project_name)
ax.yaxis.set_major_formatter(FuncFormatter('{0:.0%}'.format))#format y-axis to %
plt.legend(loc='upper left')
# Vertical reference line and label at the agreed target date.
plt.axvline(x=schedule, color='g', linestyle='-')
text(x=(parser.parse(schedule) + pd.Timedelta(days=.1)), y=0, s="Target Schedule "+str(schedule))
ax.set_ylim(bottom=0)
# + [markdown] id="ZiBEYISTvFcD" colab_type="text"
#
#
# In the above chart, percentages were calculated by using the formula-
#
#
# $Percent Placed = \frac{parts Placed}{TotalComponentCount}$
#
# $Percent Connected = \frac{Connected}{Connections}$
#
#
#
#
#
# + [markdown] id="TQSLfIb6ziEm" colab_type="text"
# # -6- Budget Calculations
# + id="pKoP8kxZLDf4" colab_type="code" cellView="form" colab={}
#@title Expenses over time
# Cumulative hours to date, split by rate class, converted to dollars spent.
df['regrunning'] = df['Reg_Hours'].cumsum()
df['otrunning'] = df['OT_Hours'].cumsum()
df['spend'] = df['regrunning']*regular_rate + df['otrunning']*overtime_rate
budget_string = int2dollar(budget)
spend_string = int2dollar(df.iloc[-1].spend)  # spend as of the latest snapshot
ax = df['spend'].plot()
#add reference line showing budget target
plt.axhline(y=budget, color='r', linestyle='-')
text(x=df.index.min(), y=budget+budget/25, s="Target Budget: "+ budget_string)
#add reference line showing schedule target
plt.axvline(x=schedule, color='g', linestyle='-')
#text(x=(parser.parse(schedule) + pd.Timedelta(days=1)), y=budget*.1, s="Target Schedule "+str(schedule))
text(x=(parser.parse(schedule) + pd.Timedelta(days=.1)), y=0, s="Target Schedule "+str(schedule))
plt.title('Expenses to date for '+project_name+': '+spend_string);
# Format the y-axis ticks as whole dollars.
fmt = '${x:,.0f}'
tick = mtick.StrMethodFormatter(fmt)
ax.yaxis.set_major_formatter(tick)
ax.set_ylim(bottom=0)
# + id="Wsn84ADguyl4" colab_type="code" cellView="form" colab={}
#@title Schedule Bullet Graph
# Compare days worked so far against the agreed number of days for the job on
# a bullet chart with OK / Late / Very Late bands.
agreement_date = parser.parse(schedule) # schedule was set in the agreement form
days_worked = df.index.max() - df.index.min() # subtract last record date from first
days4job = agreement_date - df.index.min() # agreed length: target date minus first record date
data = [(project_name, days4job.days, days_worked.days)]
# Band edges: on time up to 1x the agreed days, late up to 2x, very late to 3x.
limits = [0, days4job.days, days4job.days *2, days4job.days *3]
labels = ["OK", "Late", "Very Late"]
cats = [x[0] for x in data]
#determine if we are late or still have days remaining
if days4job.days > days_worked.days:
  status = str(days4job.days - days_worked.days)+" days remaining"
else:
  status = str(days_worked.days - days4job.days)+" days past due"
# Calculate the percentage of schedule days used.
sch_pct = f'{days_worked.days/days4job.days*100:.2f}' +"% "
# Make the basic figure...
p=figure(
    title=sch_pct +"Day "+str(days_worked.days)+" on a "+str(days4job.days)+" day project: "+status,
    plot_height=125,
    plot_width=500,
    y_range=cats,
    toolbar_location=None,
    )
p.x_range.range_padding = 0
p.grid.grid_line_color = None
p.xaxis[0].ticker.num_minor_ticks = 0
for left, right, color in zip(limits[:-1], limits[1:], PuBu4[::-1]):
    p.hbar(y=cats, left=left, right=right, height=0.8, color=color)
perf = [x[2] for x in data]
p.hbar(y=cats, left=0, right=perf, height=0.3, color="gray")
comp = [x[1]for x in data]
p.segment(x0=comp, y0=[(x, -0.5) for x in cats], x1=comp,
          y1=[(x, 0.5) for x in cats], color="Firebrick", line_width=2)
# BUG FIX: the original added each band label twice (duplicated add_layout
# call); one label per band is enough.  Unused start_date/end_date removed.
for start, label in zip(limits[:-1], labels):
    p.add_layout(Label(x=start, y=0, text=label, text_font_size="10pt",
                       text_color='black', y_offset=5, x_offset=15))
citation = Label(x=days4job.days, y=50, y_units='screen',
                 text_font_size="8pt", text= "Target "+schedule, render_mode='css', x_offset=5)
p.add_layout(citation)
output_notebook()
show(p)
# + id="oO9R9ON2jixb" colab_type="code" cellView="form" colab={}
#@title Expenses Bullet Graph
# Same bullet-chart layout as the schedule graph, but for dollars spent
# against the budget target.
spend2date = df.loc[df.index.max(),'spend']
b_data = [(project_name, budget, spend2date)]
b_limits = [0, budget, budget *2, budget *3]
b_labels = ["OK", "Over Budget", "Way Over Budget"]
# BUG FIX: categories were taken from the schedule cell's `data`; use b_data.
b_cats = [x[0] for x in b_data]
if budget > spend2date:
  b_status = int2dollar(budget - spend2date)+" budget remaining"
else:
  b_status = int2dollar(spend2date - budget)+" over target"
budget_pct = f'{spend2date/budget*100:.2f}' +"% "
bp=figure(
    title=int2dollar(spend2date) +" ("+budget_pct+") spent on a "+int2dollar(budget)+" budget target project: "+b_status,
    plot_height=125,
    plot_width=700,
    y_range=b_cats,
    toolbar_location=None,
    )
bp.x_range.range_padding = 0
bp.grid.grid_line_color = None
bp.xaxis[0].ticker.num_minor_ticks = 0
for left, right, color in zip(b_limits[:-1], b_limits[1:], PuBu4[::-1]):
    bp.hbar(y=b_cats, left=left, right=right, height=0.8, color=color)
b_perf = [x[2] for x in b_data]
bp.hbar(y=b_cats, left=0, right=b_perf, height=0.3, color="gray")
b_comp = [x[1]for x in b_data]
bp.segment(x0=b_comp, y0=[(x, -0.5) for x in b_cats], x1=b_comp,
           y1=[(x, 0.5) for x in b_cats], color="Green", line_width=2)
# BUG FIX: one label per band (the original duplicated each add_layout call).
for start, label in zip(b_limits[:-1], b_labels):
    bp.add_layout(Label(x=start, y=0, text=label, text_font_size="10pt",
                        text_color='black', y_offset=5, x_offset=15))
output_notebook()
show(bp)
# + [markdown] id="ddQS8sdJ5N2L" colab_type="text"
# # Analyzing Changes
# To create a change report:<br>
# <br>
#
#
# * Look for changes in the number of connections.
# * Create a change pair. (The row that changed and the row prior)
# * Analyze several aspects of the change pair
# * Report
#
#
#
# + id="R65QmyIyPctD" colab_type="code" cellView="form" colab={}
#@title Change Report
# Detect rows where the connection count changed and print a per-change
# summary (one transposed single-row frame per change).
report = change_report(df, 'Connections')
print("Count of changes : ", len(report))
if report:
    for chg_no, change in enumerate(report,1):
        cdf = pd.DataFrame.from_records(change, index=[0])
        print("Stats on Change #"+str(chg_no), project_name)
        print(tabulate(cdf.T, headers="keys", tablefmt="fancy_grid"))
else:
    print("No Changes")
# + [markdown] id="IS_gH979ZOT8" colab_type="text"
# 
|
RSB.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.6 64-bit
# name: python3
# ---
# Jupytext keeps the IPython magics below as comments; they only take effect
# inside a live notebook kernel (the try/except swallows the Colab-only
# %tensorflow_version magic when it is unavailable).
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
try:
    # %tensorflow_version 2.x
except:
    pass
import warnings
warnings.filterwarnings('ignore')
# +
import pythainlp
from pythainlp import word_tokenize
# from pythainlp.corpus import thai_stopwords as stopwords
from pythainlp.corpus import wordnet
from nltk.stem.porter import PorterStemmer
from nltk.corpus import words
from stop_words import get_stop_words
pythainlp.__version__
# -
import nltk
nltk.download('words')
# Stopword lists (Thai from PyThaiNLP, English from stop_words) and an
# English stemmer; all three are used by split_word below.
th_stop = tuple(pythainlp.corpus.thai_stopwords())
en_stop = tuple(get_stop_words('en'))
p_stemmer = PorterStemmer()
clean_text = ["เผยผลฉีดวัคซีน คนสูงอายุโดยเฉพาะผู้ที่มีโรคร่วม ภูมิคุ้มกันบกพร่อง หรืออยู่บ้านพักคนชรา ภูมิจะลดลงเร็วกว่ากลุ่มอื่น","เผยผลฉีดวัคซีน คนสูงอายุโดยเฉพาะผู้ที่มีโรคร่วม","เผยแพร่ข้อมูลชุดของตนเองบ้างใน preprint พบว่าอัตราการลดลงของประสิทธิผลของวัคซีนที่ใช้ในอังกฤษลดลงทั้ง 3 ชนิด โดยที่ AstraZeneca และ Pfizer vaccine ก็ลดลงในอัตราส่วนพอๆ กันแทบจะเป็นเส้นขนาน เมื่อติดตามไปอย่างน้อย 5 เดือน ประสิทธิผลของวัคซีน Pfizer ยังคงสูงกว่า AstraZenecaอัตราลดลงในกลุ่มเปราะบาง คือคนสูงอายุโดยเฉพาะผู้ที่มีโรคร่วม ภูมิคุ้มกันบกพร่อง หรืออยู่บ้านพักคนชรา จะลดลงเร็วกว่ากลุ่มอื่น ข้อมูลนี้น่าจะเป็นเหตุผลหนึ่งที่ JCVI แนะนำให้เริ่มฉีดวัคซีนเข็มสามในกลุ่มเสี่ยงเหล่านี้ก่อน หลังได้รับวัคซีนครบอย่างน้อย 6 เดือน โดยใช้ Pfizer vaccine เป็นวัคซีนหลัก"]
def split_word(text):
    """Tokenize mixed Thai/English *text* and return a cleaned token list.

    Pipeline: newmm word segmentation -> drop Thai and English stop words ->
    stem English tokens -> drop purely numeric tokens and tokens containing
    a space.  (An optional Thai wordnet lemmatization step existed here but
    is disabled.)
    """
    words = word_tokenize(text, engine='newmm')
    # Remove Thai and English stop words.
    words = [w for w in words if w not in th_stop and w not in en_stop]
    # Reduce English tokens to their stems.
    words = [p_stemmer.stem(w) for w in words]
    # Discard purely numeric tokens.
    words = [w for w in words if not w.isnumeric()]
    # Discard tokens that contain whitespace.
    words = [w for w in words if ' ' not in w]
    return words
print('tokenized text:\n',split_word(clean_text[0]))
tokens_list = [split_word(txt) for txt in clean_text] # keep the tokenized form of every document
tokens_list
from sklearn.feature_extraction.text import CountVectorizer
# Join tokens with commas so the vectorizers can re-split them verbatim,
# bypassing their default tokenization (which would mangle Thai text).
tokens_list_j = [','.join(tkn) for tkn in tokens_list]
cvec = CountVectorizer(analyzer=lambda x:x.split(','))
c_feat = cvec.fit_transform(tokens_list_j) # approach 1: bag of words with raw counts
cvec.vocabulary_
c_feat[:,:].todense()
from sklearn.feature_extraction.text import TfidfVectorizer
tvec = TfidfVectorizer(analyzer=lambda x:x.split(','),)
t_feat = tvec.fit_transform(tokens_list_j) # approach 2: bag of words with tf-idf weights
t_feat[:,:].todense()
print(len(tvec.idf_),len(tvec.vocabulary_))
|
Preprocess/Pythai.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''venv'': venv)'
# language: python
# name: python395jvsc74a57bd0a346c6d1fcbc1b1bf63d1089c5bc18a71147686e230d623a8e89aa95db65f4a4
# ---
# # Poincare-Lindstedt method
# ## Swing pendulum
#
# The equation for the swing pendulum is nonlinear due to the rotation nature of the movement.
# $$\ddot{x}+\sin x\approx\ddot{x}+x-\frac{1}{6}x^3=0$$
# Rewrite the equation as
# $$\ddot{x}+x=\epsilon x^3,\quad\epsilon=-1/6$$
# The value $\epsilon$ can be considered as small parameter.
# \begin{align}
# x(t)&\approx x_0(\omega t)+\epsilon x_1(\omega t)+\dots=x_0(t')+\epsilon x_1(t')+\dots\\
# \omega&\approx1+\epsilon \omega_1 + \epsilon^2\omega_2+\dots
# \end{align}
# Change time scale again, once $\omega$ in steady state solution is a function of amplitude. Scaling the time in this way makes frequency in solution independent of amplitude.
# $$\hat{t}=\omega t,\quad\frac{d^2}{dt^2}=\omega^2\frac{d^2}{dt'^2}$$
# The equation after changing time scale
# $$(1+\epsilon \omega_1+\dots)^2(\ddot{x}_0+\epsilon \ddot{x}_1+\dots)+x_0+\epsilon x_1+\dots=\epsilon (x_0+\epsilon x_1+\dots)^3$$
# Expand and collect terms with the same power of the small parameter $\epsilon$:
# $$(\ddot{x}_0+x_0)+\epsilon(\ddot{x}_1+x_1)+\dots=0+\epsilon(x_0^3-2\omega_1\ddot{x_0})+\dots$$
# which can be broken down into sequence of equation:
# \begin{align}
# \ddot{x}_0+x_0&=0\\
# \ddot{x}_1+x_1&=-2\omega_1\ddot{x}_0+x_0^3
# \end{align}
# with initial conditions like this $x_0(0)=a$, $\dot{x}_0(0)=0$, $x_1(0)=0$, $\dot{x}_1(0)=0$
#
# Solution to the 1st equation:
# $$x_0=a\cos t$$
# Substituting to the next equation yields
# $$\ddot{x}_1+x_1=\color{brown}{a(2\omega_1+\frac{3}{4}a^2)\cos t}+\frac{1}{4}a^3\cos 3t=\frac{1}{4}a^3\cos 3t$$
# where the term resulting in secular (aperiodic) solution is highlighted with brown color. Equating to zero the coefficient in this term results in condition for the first order correction to the frequency:
# $$\omega_1=-\frac{3}{8}a^2,\quad x_1=\frac{1}{32}a^3(\cos 3t-\cos t)$$
# Solution accounting for the next harmonic
# $$x\approx a\cos\omega t-\frac{a^3}{192}(\cos 3\omega t-\cos\omega t),\quad \omega\approx 1-\frac{1}{16}a^2$$
# ## Secular terms
#
# These are aperiodic terms that appear in the solution because the equation of the idealized system does not account for dissipation processes, which usually limit the amplitude in the real world. For instance, the specific solution to this equation
# $$\ddot{x}+x=\sin(t),\quad\implies\quad x=-\frac{t}{2}\cos t$$
# The solution is not a steady state.
# ## Compare numerical solution with analytical approximation
# Equation
# \begin{equation}
# \ddot{x}+\sin x=0 \qquad
# x(0) = x_0 \quad
# \dot{x}(0) = 0
# \end{equation}
# introducing new variable
# \begin{equation}
# z_1 = x \quad
# z_2 = \dot{x}
# \end{equation}
# get the system of 1st order equation for numerical procedure
# \begin{equation}
# \frac{d}{dt}
# \begin{pmatrix}
# z_1 \\ z_2
# \end{pmatrix}=
# \begin{pmatrix}
# z_2 \\
# -\sin z_1
# \end{pmatrix}
# \end{equation}
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
def duffing_eqs(z, t):
    """Pendulum ODE right-hand side for odeint: z = [angle, angular velocity]."""
    angle, velocity = z
    return [velocity, -np.sin(angle)]
# + [markdown] tags=[]
# Numerical solution
# +
# Initial angles: small, pi/2, and near the inverted position (0.9*pi).
x1 = 0.1 # rad
x2 = np.pi/2
x3 = 0.9*np.pi
# Five periods of the linearized pendulum, 100 samples.
t = np.linspace(0, 5*2*np.pi, 100)
# Integrate from rest (zero initial angular velocity) for each amplitude.
sol1 = odeint(duffing_eqs, [x1, 0], t)
sol2 = odeint(duffing_eqs, [x2, 0], t)
sol3 = odeint(duffing_eqs, [x3, 0], t)
# + tags=[]
def plot_duffing(t, sol, fcn, *, title):
    """Overlay a numerical solution and its analytic approximation.

    t: time samples; sol: numerical values; fcn: analytic values;
    title: axes title (keyword-only).
    """
    plt.plot(t, sol, t, fcn)
    plt.title(title)
    plt.xlabel('t')
    plt.ylabel('x')
    plt.legend(['numerical', 'analytic'])
# + [markdown] tags=[]
# Approximation to analytical solution with frequency correction
# $$x(t)\approx x_0\cdot\cos\left(\left(1-\frac{1}{16}x_0^2\right)t\right)$$
# -
def approx_sol_1(t, x0):
    """First-order Poincare-Lindstedt approximation x0*cos(omega*t),
    with the amplitude-corrected frequency omega = 1 - x0^2/16."""
    omega = 1 - x0**2 / 16
    return x0 * np.cos(omega * t)
def approx_sol_2(t, x0):
    """Second-order approximation: approx_sol_1 plus the cos(3*omega*t)
    harmonic correction of amplitude x0^3/192."""
    omega = 1 - x0**2 / 16
    fundamental = x0 * np.cos(omega * t)
    harmonic = x0**3 / 192 * (np.cos(3 * omega * t) - np.cos(omega * t))
    return fundamental - harmonic
# + [markdown] tags=[]
# Solution for different amplitudes from $\left[0, \pi\right)$ range
# -
plt.figure(figsize=(15,10))
plt.subplot(2,2,1)
# Small amplitude: the plain linear solution x1*cos(t) is already accurate.
plot_duffing(t, sol1[:,0], x1*np.cos(t),
             title='small amplitude')
plt.subplot(2,2,2)
# Moderate amplitude without the frequency correction: the phase drifts.
plot_duffing(t, sol2[:,0], x2*np.cos(t),
             title='$x_0=0.5\pi$, no freq. correction')
plt.subplot(2,2,3)
plot_duffing(t, sol2[:,0], approx_sol_1(t, x2),
             title='$x_0=0.5\pi$, with freq. correction')
plt.subplot(2,2,4)
# Large amplitude: stack the 1st- and 2nd-order approximations side by side.
plot_duffing(t, sol3[:,0],
             np.append(np.reshape(approx_sol_1(t, x3), (len(t),1)),
                       np.reshape(approx_sol_2(t, x3), (len(t),1)),
                       axis=1),
             title='$x_0=0.9\pi$, with correction')
plt.show()
# Phase portraits (x versus dx/dt) for the three amplitudes.
plt.plot(sol1[:,0], sol1[:,1],
         sol2[:,0], sol2[:,1],
         sol3[:,0], sol3[:,1])
plt.title('Phase plane')
plt.xlabel('$x$')
plt.ylabel('$\dot{x}$')
plt.legend(['$x_0=0.1\pi$','$x_0=0.5\pi$','$x_0=0.9\pi$'])
plt.show()
|
duffing-oscillator/duffing-poincare-lindstedt.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append("..")
from rosalind_tools.config import *
# Given: Positive integers n≤100 and m≤20.
#
# Return: The total number of pairs of rabbits that will remain after the n-th month if all rabbits live for m months.
def mortal_rabbits(n, m):
    """Print and return the number of rabbit pairs alive after month *n*,
    where each pair matures in one month and lives exactly *m* months.

    Parameters: n - months to simulate (1 <= n <= 100);
    m - lifespan of a pair in months (1 <= m <= 20).

    The original returned the count silently for n == 1 but printed and
    returned None otherwise; now every path prints and returns the count
    (backward compatible: callers that ignored the return are unaffected).
    """
    # rabbit_age[k] == number of pairs that are k months old.
    rabbit_age = [0] * m
    rabbit_age[0] = 1
    for _ in range(n - 1):
        # Every pair at least one month old produces one newborn pair.
        rabbit_age.insert(0, sum(rabbit_age[1:]))
        # Pairs that have lived m months die off.
        rabbit_age.pop()
    # Total population is the sum over all ages.
    num_rabbits = sum(rabbit_age)
    print(num_rabbits)
    return num_rabbits
# Try sample dataset
n = 6
m = 3
mortal_rabbits(n, m)  # Rosalind sample answer: 4
# Try Rosalind dataset
# The input file's single line holds "n m" separated by whitespace.
with open(data_dir/"rosalind_fibd.txt", 'r') as f:
    (n, m) = tuple(f.readline().rstrip().split())
    mortal_rabbits(int(n), int(m))
|
notebooks/mortal_fibonacci_rabbits.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python3
# ---
# +
# Code for sort algorythims in sort_collection is copied from:
# https://stackabuse.com/sorting-algorithms-in-python
# -
import numpy as np
from datetime import datetime
import pandas as pd
import sort_collection as sc
import crystal_sort
# Benchmark configuration: number of timed runs, value range, and list size.
# NOTE(review): `min` and `max` shadow the builtins at module scope; they are
# only passed straight to get_rand_set below.
runs = 100
min = 0
max = 10000
size = 10000
def get_rand_set(min: int, max: int, size: int):
    """Build a list of *size* random integers drawn uniformly from [min, max)."""
    values = np.random.randint(min, max, size=size)
    return values.tolist()
# Algorithms to benchmark; 'crystal_sort' is handled specially in the timing
# loop because it is also timed with its second (pgh) flag enabled.
algorithm_list = [
    'crystal_sort',
    'selection_sort',
    'insertion_sort',
    'heap_sort',
    'merge_sort',
    'quick_sort',
]
results = {}
buckets = []
# Pre-generate one random input list per run so every algorithm can be timed
# on identical data.
for run in range(runs + 1):
    bucket = get_rand_set(min, max, size)
    # bucket = [1] * 1000
    buckets.append(bucket)
# +
# Column name kept verbatim ('microsecods' typo included) so existing result
# CSVs keep the same header.
time_field_name = 'time (microsecods)'
for algorithm_name in algorithm_list:
    results[algorithm_name] = {}
    results[algorithm_name][time_field_name] = 0
results['crystal_sort_pgh'] = {}
results['crystal_sort_pgh'][time_field_name] = 0
# BUG FIXES vs the original:
#  * each run now times buckets[run] (the original always re-sorted the single
#    leftover `bucket` from the generation loop);
#  * every algorithm gets its own copy, so in-place sorts cannot hand later
#    algorithms pre-sorted data;
#  * the loop runs `runs` times to match the division below (was runs + 1);
#  * elapsed time uses total_seconds() (`.microseconds` silently drops whole
#    seconds for sorts slower than 1 s).
for run in range(runs):
    for algorithm_name in algorithm_list:
        if algorithm_name == 'crystal_sort':
            data = list(buckets[run])
            start = datetime.utcnow()
            result = crystal_sort.sort(data, False)
            end = datetime.utcnow()
            results['crystal_sort'][time_field_name] += (end - start).total_seconds() * 1e6
            data = list(buckets[run])
            start = datetime.utcnow()
            result = crystal_sort.sort(data, True)
            end = datetime.utcnow()
            results['crystal_sort_pgh'][time_field_name] += (end - start).total_seconds() * 1e6
        else:
            method_to_call = getattr(sc, algorithm_name)
            data = list(buckets[run])
            start = datetime.utcnow()
            result = method_to_call(data)
            end = datetime.utcnow()
            results[algorithm_name][time_field_name] += (end - start).total_seconds() * 1e6
# Convert accumulated totals into a per-run average.
for algorithm_name in algorithm_list:
    results[algorithm_name][time_field_name] = results[algorithm_name][time_field_name] / runs
    if algorithm_name == 'crystal_sort':
        results['crystal_sort_pgh'][time_field_name] = results['crystal_sort_pgh'][time_field_name] / runs
# +
# Tabulate the per-algorithm average times and persist them to CSV.
df = pd.DataFrame().from_dict(results, orient='index')
print(df)
file_name = f'results_{runs:02}_runs_{size}_{max}.csv'
df.to_csv(file_name)
# -
|
benchmark and comparison/benchmark_comp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
os.environ["OMP_NUM_THREADS"] = '32'
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
import math
import sys
sys.path.append('../')
from pyfd3d import pyfd3d
from pyMKL import pardisoSolver
# +
# Simulation setup: physical length unit, vacuum wavelength, PML thickness
# (zero on every face here), domain extents, and grid resolution.
L0 = 1e-6; #length unit: microns
wvlen = 0.47; # wavelength in L0
Npml = np.array([0,0,0]); # [Nx_pml Ny_pml]
xrange = 0.5*np.array([-1, 1]); # x boundaries in L0
yrange = 0.5*np.array([-1, 1]); # y boundaries in L0
zrange = 0.5*np.array([-1, 1]);
N = np.array([51,51,51])
# -
## Set up the permittivity.
eps_r = np.ones(N)  # uniform unit permittivity everywhere (vacuum)
print(eps_r.shape)
M = np.prod(N)  # total number of grid cells
print(M)
#eps_r[8:12, 8:12, 8:12] = 6;
## Set up the current source density.
# A single z-directed point source at the center cell; the other current
# components are left zero.
Jz = np.zeros(N);
ind_src = (np.ceil(N/2).astype('int')); # (i,j) indices of the center cell; Nx, Ny should be odd
print(ind_src)
Jz[ind_src[0], ind_src[1], ind_src[2]] = 1;
Jx = np.zeros(N);
Jy = np.zeros(N);
eps_r_tensor_dict = {
'eps_xx': eps_r,
'eps_yy': eps_r,
'eps_zz': eps_r,
}
JCurrentVector = {
'Jx': Jx,
'Jy': Jy,
'Jz': Jz
}
## Wonsoek's scalar parameter 1, -1, or 0
s = -1;
dir(pyfd3d)
import pyfd3d.fd3d as fd3d
print(wvlen, Npml)
A,b, Ch = fd3d.curlcurlE(L0, wvlen, xrange, yrange, zrange, eps_r_tensor_dict, JCurrentVector,Npml, s = -1)
print(A.count_nonzero())
print(A[0:100,0:100])
plt.figure(figsize = (15,15))
plt.spy(A, markersize = 0.2)
# +
# Reverse Cuthill-McKee reordering to reduce matrix bandwidth, then view the
# sparsity pattern of the permuted matrix P A P^T.
from scipy.sparse.csgraph import reverse_cuthill_mckee
prcm = reverse_cuthill_mckee(A)
row = np.arange(3*M)
col = prcm
# Permutation matrix built from the RCM ordering.
P = sp.coo_matrix((np.ones(3*M), (row, col)), shape=(3*M,3*M))
plt.figure(figsize = (10,10))
plt.spy(P@A@P.T, markersize = 0.5)
# -
# ## utilize iterative solver
from scipy.sparse.linalg import qmr, bicgstab
# %%time
#x, info = qmr(A, b, maxiter = 2000)
# Solve A x = b iteratively with BiCGSTAB; info == 0 means it converged.
x, info = bicgstab(A,b, maxiter = 2000)
print(info, 'if 0 yes')
plt.figure(figsize = (8,4));
plt.plot(np.log10(np.abs(x)), linewidth = 0.5)
# +
Ex = x[0:M].reshape(N, order = 'F');
Ey = x[M:2*M].reshape(N, order = 'F');
Ez = x[2*M:3*M].reshape(N, order = 'F');
plt.imshow(np.abs(Ex[:,12,:]))
plt.colorbar()
# -
plt.figure();
for i in range(N[0]):
plt.plot(np.abs(Ez[i,:,12]))
plt.ylim(ymin = 0)
plt.show()
print(A.shape)
plt.figure(figsize = (8,8))
plt.spy(A)
# +
#try solving with pardiso
# pardiso doesn't even really work with this FDFD matrix, which is odd...honestly very very odd
## current matrix is all real and indefinite, but the solve
## preconditioning the lu solver is great, but it's not life-changing...how do we make the factorization life-changing
# with no pml, curl curl e is symmetric
pSolve = pardisoSolver(A, mtype=-2)
# +
# # %%time
# pSolve.factor()
# +
# # %%time
# x0 = pSolve.solve(P@np.imag(b))
# x0= P.T@x0
# plt.plot(np.abs(x0))
# plt.figure()
# Ex = np.reshape(x0[0:M],N)
# +
#plt.imshow(np.abs(Ex[:,:,0]))
# -
# plt.figure(figsize = (10,5))
# BUG FIX: `x0` is produced only by the commented-out pardiso solve above, so
# plotting it unconditionally raised NameError; guard the direct-solver overlay.
if 'x0' in globals():
    plt.plot(np.abs(x0))
plt.plot(np.abs(x), 'x-', markersize = 1, alpha = 0.2)
print(dir(pardisoSolver))
# Dump the PARDISO iparm control/status parameters for inspection.
for index, val in enumerate(pSolve.iparm):
    print(index, val)
|
notebooks/check solver no pml.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-fastai]
# language: python
# name: conda-env-.conda-fastai-py
# ---
# + [markdown] Collapsed="false"
# # Mish Derivatves
# + Collapsed="false"
import torch
from torch.nn import functional as F
# + Collapsed="false"
inp = torch.randn(100) + (torch.arange(0, 1000, 10, dtype=torch.float)-500.)
inp
# + Collapsed="false"
import sympy
from sympy import Symbol, Function, Expr, diff, simplify, exp, log, tanh
x = Symbol('x')
f = Function('f')
# + [markdown] Collapsed="false"
# ## Overall Derivative
# + Collapsed="false"
diff(x*tanh(log(exp(x)+1)))
# + Collapsed="false"
simplify(diff(x*tanh(log(exp(x)+1))))
# + [markdown] Collapsed="false"
# ## Softplus
#
# $ \Large \frac{\partial}{\partial x} Softplus(x) = 1 - \frac{1}{e^{x} + 1} $
#
# Or, from PyTorch:
#
# $ \Large \frac{\partial}{\partial x} Softplus(x) = 1 - e^{-Y} $
#
# Where $Y$ is saved output
# + Collapsed="false"
class SoftPlusTest(torch.autograd.Function):
    """Softplus with a custom backward that reuses the saved output.

    Forward: y = log(1 + exp(x)), falling back to y = x above `threshold`
    to avoid overflowing exp().  Backward uses d/dx softplus(x) = 1 - e^{-y},
    where y is the saved forward output.
    """

    @staticmethod
    def forward(ctx, inp, threshold=20):
        out = torch.where(inp < threshold, torch.log1p(torch.exp(inp)), inp)
        ctx.save_for_backward(out)
        return out

    @staticmethod
    def backward(ctx, grad_out):
        out, = ctx.saved_tensors
        grad_in = 1 - torch.exp(-out)
        return grad_out * grad_in
# + Collapsed="false"
torch.allclose(F.softplus(inp), SoftPlusTest.apply(inp))
# + Collapsed="false"
torch.autograd.gradcheck(SoftPlusTest.apply, inp.to(torch.float64).requires_grad_())
# + [markdown] Collapsed="false"
# ## $tanh(Softplus(x))$
# + Collapsed="false"
diff(tanh(f(x)))
# + Collapsed="false"
class TanhSPTest(torch.autograd.Function):
    """tanh(softplus(x)) with an analytic backward pass.

    Backward recomputes softplus from the saved input and applies
    d/dx tanh(sp(x)) = (1 - tanh^2(sp)) * (1 - e^{-sp}).
    """

    @staticmethod
    def forward(ctx, inp, threshold=20):
        ctx.save_for_backward(inp)
        softplus = torch.where(inp < threshold, torch.log1p(torch.exp(inp)), inp)
        return torch.tanh(softplus)

    @staticmethod
    def backward(ctx, grad_out, threshold=20):
        inp, = ctx.saved_tensors
        softplus = torch.where(inp < threshold, torch.log1p(torch.exp(inp)), inp)
        th = torch.tanh(softplus)
        grad_in = (1 - th * th) * (1 - torch.exp(-softplus))
        return grad_out * grad_in
# + Collapsed="false"
torch.allclose(TanhSPTest.apply(inp), torch.tanh(F.softplus(inp)))
# + Collapsed="false"
torch.autograd.gradcheck(TanhSPTest.apply, inp.to(torch.float64).requires_grad_())
# + [markdown] Collapsed="false"
# ## Mish
# + Collapsed="false"
diff(x * f(x))
# + Collapsed="false"
diff(x*tanh(f(x)))
# + Collapsed="false"
simplify(diff(x*tanh(f(x))))
# + Collapsed="false"
diff(tanh(f(x)))
# + Collapsed="false"
class MishTest(torch.autograd.Function):
    """Mish activation, x * tanh(softplus(x)), with analytic backward.

    Backward: d/dx [x*tanh(sp)] = tanh(sp) + x*(1 - tanh^2(sp))*(1 - e^{-sp}),
    recomputing softplus from the saved input.
    """

    @staticmethod
    def forward(ctx, inp, threshold=20):
        ctx.save_for_backward(inp)
        softplus = torch.where(inp < threshold, torch.log1p(torch.exp(inp)), inp)
        return inp.mul(torch.tanh(softplus))

    @staticmethod
    def backward(ctx, grad_out, threshold=20):
        inp, = ctx.saved_tensors
        softplus = torch.where(inp < threshold, torch.log1p(torch.exp(inp)), inp)
        th = torch.tanh(softplus)
        grad_in = inp * ((1 - th * th) * (1 - torch.exp(-softplus))) + th
        return grad_out * grad_in
# + Collapsed="false"
torch.allclose(MishTest.apply(inp), inp.mul(torch.tanh(F.softplus(inp))))
# + Collapsed="false"
# BUG FIX: the original re-ran gradcheck on TanhSPTest here (copy-paste);
# this cell should verify MishTest's backward.
torch.autograd.gradcheck(MishTest.apply, inp.to(torch.float64).requires_grad_())
|
extra/Derivatives.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: teaching
# language: python
# name: teaching
# ---
# # Scikit-learn introduction
#
# Scikit-learn is one of the most popular and actively developed machine learning libraries out there. It contains nearly all "canonical" preprocessing and classification techniques. The scikit-learn preprocessing and classifier APIs are identical across all techniques, so models can be quickly swapped out.
#
# This notebook is just a brief introduction to some of the core features of scikit-learn, specifically the uniform function calls to validation, preprocessing, model fitting and testing.
#
# More recently, the Keras deep learning module has introduced a scikit-learn interface.
# Let's get some random data to play with.
import numpy as np
# Synthetic regression data: 100 samples of 10 features in [-50, 50), targets
# from a random linear model plus Gaussian noise (sd = 20).
x = (np.random.rand(100, 10) - .5) * 100
b = np.random.rand(10, 1)
y = np.dot(x, b) + np.random.randn(100, 1)*20
# ## Validation
#
# As will be discussed later, keeping training and testing sets separate is crucial to valid models and inferences. The scikit-learn APIs make it easy to keep them separate, as well as containing the submodule `model_selection` for good data-splitting hygiene.
# Hold out 20% of the samples for testing; the split is random per run.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2) # randomly 80/20 split
# # Preprocessing
#
# When doing your projects or working with real data, you'll learn very quickly that the regressions and classifications from this course don't generally work on their own. (Otherwise everyone would be doing this!). Every type of data (imaging, financial, social media, etc) has its own characteristics and thus calls for different preprocessing techniques. You can write your own methods using numpy and scipy, and scikit-learn includes quite a few common ones in sklearn.preprocessing.
#
# Here is just an example of some of the preprocessing options
import sklearn.preprocessing as preproc
scaler = preproc.StandardScaler() # will zscore the dataset it is given and save the mean and variance
# Fit statistics on the training set ONLY, then apply them to the test set —
# fitting on test data would leak information.
x_train_sc = scaler.fit_transform(x_train) # zscore training set and save mean
x_test_sc = scaler.transform(x_test) # apply mean and variance to test
# check the mean and variance — train is exactly (0, 1); test only approximately,
# since it was scaled with the training statistics.
print(x_train_sc.mean(), x_train_sc.var())
print(x_test_sc.mean(), x_test_sc.var())
# ## Linear models
# Scikit-learn has the submodule `linear_model`. This module contains many different linear approaches, including ordinary least squares and regularized regressions. Several common ones are demonstrated below, and will be further explored later. For now, notice that the syntax for training and testing is uniform (you can fine-tune free parameters by updating the model object itself)
# +
from sklearn.linear_model import LinearRegression, Ridge, Lasso
# All three share the same fit/score API; score() returns R^2 on the test set.
ols = LinearRegression()
print(ols.fit(x_train_sc, y_train).score(x_test_sc, y_test))
# fit a model on the training data, and see how it does on the testing set
ridge = Ridge(alpha=1.0)   # L2-regularized least squares
print(ridge.fit(x_train_sc, y_train).score(x_test_sc, y_test))
lasso = Lasso(alpha=1.0)   # L1-regularized (sparse coefficients)
print(lasso.fit(x_train_sc, y_train).score(x_test_sc, y_test))
# -
# ## Classification
# Like linear models, classifiers have a common calling structure. We import a bunch of models below.
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import BaggingClassifier
# Let's initialize a whole bunch of different classifiers. Each has its own free parameters that you can tweak (e.g. depth for trees, features for forests, kernels for SVM, etc).
# +
# NOTE(review): h is unused in this excerpt — it looks like a leftover from
# the sklearn mesh-plot example this cell was adapted from.
h = .02 # step size in the mesh
# Display names, parallel (same order) to the `classifiers` list below.
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM",
         "Decision Tree", "Random Forest",
         "Naive Bayes", "QDA"]
classifiers = [
    KNeighborsClassifier(3), #k = 3
    SVC(kernel="linear", C=0.025), # linear SVM with C = 0.025
    SVC(gamma=2, C=1, kernel="rbf"), # RBF SVM with C = 1, gamma = 2
    DecisionTreeClassifier(max_depth=5), # depth 5 decision tree
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), # depth 5 random forest with 10 trees
    GaussianNB(),
    QuadraticDiscriminantAnalysis()]
# -
# Let's generate some random data to demonstrate classification
# +
# from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
# 2-feature, 2-class toy problem (fixed seed), then jittered with uniform
# noise so the classes are not trivially separable.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2) # add some noise
X += 2 * rng.uniform(size=X.shape)
# +
# preprocess data
# Split and z-score the toy classification data (reusing `scaler` from above,
# which is refit here on the new training split).
x_train, x_test, y_train, y_test = \
    train_test_split(X, y, test_size=.2, random_state=42)
x_train_sc = scaler.fit_transform(x_train) # zscore training set and save mean
x_test_sc = scaler.transform(x_test) # apply mean and variance to test
# -
# run models in a loop. because each has a .fit() and .score() method, they can just be swapped out
for name, clf in zip(names, classifiers):
    clf.fit(x_train_sc, y_train)
    score = clf.score(x_test_sc, y_test)  # mean accuracy on the held-out set
    print(name + ": " + str(score))
|
notebooks/sklearn-intro.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
sys.path.append('../external/Transformer_modules/')
sys.path.append('../external/Data_Pointnet++/')
sys.path.append('../src/')
import modelnet_dataset
import modelnet_h5_dataset
import os
from files import BASE_DIR
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
# Dataset configuration for ModelNet40 point-cloud classification.
NUM_CLASSES = 40
normal = False      # True -> resampled dataset with surface normals
NUM_POINT = 1024    # points sampled per shape
BATCH_SIZE = 32
if normal:
    assert NUM_POINT <= 10000
    DATA_PATH = os.path.join(ROOT_DIR, 'data/modelnet40_normal_resampled')
    # BUG FIX: this branch referenced FLAGS.normal, but FLAGS is never defined
    # anywhere in this notebook (argparse is imported but no parser is built),
    # so running with normal=True raised NameError. Use the local flag instead.
    TRAIN_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='train', normal_channel=normal, batch_size=BATCH_SIZE)
    TEST_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test', normal_channel=normal, batch_size=BATCH_SIZE)
else:
    assert NUM_POINT <= 2048
    TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=True)
    TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False)
# -
# %load_ext autoreload
# %autoreload 2
import numpy as np
import torch, torch.nn as nn
import torch.nn.functional as F
from modules import MultiHeadAttention, PositionwiseFeedForward
# +
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
# cudnn.enabled = False
# cudnn.benchmark = True
class GlobalAveragePooling(nn.Module):
    """Average the input tensor over a single dimension (default: last)."""

    def __init__(self, dim=-1):
        # BUG FIX: was super(self.__class__, self).__init__(), which recurses
        # infinitely if this class is ever subclassed; use plain super().
        super().__init__()
        self.dim = dim

    def forward(self, x):
        return x.mean(dim=self.dim)
class GlobalPooling(nn.Module):
    """Concatenate min, average, and max pooled over `dim` along the last axis.

    Output feature size is 3x the pooled input's feature size.
    """

    def __init__(self, dim=-1):
        # BUG FIX: was super(self.__class__, self).__init__() — breaks under
        # subclassing (infinite recursion); use the zero-argument form.
        super().__init__()
        self.dim = dim

    def forward(self, x):
        # Renamed locals to avoid shadowing the builtins min/max.
        avg_pool = x.mean(dim=self.dim)
        max_pool = x.max(dim=self.dim)[0]
        min_pool = x.min(dim=self.dim)[0]
        return torch.cat([min_pool, avg_pool, max_pool], dim=-1)
# -
class Discriminator(nn.Module):
    """Point-set classifier: per-point MLP -> two (self-attention + FFN)
    blocks -> global min/avg/max pooling -> linear head with 40 logits
    (the ModelNet40 class count).
    """

    def __init__(self, in_dim,
                 hidden_dim=100,
                 ffn_dim =200,
                 n_head=8,
                 normalize_loc=True,
                 normalize_scale=False):
        super(Discriminator, self).__init__()
        # Optional per-cloud input normalization (centering / scaling).
        self.normalize_loc = normalize_loc
        self.normalize_scale = normalize_scale
        self.dropout1 = nn.Dropout(p=0.2)
        # Per-point embedding from in_dim (e.g. xyz coordinates) to hidden_dim.
        self.fc1 = nn.Linear(in_dim, hidden_dim)
        nn.init.xavier_normal_(self.fc1.weight)
        nn.init.constant_(self.fc1.bias, 0.0)
        self.dropout2 = nn.Dropout(p=0.1)
        self.dropout3 = nn.Dropout(p=0.1)
        self.dropout4 = nn.Dropout(p=0.1)
        # Two attention/feed-forward blocks (project-local modules;
        # residual connections disabled in the FFNs).
        self.mha_1 = MultiHeadAttention(n_head=n_head,d_model = hidden_dim)
        self.ffn_1 = PositionwiseFeedForward(hidden_dim, ffn_dim, use_residual=False)
        self.mha_2 = MultiHeadAttention(n_head=n_head,d_model = hidden_dim)
        self.ffn_2 = PositionwiseFeedForward(hidden_dim, ffn_dim, use_residual=False)
        # Pool over the point dimension; concatenated min/avg/max gives
        # hidden_dim * 3 features to the classification head.
        self.gl_1 = GlobalPooling(dim = 1)
        self.fc2 = nn.Linear(hidden_dim * 3, 40)
        nn.init.xavier_normal_(self.fc2.weight)
        nn.init.constant_(self.fc2.bias, 0.0)

    def forward(self, x):
        # x: assumed (batch, n_points, in_dim) — pooling/normalization act on
        # dim 1, the point dimension. TODO confirm against the data loader.
        if self.normalize_loc:
            x = x - x.mean(dim=1, keepdim=True)
        if self.normalize_scale:
            x = x / x.std(dim=1, keepdim=True)
        h1 = F.relu(self.fc1(x))
        h1 =self.dropout1(h1)
        h2 = self.mha_1(h1)
        h2 = self.dropout2(h2)
        h3 = self.ffn_1(h2)
        h3 = self.dropout3(h3)
        h4 = self.mha_2(h3)
        h4 = self.dropout4(h4)
        h5 = self.ffn_2(h4)
        # Unnormalized class scores (logits); softmax is applied by the loss.
        score = self.fc2(self.gl_1(h5))
        return score
# Instantiate the classifier on GPU 0 (3 input channels: point coordinates).
model = Discriminator(3).cuda(0)
# +
def compute_loss(X_batch, y_batch):
    """Cross-entropy loss of the global `model` on one minibatch (on GPU 0)."""
    X_batch = Variable(torch.FloatTensor(X_batch)).cuda(0)
    y_batch = Variable(torch.LongTensor(y_batch)).cuda(0)
    logits = model(X_batch)
    # F.cross_entropy already averages over the batch; .mean() is a no-op here.
    return F.cross_entropy(logits, y_batch).mean()
def iterate_minibatches(X, y, batchsize):
    """Yield (X, y) minibatches in a fresh random order on each call.

    The last batch may be smaller than `batchsize`.
    """
    order = np.random.permutation(np.arange(len(X)))
    total = len(order)
    start = 0
    while start < total:
        batch_idx = order[start:start + batchsize]
        yield X[batch_idx], y[batch_idx]
        start += batchsize
# Adam with a very small learning rate.
opt = torch.optim.Adam(model.parameters(),lr=0.00001)
# -
import time
from tqdm import tqdm
num_epochs = 150 # total amount of full passes over training data
batch_size = 32
train_loss = []    # per-batch training losses, appended across all epochs
val_accuracy = []  # per-batch validation accuracies, appended across all epochs
for epoch in tqdm(range(num_epochs)):
    start_time = time.time()
    # --- training pass (dropout enabled) ---
    model.train(True)
    TRAIN_DATASET.reset()
    while TRAIN_DATASET.has_next_batch():
        batch_data, batch_label = TRAIN_DATASET.next_batch(augment=True)
        loss = compute_loss(batch_data, batch_label)
        loss.backward()
        opt.step()
        opt.zero_grad()
        train_loss.append(loss.cpu().detach().numpy())
        del loss  # free GPU memory before the next batch
    # And a full pass over the validation data:
    model.train(False) # disable dropout / use averages for batch_norm
    TEST_DATASET.reset()
    while TEST_DATASET.has_next_batch():
        batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
        logits = model(Variable(torch.FloatTensor(batch_data)).cuda(0))
        y_pred = logits.max(1)[1].cpu().detach().numpy()
        val_accuracy.append(np.mean(batch_label == y_pred))
        del logits
    # Then we print the results for this epoch:
    # 9840 / 2468 are presumably the ModelNet40 train/test set sizes, so each
    # slice averages roughly the last epoch's batches — TODO confirm.
    print("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, num_epochs, time.time() - start_time))
    print(" training loss (in-iteration): \t{:.6f}".format(
        np.mean(train_loss[-9840 // batch_size :])))
    print(" validation accuracy: \t\t\t{:.2f} %".format(
        np.mean(val_accuracy[-2468 // batch_size :]) * 100))
|
Notebooks/3D_Diskriminator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import uuid
import json
import random
import os
import sys
from time import sleep
from datetime import datetime
import requests as rt
import numpy as np
from kafka import KafkaProducer, KafkaConsumer
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException,ElementNotInteractableException, ElementClickInterceptedException
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
# -
def get_browser(driver_path=r'chromedriver/chromedriver.exe', headless=False):
    """Create a Chrome WebDriver, optionally headless at 1200x600."""
    chrome_opts = webdriver.ChromeOptions()
    if headless:
        for arg in ('headless', 'window-size=1200x600'):
            chrome_opts.add_argument(arg)
    return webdriver.Chrome(driver_path, options=chrome_opts)
# +
def get_vacancies_on_page(browser):
    """Return the vacancy-card elements on the current hh.ru results page."""
    #close pop-up window with suggested region (if present)
    try:
        browser.find_element_by_class_name('bloko-icon_cancel').click()
    except (NoSuchElementException, ElementNotInteractableException):
        pass
    # NOTE(review): the trailing space in 'vacancy-serp-item ' looks deliberate
    # (matching a compound class attribute) — confirm against the live markup.
    vacancy_cards = browser.find_elements_by_class_name('vacancy-serp-item ')
    return vacancy_cards
# -
def get_vacancy_info(card, browser, keyword, verbose=True):
    """Open a vacancy card in a new tab, scrape its details, close the tab.

    Returns a dict of vacancy fields tagged with the search `keyword` and a
    timestamp, or None if scraping failed for any reason.
    """
    try:
        card.find_element_by_class_name('vacancy-serp-item__info')\
            .find_element_by_tag_name('a')\
            .send_keys(Keys.CONTROL + Keys.RETURN) #open new tab in Chrome
        sleep(2) #let it fully load
        #go to the last opened tab
        browser.switch_to.window(browser.window_handles[-1])
        # Poll until the mandatory fields appear (the page may still be loading).
        basic_info = False
        while not basic_info:
            try:
                vacancy_title = browser.find_element_by_xpath('//div[@class="vacancy-title"]//h1').text
                company_name = browser.find_element_by_xpath('//a[@class="vacancy-company-name"]').text
                company_href_hh = browser.find_element_by_xpath('//a[@class="vacancy-company-name"]').get_attribute('href')
                publish_time = browser.find_element_by_xpath('//p[@class="vacancy-creation-time"]').text
                basic_info = True
            # BUG FIX: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and retried forever on ANY error.
            # Only retry when the elements genuinely are not there yet; other
            # exceptions now propagate to the outer handler and return None.
            except NoSuchElementException:
                sleep(3)
        if verbose:
            print("Title: ", vacancy_title )
            print("Company: ", company_name )
            print("Company link: ", company_href_hh )
            print("Publish time: ", publish_time )
        # Optional fields: fall back to 'не указано' ("not specified") and
        # normalize whitespace where the site uses multi-line text.
        try:
            salary = browser.find_element_by_xpath('//div[@class="vacancy-title"]//p[@class="vacancy-salary"]').text
        except NoSuchElementException :
            salary = 'не указано'
        try:
            emp_mode = browser.find_element_by_xpath('//p[@data-qa="vacancy-view-employment-mode"]').text
        except NoSuchElementException :
            emp_mode = 'не указано'
        finally:
            emp_mode = emp_mode.strip().replace('\n', ' ')
        try:
            exp = browser.find_element_by_xpath('//span[@data-qa="vacancy-experience"]').text
        except NoSuchElementException :
            exp = 'не указано'
        finally:
            exp = exp.strip().replace('\n', ' ')
        try:
            company_address = browser.find_element_by_xpath('//span[@data-qa="vacancy-view-raw-address"]').text
        except NoSuchElementException:
            company_address = 'не указано'
        try:
            vacancy_description = browser.find_element_by_xpath('//div[@data-qa="vacancy-description"]').text
        except NoSuchElementException:
            vacancy_description = 'не указано'
        finally:
            vacancy_description = vacancy_description.replace('\n', ' ')
        try:
            vacancy_tags = browser.find_element_by_xpath('//div[@class="bloko-tag-list"]').text
        except NoSuchElementException:
            vacancy_tags = 'не указано'
        finally:
            vacancy_tags = vacancy_tags.replace('\n', ', ')
        if verbose:
            print("Salary: ", salary )
            print("Company address: ", company_address )
            print('Experience: ', exp)
            print('Employment mode: ', emp_mode)
            print("Vacancy description: ", vacancy_description[:50] )
            print("Vacancy tags: ", vacancy_tags)
        browser.close() #close tab
        browser.switch_to.window(browser.window_handles[0]) #switch to the first tab
        dt = str(datetime.now())
        vacancy_info = {'dt': dt,
                        'keyword': keyword,
                        'vacancy_title': vacancy_title,
                        'vacancy_salary': salary,
                        'vacancy_tags': vacancy_tags,
                        'vacancy_description': vacancy_description,
                        'vacancy_experience' : exp,
                        'employment_mode': emp_mode,
                        'company_name':company_name,
                        'company_link':company_href_hh,
                        'company_address':company_address,
                        'publish_place_and_time':publish_time}
        return vacancy_info
    except Exception as ex:
        # Typo fix in the message ('Exeption' -> 'Exception').
        print('Exception while scraping info!')
        print(str(ex))
        return None
def insert_data(data, engine, table_name='HH_vacancies', schema='webscraping', verbose=True):
    """Insert one row (dict) into `schema.table_name`.

    Errors are printed, not raised; the connection is always closed.
    """
    reflected = sa.Table(table_name, sa.MetaData(bind=engine), autoload=True, schema=schema)
    con = engine.connect()
    try:
        con.execute(reflected.insert().values(data))
        if verbose:
            print('Data inserted into table {}'.format(table_name))
    except Exception as ex:
        print('Exception while inserting data!')
        print(str(ex))
    finally:
        con.close()
# +
def scrape_HH(browser, keyword='Python', pages2scrape=3, table2save='HH_vacancies', verbose=True):
    """Scrape hh.ru search results for `keyword` and insert rows into MySQL.

    NOTE(review): relies on a module-level `engine`, which in this notebook is
    only created in a later cell — run that cell first, or pass it explicitly.
    """
    url = f'https://hh.ru/search/vacancy?area=1&fromSearchLine=true&st=searchVacancy&text={keyword}&from=suggest_post'
    browser.get(url)
    while pages2scrape > 0:
        vacancy_cards = get_vacancies_on_page(browser=browser)
        for card in vacancy_cards:
            vacancy_info = get_vacancy_info(card, browser=browser, keyword=keyword, verbose=verbose)
            # NOTE(review): get_vacancy_info may return None on failure; that
            # None is passed straight to insert_data — confirm this is intended.
            insert_data(data=vacancy_info, engine=engine, table_name=table2save)
            if verbose:
                print('Inserted row')
        try:
            #click to the "Next" button to load other vacancies
            browser.find_element_by_xpath('//a[@data-qa="pager-next"]').click()
            print('Go to the next page')
        except (NoSuchElementException, ElementNotInteractableException):
            # No "Next" button: last page reached — close and stop.
            browser.close()
            break
        finally:
            pages2scrape -= 1
# -
def send_message(producer, topic_name, msg):
    """Tag `msg` with a fresh uid, publish it to `topic_name`, and flush.

    Note: mutates the caller's dict by adding the 'uid' key.
    """
    uid = str(uuid.uuid4())
    msg['uid'] = uid
    producer.send(topic_name, msg)
    producer.flush()
    print(f'PRODUCER: Sent message with id: {uid}')
    sleep(0.5)  # throttle between sends
scrape_HH(browser, keyword='Kafka', pages2scrape=4, verbose=False)
# +
# browser.close()
# browser.quit()
# -
def scrape_HH_to_kafka(browser, producer, keyword='Python', pages2scrape=3, topic_name='parsed-jobs', verbose=True):
    """Scrape hh.ru results for `keyword`, publishing each vacancy to Kafka.

    Quits the browser and closes the producer when finished.
    """
    url = f'https://hh.ru/search/vacancy?area=1&fromSearchLine=true&st=searchVacancy&text={keyword}&from=suggest_post'
    browser.get(url)
    while pages2scrape > 0:
        vacancy_cards = get_vacancies_on_page(browser=browser)
        for card in vacancy_cards:
            vacancy_info = get_vacancy_info(card, browser=browser, keyword=keyword, verbose=verbose)
            #sending scraping results to kafka
            # NOTE(review): vacancy_info may be None after a scrape failure —
            # send_message would then raise on msg['uid']; confirm handling.
            send_message(producer, topic_name=topic_name, msg=vacancy_info)
            if verbose:
                print('Inserted row')
        try:
            #click to the "Next" button to load other vacancies
            browser.find_element_by_xpath('//a[@data-qa="pager-next"]').click()
            print('Go to the next page')
        except (NoSuchElementException, ElementNotInteractableException):
            break
        finally:
            pages2scrape -= 1
    browser.quit()
    producer.close()
KAFKA_HOST = 'localhost:9092'
# Producer JSON-serializes each message dict.
producer = KafkaProducer(bootstrap_servers=KAFKA_HOST,
                         value_serializer=lambda x: json.dumps(x).encode('utf-8'))
browser = get_browser(driver_path=r'chromedriver/chromedriver.exe', headless=False)
# One-page scrape; this also quits the browser and closes the producer.
scrape_HH_to_kafka(browser, producer, keyword='Golang', pages2scrape=1, topic_name='parsed-jobs', verbose=False)
# Consumer reads the same topic from the beginning and JSON-deserializes.
consumer = KafkaConsumer(
    'parsed-jobs',
    bootstrap_servers=KAFKA_HOST,
    auto_offset_reset='earliest',
    enable_auto_commit=True,
    group_id='my-group',
    value_deserializer=lambda x: json.loads(x.decode('utf-8')))
# NOTE(review): plaintext DB credentials in the connection string — fine for a
# local demo, but move to environment variables for anything shared.
mysql_con = 'mysql+pymysql://python_user:123@127.0.0.1:3306/webscraping?charset=utf8mb4'
engine = sa.create_engine(mysql_con)
def consume_and_insert(consumer, engine, verbose=True):
    """Consume vacancy messages forever, inserting each into MySQL.

    Blocks until interrupted; Ctrl-C closes the consumer cleanly.
    """
    try:
        for msg in consumer:
            message = msg.value
            if verbose:
                print(f"CONSUMER: Consumed message with id: {message['uid']}!")
            insert_data(message, engine, table_name='HH_vacancies', schema='webscraping', verbose=verbose)
    except KeyboardInterrupt:
        consumer.close()
        print("Closed consumer!")
# Start the (blocking) consume loop.
consume_and_insert(consumer, engine, verbose=True)
|
notebooks/scrape-HH-by-keyword-kafka.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pathlib import Path
import datetime
from moviepy.editor import *
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import numpy as np
font = ImageFont.truetype("arial.ttf", 40)
def make_timalepse_video_from_folder(exp_folder):
    '''Read the .jpg images in exp_folder, stamp each frame with the time
    elapsed since the first image (by file modification time), and write an
    mp4 video named after exp_folder into the current folder.'''
    exp_folder = Path(exp_folder)
    print(f'Processing images in folder: {exp_folder.resolve()}')
    p = Path(exp_folder).glob('**/*')
    files = [item for item in p if item.is_file() and item.suffix == '.jpg']
    # BUG FIX: glob() yields files in arbitrary order; sort by mtime so the
    # frames (and the elapsed-time stamps) are chronological.
    files.sort(key=lambda f: f.stat().st_mtime)
    # Guard: times[0] below would raise IndexError on an empty folder.
    if not files:
        print('No .jpg images found - nothing to do.')
        return
    times = [datetime.datetime.fromtimestamp(file.stat().st_mtime) for file in files]
    times = [t - times[0] for t in times]  # elapsed time since first frame
    clips = []
    for file, time in zip(files, times):
        img = Image.open(file)
        ImageDraw.Draw(img).text((0, 0), str(time), font=font)
        clips.append(ImageClip(np.array(img)).set_duration(0.1))
    concat_clip = concatenate_videoclips(clips, method="compose")
    concat_clip.write_videofile(f"{exp_folder.name}.mp4", fps=24)
# +
# Inspect the labelimg annotation file's top-level structure.
import xml.etree.ElementTree as ET
tree = ET.parse('AOI.xml')
root = tree.getroot()
for child in root:
    print(child.tag, child.attrib)
# +
# Build a timelapse for one experiment folder.
exp_folder='./cf90r1-041321-80LAS-diwater/'
make_timalepse_video_from_folder(exp_folder)
# +
import xmltodict
# Parse the labelimg annotation file into nested dicts.
with open('AOI.xml', 'r') as file:
    AOI_from_XML = xmltodict.parse(file.read())
# -
# Map AOI name -> bounding box dict with 'xmin'/'ymin'/'xmax'/'ymax' keys.
AOI_dict = {}
for AOI in AOI_from_XML['annotation']['object']:
    AOI_dict[AOI['name']] = AOI['bndbox']
for name, item in AOI_dict.items():
    print(name, item)
# BUG FIX: the 'annotation'/'object' keys live on the raw parse result
# (AOI_from_XML), not on the flattened AOI_dict — the old expression
# AOI_dict['annotation']['object'][0] raised KeyError.
AOI_from_XML['annotation']['object'][0]
img = Image.open('20210430-113421.bmp')
# Crop the 'top1' AOI: PIL expects (left, upper, right, lower).
img.crop((int(AOI_dict['top1']['xmin']), int(AOI_dict['top1']['ymin']), int(AOI_dict['top1']['xmax']), int(AOI_dict['top1']['ymax'])))
(int(AOI_dict['top1']['xmin']), int(AOI_dict['top1']['ymax']), int(AOI_dict['top1']['xmax']), int(AOI_dict['top1']['ymin']))
# +
from pathlib import Path
import datetime
from moviepy.editor import *
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import numpy as np
import xmltodict
font = ImageFont.truetype("arial.ttf", 40)
def make_timalepse_video_from_folder(exp_folder, xml_mask=None):
    '''Read the .bmp images in exp_folder, stamp each frame with the time
    elapsed since the first image (by file modification time), and write an
    mp4 video named after exp_folder into the current folder.

    If xml_mask points to a labelimg XML file, one cropped video is written
    per annotated AOI instead, named "<AOI>_<folder>.mp4".'''
    exp_folder = Path(exp_folder)
    print(f'Processing images in folder: {exp_folder.resolve()}')
    p = Path(exp_folder).glob('**/*')
    files = [item for item in p if item.is_file() and item.suffix == '.bmp']
    # BUG FIX: glob() yields files in arbitrary order; sort by mtime so the
    # frames (and the elapsed-time stamps) are chronological.
    files.sort(key=lambda f: f.stat().st_mtime)
    # Guard: times[0] below would raise IndexError on an empty folder.
    if not files:
        print('No .bmp images found - nothing to do.')
        return
    times = [datetime.datetime.fromtimestamp(file.stat().st_mtime) for file in files]
    times = [t - times[0] for t in times]  # elapsed time since first frame
    if xml_mask is None:
        clips = []
        for file, time in zip(files, times):
            img = Image.open(file)
            ImageDraw.Draw(img).text((0, 0), str(time), font=font)
            clips.append(ImageClip(np.array(img)).set_duration(0.1))
        concat_clip = concatenate_videoclips(clips, method="compose")
        concat_clip.write_videofile(f"{exp_folder.name}.mp4", fps=24)
    else:
        with open(xml_mask, 'r') as file:
            AOI_from_XML = xmltodict.parse(file.read())
        AOI_dict = {}
        for AOI in AOI_from_XML['annotation']['object']:
            AOI_dict[AOI['name']] = AOI['bndbox']
        # One cropped video per annotated area of interest.
        for AOI_name, AOI in AOI_dict.items():
            clips = []
            for file, time in zip(files, times):
                img = Image.open(file).crop((int(AOI['xmin']), int(AOI['ymin']), int(AOI['xmax']), int(AOI['ymax'])))
                ImageDraw.Draw(img).text((0, 0), str(time), font=font)
                clips.append(ImageClip(np.array(img)).set_duration(0.1))
            concat_clip = concatenate_videoclips(clips, method="compose")
            concat_clip.write_videofile(f"{AOI_name}_{exp_folder.name}.mp4", fps=24)
# -
make_timalepse_video_from_folder('test_folder/', xml_mask='AOI.xml')
# +
import click
from pathlib import Path
import datetime
from moviepy.editor import *
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import numpy as np
font = ImageFont.truetype("arial.ttf", 40)
@click.command()
@click.option('--exp_folder', default='./', help='Path to experiment folder with images. Exp_folder will be also used to name the timelapse video')
@click.option('--xml_mask', default=None, help='Path to xml file with 1 or multiple masks generated by labelimg ')
def main(exp_folder, xml_mask=None):
    '''Read images in exp_folder, finds date created,
    stamps it on the image and generate mp4 video
    named as exp_folder in current folder
    Added parameter xml_mask'''
    # NOTE(review): the xml_mask branch below uses xmltodict, which is not
    # imported in this cell (it only works if an earlier cell ran) — a
    # standalone script run would raise NameError; confirm and import it.
    # Also note this version scans for .jpg while the in-notebook version
    # above scans for .bmp — confirm which is intended.
    exp_folder=Path(exp_folder)
    print(f'Processing images in folder: {exp_folder.resolve()}')
    p = Path(exp_folder).glob('**/*')
    files = [item for item in p if item.is_file() and item.suffix=='.jpg']
    times = [datetime.datetime.fromtimestamp(file.stat().st_mtime) for file in files]
    times=list(map(lambda x: x-times[0],times))
    if xml_mask is None:
        clips=[]
        for file, time in zip(files, times):
            img=Image.open(file)
            # str(time).split('.')[0] drops fractional seconds from the stamp.
            ImageDraw.Draw(img).text((0,0), str(time).split('.')[0],font=font)
            clip =ImageClip(np.array(img)).set_duration(0.1)
            clips.append(clip)
        concat_clip = concatenate_videoclips(clips, method="compose")
        concat_clip.write_videofile(f"{exp_folder.name}.mp4", fps=24)
    else:
        with open(xml_mask, 'r') as file:
            AOI_from_XML=xmltodict.parse(file.read())
        AOI_dict={}
        for AOI in AOI_from_XML['annotation']['object']:
            AOI_dict[AOI['name']]=AOI['bndbox']
        # One cropped video per annotated area of interest.
        for AOI_name, AOI in AOI_dict.items():
            clips=[]
            for file, time in zip(files, times):
                img=Image.open(file).crop((int(AOI['xmin']),int(AOI['ymin']),int(AOI['xmax']),int(AOI['ymax'])))
                ImageDraw.Draw(img).text((0,0), str(time).split('.')[0],font=font)
                clip =ImageClip(np.array(img)).set_duration(0.1)
                clips.append(clip)
            concat_clip = concatenate_videoclips(clips, method="compose")
            concat_clip.write_videofile(f"{AOI_name}_{exp_folder.name}.mp4", fps=24)
# if __name__ == '__main__':
# sys.exit(main())
# -
|
notebooks/add_AOI_feature/02_add_feature_Execute_function_from_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import gzip
import pickle
from collections import defaultdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('ggplot')
import watchcbb.utils as utils
import watchcbb.efficiency as eff
from watchcbb.sql import SQLEngine
# -
# ### Load individual game data
# Load every game row, oldest first, from the local 'cbb' SQL database.
sql = SQLEngine('cbb')
df_games = sql.df_from_query(""" SELECT * FROM game_data ORDER BY "Date" """)
df_games.tail(10)
# ### Compute full season stats
# Cache it into a pickle so we don't waste time recomputing each time
# +
fname = 'cached/preseason_nb_season_stats.pkl.gz'
if os.path.exists(fname):
    # Cache hit: reuse the previously computed stats.
    with gzip.open(fname, 'rb') as fid:
        season_stats_df, season_stats_dict = pickle.load(fid)
else:
    # Cache miss: compute per-season stats, add advanced stats and
    # efficiency ratings (round-tripping between dict and DataFrame forms
    # because the two helpers operate on different representations).
    season_stats_dict = utils.compute_season_stats(df_games)
    season_stats_df = utils.stats_dict_to_df(season_stats_dict)
    utils.add_advanced_stats(season_stats_df)
    season_stats_dict = utils.stats_df_to_dict(season_stats_df)
    eff.compute_efficiency_ratings(season_stats_dict)
    season_stats_df = utils.stats_dict_to_df(season_stats_dict)
    os.makedirs(os.path.dirname(fname), exist_ok=True)
    with gzip.open(fname, 'wb') as fid:
        pickle.dump((season_stats_df, season_stats_dict), fid)
print(season_stats_df.shape)
season_stats_df[['year','team_id','wins','losses','Tneteff']].head()
# -
# Target season for prediction; it has no computed stats yet, so it is
# appended to the list of seasons to process.
PREDICT_YEAR = 2021
years = season_stats_df.year.unique().tolist()
if PREDICT_YEAR is not None:
    years.append(PREDICT_YEAR)
# ## Get roster info
# The player IDs of each player on each team/year
# +
class Player():
    """Lightweight record of one player-season.

    Attributes:
        ws:  win shares for the season
        mp:  minutes played
        exp: cumulative years of experience through that season
    """
    def __init__(self, ws=0, mp=0, exp=0):
        self.ws = ws
        self.mp = mp
        self.exp = exp

    def __repr__(self):
        # Added for debuggability; no callers depend on the default repr.
        return f'Player(ws={self.ws}, mp={self.mp}, exp={self.exp})'
# dict to hold the cumulative #years of experience for each player
player_exp = defaultdict(int)
# dict to hold player info used later
player_dict = {}
for year in years:
# for year in [2020]:
player_dict[year] = {}
roster_df = pd.read_pickle(f'../data/rosters/{year}.pkl.gz', compression='gzip').reset_index(drop=True)
if year not in season_stats_dict:
season_stats_dict[year] = defaultdict(dict)
for irow,row in roster_df.iterrows():
season_stats_dict[year][row.team_id]['players'] = row.players
season_stats_dict[year][row.team_id]['player_WS'] = row.WS
season_stats_dict[year][row.team_id]['player_MP'] = row.MP
for i,pid in enumerate(row.players):
player_exp[pid] += 1
if pid not in player_dict[year]:
player_dict[year][pid] = Player(ws=row.WS[i], mp=row.MP[i], exp=player_exp[pid])
# -
season_stats_dict[2021]['purdue']['players']
# ## Get recruiting rankings
# Attach recruiting-class rankings (ESPN and RSCI) to each team-season.
# Recruits are ranked the year BEFORE they play, hence the year-1 files.
for year in years:
    df_espn = pd.read_csv(f'../data/recruit_ranks/espn_{year-1}.csv')
    df_rsci = pd.read_csv(f'../data/recruit_ranks/rsci_{year-1}.csv')
    for irow,row in df_espn.iterrows():
        season_stats_dict[year][row.team_id]['espn_recruit_rank'] = row.Rank
    for irow,row in df_rsci.iterrows():
        season_stats_dict[year][row.team_id]['rsci_recruit_rank'] = row.Rank
        season_stats_dict[year][row.team_id]['rsci_recruit_points'] = row.points
season_stats_dict[2018]['purdue']['rsci_recruit_points']
# ## Get win-shares lost each year
# For each team-season, split last season's win shares into lost/returning,
# and roughly credit win shares arriving via transfers.
for year in years:
    if year-1 not in season_stats_dict:
        continue
    for tid in season_stats_dict[year]:
        if tid not in season_stats_dict[year-1]:
            continue
        if 'players' not in season_stats_dict[year][tid] or 'players' not in season_stats_dict[year-1][tid]:
            continue
        old_players = season_stats_dict[year-1][tid]["players"]
        new_players = season_stats_dict[year][tid]["players"]
        wshares = season_stats_dict[year-1][tid]["player_WS"]
        wshares_lost = 0.0
        wshares_return = 0.0
        wshares_transfer = 0.0
        # Last season's win shares: departed vs returning players.
        for pid, ws in zip(old_players, wshares):
            if pid not in new_players:
                wshares_lost += ws
            else:
                wshares_return += ws
        ## very rough accounting for transfers:
        for pid in new_players:
            ## these players are probably grad transfers
            if pid not in old_players and pid in player_dict[year-1] and player_dict[year-1][pid].exp >= 3:
                wshares_transfer += player_dict[year-1][pid].ws
            ## these are probably non-grad transfers that have to sit out
            elif pid not in old_players and pid in player_dict[year-1] and player_dict[year-1][pid].exp > 0:
                pass
            ## the next two cover players that sat out for transfer/injury/etc and come back
            elif pid not in old_players and year-2 in player_dict and pid in player_dict[year-2]:
                wshares_transfer += player_dict[year-2][pid].ws
            elif pid in old_players and player_dict[year-1][pid].ws == 0.0 and year-2 in player_dict and pid in player_dict[year-2]:
                # BUG FIX: this branch previously added player_dict[year-1][pid].ws,
                # which its own condition guarantees is 0.0 (a no-op). Credit the
                # player's last active season (year-2) instead, matching the
                # sibling branch above.
                wshares_transfer += player_dict[year-2][pid].ws
        season_stats_dict[year][tid]["wshares_lost"] = wshares_lost
        season_stats_dict[year][tid]["wshares_return"] = wshares_return
        season_stats_dict[year][tid]["wshares_transfer"] = wshares_transfer
# +
# Flatten the nested stats dicts into one modeling DataFrame: one row per
# team-season, with current, previous (yearm1) and two-seasons-back (yearm2)
# efficiency plus roster-turnover and recruiting features.
# Start 2 years in so the yearm1/yearm2 lookbacks exist.
year_start = season_stats_df.year.min() + 2
data = defaultdict(list)
for year in range(year_start, max(years)+1):
    for tid in season_stats_dict[year]:
        data['year'].append(year)
        data['tid'].append(tid)
        data['final_eff'].append(season_stats_dict[year][tid].get("Tneteff", None))
        data['yearm1_eff'].append(season_stats_dict[year-1].get(tid,{}).get("Tneteff",None))
        if tid in season_stats_dict[year-2]:
            data['yearm2_eff'].append(season_stats_dict[year-2][tid].get("Tneteff",None))
        else:
            # Fall back to yearm1 when the team has no stats two years back.
            data['yearm2_eff'].append(season_stats_dict[year-1].get(tid,{}).get("Tneteff",None))
        data['wshares_lost'].append(season_stats_dict[year][tid].get('wshares_lost',None))
        data['wshares_return'].append(season_stats_dict[year][tid].get('wshares_return',None))
        data['wshares_transfer'].append(season_stats_dict[year][tid].get('wshares_transfer',None))
        data['espn_recruit_rank'].append(season_stats_dict[year][tid].get('espn_recruit_rank',None))
        data['rsci_recruit_rank'].append(season_stats_dict[year][tid].get('rsci_recruit_rank',None))
        data['rsci_recruit_points'].append(season_stats_dict[year][tid].get('rsci_recruit_points',0))
        data['yearm1_oeff'].append(season_stats_dict[year-1].get(tid,{}).get('Tcorroeff',None))
        data['yearm1_deff'].append(season_stats_dict[year-1].get(tid,{}).get('Tcorrdeff',None))
        data['yearm1_pace'].append(season_stats_dict[year-1].get(tid,{}).get('pace',None))
        data['final_pace'].append(season_stats_dict[year][tid].get('pace',None))
df = pd.DataFrame(data, columns=['year','tid','final_eff','yearm1_eff', 'yearm2_eff',
                                 'wshares_lost','wshares_return','wshares_transfer',
                                 'espn_recruit_rank','rsci_recruit_rank','rsci_recruit_points',
                                 'yearm1_oeff','yearm1_deff','yearm1_pace','final_pace'])
df.loc[df.tid=='purdue']
# Keep only rows with complete lookback/turnover features.
df = df.dropna(subset=['yearm1_eff','yearm2_eff','wshares_lost','wshares_return','wshares_transfer'])
# How do the two recruiting-rank sources relate to each other?
fig, axs = plt.subplots(1, 2, figsize=(15,6))
dfr = df[['espn_recruit_rank','rsci_recruit_rank','rsci_recruit_points']].dropna()
axs[0].scatter(dfr.espn_recruit_rank, dfr.rsci_recruit_rank, alpha=0.4)
axs[0].set_xlabel('espn recruit rank')
axs[0].set_ylabel('rsci recruit rank')
axs[1].scatter(dfr.rsci_recruit_rank, dfr.rsci_recruit_points, alpha=0.4)
axs[1].set_xlabel('rsci recruit_rank')
axs[1].set_ylabel('rsci recruit points')
# Each candidate feature vs year-over-year efficiency change.
fig, axs = plt.subplots(2, 2, figsize=(12,12))
axs = axs.flatten()
dfr = df[['final_eff','yearm1_eff','wshares_lost','wshares_transfer','wshares_return','rsci_recruit_points']].dropna()
for i,col in enumerate(['wshares_lost','wshares_transfer','wshares_return','rsci_recruit_points']):
    axs[i].scatter(df[col], df.final_eff-df.yearm1_eff, s=10, alpha=0.4)
# +
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
from xgboost import XGBRegressor
linreg = LinearRegression()
# Recruiting feature fed to the models; alternative transforms kept for reference.
df['recruit_score'] = df.rsci_recruit_points
# df['recruit_score'] = df.espn_recruit_rank.fillna(100)
# df.recruit_score = 0.532/(df.recruit_score - 0.765)**0.436
# df.recruit_score = np.log10(df.recruit_score+1)
# Exclude the season being predicted from all model comparison below.
df_tmp = df[df.year != PREDICT_YEAR]
# Candidate models: name -> feature columns. The 'linreg'/'xgb' name prefix
# selects the estimator type inside the loop below.
models = {
    "linreg_baseline" : ['yearm1_eff'],
    "linreg1" : ['yearm1_eff','wshares_lost','wshares_transfer','recruit_score'],
    "linreg2" : ['yearm1_eff','yearm2_eff','wshares_lost','wshares_transfer','recruit_score'],
    "linreg_norecruit" : ['yearm1_eff','yearm2_eff','wshares_lost','wshares_transfer'],
    "linreg_notransfer" : ['yearm1_eff','yearm2_eff','wshares_lost','recruit_score'],
    "linreg_nolost" : ['yearm1_eff','yearm2_eff','wshares_transfer','recruit_score'],
    "xgb2" : ['yearm1_eff','yearm2_eff','wshares_lost','wshares_transfer','recruit_score'],
}
model_scores = defaultdict(list)
years = sorted(df_tmp.year.unique())
NVALID = 1  # number of consecutive seasons held out per validation fold
start_years = years[:len(years)-NVALID+1:NVALID]
# Leave-NVALID-seasons-out cross-validation over every candidate model.
for start_valid in start_years:
    valid_years = list(range(start_valid, start_valid+NVALID))
    train_years = sorted(set(years)-set(valid_years))
    for name,cols in models.items():
        # NOTE(review): masks are built from df.year rather than df_tmp.year and
        # rely on .loc boolean index alignment — confirm with the pandas version in use.
        X_train = df_tmp.loc[df.year.isin(train_years)][cols]
        y_train = df_tmp.loc[df.year.isin(train_years)].final_eff
        X_valid = df_tmp.loc[df.year.isin(valid_years)][cols]
        y_valid = df_tmp.loc[df.year.isin(valid_years)].final_eff
        if name.startswith('linreg'):
            model = linreg
        if name.startswith('xgb'):
            model = XGBRegressor(colsample_bytree=1, eval_metric='rmse', learning_rate=0.05, max_depth=2,
                                 min_child_weight=15, n_estimators=250, objective='reg:squarederror', subsample=0.5)
        model.fit(X_train, y_train)
        pred = model.predict(X_valid)
        score = mean_squared_error(y_valid, pred)**0.5  # RMSE
        # score = mean_absolute_error(y_valid, pred)
        model_scores[name].append(score)
        if start_valid==2019 and name.startswith('linreg'):
            print(name,model.coef_, model.intercept_)
# Plot per-fold scores for each model, worst average score listed first.
plt.figure(figsize=(9,7))
xs = np.array(start_years)
for name,scores in sorted(model_scores.items(), key=lambda x:np.mean(x[1]), reverse=True):
    print(f'{np.mean(scores):.2f} {name}')
    plt.errorbar(xs+NVALID/2.0, scores, xerr=NVALID/2.0, fmt='o', lw=2, label=name)
plt.gca().set_xlim(2010,2021)
plt.xlabel('validation year')
plt.ylabel('Root mean squared error')
plt.legend()
# +
from itertools import product
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
# Hyperparameter search for the XGBoost model on a fixed train/valid year split.
cols = ['yearm1_eff','yearm2_eff','wshares_lost','wshares_transfer','recruit_score']
train_years = [2013,2014,2015,2016,2017,2018]
valid_years = [2019,2020]
X_train = df.loc[df.year.isin(train_years)][cols]
y_train = df.loc[df.year.isin(train_years)].final_eff
X_valid = df.loc[df.year.isin(valid_years)][cols]
y_valid = df.loc[df.year.isin(valid_years)].final_eff
# Earlier manual sweep, kept for reference:
# for max_depth, n_estimators in product([2,3,4],[10,20,50,100]):
#     xgb = XGBRegressor(max_depth=max_depth, n_estimators=n_estimators)
#     xgb.fit(X_train, y_train)
#     pred = xgb.predict(X_valid)
#     mse = mean_squared_error(y_valid, pred)**0.5
#     print(f'{max_depth:2d} {n_estimators:3d} {mse:.2f}')
parameters = {'objective':['reg:squarederror'],
              'eval_metric': ['rmse'],
              'learning_rate': [0.02,0.05,0.1], #so called `eta` value
              'max_depth': [2,3],
              'min_child_weight': [10,15,20],
              'subsample': [0.4,0.5,0.6],
              'colsample_bytree': [1],
              'gamma': [0],
              'n_estimators': [150,200,250,300,400]}
xgb = XGBRegressor()
xgb_grid = GridSearchCV(xgb,
                        parameters,
                        cv = 3,
                        n_jobs = 4,
                        verbose=True)
xgb_grid.fit(X_train,
             y_train)
# Report held-out RMSE of the best estimator and the chosen parameters.
print(mean_squared_error(y_valid, xgb_grid.best_estimator_.predict(X_valid))**0.5)
print(xgb_grid.best_params_)
# -
cols = ['yearm1_eff','yearm2_eff','wshares_lost','wshares_transfer','recruit_score']
MODEL = LinearRegression()
# MODEL = XGBRegressor(learning_rate=0.05, max_depth=2, min_child_weight=15,
#                      subsample=0.5, n_estimators=250, objective='reg:squarederror')
# Leave-one-year-out predictions: for each season, train on all other seasons
# (always excluding PREDICT_YEAR) and predict that season.
preds = []
for year in df.year.unique():
    train_years = sorted(set(df.year.unique())-set([year, PREDICT_YEAR]))
    valid_years = [year]
    X_train = df.loc[df.year.isin(train_years)][cols]
    y_train = df.loc[df.year.isin(train_years)].final_eff
    X_valid = df.loc[df.year.isin(valid_years)][cols]
    y_valid = df.loc[df.year.isin(valid_years)].final_eff
    MODEL.fit(X_train.values, y_train)
    preds += MODEL.predict(X_valid.values).tolist()
# NOTE(review): assumes df rows are grouped in df.year.unique() order so the
# concatenated preds line up with df's row order — confirm upstream construction.
df["pred_eff"] = preds
if isinstance(MODEL, LinearRegression):
    # Per-feature contribution columns from the last-fitted linear coefficients.
    for i,col in enumerate(cols):
        df["contrib_"+col] = MODEL.coef_[i] * df[col]
    df['intercept'] = MODEL.intercept_
# Predicted vs actual efficiency, plus the distribution of final efficiencies.
fig,axs = plt.subplots(1,2,figsize=(12,6))
axs[0].scatter(df.pred_eff, df.final_eff, alpha=0.2, s=15)
axs[0].set_xlabel('Predicted final AdjEff')
axs[0].set_ylabel('Actual final AdjEff')
# plt.scatter(df.recruit_score, df.final_eff-df.pred_eff, alpha=0.2)
axs[1].hist(df.final_eff, bins=np.linspace(-50,50,51))
print(np.mean(df.pred_eff))
# Sensitivity plots: replace one feature at a time with its median ('dummy')
# and plot the resulting change in prediction against the feature's value.
fig,axs = plt.subplots(2,3,figsize=(12,6))
axs = axs.flatten()
for i,c in enumerate(cols):
    df['dummy'] = df[c].median()
    cols_mod = list(cols)
    cols_mod[i] = 'dummy'
    print(cols_mod)
    X = df[cols]
    X_mod = df[cols_mod]
    # MODEL here retains the fit from the final fold of the loop above.
    pred = MODEL.predict(X.values)
    pred_mod = MODEL.predict(X_mod.values)
    axs[i].scatter(df[c], pred-pred_mod, alpha=0.2, s=10)
    axs[i].set_xlabel(c)
    axs[i].set_ylabel('delta(prediction)')
fig.tight_layout()
# Simple linear model for pace: previous season's pace -> final pace.
df_tmp = df[df.year != PREDICT_YEAR]
linreg.fit(df_tmp.yearm1_pace.values.reshape(-1,1), df_tmp.final_pace)
print(linreg.coef_, linreg.intercept_)
df["pred_pace"] = linreg.predict(df.yearm1_pace.values.reshape(-1,1))
plt.figure(figsize=(5,5))
plt.scatter(df.pred_pace, df.final_pace, alpha=0.2, s=10)
plt.plot([62,76], [62,76], 'k--')
df.head()
# Split the predicted efficiency change between offense and defense in
# proportion to how far each side sits from the overall mean final efficiency.
avg_eff = df.final_eff.mean()
frac_to_off = (df.yearm1_oeff - avg_eff).abs() / ((df.yearm1_oeff-avg_eff).abs() + (df.yearm1_deff-avg_eff).abs())
df["pred_oeff"] = df.yearm1_oeff + (df.pred_eff - df.yearm1_eff) * frac_to_off
df["pred_deff"] = df.yearm1_deff - (df.pred_eff - df.yearm1_eff) * (1-frac_to_off)
# Persist predictions (dropping the helper 'dummy' column).
df.drop('dummy', axis=1).rename({'tid':'team_id'}, axis=1).to_csv('../data/preseason_predictions.csv',
                                                                 float_format="%.3f",
                                                                 index=False)
df.head()
# Estimate how to blend the preseason prediction with the in-season rating as a
# function of how much of the season has been played.
ps = []
for FRAC in np.arange(0.05,0.96,0.05):
    # Rebuild in-season ratings using only the first FRAC of each season's games.
    first, second = utils.partition_games(df_games, frac=FRAC)
    season_stats_dict = utils.compute_season_stats(df_games.iloc[first])
    season_stats_df = utils.stats_dict_to_df(season_stats_dict)
    utils.add_advanced_stats(season_stats_df)
    season_stats_dict = utils.stats_df_to_dict(season_stats_df)
    # Convergence parameter ramps from 0.9 up to 1.0 over the first 20% of the season.
    param = 0.9 + 0.1*min(0.2,FRAC)/0.2
    eff.compute_efficiency_ratings(season_stats_dict, conv_param=param)
    season_stats_df = utils.stats_dict_to_df(season_stats_dict)
    df_merge = df.merge(season_stats_df[['year','team_id','Tneteff','pace']], left_on=['year','tid'], right_on=['year','team_id'])
    # Drop rows with large-negative Tneteff — presumably a sentinel for unrated teams; confirm in eff module.
    df_merge = df_merge.loc[df_merge.Tneteff > -900]
    # Closed-form least-squares weight p minimising sum((p*a + (1-p)*b - y)^2),
    # i.e. p = sum((a-b)(y-b)) / sum((a-b)^2), for efficiency ...
    a = df_merge.pred_eff
    b = df_merge.Tneteff
    y = df_merge.final_eff
    p = -(a*b-b**2-a*y+b*y).sum() / ((a-b)**2).sum()
    # ... and the same for pace.
    a = df_merge.pred_pace
    b = df_merge.pace
    y = df_merge.final_pace
    p2 = -(a*b-b**2-a*y+b*y).sum() / ((a-b)**2).sum()
    print(f'{FRAC:.2f} {p:.3f} {p2:.3f}')
    ps.append((FRAC,p,p2))
# Plot the fitted blend weights against season fraction, with the endpoints
# pinned at (0,1) and (1,0), alongside the (1-x)^2.6 approximation.
plt.figure(figsize=(9,7))
pvals = [x[1] for x in ps]
pvals_pace = [x[2] for x in ps]
xs = np.linspace(0,1,101)
ys = (1-xs)**2.6
plt.plot(np.arange(0,1.01,0.05), [1]+pvals+[0], 'o-', label='Regressed blend parameter')
plt.plot(xs, ys, '-', label='(1-season_frac)^2.6')
# plt.plot(np.arange(0,1.01,0.05), [1]+pvals_pace+[0], 'o-')
plt.xlabel('Season completion fraction', fontsize='x-large')
plt.ylabel('Blend parameter', fontsize='x-large')
plt.legend(fontsize='x-large')
|
notebooks/preseason_prediction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
import json
# Read the usernames out of the leaked password CSV, then record them in
# compromised_users.txt (one username per line).
compromised_users = []
with open('passwords.csv') as password_file:
    password_csv = csv.DictReader(password_file)
    for password_row in password_csv:
        compromised_users.append(password_row['Username'])
#print(compromised_users)
compromised = []
with open('compromised_users.txt', 'w') as compromised_user_file:
    for i in compromised_users:
        compromised.append(i)
        # Fix: the file was opened for writing but never written to, leaving it
        # empty (truncated). Persist each compromised username on its own line.
        compromised_user_file.write(i + '\n')
    print(compromised)
# Write a status report into boss_message.json, then read the file contents
# back into `message`.
message = []
with open('boss_message.json', 'r+') as boss_message:  # 'r+' requires the file to already exist
    boss_message_dict = {
        'recipient': 'The Boss',
        'message': 'Mission Success'
    }
    json.dump(boss_message_dict, boss_message)
    # Fix: after json.dump the file position is at EOF, so iterating the handle
    # read zero lines. Rewind first so the loop sees the freshly-written JSON.
    # NOTE(review): 'r+' does not truncate — longer pre-existing content would
    # leave trailing bytes after the dumped JSON; confirm the file's prior state.
    boss_message.seek(0)
    for i in boss_message:
        message.append(i)
# print(message)
# Overwrite the password file with the "Slash Null" calling-card ASCII art.
with open('new_passwords.csv', 'w') as new_passwords_obj:
    # Art is a single triple-quoted literal; its exact spacing is intentional.
    slash_null_sig = """_ _ ___ __ ____
/ )( \ / __) / \(_ _)
) \/ ( ( (_ \( O ) )(
\____/ \___/ \__/ (__)
_ _ __ ___ __ _ ____ ____
/ )( \ / _\ / __)( / )( __)( \
) __ (/ \( (__ ) ( ) _) ) D (
\_)(_/\_/\_/ \___)(__\_)(____)(____/
____ __ __ ____ _ _
___ / ___)( ) / _\ / ___)/ )( \
(___) \___ \/ (_/\/ \\___ \) __ (
(____/\____/\_/\_/(____/\_)(_/
__ _ _ _ __ __
( ( \/ )( \( ) ( )
/ /) \/ (/ (_/\/ (_/\
\_)__)\____/\____/\____/"""
    new_passwords_obj.write(slash_null_sig)
|
Hacking the Fender.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.1
# language: julia
# name: julia-1.7
# ---
# # I.1 Numbers
#
#
# Reference: [Overton](https://cs.nyu.edu/~overton/book/)
#
# In this chapter, we introduce the [Two's-complement](https://en.wikipedia.org/wiki/Two's_complement)
# storage for integers and the
# [IEEE Standard for Floating-Point Arithmetic](https://en.wikipedia.org/wiki/IEEE_754).
# There are many possible ways of representing real numbers on a computer, as well as
# the precise behaviour of operations such as addition, multiplication, etc.
# Before the 1980s each processor had potentially a different representation for
# real numbers, as well as different behaviour for operations.
# The IEEE standard, introduced in 1985, was a means to standardise this across
# processors so that algorithms would produce consistent and reliable results.
#
# This chapter may seem very low level for a mathematics course but there are
# three important reasons to understand the behaviour of integers and floating-point numbers:
# 1. Integer arithmetic can suddenly start giving wrong negative answers when numbers
# become large.
# 2. Floating-point arithmetic is very precisely defined, and can even be used
# in rigorous computations as we shall see in the problem sheets. But it is not exact
# and it's important to understand how errors in computations can accumulate.
# 3. Failure to understand floating-point arithmetic can cause catastrophic issues
# in practice, with the extreme example being the
# [explosion of the Ariane 5 rocket](https://youtu.be/N6PWATvLQCY?t=86).
#
#
# In this chapter we discuss the following:
#
# 1. Binary representation: Any real number can be represented in binary, that is,
# by an infinite sequence of 0s and 1s (bits). We review binary representation.
# 2. Integers: There are multiple ways of representing integers on a computer. We discuss
# the different types of integers and their representation as bits, and how arithmetic operations behave
# like modular arithmetic. As an advanced topic we discuss `BigInt`, which uses variable bit length storage.
# 3. Floating-point numbers: Real numbers are stored on a computer with a finite number of bits.
# There are three types of floating-point numbers: _normal numbers_, _subnormal numbers_, and _special numbers_.
# 4. Arithmetic: Arithmetic operations in floating-point are exact up to rounding, and how the
# rounding mode can be set. This allows us to bound errors in computations.
# 5. High-precision floating-point numbers: As an advanced topic, we discuss how the precision of floating-point arithmetic can be increased arbitrarily
# using `BigFloat`.
#
# Before we begin, we load two external packages. SetRounding.jl allows us
# to set the rounding mode of floating-point arithmetic. ColorBitstring.jl
# implements functions `printbits` (and `printlnbits`)
# which print the bits (and with a newline) of floating-point numbers in colour.
using SetRounding, ColorBitstring
# ## 1. Binary representation
#
# Any integer can be presented in binary format, that is, a sequence of `0`s and `1`s.
#
# **Definition**
# For $B_0,\ldots,B_p \in \{0,1\}$ denote a non-negative integer in _binary format_ by:
# $$
# (B_p\ldots B_1B_0)_2 := 2^pB_p + \cdots + 2B_1 + B_0
# $$
# For $b_1,b_2,\ldots \in \{0,1\}$, denote a non-negative real number in _binary format_ by:
# $$
# (B_p \ldots B_0.b_1b_2b_3\ldots)_2 = (B_p \ldots B_0)_2 + {b_1 \over 2} + {b_2 \over 2^2} + {b_3 \over 2^3} + \cdots
# $$
#
#
#
# First we show some examples of verifying a number's binary representation:
#
# **Example (integer in binary)**
# A simple integer example is $5 = 2^2 + 2^0 = (101)_2$.
#
# **Example (rational in binary)**
# Consider the number `1/3`. In decimal recall that:
# $$
# 1/3 = 0.3333\ldots = \sum_{k=1}^\infty {3 \over 10^k}
# $$
# We will see that in binary
# $$
# 1/3 = (0.010101\ldots)_2 = \sum_{k=1}^\infty {1 \over 2^{2k}}
# $$
# Both results can be proven using the geometric series:
# $$
# \sum_{k=0}^\infty z^k = {1 \over 1 - z}
# $$
# provided $|z| < 1$. That is, with $z = {1 \over 4}$ we verify the binary expansion:
# $$
# \sum_{k=1}^\infty {1 \over 4^k} = {1 \over 1 - 1/4} - 1 = {1 \over 3}
# $$
# A similar argument with $z = 1/10$ shows the decimal case.
#
#
#
# ## 2. Integers
#
#
# On a computer one typically represents integers by a finite number of $p$ bits,
# with $2^p$ possible combinations of 0s and 1s. For _unsigned integers_ (non-negative integers)
# these bits are just the first $p$ binary digits: $(B_{p-1}\ldots B_1B_0)_2$.
#
# Integers on a computer follow [modular arithmetic](https://en.wikipedia.org/wiki/Modular_arithmetic):
#
# **Definition (ring of integers modulo $m$)** Denote the ring
# $$
# {\mathbb Z}_{m} := \{0 \ ({\rm mod}\ m), 1 \ ({\rm mod}\ m), \ldots, m-1 \ ({\rm mod}\ m) \}
# $$
#
# Integers represented with $p$-bits on a computer actually
# represent elements of ${\mathbb Z}_{2^p}$ and integer arithmetic on a computer is
# equivalent to arithmetic modulo $2^p$.
#
# **Example (addition of 8-bit unsigned integers)** Consider the addition of
# two 8-bit numbers:
# $$
# 255 + 1 = (11111111)_2 + (00000001)_2 = (100000000)_2 = 256
# $$
# The result is impossible to store in just 8-bits! It is way too slow
# for a computer to increase the number of bits, or to throw an error (checks are slow).
# So instead it treats the integers as elements of ${\mathbb Z}_{256}$:
# $$
# 255 + 1 \ ({\rm mod}\ 256) = (00000000)_2 \ ({\rm mod}\ 256) = 0 \ ({\rm mod}\ 256)
# $$
# We can see this in Julia:
x = UInt8(255)  # largest 8-bit unsigned integer, bits 11111111
y = UInt8(1)    # bits 00000001
printbits(x); println(" + "); printbits(y); println(" = ")
printbits(x + y)  # wraps modulo 2^8: all-zero bits
# **Example (multiplication of 8-bit unsigned integers)**
# Multiplication works similarly: for example,
# $$
# 254 * 2 \ ({\rm mod}\ 256) = 252 \ ({\rm mod}\ 256) = (11111100)_2 \ ({\rm mod}\ 256)
# $$
# We can see this behaviour in code by printing the bits:
x = UInt8(254) # 254 represented in 8-bits as an unsigned integer
y = UInt8(2) # 2 represented in 8-bits as an unsigned integer
printbits(x); println(" * "); printbits(y); println(" = ")
printbits(x * y)
# ### Signed integer
#
# Signed integers use the [Two's complement](https://epubs.siam.org/doi/abs/10.1137/1.9780898718072.ch3)
# convention. The convention is if the first bit is 1 then the number is negative: the number $2^p - y$
# is interpreted as $-y$.
# Thus for $p = 8$ we are interpreting
# $2^7$ through $2^8-1$ as negative numbers.
#
# **Example (converting bits to signed integers)**
# What 8-bit integer has the bits `01001001`? Adding the corresponding decimal places we get:
2^0 + 2^3 + 2^6
# What 8-bit (signed) integer has the bits `11001001`? Because the first bit is `1` we know it's a negative
# number, hence we need to sum the bits but then subtract `2^p`:
2^0 + 2^3 + 2^6 + 2^7 - 2^8
# We can check the results using `printbits`:
printlnbits(Int8(73))
printbits(-Int8(55))
# Arithmetic works precisely
# the same for signed and unsigned integers.
#
# **Example (addition of 8-bit integers)**
# Consider `(-1) + 1` in 8-bit arithmetic. The number $-1$ has the same bits as
# $2^8 - 1 = 255$. Thus this is equivalent to the previous question and we get the correct
# result of `0`. In other words:
# $$
# -1 + 1 \ ({\rm mod}\ 2^p) = 2^p-1 + 1 \ ({\rm mod}\ 2^p) = 2^p \ ({\rm mod}\ 2^p) = 0 \ ({\rm mod}\ 2^p)
# $$
#
#
# **Example (multiplication of 8-bit integers)**
# Consider `(-2) * 2`. $-2$ has the same bits as $2^8 - 2 = 254$ and $-4$ has the
# same bits as $2^8-4 = 252$, and hence from the previous example we get the correct result of `-4`.
# In other words:
# $$
# (-2) * 2 \ ({\rm mod}\ 2^p) = (2^p-2) * 2 \ ({\rm mod}\ 2^p) = 2^{p+1}-4 \ ({\rm mod}\ 2^p) = -4 \ ({\rm mod}\ 2^p)
# $$
#
#
#
#
#
# **Example (overflow)** We can find the largest and smallest instances of a type using `typemax` and `typemin`:
printlnbits(typemax(Int8)) # 2^7-1 = 127
printbits(typemin(Int8)) # -2^7 = -128
# As explained, due to modular arithmetic, when we add `1` to the largest 8-bit integer we get the smallest:
typemax(Int8) + Int8(1) # returns typemin(Int8)
# This behaviour is often not desired and is known as _overflow_, and one must be wary
# of using integers close to their largest value.
#
#
# ### Variable bit representation (**advanced**)
#
# An alternative representation for integers uses a variable number of bits,
# with the advantage of avoiding overflow but with the disadvantage of a substantial
# speed penalty. In Julia these are `BigInt`s, which we can create by calling `big` on an
# integer:
x = typemax(Int64) + big(1) # Too big to be an `Int64`
# Note in this case addition automatically promotes an `Int64` to a `BigInt`.
# We can create very large numbers using `BigInt`:
x^100
# Note the number of bits is not fixed, the larger the number, the more bits required
# to represent it, so while overflow is impossible, it is possible to run out of memory if a number is
# astronomically large: go ahead and try `x^x` (at your own risk).
#
#
# ## Division
#
# In addition to `+`, `-`, and `*` we have integer division `÷`, which rounds down:
5 ÷ 2 # equivalent to div(5,2)
# Standard division `/` (or `\` for division on the right) creates a floating-point number,
# which will be discussed shortly:
5 / 2 # alternatively 2 \ 5
# We can also create rational numbers using `//`:
(1//2) + (3//4)
# Rational arithmetic often leads to overflow so it
# is often best to combine `big` with rationals:
big(102324)//132413023 + 23434545//4243061 + 23434545//42430534435
# ## 3. Floating-point numbers
#
# Floating-point numbers are a subset of real numbers that are representable using
# a fixed number of bits.
#
# **Definition (floating-point numbers)**
# Given integers $σ$ (the "exponential shift") $Q$ (the number of exponent bits) and
# $S$ (the precision), define the set of
# _Floating-point numbers_ by dividing into _normal_, _sub-normal_, and _special number_ subsets:
# $$
# F_{σ,Q,S} := F^{\rm normal}_{σ,Q,S} \cup F^{\rm sub}_{σ,Q,S} \cup F^{\rm special}.
# $$
# The _normal numbers_
# $F^{\rm normal}_{σ,Q,S} \subset {\mathbb R}$ are defined by
# $$
# F^{\rm normal}_{σ,Q,S} = \{\pm 2^{q-σ} \times (1.b_1b_2b_3\ldots b_S)_2 : 1 \leq q < 2^Q-1 \}.
# $$
# The _sub-normal numbers_ $F^{\rm sub}_{σ,Q,S} \subset {\mathbb R}$ are defined as
# $$
# F^{\rm sub}_{σ,Q,S} = \{\pm 2^{1-σ} \times (0.b_1b_2b_3\ldots b_S)_2\}.
# $$
# The _special numbers_ $F^{\rm special} \not\subset {\mathbb R}$ are defined later.
#
# Note this set of real numbers has no nice algebraic structure: it is not closed under addition, subtraction, etc.
# We will therefore need to define approximate versions of algebraic operations later.
#
# Floating-point numbers are stored in $1 + Q + S$ total number of bits, in the format
# $$
# sq_{Q-1}\ldots q_0 b_1 \ldots b_S
# $$
# The first bit ($s$) is the <span style="color:red">sign bit</span>: 0 means positive and 1 means
# negative. The bits $q_{Q-1}\ldots q_0$ are the <span style="color:green">exponent bits</span>:
# they are the binary digits of the unsigned integer $q$:
# $$
# q = (q_{Q-1}\ldots q_0)_2.
# $$
# Finally, the bits $b_1\ldots b_S$ are the <span style="color:blue">significand bits</span>.
# If $1 \leq q < 2^Q-1$ then the bits represent the normal number
# $$
# x = \pm 2^{q-σ} \times (1.b_1b_2b_3\ldots b_S)_2.
# $$
# If $q = 0$ (i.e. all bits are 0) then the bits represent the sub-normal number
# $$
# x = \pm 2^{1-σ} \times (0.b_1b_2b_3\ldots b_S)_2.
# $$
# If $q = 2^Q-1$ (i.e. all bits are 1) then the bits represent a special number, discussed
# later.
#
#
# ### IEEE floating-point numbers
#
# **Definition (IEEE floating-point numbers)**
# IEEE has 3 standard floating-point formats: 16-bit (half precision), 32-bit (single precision) and
# 64-bit (double precision) defined by:
# $$
# \begin{align*}
# F_{16} &:= F_{15,5,10} \\
# F_{32} &:= F_{127,8,23} \\
# F_{64} &:= F_{1023,11,52}
# \end{align*}
# $$
#
# In Julia these correspond to 3 different floating-point types:
#
# 1. `Float64` is a type representing double precision ($F_{64}$).
# We can create a `Float64` by including a
# decimal point when writing the number:
# `1.0` is a `Float64`. `Float64` is the default format for
# scientific computing (on the _Floating-Point Unit_, FPU).
# 2. `Float32` is a type representing single precision ($F_{32}$). We can create a `Float32` by including a
# `f0` when writing the number:
# `1f0` is a `Float32`. `Float32` is generally the default format for graphics (on the _Graphics Processing Unit_, GPU),
# as the difference between 32 bits and 64 bits is indistinguishable to the eye in visualisation,
# and more data can be fit into a GPU's limited memory.
# 3. `Float16` is a type representing half-precision ($F_{16}$).
# It is important in machine learning where one wants to maximise the amount of data
# and high accuracy is not necessarily helpful.
#
#
# **Example (rational in `Float32`)** How is the number $1/3$ stored in `Float32`?
# Recall that
# $$
# 1/3 = (0.010101\ldots)_2 = 2^{-2} (1.0101\ldots)_2 = 2^{125-127} (1.0101\ldots)_2
# $$
# and since $
# 125 = (1111101)_2
# $ the <span style="color:green">exponent bits</span> are `01111101`.
# For the significand we round the last bit to the nearest element of $F_{32}$, (this is explained in detail in
# the section on rounding), so we have
# $$
# 1.010101010101010101010101\ldots \approx 1.01010101010101010101011 \in F_{32}
# $$
# and the <span style="color:blue">significand bits</span> are `01010101010101010101011`.
# Thus the `Float32` bits for $1/3$ are:
printbits(1f0/3)
# For sub-normal numbers, the simplest example is zero, which has $q=0$ and all significand bits zero:
printbits(0.0)
# Unlike integers, we also have a negative zero:
printbits(-0.0)
# This is treated as identical to `0.0` (except for degenerate operations as explained in special numbers).
#
#
#
# ### Special normal numbers
#
# When dealing with normal numbers there are some important constants that we will use
# to bound errors.
#
# **Definition (machine epsilon/smallest positive normal number/largest normal number)** _Machine epsilon_ is denoted
# $$
# ϵ_{{\rm m},S} := 2^{-S}.
# $$
# When $S$ is implied by context we use the notation $ϵ_{\rm m}$.
# The _smallest positive normal number_ is $q = 1$ and $b_k$ all zero:
# $$
# \min |F_{σ,Q,S}^{\rm normal}| = 2^{1-σ}
# $$
# where $|A| := \{|x| : x \in A \}$.
# The _largest (positive) normal number_ is
#
# $$
# \max F_{σ,Q,S}^{\rm normal} = 2^{2^Q-2-σ} (1.11\ldots1)_2 = 2^{2^Q-2-σ} (2-ϵ_{\rm m})
# $$
#
#
# We confirm the simple bit representations:
# Float32 parameters: σ = 127 (exponent shift), Q = 8 (exponent bits),
# S = 23 (significand bits), i.e. F₃₂ = F_{127,8,23} as defined above.
# Fix: the tuple was previously ordered 127,23,8, which assigned Q = 23 and
# S = 8, contradicting the Float32 definition and the constants below.
σ,Q,S = 127,8,23 # Float32
εₘ = 2.0^(-S)  # machine epsilon for Float32: 2^-23
printlnbits(Float32(2.0^(1-σ))) # smallest positive Float32
printlnbits(Float32(2.0^(2^Q-2-σ) * (2-εₘ))) # largest Float32
# For a given floating-point type, we can find these constants using the following functions:
eps(Float32),floatmin(Float32),floatmax(Float32)
# **Example (creating a sub-normal number)** If we divide the smallest normal number by two, we get a subnormal number:
mn = floatmin(Float32) # smallest normal Float32
printlnbits(mn)
printbits(mn/2)
# Can you explain the bits?
#
#
#
#
# ### Special numbers
#
# The special numbers extend the real line by adding $\pm \infty$ but also a notion of "not-a-number".
#
# **Definition (not a number)**
# Let ${\rm NaN}$ represent "not a number" and define
# $$
# F^{\rm special} := \{\infty, -\infty, {\rm NaN}\}
# $$
#
# Whenever the bits of $q$ of a floating-point number are all 1 then they represent an element of $F^{\rm special}$.
# If all $b_k=0$, then the number represents either $\pm\infty$, called `Inf` and `-Inf` for 64-bit floating-point numbers (or `Inf16`, `Inf32`
# for 16-bit and 32-bit, respectively):
printlnbits(Inf16)
printbits(-Inf16)
# All other special floating-point numbers represent ${\rm NaN}$. One particular representation of ${\rm NaN}$
# is denoted by `NaN` for 64-bit floating-point numbers (or `NaN16`, `NaN32` for 16-bit and 32-bit, respectively):
printbits(NaN16)
# These are needed for undefined algebraic operations such as:
0/0
# **Example (many `NaN`s)** What happens if we change some other $b_k$ to be nonzero?
# We can create bits as a string and see:
i = parse(UInt16, "0111110000010001"; base=2)
reinterpret(Float16, i)
# Thus, there are more than one `NaN`s on a computer.
#
#
# ## 4. Arithmetic
#
#
# Arithmetic operations on floating-point numbers are _exact up to rounding_.
# There are three basic rounding strategies: round up/down/nearest.
# Mathematically we introduce a function to capture the notion of rounding:
#
# **Definition (rounding)** ${\rm fl}^{\rm up}_{σ,Q,S} : \mathbb R \rightarrow F_{σ,Q,S}$ denotes
# the function that rounds a real number up to the nearest floating-point number that is greater or equal.
# ${\rm fl}^{\rm down}_{σ,Q,S} : \mathbb R \rightarrow F_{σ,Q,S}$ denotes
# the function that rounds a real number down to the nearest floating-point number that is less than or equal.
# ${\rm fl}^{\rm nearest}_{σ,Q,S} : \mathbb R \rightarrow F_{σ,Q,S}$ denotes
# the function that rounds a real number to the nearest floating-point number. In case of a tie,
# it returns the floating-point number whose least significant bit is equal to zero.
# We use the notation ${\rm fl}$ when $σ,Q,S$ and the rounding mode are implied by context,
# with ${\rm fl}^{\rm nearest}$ being the default rounding mode.
#
#
#
# In Julia, the rounding mode is specified by tags `RoundUp`, `RoundDown`, and
# `RoundNearest`. (There are also more exotic rounding strategies `RoundToZero`, `RoundNearestTiesAway` and
# `RoundNearestTiesUp` that we won't use.)
#
#
#
# **WARNING (rounding performance, advanced)** These rounding modes are part
# of the FPU instruction set so will be (roughly) equally fast as the default, `RoundNearest`.
# Unfortunately, changing the rounding mode is expensive, and is not thread-safe.
#
#
#
#
# Let's try rounding a `Float64` to a `Float32`.
printlnbits(1/3) # 64 bits
printbits(Float32(1/3)) # round to nearest 32-bit
# The default rounding mode can be changed:
printbits(Float32(1/3,RoundDown) )
# Or alternatively we can change the rounding mode for a chunk of code
# using `setrounding`. The following computes upper and lower bounds for `/`:
x = 1f0
# Returns a 2-tuple: a lower and an upper bound for x/3, computed with
# directed rounding inside each `setrounding` block.
setrounding(Float32, RoundDown) do
    x/3
end,
setrounding(Float32, RoundUp) do
    x/3
end
# **WARNING (compiled constants, advanced)**: Why did we first create a variable `x` instead of typing `1f0/3`?
# This is due to a very subtle issue where the compiler is _too clever for its own good_:
# it recognises `1f0/3` can be computed at compile time, but failed to recognise the rounding mode
# was changed.
#
# In IEEE arithmetic, the arithmetic operations `+`, `-`, `*`, `/` are defined by the property
# that they are exact up to rounding. Mathematically we denote these operations as follows:
# $$
# \begin{align*}
# x\oplus y &:= {\rm fl}(x+y) \\
# x\ominus y &:= {\rm fl}(x - y) \\
# x\otimes y &:= {\rm fl}(x * y) \\
# x\oslash y &:= {\rm fl}(x / y)
# \end{align*}
# $$
# Note also that `^` and `sqrt` are similarly exact up to rounding.
#
#
# **Example (decimal is not exact)** `1.1+0.1` gives a different result than `1.2`:
x = 1.1
y = 0.1
x + y - 1.2 # Not Zero?!?
# This is because ${\rm fl}(1.1) \neq 1+1/10$, but rather:
# $$
# {\rm fl}(1.1) = 1 + 2^{-4}+2^{-5} + 2^{-8}+2^{-9}+\cdots + 2^{-48}+2^{-49} + 2^{-51}
# $$
#
# **WARNING (non-associative)** These operations are not associative! E.g. $(x \oplus y) \oplus z$ is not necessarily equal to $x \oplus (y \oplus z)$.
# Commutativity is preserved, at least.
# Here is a surprising example of non-associativity:
(1.1 + 1.2) + 1.3, 1.1 + (1.2 + 1.3)
# Can you explain this in terms of bits?
#
#
# ### Bounding errors in floating point arithmetic
#
# Before we discuss bounds on errors, we need to talk about the two notions of errors:
#
# **Definition (absolute/relative error)** If $\tilde x = x + δ_{\rm a} = x (1 + δ_{\rm r})$ then
# $|δ_{\rm a}|$ is called the _absolute error_ and $|δ_{\rm r}|$ is called the
# _relative error_ in approximating $x$ by $\tilde x$.
#
# We can bound the error of basic arithmetic operations in terms of machine epsilon, provided
# a real number is close to a normal number:
#
# **Definition (normalised range)** The _normalised range_ ${\cal N}_{σ,Q,S} \subset {\mathbb R}$
# is the subset of real numbers that lies
# between the smallest and largest normal floating-point number:
# $$
# {\cal N}_{σ,Q,S} := \{x : \min |F_{σ,Q,S}| \leq |x| \leq \max F_{σ,Q,S} \}
# $$
# When $σ,Q,S$ are implied by context we use the notation ${\cal N}$.
#
# We can use machine epsilon to determine bounds on rounding:
#
# **Proposition (rounding arithmetic)**
# If $x \in {\cal N}$ then
# $$
# {\rm fl}^{\rm mode}(x) = x (1 + \delta_x^{\rm mode})
# $$
# where the _relative error_ is
# $$
# \begin{align*}
# |\delta_x^{\rm nearest}| &\leq {ϵ_{\rm m} \over 2} \\
# |\delta_x^{\rm up/down}| &< {ϵ_{\rm m}}.
# \end{align*}
# $$
#
#
# This immediately implies relative error bounds on all IEEE arithmetic operations, e.g.,
# if $x+y \in {\cal N}$ then
# we have
# $$
# x \oplus y = (x+y) (1 + \delta_1)
# $$
# where (assuming the default nearest rounding)
# $
# |\delta_1| \leq {ϵ_{\rm m} \over 2}.
# $
#
# **Example (bounding a simple computation)** We show how to bound the error in computing
# $$
# (1.1 + 1.2) + 1.3
# $$
# using floating-point arithmetic. First note that `1.1` on a computer is in
# fact ${\rm fl}(1.1)$. Thus this computation becomes
# $$
# ({\rm fl}(1.1) \oplus {\rm fl}(1.2)) \oplus {\rm fl}(1.3)
# $$
# First we find
# $$
# ({\rm fl}(1.1) \oplus {\rm fl}(1.2)) = (1.1(1 + δ_1) + 1.2 (1+δ_2))(1 + δ_3)
# = 2.3 + 1.1 δ_1 + 1.2 δ_2 + 2.3 δ_3 + 1.1 δ_1 δ_3 + 1.2 δ_2 δ_3
# = 2.3 + δ_4
# $$
# where (note $δ_1 δ_3$ and $δ_2 δ_3$ are tiny so we just round up our bound to the nearest decimal)
# $$
# |δ_4| \leq 2.3 ϵ_{\rm m}
# $$
# Thus the computation becomes
# $$
# ((2.3 + δ_4) + 1.3 (1 + δ_5)) (1 + δ_6) = 3.6 + δ_4 + 1.3 δ_5 + 3.6 δ_6 + δ_4 δ_6 + 1.3 δ_5 δ_6 = 3.6 + δ_7
# $$
# where the _absolute error_ is
# $$
# |δ_7| \leq 4.8 ϵ_{\rm m}
# $$
# Indeed, this bound is bigger than the observed error:
abs(3.6 - (1.1+1.2+1.3)), 4.8eps()
# ### Arithmetic and special numbers
#
# Arithmetic works differently on `Inf` and `NaN` and for undefined operations.
# In particular we have:
# +
1/0.0 # Inf
1/(-0.0) # -Inf
0.0/0.0 # NaN
Inf*0 # NaN
Inf+5 # Inf
(-1)*Inf # -Inf
1/Inf # 0.0
1/(-Inf) # -0.0
Inf - Inf # NaN
Inf == Inf # true
Inf == -Inf # false
NaN*0 # NaN
NaN+5 # NaN
1/NaN # NaN
NaN == NaN # false
NaN != NaN # true
# -
# ### Special functions (advanced)
#
# Other special functions like `cos`, `sin`, `exp`, etc. are _not_ part of the IEEE standard.
# Instead, they are implemented by composing the basic arithmetic operations, which accumulate
# errors. Fortunately many are designed to have _relative accuracy_, that is, `s = sin(x)`
# (that is, the Julia implementation of $\sin x$) satisfies
# $$
# {\tt s} = (\sin x) ( 1 + \delta)
# $$
# where $|\delta| < cϵ_{\rm m}$ for a reasonably small $c > 0$,
# _provided_ that $x \in {\rm F}^{\rm normal}$.
# Note these special functions are written in (advanced) Julia code, for example,
# [sin](https://github.com/JuliaLang/julia/blob/d08b05df6f01cf4ec6e4c28ad94cedda76cc62e8/base/special/trig.jl#L76).
#
#
# **WARNING (sin(fl(x)) is not always close to sin(x))** This is possibly a misleading statement
# when one thinks of $x$ as a real number. Consider $x = \pi$ so that $\sin x = 0$.
# However, ${\rm fl}(\pi) \neq \pi$, and so we only have relative accuracy compared
# to the floating point approximation:
π₆₄ = Float64(π)
πᵦ = big(π₆₄) # Convert 64-bit approximation of π to higher precision. Note its the same number.
abs(sin(π₆₄)), abs(sin(π₆₄) - sin(πᵦ)) # only has relative accuracy compared to sin(πᵦ), not sin(π)
# Another issue is when $x$ is very large:
ε = eps() # machine epsilon, 2^(-52)
x = 2*10.0^100
abs(sin(x) - sin(big(x))) ≤ abs(sin(big(x))) * ε
# But if we instead compute `10^100` using `BigFloat` we get a completely different
# answer that even has the wrong sign!
x̃ = 2*big(10.0)^100
sin(x), sin(x̃)
# This is because we commit an error on the order of roughly
# $$
# 2 * 10^{100} * ϵ_{\rm m} \approx 4.44 * 10^{84}
# $$
# when we round $2*10^{100}$ to the nearest float.
#
#
# **Example (polynomial near root)**
# For general functions we do not generally have relative accuracy.
# For example, consider a simple
# polynomial $1 + 4x + x^2$ which has a root at $\sqrt 3 - 2$. But
# Quadratic with a root at x = √3 - 2 ≈ -0.268: evaluating near the root loses
# relative accuracy because f(x) is tiny while the rounding errors are not.
f = x -> 1 + 4x + x^2
x = sqrt(3) - 2
abserr = abs(f(big(x)) - f(x))  # compare against extended-precision evaluation
relerr = abserr/abs(f(x))
abserr, relerr # very large relative error
# We can see this in the error bound (note that $4x$ is exact for floating point numbers
# and adding $1$ is exact for this particular $x$):
# $$
# (x \otimes x \oplus 4x) + 1 = (x^2 (1 + \delta_1) + 4x)(1+\delta_2) + 1 = x^2 + 4x + 1 + \delta_1 x^2 + 4x \delta_2 + x^2 \delta_1 \delta_2
# $$
# Using a simple bound $|x| < 1$ we get a (pessimistic) bound on the absolute error of
# $3 ϵ_{\rm m}$. Here `f(x)` itself is less than $2 ϵ_{\rm m}$ so this does not imply
# relative accuracy. (Of course, a bad upper bound is not the same as a proof of inaccuracy,
# but here we observe the inaccuracy in practice.)
#
#
#
#
#
#
# ## 5. High-precision floating-point numbers (advanced)
#
# It is possible to set the precision of a floating-point number
# using the `BigFloat` type, which results from the usage of `big`
# when the result is not an integer.
# For example, here is an approximation of 1/3 accurate
# to 77 decimal digits:
big(1)/3
# Note we can set the rounding mode as in `Float64`, e.g.,
# this gives (rigorous) bounds on
# `1/3`:
setrounding(BigFloat, RoundDown) do
big(1)/3
end, setrounding(BigFloat, RoundUp) do
big(1)/3
end
# We can also increase the precision, e.g., this finds bounds on `1/3` accurate to
# more than 1000 decimal places:
setprecision(4_000) do # 4000 bit precision
setrounding(BigFloat, RoundDown) do
big(1)/3
end, setrounding(BigFloat, RoundUp) do
big(1)/3
end
end
# In the problem sheet we shall see how this can be used to rigorously bound ${\rm e}$,
# accurate to 1000 digits.
|
notebooks/Numbers.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Lecture 7
#
# ## Software design, documentation, and testing
# + [markdown] slideshow={"slide_type": "slide"}
# ## Design of a program
#
# From the Practice of Programming:
#
# >The essence of design is to balance competing goals and constraints. Although there may be many tradeoffs when one is writing a small self-contained system, the ramifications of particular choices remain within the system and affect only the individual programmer. But when code is to be used by others, decisions have wider repercussions.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Software Design Desirables
# - Documentation
# - names (understandable names)
# - pre+post conditions or requirements
# - Maintainability
# - Extensibility
# - Modularity and Encapsulation
# - Portability
# - Installability
# - Generality
# - Data Abstraction (change types, change data structures)
# - Functional Abstraction (the object model, overloading)
# - Robustness
# - Provability: Invariants, preconditions, postconditions
# - User Proofing, Adversarial Inputs
# - Efficiency
# - Use of appropriate algorithms and data structures
# - Optimization (but no premature optimization)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Issues to be aware of:
# * **Interfaces**
#
# Your program is being designed to be used by someone: either an end user, another programmer, or even yourself. This interface is a contract between you and the user.
#
# * **Hiding Information**
#
# There is information hiding between layers (a higher up layer can be more abstract). Encapsulation, abstraction, and modularization, are some of the techniques used here.
#
# * **Resource Management**
#
# Resource management issues: who allocates storage for data structures. Generally we want resource allocation/deallocation to happen in the same layer.
#
# * **How to Deal with Errors**
#
# Do we return special values? Do we throw exceptions? Who handles them?
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Interface principles
#
# Interfaces should:
#
# - hide implementation details
# - have a small set of operations exposed, the smallest possible, and these should be orthogonal. Be stingy with the user.
# - be transparent with the user in what goes on behind the scenes
# - be consistent internally: library functions should have similar signature, classes similar methods, and external programs should have the same cli flags
#
# **Testing should deal with ALL of the issues above, and each layer ought to be tested separately.**
# + [markdown] slideshow={"slide_type": "slide"}
# ## Testing
#
# There are different kinds of tests inspired by the interface principles just described.
#
# - **acceptance tests** verify that a program meets a customer's expectations. In a sense these are a test of the *interface* to the customer: does the program do everything you promised the customer it would do?
#
# - **unit tests** are tests which test a unit of the program for use by another unit. These could test the interface for a client, but they must also test the internal functions that you want to use.
#
# **Exploratory testing**, **regression testing**, and **integration testing** are done in both of these categories, with the latter trying to combine layers and subsystems, not necessarily at the level of an entire application.
#
# One can also performance test, random and exploratorily test, and stress test a system (to create adversarial situations).
# + [markdown] slideshow={"slide_type": "slide"}
# ## Documentation
# **Documentation is a contract between a user (client) and an implementor (library writer).**
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Write good documentation
# * Follow standards of [PEP 257](https://www.python.org/dev/peps/pep-0257/)
# * Clearly outline the inputs, outputs, default values, and expected behavior
# * Include basic usage examples when possible
# + slideshow={"slide_type": "subslide"}
def quad_roots(a=1.0, b=2.0, c=0.0):
    """Returns the roots of a quadratic equation: ax^2 + bx + c = 0.
    INPUTS
    =======
    a: float, optional, default value is 1
       Coefficient of quadratic term
    b: float, optional, default value is 2
       Coefficient of linear term
    c: float, optional, default value is 0
       Constant term
    RETURNS
    ========
    roots: 2-tuple of complex floats
       Has the form (root1, root2) unless a = 0
       in which case a ValueError exception is raised
    EXAMPLES
    =========
    >>> quad_roots(1.0, 1.0, -12.0)
    ((3+0j), (-4+0j))
    """
    import cmath  # complex square roots keep negative discriminants well-defined
    if a == 0:
        raise ValueError("The quadratic coefficient is zero. This is not a quadratic equation.")
    # Guard clause above replaces an if/else; below is the quadratic formula.
    disc_root = cmath.sqrt(b * b - 4.0 * a * c)
    root_plus = (-b + disc_root) / 2.0 / a
    root_minus = (-b - disc_root) / 2.0 / a
    return (root_plus, root_minus)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Documenting Invariants
# * An invariant is something that is true at some point in the code.
# * Invariants and the contract are what we use to guide our implementation.
# * Pre-conditions and post-conditions are special cases of invariants.
# * Pre-conditions are true at function entry. They constrain the user.
# * Post-conditions are true at function exit. They constrain the implementation.
#
# You can change implementations, stuff under the hood, etc, but once the software is in the wild **you can't change the pre-conditions and post-conditions** since the client user is depending upon them.
# + slideshow={"slide_type": "subslide"}
def quad_roots(a=1.0, b=2.0, c=0.0):
    """Returns the roots of a quadratic equation: ax^2 + bx + c = 0.
    INPUTS
    =======
    a: float, optional, default value is 1
    Coefficient of quadratic term
    b: float, optional, default value is 2
    Coefficient of linear term
    c: float, optional, default value is 0
    Constant term
    RETURNS
    ========
    roots: 2-tuple of complex floats
    Has the form (root1, root2) unless a = 0
    in which case a ValueError exception is raised
    NOTES
    =====
    PRE:
    - a, b, c have numeric type
    - three or fewer inputs
    POST:
    - a, b, and c are not changed by this function
    - raises a ValueError exception if a = 0
    - returns a 2-tuple of roots
    EXAMPLES
    =========
    >>> quad_roots(1.0, 1.0, -12.0)
    ((3+0j), (-4+0j))
    """
    import cmath # Can return complex numbers from square roots
    if a == 0:
        # Pre-condition violated: not a quadratic, and the formula below
        # would divide by zero.
        raise ValueError("The quadratic coefficient is zero. This is not a quadratic equation.")
    else:
        # Quadratic formula; cmath.sqrt handles negative discriminants.
        sqrtdisc = cmath.sqrt(b * b - 4.0 * a * c)
        r1 = -b + sqrtdisc
        r2 = -b - sqrtdisc
        return (r1 / 2.0 / a, r2 / 2.0 / a)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Accessing Documentation (1)
# * Documentation can be accessed by calling the `__doc__` special method
# * Simply calling `function_name.__doc__` will give a pretty ugly output
# * You can make it cleaner by making use of `splitlines()`
# + slideshow={"slide_type": "-"}
quad_roots.__doc__.splitlines()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Accessing Documentation (2)
# A nice way to access the documentation is to use the [`pydoc` module](https://docs.python.org/2/library/pydoc.html).
# + slideshow={"slide_type": "subslide"}
import pydoc
pydoc.doc(quad_roots)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Testing
# + [markdown] slideshow={"slide_type": "subslide"}
# There are different kinds of tests inspired by the interface principles just described.
#
# - **acceptance tests** verify that a program meets a customer's expectations. In a sense these are a test of the *interface* to the customer: does the program do everything you promised the customer it would do?
#
# - **unit tests** are tests which test a unit of the program for use by another unit. These could test the interface for a client, but they must also test the internal functions that you want to use.
#
# **Exploratory testing**, **regression testing**, and **integration testing** are done in both of these categories, with the latter trying to combine layers and subsystems, not necessarily at the level of an entire application.
#
# One can also performance test, random and exploratorily test, and stress test a system (to create adversarial situations).
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Testing of a program
#
# Test as you write your program.
#
# This is so important that I repeat it.
#
# **Test as you go**.
#
# From The Practice of Programming:
#
#
# >The effort of testing as you go is minimal and pays off handsomely. Thinking about testing as you write a program will lead to better code, because that's when you know best what the code should do. **If instead you wait until something breaks, you will probably have forgotten how the code works**. Working under pressure, you will need to figure it out again, which takes time, and the fixes will be less thorough and more fragile because your refreshed understanding is likely to be incomplete.
#
# #### [Test Driven Development](https://en.wikipedia.org/wiki/Test-driven_development)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### `doctest`
# The `doctest` module allows us to test pieces of code that we put into our doc. string.
#
# The doctests are a type of **unit test**, which document the interface of the function by example.
#
# Doctests are an example of a **test harness**. We write some tests and execute them all at once. Note that individual tests can be written and executed individually in an ad-hoc manner. However, that is especially inefficient.
#
# Of course, too many doctests clutter the documentation section.
#
# The doctests should not cover every case; they should describe the various ways a class or function can be used. There are better ways to do more comprehensive testing.
# + slideshow={"slide_type": "-"}
import doctest
doctest.testmod(verbose=True)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Principles of Testing
# * Test simple parts first
# * Test code at its boundaries
#     - The idea is that most errors happen at data boundaries such as empty input, single input item, exactly full array, weird values, etc. If a piece of code works at the boundaries, it's likely to work elsewhere...
# * Program defensively
# >"Program defensively. A useful technique is to add code to handle "can't happen" cases, situations where it is not logically possible for something to happen but (because of some failure elsewhere) it might anyway. As an example, a program processing grades might expect that there would be no negative or huge values but should check anyway.
# * Automate using a test harness
# * Test incrementally
#
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Test simple parts first:
# A test for the `quad_roots` function:
# + slideshow={"slide_type": "-"}
def test_quadroots():
    """Spot-check quad_roots against the factorization (x - 3)(x + 4)."""
    expected = ((3+0j), (-4+0j))
    assert quad_roots(1.0, 1.0, -12.0) == expected
test_quadroots()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Test at the boundaries
#
# Here we write a test to handle the crazy case in which the user passes strings in as the coefficients.
# + slideshow={"slide_type": "-"}
def test_quadroots_types():
    """Check that non-numeric (string) coefficients raise a TypeError."""
    try:
        quad_roots("", "green", "hi")
    except TypeError as err:
        assert type(err) == TypeError
    else:
        # Bug fix: previously, if no exception was raised at all, the test
        # passed silently -- the try/except made it vacuous.
        raise AssertionError("quad_roots did not raise TypeError for string input")
print(test_quadroots_types())
# + [markdown] slideshow={"slide_type": "subslide"}
# We can also check to make sure the $a=0$ case is handled okay:
# + slideshow={"slide_type": "-"}
def test_quadroots_zerocoeff():
    """a == 0 must raise ValueError rather than divide by zero."""
    try:
        quad_roots(a=0.0)
    except ValueError as err:
        assert type(err) == ValueError
    else:
        # Bug fix: previously a silent non-raise made this test vacuous.
        raise AssertionError("quad_roots did not raise ValueError for a == 0")
test_quadroots_zerocoeff()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### When you get an error
#
# It could be that:
#
# - you messed up an implementation
# - you did not handle a case
# - your test was messed up (be careful of this)
#
# If the error was not found in an existing test, create a new test that represents the problem **before** you do anything else. The test should capture the essence of the problem: this process itself is useful in uncovering bugs. Then this error may even suggest more tests.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Automate Using a Test Harness
#
# Great! So we've written some ad-hoc tests. It's pretty clunky. We should use a **test harness**.
#
# As mentioned already, `doctest` is a type of test harness. It has its uses, but gets messy quickly.
#
# We'll talk about **[`pytest`](https://docs.pytest.org/en/latest/)** here.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Preliminaries
# 1. The idea is that our code consists of several different pieces (or objects)
# 2. The objects are grouped based on how they are related to each other
# - e.g. you may have a class that contains different statistical operations
# - We'll get into this idea much more in the coming weeks
# 3. For now, we can think of having related functions all in one file
# 4. We want to test each of those functions
# - Tests should include checking correctness of output, correctness of input, fringe cases, etc
#
# I will work in the Jupyter notebook for demo purposes.
#
# To create and save a file in the Jupyter notebook, you type `%%file file_name.py`.
#
# I highly recommend that you actually write your code using a text editor (like `vim`) or an `IDE` like `Spyder`.
#
# The toy examples that we've been working with in the class so far can be done in Jupyter, but a real project can be done more efficiently through other means.
# + slideshow={"slide_type": "subslide"}
# %%file roots.py
def quad_roots(a=1.0, b=2.0, c=0.0):
    """Returns the roots of a quadratic equation: ax^2 + bx + c = 0.
    INPUTS
    =======
    a: float, optional, default value is 1
    Coefficient of quadratic term
    b: float, optional, default value is 2
    Coefficient of linear term
    c: float, optional, default value is 0
    Constant term
    RETURNS
    ========
    roots: 2-tuple of complex floats
    Has the form (root1, root2) unless a = 0
    in which case a ValueError exception is raised
    EXAMPLES
    =========
    >>> quad_roots(1.0, 1.0, -12.0)
    ((3+0j), (-4+0j))
    """
    import cmath # Can return complex numbers from square roots
    if a == 0:
        # a = 0 is not a quadratic and would divide by zero below.
        raise ValueError("The quadratic coefficient is zero. This is not a quadratic equation.")
    else:
        # Standard quadratic formula; cmath.sqrt keeps negative
        # discriminants well-defined by returning a complex value.
        sqrtdisc = cmath.sqrt(b * b - 4.0 * a * c)
        r1 = -b + sqrtdisc
        r2 = -b - sqrtdisc
        return (r1 / 2.0 / a, r2 / 2.0 / a)
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's put our tests into one file.
# + slideshow={"slide_type": "fragment"}
# %%file test_roots.py
import roots
def test_quadroots_result():
    """quad_roots returns both roots of x^2 + x - 12 = (x - 3)(x + 4)."""
    assert roots.quad_roots(1.0, 1.0, -12.0) == ((3+0j), (-4+0j))
def test_quadroots_types():
    """String coefficients must raise TypeError (not pass silently)."""
    try:
        roots.quad_roots("", "green", "hi")
    except TypeError as err:
        assert type(err) == TypeError
    else:
        # Bug fix: without this the test passed vacuously when no
        # exception was raised at all.
        raise AssertionError("expected TypeError for string coefficients")
def test_quadroots_zerocoeff():
    """a == 0 is not a quadratic and must raise ValueError."""
    try:
        roots.quad_roots(a=0.0)
    except ValueError as err:
        assert type(err) == ValueError
    else:
        raise AssertionError("expected ValueError for a == 0")
# + slideshow={"slide_type": "subslide"}
# !pytest
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Code Coverage
#
# In some sense, it would be nice to somehow check that every line in a program has been covered by a test. If you could do this, you might know that a particular line has not contributed to making something wrong. But this is hard to do: it would be hard to use normal input data to force a program to go through particular statements. So we settle for testing the important lines. The `pytest-cov` module makes sure that this works.
#
# Coverage does not mean that every edge case has been tried, but rather, every critical statement has been.
#
# Let's add a new function to our roots file.
# + slideshow={"slide_type": "subslide"}
# %%file roots.py
def linear_roots(a=1.0, b=0.0):
    """Returns the root of a linear equation: ax + b = 0.
    INPUTS
    =======
    a: float, optional, default value is 1
    Coefficient of linear term
    b: float, optional, default value is 0
    Coefficient of constant term
    RETURNS
    ========
    root: float
    The single root -b / a, unless a = 0
    in which case a ValueError exception is raised
    EXAMPLES
    =========
    >>> linear_roots(1.0, 2.0)
    -2.0
    """
    if a == 0:
        raise ValueError("The linear coefficient is zero. This is not a linear equation.")
    # Doc fix: the original docstring promised a "1-tuple", but the extra
    # parentheses were only grouping -- a bare float is returned, as the
    # doctest above shows. Docstring corrected and the redundant parens dropped.
    return -b / a
def quad_roots(a=1.0, b=2.0, c=0.0):
    """Returns the roots of a quadratic equation: ax^2 + bx + c = 0.
    INPUTS
    =======
    a: float, optional, default value is 1
    Coefficient of quadratic term
    b: float, optional, default value is 2
    Coefficient of linear term
    c: float, optional, default value is 0
    Constant term
    RETURNS
    ========
    roots: 2-tuple of complex floats
    Has the form (root1, root2) unless a = 0
    in which case a ValueError exception is raised
    EXAMPLES
    =========
    >>> quad_roots(1.0, 1.0, -12.0)
    ((3+0j), (-4+0j))
    """
    import cmath # Can return complex numbers from square roots
    if a == 0:
        # a = 0 is not a quadratic and would divide by zero below.
        raise ValueError("The quadratic coefficient is zero. This is not a quadratic equation.")
    else:
        # Standard quadratic formula; cmath.sqrt keeps negative
        # discriminants well-defined by returning a complex value.
        sqrtdisc = cmath.sqrt(b * b - 4.0 * a * c)
        r1 = -b + sqrtdisc
        r2 = -b - sqrtdisc
        return (r1 / 2.0 / a, r2 / 2.0 / a)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Run the tests and check code coverage
# + slideshow={"slide_type": "subslide"}
# !pytest --cov
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Run the tests, report code coverage, and report missing lines.
# -
# !pytest --cov --cov-report term-missing
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Run tests, **including the doctests**, report code coverage, and report missing lines.
# -
# !pytest --doctest-modules --cov --cov-report term-missing
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Let's put some tests in for the linear roots function.
# +
# %%file test_roots.py
import roots
def test_quadroots_result():
    """quad_roots returns both roots of x^2 + x - 12 = (x - 3)(x + 4)."""
    assert roots.quad_roots(1.0, 1.0, -12.0) == ((3+0j), (-4+0j))
def test_quadroots_types():
    """String coefficients must raise TypeError (not pass silently)."""
    try:
        roots.quad_roots("", "green", "hi")
    except TypeError as err:
        assert type(err) == TypeError
    else:
        # Bug fix: without this the test passed vacuously when no
        # exception was raised at all.
        raise AssertionError("expected TypeError for string coefficients")
def test_quadroots_zerocoeff():
    """a == 0 is not a quadratic and must raise ValueError."""
    try:
        roots.quad_roots(a=0.0)
    except ValueError as err:
        assert type(err) == ValueError
    else:
        raise AssertionError("expected ValueError for a == 0")
def test_linearoots_result():
    """linear_roots solves 2x - 3 = 0.

    NOTE(review): the name is missing an 'r' ("linearoots"); kept as-is so
    recorded test ids do not change.
    """
    assert roots.linear_roots(2.0, -3.0) == 1.5
def test_linearroots_types():
    """A string coefficient must raise TypeError (not pass silently)."""
    try:
        roots.linear_roots("ocean", 6.0)
    except TypeError as err:
        assert type(err) == TypeError
    else:
        raise AssertionError("expected TypeError for string coefficient")
def test_linearroots_zerocoeff():
    """a == 0 is not a linear equation and must raise ValueError."""
    try:
        roots.linear_roots(a=0.0)
    except ValueError as err:
        assert type(err) == ValueError
    else:
        raise AssertionError("expected ValueError for a == 0")
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Now run the tests and check code coverage.
# + slideshow={"slide_type": "subslide"}
# !pytest --doctest-modules --cov --cov-report term-missing
# -
|
lectures/L7/L7.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''py38'': conda)'
# name: python3
# ---
# # **<font color=red>Quicksight Migration Artefacts Notebook</font>**
# ## *Instructions Manual:*
# 1. Proceed each step in the sequence mentioned in this notebook.
# 2. Get the programmatic access for the accounts and update the variables in cell#2 for Source Account and cell#3 for Target Account.
# 3. Set the Variables in cell#6 for the analysis and dashboard being migrated.
# 4. This notebook enables the migration for Analyses, Dashboards, and their corresponding datasets.
# 5. The pre-requisite for the dashboard is the availability of a json formatted file that has a cross-reference for the datasources in the form of datasource Ids for each environment, as stated in the example:
# {"dsn": [{"name": "Athena", "prod": "cXXXXXXX-fXXX-4XXX-8XXX-6XXXXXXXXX", "np": "cXXXXXXX-fXXX-4XXX-8XXX-6XXXXXXXXX", "sb": "cXXXXXXX-fXXX-4XXX-8XXX-6XXXXXXXXX"}]}
#
# *The lookup is based on the name and the items present with the keys ('prod', 'np' ,'sb') prod: Production, np: Non-Prod, sb: Sandbox*
#
# #### **Author:** <NAME>
import os
import boto3
import io
import gc
import awswrangler as wr
import sys
import time
import json
# # **<font color=red>Begin Process Execution**
#
# ### **<font color=pink>1 Initialize the variables and Instantiate the QuickSight Clients using boto3**
# ### <font color=yellow>Populate the current session values for the variables below and generate the instances for Resource, session and client
"""
Populate the variable values from source AWS instance
"""
sv_aws_access_key_id='<KEY>'
sv_aws_secret_access_key='<KEY>'
sv_aws_session_token='<KEY>
"""
Populate the variable values from target AWS instance
"""
tgt_aws_access_key_id='<KEY>'
tgt_aws_secret_access_key='<KEY>'
tgt_aws_session_token='<KEY>
# +
# Source-account handles: S3 resource, a us-east-1 session, and the QuickSight
# client, all built from the explicit credentials set above.  The commented-out
# variants fall back to the default credential chain instead -- presumably for
# running inside an already-authenticated environment (TODO confirm).
s3 = boto3.resource('s3',
                    aws_access_key_id=sv_aws_access_key_id,
                    aws_secret_access_key=sv_aws_secret_access_key,
                    aws_session_token=sv_aws_session_token)
#s3 = boto3.resource('s3')
s3_session=boto3.Session(aws_access_key_id=sv_aws_access_key_id,aws_secret_access_key=sv_aws_secret_access_key,aws_session_token=sv_aws_session_token,region_name = 'us-east-1')
#s3_session=boto3.Session(region_name = 'us-east-1')
#client = boto3.client('quicksight',region_name='us-east-1')
client = boto3.client('quicksight',aws_access_key_id=sv_aws_access_key_id,aws_secret_access_key=sv_aws_secret_access_key,aws_session_token=sv_aws_session_token,region_name='us-east-1')
# +
# Target-account handles, mirroring the source-account setup above but using
# the tgt_* credentials; `tgt_client` is the QuickSight client used for all
# target-side API calls later in the notebook.
tgt_s3 = boto3.resource('s3',
                        aws_access_key_id=tgt_aws_access_key_id,
                        aws_secret_access_key=tgt_aws_secret_access_key,
                        aws_session_token=tgt_aws_session_token)
#s3 = boto3.resource('s3')
tgt_s3_session=boto3.Session(aws_access_key_id=tgt_aws_access_key_id,aws_secret_access_key=tgt_aws_secret_access_key,aws_session_token=tgt_aws_session_token,region_name = 'us-east-1')
#s3_session=boto3.Session(region_name = 'us-east-1')
#client = boto3.client('quicksight',region_name='us-east-1')
tgt_client = boto3.client('quicksight',aws_access_key_id=tgt_aws_access_key_id,aws_secret_access_key=tgt_aws_secret_access_key,aws_session_token=tgt_aws_session_token,region_name='us-east-1')
# -
# ### <font color=yellow>Set the variables for the source and target account_id, role, username, the analysis and dashboard id that we need to migrate
# Per-run migration parameters -- fill these in before executing the notebook.
v_src_account_id='000000000000'   # 12-digit source AWS account id
v_analysis_id='4xxxxxxxx-8000-4xxx-bxxx-axxxxxxxxxx'   # QuickSight analysis id to migrate
v_dashboard_id='5xxxxxxxx-8000-9xxx-bxxx-axxxxxxxxxx'  # QuickSight dashboard id to migrate
v_tgt_account_id='111111111111'   # 12-digit target AWS account id
v_src_env='np' #values in ['np','prod','sb']
v_target_env='sb' #values in ['np','prod','sb']
v_role='XXXXXXXXXXXX_Put_the_right_role_here'   # role used in target principal ARNs
v_user='XXXXXXXXXXXX_Put_the_right_user_here'   # QuickSight user name in the target account
# # **<font color=red>Begin Process Execution**
#
# ### **<font color=pink>2 Analysis**
# ### <font color=yellow>2.1 Extract the source analysis properties based on the analysis id set in the variables
# +
"""
Get the source Analysis ID using analysis name
"""
analysisId=''
analysisArn=''
analysisName=''
analyses = client.list_analyses(
AwsAccountId=v_src_account_id,
MaxResults=100
)
v_analysis = []
y = [(member['Arn'],member['AnalysisId'],member['Name'], member['Status'],member['CreatedTime'],member['LastUpdatedTime']) for member in analyses["AnalysisSummaryList"] ]
v_analysis.extend(y)
while 'NextToken' in analyses:
analyses = client.list_analyses(
AwsAccountId=v_src_account_id,
MaxResults=100,
NextToken=analyses['NextToken']
)
y = [(member['Arn'],member['AnalysisId'],member['Name'] if ('Name' in member.keys()) else "", member['Status'],member['CreatedTime'],member['LastUpdatedTime']) for member in analyses["AnalysisSummaryList"] ]
v_analysis.extend(y)
#i+=len(analyses.get('AnalysisSummaryList'))
#print("Number of Analyses: " + str(i))
for analysis in v_analysis:
if analysis[1]==v_analysis_id and analysis[3].find('_SUCCESSFUL')!=-1 and analysis[3].find('DELETE')==-1:
analysisId=analysis[1]
analysisName=analysis[2]
analysisArn=analysis[0]
break
src_analysisId=analysisId
src_analysis_arn=analysisArn
src_analysis_name=analysisName
# -
print("src_analysis_id is: {id}\nsrc_analysis_arn is: {arn}\nsrc_analysis_name is: {name}".format(id = src_analysisId,arn=src_analysis_arn,name=src_analysis_name))
# +
"""
Get the target Analysis ID using analysis name
"""
analysisId=''
analysisArn=''
analysisName=''
analyses = tgt_client.list_analyses(
AwsAccountId=v_tgt_account_id,
MaxResults=100
)
v_analysis = []
y = [(member['Arn'],member['AnalysisId'],member['Name'], member['Status'],member['CreatedTime'],member['LastUpdatedTime']) for member in analyses["AnalysisSummaryList"] ]
v_analysis.extend(y)
while 'NextToken' in analyses:
analyses = tgt_client.list_analyses(
AwsAccountId=v_tgt_account_id,
MaxResults=100,
NextToken=analyses['NextToken']
)
y = [(member['Arn'],member['AnalysisId'],member['Name'] if ('Name' in member.keys()) else "", member['Status'],member['CreatedTime'],member['LastUpdatedTime']) for member in analyses["AnalysisSummaryList"] ]
v_analysis.extend(y)
#i+=len(analyses.get('AnalysisSummaryList'))
#print("Number of Analyses: " + str(i))
for analysis in v_analysis:
if analysis[1]==v_analysis_id and analysis[3].find('_SUCCESSFUL')!=-1 and analysis[3].find('DELETE')==-1:
analysisId=analysis[1]
analysisArn=analysis[0]
analysisName=analysis[2]
break
if analysisId:
tgt_analysisId=analysisId
tgt_analysis_arn=analysisArn
tgt_analysis_name=analysisName
# -
print("tgt_analysisId is: {id}\ntgt_analysis_arn is: {arn}\ntgt_analysis_name is: {name}".format(id = tgt_analysisId,arn=tgt_analysis_arn,name=tgt_analysis_name))
# #### <font color=yellow>2.1.1 Set the Template Name
# Template id derived from the source analysis id (spaces stripped so it
# remains a valid QuickSight TemplateId).
v_template_name=src_analysisId.replace(" ","")+"_MigrationTemplate"
# ### <font color=yellow>2.2 Get the list of Datasets attached to the analysis
### Get the description of the analysis. This will tell us all the datasets that the analysis uses.
src_analysis_desc=client.describe_analysis(   # full metadata, incl. DataSetArns
    AwsAccountId=v_src_account_id,
    AnalysisId=src_analysisId
)
print(src_analysis_desc)   # inspect before extracting the dataset ARNs below
# #### <font color=yellow>2.2.1 Get the IDs for all the datasets and store in a dictionary
# +
### Extract the dataset ARNs, ids, and names into three dictionaries keyed by ARN.
v_src_DatasetArn_Dict={}
v_src_DatasetArn_Id={}
v_src_DatasetName={}
datasets = client.list_data_sets(
    AwsAccountId=v_src_account_id,
    MaxResults=100
)
v_datasets = []
# Fix: guard the 'Name' key on the first page as well (the pagination loop
# below already did), so a name-less dataset summary cannot raise KeyError.
y = [(member['Arn'], member['DataSetId'], member['Name'] if 'Name' in member else "",
      member['CreatedTime'], member['LastUpdatedTime'])
     for member in datasets["DataSetSummaries"]]
v_datasets.extend(y)
while 'NextToken' in datasets:
    datasets = client.list_data_sets(
        AwsAccountId=v_src_account_id,
        MaxResults=100,
        NextToken=datasets['NextToken']
    )
    y = [(member['Arn'], member['DataSetId'], member['Name'] if 'Name' in member else "",
          member['CreatedTime'], member['LastUpdatedTime'])
         for member in datasets["DataSetSummaries"]]
    v_datasets.extend(y)
# Cross-reference the analysis' DataSetArns with the account's dataset list.
for src_analysis_dataset in src_analysis_desc['Analysis']['DataSetArns']:
    for dataset in v_datasets:
        if dataset[0]==src_analysis_dataset:
            v_src_DatasetArn_Dict[src_analysis_dataset]=dataset[0]
            v_src_DatasetArn_Id[src_analysis_dataset]=dataset[1]
            v_src_DatasetName[src_analysis_dataset]=dataset[2]
# -
print(v_src_DatasetArn_Dict)
print(v_src_DatasetArn_Id)
print(v_src_DatasetName)
"""
##convert this into a loop. we can have multiple datasets in an analysis
datasets = client.list_data_sets(
AwsAccountId=v_src_account_id,
MaxResults=100
)
v_datasets = []
y = [(member['Arn'],member['DataSetId'],member['Name'], member['CreatedTime'],member['LastUpdatedTime']) for member in datasets["DataSetSummaries"] ]
v_datasets.extend(y)
while 'NextToken' in datasets:
datasets = client.list_data_sets(
AwsAccountId=v_src_account_id,
MaxResults=100,
NextToken=datasets['NextToken']
)
y = [(member['Arn'],member['DataSetId'],member['Name'] if ('Name' in member.keys()) else "", member['CreatedTime'],member['LastUpdatedTime']) for member in datasets["DataSetSummaries"] ]
v_datasets.extend(y)
#i+=len(datasets.get('DataSetSummaries'))
#print("Number of Analyses: " + str(i))
for dataset in v_datasets:
if dataset[0]==analysis['Analysis']['DataSetArns'][0]:
datasetArn=dataset[0]
datasetId=dataset[1]
break
"""
# #### <font color=yellow>2.2.2 Generate the unique ARNs datasets and store in a list. These ARNs will then be used to create the template for the target
# Build one DataSetReference per dataset ARN for the template SourceEntity.
# Placeholder names must be unique within the template, hence the 1-based
# counter suffix.  (Idiom fix: enumerate replaces the manual n = n + 1
# counter; a stale commented-out line was removed.)
src_dataset_ARN_list=[]
for n, item in enumerate(v_src_DatasetArn_Dict, start=1):
    src_dataset_ARN_list.append(dict(DataSetPlaceholder=src_analysisId.replace(" ","")+"_MigrationTemplateARN"+str(n),DataSetArn=item))
print(src_dataset_ARN_list)
# ### **<font color=pink>3 Templates**
# ### <font color=yellow>3.1 Create the Template
# #### <font color=yellow>3.1.1 Check if the template already exists. If the template already exists, update it, else create it
# +
# Page through list_templates and collect (Arn, TemplateId, Name, LatestVersionNumber,
# CreatedTime, LastUpdatedTime) tuples for every template in the source account.
templateArn = ''
templateId = ''
templateName = ''
v_templates = []
list_kwargs = dict(AwsAccountId=v_src_account_id, MaxResults=100)
while True:
    templates = client.list_templates(**list_kwargs)
    # 'Name' may be absent from a summary; default to "" instead of raising.
    # (The original guarded only continuation pages, not the first one.)
    v_templates.extend(
        (member['Arn'], member['TemplateId'], member.get('Name', ''),
         member['LatestVersionNumber'], member['CreatedTime'], member['LastUpdatedTime'])
        for member in templates["TemplateSummaryList"]
    )
    if 'NextToken' not in templates:
        break
    list_kwargs['NextToken'] = templates['NextToken']
# Locate the migration template by TemplateId; the markers stay empty if it is absent.
for template in v_templates:
    if template[1] == v_template_name:
        templateArn = template[0]
        templateId = template[1]
        templateName = template[2]
        break
# templateArn is non-empty when a template with this id already exists, so update it;
# otherwise create it.  Printing the ARN inside each branch replaces the original
# bare try/except around the prints, which relied on createTemplate raising NameError.
if templateArn:
    print("updating template")
    updateTemplate = client.update_template(
        AwsAccountId=v_src_account_id,
        TemplateId=v_template_name,
        SourceEntity={
            'SourceAnalysis': {
                'Arn': src_analysis_arn,
                'DataSetReferences': src_dataset_ARN_list
            }
        }
    )
    print("UpdateTemplateARN: " + updateTemplate['Arn'])
else:
    print("creating template")
    createTemplate = client.create_template(
        AwsAccountId=v_src_account_id,
        TemplateId=v_template_name,
        SourceEntity={
            'SourceAnalysis': {
                'Arn': src_analysis_arn,
                'DataSetReferences': src_dataset_ARN_list
            }
        }
    )
    print("CreateTemplateARN: " + createTemplate['Arn'])
# -
###Backup
"""
createTemplate=client.create_template(
AwsAccountId='462393762422',
TemplateId='ProductionOverviewTemplate_Migration',
SourceEntity={
'SourceAnalysis': {
'Arn': analysisArn,
'DataSetReferences': [
{
'DataSetPlaceholder': 'ProductionOverview_DSPH_Migration',
'DataSetArn': datasetArn
},
]
}
}
)"""
# #### <font color=yellow>3.1.2 Confirm if the template has been created. You can modify template permissions only if it has been successfully created.
# ##### Template permissions should be updated on the source account template to provide access to the Target account
# +
#Use this to check if the template has been created successfully. If yes, then move to the next step of listing and updating permissions
template_desc = client.describe_template(
    AwsAccountId=v_src_account_id,
    TemplateId=v_template_name,
)
# Template creation is asynchronous: if the first describe call does not report a
# *_SUCCESSFUL status yet, wait two minutes and check once more.
if template_desc['Template']['Version']['Status'].find('_SUCCESSFUL') == -1:
    time.sleep(120)
    template_desc = client.describe_template(
        AwsAccountId=v_src_account_id,
        TemplateId=v_template_name,
    )
if template_desc['Template']['Version']['Status'].find('_SUCCESSFUL') == -1:
    print("Template could not be successfully created. Please check the configurations and re-execute the steps")
else:
    print("Template was successfully created. Proceeding with the next step of updating permissions.")
    # Grant the target account read access to the template so it can be used as the
    # SourceEntity for create/update calls there.  This single call replaces the two
    # identical calls the original repeated in both success branches.
    updateTemplate = client.update_template_permissions(
        AwsAccountId=v_src_account_id,
        TemplateId=v_template_name,
        GrantPermissions=[
            {
                "Principal": "arn:aws:iam::" + v_tgt_account_id + ":root",
                'Actions': ["quicksight:UpdateTemplatePermissions", "quicksight:DescribeTemplate"]
            },
        ]
    )
    ## Validate the response. Ensure that the template permissions have been updated in "Permissions" node.
    # Printed only on the success path: on failure the original printed
    # updateTemplate['Permissions'] against a response that has no such key.
    print(updateTemplate['Permissions'])
# -
####We can remove this part
# NOTE(review): each iteration overwrites dict_physical_table_map, so only the *last*
# dataset's physical table map survives this loop — confirm whether the intent was to
# accumulate DataSourceArns across all datasets (the header says this cell is removable).
for key in v_src_DatasetArn_Id:
    dict_physical_table_map=client.describe_data_set(
        AwsAccountId=v_src_account_id,
        DataSetId=v_src_DatasetArn_Id[key])['DataSet']['PhysicalTableMap']
# Collect every distinct DataSourceArn referenced by the dataset's physical tables.
distinct_dsn = set()
for i in dict_physical_table_map.keys():
    for j in dict_physical_table_map[i].keys():
        for k in dict_physical_table_map[i][j].keys():
            if k=='DataSourceArn':
                distinct_dsn.add(dict_physical_table_map[i][j]['DataSourceArn'])
distinct_source_dsn_set=set()
distinct_source_dsnid_set=set()  # NOTE(review): never populated in this cell
distinct_target_dsn_set=set()    # NOTE(review): never populated in this cell
# For each referenced ARN, page through the account's data sources and record the id
# of the matching data source.
for dsn in distinct_dsn:
    # NOTE(review): this listing does not depend on dsn and could be hoisted out of
    # the loop — it re-fetches the same pages once per ARN.
    datasources = client.list_data_sources(
        AwsAccountId=v_src_account_id, ##Replace this with Target Account
        MaxResults=100
    )
    v_datasources = []
    y = [(member['Arn'],member['DataSourceId'],member['Name'], member['CreatedTime'],member['LastUpdatedTime'],member['Type']) for member in datasources["DataSources"] ]
    v_datasources.extend(y)
    while 'NextToken' in datasources:
        datasources = client.list_data_sources(
            AwsAccountId=v_src_account_id,
            MaxResults=100,
            NextToken=datasources['NextToken']
        )
        # NOTE(review): continuation pages omit member['Type'], producing 5-tuples
        # while the first page produces 6-tuples — confirm downstream indexing.
        y = [(member['Arn'],member['DataSourceId'],member['Name'] if ('Name' in member.keys()) else "",member['CreatedTime'],member['LastUpdatedTime']) for member in datasources["DataSources"] ]
        v_datasources.extend(y)
    for datasource in v_datasources:
        """
        if datasource[2]=='Athena' :
            sourceDataSourceARN=datasource[0]
            sourceDataSourceId=datasource[1]
        """
        # Match by ARN; the last match leaves sourceDataSource* set after the loop.
        if datasource[0]==dsn:
            sourceDataSourceARN=datasource[0]
            sourceDataSourceId=datasource[1]
            sourceDataSourceName=datasource[2]
            distinct_source_dsn_set.add(sourceDataSourceId)
# ### **<font color=pink>4 Data Source and Data Set**</font>
# ### <font color=yellow>4.1 Update the Dataset Definition for each of the datasets. Create/Update the Datasets</font>
# ###### Each dataset has a physical table map, which has a datasource arn attached to it. There can be multiple physical table maps and multiple data sources. We have a cross-reference json list of the mapping of all datasources for all the environments (accounts). At run time, we refer to this json file and extract the datasource arn's based on the environments and replace the data source arn in the physical table map section of the dataset definition. This is done inside of a loop "for k in v_src_DatasetArn_Id".
# ###### The loop iterates through the dataset definition for each dataset and at the end of each iteration, creates the dataset in the target account. If the dataset already exists, it updates it.
# ###### Each iteration also updates the permissions of the dataset, so it can be accessed via quicksight UI.
# +
# Build a {source_datasource_id: target_datasource_id} lookup from the cross-account
# mapping file, then create/update each dataset in the target account.
get_dsn_mapping_dict = dict()
with open('data.json') as json_file:
    dsn_list = json.load(json_file)
for i in dsn_list['dsn']:
    get_dsn_mapping_dict[i[v_src_env]] = i[v_target_env]
for k in v_src_DatasetArn_Id:
    update_flag = 'N'
    ds = client.describe_data_set(
        AwsAccountId=v_src_account_id,
        DataSetId=v_src_DatasetArn_Id[k])
    # Rewrite every physical table's DataSourceArn so it points at the target account
    # and the mapped target datasource id.
    for i in list(ds['DataSet']['PhysicalTableMap']):
        for key in ds['DataSet']['PhysicalTableMap'][i]:
            temp_string = ds['DataSet']['PhysicalTableMap'][i][key]['DataSourceArn'].replace(v_src_account_id, v_tgt_account_id)
            # .get() instead of [] so a missing mapping is reported rather than raising:
            # the original 'if not replacement_dsn' guard was unreachable because the
            # subscript would have raised KeyError first.
            replacement_dsn = get_dsn_mapping_dict.get(temp_string[temp_string.find('/') + 1:])
            if not replacement_dsn:
                print("raise issue")
            else:
                temp_string = temp_string.replace(temp_string[temp_string.find('/') + 1:], replacement_dsn)
                ds['DataSet']['PhysicalTableMap'][i][key]['DataSourceArn'] = temp_string
    # Page through the target account's datasets to see whether this one already exists.
    v_datasets = []
    list_kwargs = dict(AwsAccountId=v_tgt_account_id, MaxResults=100)
    while True:
        datasets = tgt_client.list_data_sets(**list_kwargs)
        v_datasets.extend(
            (member['Arn'], member['DataSetId'], member.get('Name', ''),
             member['CreatedTime'], member['LastUpdatedTime'])
            for member in datasets["DataSetSummaries"]
        )
        if 'NextToken' not in datasets:
            break
        list_kwargs['NextToken'] = datasets['NextToken']
    for dataset in v_datasets:
        if dataset[1] == ds['DataSet']['DataSetId']:
            print('dataset already exists. updating the dataset instead of creating.')
            ###run update here
            update_flag = 'Y'
            print("In Update")
            target_dataset = tgt_client.update_data_set(
                AwsAccountId=v_tgt_account_id,
                DataSetId=ds['DataSet']['DataSetId'],
                Name=ds['DataSet']['Name'],
                PhysicalTableMap=ds['DataSet']['PhysicalTableMap'],
                LogicalTableMap=ds['DataSet']['LogicalTableMap'],
                ImportMode=ds['DataSet']['ImportMode'])
            break
    if update_flag == 'N':
        print('dataset does not exists. creating the dataset')
        target_dataset = tgt_client.create_data_set(
            AwsAccountId=v_tgt_account_id,
            DataSetId=ds['DataSet']['DataSetId'],
            Name=ds['DataSet']['Name'],
            PhysicalTableMap=ds['DataSet']['PhysicalTableMap'],
            LogicalTableMap=ds['DataSet']['LogicalTableMap'],
            ImportMode=ds['DataSet']['ImportMode'])
    # Grant the migrating user full dataset permissions so it is usable from the UI.
    tgt_client.update_data_set_permissions(
        AwsAccountId=v_tgt_account_id,
        DataSetId=ds['DataSet']['DataSetId'],
        GrantPermissions=[
            {
                'Principal': 'arn:aws:quicksight:us-east-1:' + v_tgt_account_id + ':user/default/' + v_role + '/' + v_user,
                'Actions': [
                    'quicksight:UpdateDataSetPermissions','quicksight:DescribeDataSetPermissions','quicksight:PassDataSet','quicksight:CreateIngestion','quicksight:DescribeIngestion','quicksight:CancelIngestion','quicksight:ListIngestions','quicksight:UpdateDataSet','quicksight:DescribeDataSet','quicksight:DeleteDataSet'
                ]
            },
        ]
    )
# -
# ### <font color=pink>5 Create or Update Analysis</font>
# ###### The analysis requires Source Entity, which is obtained by modifying the definition of the dataset ARN in the src_dataset_ARN_list and storing it in the tgt_dataset_ARN_list.
# ###### Check if the analysis already exists in the target account. If it does, update it, else create it. Use the template ARN for the source template, for which we modified the permissions in the Template step.
# ###### Update the permissions on the analysis, so it can be accessed from Quicksight.
# +
# Build the target-account DataSetReferences by swapping the account id inside each
# source dataset ARN.  A *new* dict is created on every iteration: the original reused
# a single temp dict for all appends, so the list ended up holding N references to the
# same object (every entry equal to the last dataset) — a mutable-aliasing bug.
tgt_dataset_ARN_List = []
for i in src_dataset_ARN_list:
    temp_tgt_dict = {
        'DataSetPlaceholder': i['DataSetPlaceholder'],
        'DataSetArn': i['DataSetArn'].replace(v_src_account_id, v_tgt_account_id),
    }
    tgt_dataset_ARN_List.append(temp_tgt_dict)
# Update the analysis if it already exists in the target account, otherwise create it,
# in both cases sourcing it from the shared template.
if tgt_analysisId:
    print('Analysis exists in Target. Executing the steps for Update.')
    ###call update analysis function here. we can use the template for source to update the analysis with tgt_analysisId
    tgt_client.update_analysis(
        AwsAccountId=v_tgt_account_id,
        AnalysisId=src_analysisId,
        Name=tgt_analysis_name,
        SourceEntity={
            'SourceTemplate': {
                'DataSetReferences': tgt_dataset_ARN_List,
                'Arn': template_desc['Template']['Arn']
            }
        }
    )
else:
    print('Analysis does not exist in Target. Executing the steps for Analysis creation.')
    ###call create analysis function here. we can potentially use the src_analysisId to store as the analysis ID during report creation
    tgt_client.create_analysis(
        AwsAccountId=v_tgt_account_id,
        AnalysisId=src_analysisId,
        Name=src_analysis_name,
        SourceEntity={
            'SourceTemplate': {
                'DataSetReferences': tgt_dataset_ARN_List,
                'Arn': template_desc['Template']['Arn']
            }
        }
    )
# Creation/update is asynchronous: if the status is not *_SUCCESSFUL yet, wait two
# minutes and describe again before deciding.
tgt_analysis_desc = tgt_client.describe_analysis(
    AwsAccountId=v_tgt_account_id,
    AnalysisId=src_analysisId
)
if tgt_analysis_desc['Analysis']['Status'].find('_SUCCESSFUL') == -1:
    time.sleep(120)
    tgt_analysis_desc = tgt_client.describe_analysis(
        AwsAccountId=v_tgt_account_id,
        AnalysisId=src_analysisId
    )
if tgt_analysis_desc['Analysis']['Status'].find('_SUCCESSFUL') == -1:
    print("Analysis could not be successfully created/updated. Please check the configurations and re-execute the steps")
else:
    # Single permissions call replaces the two identical calls the original repeated
    # in both success branches; also fixes the 'succefully' typo in the message.
    print("Analysis was successfully created/updated. Proceeding with the next step of updating permissions.")
    tgt_client.update_analysis_permissions(
        AwsAccountId=v_tgt_account_id,
        AnalysisId=src_analysisId,
        GrantPermissions=[
            {
                'Principal': 'arn:aws:quicksight:us-east-1:' + v_tgt_account_id + ':user/default/' + v_role + '/' + v_user,
                'Actions': ['quicksight:RestoreAnalysis', 'quicksight:UpdateAnalysisPermissions', 'quicksight:DeleteAnalysis', 'quicksight:QueryAnalysis', 'quicksight:DescribeAnalysisPermissions', 'quicksight:DescribeAnalysis', 'quicksight:UpdateAnalysis']
            },
        ]
    )
# -
# ### **<font color=pink>6 Create or Update Dashboard**</font>
# ###### The Dashboard requires Source Entity, which is obtained by modifying the definition of the dataset ARN in the src_dataset_ARN_list and storing it in the tgt_dataset_ARN_list.
# ###### Check if the Dashboard already exists in the target account. If it does, update it, else create it. Use the template ARN for the source template, for which we modified the permissions in the Template step.
# ###### Update the permissions on the Dashboard, so it can be accessed from Quicksight.
# ### <font color=yellow>6.1 Gather the dashboard details from Source
# +
# Gather the source dashboard (Arn, DashboardId, Name, PublishedVersionNumber,
# CreatedTime, LastUpdatedTime) that matches v_dashboard_id.
src_dashboardId = ''
src_dashboardArn = ''
src_dashboardName = ''
v_dashboard = []
list_kwargs = dict(AwsAccountId=v_src_account_id, MaxResults=100)
while True:
    dashboards = client.list_dashboards(**list_kwargs)
    v_dashboard.extend(
        (member['Arn'], member['DashboardId'], member.get('Name', ''),
         member['PublishedVersionNumber'], member['CreatedTime'], member['LastUpdatedTime'])
        for member in dashboards["DashboardSummaryList"]
    )
    # Bug fix: the continuation token must come from the *dashboards* response;
    # the original read analyses['NextToken'], a stale variable from an earlier cell.
    if 'NextToken' not in dashboards:
        break
    list_kwargs['NextToken'] = dashboards['NextToken']
for dashboard in v_dashboard:
    if dashboard[1] == v_dashboard_id:
        src_dashboardId = dashboard[1]
        src_dashboardArn = dashboard[0]
        src_dashboardName = dashboard[2]
        break
# -
# ### <font color=yellow>6.2 Gather the dashboard details from Target
# +
# Same lookup against the target account, used to decide between update and create.
tgt_dashboardId = ''
tgt_dashboardArn = ''
tgt_dashboardName = ''
v_dashboard = []
list_kwargs = dict(AwsAccountId=v_tgt_account_id, MaxResults=100)
while True:
    tgt_dashboards = tgt_client.list_dashboards(**list_kwargs)
    v_dashboard.extend(
        (member['Arn'], member['DashboardId'], member.get('Name', ''),
         member['PublishedVersionNumber'], member['CreatedTime'], member['LastUpdatedTime'])
        for member in tgt_dashboards["DashboardSummaryList"]
    )
    # Bug fix: paginate with tgt_dashboards['NextToken'] — the original used
    # analyses['NextToken'] left over from a much earlier cell.
    if 'NextToken' not in tgt_dashboards:
        break
    list_kwargs['NextToken'] = tgt_dashboards['NextToken']
for dashboard in v_dashboard:
    if dashboard[1] == v_dashboard_id:
        tgt_dashboardId = dashboard[1]
        tgt_dashboardArn = dashboard[0]
        tgt_dashboardName = dashboard[2]
        break
# -
print("src_dashboardId: " + src_dashboardId)
print("tgt_dashboardId: " + tgt_dashboardId)
# ### <font color=yellow>6.3 Create/Update the Dashboard in Target
# +
if tgt_dashboardId:
    print('Dashboard exists in Target. Executing the steps for Update.')
    ###call update analysis function here. we can use the template for source to update the analysis with tgt_analysisId
    # Rebuild the existing dashboard from the source-account template.
    dashboard_response_update=tgt_client.update_dashboard(
        AwsAccountId=v_tgt_account_id,
        DashboardId=tgt_dashboardId,
        Name=tgt_dashboardName,
        SourceEntity={
            'SourceTemplate': {
                'DataSetReferences': tgt_dataset_ARN_List,
                'Arn': template_desc['Template']['Arn']
            }
        }
    )
    # Fixed wait for the asynchronous update before the next cell reads the version.
    # NOTE(review): a poll loop would be more robust than a blanket 120 s sleep.
    time.sleep(120)
else:
    print('Dashboard does not exist in Target. Executing the steps for Dashboard creation.')
    ###call create analysis function here. we can potentially use the src_analysisId to store as the analysis ID during report creation
    # Create the dashboard with the same id/name as the source, granting the
    # migrating user full dashboard permissions up front.
    dashboard_response_create=tgt_client.create_dashboard(
        AwsAccountId=v_tgt_account_id,
        DashboardId=src_dashboardId,
        Name=src_dashboardName,
        Permissions=[
            {
                'Principal': 'arn:aws:quicksight:us-east-1:'+v_tgt_account_id+':user/default/'+v_role+'/'+v_user,
                "Actions": ["quicksight:DescribeDashboard","quicksight:ListDashboardVersions","quicksight:UpdateDashboardPermissions","quicksight:QueryDashboard","quicksight:UpdateDashboard","quicksight:DeleteDashboard","quicksight:DescribeDashboardPermissions","quicksight:UpdateDashboardPublishedVersion"]
            },
        ],
        SourceEntity={
            'SourceTemplate': {
                'DataSetReferences': tgt_dataset_ARN_List,
                'Arn': template_desc['Template']['Arn']
            }
        },
        # Publish options: ad-hoc filtering disabled, CSV export enabled,
        # sheet controls collapsed by default.
        DashboardPublishOptions={
            'AdHocFilteringOption': {
                'AvailabilityStatus': 'DISABLED'
            },
            'ExportToCSVOption': {
                'AvailabilityStatus': 'ENABLED'
            },
            'SheetControlsOption': {
                'VisibilityState': 'COLLAPSED'
            }
        }
    )
    time.sleep(120)
# Determine the version number the create/update call just produced; exactly one of
# the two response variables is defined, depending on which branch ran above.
try:
    published_version_number = int(dashboard_response_create['VersionArn'].replace(dashboard_response_create['Arn'] + '/version/', ''))
except NameError:
    # NameError (not a bare except): dashboard_response_create only exists on the create path.
    published_version_number = int(dashboard_response_update['VersionArn'].replace(dashboard_response_update['Arn'] + '/version/', ''))
tgt_dashboard_desc = tgt_client.describe_dashboard(
    AwsAccountId=v_tgt_account_id,
    DashboardId=src_dashboardId
)
# Provisioning is asynchronous: re-check once after two minutes if not successful yet.
if tgt_dashboard_desc['Dashboard']['Version']['Status'].find('_SUCCESSFUL') == -1:
    time.sleep(120)
    # Bug fixes vs. the original retry path: the duplicated 'tgt_dashboard_desc='
    # assignment is gone, and the status is re-read from
    # ['Dashboard']['Version']['Status'] — the original re-checked
    # ['Dashboard']['Status'], a different key than everywhere else in this flow.
    tgt_dashboard_desc = tgt_client.describe_dashboard(
        AwsAccountId=v_tgt_account_id,
        DashboardId=src_dashboardId
    )
if tgt_dashboard_desc['Dashboard']['Version']['Status'].find('_SUCCESSFUL') == -1:
    print("Dashboard could not be successfully created/updated. Please check the configurations and re-execute the steps")
else:
    # Single permissions/publish sequence replaces the two near-identical copies the
    # original kept in both branches; message typos ('succefully', 'Punblishing') fixed.
    print("Dashboard was successfully created/updated. Proceeding with the next step of updating permissions.")
    tgt_client.update_dashboard_permissions(
        AwsAccountId=v_tgt_account_id,
        DashboardId=src_dashboardId,
        GrantPermissions=[
            {
                'Principal': 'arn:aws:quicksight:us-east-1:' + v_tgt_account_id + ':user/default/' + v_role + '/' + v_user,
                "Actions": ["quicksight:DescribeDashboard","quicksight:ListDashboardVersions","quicksight:UpdateDashboardPermissions","quicksight:QueryDashboard","quicksight:UpdateDashboard","quicksight:DeleteDashboard","quicksight:DescribeDashboardPermissions","quicksight:UpdateDashboardPublishedVersion"]
            },
        ]
    )
    print("permissions successfully updated. Publishing the latest version of the dashboard now")
    # Publish the version we just verified via describe_dashboard (the two branches of
    # the original disagreed on where the version number came from).
    tgt_client.update_dashboard_published_version(
        AwsAccountId=v_tgt_account_id,
        DashboardId=src_dashboardId,
        VersionNumber=tgt_dashboard_desc['Dashboard']['Version']['VersionNumber']
    )
|
QuickSightMigration/QuickSightMigration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from nltools.data import Brain_Data, Adjacency
from nltools.mask import expand_mask, roi_to_brain
from nltools.stats import fdr, threshold, fisher_r_to_z, one_sample_permutation
from sklearn.metrics import pairwise_distances
from nilearn.plotting import plot_glass_brain, plot_stat_map, view_img_on_surf, view_img
from nilearn.image import concat_imgs
from nilearn.image import load_img
from nilearn.image import resample_to_img
subjects = [105, 106, 107, 108, 109, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124]
condition = ["feedback_1", "feedback_2", "feedback_3", "feedback_4", "feedback_D", "feedback_K", "feedback_X", "feedback_Z"]

def _model_rdm(linked_pairs):
    """Binary model matrix: 1 on the diagonal and on each linked condition pair."""
    m = np.zeros((len(condition), len(condition)))
    m[np.diag_indices(len(condition))] = 1
    for a, b in linked_pairs:
        m[a, b] = 1
    return Adjacency(m, matrix_type='distance', labels=condition)

# All pairs within the first four conditions (feedback_1..feedback_4) and within the
# last four (feedback_D/K/X/Z); M3 links both groups.
_first_group = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
_second_group = [(4, 5), (4, 6), (4, 7), (5, 6), (5, 7), (6, 7)]

# M1
M1 = _model_rdm(_first_group)
M1.plot()
# M2
M2 = _model_rdm(_second_group)
M2.plot()
# M3
M3 = _model_rdm(_first_group + _second_group)
M3.plot()
# Load each ROI mask through one shared helper instead of four copy-pasted cells:
# resample to the study template grid, wrap in Brain_Data, and binarize at 0.95.
template = load_img('/data/projects/ru-highres/masks/bg_image.nii')

def _load_binary_mask(mask_path, verbose=False):
    """Resample *mask_path* onto the template grid and return a binarized Brain_Data mask."""
    resampled = resample_to_img(mask_path, template)
    if verbose:
        # Mirrors the shape print the original code did for the first mask only.
        print(resampled.shape)
    m = Brain_Data(resampled)
    return m.threshold(upper=0.95, binarize=True, coerce_nan=True)

mask1 = _load_binary_mask('/data/projects/ru-highres/masks/a_hpc.nii.gz', verbose=True)
mask1.plot()
plot_glass_brain(mask1.to_nifti())
mask2 = _load_binary_mask('/data/projects/ru-highres/masks/p_hpc.nii.gz')
mask2.plot()
plot_glass_brain(mask2.to_nifti())
mask3 = _load_binary_mask('/data/projects/ru-highres/masks/executive_7sub.nii.gz')
mask3.plot()
plot_glass_brain(mask3.to_nifti())
mask4 = _load_binary_mask('/data/projects/ru-highres/masks/limbic_7sub.nii.gz')
mask4.plot()
plot_glass_brain(mask4.to_nifti())
# +
datapath = '/data/projects/ru-highres/derivatives/fsl/'
# Representational similarity analysis: for each ROI mask, build a subject-level
# pattern-similarity matrix across the 8 feedback conditions, average it across
# subjects, then correlate the average with the three model matrices (M1/M2/M3).
for m in [mask1,mask2,mask3,mask4]:
    print(m)
    feedback_pattern = []
    for sub in subjects:
        # create a list of beta maps per condition
        # (first four copes: affective task; last four: inference task L2 z-stats)
        file_list9 = [os.path.join(datapath,"sub-"+str(sub),"L2_task-aff_model-01.gfeat","cope1.feat","stats","zstat1.nii.gz"),
                      os.path.join(datapath,"sub-"+str(sub),"L2_task-aff_model-01.gfeat","cope2.feat","stats","zstat1.nii.gz"),
                      os.path.join(datapath,"sub-"+str(sub),"L2_task-aff_model-01.gfeat","cope3.feat","stats","zstat1.nii.gz"),
                      os.path.join(datapath,"sub-"+str(sub),"L2_task-aff_model-01.gfeat","cope4.feat","stats","zstat1.nii.gz"),
                      os.path.join(datapath,"sub-"+str(sub),"L2_task-inf_model-03_subj.gfeat","cope1.feat","stats","zstat1.nii.gz"),
                      os.path.join(datapath,"sub-"+str(sub),"L2_task-inf_model-03_subj.gfeat","cope2.feat","stats","zstat1.nii.gz"),
                      os.path.join(datapath,"sub-"+str(sub),"L2_task-inf_model-03_subj.gfeat","cope3.feat","stats","zstat1.nii.gz"),
                      os.path.join(datapath,"sub-"+str(sub),"L2_task-inf_model-03_subj.gfeat","cope4.feat","stats","zstat1.nii.gz")]
        # put all the beta maps into a Brain_Data object
        beta = Brain_Data(file_list9)
        # compute pairwise correlation between each betamap's masked area
        # (1 - correlation distance = correlation similarity)
        sub_pattern_similarity = 1- beta.apply_mask(m).distance(metric = 'correlation')
        sub_pattern_similarity.labels = ["aff_1","aff_2","aff_3","aff_4","inf_1","inf_2","inf_3","inf_4"]
        feedback_pattern.append(sub_pattern_similarity)
    # Element-wise mean of the per-subject similarity matrices.
    feedback_avg = Adjacency(feedback_pattern).mean(axis = 0)
    feedback_avg.labels =["aff_1","aff_2","aff_3","aff_4","inf_1","inf_2","inf_3","inf_4"]
    feedback_avg.plot()
    #correlate two matrices
    # Spearman correlation between the observed similarity matrix and each model,
    # ignoring the diagonal; n_permute=0 skips the permutation test.
    cross_task1 = feedback_avg.similarity(M1, metric = 'spearman', n_permute = 0,ignore_diagonal=True)
    cross_task_r1 = cross_task1['correlation']
    print(cross_task_r1)
    cross_task2 = feedback_avg.similarity(M2, metric = 'spearman', n_permute = 0, ignore_diagonal=True)
    cross_task_r2 = cross_task2['correlation']
    print(cross_task_r2)
    cross_task3 = feedback_avg.similarity(M3, metric = 'spearman', n_permute = 0, ignore_diagonal=True)
    cross_task_r3 = cross_task3['correlation']
    print(cross_task_r3)
# -
|
code/RSA_post.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# # Verifying your connection
#
# In this notebook we will show how to connect to the Dimensions on Google BigQuery using Python, so that we can then run a few sample queries.
# In order to run this tutorial, please ensure that:
#
# * You have a valid Dimensions on Google BigQuery [account](https://www.dimensions.ai/products/bigquery/)
# * You have configured your Google Cloud Platform environment (see the [official docs](https://docs.dimensions.ai/bigquery/gcp-setup.html#) on how to do that)
#
#
# + [markdown] Collapsed="false"
# ## Connection methods
#
# There are a few options available:
#
# 1. **Use Google Colaboratory and your personal credentials.** This option is the simplest of all, as it doesn't require you to install anything on your computer. It is normally ok for small to mid-sized projects that can live in the cloud.
# 2. **Use a local Jupyter environment and your personal credentials.** This option requires you to install the Google Cloud SDK in order to authenticate. It is the best option if you want to work locally and/or have other Python libraries or services that you need to access.
# 3. **Use a local Jupyter environment and a service account.** This option is really a variance on the option 2, for those users that must use a service account.
#
# NOTE All of these options require you to first set up a [GCP project](https://docs.dimensions.ai/bigquery/gcp-setup.html#projects) (as you haven't done it already) and provide your project ID. E.g.:
# + Collapsed="false"
MY_PROJECT_ID = "my-cool-gbq-project"
# + [markdown] Collapsed="false"
# ### Option 1: using Google Colaboratory and your personal credentials
#
# [Google Colaboratory](https://colab.research.google.com/) is a free cloud-based Jupyter environment from Google. This option provides an easy service allowing you to get started with notebooks.
#
# Using your Google Account you can create notebooks, execute BigQuery queries and share these with other Google Accounts quickly and easily.
#
#
# + Collapsed="false"
# authentication happens via your browser
from google.colab import auth
auth.authenticate_user()
print('Authenticated')
MY_PROJECT_ID = "my-cool-gbq-project"
from google.cloud import bigquery
client = bigquery.Client(project=MY_PROJECT_ID)
# + [markdown] Collapsed="false"
# ### Option 2: using a local Jupyter and your personal credentials
#
# A Google Account represents a developer, an administrator, or any other person who interacts with Google Cloud.
# This is normally the Google account one has used to get access to the Dimensions on BigQuery product.
#
# In order to configure programmatic access for local development, the easiest way is to authenticate using the [Google Cloud SDK](https://googleapis.dev/python/google-api-core/latest/auth.html).
#
# ```
# $ gcloud auth application-default login
# ```
#
# Note: the command above should be run from a Terminal or console. This will generate a JSON file that is used as the default application credentials for the account that was selected in the above login process. When using the default Client for each Google provided package (such as BigQuery) they should automatically authenticate using these default credentials.
# + Collapsed="false"
# install python client library
# !pip install google-cloud-bigquery -U --quiet
# + Collapsed="false"
from google.cloud import bigquery
MY_PROJECT_ID = "my-cool-gbq-project"
client = bigquery.Client(project=MY_PROJECT_ID)
# + [markdown] Collapsed="false"
# ### Option 3: using a local Jupyter and a service account
#
# A [service account](https://cloud.google.com/iam/docs/service-accounts) is a special kind of account used by an application or a virtual machine (VM) instance, not a person.
#
# Each service account is associated with two sets of public/private RSA key pairs that are used to authenticate to Google: Google-managed keys, and user-managed keys.
#
# When using a service account you'd just have to point your client object to the a key file.
# + Collapsed="false"
from google.cloud import bigquery
credentials_file = 'my-awesome-gbq-project-47616836.json'
MY_PROJECT_ID = "my-cool-gbq-project"
# Explicitly use service account credentials by specifying the private key file
client = bigquery.Client.from_service_account_json(credentials_file)
# + [markdown] Collapsed="false"
# ## Running queries
#
# Once the connection is set up, all you have to do is to type in a SQL query and run it using the `client` object.
# + Collapsed="false"
# Query: Top publications from Oxford univ. by Altmetric Score in 2020
query_1 = """
SELECT
id,
title.preferred as title,
ARRAY_LENGTH(authors) as authors_count,
CAST(altmetrics.score as INT64) as altmetric_score
FROM
`dimensions-ai.data_analytics.publications`
WHERE
year = 2020 AND 'grid.4991.5' in UNNEST(research_orgs)
ORDER BY
altmetric_score DESC
LIMIT 5"""
# 1 - main syntax
query_job = client.query(query_1)
results = query_job.result() # Waits for job to complete.
for row in results:
print("> {} : {}\n\tAuthors: {}\n\tAltmetric Score: {}".format(row.id, row.title, row.authors_count, row.altmetric_score))
# + [markdown] Collapsed="false"
# A slightly different syntax is also possible
# + Collapsed="false"
# 2 - omit calling result()
query_job = client.query(query_1)
for row in query_job:
print(row)
# + [markdown] Collapsed="false"
# Another quite handy feature is to transform data directly into [Pandas dataframes](https://pandas.pydata.org/pandas-docs/)
# + Collapsed="false"
# 3 - return a dataframe
query_job = client.query(query_1).to_dataframe()
query_job
# + [markdown] Collapsed="false"
# ### Advanced: BigQuery magic command and dynamic parameters
#
# The GBQ library comes with a [magic command](https://googleapis.dev/python/bigquery/latest/magics.html) that is essentially a nice shortcut method for running queries.
#
# This extensions needs to be loaded sepately e.g.:
# + Collapsed="false"
# %load_ext google.cloud.bigquery
# + [markdown] Collapsed="false"
# We can then set up a couple of query parameters for the query itself, as well as the usual project ID value.
# + Collapsed="false"
project_id = MY_PROJECT_ID
bq_params = {}
bq_params["journal_id"] = "jour.1115214"
# + [markdown] Collapsed="false"
# Finally we can query by starting a cell with the command `%%bigquery ... `:
# + Collapsed="false"
# %%bigquery --params $bq_params --project $project_id
# Publications per year for Nature Biotechnology
SELECT
count(*) as pubs, year, journal.title
FROM
`dimensions-ai.data_analytics.publications`
WHERE
year >= 2010
AND journal.id = @journal_id
GROUP BY
year, journal.title
ORDER BY
year DESC
# + [markdown] Collapsed="false"
# ## Troubleshooting
#
# * Query fails with `to_dataframe() ArrowNotImplementedError`
# * Try reinstalling pyarrow ie `pip install pyarrow -U`
# * Query fails with `AttributeError: 'NoneType' object has no attribute 'transport'`
# * Try `pip install google-cloud-bigquery-storage -U` and restarting the notebook
|
archive/1-Verifying-your-connection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: ipykernel_py2
# ---
10 == 20 / 2   # True: 20 / 2 evaluates to 10 (Python 2 kernel: integer division)
10 != 10       # False
10 != 15       # True
100 > 50       # True
100 < 50       # False
15 >= 10 + 10  # False: 15 >= 20
15 <= 10 + 5   # True: 15 <= 15
|
11 - Introduction to Python/4_More on Operators/1_Comparison Operators (2:10)/Comparison Operators - Lecture_Py2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # End-To-End Example: Data Analysis of iSchool Classes
#
# In this end-to-end example we will perform a data analysis in Python Pandas we will attempt to answer the following questions:
#
# - What percentage of the schedule are undergrad (course number 500 or lower)?
# - What undergrad classes are on Friday? or at 8AM?
#
# Things we will demonstrate:
#
# - `read_html()` for basic web scraping
# - dealing with 5 pages of data
# - `append()` multiple `DataFrames` together
# - Feature engineering (adding a column to the `DataFrame`)
#
# The iSchool schedule of classes can be found here: https://ischool.syr.edu/classes
#
# +
import pandas as pd
# this turns off warning messages
import warnings
warnings.filterwarnings('ignore')
# -
# just figure out how to get the data
# read_html() returns a list of every <table> found on the page;
# the schedule is the first one.
website = 'https://ischool.syr.edu/classes/?page=1'
data = pd.read_html(website)
data[0]
# +
# Generate links to the other pages and combine all 7 pages of the
# schedule into a single DataFrame.
website = 'https://ischool.syr.edu/classes/?page='
# Collect each page's table in a list and concatenate once at the end.
# DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0,
# and appending inside the loop re-copies the frame each iteration.
page_frames = []
for i in [1, 2, 3, 4, 5, 6, 7]:
    link = website + str(i)
    page_classes = pd.read_html(link)
    page_frames.append(page_classes[0])
classes = pd.concat(page_frames, ignore_index=True)
# Cache the scraped schedule locally.
classes.to_csv('ischool-classes.csv')
# -
# let's read them all and append them to a single data frame
classes.sample(5)
# +
# Feature engineering: split the course code (e.g. "IST659") into
# subject ("IST") and course number ("659").
classes['Subject'] = classes['Course'].str[0:3]
classes['Number'] = classes['Course'].str[3:]
# -
# Classify each course. Use .loc for conditional assignment -- the
# chained form classes['Type'][mask] = ... triggers
# SettingWithCopyWarning and may silently fail to write through.
# NOTE(review): the comparison is lexicographic on strings; assumes
# all course numbers are 3 digits -- confirm against the data.
classes['Type'] = ""
classes.loc[classes['Number'] >= '500', 'Type'] = 'GRAD'
classes.loc[classes['Number'] < '500', 'Type'] = 'UGRAD'
# Undergraduate IST classes...
ist = classes[ classes['Subject'] == 'IST' ]
istug = ist[ ist['Type'] == 'UGRAD']
# ...that do NOT meet on Friday (no "F" in the Day string)...
istug_nof = istug[istug['Day'].str.find("F") == -1]
# ...and do not start at 8:00am.
istug_nof_or8am = istug_nof[~ istug_nof['Time'].str.startswith('8:00am')]
istug_nof_or8am
|
content/lessons/12/End-To-End-Example/In-Class-ETEE-Data-Analysis-Of-iSchool-Classes.ipynb
|
# ---
# jupyter:
# jupytext:
# formats: md,ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Computer Vision](https://www.kaggle.com/learn/computer-vision) course. You can reference the tutorial at [this link](https://www.kaggle.com/ryanholbrook/maximum-pooling).**
#
# ---
#
# # Introduction #
#
# In these exercises, you'll conclude the feature extraction begun in Exercise 2, explore how invariance is created by maximum pooling, and then look at a different kind of pooling: *average* pooling.
#
# Run the cell below to set everything up.
# +
# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.computer_vision.ex3 import *
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib import gridspec
import learntools.computer_vision.visiontools as visiontools
# Notebook-wide matplotlib defaults (tight layout, bold titles,
# 'magma' colormap for feature-map images).
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
       titleweight='bold', titlesize=18, titlepad=10)
plt.rc('image', cmap='magma')
# -
# Run this cell to get back to where you left off in the previous lesson. We'll use a predefined kernel this time.
# +
# Read image
image_path = '../input/computer-vision-resources/car_illus.jpg'
image = tf.io.read_file(image_path)
image = tf.io.decode_jpeg(image, channels=1)   # single-channel (grayscale)
image = tf.image.resize(image, size=[400, 400])
# Embossing kernel
kernel = tf.constant([
    [-2, -1, 0],
    [-1, 1, 1],
    [0, 1, 2],
])
# Reformat for batch compatibility.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.expand_dims(image, axis=0)               # add batch dimension
kernel = tf.reshape(kernel, [*kernel.shape, 1, 1])  # [H, W, in_ch, out_ch]
kernel = tf.cast(kernel, dtype=tf.float32)
# Filter step: convolve the image with the embossing kernel.
image_filter = tf.nn.conv2d(
    input=image,
    filters=kernel,
    strides=1,
    padding='VALID',
)
# Detect step: ReLU keeps only the positive responses.
image_detect = tf.nn.relu(image_filter)
# Show what we have so far
plt.figure(figsize=(12, 6))
plt.subplot(131)
plt.imshow(tf.squeeze(image), cmap='gray')
plt.axis('off')
plt.title('Input')
plt.subplot(132)
plt.imshow(tf.squeeze(image_filter))
plt.axis('off')
plt.title('Filter')
plt.subplot(133)
plt.imshow(tf.squeeze(image_detect))
plt.axis('off')
plt.title('Detect')
plt.show();
# -
# -
# # 1) Apply Pooling to Condense #
#
# For the last step in the sequence, apply maximum pooling using a $2 \times 2$ pooling window. You can copy this code to get started:
#
# ```
# image_condense = tf.nn.pool(
# input=image_detect,
# window_shape=____,
# pooling_type=____,
# strides=(2, 2),
# padding='SAME',
# )
# ```
# YOUR CODE HERE
# Condense step: 2x2 max pooling with stride 2 halves each spatial
# dimension, keeping only the strongest activation in each window.
image_condense = tf.nn.pool(
    input=image_detect,
    window_shape=(2, 2),
    pooling_type='MAX',
    strides=(2, 2),
    padding='SAME',
)
# Check your answer
q_1.check()
# Lines below will give you a hint or solution code
q_1.hint()
q_1.solution()
# Run the next cell to see what maximum pooling did to the feature!
# Visualize the effect of max pooling: the detected feature alongside
# its condensed version.
plt.figure(figsize=(8, 6))
panels = [(image_detect, "Detect (ReLU)"),
          (image_condense, "Condense (MaxPool)")]
for position, (tensor, caption) in enumerate(panels):
    plt.subplot(1, 2, position + 1)
    plt.imshow(tf.squeeze(tensor))
    plt.axis('off')
    plt.title(caption)
plt.show();
# We learned about how `MaxPool2D` layers give a convolutional network the property of **translation invariance** over small distances. In this exercise, you'll have a chance to observe this in action.
#
# This next code cell will randomly apply a small shift to a circle and then condense the image several times with maximum pooling. Run the cell once and make note of the image that results at the end.
# +
REPEATS = 4       # number of successive max-pooling passes
SIZE = [64, 64]   # starting image size
# Create a randomly shifted circle
image = visiontools.circle(SIZE, r_shrink=4, val=1)
image = tf.expand_dims(image, axis=-1)
image = visiontools.random_transform(image, jitter=3, fill_method='replicate')
image = tf.squeeze(image)
plt.figure(figsize=(16, 4))
plt.subplot(1, REPEATS+1, 1)
plt.imshow(image, vmin=0, vmax=1)
plt.title("Original\nShape: {}x{}".format(image.shape[0], image.shape[1]))
plt.axis('off')
# Now condense with maximum pooling several times
for i in range(REPEATS):
    ax = plt.subplot(1, REPEATS+1, i+2)
    # tf.nn.pool expects a batched, channeled tensor: [1, H, W, 1]
    image = tf.reshape(image, [1, *image.shape, 1])
    image = tf.nn.pool(image, window_shape=(2,2), strides=(2, 2), padding='SAME', pooling_type='MAX')
    image = tf.squeeze(image)   # back to [H, W] for display
    plt.imshow(image, vmin=0, vmax=1)
    plt.title("MaxPool {}\nShape: {}x{}".format(i+1, image.shape[0], image.shape[1]))
    plt.axis('off')
# -
# # 2) Explore Invariance #
#
#
# Suppose you had made a small shift in a different direction -- what effect would you expect that have on the resulting image? Try running the cell a few more times, if you like, to get a new random shift.
# View the solution (Run this code cell to receive credit!)
# q_2 is a free-response question, so there is no check() -- only the
# written solution from the learntools exercise module.
q_2.solution()
# # Global Average Pooling #
#
# We mentioned in the previous exercise that average pooling has largely been superseded by maximum pooling within the convolutional base. There is, however, a kind of average pooling that is still widely used in the *head* of a convnet. This is **global average pooling**. A `GlobalAvgPool2D` layer is often used as an alternative to some or all of the hidden `Dense` layers in the head of the network, like so:
#
# ```
# model = keras.Sequential([
# pretrained_base,
# layers.GlobalAvgPool2D(),
# layers.Dense(1, activation='sigmoid'),
# ])
# ```
#
# What is this layer doing? Notice that we no longer have the `Flatten` layer that usually comes after the base to transform the 2D feature data to 1D data needed by the classifier. Now the `GlobalAvgPool2D` layer is serving this function. But, instead of "unstacking" the feature (like `Flatten`), it simply replaces the entire feature map with its average value. Though very destructive, it often works quite well and has the advantage of reducing the number of parameters in the model.
#
# Let's look at what `GlobalAvgPool2D` does on some randomly generated feature maps. This will help us to understand how it can "flatten" the stack of feature maps produced by the base.
#
# Run this next cell a few times until you get a feel for how this new layer works.
# +
# Eight random 5x5 feature maps to demonstrate global average pooling.
feature_maps = [visiontools.random_map([5, 5], scale=0.1, decay_power=4) for _ in range(8)]
gs = gridspec.GridSpec(1, 8, wspace=0.01, hspace=0.01)
plt.figure(figsize=(18, 2))
for i, feature_map in enumerate(feature_maps):
    plt.subplot(gs[i])
    plt.imshow(feature_map, vmin=0, vmax=1)
    plt.axis('off')
plt.suptitle('Feature Maps', size=18, weight='bold', y=1.1)
plt.show()
# reformat for TensorFlow: [batch, H, W, channels]
feature_maps_tf = [tf.reshape(feature_map, [1, *feature_map.shape, 1])
                   for feature_map in feature_maps]
global_avg_pool = tf.keras.layers.GlobalAvgPool2D()
# Each 5x5 map collapses to a single average value.
pooled_maps = [global_avg_pool(feature_map) for feature_map in feature_maps_tf]
# Stack the eight scalars into a column image for display.
img = np.array(pooled_maps)[:,:,0].T
plt.imshow(img, vmin=0, vmax=1)
plt.axis('off')
plt.title('Pooled Feature Maps')
plt.show();
# -
# Since each of the $5 \times 5$ feature maps was reduced to a single value, global pooling reduced the number of parameters needed to represent these features by a factor of 25 -- a substantial savings!
#
# Now we'll move on to understanding the pooled features.
#
# After we've pooled the features into just a single value, does the head still have enough information to determine a class? This part of the exercise will investigate that question.
#
# Let's pass some images from our *Car or Truck* dataset through VGG16 and examine the features that result after pooling. First run this cell to define the model and load the dataset.
# +
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing import image_dataset_from_directory
# Load VGG16 (pretrained convolutional base, saved without a head)
pretrained_base = tf.keras.models.load_model(
    '../input/cv-course-models/cv-course-models/vgg16-pretrained-base',
)
model = keras.Sequential([
    pretrained_base,
    # Attach a global average pooling layer after the base
    layers.GlobalAvgPool2D(),
])
# Load dataset
ds = image_dataset_from_directory(
    '../input/car-or-truck/train',
    labels='inferred',
    label_mode='binary',
    image_size=[128, 128],
    interpolation='nearest',
    batch_size=1,     # one image at a time so each example can be inspected
    shuffle=True,
)
ds_iter = iter(ds)
# -
# Notice how we've attached a `GlobalAvgPool2D` layer after the pretrained VGG16 base. Ordinarily, VGG16 will produce 512 feature maps for each image. The `GlobalAvgPool2D` layer reduces each of these to a single value, an "average pixel", if you like.
#
# This next cell will run an image from the *Car or Truck* dataset through VGG16 and show you the 512 average pixels created by `GlobalAvgPool2D`. Run the cell a few times and observe the pixels produced by cars versus the pixels produced by trucks.
# +
# Pull one (image, label) example and run it through base + GlobalAvgPool.
car = next(ds_iter)
car_tf = tf.image.resize(car[0], size=[128, 128])
car_features = model(car_tf)
# Arrange the 512 pooled values into a 16x32 grid for display.
car_features = tf.reshape(car_features, shape=(16, 32))
label = int(tf.squeeze(car[1]).numpy())
plt.figure(figsize=(8, 4))
plt.subplot(121)
plt.imshow(tf.squeeze(car[0]))
plt.axis('off')
plt.title(["Car", "Truck"][label])
plt.subplot(122)
plt.imshow(car_features)
plt.title('Pooled Feature Maps')
plt.axis('off')
plt.show();
# -
# # 3) Understand the Pooled Features #
#
# What do you see? Are the pooled features for cars and trucks different enough to tell them apart? How would you interpret these pooled values? How could this help the classification? After you've thought about it, run the next cell for an answer. (Or see a hint first!)
# View the solution (Run this code cell to receive credit!)
q_3.check()   # NOTE(review): comment says "solution" but this calls check() -- confirm intended
# Line below will give you a hint
q_3.hint()
# Global average pooling is often used in modern convnets. One big advantage is that it greatly reduces the number of parameters in a model, while still telling you if some feature was present in an image or not -- which for classification is usually all that matters. If you're creating a convolutional classifier it's worth trying out!
#
# # Conclusion #
#
# In this lesson we explored the final operation in the feature extraction process: **condensing** with **maximum pooling**. Pooling is one of the essential features of convolutional networks and helps provide them with some of their characteristic advantages: efficiency with visual data, reduced parameter size compared to dense networks, translation invariance. We've seen that it's used not only in the base during feature extraction, but also can be used in the head during classification. Understanding it is essential to a full understanding of convnets.
#
# # Keep Going #
#
# In the next lesson, we'll conclude our discussion of the feature extraction operations with **sliding windows**, the typical way of describing how the convolution and pooling operations scan over an image. We'll describe here the final two parameters in the `Conv2D` and `MaxPool2D` layers: `strides` and `padding`. [**Check it out**](https://www.kaggle.com/ryanholbrook/the-sliding-window) now!
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/196537) to chat with other Learners.*
|
exercise-maximum-pooling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 5.2 Programming Exercise: Create a Small Data Warehouse
# In this exercise, you will create a small data warehouse using Spark to save data as if it were a table in a typical relational database. Once you create this data warehouse, you can query the tables you created using Structured Query Language (SQL).
#
# For this exercise, you will execute your Spark SQL within a Python program, but if you are using a typical Hadoop distribution, there are many ways you can connect those tables to existing tools as if it were a normal, relational database. Spark SQL natively supports reading and writing data managed by Apache Hive. Spark can act as a distributed SQL engine allowing you to connect to any tool with JDBC/ODBC support. You can also integrate Spark with big data tools like Apache Phoenix and normal relational databases.
#
# For this exercise, you will be creating tables using U.S. Gazetteer files provided by the United States Census Bureau. These files provide a listing of geographic areas for selected areas. You can find the Gazetteer files for 2017 and 2018 in the data directory under the gazetteer folder. These directories contain data for congressional districts, core statistical areas, counties, county subdivisions, schools, census tracts, urban areas, zip codes, and places of interest. You will combine the data from 2017 and 2018, and create tables with the filename of the source (e.g., places.csv is saved in the places table).
#
#
# # 1. Gazetteer Data
# ## a. Create Unmanaged Tables
#
#
# The first step of this assignment involves loading the data from the CSV files, combining the file with the file for the other year, and saving it to disk as a table. The following code should provide a template to help you combine tables and save them to the warehouse directory.
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark import SparkContext
# Local-mode Spark context using all available cores.
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
#import pandas
# +
# Create file paths including filenames
file_2017_path = r'/home/ram/share/650/dsc650-master/data/gazetteer/2017/places.csv'
file_2018_path = r'/home/ram/share/650/dsc650-master/data/gazetteer/2018/places.csv'
warehouse_dir = r'/home/ram/Documents/spark-warehouse/'
# -
# SparkSession pointed at the warehouse directory so saveAsTable()
# writes table data there.
spark = SparkSession.builder.appName('week5') \
    .config("spark.sql.warehouse.dir", warehouse_dir) \
    .getOrCreate()
# +
# Load the 2017 and 2018 'places' files, letting Spark infer column
# types from the CSV data.
df1 = spark.read.load(
    file_2017_path,
    format='csv',
    sep=',',
    inferSchema=True,
    header=True
)
df2 = spark.read.load(
    file_2018_path,
    format='csv',
    sep=',',
    inferSchema=True,
    header=True
)
# -
# Stack the two years into one DataFrame. union() is the current API;
# unionAll() has been deprecated since Spark 2.0 (both keep duplicates).
# NOTE: union() matches columns by position, so this assumes both years
# share the same column order.
df = df1.union(df2)
df.head(3)
# Persist the combined data as a table under the warehouse directory.
df.write.saveAsTable('places', path = warehouse_dir)
# +
#df.write.saveAsTable('places', mode = 'overwrite', path = '/home/ram/share/650/spark-warehouse/')
# -
# For each CSV file in the 2017 and 2018 directories, load the data into Spark, combine it with the corresponding data from the other year and save it to disk.
# +
import os
# Directories holding one CSV per geography type for each year.
files_2017_path = r'/home/ram/share/650/dsc650-master/data/gazetteer/2017'
files_2018_path = r'/home/ram/share/650/dsc650-master/data/gazetteer/2018'
#os.listdir("/home/ram/share/650/dsc650-master/data/gazetteer/2017")
fileslist_2017 = os.listdir(files_2017_path)
fileslist_2018 = os.listdir(files_2018_path)
# -
# getting list of files in the directory
import glob
print(glob.glob("/home/ram/share/650/dsc650-master/data/gazetteer/2017/*.csv"))
# list of files in 2017 directory
fileslist_2017
# list of files in 2018 directory
fileslist_2018
def saveastable(file, warehouse_dir):
    """
    Combine the 2017 and 2018 versions of a Gazetteer CSV file and save
    the result as an external Spark table.

    inputs: file -- CSV filename (e.g. 'places.csv') present in both the
                    2017 and 2018 directories (module-level paths)
            warehouse_dir -- root directory of the Spark warehouse
    output: none
    side effect: writes a Parquet table, named after the file without its
                 extension, under warehouse_dir and registers it with Spark
    """
    file1_path = os.path.join(files_2017_path, file)
    file2_path = os.path.join(files_2018_path, file)
    df1 = spark.read.load(
        file1_path,
        format='csv',
        sep=',',
        inferSchema=True,
        header=True
    )
    df2 = spark.read.load(
        file2_path,
        format='csv',
        sep=',',
        inferSchema=True,
        header=True
    )
    # union() matches columns by position; unionAll() is deprecated since
    # Spark 2.0.
    df = df1.union(df2)
    # BUG FIX: derive the table name from the 'file' parameter. The
    # original read the global loop variable 'i', which only worked by
    # accident because the caller happened to set i to the same filename.
    tablename = os.path.splitext(file)[0]
    tblwarehouse_dir = os.path.join(warehouse_dir, tablename)
    df.write.saveAsTable(tablename, mode = 'overwrite', path = tblwarehouse_dir )
    print(" Table created for - ", tablename)
# loops through each file and merges the 2017 & 2018 data for same file and saves in warehouse directory
table_names = []
for i in fileslist_2017:
    filename = i
    tablename = os.path.splitext(i)[0]   # table name = filename without extension
    table_names.append(tablename)
    file_path_name = os.path.join(files_2017_path,i)
    # progress/debug output
    print(filename)
    print(tablename)
    print(file_path_name)
    #call function to merge and create unified parquet file and save it in warehouse directory
    saveastable(filename, warehouse_dir)
# Once you have finished saving all of the files as tables, verify that you have loaded the files properly by loading the tables into Spark, and performing a simple row count on each table.
# +
# I did not use this code, as I have already created tables in the above step
def create_external_table(table_name):
    """Register the on-disk data under warehouse_dir/table_name as an external Spark table."""
    location = os.path.join(warehouse_dir, table_name)
    return spark.catalog.createExternalTable(table_name, location)
def create_external_tables():
    """Register every table listed in table_names as an external table."""
    for name in table_names:
        create_external_table(name)
# +
# show existing tables
table_names
#for i in tablelist:
#    create_external_tables()
# -
#Getting list of existing tables in spark warehouse
spark.catalog.listTables()
# sanity check: row count of the combined places table
spark.sql("select count(*) as row_count from places").show()
# printing count of rows from each of the merged tables
for i in table_names:
    sqlstring = "select count(*) as row_count from " + i
    print("Count of rows from table - ", i)
    spark.sql(sqlstring).show()
# ## b. Load and Query Tables
#
# Now that we have saved the data to external tables, we will load the tables back into Spark and create a report using Spark SQL. For this report, we will create a report on school districts for the states of Nebraska and Iowa using the elementary_schools, secondary_schools and unified_school_districts tables.
#
# Using Spark SQL, create a report with the following information.
# Peek at the three school-district tables used for the report.
spark.sql("select * from elementary_schools").show(3)
spark.sql("select * from secondary_schools").show(3)
spark.sql("select * from unified_school_districts").show(3)
# reading spark tables and printing schema
elementary_schools_df = spark.read.table("elementary_schools")
print("Number of rows ", elementary_schools_df.count())
elementary_schools_df.printSchema()
# reading spark tables and printing schema
secondary_schools_df = spark.read.table("secondary_schools")
print("Number of rows ", secondary_schools_df.count())
secondary_schools_df.printSchema()
# reading spark tables and printing schema
unified_school_districts_df = spark.read.table("unified_school_districts")
print("Number of rows ", unified_school_districts_df.count())
unified_school_districts_df.printSchema()
# es_ilne = spark.sql("""select state, year, count(*) as Elementary
# from elementary_schools
# where state = 'IA' or state = 'NE'
# or state = 'NJ' or state = 'IL'
# group by state, year""").collect()
# I have joined whole data set based on state and year and not just for those 2 states, as Elementary and Seconday school data sets do not have data for these states.
# +
es_ilne = spark.sql("""select state, year, count(*) as Elementary
from elementary_schools
group by state, year""").collect()
#converting list to data frame
es_ilne_df = sc.parallelize(es_ilne).toDF()
es_ilne_df.printSchema()
# -
es_ilne_df.show()
ss_ilne = spark.sql("""select state, year, count(*) as Secondary
from secondary_schools
group by state, year""").collect()
#converting list to data frame
ss_ilne_df = sc.parallelize(ss_ilne).toDF()
ss_ilne_df.printSchema()
ss_ilne_df.show()
# +
usd_ilne = spark.sql("""select state, year, count(*) as Unified
from unified_school_districts
group by state, year""").collect()
#converting list to data frame
usd_ilne_df = sc.parallelize(usd_ilne).toDF()
usd_ilne_df.printSchema()
# -
# renaming columns to preserve for later stages
usd_ilne_df = usd_ilne_df.withColumnRenamed("state","JState")\
.withColumnRenamed("year","JYear")
usd_ilne_df.show()
joinexpression = [usd_ilne_df.JState == es_ilne_df.state, usd_ilne_df.JYear == es_ilne_df.year]
joinType = "left_outer"
reportdf1 = usd_ilne_df.join(es_ilne_df,joinexpression,joinType)#.collect()
reportdf1 = reportdf1.drop("state", "year")
reportdf1.show()
joinexpression = [reportdf1.JState == ss_ilne_df.state, reportdf1.JYear == ss_ilne_df.year]
joinType = "left_outer"
# +
reportdf = reportdf1.join(ss_ilne_df,joinexpression,joinType)#.collect()
reportdf = reportdf.drop("state", "year")
reportdf = reportdf.withColumnRenamed("JState","State")\
.withColumnRenamed("JYear","Year")
reportdf = reportdf.select("State", "Year", "Elementary", "Secondary", "Unified")\
.sort("State", "Year")
reportdf.show()
# -
# saving the summarized report for all states as spark table in warehouse
tablename = 'Allstates_counts'
tblwarehouse_dir = os.path.join(warehouse_dir,tablename)
reportdf.write.saveAsTable(tablename, mode = 'overwrite', path = tblwarehouse_dir )
# +
# using spark SQL analyse the table for few states
summary_fewstates = spark.sql("""select *
from Allstates_counts
where state In ('NE' ,'IA')
order by state""")
#print the report output for states IA & NE
summary_fewstates.show()
# -
# # 2. Flight Data
#
#
# In the previous exercise, you joined data from flights and airport codes to create a report. Create an external table for airport_codes and domestic_flights from the domestic-flights/flights.parquet and airport-codes/airport-codes.csv files. Recreate the report of top ten airports for 2008 using Spark SQL instead of dataframes.
# +
# Create file paths including filenames
parquet_file_path = r'/home/ram/share/650/dsc650-master/data/domestic-flights/flights.parquet'
airportdata_filepath = r'/home/ram/share/650/dsc650-master/data/airport-codes/airport-codes.csv'
# -
# Load the flight records (already in Parquet format).
df_flight = spark.read.parquet(parquet_file_path)
df_flight.head(5)
# +
# Load the airport lookup table. Use the camelCase 'inferSchema' option
# for consistency with the earlier cells -- Spark treats option keys
# case-insensitively, but the lowercase 'inferschema' reads like a typo.
df_airpot_codes = spark.read.load(airportdata_filepath, format="csv", sep=",", inferSchema=True, header=True)
df_airpot_codes.head(5)
# -
# ## Join to Origin Airport
# Attach airport metadata to each flight's origin airport.
joinexpression = df_flight['origin_airport_code'] == df_airpot_codes['iata_code']
joinType = "left_outer"
#df_flight.join(df_airpot_codes,joinexpression,joinType).show(3)
df_merged = df_flight.join(df_airpot_codes,joinexpression,joinType)
# Drop bookkeeping and redundant columns from the joined result.
df_merged_modified = df_merged.drop("__index_level_0__","ident","local_code","continent","iso_country","iata_code")
df_merged_modified.head(2)
# Prefix the airport-metadata columns so they read as origin attributes.
df_merged_modified2 = df_merged_modified.withColumnRenamed("type","origin_airport_type")\
    .withColumnRenamed("name","origin_airport_name")\
    .withColumnRenamed("elevation_ft","origin_airport_elevation_ft")\
    .withColumnRenamed("iso_region","origin_airport_region")\
    .withColumnRenamed("municipality","origin_airport_municipality")\
    .withColumnRenamed("gps_code","origin_airport_gps_code")\
    .withColumnRenamed("coordinates","origin_airport_coordinates")
df_merged_modified2.printSchema()
# ## Join to Destination Airport
# Repeat the join for each flight's destination airport.
joinexpression2 = df_merged_modified2['destination_airport_code'] == df_airpot_codes['iata_code']
joinType2 = "left_outer"
# +
#df_merged_modified2.join(df_airpot_codes,joinexpression2,joinType2).show(2)
df_merged_modified_dest= df_merged_modified2.join(df_airpot_codes,joinexpression2,joinType2)
df_merged_modified_dest2 = df_merged_modified_dest.drop("__index_level_0__","ident","local_code","continent","iso_country")
# Prefix the metadata columns as destination attributes.
df_merged_modified_dest_final = df_merged_modified_dest2.withColumnRenamed("type","destination_airport_type")\
    .withColumnRenamed("name","destination_airport_name")\
    .withColumnRenamed("elevation_ft","destination_airport_elevation_ft")\
    .withColumnRenamed("iso_region","destination_airport_region")\
    .withColumnRenamed("municipality","destination_airport_municipality")\
    .withColumnRenamed("gps_code","destination_airport_gps_code")\
    .withColumnRenamed("coordinates","destination_airport_coordinates")
# -
df_merged_modified_dest_final.printSchema()
# ## Top Ten Airports By Inbound Passengers
# Register the joined data as a temp view so it can be queried with SQL.
df_merged_modified_dest_final.createOrReplaceTempView("dfTable")
# dataframe with data from 2008
df_2008 = spark.sql("SELECT * FROM dfTable where flight_year = 2008")
df_2008.head(2)
# saving dataframe with 2008 as a table
df_2008.write.saveAsTable('Flight_2008_data', mode = 'overwrite',
    path = '/home/ram/Documents/spark-warehouse/flight_data')
# Report: top ten destination airports for 2008, ranked by total
# inbound passengers.
# BUG FIX: the totals must SUM the per-row passenger/flight values --
# COUNT() only counts the number of rows per airport, so
# Total_Inbound_Passengers was really a row count. Also corrected the
# 'Airpot_Code' alias typo to 'Airport_Code'.
Top10 = spark.sql(""" SELECT * FROM
                        (
                            SELECT Airport_Name,
                                   Airport_Code,
                                   dense_rank() Over(ORDER BY Total_Inbound_Passengers DESC) as Rank,
                                   Total_Inbound_Passengers,
                                   Total_Inbound_Flights,
                                   Average_Daily_Passengers,
                                   Average_DailyFlights
                            FROM
                              (
                                SELECT
                                    destination_airport_name as Airport_Name,
                                    destination_airport_code as Airport_Code,
                                    sum(passengers) as Total_Inbound_Passengers,
                                    sum(flights) as Total_Inbound_Flights,
                                    mean(passengers) as Average_Daily_Passengers,
                                    mean(flights) as Average_DailyFlights
                                FROM Flight_2008_data
                                GROUP BY destination_airport_name, destination_airport_code
                              ) Temp
                        ) RankedTable
                        WHERE Rank < 11
                """)
Top10.show()
|
Spark Learnings/2. Create a Small Data Warehouse.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Start-to-Finish Example: Setting up Polytropic [TOV](https://en.wikipedia.org/wiki/Tolman%E2%80%93Oppenheimer%E2%80%93Volkoff_equation) Initial Data, in Curvilinear Coordinates
#
# ## Authors: <NAME>, <NAME>, and <NAME>
# ### Formatting improvements courtesy <NAME>
#
# ## This module sets up initial data for a TOV star in *spherical, isotropic coordinates*, using the *Numerical* ADM Spherical to BSSN Curvilinear initial data module (numerical = BSSN $\lambda^i$'s are computed using finite-difference derivatives instead of exact expressions).
#
# **Notebook Status:** <font color='green'><b> Validated </b></font>
#
# **Validation Notes:** This module has been validated to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution (see [plots](#convergence) at bottom). Note that convergence at the surface of the star will be lower order due to the sharp drop to zero in $T^{\mu\nu}$.</font>
#
# ### NRPy+ Source Code for this module:
#
# * [TOV/TOV_Solver.py](../edit/TOV/TOV_Solver.py); ([**NRPy+ Tutorial module reviewing mathematical formulation and equations solved**](Tutorial-ADM_Initial_Data-TOV.ipynb)); ([**start-to-finish NRPy+ Tutorial module demonstrating that initial data satisfy Hamiltonian constraint**](Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_TOV_initial_data.ipynb)): Tolman-Oppenheimer-Volkoff (TOV) initial data; defines all ADM variables and nonzero $T^{\mu\nu}$ components in Spherical basis.
# * [BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear.py](../edit/BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb): *Numerical* Spherical ADM$\to$Curvilinear BSSN converter function
# * [BSSN/BSSN_constraints.py](../edit/BSSN/BSSN_constraints.py); [\[**tutorial**\]](Tutorial-BSSN_constraints.ipynb): Hamiltonian constraint in BSSN curvilinear basis/coordinates
#
# ## Introduction:
# Here we use NRPy+ to set up initial data for a [simple polytrope TOV star](https://en.wikipedia.org/wiki/Tolman%E2%80%93Oppenheimer%E2%80%93Volkoff_equation).
#
# The entire algorithm is outlined as follows, with links to the relevant NRPy+ tutorial notebooks listed at each step:
#
# 1. Allocate memory for gridfunctions, including temporary storage for the Method of Lines time integration [(**NRPy+ tutorial on NRPy+ Method of Lines algorithm**)](Tutorial-Method_of_Lines-C_Code_Generation.ipynb).
# 1. Set gridfunction values to initial data
# * [**NRPy+ tutorial on TOV initial data**](Tutorial-ADM_Initial_Data-TOV.ipynb)
# * [**NRPy+ tutorial on validating TOV initial data**](Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_TOV_initial_data.ipynb).
# 1. Evaluate the Hamiltonian constraint violation
# * [**NRPy+ tutorial on BSSN constraints**](Tutorial-BSSN_constraints.ipynb)
# 1. Repeat above steps at two numerical resolutions to confirm convergence of Hamiltonian constraint violation to zero.
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This notebook is organized as follows
#
# 1. [Step 1](#initializenrpy): Set core NRPy+ parameters for numerical grids and reference metric
# 1. [Step 2](#adm_id_tov): Set up ADM initial data for polytropic TOV Star
# 1. [Step 2.a](#tov_interp): Interpolating the TOV data file as needed
# 1. [Step 2.b](#source): Compute source terms $S_{ij}$, $S_{i}$, $S$, and $\rho$
# 1. [Step 2.c](#jacobian): Jacobian transformation on the ADM/BSSN source terms
# 1. [Step 2.d](#tensor): Rescale tensorial quantities
# 1. [Step 3](#adm_id_spacetime): Convert ADM spacetime quantity initial data from Spherical to BSSN Curvilinear coordinates
# 1. [Step 4](#validate): Validating that the TOV initial data satisfy the Hamiltonian constraint
# 1. [Step 4.a](#ham_const_output): Output the Hamiltonian Constraint
# 1. [Step 4.b](#apply_bcs): Apply singular, curvilinear coordinate boundary conditions
# 1. [Step 4.c](#enforce3metric): Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$
# 1. [Step 5](#mainc): `TOV_Playground.c`: The Main C Code
# 1. [Step 6](#plot): Plotting the single-neutron-star initial data
# 1. [Step 7](#convergence): Validation: Convergence of numerical errors (Hamiltonian constraint violation) to zero
# 1. [Step 8](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
# <a id='initializenrpy'></a>
#
# # Step 1: Set core NRPy+ parameters for numerical grids and reference metric \[Back to [top](#toc)\]
# $$\label{initializenrpy}$$
#
# +
# Step P1: Import needed NRPy+ core modules:
from outputC import lhrh,outCfunction,outputC # NRPy+: Core C code output module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import finite_difference as fin # NRPy+: Finite difference C code generation module
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
import shutil, os, sys # Standard Python modules for multiplatform OS-level functions
# Step P2: Create C code output directory:
Ccodesdir = os.path.join("TOVID_Ccodes/")
# First remove C code output directory if it exists, so stale generated
# files from a previous run cannot leak into this one.
# Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty
# # !rm -r ScalarWaveCurvilinear_Playground_Ccodes
# ignore_errors=True makes the removal a no-op if the directory is absent.
shutil.rmtree(Ccodesdir, ignore_errors=True)
# Then create a fresh directory
cmd.mkdir(Ccodesdir)
# Step P3: Create executable output directory (holds the compiled binary and its output):
outdir = os.path.join(Ccodesdir,"output/")
cmd.mkdir(outdir)
# Step 1: Set the spatial dimension parameter
#         to three this time, and then read
#         the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set some core parameters, including CoordSystem, MoL timestepping algorithm,
#         FD order, floating point precision, and CFL factor:
# Choices are: Spherical, SinhSpherical, SinhSphericalv2, Cylindrical, SinhCylindrical,
#              SymTP, SinhSymTP
CoordSystem = "Spherical"
# Step 2.a: Set defaults for Coordinate system parameters.
#           These are perhaps the most commonly adjusted parameters,
#           so we enable modifications at this high level.
# domain_size = 7.5 # SET BELOW BASED ON TOV STELLAR RADIUS
# sinh_width sets the default value for:
#   * SinhSpherical's params.SINHW
#   * SinhCylindrical's params.SINHW{RHO,Z}
#   * SinhSymTP's params.SINHWAA
sinh_width = 0.4 # If Sinh* coordinates chosen
# sinhv2_const_dr sets the default value for:
#   * SinhSphericalv2's params.const_dr
#   * SinhCylindricalv2's params.const_d{rho,z}
sinhv2_const_dr = 0.05# If Sinh*v2 coordinates chosen
# SymTP_bScale sets the default value for:
#   * SinhSymTP's params.bScale
SymTP_bScale = 0.5 # If SymTP chosen
# Step 2.b: Set the order of spatial finite difference derivatives;
#           and the core data type.
FD_order = 4 # Finite difference order: even numbers only, starting with 2. 12 is generally unstable
REAL = "double" # Best to use double here.
# Step 3: Set the coordinate system for the numerical grid
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.
# Step 4: Set the finite differencing order to FD_order (set above).
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", FD_order)
# Step 5: Set the direction=2 (phi) axis to be the symmetry axis; i.e.,
#         axis "2", corresponding to the i2 direction.
#         This sets all spatial derivatives in the phi direction to zero.
par.set_parval_from_str("indexedexp::symmetry_axes","2")
# Step 6: The MoLtimestepping interface is only used for memory allocation/deallocation
import MoLtimestepping.C_Code_Generation as MoL
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
RK_method = "Euler" # DOES NOT MATTER; Again MoL interface is only used for memory alloc/dealloc.
RK_order = Butcher_dict[RK_method][1]
cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/"))
# Empty RHS strings: no time evolution is performed in this playground, so the
# MoL-generated C code only allocates/frees gridfunction storage.
MoL.MoL_C_Code_Generation(RK_method, RHS_string = "", post_RHS_string = "",
                          outdir = os.path.join(Ccodesdir,"MoLtimestepping/"))
# Step 7: Polytropic EOS setup
# For EOS_type, choose either "SimplePolytrope" or "PiecewisePolytrope"
EOS_type = "SimplePolytrope"
# If "PiecewisePolytrope" is chosen as EOS_type, you
#   must also choose the name of the EOS, which can
#   be any of the following:
# 'PAL6', 'SLy', 'APR1', 'APR2', 'APR3', 'APR4',
# 'FPS', 'WFF1', 'WFF2', 'WFF3', 'BBB2', 'BPAL12',
# 'ENG', 'MPA1', 'MS1', 'MS2', 'MS1b', 'PS', 'GS1',
# 'GS2', 'BGN1H1', 'GNH3', 'H1', 'H2', 'H3', 'H4',
# 'H5', 'H6', 'H7', 'PCL2', 'ALF1', 'ALF2', 'ALF3',
# 'ALF4'
EOS_name = 'SLy' # <-- IGNORED IF EOS_type is not PiecewisePolytrope.
# -
# <a id='adm_id_tov'></a>
#
# # Step 2: Set up ADM initial data for polytropic TOV Star \[Back to [top](#toc)\]
# $$\label{adm_id_tov}$$
#
# As documented [in the TOV Initial Data NRPy+ Tutorial Module](Tutorial-TOV_Initial_Data.ipynb) ([older version here](Tutorial-GRMHD_UnitConversion.ipynb)), we will now set up TOV initial data, storing the densely-sampled result to file (***Courtesy <NAME>***).
#
# The TOV solver uses an ODE integration routine provided by scipy, so we first make sure that scipy is installed:
# !pip install scipy > /dev/null
# Next we call the [`TOV.TOV_Solver()` function](../edit/TOV/TOV_Solver.py) ([NRPy+ Tutorial module](Tutorial-ADM_Initial_Data-TOV.ipynb)) to set up the initial data, using the default parameters for initial data. This function outputs the solution to a file named "outputTOVpolytrope.txt".
# +
##########################
# Polytropic EOS example #
##########################
import TOV.Polytropic_EOSs as ppeos
if EOS_type == "SimplePolytrope":
    # Set neos = 1 (single polytrope)
    neos = 1
    # Set rho_poly_tab (not needed for a single polytrope)
    rho_poly_tab = []
    # Set Gamma_poly_tab
    Gamma_poly_tab = [2.0]
    # Set K_poly_tab0
    K_poly_tab0 = 1. # ZACH NOTES: CHANGED FROM 100.
    # Set the eos quantities
    eos = ppeos.set_up_EOS_parameters__complete_set_of_input_variables(neos,rho_poly_tab,Gamma_poly_tab,K_poly_tab0)
    # Central baryon density of the star (code units — presumably G=c=1; confirm in TOV_Solver docs)
    rho_baryon_central = 0.129285
elif EOS_type == "PiecewisePolytrope":
    eos = ppeos.set_up_EOS_parameters__Read_et_al_input_variables(EOS_name)
    rho_baryon_central=2.0
else:
    print("""Error: unknown EOS_type. Valid types are 'SimplePolytrope' and 'PiecewisePolytrope' """)
    sys.exit(1)
# Solve the TOV equations; writes the densely-sampled 1D solution to
# outputTOVpolytrope.txt and returns the stellar mass, Schwarzschild
# radius, and isotropic radius.
import TOV.TOV_Solver as TOV
M_TOV, R_Schw_TOV, R_iso_TOV = TOV.TOV_Solver(eos,
                                              outfile="outputTOVpolytrope.txt",
                                              rho_baryon_central=rho_baryon_central,
                                              return_M_RSchw_and_Riso = True,
                                              verbose = True)
# domain_size sets the default value for:
#   * Spherical's params.RMAX
#   * SinhSpherical*'s params.AMAX
#   * Cartesians*'s -params.{x,y,z}min & .{x,y,z}max
#   * Cylindrical's -params.ZMIN & .{Z,RHO}MAX
#   * SinhCylindrical's params.AMPL{RHO,Z}
#   * *SymTP's params.AMAX
# Set to twice the star's isotropic radius, so the full star fits in the grid.
domain_size = 2.0 * R_iso_TOV
# -
# <a id='tov_interp'></a>
#
# ## Step 2.a: Interpolate the TOV data file as needed to set up ADM spacetime quantities in spherical basis (for input into the `Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear` module) and $T^{\mu\nu}$ in the chosen reference metric basis \[Back to [top](#toc)\]
# $$\label{tov_interp}$$
#
# The TOV data file just written stored $\left(r,\rho(r),P(r),M(r),e^{\nu(r)}\right)$, where $\rho(r)$ is the total mass-energy density (cf. $\rho_{\text{baryonic}}$).
#
# **METRIC DATA IN TERMS OF ADM QUANTITIES**
#
# The [TOV line element](https://en.wikipedia.org/wiki/Tolman%E2%80%93Oppenheimer%E2%80%93Volkoff_equation) in *Schwarzschild coordinates* is written (in the $-+++$ form):
# $$
# ds^2 = - c^2 e^\nu dt^2 + \left(1 - \frac{2GM}{rc^2}\right)^{-1} dr^2 + r^2 d\Omega^2.
# $$
#
# In *isotropic coordinates* with $G=c=1$ (i.e., the coordinate system we'd prefer to use), the ($-+++$ form) line element is written:
# $$
# ds^2 = - e^{\nu} dt^2 + e^{4\phi} \left(d\bar{r}^2 + \bar{r}^2 d\Omega^2\right),
# $$
# where $\phi$ here is the *conformal factor*.
#
# The ADM 3+1 line element for this diagonal metric in isotropic spherical coordinates is given by:
# $$
# ds^2 = (-\alpha^2 + \beta_k \beta^k) dt^2 + \gamma_{\bar{r}\bar{r}} d\bar{r}^2 + \gamma_{\theta\theta} d\theta^2+ \gamma_{\phi\phi} d\phi^2,
# $$
#
# from which we can immediately read off the ADM quantities:
# \begin{align}
# \alpha &= e^{\nu(\bar{r})/2} \\
# \beta^k &= 0 \\
# \gamma_{\bar{r}\bar{r}} &= e^{4\phi}\\
# \gamma_{\theta\theta} &= e^{4\phi} \bar{r}^2 \\
# \gamma_{\phi\phi} &= e^{4\phi} \bar{r}^2 \sin^2 \theta \\
# \end{align}
#
# **STRESS-ENERGY TENSOR $T^{\mu\nu}$**
#
# We will also need the stress-energy tensor $T^{\mu\nu}$. [As discussed here](https://en.wikipedia.org/wiki/Tolman%E2%80%93Oppenheimer%E2%80%93Volkoff_equation), the stress-energy tensor is diagonal:
#
# \begin{align}
# T^t_t &= -\rho \\
# T^i_j &= P \delta^i_j \\
# \text{All other components of }T^\mu_\nu &= 0.
# \end{align}
#
# Since $\beta^i=0$ the inverse metric expression simplifies to (Eq. 4.49 in [Gourgoulhon](https://arxiv.org/pdf/gr-qc/0703035.pdf)):
# $$
# g^{\mu\nu} = \begin{pmatrix}
# -\frac{1}{\alpha^2} & \frac{\beta^i}{\alpha^2} \\
# \frac{\beta^i}{\alpha^2} & \gamma^{ij} - \frac{\beta^i\beta^j}{\alpha^2}
# \end{pmatrix} =
# \begin{pmatrix}
# -\frac{1}{\alpha^2} & 0 \\
# 0 & \gamma^{ij}
# \end{pmatrix},
# $$
#
# and since the 3-metric is diagonal we get
#
# \begin{align}
# \gamma^{\bar{r}\bar{r}} &= e^{-4\phi}\\
# \gamma^{\theta\theta} &= e^{-4\phi}\frac{1}{\bar{r}^2} \\
# \gamma^{\phi\phi} &= e^{-4\phi}\frac{1}{\bar{r}^2 \sin^2 \theta}.
# \end{align}
#
# Thus raising $T^\mu_\nu$ yields a diagonal $T^{\mu\nu}$
#
# \begin{align}
# T^{tt} &= -g^{tt} \rho = \frac{1}{\alpha^2} \rho = e^{-\nu(\bar{r})} \rho \\
# T^{\bar{r}\bar{r}} &= g^{\bar{r}\bar{r}} P = \frac{1}{e^{4 \phi}} P \\
# T^{\theta\theta} &= g^{\theta\theta} P = \frac{1}{e^{4 \phi}\bar{r}^2} P\\
# T^{\phi\phi} &= g^{\phi\phi} P = \frac{1}{e^{4\phi}\bar{r}^2 \sin^2 \theta} P
# \end{align}
# +
thismodule = "TOVID"
# Register the interpolated TOV inputs as NRPy+ C parameters. The 1e300 default
# flags values that must be overwritten at runtime before use — TODO confirm convention.
rbar,theta,rho,P,expnu,exp4phi = par.Cparameters("REAL",thismodule,
                                                 ["rbar","theta","rho","P","expnu","exp4phi"],1e300)
# Lapse alpha = e^{nu/2} (see the isotropic-coordinates line element above).
IDalpha = sp.sqrt(expnu)
# Diagonal spatial metric gamma_{ij} in the spherical (rbar,theta,phi) basis:
gammaSphDD = ixp.zerorank2(DIM=3)
gammaSphDD[0][0] = exp4phi
gammaSphDD[1][1] = exp4phi*rbar**2
gammaSphDD[2][2] = exp4phi*rbar**2*sp.sin(theta)**2
# Diagonal stress-energy tensor T^{mu nu} of the static fluid, with indices
# raised by the diagonal inverse metric (derivation in the markdown above):
T4SphUU = ixp.zerorank2(DIM=4)
T4SphUU[0][0] = rho/expnu
T4SphUU[1][1] = P/exp4phi
T4SphUU[2][2] = P/(exp4phi*rbar**2)
T4SphUU[3][3] = P/(exp4phi*rbar**2*sp.sin(theta)**2)
# +
# Build parallel (expression, C output-variable name) lists for the generated
# function: the lapse alpha plus the six independent components of gamma_{ij}.
expr_list = [IDalpha]
name_list = ["*alpha"]
for i in range(3):
    for j in range(i,3):
        expr_list.append(gammaSphDD[i][j])
        name_list.append("*gammaDD"+str(i)+str(j))
desc = """This function takes as input either (x,y,z) or (r,th,ph) and outputs
all ADM quantities in the Cartesian or Spherical basis, respectively."""
name = "ID_TOV_ADM_quantities"
outCparams = "preindent=1,outCverbose=False,includebraces=False"
# Emit ID_TOV_ADM_quantities.h. The C body zeroes K_{ij}, beta^i, and B^i (the
# star is static), interpolates the 1D TOV solution at rbar via
# TOV_interpolate_1D(), then assigns alpha and gamma_{ij} from the expressions above.
outCfunction(
    outfile=os.path.join(Ccodesdir, name + ".h"), desc=desc, name=name,
    params=""" const REAL xyz_or_rthph[3],
const ID_inputs other_inputs,
REAL *gammaDD00,REAL *gammaDD01,REAL *gammaDD02,REAL *gammaDD11,REAL *gammaDD12,REAL *gammaDD22,
REAL *KDD00,REAL *KDD01,REAL *KDD02,REAL *KDD11,REAL *KDD12,REAL *KDD22,
REAL *alpha,
REAL *betaU0,REAL *betaU1,REAL *betaU2,
REAL *BU0,REAL *BU1,REAL *BU2""",
    body="""
// Set trivial metric quantities:
*KDD00 = *KDD01 = *KDD02 = 0.0;
/**/ *KDD11 = *KDD12 = 0.0;
/**/ *KDD22 = 0.0;
*betaU0 = *betaU1 = *betaU2 = 0.0;
*BU0 = *BU1 = *BU2 = 0.0;
// Next set gamma_{ij} in spherical basis
const REAL rbar = xyz_or_rthph[0];
const REAL theta = xyz_or_rthph[1];
const REAL phi = xyz_or_rthph[2];
REAL rho,rho_baryon,P,M,expnu,exp4phi;
TOV_interpolate_1D(rbar,other_inputs.Rbar,other_inputs.Rbar_idx,other_inputs.interp_stencil_size,
other_inputs.numlines_in_file,
other_inputs.r_Schw_arr,other_inputs.rho_arr,other_inputs.rho_baryon_arr,other_inputs.P_arr,other_inputs.M_arr,
other_inputs.expnu_arr,other_inputs.exp4phi_arr,other_inputs.rbar_arr,
&rho,&rho_baryon,&P,&M,&expnu,&exp4phi);\n"""+
    outputC(expr_list,name_list, "returnstring",outCparams),
    opts="DisableCparameters")
# -
# As all input quantities are functions of $r$, we will simply read the solution from file and interpolate it to the values of $r$ needed by the initial data.
#
# 1. First we define functions `ID_TOV_ADM_quantities()` and `ID_TOV_TUPMUNU()` that call the [1D TOV interpolator function](../edit/TOV/tov_interp.h) to evaluate the ADM spacetime quantities and $T^{\mu\nu}$, respectively, at any given point $(r,\theta,\phi)$ in the Spherical basis. All quantities are defined as above.
# 1. Next we will construct the BSSN/ADM source terms $\{S_{ij},S_{i},S,\rho\}$ in the Spherical basis
# 1. Then we will perform the Jacobian transformation on $\{S_{ij},S_{i},S,\rho\}$ to the desired `(xx0,xx1,xx2)` basis
# 1. Next we call the *Numerical* Spherical ADM$\to$Curvilinear BSSN converter function to convert the above ADM quantities to the rescaled BSSN quantities in the desired curvilinear coordinate system: [BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear.py](../edit/BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb).
#
# $$
# {\rm Jac\_dUSph\_dDrfmUD[mu][nu]} = \frac{\partial x^\mu_{\rm Sph}}{\partial x^\nu_{\rm rfm}},
# $$
#
# via exact differentiation (courtesy SymPy), and the inverse Jacobian
# $$
# {\rm Jac\_dUrfm\_dDSphUD[mu][nu]} = \frac{\partial x^\mu_{\rm rfm}}{\partial x^\nu_{\rm Sph}},
# $$
#
# using NRPy+'s `generic_matrix_inverter3x3()` function. In terms of these, the transformation of BSSN tensors from Spherical to `"reference_metric::CoordSystem"` coordinates may be written:
#
# $$
# T^{\mu\nu}_{\rm rfm} =
# \frac{\partial x^\mu_{\rm rfm}}{\partial x^\delta_{\rm Sph}}
# \frac{\partial x^\nu_{\rm rfm}}{\partial x^\sigma_{\rm Sph}} T^{\delta\sigma}_{\rm Sph}
# $$
# +
# Choose the basis in which the input ADM initial data are provided. The TOV
# solution is given in the Spherical basis, so CoordType_in is fixed to
# "Spherical" here; "Cartesian" is supported for other initial-data sources.
r_th_ph_or_Cart_xyz_oID_xx = []
CoordType_in = "Spherical"
if CoordType_in == "Spherical":
    r_th_ph_or_Cart_xyz_oID_xx = rfm.xxSph
elif CoordType_in == "Cartesian":
    r_th_ph_or_Cart_xyz_oID_xx = rfm.xxCart
else:
    print("Error: Can only convert ADM Cartesian or Spherical initial data to BSSN Curvilinear coords.")
    # BUGFIX: was bare exit(1). The exit() builtin is injected by the `site`
    # module and is not guaranteed to exist; use sys.exit() (sys is imported
    # at the top of this notebook), consistent with the EOS_type error branch.
    sys.exit(1)
# Next apply Jacobian transformations to convert into the (xx0,xx1,xx2) basis
# rho and S are scalar, so no Jacobian transformations are necessary.
# 4D Jacobian d x^mu_{Sph}/d x^nu_{rfm}: the time-time component is 1 and the
# time-space components vanish, since the transformation is purely spatial.
Jac4_dUSphorCart_dDrfmUD = ixp.zerorank2(DIM=4)
Jac4_dUSphorCart_dDrfmUD[0][0] = sp.sympify(1)
for i in range(DIM):
    for j in range(DIM):
        Jac4_dUSphorCart_dDrfmUD[i+1][j+1] = sp.diff(r_th_ph_or_Cart_xyz_oID_xx[i],rfm.xx[j])
# Invert (exactly, via SymPy) to obtain d x^mu_{rfm}/d x^nu_{Sph}; the
# determinant is not needed here.
Jac4_dUrfm_dDSphorCartUD, dummyDET = ixp.generic_matrix_inverter4x4(Jac4_dUSphorCart_dDrfmUD)
# Perform Jacobian operations on T^{mu nu} and gamma_{ij}
T4UU = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","T4UU","sym01",DIM=4)
# T^{mu nu}_{rfm} = (dx^mu_rfm/dx^delta_Sph)(dx^nu_rfm/dx^sigma_Sph) T^{delta sigma}_Sph
IDT4UU = ixp.zerorank2(DIM=4)
for mu in range(4):
    for nu in range(4):
        for delta in range(4):
            for sigma in range(4):
                IDT4UU[mu][nu] += \
                    Jac4_dUrfm_dDSphorCartUD[mu][delta]*Jac4_dUrfm_dDSphorCartUD[nu][sigma]*T4SphUU[delta][sigma]
# Collect assignments T4UU[mu][nu] = IDT4UU[mu][nu] for the 10 independent
# components of the symmetric stress-energy tensor gridfunctions.
lhrh_list = []
for mu in range(4):
    for nu in range(mu,4):
        lhrh_list.append(lhrh(lhs=gri.gfaccess("auxevol_gfs","T4UU"+str(mu)+str(nu)),rhs=IDT4UU[mu][nu]))
# BUGFIX: desc previously described ADM outputs — a copy-paste from
# ID_TOV_ADM_quantities above. This function outputs T^{mu nu}.
desc = """This function takes as input the local grid coordinates (xx0,xx1,xx2) and outputs
the stress-energy tensor T^{mu nu} in the (xx0,xx1,xx2) basis at all grid points."""
name = "ID_TOV_TUPMUNU_xx0xx1xx2"
outCparams = "preindent=1,outCverbose=False,includebraces=False"
# Emit ID_TOV_TUPMUNU_xx0xx1xx2.h: at every grid point, compute
# (rbar,theta,ph) from (xx0,xx1,xx2), interpolate the 1D TOV solution
# there, and store T^{mu nu} into the auxevol gridfunctions.
outCfunction(
    outfile=os.path.join(Ccodesdir, name + ".h"), desc=desc, name=name,
    params="""const paramstruct *restrict params,REAL *restrict xx[3],
const ID_inputs other_inputs,REAL *restrict auxevol_gfs""",
    body=outputC([rfm.xxSph[0],rfm.xxSph[1],rfm.xxSph[2]],
                 ["const REAL rbar","const REAL theta","const REAL ph"],"returnstring",
                 "CSE_enable=False,includebraces=False")+"""
REAL rho,rho_baryon,P,M,expnu,exp4phi;
TOV_interpolate_1D(rbar,other_inputs.Rbar,other_inputs.Rbar_idx,other_inputs.interp_stencil_size,
other_inputs.numlines_in_file,
other_inputs.r_Schw_arr,other_inputs.rho_arr,other_inputs.rho_baryon_arr,other_inputs.P_arr,other_inputs.M_arr,
other_inputs.expnu_arr,other_inputs.exp4phi_arr,other_inputs.rbar_arr,
&rho,&rho_baryon,&P,&M,&expnu,&exp4phi);\n"""+
    fin.FD_outputC("returnstring",lhrh_list,params="outCverbose=False,includebraces=False").replace("IDX4","IDX4S"),
    loopopts="AllPoints,Read_xxs")
# -
# <a id='adm_id_spacetime'></a>
#
# # Step 3: Convert ADM initial data to BSSN-in-curvilinear coordinates \[Back to [top](#toc)\]
# $$\label{adm_id_spacetime}$$
#
# This is an automated process, taken care of by [`BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear`](../edit/BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear.py), and documented [in this tutorial notebook](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb).
# Generate the C codes that convert the Spherical-basis ADM quantities
# produced by ID_TOV_ADM_quantities into rescaled BSSN quantities in the
# chosen curvilinear coordinate system (written into Ccodesdir).
import BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear as AtoBnum
AtoBnum.Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear("Spherical","ID_TOV_ADM_quantities",
                                                               Ccodesdir=Ccodesdir,loopopts="")
# <a id='validate'></a>
#
# # Step 4: Validating that the TOV initial data satisfy the Hamiltonian constraint \[Back to [top](#toc)\]
# $$\label{validate}$$
#
# We will validate that the TOV initial data satisfy the Hamiltonian constraint, modulo numerical finite differencing error
# <a id='ham_const_output'></a>
#
# ## Step 4.a: Output the Hamiltonian constraint \[Back to [top](#toc)\]
# $$\label{ham_const_output}$$
#
# First output the Hamiltonian constraint [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-BSSN_constraints.ipynb)
# +
# Enable rfm_precompute infrastructure, which results in
# BSSN RHSs that are free of transcendental functions,
# even in curvilinear coordinates, so long as
# ConformalFactor is set to "W" (default).
cmd.mkdir(os.path.join(Ccodesdir,"rfm_files/"))
par.set_parval_from_str("reference_metric::enable_rfm_precompute","True")
par.set_parval_from_str("reference_metric::rfm_precompute_Ccode_outdir",os.path.join(Ccodesdir,"rfm_files/"))
import BSSN.Enforce_Detgammabar_Constraint as EGC
enforce_detg_constraint_symb_expressions = EGC.Enforce_Detgammabar_Constraint_symb_expressions()
# Now register the Hamiltonian as a gridfunction.
H = gri.register_gridfunctions("AUX","H")
# Then define the Hamiltonian constraint and output the optimized C code.
import BSSN.BSSN_constraints as bssncon
import BSSN.BSSN_stress_energy_source_terms as Bsest
bssncon.BSSN_constraints(add_T4UUmunu_source_terms=False)
Bsest.BSSN_source_terms_for_BSSN_constraints(T4UU)
bssncon.H += Bsest.sourceterm_H
# Now that we are finished with all the rfm hatted
# quantities in generic precomputed functional
# form, let's restore them to their closed-
# form expressions.
par.set_parval_from_str("reference_metric::enable_rfm_precompute","False") # Reset to False to disable rfm_precompute.
rfm.ref_metric__hatted_quantities()
desc="Evaluate the Hamiltonian constraint"
name="Hamiltonian_constraint"
outCfunction(
outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name,
params = """rfm_struct *restrict rfmstruct,const paramstruct *restrict params,
REAL *restrict in_gfs, REAL *restrict auxevol_gfs, REAL *restrict aux_gfs""",
body = fin.FD_outputC("returnstring",lhrh(lhs=gri.gfaccess("aux_gfs", "H"), rhs=bssncon.H),
params="outCverbose=False").replace("IDX4","IDX4S"),
loopopts = "InteriorPoints,Enable_rfm_precompute")
# -
# <a id='bc_functs'></a>
#
# ## Step 4.b: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](#toc)\]
# $$\label{bc_functs}$$
#
# Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)
# Generate the C codes for singular, curvilinear coordinate boundary
# conditions, written into Ccodesdir/boundary_conditions/. Cparamspath="../"
# — presumably so the generated files can #include the set_Cparameters
# headers one directory up; confirm against the CurviBC module.
import CurviBoundaryConditions.CurviBoundaryConditions as cbcs
cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"))
# <a id='enforce3metric'></a>
#
# ## Step 4.c: Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint \[Back to [top](#toc)\]
# $$\label{enforce3metric}$$
#
# Then enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)), as [documented in the corresponding NRPy+ tutorial notebook](Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.ipynb)
#
# Applying curvilinear boundary conditions should affect the initial data at the outer boundary, and will in general cause the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint to be violated there. Thus after we apply these boundary conditions, we must always call the routine for enforcing the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint:
# Set up the C function for the det(gammahat) = det(gammabar) constraint,
# using the symbolic expressions generated in Step 4.a (while rfm_precompute
# was enabled).
EGC.output_Enforce_Detgammabar_Constraint_Ccode(Ccodesdir,
                                                exprs=enforce_detg_constraint_symb_expressions)
# <a id='cparams_rfm_and_domainsize'></a>
#
# ## Step 4.d: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](#toc)\]
# $$\label{cparams_rfm_and_domainsize}$$
#
# Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.
#
# Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above
# +
# Step 3.d.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir))
# Step 3.d.ii: Set free_parameters.h
# Output to $Ccodesdir/free_parameters.h reference metric parameters based on generic
# domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale,
# parameters set above.
rfm.out_default_free_parameters_for_rfm(os.path.join(Ccodesdir,"free_parameters.h"),
domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale)
# Step 3.d.iii: Generate set_Nxx_dxx_invdx_params__and__xx.h:
rfm.set_Nxx_dxx_invdx_params__and__xx_h(Ccodesdir)
# Step 3.d.iv: Generate xxCart.h, which contains xxCart() for
# (the mapping from xx->Cartesian) for the chosen
# CoordSystem:
rfm.xxCart_h("xxCart","./set_Cparameters.h",os.path.join(Ccodesdir,"xxCart.h"))
# Step 3.d.v: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir))
# -
# <a id='mainc'></a>
#
# # Step 5: `TOV_Playground.c`: The Main C Code \[Back to [top](#toc)\]
# $$\label{mainc}$$
#
# +
# Part P0: Emit TOV_Playground_REAL__NGHOSTS.h, which #defines REAL, the number
#          of ghost cells NGHOSTS (FD stencil half-width + 1, from FD_order),
#          and the TOV stellar parameters, for inclusion by TOV_Playground.c.
num_ghosts = str(int(FD_order/2)+1)
header_lines = ["",
                "// Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER",
                "#define NGHOSTS " + num_ghosts,
                "// Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point",
                "// numbers are stored to at least ~16 significant digits",
                "#define REAL " + REAL,
                "// Part P0.c: Set TOV stellar parameters",
                "#define TOV_Mass " + str(M_TOV),
                "#define TOV_Riso " + str(R_iso_TOV)]
with open(os.path.join(Ccodesdir,"TOV_Playground_REAL__NGHOSTS.h"), "w") as header_file:
    header_file.write("\n".join(header_lines) + "\n")
# +
# %%writefile $Ccodesdir/TOV_Playground.c
// Step P0: Define REAL and NGHOSTS. This header is generated by NRPy+.
#include "TOV_Playground_REAL__NGHOSTS.h"
#include "rfm_files/rfm_struct__declare.h"
#include "declare_Cparameters_struct.h"
// Step P1: Import needed header files
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#ifndef M_PI
#define M_PI 3.141592653589793238462643383279502884L
#endif
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524400844362104849039L
#endif
// Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of
// data in a 1D array. In this case, consecutive values of "i"
// (all other indices held to a fixed value) are consecutive in memory, where
// consecutive values of "j" (fixing all other indices) are separated by
// Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of
// "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc.
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
// IDX4ptS: same 4D flattening as IDX4S, but taking a precomputed 3D point index idx = IDX3S(i,j,k).
#define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) )
// IDX3S: flatten a 3D grid point (i,j,k) to a 1D index; i is fastest-varying in memory.
#define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) )
// LOOP_REGION: triple loop over the half-open ranges [i0min,i0max) x [i1min,i1max) x [i2min,i2max).
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)
// LOOP_ALL_GFS_GPS: OpenMP-parallel flat loop over every point of every evolved gridfunction.
#define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \
for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++)
// Step P3: Set UUGF and VVGF macros, as well as xxCart()
#include "boundary_conditions/gridfunction_defines.h"
// Step P4: Set xxCart(const paramstruct *restrict params,
// REAL *restrict xx[3],
// const int i0,const int i1,const int i2,
// REAL xCart[3]),
// which maps xx->Cartesian via
// {xx[0][i0],xx[1][i1],xx[2][i2]}->{xCart[0],xCart[1],xCart[2]}
#include "xxCart.h"
// Step P5: Defines set_Nxx_dxx_invdx_params__and__xx(const int EigenCoord, const int Nxx[3],
// paramstruct *restrict params, REAL *restrict xx[3]),
// which sets params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for
// the chosen Eigen-CoordSystem if EigenCoord==1, or
// CoordSystem if EigenCoord==0.
#include "set_Nxx_dxx_invdx_params__and__xx.h"
// Step P6: Include basic functions needed to impose curvilinear
// parity and boundary conditions.
#include "boundary_conditions/CurviBC_include_Cfunctions.h"
// Step P8: Include function for enforcing detgammabar constraint.
#include "enforce_detgammabar_constraint.h"
// Step P4: Declare initial data input struct:
// stores data from initial data solver,
// so they can be put on the numerical grid.
typedef struct __ID_inputs {
REAL Rbar;               // isotropic radius of the stellar surface (set where rho first drops to zero)
int Rbar_idx;            // index of that surface point within the 1D arrays below
int interp_stencil_size; // number of points passed to TOV_interpolate_1D's stencil
int numlines_in_file;    // length of each 1D data array below
// One array per column of the TOV data file (Schwarzschild radius, densities,
// pressure, mass function, metric factors e^{nu} and e^{4phi}, isotropic radius):
REAL *r_Schw_arr,*rho_arr,*rho_baryon_arr,*P_arr,*M_arr,*expnu_arr,*exp4phi_arr,*rbar_arr;
} ID_inputs;
// Part P11: Declare all functions for setting up TOV initial data.
/* Routines to interpolate the TOV solution and convert to ADM & T^{munu}: */
#include "../TOV/tov_interp.h"
#include "ID_TOV_ADM_quantities.h"
#include "ID_TOV_TUPMUNU_xx0xx1xx2.h"
/* Next perform the basis conversion and compute all needed BSSN quantities */
#include "ID_ADM_xx0xx1xx2_to_BSSN_xx0xx1xx2__ALL_BUT_LAMBDAs.h"
#include "ID_BSSN__ALL_BUT_LAMBDAs.h"
#include "ID_BSSN_lambdas.h"
// Step P10: Declare function necessary for setting up the initial data.
// Step P10.a: Define BSSN_ID() for BrillLindquist initial data
// Step P10.b: Set the generic driver function for setting up BSSN initial data
// initial_data(): set up TOV-star initial data on the numerical grid.
// Reads the 1D TOV solution from ./outputTOVpolytrope.txt, locates the stellar
// surface, interpolates the solution onto the grid as BSSN quantities (in_gfs)
// and as the stress-energy tensor T^{mu nu} (auxevol_gfs), applying curvilinear
// boundary conditions and the det(gammabar)=det(gammahat) constraint after each
// interpolation stage. Exits with an error message on any file failure.
void initial_data(const paramstruct *restrict params,const bc_struct *restrict bcstruct,
                  const rfm_struct *restrict rfmstruct,
                  REAL *restrict xx[3], REAL *restrict auxevol_gfs, REAL *restrict in_gfs) {
#include "set_Cparameters.h"
  // Step 1: Set up TOV initial data
  // Step 1.a: Read TOV initial data from data file
  // Open the data file:
  char filename[100];
  snprintf(filename, sizeof(filename), "./outputTOVpolytrope.txt"); // snprintf: cannot overflow filename[]
  FILE *in1Dpolytrope = fopen(filename, "r");
  if(in1Dpolytrope == NULL) {
    fprintf(stderr,"ERROR: could not open file %s\n",filename);
    exit(1);
  }
  // Count the number of lines in the data file, so each column array can be
  // allocated to exactly the right size:
  int numlines_in_file = count_num_lines_in_file(in1Dpolytrope);
  // Allocate space for all data arrays (one per column of the TOV file):
  REAL *r_Schw_arr     = (REAL *)malloc(sizeof(REAL)*numlines_in_file);
  REAL *rho_arr        = (REAL *)malloc(sizeof(REAL)*numlines_in_file);
  REAL *rho_baryon_arr = (REAL *)malloc(sizeof(REAL)*numlines_in_file);
  REAL *P_arr          = (REAL *)malloc(sizeof(REAL)*numlines_in_file);
  REAL *M_arr          = (REAL *)malloc(sizeof(REAL)*numlines_in_file);
  REAL *expnu_arr      = (REAL *)malloc(sizeof(REAL)*numlines_in_file);
  REAL *exp4phi_arr    = (REAL *)malloc(sizeof(REAL)*numlines_in_file);
  REAL *rbar_arr       = (REAL *)malloc(sizeof(REAL)*numlines_in_file);
  // Read from the data file, filling in arrays.
  // read_datafile__set_arrays() may be found in TOV/tov_interp.h
  if(read_datafile__set_arrays(in1Dpolytrope, r_Schw_arr,rho_arr,rho_baryon_arr,P_arr,M_arr,expnu_arr,exp4phi_arr,rbar_arr) == 1) {
    fprintf(stderr,"ERROR WHEN READING FILE %s!\n",filename);
    exit(1);
  }
  fclose(in1Dpolytrope);
  // Locate the stellar surface: the last point where rho drops from positive
  // to zero defines the surface radius Rbar (in isotropic coordinates):
  REAL Rbar = -100;
  int Rbar_idx = -100;
  for(int i=1;i<numlines_in_file;i++) {
    if(rho_arr[i-1]>0 && rho_arr[i]==0) { Rbar = rbar_arr[i-1]; Rbar_idx = i-1; }
  }
  if(Rbar<0) {
    fprintf(stderr,"Error: could not find rbar=Rbar from data file.\n");
    exit(1);
  }
  // Package the file data for the interpolation routines:
  ID_inputs TOV_in;
  TOV_in.Rbar     = Rbar;
  TOV_in.Rbar_idx = Rbar_idx;
  const int interp_stencil_size = 12;
  TOV_in.interp_stencil_size = interp_stencil_size;
  TOV_in.numlines_in_file    = numlines_in_file;
  TOV_in.r_Schw_arr     = r_Schw_arr;
  TOV_in.rho_arr        = rho_arr;
  TOV_in.rho_baryon_arr = rho_baryon_arr;
  TOV_in.P_arr          = P_arr;
  TOV_in.M_arr          = M_arr;
  TOV_in.expnu_arr      = expnu_arr;
  TOV_in.exp4phi_arr    = exp4phi_arr;
  TOV_in.rbar_arr       = rbar_arr;
  /* END TOV INPUT ROUTINE */
  // Step 1.b: Interpolate data from data file to set BSSN gridfunctions
  ID_BSSN__ALL_BUT_LAMBDAs(params,xx,TOV_in, in_gfs);
  apply_bcs_curvilinear(params, bcstruct, NUM_EVOL_GFS, evol_gf_parity, in_gfs);
  enforce_detgammabar_constraint(rfmstruct, params, in_gfs);
  // The lambda^i gridfunctions are set only after the other BSSN quantities
  // (including ghost zones) are filled in above — presumably because they are
  // computed via finite differences of those quantities; see ID_BSSN_lambdas.h.
  ID_BSSN_lambdas(params, xx, in_gfs);
  apply_bcs_curvilinear(params, bcstruct, NUM_EVOL_GFS, evol_gf_parity, in_gfs);
  enforce_detgammabar_constraint(rfmstruct, params, in_gfs);
  // Finally set the stress-energy tensor gridfunctions:
  ID_TOV_TUPMUNU_xx0xx1xx2(params,xx,TOV_in,auxevol_gfs);
  // Free ALL eight arrays allocated above. BUGFIX: the original version leaked
  // r_Schw_arr and exp4phi_arr by omitting their free() calls.
  free(r_Schw_arr);
  free(rho_arr);
  free(rho_baryon_arr);
  free(P_arr);
  free(M_arr);
  free(expnu_arr);
  free(exp4phi_arr);
  free(rbar_arr);
}
// Step P11: Declare function for evaluating Hamiltonian constraint (diagnostic)
#include "Hamiltonian_constraint.h"
// main() function:
// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates
// Step 1: Set up initial data to an exact solution
// Step 2: Start the timer, for keeping track of how fast the simulation is progressing.
// Step 3: Integrate the initial data forward in time using the chosen RK-like Method of
// Lines timestepping algorithm, and output periodic simulation diagnostics
// Step 3.a: Output 2D data file periodically, for visualization
// Step 3.b: Step forward one timestep (t -> t+dt) in time using
// chosen RK-like MoL timestepping algorithm
// Step 3.c: If t=t_final, output conformal factor & Hamiltonian
// constraint violation to 2D data file
// Step 3.d: Progress indicator printing to stderr
// Step 4: Free all allocated memory
int main(int argc, const char *argv[]) {
paramstruct params;
#include "set_Cparameters_default.h"
// Step 0a: Read command-line input, error out if nonconformant
if((argc != 4) || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < 2 /* FIXME; allow for axisymmetric sims */) {
fprintf(stderr,"Error: Expected three command-line arguments: ./BrillLindquist_Playground Nx0 Nx1 Nx2,\n");
fprintf(stderr,"where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n");
fprintf(stderr,"Nx[] MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS);
exit(1);
}
// Step 0b: Set up numerical grid structure, first in space...
const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) };
if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) {
fprintf(stderr,"Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n");
fprintf(stderr," For example, in case of angular directions, proper symmetry zones will not exist.\n");
exit(1);
}
// Step 0c: Set free parameters, overwriting Cparameters defaults
// by hand or with command-line input, as desired.
#include "free_parameters.h"
// Step 0d: Uniform coordinate grids are stored to *xx[3]
REAL *xx[3];
// Step 0d.i: Set bcstruct
bc_struct bcstruct;
{
int EigenCoord = 1;
// Step 0d.ii: Call set_Nxx_dxx_invdx_params__and__xx(), which sets
// params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the
// chosen Eigen-CoordSystem.
set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, ¶ms, xx);
// Step 0d.iii: Set Nxx_plus_2NGHOSTS_tot
#include "set_Cparameters-nopointer.h"
const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
// Step 0e: Find ghostzone mappings; set up bcstruct
#include "boundary_conditions/driver_bcstruct.h"
// Step 0e.i: Free allocated space for xx[][] array
for(int i=0;i<3;i++) free(xx[i]);
}
// Step 0f: Call set_Nxx_dxx_invdx_params__and__xx(), which sets
// params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the
// chosen (non-Eigen) CoordSystem.
int EigenCoord = 0;
set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, ¶ms, xx);
// Step 0g: Set all C parameters "blah" for params.blah, including
// Nxx_plus_2NGHOSTS0 = params.Nxx_plus_2NGHOSTS0, etc.
#include "set_Cparameters-nopointer.h"
const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
// Step 0j: Error out if the number of auxiliary gridfunctions outnumber evolved gridfunctions.
// This is a limitation of the RK method. You are always welcome to declare & allocate
// additional gridfunctions by hand.
if(NUM_AUX_GFS > NUM_EVOL_GFS) {
fprintf(stderr,"Error: NUM_AUX_GFS > NUM_EVOL_GFS. Either reduce the number of auxiliary gridfunctions,\n");
fprintf(stderr," or allocate (malloc) by hand storage for *diagnostic_output_gfs. \n");
exit(1);
}
// Step 0k: Allocate memory for gridfunctions
#include "MoLtimestepping/RK_Allocate_Memory.h"
REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot);
// Step 0l: Set up precomputed reference metric arrays
// Step 0l.i: Allocate space for precomputed reference metric arrays.
#include "rfm_files/rfm_struct__malloc.h"
// Step 0l.ii: Define precomputed reference metric arrays.
{
#include "set_Cparameters-nopointer.h"
#include "rfm_files/rfm_struct__define.h"
}
// Step 1: Set up initial data to an exact solution
initial_data(¶ms,&bcstruct, &rfmstruct, xx, auxevol_gfs, y_n_gfs);
// Step 1b: Apply boundary conditions, as initial data
// are sometimes ill-defined in ghost zones.
// E.g., spherical initial data might not be
// properly defined at points where r=-1.
apply_bcs_curvilinear(¶ms, &bcstruct, NUM_EVOL_GFS,evol_gf_parity, y_n_gfs);
enforce_detgammabar_constraint(&rfmstruct, ¶ms, y_n_gfs);
// Evaluate Hamiltonian constraint violation
Hamiltonian_constraint(&rfmstruct, ¶ms, y_n_gfs,auxevol_gfs, diagnostic_output_gfs);
char filename[100];
sprintf(filename,"out%d.txt",Nxx[0]);
FILE *out2D = fopen(filename, "w");
LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS0-NGHOSTS,
NGHOSTS,Nxx_plus_2NGHOSTS1-NGHOSTS,
NGHOSTS,Nxx_plus_2NGHOSTS2-NGHOSTS) {
REAL xx0 = xx[0][i0];
REAL xx1 = xx[1][i1];
REAL xx2 = xx[2][i2];
REAL xCart[3];
xxCart(¶ms,xx,i0,i1,i2,xCart);
int idx = IDX3S(i0,i1,i2);
fprintf(out2D,"%e %e %e %e\n",xCart[1]/TOV_Mass,xCart[2]/TOV_Mass, y_n_gfs[IDX4ptS(CFGF,idx)],
log10(fabs(diagnostic_output_gfs[IDX4ptS(HGF,idx)])));
}
fclose(out2D);
// Step 4: Free all allocated memory
#include "rfm_files/rfm_struct__freemem.h"
#include "boundary_conditions/bcstruct_freemem.h"
#include "MoLtimestepping/RK_Free_Memory.h"
free(auxevol_gfs);
for(int i=0;i<3;i++) free(xx[i]);
return 0;
}
# -
# Compile the NRPy+-generated C driver and run it on a 96 x 96 x 2 grid
# (Nx0 Nx1 Nx2 on the command line); diagnostics are written to out96.txt.
# `os` and `Ccodesdir` are defined earlier in the notebook.
import cmdline_helper as cmd
cmd.C_compile(os.path.join(Ccodesdir,"TOV_Playground.c"), "TOV_Playground")
cmd.delete_existing_files("out96.txt")
cmd.Execute("TOV_Playground", "96 96 2", "out96.txt")
# <a id='plot'></a>
#
# # Step 6: Plotting the single-neutron-star initial data \[Back to [top](#toc)\]
# $$\label{plot}$$
#
# Here we plot the conformal factor of these initial data on a 2D grid, such that darker colors imply stronger gravitational fields. Hence, we see the single neutron star centered at the origin: $x/M=y/M=z/M=0$, where $M$ is an arbitrary mass scale (conventionally the [ADM mass](https://en.wikipedia.org/w/index.php?title=ADM_formalism&oldid=846335453) is chosen), and our formulation of Einstein's equations adopts $G=c=1$ [geometrized units](https://en.wikipedia.org/w/index.php?title=Geometrized_unit_system&oldid=861682626).
# +
import numpy as np
from scipy.interpolate import griddata
from pylab import savefig
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from IPython.display import Image
# out96.txt columns (written by the C driver): x/M, y/M, conformal factor,
# log10|Hamiltonian constraint violation|.
x96,y96,valuesCF96,valuesHam96 = np.loadtxt('out96.txt').T #Transposed for easier unpacking
# Square plotting window of half-width 7.5, in units of the mass scale M.
bounds = 7.5
pl_xmin = -bounds
pl_xmax = +bounds
pl_ymin = -bounds
pl_ymax = +bounds
# 100x100 uniform grid onto which the scattered samples are interpolated.
grid_x, grid_y = np.mgrid[pl_xmin:pl_xmax:100j, pl_ymin:pl_ymax:100j]
# Pack the scattered (x, y) sample coordinates for scipy.interpolate.griddata.
points96 = np.zeros((len(x96), 2))
for i in range(len(x96)):
    points96[i][0] = x96[i]
    points96[i][1] = y96[i]
# Interpolate the conformal factor with both nearest-neighbor and cubic methods.
grid96 = griddata(points96, valuesCF96, (grid_x, grid_y), method='nearest')
grid96cub = griddata(points96, valuesCF96, (grid_x, grid_y), method='cubic')
plt.clf()
# NOTE(review): the title says "Energy Density" but the quantity plotted below
# is the conformal factor (valuesCF96) — confirm the intended label.
plt.title("Neutron Star: log10( max(1e-6,Energy Density) )")
plt.xlabel("x/M")
plt.ylabel("y/M")
# fig, ax = plt.subplots()
# ax.plot(grid96cub.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax))
# plt.close(fig)
fig96cf = plt.imshow(grid96.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax))
cb = plt.colorbar(fig96cf)
savefig("BHB.png")
from IPython.display import Image
Image("BHB.png")
# # interpolation='nearest', cmap=cm.gist_rainbow)
# -
# <a id='convergence'></a>
#
# # Step 7: Validation: Convergence of numerical errors (Hamiltonian constraint violation) to zero \[Back to [top](#toc)\]
# $$\label{convergence}$$
#
# The equations behind these initial data solve Einstein's equations exactly, at a single instant in time. One reflection of this solution is that the Hamiltonian constraint violation should be exactly zero in the initial data.
#
# However, when evaluated on numerical grids, the Hamiltonian constraint violation will *not* generally evaluate to zero due to the associated numerical derivatives not being exact. However, these numerical derivatives (finite difference derivatives in this case) should *converge* to the exact derivatives as the density of numerical sampling points approaches infinity.
#
# In this case, all of our finite difference derivatives agree with the exact solution, with an error term that drops with the uniform gridspacing to the fourth power: $\left(\Delta x^i\right)^4$.
#
# Here, as in the [Start-to-Finish Scalar Wave (Cartesian grids) NRPy+ tutorial](Tutorial-Start_to_Finish-ScalarWave.ipynb) and the [Start-to-Finish Scalar Wave (curvilinear grids) NRPy+ tutorial](Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb) we confirm this convergence.
#
# First, let's take a look at what the numerical error looks like on the x-y plane at a given numerical resolution, plotting $\log_{10}|H|$, where $H$ is the Hamiltonian constraint violation:
# +
# Interpolate the Hamiltonian-constraint data (valuesHam96, already log10|H|)
# onto the same uniform 100x100 grid; reuses points96/grid_x/grid_y from above.
grid96 = griddata(points96, valuesHam96, (grid_x, grid_y), method='nearest')
grid96cub = griddata(points96, valuesHam96, (grid_x, grid_y), method='cubic')
# fig, ax = plt.subplots()
plt.clf()
plt.title("96^3 Numerical Err.: log_{10}|Ham|")
plt.xlabel("x/M")
plt.ylabel("y/M")
# The cubic interpolant gives a smoother picture of the error distribution.
fig96cub = plt.imshow(grid96cub.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax))
cb = plt.colorbar(fig96cub)
# -
# Next, we set up the same initial data but on a lower-resolution, $48\times 8\times 2$ grid (axisymmetric in the $\phi$ direction). Since the constraint violation (numerical error associated with the fourth-order-accurate, finite-difference derivatives) should converge to zero with the uniform gridspacing to the fourth power: $\left(\Delta x^i\right)^4$, we expect the constraint violation will increase (relative to the $96\times 16\times 2$ grid) by a factor of $\left(96/48\right)^4$. Here we demonstrate that indeed this order of convergence is observed as expected, *except* at the star's surface where the stress-energy tensor $T^{\mu\nu}$ sharply drops to zero.
# +
# Now rerun TOV_Playground with twice lower resolution (48x48x2) for the
# convergence test.
cmd.delete_existing_files("out48.txt")
cmd.Execute("TOV_Playground", "48 48 2", "out48.txt")
x48,y48,valuesCF48,valuesHam48 = np.loadtxt('out48.txt').T #Transposed for easier unpacking
# Pack scattered sample coordinates for griddata, as was done for the 96^2 data.
points48 = np.zeros((len(x48), 2))
for i in range(len(x48)):
    points48[i][0] = x48[i]
    points48[i][1] = y48[i]
grid48 = griddata(points48, valuesHam48, (grid_x, grid_y), method='cubic')
# Buffers for the pointwise (48 minus 96) difference and for 1D slices at y~0.
griddiff_48_minus_96 = np.zeros((100,100))
griddiff_48_minus_96_1darray = np.zeros(100*100)
gridx_1darray_yeq0 = np.zeros(100)
grid48_1darray_yeq0 = np.zeros(100)
grid96_1darray_yeq0 = np.zeros(100)
count = 0
outarray = []
for i in range(100):
    for j in range(100):
        griddiff_48_minus_96[i][j] = grid48[i][j] - grid96[i][j]
        griddiff_48_minus_96_1darray[count] = griddiff_48_minus_96[i][j]
        if j==49:
            # j==49 is the grid row nearest y=0 on the 100-point grid.
            gridx_1darray_yeq0[i] = grid_x[i][j]
            # Shift the low-res log10 error down by log10((48/96)^4): with
            # 4th-order convergence this curve overlaps the 96^2 curve.
            grid48_1darray_yeq0[i] = grid48[i][j] + np.log10((48./96.)**4)
            grid96_1darray_yeq0[i] = grid96[i][j]
        count = count + 1
plt.clf()
fig, ax = plt.subplots()
plt.title("Plot Demonstrating 4th-order Convergence")
plt.xlabel("x/M")
plt.ylabel("log10(Relative error)")
ax.plot(gridx_1darray_yeq0, grid96_1darray_yeq0, 'k-', label='Nr=96')
ax.plot(gridx_1darray_yeq0, grid48_1darray_yeq0, 'k--', label='Nr=48, mult by (48/96)^4')
ax.set_ylim([-12.5,1.5])
legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
legend.get_frame().set_facecolor('C1')
plt.show()
# -
# <a id='latex_pdf_output'></a>
#
# # Step 8: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_TOV_initial_data.pdf](Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_TOV_initial_data.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_TOV_initial_data")
|
Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_TOV_initial_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="te9-dtQ_Rsqd" outputId="0bab81df-6497-4c52-ac99-d583e7492263"
# !git clone https://github.com/CurtisASmith/comet-atomic-2020-gpt2-colab /content/comet-atomic-2020
# %cd /content/comet-atomic-2020
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="n6QGmc7MRuTY" outputId="c21b4fc0-1436-4178-efa3-8802de62327c"
pip install -r requirements.txt
# + id="KU8nNmQERxmK"
#before running this cell, create directory gpt2data and upload train dev and test datasets
import os
# Paths to the ATOMIC-2020 train/dev/test TSVs (upload them into gpt2data/
# first). Presumably consumed by comet_gpt2.py, which is run below — verify.
os.environ['TRAIN_DATA_PATH'] = '/content/comet-atomic-2020/gpt2data/train.tsv'
os.environ['DEV_DATA_PATH'] = '/content/comet-atomic-2020/gpt2data/dev.tsv'
os.environ['TEST_DATA_PATH'] = '/content/comet-atomic-2020/gpt2data/test.tsv'
os.environ['DO_TRAIN']='True'
# + colab={"base_uri": "https://localhost:8080/"} id="scTZsgThRzWn" outputId="117d333c-18e6-4faf-f094-cbb74d3ebdd2"
# !python /content/comet-atomic-2020/comet_gpt2.py
|
ATOMIC2020_GPT2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import numexpr as ne
from xwp.spectral_1d import propTF,propIR
from xwp.exact_1d import exact_prop_numba
from xwp_cython.prop1d import exact_prop_cython
# +
# Physical setup: 15 keV photons; wavelength in meters via lambda[nm] = 1240/E[eV].
energy = 15000
wavel = (1240/energy)*10**(-9)
pi = np.pi
# Propagation distance (m) and aperture/window width (m).
# NOTE: a dead `L_in = 5e-6` assignment preceded this in the original; it was
# shadowed before first use, so only the 5e-1 value below was ever effective.
z = 10e-6
L_in = 5e-1
# Grid sizes: input/output sampling for the spectral propagators, plus a finer
# grid used only to estimate the sampling interval for the TF/IR criterion.
N_in = 2500
N_out = 2500
N_in_exact = 25000
in_domain_exact = np.linspace(-L_in/2, L_in/2, N_in_exact)
# Square-aperture input wave: unit amplitude over the central quarter of the
# grid. (The original constructed this twice; the redundant first construction,
# which also wrapped np.zeros in an unnecessary np.array call, was removed.)
in_wave = np.zeros(N_in, dtype='complex128')
in_wave[int(N_in/2)-int(N_in/8):int(N_in/2)+int(N_in/8)] = 1
# Critical-sampling criterion: transfer-function (TF) propagation is preferred
# when the grid spacing exceeds wavel*z/L; otherwise impulse-response (IR).
sampling = in_domain_exact[1] - in_domain_exact[0]
critical = (wavel*z/L_in)
if sampling>critical:
    print('Use TF')
else :
    print('Use IR')
print('Fresnel Number :', (L_in**2)/(wavel*z))
# Propagate with the TF method, then allocate output-domain arrays used by the
# exact (reference) propagators benchmarked below.
out_,L_ = propTF(in_wave, L_in/N_in, L_in, wavel,z)
out_domain_ = np.linspace(-L_/2,L_/2,N_out)
out_wave_exact = np.zeros((N_out),dtype='complex128')
out_domain_exact = np.linspace(-L_/2,L_/2,N_out)
# -
# %timeit exact_prop_cython(in_wave, out_wave_exact, L_in, L_, wavel, z)
# %timeit exact_prop_numba(in_wave, out_wave_exact, L_in, L_, wavel, z)
|
test/compare_1d_cython.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 with Spark
# language: python3
# name: python36
# ---
# # Build a product recommendation engine
#
# 
#
# This notebook contains steps and code to create a recommendation engine based on shopping history and deploy that model to Watson Machine Learning. This notebook runs on Python 3.x with Apache Spark 2.3.
#
# ## Learning Goals
#
# The learning goals of this notebook are:
#
# * Load a CSV file into the Object Storage service linked to your Watson Studio
# * Use the *k*-means algorithm, which is useful for cluster analysis in data mining, to segment customers into clusters for the purpose of making an in-store purchase recommendation
# * Deploy the model to the IBM Watson Machine Learning service in IBM Cloud
# ## Table of contents
#
# 1. [Setup](#setup)<br>
# 2. [Load and explore data](#load)<br>
# 3. [Create a KMeans model](#kmeans)<br>
# 3.1. [Prepare data](#prepare_data)<br>
# 3.2. [Create clusters and define the model](#build_model)<br>
# 4. [Persist the model](#persist)<br>
# 5. [Deploy the model to the cloud](#deploy)<br>
# 5.1. [Create deployment for the model](#create_deploy)<br>
# 5.2. [Test model deployment](#test_deploy)<br>
# 6. [Create product recommendations](#create_recomm)<br>
# 6.1. [Test product recommendations model](#test_recomm)<br>
# 7. [Summary and next steps](#summary)<br>
# ## 1. Setup
#
#
# Before you use the sample code in this notebook, you must perform the following setup tasks:
#
# * Create a Watson Machine Learning service instance (a free plan is offered) and associate it with your project
# * Create a Cloud Object Storage service instance (a free plan is offered) and associate it with your project
#
# We'll be using a couple libraries for this exercise:
#
# 1. [Watson Machine Learning Client](http://wml-api-pyclient.mybluemix.net/): Client library to work with the Watson Machine Learning service on IBM Cloud. Library available on [pypi](https://pypi.org/project/watson-machine-learning-client/). Service available on [IBM Cloud](https://cloud.ibm.com/catalog/services/machine-learning).
# 1. [ibmos2spark](https://github.com/ibm-watson-data-lab/ibmos2spark): Facilitates Data I/O between Spark and IBM Object Storage services
!pip install --upgrade ibmos2spark
!pip install --upgrade watson-machine-learning-client
# <a id="load"></a>
# ## 2. Load and explore data
#
# In this section you will load and access the data file that contains the customer shopping data using [Cloud Object Storage in the notebook](https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/load-and-access-data.html):
#
# 1. Place cursor into the next cell (`# Generated Code Here`)
# 1. Click the **Find and Add Data** icon to open the Files and Connections side bar
# 1. Click **browse** and navigate to and select the `customers_orders1_opt.csv`
# 1. Click **Insert to code**
# 1. Select **SparkSession DataFrame**
#
# Code to download and import the CSV data into a Spark DataFrame is generated and added into the notebook cell.
#
# ```
# import ibmos2spark
# # @hidden_cell
# credentials = {
# 'endpoint': 'https://s3-api.us-geo.objectstorage.service.networklayer.com',
# 'service_id': '***',
# 'iam_service_endpoint': 'https://iam.ng.bluemix.net/oidc/token',
# 'api_key': '***'
# }
#
# configuration_name = 'os_7135ade4b1d24e67b69b610d4a20966c_configs'
# cos = ibmos2spark.CloudObjectStorage(sc, credentials, configuration_name, 'bluemix_cos')
#
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.getOrCreate()
# df_data_1 = spark.read\
# .format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\
# .option('header', 'true')\
# .load(cos.url('customers_orders1_opt.csv', '***'))
# df_data_1.take(5)
# ```
#
# Run the generated code.
#
# +
# Generated Code Here
import ibmos2spark
# @hidden_cell
# Cloud Object Storage credentials (redacted with ***); they identify the COS
# bucket that holds the customer-orders CSV.
credentials = {
    'endpoint': 'https://s3-api.us-geo.objectstorage.service.networklayer.com',
    'service_id': '***',
    'iam_service_endpoint': 'https://iam.ng.bluemix.net/oidc/token',
    'api_key': '***'
}
configuration_name = 'os_7135ade4b1d24e67b69b610d4a20966c_configs'
# `sc` is the SparkContext injected by the Watson Studio Spark kernel.
cos = ibmos2spark.CloudObjectStorage(sc, credentials, configuration_name, 'bluemix_cos')
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# Read the CSV (with header row) from Cloud Object Storage into a Spark DataFrame.
df_data_2 = spark.read\
    .format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\
    .option('header', 'true')\
    .load(cos.url('customers_orders1_opt.csv', '***'))
df_data_2.take(5)
# -
# <br>
#
# Update and set the `df` variable to the dataframe variable (e.g., `df_data_1`) created by the generated code
#
df = df_data_2
# <a id="kmeans"></a>
# ## 3. Create a *k*-means model with Spark
#
# In this section of the notebook you use the *k*-means implementation to associate every customer to a cluster based on their shopping history.
#
# First, import the Apache Spark Machine Learning packages ([MLlib](http://spark.apache.org/docs/2.2.0/api/python/pyspark.ml.html)) that you need in the subsequent steps:
#
from pyspark.ml import Pipeline
from pyspark.ml.clustering import KMeans
from pyspark.ml.clustering import KMeansModel
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.linalg import Vectors
# <a id="prepare_data"></a>
# ### 3.1 Prepare data
#
# Create a new data set with just the data that you need. Filter the columns that you want, in this case the customer ID column and the product-related columns. Remove the columns that you don't need for aggregating the data and training the model. Convert the column types from `StringType` to `IntegerType`:
# +
from pyspark.sql.types import IntegerType
# Here are the product cols. In a real world scenario we would query a product table, or similar.
product_cols = ['Baby Food', 'Diapers', 'Formula', 'Lotion', 'Baby wash', 'Wipes', 'Fresh Fruits', 'Fresh Vegetables', 'Beer', 'Wine', 'Club Soda', 'Sports Drink', 'Chips', 'Popcorn', 'Oatmeal', 'Medicines', 'Canned Foods', 'Cigarettes', 'Cheese', 'Cleaning Products', 'Condiments', 'Frozen Foods', 'Kitchen Items', 'Meat', 'Office Supplies', 'Personal Care', 'Pet Supplies', 'Sea Food', 'Spices']
# Here we get the customer ID and the products they purchased
df_filtered = df.select(['CUST_ID'] + product_cols)
# CSV columns load as strings; cast each product column to IntegerType so the
# counts can be summed and used as k-means features.
for c in product_cols:
    df_filtered = df_filtered.withColumn(c, df[c].cast(IntegerType()))
# -
# <br>
#
# View the filtered information:
df_filtered.show()
# Now, aggregate the individual transactions for each customer to get a single score per product, per customer.
# +
# Collapse per-transaction rows to one row per customer: group on CUST_ID and
# sum every product column (producing columns named 'sum(<product>)').
df_customer_products = df_filtered.groupby('CUST_ID').sum() # Use customer IDs to group transactions by customer and sum them up
# groupby().sum() also summed the ID column itself; drop that meaningless column.
df_customer_products = df_customer_products.drop('sum(CUST_ID)')
df_customer_products.show()
# -
# <a id="build_model"></a>
# ### 3.2 Create clusters and define the model
#
# Create 100 clusters with a *k*-means model based on the number of times a specific customer purchased a product.
#
# | No Clustering | Clustering |
# |------|------|
# |  |  |
#
# First, create a feature vector by combining the product and quantity columns:
# Combine every 'sum(<product>)' column into a single 'features' vector column.
assembler = VectorAssembler(inputCols=["sum({})".format(x) for x in product_cols],outputCol="features") # Assemble vectors using product fields
# Next, create the *k*-means clusters and the pipeline to define the model:
# k=100 clusters, fixed seed for reproducibility, cluster id written to 'cluster'.
kmeans = KMeans(maxIter=50, predictionCol="cluster").setK(100).setSeed(1) # Initialize model
pipeline = Pipeline(stages=[assembler, kmeans])
model = pipeline.fit(df_customer_products)
# Finally, calculate the cluster for each customer by running the original dataset against the *k*-means model:
df_customer_products_cluster = model.transform(df_customer_products)
df_customer_products_cluster.show()
# <a id="persist"></a>
# ## 4. Persist the model
#
# In this section you will learn how to store your pipeline and model in Watson Machine Learning repository by using Python client libraries.
# ### 4.1 Configure IBM Watson Machine Learning credentials
#
# To access your machine learning repository programmatically, you need to copy in your credentials, which you can see in your **IBM Watson Machine Learning** service details in IBM Cloud.
#
# > **IMPORTANT**: Update `apikey` and `instance_id` below. Credentials can be found on _Service Credentials_ tab of the Watson Machine Learning service instance created on the IBM Cloud.
# +
# @hidden_cell
# Watson Machine Learning service credentials (redacted with ***). Replace
# apikey/instance_id with values from the service's "Service credentials" tab.
wml_credentials = {
    "apikey": "***",
    "iam_apikey_description": "Auto-generated for key ***",
    "iam_apikey_name": "Service credentials-1",
    "iam_role_crn": "crn:v1:bluemix:public:iam::::serviceRole:Writer",
    "iam_serviceid_crn": "crn:v1:bluemix:public:iam-identity::a/***",
    "instance_id": "***",
    "url": "https://us-south.ml.cloud.ibm.com"
}
print(wml_credentials)
# -
# Connect to the Watson Machine Learning service using the provided credentials.
from watson_machine_learning_client import WatsonMachineLearningAPIClient
client = WatsonMachineLearningAPIClient(wml_credentials)
print(client.version)
# ### 4.2 Save the model
#
# #### Save the model to the Watson Machine Learning repository
#
# You use the Watson Machine Learning client's [Repository class](http://wml-api-pyclient.mybluemix.net/#repository) to store and manage models in the Watson Machine Learning service.
#
# > **NOTE**: You can also use Watson Studio to manage models. In this notebook we are using the client library instead.
# Rename CUST_ID to 'label' for the repository call. NOTE(review): presumably
# the WML store_model API expects a 'label' column in training_data — verify.
train_data = df_customer_products.withColumnRenamed('CUST_ID', 'label')
# > **TIP**: Update the cell below with your name, email, and name you wish to give to your model.
model_props = {client.repository.ModelMetaNames.AUTHOR_NAME: "IBM",
client.repository.ModelMetaNames.NAME: "Shopping Recommendation Engine"}
# Store the fitted pipeline model (plus metadata) in the WML repository.
published_model = client.repository.store_model(model=model, pipeline=pipeline, meta_props=model_props, training_data=train_data)
# > **NOTE**: You can delete a model from the repository by calling `client.repository.delete`.
# #### Display list of existing models in the Watson Machine Learning repository
client.repository.list_models()
# #### Display information about the saved model
import json
# Look up the UID assigned at store time and dump the stored metadata as JSON.
saved_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(saved_model_uid)
print(json.dumps(model_details, indent=2))
# <a id="deploy"></a>
# ## 5. Deploy model to the IBM cloud
#
# You use the Watson Machine Learning client's [Deployments class](http://wml-api-pyclient.mybluemix.net/#deployments) to deploy and score models.
#
# ### 5.1 Create an online deployment for the model
#
# Create an online (REST-scoring) deployment of the stored model.
created_deployment = client.deployments.create(saved_model_uid, 'Shopping Recommendation Engine Deployment')
# ### 5.2 Retrieve the scoring endpoint for this model
scoring_endpoint = client.deployments.get_scoring_url(created_deployment)
print(scoring_endpoint)
# <a id="test_deploy"></a>
# ### 5.3 Test the deployed model
#
# To verify that the model was successfully deployed to the cloud, you'll specify a customer ID, for example customer 12027, to predict this customer's cluster against the Watson Machine Learning deployment, and see if it matches the cluster that was previously associated with this customer ID.
# Locally computed cluster for customer 12027; compared below against the
# cluster returned by the deployed model.
customer = df_customer_products_cluster.filter('CUST_ID = 12027').collect()
print("Previously calculated cluster = {}".format(customer[0].cluster))
# To determine the customer's cluster using Watson Machine Learning, you need to load the customer's purchase history. This function uses the local data frame to select every product field and the number of times that customer 12027 purchased a product.
from six import iteritems
def get_product_counts_for_customer(cust_id):
    """Look up one customer's aggregated purchase history.

    Returns a pair (fields, values): the 'sum(<product>)' column names and
    the matching purchase counts for the given customer ID. Both lists are
    empty if the customer is not found.
    """
    matches = df_customer_products.filter('CUST_ID = {}'.format(cust_id)).take(1)
    fields = []
    values = []
    for record in matches:
        for product in product_cols:
            column = 'sum({})'.format(product)
            fields.append(column)
            values.append(record[column])
    return (fields, values)
# This function takes the customer's purchase history and calls the scoring endpoint:
def get_cluster_from_watson_ml(fields, values):
    """Score one purchase-history vector against the WML deployment and
    return the predicted cluster index from the response payload."""
    payload = {'fields': fields, 'values': [values]}
    response = client.deployments.score(scoring_endpoint, payload)
    # The scored row echoes the input fields, then appends the feature vector
    # and the cluster assignment; index len(product_cols)+1 picks the cluster.
    cluster_index = len(product_cols)+1
    return response['values'][0][cluster_index]
# Finally, call the functions defined above to get the product history, call the scoring endpoint, and get the cluster associated to customer 12027:
product_counts = get_product_counts_for_customer(12027)
fields = product_counts[0]
values = product_counts[1]
print("Cluster calculated by Watson ML = {}".format(get_cluster_from_watson_ml(fields, values)))
# <a id="create_recomm"></a>
# ## 6. Create product recommendations
#
# Now you can create some product recommendations.
#
# First, run this cell to create a function that queries the database and finds the most popular items for a cluster. In this case, the **df_customer_products_cluster** dataframe is the database.
# This function gets the most popular clusters in the cell by grouping by the cluster column
def get_popular_products_in_cluster(cluster):
    """Rank products by total purchases within one cluster.

    Returns (product, total) pairs sorted by total in descending order,
    keeping only products purchased at least once.
    """
    members = df_customer_products_cluster.filter('cluster = {}'.format(cluster))
    # Sum per-customer totals across the whole cluster; the single result row
    # has doubly aggregated columns, hence the 'sum(sum(<product>))' names.
    totals_row = members.groupby('cluster').sum().rdd.collect()[0]
    scored = [(name, totals_row['sum(sum({}))'.format(name)]) for name in product_cols]
    ranked = sorted(scored, key=lambda pair: pair[1], reverse=True) # Sort by score
    return [pair for pair in ranked if pair[1] > 0]
# Now, run this cell to create a function that will calculate the recommendations based on a given cluster. This function finds the most popular products in the cluster, filters out products already purchased by the customer or currently in the customer's shopping cart, and finally produces a list of recommended products.
# This function takes a cluster and the quantity of every product already purchased or in the user's cart
from pyspark.sql.functions import desc
def get_recommendations_by_cluster(cluster, purchased_quantities):
    """Display cart contents, the cluster's popular products, and the top-10
    recommendations (popular in the cluster, not yet purchased).

    purchased_quantities is aligned index-for-index with product_cols.
    Output is via show()/print only; returns None.
    """
    # Existing customer products
    print('PRODUCTS ALREADY PURCHASED/IN CART:')
    in_cart = []
    for idx in range(0, len(product_cols)):
        if purchased_quantities[idx] > 0:
            in_cart.append((product_cols[idx], purchased_quantities[idx]))
    df_customer_products = sc.parallelize(in_cart).toDF(["PRODUCT","COUNT"])
    df_customer_products.show()
    # Get popular products in the cluster
    print('POPULAR PRODUCTS IN CLUSTER:')
    popular = get_popular_products_in_cluster(cluster)
    df_cluster_products = sc.parallelize(popular).toDF(["PRODUCT","COUNT"])
    df_cluster_products.show()
    # Filter out products the user has already purchased: left-outer join on
    # product name, keep rows with no match on the customer side.
    print('RECOMMENDED PRODUCTS:')
    join_condition = df_cluster_products['PRODUCT'] == df_customer_products['PRODUCT']
    candidates = df_cluster_products.alias('cl').join(df_customer_products.alias('cu'), join_condition, 'leftouter')
    recommended = candidates.filter('cu.PRODUCT IS NULL').select('cl.PRODUCT','cl.COUNT').sort(desc('cl.COUNT'))
    recommended.show(10)
# Next, run this cell to create a function that produces a list of recommended items based on the products and quantities in a user's cart. This function uses Watson Machine Learning to calculate the cluster based on the shopping cart contents and then calls the **get_recommendations_by_cluster** function.
# This function would be used to find recommendations based on the products and quantities in a user's cart
def get_recommendations_for_shopping_cart(products, quantities):
    """Recommend products given cart contents.

    products/quantities are parallel lists; any product not in the cart gets
    a count of zero. The cluster is predicted via the WML deployment.
    """
    fields = []
    values = []
    for name in product_cols:
        fields.append('sum({})'.format(name))
        if name in products:
            values.append(quantities[products.index(name)])
        else:
            values.append(0)
    cluster = get_cluster_from_watson_ml(fields, values)
    return get_recommendations_by_cluster(cluster, values)
# Run this cell to create a function that produces a list of recommended items based on the purchase history of a customer. This function uses Watson Machine Learning to calculate the cluster based on the customer's purchase history and then calls the **get_recommendations_by_cluster** function.
# This function is used to find recommendations based on the purchase history of a customer
def get_recommendations_for_customer_purchase_history(customer_id):
    """Recommend products based on a customer's stored purchase history."""
    fields, values = get_product_counts_for_customer(customer_id)
    cluster = get_cluster_from_watson_ml(fields, values)
    return get_recommendations_by_cluster(cluster, values)
# Now you can take customer 12027 and produce a recommendation based on that customer's purchase history:
get_recommendations_for_customer_purchase_history(12027)
# Now, take a sample shopping cart and produce a recommendation based on the items in the cart:
get_recommendations_for_shopping_cart(['Diapers','Baby wash','Oatmeal'],[1,2,1])
# ## <font color=green>Congratulations</font>, you've successfully created a recommendation engine and deployed it to the Watson Machine Learning service
#
# You can now switch to the Watson Machine Learning console to deploy the model and then test it in application, or continue within the notebook to deploy the model using the APIs.
|
examples/wml-product-recommendation-engine-complete.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import requests as rq
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from IPython.display import Image
import random
# Caffe training/test logs, pre-parsed into CSVs with NumIters/loss/accuracy columns.
train_log = pd.read_csv("/work/sagarj/Work/BellLabs/Data/VGG_beauty_multiclass_crossEntropy.log.train")
test_log = pd.read_csv("/work/sagarj/Work/BellLabs/Data/VGG_beauty_multiclass_crossEntropy.log.test")
test_log
# +
# Twin-axis figure: train/test loss on the left axis, test accuracy on the right.
_, ax1 = plt.subplots(figsize=(15, 10))
ax2 = ax1.twinx()
ax1.plot(train_log["NumIters"], train_log["loss"], alpha=0.4 )
ax1.plot(test_log["NumIters"], test_log["loss"], 'g')
ax2.plot(test_log["NumIters"], test_log["accuracy"], 'r')
ax1.set_xlabel('iteration', fontsize = 20)
ax1.set_ylabel('train loss' , fontsize = 20)
ax2.set_ylabel('test accuracy', fontsize = 20)
plt.title("Loss - accuracy plot for VGG-net for beauty" , fontsize = 20)
ax1.legend(["Train Loss" , "Test Loss"])
ax2.legend(["Accuracy"] , loc = 2)
# -
# Inputs: per-image Places-CNN feature rows, the 205 Places category names,
# and the list of image paths (image ID = file basename without extension).
PlaceFeats = "../Data/SVPlacesFeats.csv"
PlaceClasses = "../caffe_models/places/categoryIndex_places205.csv"
imageList = "../Data/streetViewPaths.txt"
#Feats = pd.read_csv(PlaceFeats , header = None)
Feats = np.genfromtxt(PlaceFeats,delimiter=',')
classes = pd.read_csv(PlaceClasses, header = None , delimiter = ' ')
with open(imageList,'r') as f:
    imgPaths = f.readlines()
# "/a/b/12345.jpg" -> "12345"
imgId = [k.strip().split('/')[-1].split('.')[0] for k in imgPaths]
classes.keys()
Feats.shape
# The last 205 columns of each row are the Places class scores; two equivalent
# ways to pick the top-scoring class indices for the first image.
np.argpartition(Feats[0][-205:], -3)[-3:]
Feats[0][-205:].argsort()[-3:][::-1]
# Map each image ID to its 5 highest-scoring scene labels.
top5ImgScenes = {}
for i in range(len(imgId)):
    topScenes = Feats[i][-205:].argsort()[-5:][::-1]
    labels = [classes.iloc[k][0] for k in topScenes]
    top5ImgScenes[imgId[i]] = labels
# Spot-check one image. NOTE: dict.keys()[N] indexing only works on Python 2
# (this notebook's kernel); keys() is a view on Python 3.
N = -501
top5ImgScenes[top5ImgScenes.keys()[N]]
imgDir = "/datasets/sagarj/streetView/PPImages/"
Image(imgDir + top5ImgScenes.keys()[N] + ".jpg")
# Everything except the trailing 205 class scores = the FC7 feature embedding.
fcFeats = Feats[:,:-205]
fcFeats.shape
# +
from sklearn.decomposition import PCA as sklearnPCA
# Reduce the FC7 features to 200 principal components.
sklearn_pca = sklearnPCA(n_components=200)
sklearn_transf = sklearn_pca.fit_transform(fcFeats)
# -
print(sklearn_pca.explained_variance_ratio_)
# Fix: the original read `pca.explained_variance_ratio_`, but `pca` was never
# defined (NameError) — the fitted estimator is `sklearn_pca`. This reports
# the total fraction of variance retained by the 200 components.
np.sum(sklearn_pca.explained_variance_ratio_)
sklearn_transf.shape
# +
from scipy.cluster.vq import kmeans,vq
from scipy.spatial.distance import minkowski
# Elbow analysis: run k-means on the PCA-reduced features for k = 1..11,
# recording the centroids and per-sample cluster assignments for each k.
centroid_list = []
id_list = []
for i in range(1 , 12):
    centroids,_ = kmeans(sklearn_transf,i)
    idx,_ = vq(sklearn_transf,centroids)
    centroid_list.append(centroids)
    id_list.append(idx)
# SSE_values[i] = total Minkowski (p=2) distance of every sample to its
# assigned centroid, for the run with k = i+1 clusters.
SSE_values = np.zeros(len(id_list))
for i in range(len(centroid_list)):
    for j in range(len(centroid_list[i])):
        vecs = sklearn_transf[id_list[i]==j,:]
        #print vecs.shape
        cent = centroid_list[i][j]
        SSE_1 = 0.0
        for vec in vecs:
            SSE_1 = SSE_1 + minkowski(vec,cent,2)
        # Fix: accumulate into the entry for this k-means run (index i).
        # The original wrote to SSE_values[j] — the cluster index — which
        # mixed distances from different-k runs into the wrong entries.
        SSE_values[i] = SSE_values[i] + SSE_1
# Convert each run's total distance into a mean per cluster (divide by k = i+1).
for i in range(len(SSE_values)):
    SSE_values[i] = SSE_values[i]/(i+1)
# -
fig, ax = plt.subplots()
fig.set_size_inches(20, 15)
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
x = np.arange(1,11)
plt.plot(x,SSE_values[:10] ,linewidth = 3.0)
plt.xlabel("Number of Clusters in FC7 space", fontsize = 25)
plt.ylabel("Mean Minkowski distance from cluster centroids", fontsize = 25)
plt.show()
|
notebooks/LearningVisualization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from collections import defaultdict
# !pwd
# files = !ls ../../../Paper/Supplementary\ Files/Results/*.csv
data = {file:pd.read_csv(file) for file in files}
# # Test robustness to other score aggregation approaches (e.g., z-scores)
batch_scores = ['PCR batch', 'Batch ASW', 'graph iLISI', 'graph connectivity', 'kBET']
bio_scores = ['NMI cluster/label', 'ARI cluster/label', 'Cell type ASW', 'isolated label F1', 'isolated label silhouette', 'graph cLISI', 'CC conservation', 'HVG conservation']
# +
def max_min_scale_cols(df):
    """Rescale every column of ``df`` linearly onto [0, 1] (min-max scaling)."""
    col_min = df.min()
    col_range = df.max() - col_min
    return (df - col_min) / col_range
def z_score(df):
    """Standardise each column of ``df`` to zero mean and unit sample variance."""
    centred = df - df.mean()
    return centred / df.std()
# -
# ## For RNA & Sims:
def test_ranking(df):
    """Re-aggregate metric scores with z-scores and compare the resulting
    method ranking to the original (min-max based) ranking.

    Args:
        df: results table with one row per method configuration; must contain
            'Method', 'Output', 'Overall Score' and a subset of the metric
            columns listed below ('Features'/'Scaling' are included in the
            method id when present).

    Returns:
        Tuple ``(rho, table)`` where ``rho`` is the Spearman correlation
        between the z-score ranking and the original row order, and ``table``
        maps each Method_id to both ranks.
    """
    from scipy.stats import spearmanr
    batch_scores = ['PCR batch', 'Batch ASW', 'graph iLISI', 'graph connectivity', 'kBET']
    bio_scores = ['NMI cluster/label', 'ARI cluster/label', 'Cell type ASW', 'isolated label F1', 'isolated label silhouette', 'graph cLISI', 'CC conservation', 'HVG conservation', 'trajectory conservation']
    # Only use the metrics actually present in this results table.
    batch_score_sub = [bs for bs in batch_scores if bs in df.columns]
    bio_score_sub = [bs for bs in bio_scores if bs in df.columns]
    df['Batch_Correction_zsc'] = z_score(df[batch_score_sub]).mean(1)
    df['Bio_conservation_zsc'] = z_score(df[bio_score_sub]).mean(1)
    # Same 40/60 batch/bio weighting as the published overall score.
    df['Overall_Score_zsc'] = 0.4*df['Batch_Correction_zsc'] + 0.6*df['Bio_conservation_zsc']
    id_cols = ['Method', 'Output', 'Features', 'Scaling'] if 'Features' in df.columns else ['Method', 'Output']
    df['Method_id'] = ['_'.join(df[id_cols].values[i]) for i in range(df.shape[0])]
    sorted_df = df[['Method_id', 'Overall_Score_zsc', 'Overall Score']].sort_values(by='Overall_Score_zsc', ascending=False)
    sorted_df['rank'] = list(range(sorted_df.shape[0]))
    # BUG FIX: the original referenced an undefined name `sorted_test` here and
    # raised NameError; the sorted table is `sorted_df`. The pre-sort index
    # (sorted_df.index) encodes the original ranking.
    test_statistic = spearmanr(sorted_df['rank'].values, sorted_df.index)[0]
    return (test_statistic, pd.DataFrame({'Method_id': sorted_df['Method_id'], 'rank_zsc': sorted_df['rank'].values, 'rank_init': sorted_df.index}))
for file in data.keys():
print(f'{file}')
test_ranking(data[file])
# ## For ATAC:
# +
# files = !ls ../../../Paper/Supplementary\ Files/Results/ATAC/*.csv
data_atac = {file:pd.read_csv(file) for file in files}
# -
for file in data_atac.keys():
print(f'{file}')
test_ranking(data_atac[file])
|
notebooks/analysis/Robustness_of_score_aggregation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
import sys
import pandas as pd
import numpy.matlib
import numpy as np
import scipy
import scipy.stats as stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
from sklearn import preprocessing
from sklearn import linear_model
import statsmodels
import statsmodels.api as sm
from statsmodels.distributions.empirical_distribution import ECDF
sns.set_context('talk')
sns.set_style('white')
sns.set_style('ticks')
# The full model with covariate correction is not used here (it will be added soon); a simple ridge correction is applied instead. The results are similar, though slightly less powered.
def Zcells(DGE):
    """Z-transform each column of a cells x genes DataFrame.

    Each column is centred to zero mean and divided by its population
    (ddof=0) standard deviation — the same transform as
    ``sklearn.preprocessing.scale(DGE, axis=0)`` used by the original, but
    without the sklearn dependency and without the dead ``DGE.copy()``.
    Zero-variance columns are left centred (divisor forced to 1) instead of
    producing NaNs, matching sklearn's behaviour.

    Args:
        DGE: pandas DataFrame of expression values (cells x genes).

    Returns:
        DataFrame of z-scores with the same index and columns as ``DGE``.
    """
    vals = np.asarray(DGE, dtype=float)
    mu = vals.mean(axis=0)
    sd = vals.std(axis=0)   # population std (ddof=0), as sklearn's scale() uses
    sd[sd == 0] = 1.0       # constant columns: avoid division by zero
    return pd.DataFrame((vals - mu) / sd, index=DGE.index, columns=DGE.columns)
def run_ridge(Y, X):
    """Fit one ridge regression per column of Y against the design matrix X.

    Args:
        Y: DataFrame of responses (samples x genes).
        X: DataFrame of covariates (samples x covariates).

    Returns:
        DataFrame of coefficients, indexed by Y's columns (genes) with one
        column per covariate (X's columns).
    """
    ridge = linear_model.Ridge(fit_intercept=True, max_iter=10000)
    ridge.fit(X, Y)
    return pd.DataFrame(ridge.coef_, index=Y.columns, columns=X.columns)
def shuffle_mat(X, Xother, Y):
    """Build a design matrix in which the rows of ``X`` are randomly permuted.

    The row labels of ``X`` are shuffled and the frame is re-aligned to
    ``Y``'s row order, scrambling the covariate values relative to the
    response while keeping ``Xother`` (e.g. QC covariates) intact.

    Args:
        X: DataFrame of covariates to permute (rows indexed like ``Y``).
        Xother: DataFrame of covariates to keep un-permuted.
        Y: DataFrame of responses whose index defines the row order.

    Returns:
        DataFrame with permuted ``X`` columns and original ``Xother`` columns,
        row-aligned to ``Y.index``.
    """
    # BUG FIX (minor): removed the dead `flag=0` local from the original.
    X_shuffle = X.copy()
    X_shuffle.index = np.random.permutation(X.index)
    X_shuffle = X_shuffle.loc[Y.index]
    return pd.concat([X_shuffle, Xother], axis=1)
def make_shufs(X, Xother, Y, shufnum=3):
    """Build a null distribution of ridge coefficients from shuffled designs.

    Fits the same ridge model as ``run_ridge`` on ``shufnum`` independently
    row-shuffled design matrices and stacks the coefficient tables.

    Args:
        X: DataFrame of covariates to permute on each iteration.
        Xother: DataFrame of covariates kept fixed (e.g. QC).
        Y: DataFrame of responses (samples x genes).
        shufnum: number of shuffled fits to perform (default 3).

    Returns:
        DataFrame of null coefficients, ``shufnum`` stacked copies of the
        gene x covariate coefficient table (empty frame if ``shufnum`` is 0).
    """
    # BUG FIX (minor): the original re-imported sklearn.linear_model inside the
    # loop and grew the result with pd.concat per iteration (quadratic); the
    # module is already imported at file level and one concat suffices.
    shuffled_fits = []
    for _ in range(shufnum):
        XF_shuffle = shuffle_mat(X, Xother, Y)
        lm = linear_model.Ridge(fit_intercept=True, max_iter=10000)
        lm.fit(XF_shuffle, Y)
        shuffled_fits.append(pd.DataFrame(lm.coef_, index=Y.columns, columns=XF_shuffle.columns))
    if not shuffled_fits:
        # Preserve the original behaviour for shufnum == 0: an empty frame.
        return pd.DataFrame()
    return pd.concat(shuffled_fits)
def fdr_colwise_coefs(B,B_shuf):
    """Convert a coefficient table into signed log10 FDR scores, column by column.

    For each column of ``B`` an empirical null distribution is taken from the
    matching column of ``B_shuf`` (coefficients fit on shuffled designs).
    Positive coefficients receive ``-log10(q)`` (larger = more significant
    positive effect); negative coefficients receive ``+log10(q)`` (more
    negative = more significant negative effect).

    Args:
        B: DataFrame of observed coefficients (genes x covariates).
        B_shuf: DataFrame of null coefficients with the same columns as ``B``.

    Returns:
        DataFrame shaped like ``B`` holding signed log10 FDR-corrected scores.
    """
    BFDR=B.copy()
    for col in BFDR.columns:
        curcol=B[col]
        curfdr=BFDR[col]
        # Empirical CDF of the null coefficients for this covariate.
        curecdf=ECDF(B_shuf[col])
        curcol_pos=curcol>0
        curcol_neg=curcol<0
        sign_col=np.sign(curcol)  # NOTE(review): computed but never used
        # Right-tail empirical p-values for positive coefficients, then
        # Benjamini-Hochberg correction (fdrcorrection0 returns (reject, qvals)).
        # NOTE(review): 1.0-ECDF can be exactly 0, making -log10 infinite — confirm acceptable.
        curfdr[curcol_pos]=-np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(1.0-curecdf(curcol[curcol_pos]))[1])
        # Left-tail p-values for negative coefficients; the sign is encoded by
        # using +log10 so negative effects come out negative.
        curfdr[curcol_neg]=np.log10(statsmodels.sandbox.stats.multicomp.fdrcorrection0(curecdf(curcol[curcol_neg]))[1])
        BFDR[col]=curfdr
    return BFDR
PATH2DATA='CHANGETHIS'
DGE=pd.read_csv(PATH2DATA+'scOSTKO_Pilot.txt.gz',sep='\t',index_col=0)
DGE.head()
labels=pd.read_csv(PATH2DATA+'scOSTKO_labels.txt',sep='\t',index_col=0)
#signature genes, if desired
heat_shock_genes=['Hspa5','Pdia6','Hsp90b1','Dnajc3','Pdia3','Calr','Manf','Hyou1','Herpud1','Pdia4','Dnajb11']
downregulated_genes=['Tnf','Osm','Cxcl1','Il1a']
Y=DGE.T
# +
X=labels
X.index=Y.index
X['Perturbation']=[0 if 'WT' in x else 1 for x in list(X['Perturbation'])]
# Genes Detected use here
X['qc']=Zcells(pd.DataFrame((Y>0).sum(axis=1)))
# -
X.head()
print('Knockout cells = '+str(np.sum(labels['Perturbation']==1))+', Wildtype cells= '+str(np.sum(labels['Perturbation']==0)))
#Simple Linear Model Calculation of Differential Expression
Be=run_ridge(Y,X)
Be_shufs=make_shufs(pd.DataFrame(X['Perturbation']),pd.DataFrame(X['qc']),Y,shufnum=10)
BFDR=fdr_colwise_coefs(Be,Be_shufs)
ost_de=pd.read_csv(PATH2DATA+'ost_vs_NT_T2.csv',index_col=0)
ost_de['sl10']=np.sign(ost_de['log2FoldChange'])*-np.log10(ost_de['padj'])
print(len(ost_de))
plt.scatter(ost_de['log2FoldChange'],np.abs(ost_de['sl10']),alpha=0.5)
plt.annotate('Tnf',(ost_de.loc['Tnf']['log2FoldChange'],np.abs(ost_de.loc['Tnf']['sl10'])))
plt.annotate('Hspa5',(ost_de.loc['Hspa5']['log2FoldChange'],np.abs(ost_de.loc['Hspa5']['sl10'])))
plt.axvline(0.2)
plt.axvline(-0.2)
plt.axhline(1.3)
plt.title('Parnas et al 2015, Population DESeq2')
plt.xlabel('Log2(Fold Change)')
plt.ylabel('Log10(Q-Value)')
# Join the single-cell scores with the population DESeq2 results.
COMBINED_TABLE = BFDR.join(ost_de).dropna()
# Cap the signed log10 q-values so a few extreme genes don't dominate the plot.
max_cap = 20
# BUG FIX: DataFrame.ix was deprecated and removed in pandas 1.0; .loc performs
# the same boolean-row / label-column assignment.
COMBINED_TABLE.loc[COMBINED_TABLE['Perturbation'] > max_cap, 'Perturbation'] = max_cap
COMBINED_TABLE.loc[COMBINED_TABLE['Perturbation'] < -max_cap, 'Perturbation'] = -max_cap
COMBINED_TABLE.loc[COMBINED_TABLE['sl10'] > max_cap, 'sl10'] = max_cap
COMBINED_TABLE.loc[COMBINED_TABLE['sl10'] < -max_cap, 'sl10'] = -max_cap
sig_thresh=1.3
pop_sigs=COMBINED_TABLE['sl10'].abs()>sig_thresh
print(np.sum(pop_sigs))
plt.scatter(COMBINED_TABLE['Perturbation'][pop_sigs],COMBINED_TABLE['sl10'][pop_sigs],alpha=0.5)
plt.annotate('Tnf',(COMBINED_TABLE.loc['Tnf']['Perturbation'],COMBINED_TABLE.loc['Tnf']['sl10']))
plt.annotate('Hspa5',(COMBINED_TABLE.loc['Hspa5']['Perturbation'],COMBINED_TABLE.loc['Hspa5']['sl10']))
plt.axvline(sig_thresh,c='black')
plt.axvline(-sig_thresh,c='black')
plt.axhline(sig_thresh,c='black')
plt.axhline(-sig_thresh,c='black')
plt.xlabel('scRNA-seq Signed Log10(q-value)')
plt.ylabel('Population DESeq2 Signed Log10(q-value)')
#Concordance between population RNA-seq and scRNA-seq
np.mean(np.sign(COMBINED_TABLE[pop_sigs]['sl10'])==np.sign(COMBINED_TABLE[pop_sigs]['Perturbation']))
cellvec=np.round(np.linspace(5,200,12))
cellvec_sig=np.round(np.linspace(2,60,22))
wtcells=X[X['Perturbation']==0].index
ostcells=X[X['Perturbation']==1].index
sig_poptot=ost_de[np.abs(ost_de['sl10'])>1.3].index
genes_detected=(Y>0).sum()
these_genes=Y.columns[genes_detected>100]
ost_de=ost_de.loc[list(ost_de.index.intersection(these_genes))]
sig_popsub=ost_de[np.abs(ost_de['sl10'])>1.3].index
(len(ost_de),np.sum(ost_de['sl10']>1.3),np.sum(ost_de['sl10']<(-1.3)))
# +
#evaluate the significance of the heatshock signature as a function of cells
MULTI_hs=[]
for j in range(10):
DOWNSAMPLE=[]
print(j)
for i in range(len(cellvec_sig)):
thesecells=[]
theseostcells=np.random.choice(ostcells,int(cellvec_sig[i]),replace=False)
thesecells.extend(wtcells)
thesecells.extend(theseostcells)
Xsample=X.loc[thesecells]
Ysample=Y.loc[thesecells]
Be_nocells=run_ridge(Ysample,Xsample)
Be_shufs=make_shufs(pd.DataFrame(Xsample['Perturbation']),pd.DataFrame(Xsample['qc']),Ysample,shufnum=5)
BFDR=fdr_colwise_coefs(Be_nocells,Be_shufs)
BFDR_hs=BFDR.loc[heat_shock_genes]['Perturbation']
#set maximum limits on p-value
BFDR_hs[BFDR_hs<0]=0
BFDR_hs[BFDR_hs>4]=4
#use Fisher's combined p-value method to evaluate the significance of a signature
DOWNSAMPLE.append(-np.log10(scipy.stats.combine_pvalues(np.power(10,-BFDR_hs))[1]))
MULTI_hs.append(DOWNSAMPLE)
# +
#plot P-values vs. Number of Cells
df_hs=pd.DataFrame(MULTI_hs)
y=np.array(df_hs.mean(axis=0))
ystd=np.array(df_hs.std(axis=0))
plt.plot(cellvec_sig,y)
plt.fill_between(cellvec_sig, y-ystd, y+ystd,alpha=0.5)
plt.axhline(2,c='black')
#maximum p-value
plt.axhline(-np.log10(scipy.stats.combine_pvalues([1e-4]*len(heat_shock_genes))[1]),c='black')
plt.title('Heat Shock Signature P-value Recovery vs. Number of Cells')
plt.xlabel('Number of Cells')
plt.ylabel('Significance of Combined Signature Log10(P-value)')
# -
pop_thresh=2
sc_thresh=3
# +
#Perform independent downsamplings
# For each downsampling size, estimate the sensitivity (true-positive rate) of
# the single-cell ridge DE calls against the population DESeq2 calls.
niter = 10
MULTI_tp = pd.DataFrame(np.zeros((niter, len(cellvec))))
for j in range(niter):
    DOWNSAMPLE = pd.DataFrame()
    print(j)
    for i in range(len(cellvec)):
        # Sample an equal number of KO and WT cells without replacement.
        thesecells = []
        theseostcells = np.random.choice(ostcells, int(cellvec[i]), replace=False)
        thesewtcells = np.random.choice(wtcells, int(cellvec[i]), replace=False)
        thesecells.extend(thesewtcells)
        thesecells.extend(theseostcells)
        Xsample = X.loc[thesecells]
        Ysample = Y.loc[thesecells]
        Be_nocells = run_ridge(Ysample, Xsample)
        Be_shufs = make_shufs(pd.DataFrame(Xsample['Perturbation']), pd.DataFrame(Xsample['qc']), Ysample, shufnum=10)
        BFDR = fdr_colwise_coefs(Be_nocells, Be_shufs)
        BIG_TABLE = BFDR.join(ost_de).dropna()
        pop_pos = BIG_TABLE['sl10'] > pop_thresh
        pop_neg = BIG_TABLE['sl10'] < (-pop_thresh)
        sc_pos = BIG_TABLE['Perturbation'] > sc_thresh
        sc_neg = BIG_TABLE['Perturbation'] < -sc_thresh
        # TPR = concordant significant calls / population-significant calls.
        tpr = np.divide(1.0*np.sum(np.logical_and(pop_pos, sc_pos)) + np.sum(np.logical_and(pop_neg, sc_neg)), np.sum(pop_pos) + np.sum(pop_neg))
        DOWNSAMPLE[i] = [tpr]
    # BUG FIX: removed `df_effectsize=pd.concat([df_effectsize,tmpeffect])` —
    # neither name is defined in this notebook (leftover from elsewhere) and the
    # line raised NameError. Also removed the unused `infovec` local, and
    # replaced the removed pandas .ix indexer with positional .iloc.
    MULTI_tp.iloc[j, :] = np.array(DOWNSAMPLE).ravel()
# +
y=np.array(MULTI_tp.mean(axis=0))
ystd=np.array(MULTI_tp.std(axis=0))
plt.plot(cellvec,y)
plt.fill_between(cellvec, y-ystd, y+ystd,alpha=0.5)
plt.ylim([-0.01,1.0])
plt.xlabel('Number of Cells')
plt.ylabel('Sensitivity')
plt.title('Ridge Regression Sensitivity vs. cell number')
|
Power_Analysis_DOE/ost_ko_comparison.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# necessary packages #
#cd("./Documents/Github/Multi_NNGP/RDA/projects/Conj/")
using Distributions
using Random
using Distances
using LinearAlgebra
using SparseArrays
using IterativeSolvers
using ProgressMeter
using JLD2
using DataFrames
using Dates
include("../../../util2.j")
# +
# Packages for diagnostic test #
using StatsPlots
using MCMCChains
using PrettyTables
# -
@load "../../data/conj_data/conj_data_expanded.jld"
@load "../../results/conj/conj_resp_results.jld"
N
NM
# +
# CV choose 13 14 15 16#
# (5973.540549 + 5928.378866 + 23569.583394 + 25259.021231)/60
# -
# # MCMC Chain check
β_pos_sam = Array{Float64, 3}(undef, N_sam, p * q, 1);
β_pos_sam[:, :, 1] = hcat(β_sam[1, 1, :], β_sam[1, 2, :], β_sam[2, 1, :], β_sam[2, 2, :]);
β_chain = Chains(β_pos_sam);
pβ = plot(β_chain)
truncindex = 1;#Integer(trunc(N_sam / 2));
Σ_pos_sam = Array{Float64, 3}(undef, N_sam - truncindex + 1, q * q, 1);
Σ_pos_sam[:, :, 1] = hcat(Σ_sam[1, 1, truncindex:N_sam], Σ_sam[1, 2, truncindex:N_sam],
Σ_sam[2, 1, truncindex:N_sam], Σ_sam[2, 2, truncindex:N_sam]);
Σ_chain = Chains(Σ_pos_sam);
pΣ = plot(Σ_chain)
# # Posterior Inference
# +
# Count the coverage of the 95% credible intervals: for each response j, how
# many held-out observations fall inside [Y_m_pos_qt[:, j, 1], Y_m_pos_qt[:, j, 3]].
# NOTE(review): the global name `count` shadows Base.count.
count = fill(0.0, 2);
for j in 1:q
    for i in 1:NM
        count[j] = count[j] +
            ((Y_m_pos_qt[i, j, 1] < Y_ord[U_indx[i], j]) &&
            (Y_m_pos_qt[i, j, 3] > Y_ord[U_indx[i], j]))
    end
end
count
# -
round.(count ./ NM, digits = 4)
round(sum(count) / (q * NM), digits = 4)
# calculate root mean square predictive error #
MSPE1 = mean((Y_ord[U_indx, 1] - Y_m_pos_mean[:, 1]).^2)
RMSPE1 = sqrt(MSPE1); round(RMSPE1, digits = 5)
MSPE2 = mean((Y_ord[U_indx, 2] - Y_m_pos_mean[:, 2]).^2)
RMSPE2 = sqrt(MSPE2); round(RMSPE2, digits = 5)
# calculate root mean square predictive error #
MSPE = mean((Y_ord[U_indx, :] - Y_m_pos_mean).^2)
RMSPE = sqrt(MSPE); round(RMSPE, digits = 5)
round.([RMSPE1 RMSPE2 RMSPE], digits = 5)
# MAE
MAE1 = mean(abs.(Y_ord[U_indx, 1] - Y_m_pos_mean[:, 1]))
MAE2 = mean(abs.(Y_ord[U_indx, 2] - Y_m_pos_mean[:, 2]))
MAE = mean(abs.(Y_ord[U_indx, :] - Y_m_pos_mean))
round.([MAE1 MAE2 MAE], digits = 5)
# CRPS
CRPS = [(sqrt(Y_m_pos_var[i, j]) * ( 1 /sqrt(π) -
2 * pdf(Normal(), (Y_ord[U_indx[i], j] - Y_m_pos_mean[i, j]) / sqrt(Y_m_pos_var[i, j])) -
((Y_ord[U_indx[i], j] - Y_m_pos_mean[i, j]) / sqrt(Y_m_pos_var[i, j])) *
(2* cdf(Normal(), (Y_ord[U_indx[i], j] - Y_m_pos_mean[i, j]) / sqrt(Y_m_pos_var[i, j])) - 1 )))
for i in 1:NM, j in 1:q];
round.([mean(CRPS[:, 1]) mean(CRPS[:, 2]) mean(CRPS)], digits = 5)
#INT
INT = [((Y_m_pos_qt[i, j, 3] - Y_m_pos_qt[i, j, 1]) +
(2 / 0.05)*(Y_m_pos_qt[i, j, 1] - Y_ord[U_indx[i], j]) * (Y_ord[U_indx[i], j] < Y_m_pos_qt[i, j, 1]) +
(2 / 0.05)*(Y_ord[U_indx[i], j] - Y_m_pos_qt[i, j, 3]) * (Y_ord[U_indx[i], j] > Y_m_pos_qt[i, j, 3]))
for i in 1: NM, j in 1:q];
round.([mean(INT[:, 1]) mean(INT[:, 2]) mean(INT)], digits = 5)
# Posterior summary: mean, median and central 95% interval for each parameter.
N_Inf_burn = 1;
# One row of the table: mean followed by the 50%, 2.5% and 97.5% quantiles.
post_stats(v) = vcat(mean(v), quantile(v, [0.5, 0.025, 0.975]));
# Post-burn-in draws of entry (i, j) of a sample array.
burned(A, i, j) = A[i, j, N_Inf_burn:N_sam];
# The last three entries summarise (1/α - 1)Σ — presumably the implied noise
# covariance under the α parameterisation (TODO confirm against util2.j).
post_draws = [
    burned(β_sam, 1, 1), burned(β_sam, 1, 2), burned(β_sam, 2, 1), burned(β_sam, 2, 2),
    burned(Σ_sam, 1, 1), burned(Σ_sam, 1, 2), burned(Σ_sam, 2, 2),
    (1 / α_pick - 1.0) .* burned(Σ_sam, 1, 1),
    (1 / α_pick - 1.0) .* burned(Σ_sam, 1, 2),
    (1 / α_pick - 1.0) .* burned(Σ_sam, 2, 2)];
# Replaces ten copy-pasted vcat(mean, quantile) assignments in the original.
summary_table = vcat([post_stats(v)' for v in post_draws]...);
summary_table = round.(summary_table; digits = 4);
rnames = ["β[1, 1]", "β[1, 2]", "β[2, 1]", "β[2, 2]", "Σ[1, 1]", "Σ[1, 2]", "Σ[2, 2]",
        "(1/α - 1)Σ[1, 1]", "(1/α - 1)Σ[1, 2]", "(1/α - 1)Σ[2, 2]"];
summary_table = [rnames summary_table];
# NOTE(review): `markdown` is the positional table-format constant of the
# pre-1.0 PrettyTables API — confirm the pinned version before upgrading.
pretty_table(summary_table, ["" "mean" "median" "2.5%" "97.5%"], markdown)
ϕ_pick
α_pick
|
RDA/projects/Conj/Multi_conj_resp_real-summary.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# <table>
# <tr>
# <td width=15%><img src="./img/UGA.png"></img></td>
# <td><center><h1>Introduction to Python for Data Sciences</h1></center></td>
# <td width=15%><a href="http://www.iutzeler.org" style="font-size: 16px; font-weight: bold"><NAME></a><br/> Fall. 2018 </td>
# </tr>
# </table>
#
# <br/><br/><div id="top"></div>
#
# <center><a style="font-size: 40pt; font-weight: bold">Chap. 4 - Machine Learning with ScikitLearn </a></center>
#
# <br/>
#
# # ``4. Going Further``
#
# ---
# <a href="#style"><b>Package check and Styling</b></a><br/><br/><b>Outline</b><br/><br/>
# a) <a href="#furtherNew"> Creating new features/models</a><br/> b) <a href="#furtherVal"> Validation and Hyperparameters tuning</a><br/> c) <a href="#learVal"> Text and Image Features</a><br/> d) <a href="#furExo"> Exercises </a><br/>
# ## <a id="furtherNew"> a) Creating new features/models</a>
#
#
# <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p>
#
#
#
# We saw above how to transform categorical features. It is possible to modify them in a number of ways in order to create different model.
#
#
# For instance, from 1D point/value couples $(x,y)$, the linear regression fits a line. However, if we transform $x$ into $(x^1,x^2,x^3)$, the same linear regression will fit a 3-degree polynomial.
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
#import seaborn as sns
#sns.set()
N = 100 #points to generate
X = np.sort(10*np.random.rand(N, 1)**0.8 , axis=0) #abscisses
y = 4 + 0.4*np.random.rand(N) - 1. / (X.ravel() + 0.5)**2 - 1. / (10.5 - X.ravel() ) # some complicated function
plt.scatter(X,y)
# -
# Linear regression will obviously be a bad fit.
# +
from sklearn.linear_model import LinearRegression
model = LinearRegression().fit(X, y)
yfit = model.predict(X)
plt.scatter(X, y)
plt.plot(X, yfit,label="linear regression")
plt.legend()
# -
# Let us transform it into a 3-degree polynomial fit and perform the same linear regression.
# +
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=3, include_bias=False) # 3 degree without degree 0 (no constant)
XPoly = poly.fit_transform(X)
print(XPoly[:5,])
# -
modelPoly = LinearRegression().fit(XPoly, y)
yfitPoly = modelPoly.predict(XPoly)
plt.scatter(X, y)
plt.plot(X, yfit,label="linear regression")
plt.plot(X, yfitPoly,label="Polynomial regression (deg 3)")
plt.legend(loc = 'lower right')
# ### Pipeline
#
# This *2-step* fitting (Polynomial transform + Linear regression) calls for a replicated dataset which can be costly. That is why Scikit Learn implement a *pipeline* method that allows to perform multiple fit/transform sequentially.
#
# This pipeline can then be used as a model.
#
#
# +
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
polyFeat = PolynomialFeatures(degree=3, include_bias=False)
linReg = LinearRegression()
polyReg = Pipeline([ ('polyFeat',polyFeat) , ('linReg',linReg) ])
polyReg.fit(X, y) # X original not XPoly
yfitPolyNew = polyReg.predict(X)
plt.scatter(X, y)
plt.plot(X, yfit,label="linear regression")
plt.plot(X, yfitPolyNew,label="Polynomial regression (deg 3)")
plt.legend(loc = 'lower right')
# -
# ## <a id="furtherVal"> b) Validation and Hyperparameters tuning</a>
#
#
# <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p>
#
#
#
# We saw above (see the lasso example of the regression part) some basic examples of how to:
# * *validate our model* by splitting the dataset into training and testing set (using <tt>train_test_split</tt> from <tt>sklearn.model_selection</tt>)
# * *tune hyperparameter* by looking at the error for different values
#
# Scikit Learn actually provides some methods for that as well.
# ### Validation
#
# Scikit Learn offer a *cross validation* method that
# * split the dataset in several groups
# * for each of these groups, fit the model on all but this group and compute the error on this one
#
# This way all the data has gone through the learning and validating sets, hence the *cross* validation. This is illustrated by the following figure from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by <NAME>.
#
# 
#
# The score is computed either as the standard score of the estimator or can be specified with the <tt>scoring</tt> option (see the [available metrics](http://scikit-learn.org/stable/modules/model_evaluation.html#the-scoring-parameter-defining-model-evaluation-rules) ).
# <div id="warn"> <b>Warning:</b> All scorer objects follow the convention that higher return values are better than lower return values. </div>
#
#
# Let us compute the cross validation for our polynomial fit problem.
# +
from sklearn.model_selection import cross_val_score
cv_score = cross_val_score(polyReg, X, y, cv=5, scoring="neg_mean_absolute_error") # 5 groups cross validation
print(cv_score)
print("Mean score:" , np.mean(cv_score))
# -
# ### Grid Search
#
# Now that scoring and cross validation is done, we can focus on investigating the best parameters of our polynomial model:
# * degree
# * presence or not of an intercept
#
# Let us see which are the parameters of our model (as this is a pipeline, this might be interesting to use the <tt>get_params</tt> function).
polyReg.get_params()
# This enables to see the parameters corresponding to the quantities to fit:
# * degree: <tt>polyFeat__degree</tt>
# * presence or not of an intercept: <tt>linReg\__fit_intercept</tt> and <tt>polyFeat__include_bias</tt>
#
# We can now construct a *dictionary* of values to test.
param_grid = [
{'polyFeat__degree': np.arange(1,12),
'linReg__fit_intercept': [True,False],
'polyFeat__include_bias': [True,False]}]
# +
# BUG FIX: sklearn.grid_search was deprecated in 0.18 and removed in 0.20;
# GridSearchCV lives in sklearn.model_selection (already used above for
# cross_val_score).
from sklearn.model_selection import GridSearchCV
# 5-fold cross-validated grid search over the parameter grid defined above.
grid = GridSearchCV(polyReg, param_grid, cv=5)
grid.fit(X, y)
# -
# We can then get the best parameters and the corresponding model.
grid.best_params_
# +
best_model = grid.best_estimator_.fit(X,y)
overfit = polyReg.set_params(polyFeat__degree=15).fit(X,y)
Xplot = np.linspace(-1,10.5,100).reshape(-1, 1)
yBest = best_model.predict(Xplot)
yOver = overfit.predict(Xplot)
plt.scatter(X, y)
plt.plot(Xplot, yBest , 'r' ,label="Best polynomial")
plt.plot(Xplot, yOver , 'k' , label="overfitted (deg 15)")
plt.legend(loc = 'lower right')
plt.ylim([0,5])
plt.title("Best and overfitted models")
# -
# We notice that the grid search based on cross validation helped discarded overfitted models (as they were bad on validation sets).
# ## <a id="learVal"> c) Text and Image Features</a>
#
#
# <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p>
#
#
# We already saw an example of feature extraction from categorical data. However, for some particular categorical data, dedicated tools exist. For instance, text and images.
#
#
# ### Text feature extraction
#
#
# In Learning applications, words are usually more important than letters, so a basic way to extract features is to construct one feature per present word and count the occurrences of this word. This is known as *word count*. An approach to mitigate very frequent words (like "the" , "a" , etc.) is *term frequency inverse document frequency* (tf-idf), which weights the occurrence count by how often the word appears across documents.
#
f = open('./data/poems/poe-raven.txt', 'r')
poe = f.read().replace('\n',' ').replace('.','').replace(',','').replace('-','')
poe
# +
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer()
X = vec.fit_transform([poe])
X
# -
# The vectorizer has registered the feature names and outed a sparse matrix that can be converted to a Dataframe.
import pandas as pd
pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
# The tf-idf vectorizer works the same way.
# +
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer()
X = vec.fit_transform([poe])
pd.DataFrame(X.toarray(), columns=vec.get_feature_names())
# -
# For more details, see the [text feature extraction doc](http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction) and [image feature extraction doc](http://scikit-learn.org/stable/modules/feature_extraction.html#image-feature-extraction) as well as [scikit image](http://scikit-image.org/).
# ## <a id="furExo"> d) Exercises </a>
#
# <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p>
#
# <div class="exo"> <b>Exercise 4.4.1:</b> <i>Differentiating authors according to their word usage</i><br/>
#
# In the folder <tt>data/poems</tt> are three poems by <NAME> and three plays by <NAME>. Is it possible to differentiate these authors using only the word in their plays?
# </div>
# ---
# <div id="style"></div>
# ### Package Check and Styling
#
#
# <p style="text-align: right; font-size: 10px;"><a href="#top">Go to top</a></p>
#
# +
import lib.notebook_setting as nbs
packageList = ['IPython', 'numpy', 'scipy', 'matplotlib', 'cvxopt', 'pandas', 'seaborn', 'sklearn', 'tensorflow']
nbs.packageCheck(packageList)
nbs.cssStyling()
|
4-4_Going_Further.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Welcome to the EXR Converter Demo!
# First, we will convert a sample FITS image to EXR format. To do this, we will use the fits2exr package. Import fits2exr (and exr2fits for later use) below.
from exrconverter import fits2exr, exr2fits
# Fits2exr contains one function, `convert()`, that performs all of the heavy lifting. The function header is copied here for convenience:
#
# `def convert(input_fits, output_exr, output_pixel_type=None):`
#
# The two required parameters are the name of the input FITS file and the name of the output EXR file. Optionally, an output pixel type can be included as well. Pixel options are:
#
# 1. `FLOAT16`
# 2. `FLOAT32`
# 3. `FLOAT64`
# 4. `INT8`
# 5. `INT16`
# 6. `INT32`
# 7. `UINT16`
#
# Because of the unique HDU formatting for FITS, only **2-D** images will be copied into the EXR file as different channels. The FITS header is much more sophisticated than that of EXR, so it is ported to EXR as a binary string. While this information is not available in EXR, it is converted back to a readable format when `exr2fits` is run.
#
# **TL/DR:**
# - A conversion from FITS to EXR and back to FITS will preserve the header and 2-D images from the original FITS file.
# - However, if `output_pixel_type` is smaller than the original fits type, the pixel data will be truncated.
fits2exr.convert('frame-r-006793-3-0127.fits', 'exr_demo_output.exr')
# An analogous function in exr2fits will convert EXR images to FITS format. The function header is:
#
# `def convert(input_exr, output_fits, output_pixel_type=None, verbose=True):`
#
# The two required parameters, `input_exr` and `output_fits`, specify the input EXR and output FITS files, and `output_pixel_type` can be specified optionally. Additionally, the `verbose` parameter will print warnings.
exr2fits.convert('exr_demo_output.exr', 'fits_demo_output.fits')
# ## Conversion from FITS -> EXR -> FITS
# We will compare `frame-r-006793-3-0127.fits` and `fits_demo_output.fits` to observe how the FITS file is modified by being stored in EXR.
#
# First, we will check the number and types of HDUs.
# +
from astropy.io import fits
# Open the original file and the FITS produced by the EXR round trip.
hdu_list_original = fits.open('frame-r-006793-3-0127.fits')
hdu_list_modified = fits.open('fits_demo_output.fits')
print(f'The original fits file has {len(hdu_list_original)} HDU(s).')
# BUG FIX: the second message also said "original"; it reports the converted file.
print(f'The converted fits file has {len(hdu_list_modified)} HDU(s).')
# -
# It is clear that only **one** HDU from the original FITS file survives the storage in EXR. We will check if this was the only 2-D image in the original FITS file.
# +
for hdu in hdu_list_original:
if hdu.is_image and len(hdu.data.shape) == 2:
print(f"The {hdu.name} HDU is a 2-D image.")
hdu_list_original.close()
hdu_list_modified.close()
# -
# Correct!
#
# We will now observe the truncation of FITS data if a smaller type is used to store in EXR.
# +
import numpy as np
# Run fits2exr with 16-bit float type
fits2exr.convert('frame-r-006793-3-0127.fits', 'exr_demo_output.exr', np.float16)
exr2fits.convert('exr_demo_output.exr', 'fits_demo_output.fits', np.float32)
# Open the two fits files again
hdu_list_original = fits.open('frame-r-006793-3-0127.fits')
hdu_list_modified = fits.open('fits_demo_output.fits')
image_original = hdu_list_original[0].data
image_modified = hdu_list_modified[0].data
print (f"The difference between pixels is {np.float32(image_original[0][0]) - np.float32(image_modified[0][0])}.")
|
examples/DEMO.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# \title{HP 34401 Controlled with Python via RS-232 without VISA}
# \author{<NAME>}
# \maketitle
# The goal of this program/notebook is to develop a program that can control and log a circa ~1990 benchtop/rack multimeter from HP (Agilent) without the need for any sort of VISA library. While a VISA library and a program based on it (see `pyvisa` for example) is convenient, the goal of this code is to show that it is not necessary. Although `pyvisa-py` can be used with a Raspberry Pi, there are two inconveniences with using VISA.
# 1. The instrument's commands must be in the VISA database that is being used, which is not always the case
# 2. With `pyvisa-py`, VISA can be used on Linux computers. But with direct command programming, an instrument can (in theory) interact with the `MicroPython` microcontroller platform
#
# While this code cannot be run as-is on `MicroPython`, it could be with some refactoring
#
# Needed hardware:
# + HP or Agilent 34401 Multimeter
# + USB to db9 RS-232 converter or a USB to db9 Null Modem RS-232 converter
# + RS-232 Null Modem if a dedicated USB to db9 Null Modem RS-232 converter is not used
# # Libraries used
# +
import serial
import pandas as pd
import numpy as np
import xarray as xr
import threading
import matplotlib.pyplot as plt
import serial #implements rs-232 com
import pandas as pd # data collection
import numpy as np
import threading #used for mutlti threading
import matplotlib.pyplot as plt
# -
# # Setting up the rs-232 communication ports
# + for a windows machine go to device manager setting and determine what com port the usb to rs-232 cable is connected to
#
# + for a mac machine: ????????
#
# + for a linux ubuntu machine open a terminal and review the the list of connections to the computer via typing in the terminal
# ```ls -l /dev/tty*```
# at the end of the list one should see ```/dev/ttyUSB``` with an ending number for the USB port. After selecting the appropriate port one will typically need to unlock the port via
# ``` sudo chmod 666 /dev/ttyUSB```
UnLockedPort='/dev/ttyUSB0'
# # Program
# + code_folding=[]
class MultiMeter34401A():
    """
    Control, query and log an HP (Agilent) 34401A bench multimeter over a
    plain RS-232 serial link, without any VISA library.

    The recorded readings and recorded instrument errors are stored in the
    following dataframes:
        self.Errors  -- columns: 'Last Command', 'Time', 'Error'
        self.Data    -- columns: 'Time', 'Value', 'Mode'
    """
    def __init__(self, Name):
        """
        Initiate some helpers and the data storage dataframes.

        Args:
            Name (str): a convenient name for this meter instance
        """
        self.Name = Name
        # DataFrame to store errors read back from the instrument
        self.Errors = pd.DataFrame(columns=['Last Command', 'Time', 'Error'])
        # DataFrame to store recordings
        self.Data = pd.DataFrame(columns=['Time', 'Value', 'Mode'])

    def MakeConnection(self, ULSerPort):
        """
        Start up the RS-232 serial connection via the python serial library
        and verify the connected instrument is a 34401A via its *IDN? reply.

        Args:
            ULSerPort (str): the (unlocked) serial port, e.g. '/dev/ttyUSB0'

        Raises:
            ValueError: if the *IDN? reply does not contain '34401A'
        """
        # model number used as a check of a correct connection
        MODEL = '34401A'
        # Pre-set the attribute so a failed open still reaches the model
        # check below with a ValueError instead of an AttributeError.
        self.IDN = ''
        try:
            self.MMSer = serial.Serial(ULSerPort, 9600, timeout=1)
            self.MMSer.write(b'*IDN?\n')
            self.IDN = self.MMSer.readline().decode("utf-8")[:-2]
        except Exception as ex:
            print(ex)
        if self.IDN.find(MODEL) == -1:
            raise ValueError(f'{self.IDN} not supported; this class only supports {MODEL}')

    def RemoteSetup(self):
        """
        Run a command sequence to put the instrument into remote mode.
        The multimeter can be taken out of remote by pressing Shift on the
        front panel, and can be brought back to remote mode by rerunning
        this method.

        Raises:
            ValueError: if any setup command is not written out in full
        """
        # Each command is paired with the byte count serial.write() should
        # report back if the whole command went out on the wire.
        C1 = b'*RST\n'; GR1 = 5                    # reset
        C2 = b'*CLS\n'; GR2 = 5                    # clear status
        C3 = b'*ESE 1\n'; GR3 = 7
        C4 = b'*SRE 32\n'; GR4 = 8
        C5 = b'SYST:REM\n'; GR5 = 9                # go to remote mode
        C6 = b'SYST:BEEP:STAT OFF\n'; GR6 = 19     # vain attempt to turn off the beeper
        ComandSeq = [C1, C2, C3, C4, C5, C6]
        ResponceSeq = [GR1, GR2, GR3, GR4, GR5, GR6]
        # run the command sequence and verify each write went out fully
        # (the original had an unreachable `break` after the raise; removed)
        for Comd, Res in zip(ComandSeq, ResponceSeq):
            if self.MMSer.write(Comd) != Res:
                raise ValueError(f'Remote Setup Error on command {Comd} ')
        print('Remote Connection Made')
        # *RST leaves the meter in DC volts, so mirror that in our state
        self.Mode = 'DC_V'

    def ModeSet(self, Mode='DC_V'):
        """
        Set the measurement mode of the 34401A using shorthand.

        Args:
            Mode (str; Def. 'DC_V'): Sets the mode of the 34401A; the
                available modes are:
                'DC_V': DC Voltage Reading,
                'DC_I': DC Current Reading,
                'AC_V': AC Voltage Reading,
                'AC_I': AC Current Reading,
                'Res2W': 2 Wire Resistance Reading,
                'Res4W': 4 Wire Resistance Reading,
                'Freq': AC Main Frequency Measurement,
                'Period': AC Main Period Measurement
        """
        C = {'DC_V': b'CONF:VOLT:DC\n',
             'DC_I': b'CONF:CURR:DC\n',
             'AC_V': b'CONF:VOLT:AC\n',
             'AC_I': b'CONF:CURR:AC\n',
             'Res2W': b'CONF:RES\n',
             'Res4W': b'CONF:FRES\n',
             'Freq': b'CONF:FREQ\n',
             'Period': b'CONF:PER\n'}
        try:
            self.MMSer.write(C[Mode])
            self.Mode = Mode
        except KeyError:
            print(f'Mode {Mode} is not a measurement mode of this instrument')

    def ErrorReadAct(self):
        """
        Read the error queue from the 34401A and record the errors. Also
        clears the `-410,"Query INTERRUPTED"` that occurs because of the
        slow reading-to-output transfer compared to the speed of modern
        computers.

        Return:
            the error string if a non-410 error occurred, otherwise None
        """
        # ask for the error
        if self.MMSer.write(b'SYST:ERR?\n') != 10:
            raise ValueError('Error State Not Readable!!!')
        # read the error
        ErrRes = self.MMSer.readline().decode("utf-8")[:-2]
        if ErrRes == '+0,"No error"':
            return None
        # the called-before-ready error that is ignored and cleared
        elif ErrRes == '-410,"Query INTERRUPTED"':
            # clear the buffer and ignore
            self.MMSer.write(b'*CLS\n')
        # for any other errors, record them and stop any autorun
        else:
            # 'Last Command', 'Time', 'Error'
            self.Errors.loc[self.Errors.shape[0], :] = ['', pd.Timestamp.now(), ErrRes]
            try:
                self.EndAutoRun()
            except Exception:
                # no autorun in progress; nothing to stop
                pass
            return ErrRes

    def QurreyProgress(self, debug=False):
        """
        Check the progress of getting the reading to the RS-232 output
        register using repetitive calls to the SCPI *OPC? routine.

        Arg:
            debug (bool; Def. False): if True prints whether the query is
                still transferring to the RS-232 output register
        Return:
            '1' once the information is ready on the RS-232 register
            (or the raw unexpected response, if any)
        Raises:
            ValueError: if *OPC? is not accepted, or if the operation does
                not complete within 10 polls; in the latter case any
                ongoing autorun is stopped first
        """
        # counter to act like a time out
        Counter = 0
        while True:
            # ask for the OPC
            if self.MMSer.write(b'*OPC?\n') != 6:
                raise ValueError('Operation Complet Command Not Excepted!!!')
            CompState = self.MMSer.readline().decode("utf-8")[:-2]
            if CompState == '':
                # empty reply: the reading has not reached the output yet
                if debug:
                    print('Qurrey in Prog')
            elif CompState == '1':
                if debug:
                    print('Qurry Complet')
                break
            else:
                # unexpected response; hand it back to the caller
                break
            # Time-out action. In the original this code was unreachable
            # (the counter never incremented); now a stuck query cancels
            # any autorun and raises after 10 polls.
            Counter += 1
            if Counter == 10:
                try:
                    self.EndAutoRun()
                except Exception:
                    pass
                raise ValueError('Operation is not Completing after 10 cyles!!!')
        return CompState

    def MakeMeasurment(self):
        """
        Make a measurement on the 34401A using the SCPI INIT / OPC / FETCH
        method where the OPC wait is done via `self.QurreyProgress`. If the
        reading is successful it is recorded to `self.Data`.
        """
        # acquire the reading on the 34401A
        if self.MMSer.write(b'INIT\n') != 5:
            raise ValueError('Get Mesurment Comand Not Taken!!!')
        # perform the OPC ready check
        self.QurreyProgress()
        # read the value from the 34401A
        if self.MMSer.write(b'FETCH?\n') != 7:
            raise ValueError('Fetch Measurment Comand Not Taken!')
        M = self.MMSer.readline(); T = pd.Timestamp.now()
        try:
            M = float(M.decode("utf-8")[:-2])
            # 'Time', 'Value', 'Mode'
            self.Data.loc[self.Data.shape[0], :] = [T, M, self.Mode]
        except Exception as ex:
            print(ex)
            self.ErrorReadAct()
        # always read/clear any pending instrument error
        self.ErrorReadAct()

    def SetupLivePlot(self):
        """
        Internal method for creating the fig, ax for live plotting.
        """
        # %matplotlib notebook
        self.fig, self.ax = plt.subplots(nrows=1, ncols=1)

    def LivePlotData(self):
        """
        Perform the live plotting of the data from the instrument.
        """
        # if everything is set up, redraw the plot
        try:
            self.ax.clear()
            # Plot the values recorded in self.Data where the Mode of the
            # recorded data equals the current mode of the multimeter.
            # (Fixed: the original referenced the non-existent attribute
            # `self.Mo15de`, which raised AttributeError at runtime.)
            # TODO could be made into subplots for each mode
            # TODO should have splits in mode reflect in graph
            self.Data.where(self.Data['Mode'] == self.Mode).plot(x='Time', y='Value',
                title=self.Mode, ax=self.ax, legend=False)
            self.fig.canvas.draw()
        # if the figure does not exist yet, create it and rerun
        except AttributeError:
            self.SetupLivePlot()
            self.LivePlotData()

    def RunAction(self):
        """
        The actions that should happen during an autorun event.
        """
        self.MakeMeasurment()
        self.LivePlotData()

    def AutoRun(self, IntervalStep=10, IntervalUnit='Min', ReGenPlot=False):
        """
        External method called by the user to initiate auto running of the
        instrument on a separate thread so that python can continue to
        perform more tasks.

        Args:
            IntervalStep (int): the interval at which the autorun's
                actions are done
            IntervalUnit (str; Default 'Min' {'Hour', 'Min', 'Sec'}): the
                unit of the autorun interval; e.g. IntervalStep=10,
                IntervalUnit='Min' performs the action every 10 minutes
            ReGenPlot (bool; Default False): True creates a new instance
                of the live plot, False reuses the old live plot
        """
        # recreate the plot in a new figure if requested
        if ReGenPlot:
            self.SetupLivePlot()
        # convert the timer interval to seconds
        InvertvalUnits = {'Hour': 60.0**2, 'Min': 60.0, 'Sec': 1.0}
        self.IntervalStep = float(IntervalStep * InvertvalUnits[IntervalUnit])
        # call the internal method to instantiate the autorunning
        self.AutoRunRun()

    def AutoRunRun(self):
        """
        Internal method to initiate the thread.

        Note:
            This method may be redundant but works; it will most likely be
            removed in the future. For now it gets the thread working due
            to the seemingly recursive nature of python's threading.Timer.
        """
        # call the run action
        self.RunAction()
        # create a thread timer and bind this method to the thread
        self.t = threading.Timer(self.IntervalStep, self.AutoRunRun)
        # start the thread
        self.t.start()

    def EndAutoRun(self):
        """
        End the autorun via termination of the thread timer and joining
        the thread with the main bundle.
        """
        # terminate the timer
        self.t.cancel()
        # terminate the thread and rejoin it to the main bundle
        self.t.join()
        self.LivePlotData()
# -
# # Testing
# The testing was done with the front voltage/2-wire-resistance inputs directly connected to the outputs of a generic benchtop DC power supply, whose make will go unsaid for very good reasons. As there was no load in the circuit, the current function was not tested here, but it has been tested in additional tests.
# --- Smoke test: requires a 34401A physically attached on UnLockedPort ---
Meter1=MultiMeter34401A('MM1')
Meter1.MakeConnection(UnLockedPort)
Meter1.RemoteSetup()
# read back (and clear) any pending instrument error; expect None
R=Meter1.ErrorReadAct(); print(R)
print(Meter1.Mode)
Meter1.ModeSet('DC_V')
print(Meter1.Mode)
# confirm the output register handshake works
Meter1.QurreyProgress(debug=True)
Meter1.MakeMeasurment()
Meter1.Errors
Meter1.Data
# auto-log a reading every 5 seconds, switch mode mid-run, then stop
Meter1.AutoRun(5, 'Sec')
Meter1.ModeSet('Res4W')
Meter1.EndAutoRun()
# rerun with a regenerated live plot at a 1.5 s interval
Meter1.ModeSet('DC_V')
Meter1.AutoRun(1.5, 'Sec', ReGenPlot=True)
Meter1.EndAutoRun()
Meter1.Data
|
HP34401ASansVISA.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] origin_pos=0
# # softmax回归的简洁实现
# :label:`sec_softmax_concise`
#
# 在 :numref:`sec_linear_concise`中,
# 我们发现(**通过深度学习框架的高级API能够使实现**)
# (~~softmax~~)
# 线性(**回归变得更加容易**)。
# 同样,通过深度学习框架的高级API也能更方便地实现softmax回归模型。
# 本节如在 :numref:`sec_softmax_scratch`中一样,
# 继续使用Fashion-MNIST数据集,并保持批量大小为256。
#
# + origin_pos=3 tab=["tensorflow"]
import tensorflow as tf
from d2l import tensorflow as d2l
# + origin_pos=4 tab=["tensorflow"]
# Fashion-MNIST iterators with minibatch size 256, as in the scratch version
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
# + [markdown] origin_pos=5
# ## 初始化模型参数
#
# 如我们在 :numref:`sec_softmax`所述,
# [**softmax回归的输出层是一个全连接层**]。
# 因此,为了实现我们的模型,
# 我们只需在`Sequential`中添加一个带有10个输出的全连接层。
# 同样,在这里`Sequential`并不是必要的,
# 但它是实现深度模型的基础。
# 我们仍然以均值0和标准差0.01随机初始化权重。
#
# + origin_pos=8 tab=["tensorflow"]
# Softmax regression as a single dense layer: flatten the 28x28 images,
# then one fully connected layer with 10 outputs (one per class),
# weights initialized ~ N(0, 0.01).
net = tf.keras.models.Sequential()
net.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
weight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)
net.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))
# + [markdown] origin_pos=9
# ## 重新审视Softmax的实现
# :label:`subsec_softmax-implementation-revisited`
#
# 在前面 :numref:`sec_softmax_scratch`的例子中,
# 我们计算了模型的输出,然后将此输出送入交叉熵损失。
# 从数学上讲,这是一件完全合理的事情。
# 然而,从计算角度来看,指数可能会造成数值稳定性问题。
#
# 回想一下,softmax函数$\hat y_j = \frac{\exp(o_j)}{\sum_k \exp(o_k)}$,
# 其中$\hat y_j$是预测的概率分布。
# $o_j$是未规范化的预测$\mathbf{o}$的第$j$个元素。
# 如果$o_k$中的一些数值非常大,
# 那么$\exp(o_k)$可能大于数据类型容许的最大数字,即*上溢*(overflow)。
# 这将使分母或分子变为`inf`(无穷大),
# 最后得到的是0、`inf`或`nan`(不是数字)的$\hat y_j$。
# 在这些情况下,我们无法得到一个明确定义的交叉熵值。
#
# 解决这个问题的一个技巧是:
# 在继续softmax计算之前,先从所有$o_k$中减去$\max(o_k)$。
# 你可以看到每个$o_k$按常数进行的移动不会改变softmax的返回值:
#
# $$
# \begin{aligned}
# \hat y_j & = \frac{\exp(o_j - \max(o_k))\exp(\max(o_k))}{\sum_k \exp(o_k - \max(o_k))\exp(\max(o_k))} \\
# & = \frac{\exp(o_j - \max(o_k))}{\sum_k \exp(o_k - \max(o_k))}.
# \end{aligned}
# $$
#
#
# 在减法和规范化步骤之后,可能有些$o_j - \max(o_k)$具有较大的负值。
# 由于精度受限,$\exp(o_j - \max(o_k))$将有接近零的值,即*下溢*(underflow)。
# 这些值可能会四舍五入为零,使$\hat y_j$为零,
# 并且使得$\log(\hat y_j)$的值为`-inf`。
# 反向传播几步后,我们可能会发现自己面对一屏幕可怕的`nan`结果。
#
# 尽管我们要计算指数函数,但我们最终在计算交叉熵损失时会取它们的对数。
# 通过将softmax和交叉熵结合在一起,可以避免反向传播过程中可能会困扰我们的数值稳定性问题。
# 如下面的等式所示,我们避免计算$\exp(o_j - \max(o_k))$,
# 而可以直接使用$o_j - \max(o_k)$,因为$\log(\exp(\cdot))$被抵消了。
#
# $$
# \begin{aligned}
# \log{(\hat y_j)} & = \log\left( \frac{\exp(o_j - \max(o_k))}{\sum_k \exp(o_k - \max(o_k))}\right) \\
# & = \log{(\exp(o_j - \max(o_k)))}-\log{\left( \sum_k \exp(o_k - \max(o_k)) \right)} \\
# & = o_j - \max(o_k) -\log{\left( \sum_k \exp(o_k - \max(o_k)) \right)}.
# \end{aligned}
# $$
#
# 我们也希望保留传统的softmax函数,以备我们需要评估通过模型输出的概率。
# 但是,我们没有将softmax概率传递到损失函数中,
# 而是[**在交叉熵损失函数中传递未规范化的预测,并同时计算softmax及其对数**],
# 这是一种类似["LogSumExp技巧"](https://en.wikipedia.org/wiki/LogSumExp)的聪明方式。
#
# + origin_pos=12 tab=["tensorflow"]
# from_logits=True fuses softmax into the cross-entropy for numerical
# stability (the LogSumExp trick discussed above)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# + [markdown] origin_pos=13
# ## 优化算法
#
# 在这里,我们(**使用学习率为0.1的小批量随机梯度下降作为优化算法**)。
# 这与我们在线性回归例子中的相同,这说明了优化器的普适性。
#
# + origin_pos=16 tab=["tensorflow"]
# minibatch SGD with learning rate 0.1, same as the linear regression example
trainer = tf.keras.optimizers.SGD(learning_rate=.1)
# + [markdown] origin_pos=17
# ## 训练
#
# 接下来我们[**调用**] :numref:`sec_softmax_scratch`中(~~之前~~)
# (**定义的训练函数来训练模型**)。
#
# + origin_pos=18 tab=["tensorflow"]
# train for 10 epochs with the training loop defined in the scratch section
num_epochs = 10
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
|
02_linear_networks/tf_softmax_regression_concise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from os import listdir
from os.path import isfile, join
import pandas as pd
import numpy as np
import torch
from PIL import Image
from numpy import asarray
import argparse
from collections import namedtuple, OrderedDict
import itertools
import os
import numpy as np
from typing import Tuple
from typing import List
from typing import Dict
import random
from itertools import product
import copy
import re
import random
import hashlib
import pathlib
import json
import torch.nn.functional as F
from scipy.stats import pearsonr
import wandb
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
)
import logging
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from tqdm import tqdm, trange
# Building up our SEND model.
from models.BERT import *
from models.VGGFace2 import *
from models.optimization import *
class InputFeature:
    """
    Container for one video's per-window multimodal features and labels.

    Attributes:
        video_id: identifier of the source video
        acoustic_feature: per-window acoustic feature sequence
        linguistic_feature: per-window linguistic feature sequence
        visual_feature: per-window visual feature sequence
        labels: per-window rating labels
    """
    def __init__(
        self, video_id="",
        acoustic_feature=None,
        linguistic_feature=None,
        visual_feature=None,
        labels=None,
    ):
        # None sentinels instead of mutable `[]` defaults: with list
        # defaults every instance built without an explicit argument
        # shared (and mutated) the same list object.
        self.video_id = video_id
        self.acoustic_feature = [] if acoustic_feature is None else acoustic_feature
        self.linguistic_feature = [] if linguistic_feature is None else linguistic_feature
        self.visual_feature = [] if visual_feature is None else visual_feature
        self.labels = [] if labels is None else labels
def preprocess_SEND_files(
    data_dir, # Multimodal X
    target_data_dir, # Y
    use_target_ratings,
    time_window_in_sec=4.0,
    modality_dir_map = {"acoustic": "acoustic-egemaps",
                        "linguistic": "linguistic-word-level", # we don't load features
                        "visual": "image-raw", # image is nested,
                        "target": "observer_EWE",
                        },
    preprocess= {'acoustic': lambda df : df.loc[:,' F0semitoneFrom27.5Hz_sma3nz_amean':' equivalentSoundLevel_dBp'],
                 'acoustic_timer': lambda df : df.loc[:,' frameTime'],
                 'linguistic': lambda df : df.loc[:,'word'],
                 'linguistic_timer': lambda df : df.loc[:,'time-offset'],
                 'target': lambda df : ((df.loc[:,'evaluatorWeightedEstimate'] / 50.0) - 1.0),
                 'target_timer': lambda df : df.loc[:,'time'],
                 },
    linguistic_tokenizer=None,
    pad_symbol=0,
    max_number_of_file=-1
):
    """
    Load the SEND dataset and align acoustic, linguistic and visual
    features with observer ratings on a common time-window grid.

    Each video is chopped into windows of `time_window_in_sec` seconds;
    per window, acoustic features are mean-pooled, linguistic words are
    concatenated and tokenized (padded to the longest window over the
    whole corpus), the last visual frame is kept, and ratings are
    mean-pooled. Sequences are truncated to the shortest modality and
    padded to a fixed length of 60 windows.

    Args:
        data_dir: root of the multimodal feature directories (X)
        target_data_dir: root of the rating directories (Y)
        use_target_ratings: if True load `target_*_normal.csv` files,
            otherwise `results_*.csv`
        time_window_in_sec: window size in seconds
        modality_dir_map: subdirectory name for each modality
        preprocess: per-modality dataframe-to-array extraction lambdas
        linguistic_tokenizer: a BERT-style tokenizer (tokenize /
            convert_tokens_to_ids / pad_token_id); required, despite the
            None default
        pad_symbol: unused here -- padding uses the tokenizer's pad id
        max_number_of_file: if != -1, stop after this many videos

    Returns:
        list of dicts, one per video, with keys: video_id, a_feature,
        l_feature, l_mask, l_segment_ids, v_feature, rating, seq_len,
        input_mask (tensors padded to 60 windows).

    NOTE(review): relies on a module-level `logger` being defined
    elsewhere in the project -- confirm before running standalone.
    """
    import time
    start = time.time()
    SEND_videos = []
    # basically, let us get all the video ids from each modality directory
    a_ids = [f.split("_")[0]+"_"+f.split("_")[1]
             for f in listdir(os.path.join(data_dir, modality_dir_map["acoustic"]))
             if isfile(os.path.join(data_dir, modality_dir_map["acoustic"], f))]
    l_ids = [f.split("_")[0]+"_"+f.split("_")[1]
             for f in listdir(os.path.join(data_dir, modality_dir_map["linguistic"]))
             if isfile(os.path.join(data_dir, modality_dir_map["linguistic"], f))]
    v_ids = [f.split("_")[0]+"_"+f.split("_")[1]
             for f in listdir(os.path.join(data_dir, modality_dir_map["visual"]))
             if f != ".DS_Store"]
    # all three modalities must cover exactly the same set of videos
    assert len(a_ids) == len(l_ids) and len(l_ids) == len(v_ids)
    assert len(set(a_ids).intersection(set(l_ids))) == len(l_ids)
    assert len(set(a_ids).intersection(set(v_ids))) == len(v_ids)
    # First pass over the linguistic modality: find the longest tokenized
    # window so that every window can later be padded to the same length.
    max_window_l_length = -1
    for video_id in a_ids: # pick any one!
        # linguistic features process
        l_file = os.path.join(data_dir, modality_dir_map["linguistic"], f"{video_id}_aligned.tsv")
        l_df = pd.read_csv(l_file, sep='\t')
        # the pandas parse below was found unreliable for these files,
        # so the TSV is parsed by hand instead
        #l_words = np.array(preprocess["linguistic"](l_df))
        #l_words = [w.strip().lower() for w in l_words]
        l_words = []
        l_timestamps = []
        head = True
        with open(l_file) as fp:
            for line in fp:
                if head:
                    head = False
                    continue
                l_words.append(line.strip().split("\t")[2].lower().strip())
                l_timestamps.append(float(line.strip().split("\t")[1]))
        #l_timestamps = np.array(preprocess["linguistic_timer"](l_df))
        l_timestamps = np.array(l_timestamps)
        # sample based on interval: group the words into time windows
        current_time = 0.0
        keep_first = True
        sampled_l_words = [] # different from other modality, it is essentially a list of list!
        tmp_words = []
        for i in range(0, l_timestamps.shape[0]):
            if keep_first:
                sampled_l_words += [[]]
                keep_first = False
            if l_timestamps[i] >= current_time+time_window_in_sec:
                sampled_l_words.append(tmp_words)
                tmp_words = [l_words[i]] # reinit the buffer
                current_time += time_window_in_sec
                continue
            tmp_words += [l_words[i]]
        # overflow: flush the last partial window
        if len(tmp_words) > 0:
            sampled_l_words.append(tmp_words)
        # track the maximum tokenized window length over the corpus
        for window_words in sampled_l_words:
            window_str = " ".join(window_words)
            window_tokens = linguistic_tokenizer.tokenize(window_str)
            token_ids = linguistic_tokenizer.convert_tokens_to_ids(window_tokens)
            if len(token_ids) > max_window_l_length:
                max_window_l_length = len(token_ids)
    max_window_l_length += 2 # the start and the end token ([CLS]/[SEP])
    if max_number_of_file != -1:
        logger.info(f"WARNING: Only loading #{max_number_of_file} videos.")
    max_seq_len = -1  # NOTE(review): overwritten by the fixed 60 below
    video_count = 0
    # Second pass: build the aligned per-window tensors for each video.
    for video_id in a_ids: # pick any one!
        if max_number_of_file != -1 and video_count >= max_number_of_file:
            break # we enforce!
        if video_count > 1 and video_count%100 == 0:
            logger.info(f"Processed #{len(SEND_videos)} videos.")
            # logger.info(SEND_videos[-1])
        # we need to fix this to get features aligned.
        # Step 1: Load rating data, and we can get window partitioned according to our interval.
        target_id = video_id.split("_")[0][2:] + "_" + video_id.split("_")[1][3:]
        if use_target_ratings:
            target_file = os.path.join(target_data_dir, modality_dir_map["target"], f"target_{target_id}_normal.csv")
        else:
            target_file = os.path.join(target_data_dir, modality_dir_map["target"], f"results_{target_id}.csv")
        target_df = pd.read_csv(target_file)
        target_ratings = np.array(preprocess["target"](target_df))
        target_timestamps = np.array(preprocess["target_timer"](target_df))
        assert target_ratings.shape[0] == target_timestamps.shape[0]
        # window boundaries: [(0, w], (w, 2w], ..., (nw, max_time]]
        windows = []
        number_of_window = int(max(target_timestamps)//time_window_in_sec)
        for i in range(0, number_of_window):
            windows += [(i*time_window_in_sec, (i+1)*time_window_in_sec)]
        windows += [((i+1)*time_window_in_sec, max(target_timestamps))]
        # acoustic features process: bucket frames into windows by time
        a_file = os.path.join(data_dir, modality_dir_map["acoustic"], f"{video_id}_acousticFeatures.csv")
        a_df = pd.read_csv(a_file)
        a_features = np.array(preprocess["acoustic"](a_df))
        a_timestamps = np.array(preprocess["acoustic_timer"](a_df))
        a_feature_dim = a_features.shape[1]
        assert a_features.shape[0] == a_timestamps.shape[0]
        sampled_a_features_raw = [[] for i in range(len(windows))]
        for i in range(0, a_timestamps.shape[0]):
            # using integer division to hash into the correct bucket
            hash_in_window = int(a_timestamps[i]//time_window_in_sec)
            if hash_in_window >= len(windows):
                continue # we cannot predict after ratings max.
            sampled_a_features_raw[hash_in_window].append(a_features[i])
        # collate each window by mean; empty windows become zero vectors
        sampled_a_features = []
        for window in sampled_a_features_raw:
            # only acoustic need to consider this I think.
            if len(window) == 0:
                collate_window = np.zeros(a_feature_dim)
            else:
                collate_window = np.mean(np.array(window), axis=0)
            sampled_a_features.append(collate_window)
        # linguistic features process (hand-parsed, same reason as pass 1)
        l_file = os.path.join(data_dir, modality_dir_map["linguistic"], f"{video_id}_aligned.tsv")
        l_df = pd.read_csv(l_file, sep='\t')
        # the following line is buggy, it may parse file incorrectly!
        #l_words = np.array(preprocess["linguistic"](l_df))
        #l_words = [w.strip().lower() for w in l_words]
        l_words = []
        l_timestamps = []
        head = True
        with open(l_file) as fp:
            for line in fp:
                if head:
                    head = False
                    continue
                l_words.append(line.strip().split("\t")[2].lower().strip())
                l_timestamps.append(float(line.strip().split("\t")[1]))
        #l_timestamps = np.array(preprocess["linguistic_timer"](l_df))
        l_timestamps = np.array(l_timestamps)
        assert len(l_words) == l_timestamps.shape[0]
        sampled_l_features_raw = [[] for i in range(len(windows))]
        for i in range(0, l_timestamps.shape[0]):
            # using integer division to hash into the correct bucket
            hash_in_window = int(l_timestamps[i]//time_window_in_sec)
            if hash_in_window >= len(windows):
                continue # we cannot predict after ratings max.
            sampled_l_features_raw[hash_in_window].append(l_words[i])
        # tokenize each window, add [CLS]/[SEP], pad to max_window_l_length
        sampled_l_features = []
        sampled_l_mask = []
        sampled_l_segment_ids = []
        for window in sampled_l_features_raw:
            window_str = " ".join(window)
            window = linguistic_tokenizer.tokenize(window_str)
            complete_window_word = ["[CLS]"] + window + ["[SEP]"]
            token_ids = linguistic_tokenizer.convert_tokens_to_ids(complete_window_word)
            input_mask = [1 for _ in range(len(token_ids))]
            for _ in range(0, max_window_l_length-len(token_ids)):
                token_ids.append(linguistic_tokenizer.pad_token_id)
                input_mask.append(0)
            segment_ids = [0] * len(token_ids)
            sampled_l_features += [token_ids]
            sampled_l_mask += [input_mask]
            sampled_l_segment_ids += [segment_ids]
        # visual features process
        # for visual, we actually need to actively control what images we
        # load; we cannot just load all images, it will blow memory.
        fps=30 # We may need to dynamically figure out this number?
        frame_names = []
        for f in listdir(os.path.join(data_dir, modality_dir_map["visual"], video_id)):
            if ".jpg" in f:
                # (timestamp in seconds, filename); frame index comes
                # from the filename after the 'image' prefix
                frame_names += [(int(f.split("_")[0][5:])*(1.0/fps), f)]
        frame_names.sort(key=lambda x:x[0])
        sampled_v_features_raw = [[] for i in range(len(windows))]
        for f in frame_names:
            # using integer division to hash into the correct bucket
            hash_in_window = int(f[0]//time_window_in_sec)
            if hash_in_window >= len(windows):
                continue # we cannot predict after ratings max.
            sampled_v_features_raw[hash_in_window].append(f)
        sampled_v_features = []
        for window in sampled_v_features_raw:
            if len(window) == 0:
                f_data = np.zeros((224,224,3))
            else:
                # we collate by using the last frame in the time window.
                f = window[-1]
                f_path = os.path.join(data_dir, modality_dir_map["visual"], video_id, f[1])
                f_image = Image.open(f_path)
                f_data = asarray(f_image)
                f_data = f_data[...,::-1] # reverse the channel order (RGB -> BGR)
            sampled_v_features.append(f_data)
        # ratings (target): mean-pool ratings falling in each window
        target_id = video_id.split("_")[0][2:] + "_" + video_id.split("_")[1][3:]
        if use_target_ratings:
            target_file = os.path.join(target_data_dir, modality_dir_map["target"], f"target_{target_id}_normal.csv")
        else:
            target_file = os.path.join(target_data_dir, modality_dir_map["target"], f"results_{target_id}.csv")
        target_df = pd.read_csv(target_file)
        target_ratings = np.array(preprocess["target"](target_df))
        target_timestamps = np.array(preprocess["target_timer"](target_df))
        assert target_ratings.shape[0] == target_timestamps.shape[0]
        sampled_ratings_raw = [[] for i in range(len(windows))]
        for i in range(0, target_timestamps.shape[0]):
            # using integer division to hash into the correct bucket
            hash_in_window = int(target_timestamps[i]//time_window_in_sec)
            sampled_ratings_raw[hash_in_window].append(target_ratings[i])
        sampled_ratings = []
        for window in sampled_ratings_raw:
            collate_window = np.mean(np.array(window), axis=0)
            sampled_ratings.append(collate_window)
        # we truncate features to the shortest modality's coverage
        assert len(sampled_a_features) == len(sampled_l_features)
        assert len(sampled_a_features) == len(sampled_v_features)
        max_window_cutoff_l = int(max(l_timestamps)//time_window_in_sec)
        max_window_cutoff_a = int(max(a_timestamps)//time_window_in_sec)
        max_window_cutoff_v = int(frame_names[-1][0]//time_window_in_sec)
        max_window_cutoff = min([max_window_cutoff_l, max_window_cutoff_a, max_window_cutoff_v])
        sampled_a_features = sampled_a_features[:max_window_cutoff]
        sampled_l_features = sampled_l_features[:max_window_cutoff]
        sampled_v_features = sampled_v_features[:max_window_cutoff]
        sampled_ratings = sampled_ratings[:max_window_cutoff]
        sampled_l_mask = sampled_l_mask[:max_window_cutoff]
        sampled_l_segment_ids = sampled_l_segment_ids[:max_window_cutoff]
        input_mask = np.ones(len(sampled_a_features)).tolist()
        # pad every modality out to a fixed sequence length of 60 windows
        max_seq_len = 60
        seq_len = len(sampled_a_features)
        for i in range(max_seq_len-len(sampled_a_features)):
            sampled_a_features.append(np.zeros(a_feature_dim))
            sampled_l_features.append(np.zeros(max_window_l_length))
            sampled_l_mask.append(np.zeros(max_window_l_length))
            sampled_l_segment_ids.append(np.zeros(max_window_l_length))
            sampled_v_features.append(np.zeros((224,224,3)))
            sampled_ratings.append(0.0)
            input_mask.append(0)
        # convert to tensors
        sampled_a_features = torch.tensor(sampled_a_features)
        sampled_l_features = torch.LongTensor(sampled_l_features)
        sampled_l_mask = torch.LongTensor(sampled_l_mask)
        sampled_l_segment_ids = torch.LongTensor(sampled_l_segment_ids)
        processed_tensor = torch.tensor(sampled_v_features).float()
        # per-channel mean subtraction -- presumably the VGGFace2 training
        # means; TODO confirm against the visual model's preprocessing
        processed_tensor[..., 0] -= 91.4953
        processed_tensor[..., 1] -= 103.8827
        processed_tensor[..., 2] -= 131.0912
        sampled_v_features = processed_tensor
        sampled_ratings = torch.tensor(sampled_ratings)
        input_mask = torch.LongTensor(input_mask)
        video_struct = {
            "video_id": video_id,
            "a_feature": sampled_a_features,
            "l_feature": sampled_l_features,
            "l_mask": sampled_l_mask,
            "l_segment_ids": sampled_l_segment_ids,
            "v_feature": sampled_v_features,
            "rating": sampled_ratings,
            "seq_len": seq_len,
            "input_mask": input_mask
        }
        video_count += 1
        SEND_videos += [video_struct]
    end = time.time()
    elapsed = end - start
    logger.info(f"Time elapsed for first-pass: {elapsed}")
    return SEND_videos
def eval_ccc(y_true, y_pred):
    """Computes concordance correlation coefficient.

    CCC = 2*cov(t, p) / (var(t) + var(p) + (mean(p) - mean(t))^2),
    using population (biased) variance and covariance.
    """
    mean_t, mean_p = np.mean(y_true), np.mean(y_pred)
    var_t, var_p = np.var(y_true), np.var(y_pred)
    cov_tp = np.cov(y_true, y_pred, bias=True)[0][1]
    denominator = var_t + var_p + (mean_p - mean_t) ** 2
    return 2 * cov_tp / denominator
class MultimodalEmotionPrediction(nn.Module):
    """
    Multimodal (acoustic + linguistic + visual) emotion-rating model.

    Each modality is encoded separately (BERT for text, VGGFace2 ResNet-50
    for frames, a linear layer for acoustic features), decoded to a
    per-window rating by its own LSTM, and the three rating streams are
    summed to form the final prediction. Loss is sum-reduced MSE against
    the observer ratings.
    """
    def __init__(
        self,
        linguistic_model="bert-base-uncased",
        visual_model="vggface-2",
        visual_model_path="../saved-models/resnet50_scratch_dag.pth",
        acoustic_model="mlp",
        cache_dir="../.huggingface_cache/",
    ):
        # NOTE(review): `visual_model` and `acoustic_model` are accepted
        # but not read -- the architectures are fixed below.
        super(MultimodalEmotionPrediction, self).__init__()
        # Loading BERT using huggingface
        linguistic_config = AutoConfig.from_pretrained(
            linguistic_model,
            cache_dir=cache_dir
        )
        self.linguistic_encoder = LinguisticEncoderBERT.from_pretrained(
            linguistic_model,
            from_tf=False,
            config=linguistic_config,
            cache_dir=cache_dir
        )
        # let us disenable gradient prop
        # for name, param in self.linguistic_encoder.named_parameters():
        #     param.requires_grad = False
        # Loading visual model using vggface-2 pretrained weights
        self.visual_encoder = Resnet50_scratch_dag()
        state_dict = torch.load(visual_model_path)
        self.visual_encoder.load_state_dict(state_dict)
        # project the 2048-d ResNet pooled feature down to BERT's 768
        self.visual_reducer = nn.Linear(2048, 768)
        # Rating lstm over the linguistic stream.
        # hidden_dim = 128
        hidden_dim = 768
        self.rating_decoder = nn.LSTM(
            hidden_dim, 64, 1,
            batch_first=True, bidirectional=False)
        # Rating decoder head (64 -> scalar rating per window).
        self.rating_output = nn.Sequential(
            nn.Linear(64, 1)
        )
        # acoustic stream: 88 eGeMAPS features -> 32 -> LSTM -> scalar
        self.acoustic_encoder = nn.Linear(88, 32)
        self.rating_decoder_a = nn.LSTM(
            32, 1, 1,
            batch_first=True, bidirectional=False)
        # visual stream: 768 reduced feature -> LSTM -> scalar
        self.rating_decoder_v = nn.LSTM(
            768, 1, 1,
            batch_first=True, bidirectional=False)

    def forward(
        self, input_a_feature, input_l_feature,
        input_l_mask, input_l_segment_ids,
        input_v_feature, train_rating_labels, input_mask,
    ):
        """
        Compute per-window ratings and the sum-reduced MSE loss.

        Returns:
            (loss, output): scalar loss and the (batch, seq_len) ratings,
            masked by `input_mask` so padded windows contribute zero.
        """
        # linguistic encoder: fold the window dimension into the batch so
        # BERT sees one window per row, then unfold afterwards
        batch_size, seq_len = input_l_feature.shape[0], input_l_feature.shape[1]
        input_l_feature = input_l_feature.reshape(batch_size*seq_len, -1)
        input_l_mask = input_l_mask.reshape(batch_size*seq_len, -1)
        input_l_segment_ids = input_l_segment_ids.reshape(batch_size*seq_len, -1)
        l_decode = self.linguistic_encoder(
            input_ids=input_l_feature,
            attention_mask=input_l_mask,
            token_type_ids=input_l_segment_ids,
        )
        l_decode = l_decode.reshape(batch_size, seq_len, -1)
        # visual encoder: frames come in HWC order, permute to CHW
        input_v_feature = input_v_feature.reshape(batch_size*seq_len, 224, 224, 3)
        input_v_feature = input_v_feature.permute(0,3,1,2).contiguous()
        _, v_decode = self.visual_encoder(input_v_feature)
        v_decode = v_decode.squeeze(dim=-1).squeeze(dim=-1).contiguous()
        v_decode = v_decode.reshape(batch_size, seq_len, -1)
        v_decode = self.visual_reducer(v_decode)
        # decoding to ratings: linguistic stream
        output, (_, _) = self.rating_decoder(l_decode)
        output = self.rating_output(output)
        output = output.squeeze(dim=-1)
        output = output * input_mask
        # acoustic stream
        a_decode = self.acoustic_encoder(input_a_feature)
        output_a, (_, _) = self.rating_decoder_a(a_decode)
        output_a = output_a.squeeze(dim=-1)
        output_a = output_a * input_mask
        # visual stream
        output_v, (_, _) = self.rating_decoder_v(v_decode)
        output_v = output_v.squeeze(dim=-1)
        output_v = output_v * input_mask
        # late fusion: sum the three per-window rating streams
        output += output_a
        output += output_v
        # get loss (sum-reduced; callers normalize by total seq length)
        criterion = nn.MSELoss(reduction='sum')
        loss = criterion(output, train_rating_labels)
        return loss, output
def evaluate(
    test_dataloader, model, device, args,
):
    """
    Run the model over the test set without gradients.

    Returns:
        (total_loss, ccc, corr, outputs):
            total_loss -- summed MSE normalized by total valid seq length
            ccc        -- per-video concordance correlation coefficients
            corr       -- per-video Pearson correlations
            outputs    -- per-video raw prediction arrays
    """
    pbar = tqdm(test_dataloader, desc="Iteration")
    ccc = []
    corr = []
    outputs = []
    total_loss = 0
    data_num = 0
    model.eval()
    with torch.no_grad():
        for step, batch in enumerate(pbar):
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            input_a_feature, input_l_feature, input_l_mask, input_l_segment_ids, \
                input_v_feature, rating_labels, seq_lens, input_mask = batch
            input_a_feature = input_a_feature.to(device)
            input_l_feature = input_l_feature.to(device)
            input_l_mask = input_l_mask.to(device)
            input_l_segment_ids = input_l_segment_ids.to(device)
            input_v_feature = input_v_feature.to(device)
            rating_labels = rating_labels.to(device)
            seq_lens = seq_lens.to(device)
            input_mask = input_mask.to(device)
            loss, output = \
                model(input_a_feature, input_l_feature, input_l_mask, input_l_segment_ids,
                      input_v_feature, rating_labels, input_mask)
            n_gpu = torch.cuda.device_count()
            if n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu.
            total_loss += loss.data.cpu().detach().tolist()
            data_num += torch.sum(seq_lens).tolist()
            output_array = output.cpu().detach().numpy()
            rating_labels_array = rating_labels.cpu().detach().numpy()
            # score each video only over its valid (un-padded) windows
            # NOTE(review): `seq_lens[i].tolist()[0]` implies seq_lens
            # elements are 1-element tensors -- confirm collate format
            for i in range(0, input_a_feature.shape[0]):
                ccc.append(eval_ccc(rating_labels_array[i][:int(seq_lens[i].tolist()[0])], output_array[i][:int(seq_lens[i].tolist()[0])]))
                corr.append(pearsonr(output_array[i][:int(seq_lens[i].tolist()[0])], rating_labels_array[i][:int(seq_lens[i].tolist()[0])])[0])
                outputs.append(output_array[i])
    # normalize the summed loss by the total number of valid windows
    total_loss /= data_num
    return total_loss, ccc, corr, outputs
def train(
    train_dataloader, test_dataloader, model, optimizer,
    device, args
):
    """
    Main training loop: iterate epochs/batches, backprop the length-
    normalized MSE loss, and periodically evaluate, logging metrics and
    checkpointing the best-CCC / best-corr models.

    NOTE(review): relies on module-level `logger` and (optionally) an
    initialized `wandb` run when `args.is_tensorboard` is set.
    """
    global_step = 0
    best_ccc, best_corr = -1, -1
    for _ in trange(int(args.num_train_epochs), desc="Epoch"):
        pbar = tqdm(train_dataloader, desc="Iteration")
        for step, batch in enumerate(pbar):
            model.train()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            input_a_feature, input_l_feature, input_l_mask, input_l_segment_ids, \
                input_v_feature, rating_labels, seq_lens, input_mask = batch
            input_a_feature = input_a_feature.to(device)
            input_l_feature = input_l_feature.to(device)
            input_l_mask = input_l_mask.to(device)
            input_l_segment_ids = input_l_segment_ids.to(device)
            input_v_feature = input_v_feature.to(device)
            rating_labels = rating_labels.to(device)
            seq_lens = seq_lens.to(device)
            input_mask = input_mask.to(device)
            loss, output = \
                model(input_a_feature, input_l_feature, input_l_mask, input_l_segment_ids,
                      input_v_feature, rating_labels, input_mask)
            n_gpu = torch.cuda.device_count()
            if n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu.
            # normalize the sum-reduced loss by the total valid length
            loss /= (torch.sum(seq_lens).tolist())
            loss.backward() # uncomment this for actual run!
            optimizer.step()
            optimizer.zero_grad()
            pbar.set_description("loss: %.4f"%loss)
            if args.is_tensorboard:
                wandb.log({"train_loss": loss.cpu().detach().numpy()})
            # periodic evaluation + best-model checkpointing
            if global_step%args.eval_interval == 0:
                logger.info('Evaluating the model...')
                # we need to evaluate!
                loss, ccc, corr, outputs = evaluate(
                    test_dataloader, model, device, args,
                )
                if np.mean(ccc) > best_ccc:
                    best_ccc = np.mean(ccc)
                    # save best ccc models.
                    if args.save_best_model:
                        logger.info('Saving the new best model for ccc...')
                        checkpoint = {'model': model.state_dict()}
                        checkpoint_path = os.path.join(args.output_dir, "best_ccc_pytorch_model.bin")
                        torch.save(checkpoint, checkpoint_path)
                if np.mean(corr) > best_corr:
                    best_corr = np.mean(corr)
                    # save best corr models.
                    if args.save_best_model:
                        logger.info('Saving the new best model for corr...')
                        checkpoint = {'model': model.state_dict()}
                        checkpoint_path = os.path.join(args.output_dir, "best_corr_pytorch_model.bin")
                        torch.save(checkpoint, checkpoint_path)
                # Average statistics and print
                stats = {'eval_loss': loss, 'corr': np.mean(corr), 'corr_std': np.std(corr),
                         'ccc': np.mean(ccc), 'ccc_std': np.std(ccc),
                         'best_ccc': best_ccc, 'best_corr': best_corr}
                if args.is_tensorboard:
                    wandb.log(stats)
                logger.info(f'Evaluation results: {stats}')
            global_step += 1
# -
def arg_parse():
    """Build and parse the command-line arguments for multimodal emotion training.

    Inside IPython/Jupyter (where ``get_ipython`` exists) the parser is invoked
    with an empty argv so notebook runs always receive the defaults; on the
    command line the real ``sys.argv`` is parsed.

    Returns:
        argparse.Namespace with all experiment settings.
    """
    parser = argparse.ArgumentParser(description='multimodal emotion analysis argparse.')
    # Experiment management:
    parser.add_argument('--train_batch_size', type=int, default=6,
                        help='Training batch size.')
    parser.add_argument('--eval_batch_size', type=int, default=12,
                        help='Evaluation batch size.')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='Learning rate.')
    parser.add_argument('--warmup_proportion', type=float, default=0.1,
                        help='Warmup period.')
    parser.add_argument('--seed', type=int, default=42,
                        help='Random seed.')
    parser.add_argument('--num_train_epochs', type=float, default=3,
                        help='Number of training epochs.')
    parser.add_argument('--eval_interval', type=int, default=20,
                        help='Evaluation interval in steps.')
    parser.add_argument('--max_number_of_file', type=int, default=-1,
                        help='Maybe we just want to test with a few number of files.')
    parser.add_argument('--resumed_from_file_path', type=str, default="",
                        help='Whether to resume for this file.')
    # BUGFIX: the two help strings below were copy-pasted from
    # --resumed_from_file_path and described the wrong option.
    parser.add_argument('--data_dir', type=str, default="../../SENDv1-data/",
                        help='Root directory of the SENDv1 dataset.')
    parser.add_argument('--output_dir', type=str, default="../default_output_log/",
                        help='Directory for logs and saved checkpoints.')
    parser.add_argument("--is_tensorboard",
                        default=False,
                        action='store_true',
                        help="Whether to use tensorboard.")
    parser.add_argument("--save_best_model",
                        default=False,
                        action='store_true',
                        help="Whether to save the best model during eval.")
    parser.add_argument("--eval_only",
                        default=False,
                        action='store_true',
                        help="Whether we are evaluating the model only.")
    parser.add_argument("--debug_only",
                        default=False,
                        action='store_true',
                        help="Whether we are debugging the code only.")
    parser.add_argument("--use_target_ratings",
                        default=False,
                        action='store_true',
                        help="Whether to use target ratings from the dataset.")
    parser.set_defaults(
        # Exp management:
        seed=42,
    )
    try:
        get_ipython().run_line_magic('matplotlib', 'inline')
        args = parser.parse_args([])
    except NameError:
        # get_ipython is undefined outside IPython (previously a bare
        # except, which could also mask unrelated errors).
        args = parser.parse_args()
    return args
# +
if __name__ == "__main__":
    # Loading arguments
    args = arg_parse()
    # Detect whether we run inside a Jupyter/IPython kernel: the magic call
    # only exists there. Notebook runs override the CLI defaults with small,
    # CPU-friendly settings.
    try:
        get_ipython().run_line_magic('matplotlib', 'inline')
        # Experiment management:
        args.train_batch_size = 1
        args.eval_batch_size = 1
        args.lr = 8e-5
        args.seed = 42
        args.is_tensorboard = True  # Let us try this!
        args.output_dir = "../default_output_log/"
        is_jupyter = True
    except NameError:
        # get_ipython is undefined outside IPython (previously a bare except).
        is_jupyter = False
    # Seed every RNG in play for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    n_gpu = torch.cuda.device_count()
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    # Create output directory if not exists.
    pathlib.Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    # Log to a file under output_dir and mirror everything to stdout.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename=os.path.join(args.output_dir, "training.log"),
    )
    logger = logging.getLogger(__name__)
    logging.getLogger().addHandler(logging.StreamHandler(os.sys.stdout))
    logger.info("Training the model with the following parameters: ")
    logger.info(args)
    # wandb logging is only enabled for real (non-notebook) runs.
    if args.is_tensorboard and not is_jupyter:
        logger.warning("Enabling wandb for tensorboard logging...")
        run = wandb.init(project="SEND-Multimodal", entity="wuzhengx")
        run_name = wandb.run.name
        wandb.config.update(args)
    else:
        wandb = None
    # The raw-feature preprocessing pipeline (tokenizer + preprocess_SEND_files
    # + TensorDataset assembly) was removed as dead commented-out code; the
    # preprocessed datasets are loaded straight from disk instead.
    train_data = torch.load('./train_data.pt')
    test_data = torch.load('./test_data.pt')
    logger.info("Finish Loading Datasets...")
    if not args.eval_only:
        # Random sampling for training; deterministic order for evaluation.
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        test_dataloader = DataLoader(test_data, batch_size=args.eval_batch_size, shuffle=False)
    else:
        logger.info("Not implemented...")
    if not args.eval_only:
        # Init model with optimizer.
        model = MultimodalEmotionPrediction()
        # Parameter groups prepared for a BERTAdam-style optimizer with
        # selective weight decay; the plain Adam below does not use them --
        # NOTE(review): optimizer_parameters is currently unused.
        no_decay = ['bias', 'gamma', 'beta']
        optimizer_parameters = [
            {'params': [p for n, p in model.named_parameters()
                        if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
            {'params': [p for n, p in model.named_parameters()
                        if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
        ]
        num_train_steps = int(
            len(train_data) / args.train_batch_size * args.num_train_epochs)
        # We use plain Adam to do gradient descent (a BERTAdam variant with
        # warmup was considered; see num_train_steps above).
        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
        # Determine the device; notebook runs always stay on CPU.
        if not torch.cuda.is_available() or is_jupyter:
            device = torch.device("cpu")
            n_gpu = -1
        else:
            device = torch.device("cuda")
            n_gpu = torch.cuda.device_count()
            if n_gpu > 1:
                model = torch.nn.DataParallel(model)
        model = model.to(device)
        train(
            train_dataloader, test_dataloader, model, optimizer,
            device, args
        )
    else:
        logger.info("Not implemented...")
|
code/run_multimodal_time_series.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table> <tr>
# <td style="background-color:#ffffff;">
# <a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="25%" align="left"> </a></td>
# <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
# prepared by <a href="http://abu.lu.lv" target="_blank"><NAME></a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
# </td>
# </tr></table>
# <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# <h2> <font color="blue"> Solutions for </font> Basics of Python: Conditionals </h2>
# <a id="task1"></a>
# <h3> Task 1 </h3>
#
# Randomly pick a number between 10 and 50, and say whether it is even or not.
#
# Remember the remainder/mod operator (%).
#
# A number is even if it is exactly divisible by 2. That is, the remainder is zero when divided by 2 over integers.
#
# If the number is odd, then the remainder will be 1 when divided by 2 over integers.
# <h3>Solution</h3>
# +
from random import randrange

# Draw one integer uniformly from [10, 50] and report its parity.
r = randrange(10, 51)
if r % 2 == 0:
    print(r, "is even")
else:
    print(r, "is odd")
# -
# <a id="task2"></a>
# <h3> Task 2 </h3>
#
# Randomly pick a number between 0 and 99.
#
# With half probability $\left( 0.5 = \dfrac{1}{2} \right)$, it is expected to be between 0 and 49 or between 50 and 99.
#
# Let's try to calculate this probability experimentally.
#
# N=100 times randomly pick a number between 0 and 99, calculate the frequencies of both cases, and then divide each by 100.
#
# Repeat the same experiment for N=1,000, N=10,000, and N=100,000.
#
# Experimental results should get closer to the ideal ratio when N increases.
# <h3>Solution</h3>
# +
from random import randrange

# Empirical estimate of P(r < 50) for r ~ Uniform{0..99}, at growing sample sizes.
for trials in (100, 1000, 10000, 100000):
    below_fifty = sum(1 for _ in range(trials) if randrange(100) < 50)
    print(trials, "->", below_fifty / trials, (trials - below_fifty) / trials)
# -
|
bronze-before-workshop/python/Python16_Basics_Conditionals_Solutions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# This starter code is inspired by
# https://www.kaggle.com/valentynsichkar/traffic-signs-classification-with-cnn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
import matplotlib.pyplot as plt
import datetime
import os
import random
from math import sqrt, ceil
from timeit import default_timer as timer
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, AvgPool2D, BatchNormalization, Reshape
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sn
# # Helpers
# %matplotlib inline
def confusion_matrix_gen(model, x_test, y_test):
    """Plot a labelled confusion-matrix heatmap for *model* on the test split.

    Relies on the module-level `labels` list for axis names.
    """
    predicted = np.argmax(model.predict(x_test), axis=1)
    matrix = confusion_matrix(y_test, predicted)
    frame = pd.DataFrame(matrix, index=labels, columns=labels)
    plt.figure(figsize=(15, 10))
    sn.heatmap(frame)
# # Load data
# Unpickle the preprocessed traffic-sign dataset. Keys observed in the loop
# below: x_/y_ train/validation/test arrays plus a 'labels' list.
with open('data/data2.pickle', 'rb') as f:
    data = pickle.load(f, encoding='latin1')
# +
# Preparing y_train and y_validation for using in Keras
# (y_test intentionally stays as integer class ids; it is compared directly later).
data['y_train'] = to_categorical(data['y_train'], num_classes=43)
data['y_validation'] = to_categorical(data['y_validation'], num_classes=43)
# Making channels come at the end (axis 1 is moved last for Keras' default layout)
data['x_train'] = data['x_train'].transpose(0, 2, 3, 1)
data['x_validation'] = data['x_validation'].transpose(0, 2, 3, 1)
data['x_test'] = data['x_test'].transpose(0, 2, 3, 1)
# Showing loaded data from file
for i, j in data.items():
    if i == 'labels':
        print(i + ':', len(j))
    else:
        print(i + ':', j.shape)
# -
# # Show examples
# +
# %matplotlib inline
# Preparing function for ploting set of examples
# As input it will take 4D tensor and convert it to the grid
# Values will be scaled to the range [0, 255]
def convert_to_grid(x_input):
    """Tile a batch of images into one square grid image for visualization.

    Args:
        x_input: 4D array of shape (N, H, W, C).

    Returns:
        Array of shape (grid_height, grid_width, C) where each image is
        rescaled to [0, 255]; the 1-pixel gaps between tiles stay at 255.
    """
    N, H, W, C = x_input.shape
    grid_size = int(ceil(sqrt(N)))
    grid_height = H * grid_size + 1 * (grid_size - 1)
    grid_width = W * grid_size + 1 * (grid_size - 1)
    grid = np.zeros((grid_height, grid_width, C)) + 255
    next_idx = 0
    y0, y1 = 0, H
    for y in range(grid_size):
        x0, x1 = 0, W
        for x in range(grid_size):
            if next_idx < N:
                img = x_input[next_idx]
                low, high = np.min(img), np.max(img)
                if high > low:
                    grid[y0:y1, x0:x1] = 255.0 * (img - low) / (high - low)
                else:
                    # BUGFIX: a constant image used to divide by zero
                    # (NaN/inf in the grid); render it as black instead.
                    grid[y0:y1, x0:x1] = 0.0
                next_idx += 1
            x0 += W + 1
            x1 += W + 1
        y0 += H + 1
        y1 += H + 1
    return grid
# Visualizing some examples of training data
examples = data['x_train'][:20, :, :, :]
print(examples.shape)  # (20, 32, 32, 3)
# Plotting the 20 examples as a single tiled grid image
fig = plt.figure()
grid = convert_to_grid(examples)
plt.imshow(grid.astype('uint8'), cmap='gray')
plt.axis('off')
plt.gcf().set_size_inches(15, 15)
plt.title('Some examples of training data', fontsize=18)
plt.show()
plt.close()
# Saving plot
fig.savefig('training_examples.png')
plt.close()  # NOTE(review): redundant -- the figure was already closed above.
# -
# # Random Baseline
# +
# Random baseline: guess a uniformly random class (0..42) per test image.
# Expected accuracy is ~1/43; gives a floor to compare the CNN against.
match_cnt = 0
y_pred = []
for i in range(len(data['x_test'])):
    x_input = data['x_test'][i:i+1]
    y_input = data['y_test'][i:i+1]
    prediction = random.randint(0, 42)
    y_pred.append(prediction)
    if y_input[0] == prediction:
        match_cnt+=1
accuracy = match_cnt / len(data['x_test'])
print('Accuracy:', accuracy)
# -
# ## confusion matrix
# %matplotlib inline
# Confusion matrix of the random baseline's predictions.
# NOTE(review): `labels` is only defined further down (via label_text), so this
# cell raises NameError unless the later cells were executed first.
confusion = confusion_matrix(data['y_test'], y_pred)
confusion_df = pd.DataFrame(confusion, index=labels, columns=labels)
plt.figure(figsize = (15,10))
sn.heatmap(confusion_df)
# # Baseline CNN model
# ## Model Setup
# Baseline CNN: conv(32, 3x3) -> maxpool -> flatten -> dense(500) -> softmax
# over the 43 traffic-sign classes; categorical cross-entropy matches the
# one-hot y_train/y_validation prepared above.
model = Sequential()
model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu', input_shape=(32, 32, 3)))
model.add(MaxPool2D(pool_size=2))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(43, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# ## (Optional) Load model
# Resume from a previously saved model if its directory/file exists; the
# timestamped name must match an earlier run's save.
# NOTE(review): colons in the filename are invalid on Windows -- confirm
# target platform.
model_name = "model-17:34:41.477136"
if os.path.exists(model_name):
    model = load_model(model_name)
# ## Train
# +
# Exponentially decaying learning rate. The lambda closes over `epochs`,
# which is bound below but before fit() first invokes the callback, so the
# late binding is safe here.
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** (x + epochs))
epochs = 15
h = model.fit(data['x_train'], data['y_train'],
              batch_size=5, epochs = epochs,
              validation_data = (data['x_validation'], data['y_validation']),
              callbacks=[annealer], verbose=1)
# -
# Persist the trained model under a timestamped name for later reloading.
model_time = datetime.datetime.now().time()
model.save("model-" + str(model_time))
# # Test
# ## One input image
# Defining function for getting texts for every class - labels
def label_text(file):
    """Read the sign-name lookup CSV and return the label names in row order.

    Args:
        file: path or file-like object for a CSV with a 'SignName' column,
            one row per class id.

    Returns:
        list[str]: label names; row order is assumed to already be class-id
        order (0..42) -- TODO confirm against the CSV producer.
    """
    # pandas does the parsing; Series.tolist replaces the manual append loop.
    return pd.read_csv(file)['SignName'].tolist()
# +
# %matplotlib inline
# Classify one test image (index 100) and compare against its true label.
x_input = data['x_test'][100:101]
y_input = data['y_test'][100:101]
# plt.rcParams['figure.figsize'] = (2.5, 2.5) # Setting default size of plots
plt.imshow(x_input[0, :, :, :])
plt.axis('off')
plt.show()
# Getting scores from forward pass of input image
score = model.predict(x_input)
# Scores is given for image with 43 numbers of predictions for each class
# Getting only one class with maximum value
prediction = np.argmax(score)
print('ClassId:', prediction)
# Getting labels
labels = label_text('data/label_names.csv')
# Printing label for classified Traffic Sign
print('Label:', labels[prediction])
print('True Label:', labels[y_input[0]])
# -
# ## All test images
# +
# Full test-set accuracy, scoring one image per predict() call.
# NOTE(review): a single batched model.predict over all of x_test would be
# much faster and give identical results.
match_cnt = 0
for i in range(len(data['x_test'])):
    x_input = data['x_test'][i:i+1]
    y_input = data['y_test'][i:i+1]
    score = model.predict(x_input)
    prediction = np.argmax(score)
    if y_input[0] == prediction:
        match_cnt+=1
accuracy = match_cnt / len(data['x_test'])
print('Accuracy:', accuracy)
# -
# # Confusion Matrix
confusion_matrix_gen(model, data['x_test'], data['y_test'])
|
StarterCode.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from tmilib import *
from h2o_utils import *
import csv
import h2o
h2o.init()
# -
# +
def sum_confusion_matrix_diagonal_v1(confusion_matrix):
    """Sum the diagonal (correct predictions) of an H2O confusion-matrix table.

    Skips the non-class columns ('Error', 'Rate', and the unnamed row-label
    column); the row index advances only over real class columns.
    """
    skip_columns = ('Error', 'Rate', '')
    diagonal_total = 0
    row = 0
    for name in confusion_matrix.col_header:
        if name in skip_columns:
            continue
        diagonal_total += confusion_matrix[name][row]
        row += 1
    return diagonal_total
def get_correct_ratio_for_classifier(classifier):
    """Overall accuracy of *classifier*: confusion-matrix diagonal divided by
    the (hard-coded) number of test rows."""
    metrics = classifier._get_metrics(classifier, False, True, False)
    confusion = metrics.values()[0].confusion_matrix()
    # Some H2O versions wrap the table in an object with a .table attribute.
    if 'table' in dir(confusion):
        confusion = confusion.table
    total_items = 13836775  # NOTE(review): hard-coded test-set size -- confirm.
    return float(sum_confusion_matrix_diagonal_v1(confusion)) / total_items
# +
#classifier = load_h2o_model('domainclass_cpn_v89_randomforest_v1.h2o')
#print get_correct_ratio_for_classifier(classifier)
#print classifier
# -
# +
def make_predictions_and_save_for_classifier_num(classifier_num):
    """Score the saved random forest for *classifier_num* on its test CSV and
    persist the predictions to disk."""
    version = str(classifier_num)
    model_file = 'domainclass_cpn_v' + version + '_randomforest_v1.h2o'
    test_file = 'domainclass_cpn_test_all_withdomain_v' + version + '.csv'
    predictions_file = 'domainclass_cpn_v' + version + '_randomforest_v1_predictions.csv'
    make_predictions_and_save(model_file, test_file, predictions_file, 4)
    # Free the H2O cluster's memory between large scoring runs.
    clear_h2o_memory()
# -
def make_predictions_and_save_for_classifier_num_all_insession(classifier_num):
    """Same as make_predictions_and_save_for_classifier_num, but over the
    'all in-session' variant of the test split."""
    version = str(classifier_num)
    model_file = 'domainclass_cpn_v' + version + '_randomforest_v1.h2o'
    test_file = 'domainclass_cpn_test_all_withdomain_v' + version + '_all_insession.csv'
    predictions_file = 'domainclass_cpn_v' + version + '_randomforest_v1_all_insession_predictions.csv'
    make_predictions_and_save(model_file, test_file, predictions_file, 4)
    # Free the H2O cluster's memory between large scoring runs.
    clear_h2o_memory()
# Generate predictions for classifier 78's all-insession split, then for the
# standard test split of every trained classifier version.
make_predictions_and_save_for_classifier_num_all_insession(78)
for classifier_num in [89, 88, 87, 86, 85, 84, 81, 80, 79, 78, 77]:
    make_predictions_and_save_for_classifier_num(classifier_num)
#classifier_num = 89
#predictions = load_h2o_data('domainclass_cpn_v' + str(classifier_num) + '_randomforest_v1_predictions.csv')
# +
#test_data = load_h2o_data('domainclass_cpn_test_all_withdomain_v' + str(classifier_num) + '.csv')
# -
from itertools import izip
@jsonmemoized
def get_user_to_time_to_predicted_domain_id(all_insession=False):
    """Resolve per-second classifier predictions into concrete domain ids.

    The random forest predicts a *transform* class ('c' current, 'n' next,
    'pK' K-th previous, 'iK' K-th immediately-previous); this maps each such
    prediction back to an actual domain id using per-user timeline lookups.

    Returns:
        {user: {time_sec (str): predicted domain id}} -- only for rows where
        the predicted transform differed from the labeled one.
    """
    # BUGFIX: classifier_num was previously assigned *after* its first use,
    # raising UnboundLocalError; it must be bound before building filenames.
    classifier_num = 78
    if not all_insession:
        predictions_csv_file = 'domainclass_cpn_v' + str(classifier_num) + '_randomforest_v1_predictions.csv'
    else:
        # BUGFIX: aligned with the filename actually written by
        # make_predictions_and_save_for_classifier_num_all_insession
        # (was '..._v1_predictions_all_insession.csv').
        predictions_csv_file = 'domainclass_cpn_v' + str(classifier_num) + '_randomforest_v1_all_insession_predictions.csv'
    if not all_insession:
        test_data_csv_file = 'domainclass_cpn_test_all_withdomain_v' + str(classifier_num) + '.csv'
    else:
        test_data_csv_file = 'domainclass_cpn_test_all_withdomain_v' + str(classifier_num) + '_all_insession.csv'
    predictions_csv = csv.reader(sdir_open(predictions_csv_file))
    predictions_header = next(predictions_csv)
    assert predictions_header[0] == 'predict'
    test_data_csv = csv.reader(sdir_open(test_data_csv_file))
    test_data_header = next(test_data_csv)
    assert test_data_header[0] == 'time_sec'
    assert test_data_header[1] == 'user'
    assert test_data_header[2] == 'ref_domain'
    assert test_data_header[3] == 'label'
    # Lazily-built per-user timeline caches (each build is expensive).
    user_to_time_to_domain_id = {}
    user_to_time_to_nextdomain_id = {}
    user_to_time_to_prev7_domains_id = {}
    user_to_time_to_immediate_prev7_domains_id = {}
    total_correct = 0  # NOTE(review): tallied but never returned or printed here.
    total_items = 0
    output = {}
    for predictions_line, test_data_line in izip(predictions_csv, test_data_csv):
        total_items += 1
        predict = predictions_line[0]
        label = test_data_line[3]
        if predict == label:
            total_correct += 1
            # NOTE(review): exact transform matches are never resolved into
            # `output` -- confirm this omission is intended.
            continue
        time_sec = test_data_line[0]  # needs to remain string so we can index into things
        user = test_data_line[1]
        if user not in output:
            output[user] = {}
        ref_domain = test_data_line[2]
        ref_domain_id = domain_to_id(ref_domain)
        time_to_domain_id = user_to_time_to_domain_id.get(user, None)
        if time_to_domain_id is None:
            time_to_domain_id = get_recent_domain_id_at_seconds_for_user(user)
            user_to_time_to_domain_id[user] = time_to_domain_id
        time_to_nextdomain_id = user_to_time_to_nextdomain_id.get(user, None)
        if time_to_nextdomain_id is None:
            time_to_nextdomain_id = get_next_domain_id_at_seconds_for_user(user)
            user_to_time_to_nextdomain_id[user] = time_to_nextdomain_id
        time_to_prev7_domains_id = user_to_time_to_prev7_domains_id.get(user, None)
        if time_to_prev7_domains_id is None:
            time_to_prev7_domains_id = get_prev7_domains_id_at_seconds_for_user(user)
            user_to_time_to_prev7_domains_id[user] = time_to_prev7_domains_id
        # BUGFIX: this lookup previously read user_to_time_to_prev7_domains_id
        # (copy-paste), so 'iK' predictions were resolved against the wrong table.
        time_to_immediate_prev7_domains_id = user_to_time_to_immediate_prev7_domains_id.get(user, None)
        if time_to_immediate_prev7_domains_id is None:
            time_to_immediate_prev7_domains_id = get_immediate_prev7_domains_id_at_seconds_for_user(user)
            user_to_time_to_immediate_prev7_domains_id[user] = time_to_immediate_prev7_domains_id
        pred_domain_id = -1
        if predict == 'c':
            pred_domain_id = time_to_domain_id[time_sec]
        elif predict == 'n':
            pred_domain_id = time_to_nextdomain_id[time_sec]
        elif predict[0] == 'p':
            num = int(predict[1])
            pred_domain_id = time_to_prev7_domains_id[time_sec][num]
        elif predict[0] == 'i':
            num = int(predict[1])
            pred_domain_id = time_to_immediate_prev7_domains_id[time_sec][num]
        if pred_domain_id == -1:
            # Fall back to the current domain when the transform is unknown.
            pred_domain_id = time_to_domain_id[time_sec]
        if pred_domain_id == ref_domain_id:
            total_correct += 1
        output[user][time_sec] = pred_domain_id
    return output
_ = get_user_to_time_to_predicted_domain_id()
@jsonmemoized
def get_user_to_domain_id_to_total_time_spent_predicted():
    """Per user, count predictions (one per second) attributed to each domain id."""
    per_user_counts = {}
    for user, time_to_domain_id in get_user_to_time_to_predicted_domain_id().viewitems():
        counts = Counter()
        # The timestamps themselves are irrelevant here; only the domain ids count.
        for domain_id in time_to_domain_id.viewvalues():
            counts[domain_id] += 1
        per_user_counts[user] = counts
    return per_user_counts
_ = get_user_to_domain_id_to_total_time_spent_predicted()
@jsonmemoized
def get_user_to_domain_to_total_time_spent_predicted():
    """Same as the domain-id variant, but keyed by readable domain names."""
    result = {}
    per_user = get_user_to_domain_id_to_total_time_spent_predicted()
    for user, id_to_seconds in per_user.viewitems():
        by_domain = {}
        for domain_id, seconds in id_to_seconds.viewitems():
            # json-cached keys come back as strings; coerce before the lookup.
            by_domain[id_to_domain(int(domain_id))] = seconds
        result[user] = by_domain
    return result
_ = get_user_to_domain_to_total_time_spent_predicted()
def get_performance_for_classifier(classifier_num):
predictions_csv = csv.reader(sdir_open('domainclass_cpn_v' + str(classifier_num) + '_randomforest_v1_predictions.csv'))
predictions_header = next(predictions_csv)
assert predictions_header[0] == 'predict'
test_data_csv = csv.reader(sdir_open('domainclass_cpn_test_all_withdomain_v' + str(classifier_num) + '.csv'))
test_data_header = next(test_data_csv)
assert test_data_header[0] == 'time_sec'
assert test_data_header[1] == 'user'
assert test_data_header[2] == 'ref_domain'
assert test_data_header[3] == 'label'
user_to_time_to_domain_id = {}
user_to_time_to_nextdomain_id = {}
user_to_time_to_prev7_domains_id = {}
user_to_time_to_immediate_prev7_domains_id = {}
total_correct = 0
total_items = 0
for predictions_line,test_data_line in izip(predictions_csv, test_data_csv):
total_items += 1
predict = predictions_line[0]
label = test_data_line[3]
if predict == label:
total_correct += 1
continue
time_sec = test_data_line[0] # needs to remain string so we can index into things
user = test_data_line[1]
ref_domain = test_data_line[2]
ref_domain_id = domain_to_id(ref_domain)
time_to_domain_id = user_to_time_to_domain_id.get(user, None)
if time_to_domain_id == None:
time_to_domain_id = get_recent_domain_id_at_seconds_for_user(user)
user_to_time_to_domain_id[user] = time_to_domain_id
time_to_nextdomain_id = user_to_time_to_nextdomain_id.get(user, None)
if time_to_nextdomain_id == None:
time_to_nextdomain_id = get_next_domain_id_at_seconds_for_user(user)
user_to_time_to_nextdomain_id[user] = time_to_nextdomain_id
time_to_prev7_domains_id = user_to_time_to_prev7_domains_id.get(user, None)
if time_to_prev7_domains_id == None:
time_to_prev7_domains_id = get_prev7_domains_id_at_seconds_for_user(user)
user_to_time_to_prev7_domains_id[user] = time_to_prev7_domains_id
time_to_immediate_prev7_domains_id = user_to_time_to_prev7_domains_id.get(user, None)
if time_to_immediate_prev7_domains_id == None:
time_to_immediate_prev7_domains_id = get_immediate_prev7_domains_id_at_seconds_for_user(user)
user_to_time_to_immediate_prev7_domains_id[user] = time_to_immediate_prev7_domains_id
pred_domain_id = -1
if predict == 'c':
pred_domain_id = time_to_domain_id[time_sec]
elif predict == 'n':
pred_domain_id = time_to_nextdomain_id[time_sec]
elif predict[0] == 'p':
num = int(predict[1])
pred_domain_id = time_to_prev7_domains_id[time_sec][num]
elif predict[0] == 'i':
num = int(predict[1])
pred_domain_id = time_to_immediate_prev7_domains_id[time_sec][num]
if pred_domain_id == -1:
pred_domain_id = time_to_domain_id[time_sec]
if pred_domain_id == ref_domain_id:
total_correct += 1
print classifier_num, float(total_correct) / total_items, total_correct, total_items
# Report reconstruction accuracy for every trained classifier version.
for classifier_num in [89, 88, 87, 86, 85, 84, 81, 80, 79, 78, 77]:
    get_performance_for_classifier(classifier_num)
# +
#print predictions['predict'][0,:][0,0]
# -
# NOTE(review): the triple-quoted string below is intentionally disabled code,
# kept for reference (bulk accuracy check over every saved model).
'''
for x in glob(sdir_path('domainclass_cpn_v*.h2o')):
    print x
    #classifier = load_h2o_model('domainclass_cpn_v88_randomforest_v1.h2o')
    classifier = load_h2o_model(x)
    print get_correct_ratio_for_classifier(classifier)
'''
|
find_best_domain_reconstruction_algorithm_h2o.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Filling Area on Line Plots
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
# Load the developer-salary survey data; one row per age.
data = pd.read_csv('data/data_dev.csv')
data.head()
# Columns used throughout the plots below.
ages = data['Age']
dev_salaries = data['All_Devs']
py_salaries = data['Python']
js_salaries = data['JavaScript']
# +
# Baseline line plot: All-Devs (dashed grey) vs. Python salaries.
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
plt.legend()
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.show()
# -
# Reference value: overall median salary used as the fill baseline below.
overall_median = 57287
# +
# Shade between the Python curve and the overall median.
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
plt.legend()
plt.fill_between(ages, py_salaries, overall_median, alpha=0.25)
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.show()
# +
# Without a second argument, fill_between shades down to y=0.
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
plt.legend()
plt.fill_between(ages, py_salaries, alpha=0.25)
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.show()
# +
# Only shade where the Python salary is ABOVE the overall median.
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
plt.legend()
plt.fill_between(ages, py_salaries, overall_median,
                 where=(py_salaries > overall_median), alpha=0.25)
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.show()
# +
# Only shade where the Python salary is AT OR BELOW the overall median.
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
plt.legend()
plt.fill_between(ages, py_salaries, overall_median,
                 where=(py_salaries <= overall_median), alpha=0.25)
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.show()
# +
# Combine both conditional fills to colour the regions differently.
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
plt.legend()
plt.fill_between(ages, py_salaries, overall_median,
                 where=(py_salaries > overall_median), alpha=0.25)
plt.fill_between(ages, py_salaries, overall_median,
                 where=(py_salaries <= overall_median), alpha=0.25)
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.show()
# +
# Fill against the All-Devs curve instead of a constant baseline.
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
plt.legend()
plt.fill_between(ages, py_salaries, dev_salaries,
                 where=(py_salaries > dev_salaries), alpha=0.25)
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.show()
# +
# Final version: shade the gap between Python and All-Devs salaries,
# colouring and labelling the above/below regions separately.
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
# Region where Python salaries exceed the All-Devs curve.
plt.fill_between(ages, py_salaries, dev_salaries,
                 where=(py_salaries > dev_salaries), alpha=0.25,
                 label='Above Avg')
# Region where Python salaries fall at or below it.
plt.fill_between(ages, py_salaries, dev_salaries,
                 where=(py_salaries <= dev_salaries), alpha=0.25,
                 label='Below Avg')  # fixed legend typo: was 'Bellow Avg'
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.legend()
plt.show()
|
matplotlib basics/matplotlib 5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
#
# *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by <NAME>; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
#
# *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
# <!--NAVIGATION-->
# < [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb) | [Contents](Index.ipynb) | [In Depth: Principal Component Analysis](05.09-Principal-Component-Analysis.ipynb) >
#
# <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.08-Random-Forests.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
#
# # In-Depth: Decision Trees and Random Forests
# Previously we have looked in depth at a simple generative classifier (naive Bayes; see [In Depth: Naive Bayes Classification](05.05-Naive-Bayes.ipynb)) and a powerful discriminative classifier (support vector machines; see [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb)).
# Here we'll take a look at motivating another powerful algorithm—a non-parametric algorithm called *random forests*.
# Random forests are an example of an *ensemble* method, meaning that it relies on aggregating the results of an ensemble of simpler estimators.
# The somewhat surprising result with such ensemble methods is that the sum can be greater than the parts: that is, a majority vote among a number of estimators can end up being better than any of the individual estimators doing the voting!
# We will see examples of this in the following sections.
# We begin with the standard imports:
# + jupyter={"outputs_hidden": false}
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
# -
# ## Motivating Random Forests: Decision Trees
# Random forests are an example of an *ensemble learner* built on decision trees.
# For this reason we'll start by discussing decision trees themselves.
#
# Decision trees are extremely intuitive ways to classify or label objects: you simply ask a series of questions designed to zero-in on the classification.
# For example, if you wanted to build a decision tree to classify an animal you come across while on a hike, you might construct the one shown here:
# 
# [figure source in Appendix](06.00-Figure-Code.ipynb#Decision-Tree-Example)
# The binary splitting makes this extremely efficient: in a well-constructed tree, each question will cut the number of options by approximately half, very quickly narrowing the options even among a large number of classes.
# The trick, of course, comes in deciding which questions to ask at each step.
# In machine learning implementations of decision trees, the questions generally take the form of axis-aligned splits in the data: that is, each node in the tree splits the data into two groups using a cutoff value within one of the features.
# Let's now look at an example of this.
# ### Creating a decision tree
#
# Consider the following two-dimensional data, which has one of four class labels:
# + jupyter={"outputs_hidden": false}
# Generate 300 2-D points in four Gaussian clusters as the toy dataset.
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=300, centers=4,
                  random_state=0, cluster_std=1.0)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow');
# -
# A simple decision tree built on this data will iteratively split the data along one or the other axis according to some quantitative criterion, and at each level assign the label of the new region according to a majority vote of points within it.
# This figure presents a visualization of the first four levels of a decision tree classifier for this data:
# 
# [figure source in Appendix](06.00-Figure-Code.ipynb#Decision-Tree-Levels)
# Notice that after the first split, every point in the upper branch remains unchanged, so there is no need to further subdivide this branch.
# Except for nodes that contain all of one color, at each level *every* region is again split along one of the two features.
# This process of fitting a decision tree to our data can be done in Scikit-Learn with the ``DecisionTreeClassifier`` estimator:
from sklearn.tree import DecisionTreeClassifier
# Fit a single (unregularized) decision tree to the blobs data.
tree = DecisionTreeClassifier().fit(X, y)
# Let's write a quick utility function to help us visualize the output of the classifier:
def visualize_classifier(model, X, y, ax=None, cmap='rainbow'):
    """Fit *model* on the 2-D points (X, y) and draw its decision regions.

    Parameters
    ----------
    model : scikit-learn style estimator with ``fit`` and ``predict``
    X : array of shape (n_samples, 2) -- two features so regions can be drawn
    y : per-sample integer class labels (also colour the scatter points)
    ax : matplotlib axes to draw on; defaults to the current axes
    cmap : colormap shared by the points and the filled regions
    """
    ax = ax or plt.gca()

    # Plot the training points
    ax.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=cmap,
               clim=(y.min(), y.max()), zorder=3)
    ax.axis('tight')
    ax.axis('off')
    # Freeze the data-driven limits before contourf can change them.
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()

    # fit the estimator
    model.fit(X, y)
    # Evaluate the model on a 200x200 grid spanning the visible area.
    xx, yy = np.meshgrid(np.linspace(*xlim, num=200),
                         np.linspace(*ylim, num=200))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

    # Colour the predicted regions; level boundaries sit halfway between the
    # integer class labels.  (The return value was previously bound to an
    # unused local, now dropped.)
    n_classes = len(np.unique(y))
    ax.contourf(xx, yy, Z, alpha=0.3,
                levels=np.arange(n_classes + 1) - 0.5,
                cmap=cmap,
                zorder=1)

    ax.set(xlim=xlim, ylim=ylim)
# Now we can examine what the decision tree classification looks like:
# + jupyter={"outputs_hidden": false}
# Show the decision regions of a freshly-fit decision tree.
visualize_classifier(DecisionTreeClassifier(), X, y)
# -
# If you're running this notebook live, you can use the helpers script included in [The Online Appendix](06.00-Figure-Code.ipynb#Helper-Code) to bring up an interactive visualization of the decision tree building process:
# + jupyter={"outputs_hidden": false}
# helpers_05_08 is found in the online appendix (project-local module;
# this cell only works when that file is on the path)
import helpers_05_08
helpers_05_08.plot_tree_interactive(X, y);
# -
# Notice that as the depth increases, we tend to get very strangely shaped classification regions; for example, at a depth of five, there is a tall and skinny purple region between the yellow and blue regions.
# It's clear that this is less a result of the true, intrinsic data distribution, and more a result of the particular sampling or noise properties of the data.
# That is, this decision tree, even at only five levels deep, is clearly over-fitting our data.
# ### Decision trees and over-fitting
#
# Such over-fitting turns out to be a general property of decision trees: it is very easy to go too deep in the tree, and thus to fit details of the particular data rather than the overall properties of the distributions they are drawn from.
# Another way to see this over-fitting is to look at models trained on different subsets of the data—for example, in this figure we train two different trees, each on half of the original data:
# 
# [figure source in Appendix](06.00-Figure-Code.ipynb#Decision-Tree-Overfitting)
# It is clear that in some places, the two trees produce consistent results (e.g., in the four corners), while in other places, the two trees give very different classifications (e.g., in the regions between any two clusters).
# The key observation is that the inconsistencies tend to happen where the classification is less certain, and thus by using information from *both* of these trees, we might come up with a better result!
# If you are running this notebook live, the following function will allow you to interactively display the fits of trees trained on a random subset of the data:
# + jupyter={"outputs_hidden": false}
# helpers_05_08 is found in the online appendix (project-local module)
import helpers_05_08
helpers_05_08.randomized_tree_interactive(X, y)
# -
# Just as using information from two trees improves our results, we might expect that using information from many trees would improve our results even further.
# ## Ensembles of Estimators: Random Forests
#
# This notion—that multiple overfitting estimators can be combined to reduce the effect of this overfitting—is what underlies an ensemble method called *bagging*.
# Bagging makes use of an ensemble (a grab bag, perhaps) of parallel estimators, each of which over-fits the data, and averages the results to find a better classification.
# An ensemble of randomized decision trees is known as a *random forest*.
#
# This type of bagging classification can be done manually using Scikit-Learn's ``BaggingClassifier`` meta-estimator, as shown here:
# + jupyter={"outputs_hidden": false}
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
# Bag 100 trees, each fit on a random 80% subsample of the training points.
tree = DecisionTreeClassifier()
bag = BaggingClassifier(tree, n_estimators=100, max_samples=0.8,
                        random_state=1)
bag.fit(X, y)
visualize_classifier(bag, X, y)
# -
# In this example, we have randomized the data by fitting each estimator with a random subset of 80% of the training points.
# In practice, decision trees are more effectively randomized by injecting some stochasticity in how the splits are chosen: this way all the data contributes to the fit each time, but the results of the fit still have the desired randomness.
# For example, when determining which feature to split on, the randomized tree might select from among the top several features.
# You can read more technical details about these randomization strategies in the [Scikit-Learn documentation](http://scikit-learn.org/stable/modules/ensemble.html#forest) and references within.
#
# In Scikit-Learn, such an optimized ensemble of randomized decision trees is implemented in the ``RandomForestClassifier`` estimator, which takes care of all the randomization automatically.
# All you need to do is select a number of estimators, and it will very quickly (in parallel, if desired) fit the ensemble of trees:
# + jupyter={"outputs_hidden": false}
from sklearn.ensemble import RandomForestClassifier
# RandomForestClassifier handles the per-tree randomization internally.
model = RandomForestClassifier(n_estimators=100, random_state=0)
visualize_classifier(model, X, y);
# -
# We see that by averaging over 100 randomly perturbed models, we end up with an overall model that is much closer to our intuition about how the parameter space should be split.
# ## Random Forest Regression
#
# In the previous section we considered random forests within the context of classification.
# Random forests can also be made to work in the case of regression (that is, continuous rather than categorical variables). The estimator to use for this is the ``RandomForestRegressor``, and the syntax is very similar to what we saw earlier.
#
# Consider the following data, drawn from the combination of a fast and slow oscillation:
# + jupyter={"outputs_hidden": false}
# Synthetic regression data: 200 points drawn uniformly on [0, 10).
rng = np.random.RandomState(42)
x = 10 * rng.rand(200)
def model(x, sigma=0.3):
    """Fast + slow sinusoid with Gaussian noise of scale *sigma*.

    Note: draws noise from the module-level ``rng``, so repeated calls
    advance the shared random state.
    """
    fast_oscillation = np.sin(5 * x)
    slow_oscillation = np.sin(0.5 * x)
    noise = sigma * rng.randn(len(x))
    return slow_oscillation + fast_oscillation + noise
y = model(x)
plt.errorbar(x, y, 0.3, fmt='o');
# -
# Using the random forest regressor, we can find the best fit curve as follows:
# + jupyter={"outputs_hidden": false}
from sklearn.ensemble import RandomForestRegressor
# Fit a 200-tree forest and compare its fit (red) to the noiseless truth (grey).
forest = RandomForestRegressor(200)
forest.fit(x[:, None], y)
xfit = np.linspace(0, 10, 1000)
yfit = forest.predict(xfit[:, None])
ytrue = model(xfit, sigma=0)
plt.errorbar(x, y, 0.3, fmt='o', alpha=0.5)
plt.plot(xfit, yfit, '-r');
plt.plot(xfit, ytrue, '-k', alpha=0.5);
# -
# Here the true model is shown in the smooth gray curve, while the random forest model is shown by the jagged red curve.
# As you can see, the non-parametric random forest model is flexible enough to fit the multi-period data, without us needing to specify a multi-period model!
# ## Example: Random Forest for Classifying Digits
#
# Earlier we took a quick look at the hand-written digits data (see [Introducing Scikit-Learn](05.02-Introducing-Scikit-Learn.ipynb)).
# Let's use that again here to see how the random forest classifier can be used in this context.
# + jupyter={"outputs_hidden": false}
from sklearn.datasets import load_digits
# 8x8 hand-written digit images with integer targets 0-9.
digits = load_digits()
digits.keys()
# -
# To remind us what we're looking at, we'll visualize the first few data points:
# + jupyter={"outputs_hidden": false}
# set up the figure
fig = plt.figure(figsize=(6, 6))  # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# plot the digits: each image is 8x8 pixels, shown in an 8x8 grid of axes
for i in range(64):
    ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
    ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')
    # label the image with the target value
    ax.text(0, 7, str(digits.target[i]))
# -
# We can quickly classify the digits using a random forest as follows:
# + jupyter={"outputs_hidden": false}
from sklearn.model_selection import train_test_split
# Hold out a test split, then fit a 1000-tree forest on the rest.
Xtrain, Xtest, ytrain, ytest = train_test_split(digits.data, digits.target,
                                                random_state=0)
model = RandomForestClassifier(n_estimators=1000)
model.fit(Xtrain, ytrain)
ypred = model.predict(Xtest)
# -
# We can take a look at the classification report for this classifier:
# + jupyter={"outputs_hidden": false}
from sklearn import metrics
# classification_report's signature is (y_true, y_pred); the original call
# passed (ypred, ytest), swapping truth and prediction and thereby
# transposing per-class precision and recall in the printed report.
print(metrics.classification_report(ytest, ypred))
# -
# And for good measure, plot the confusion matrix:
# + jupyter={"outputs_hidden": false}
from sklearn.metrics import confusion_matrix
# Rows of `mat` are true labels, columns predictions; transpose for plotting.
mat = confusion_matrix(ytest, ypred)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)
plt.xlabel('true label')
plt.ylabel('predicted label');
# -
# We find that a simple, untuned random forest results in a very accurate classification of the digits data.
# ## Summary of Random Forests
#
# This section contained a brief introduction to the concept of *ensemble estimators*, and in particular the random forest – an ensemble of randomized decision trees.
# Random forests are a powerful method with several advantages:
#
# - Both training and prediction are very fast, because of the simplicity of the underlying decision trees. In addition, both tasks can be straightforwardly parallelized, because the individual trees are entirely independent entities.
# - The multiple trees allow for a probabilistic classification: a majority vote among estimators gives an estimate of the probability (accessed in Scikit-Learn with the ``predict_proba()`` method).
# - The nonparametric model is extremely flexible, and can thus perform well on tasks that are under-fit by other estimators.
#
# A primary disadvantage of random forests is that the results are not easily interpretable: that is, if you would like to draw conclusions about the *meaning* of the classification model, random forests may not be the best choice.
# <!--NAVIGATION-->
# < [In-Depth: Support Vector Machines](05.07-Support-Vector-Machines.ipynb) | [Contents](Index.ipynb) | [In Depth: Principal Component Analysis](05.09-Principal-Component-Analysis.ipynb) >
#
# <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.08-Random-Forests.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
#
|
present/bi2/2020/ubb/az_en_jupyter2_mappam/PythonDataScienceHandbook/05.08-Random-Forests.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tapas
# language: python
# name: tapas
# ---
import pandas as pd
import csv
from pathlib import Path
def _load_predictions(path, delim='\t'):
    """Read a delimited prediction/reference file into a DataFrame of strings.

    The first row supplies the column names.  Quoting is disabled
    (QUOTE_NONE) so embedded quote characters survive verbatim, and a
    backslash escapes the delimiter itself.
    """
    with open(path) as handle:
        rows = csv.reader(
            handle,
            delimiter=delim,
            quotechar='"',
            quoting=csv.QUOTE_NONE,
            escapechar='\\',
        )
        header = next(rows)
        frame = pd.DataFrame(list(rows), columns=header)
    return frame
# Reference splits shipped with the WikiTableQuestions release (TSV files).
refdir = Path('../WikiTableQuestions')/'data'
train_refs = _load_predictions(refdir/'random-split-1-train.tsv')
dev_refs = _load_predictions(refdir/'random-split-1-dev.tsv')
test_refs = _load_predictions(refdir/'pristine-unseen-tables.tsv')
# Work on the dev split below.
t = dev_refs
len(dev_refs)
# Regex fragments that indicate a yes/no-style question.
patterns = [
    'yes or no',
    '^are',
    '^is',
    '^did',
    '^was',
    '^were',
    'true or false',
    'is it true'
]
# Count dev utterances that look like yes/no questions.
len(t[t['utterance'].str.contains('|'.join(patterns))]['utterance'])
# Utterances containing the bare word 'yes'.
t[t['utterance'].apply(lambda x: 'yes' in x.split(' '))]
# Fragments suggesting a comparative/selection question.
selection_ptrs = [
    'better'
]
len(t[t['utterance'].str.contains('|'.join(selection_ptrs))]['utterance'])
|
dataset_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import os
import git
import sys
# Locate the repository root so the data paths work from any subdirectory.
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# +
#Loading in County Data
#Datasets that are altered & resaved as such below
Age_Race = pd.read_csv(f"{homedir}/data/us/demographics/acs_2018.csv", encoding='latin1')
Votes = pd.read_csv(f"{homedir}/data/us/demographics/countypres_2000-2016.csv", encoding='latin1')
Educ = pd.read_csv(f"{homedir}/data/us/demographics/education.csv", encoding='latin1')
Density = pd.read_csv(f"{homedir}/data/us/demographics/county_land_areas.csv", encoding='latin1')
Unemp = pd.read_csv(f"{homedir}/data/us/demographics/unemployment.csv", delimiter="\t")
Poverty = pd.read_csv(f"{homedir}/data/us/demographics/poverty.csv", delimiter="\t")
#Key to map FIPS values to State and County Name
Key = pd.read_csv(f"{homedir}/data/us/processing_data/fips_key.csv", encoding='latin1')
Key = Key.drop(columns=['MSA/PMSA NECMA']) #Dropping unnecessary column
Key = Key.set_index('FIPS')
Key.to_csv('Key.csv')
#Datasets that are not altered
Pop_60 = pd.read_csv(f"{homedir}/data/us/demographics/county_populations.csv", encoding='latin1')
Pop_60 = Pop_60.set_index('FIPS')
Pop_60.to_csv('Pop_60.csv')
# -
#Changes prefixes of column names
def drop_prefix(self, prefix, replace = ''):
    """Replace every literal occurrence of *prefix* in the column names.

    ``regex=False`` is now passed explicitly: the census column names contain
    characters such as ``(`` and ``)`` that a regular expression would
    misinterpret, and the default of ``Series.str.replace`` changed across
    pandas versions (regex=True historically, regex=False since 2.0), so the
    literal behaviour is pinned here.

    Mutates the DataFrame in place and returns it for chaining.
    """
    self.columns = self.columns.str.replace(prefix, replace, regex=False)
    return self
#Removes all duplicate columns from dataframes
def drop_dup_col(df):
    """Return *df* restricted to the first occurrence of each column name."""
    keep = ~df.columns.duplicated()
    return df.loc[:, keep]
# +
#Cleaning Voting Data: keep only Democratic vote shares per county/year
Votes = Votes[Votes.party != 'green']
Votes = Votes[Votes.party != 'republican'] #Removing unneeded rows
Votes = Votes[Votes.candidate != 'Other']
Votes = Votes[Votes.FIPS >= -1] #Removing NaN rows (NaN fails the comparison)
################################
Votes = Votes.drop(columns=['state', 'state_po', 'county', 'office', 'candidate', 'version']) #removing unneeded columns
Votes.insert(5, "Prop_Blue", Votes.candidatevotes/Votes.totalvotes, True) #Adding column of fraction of pop that vote dem.
Votes = Votes.drop(columns=['candidatevotes', 'party'])
Votes = Votes.pivot(index= 'FIPS', columns = 'year') #making FIPS main index
Votes.to_csv('Votes.csv')
#Removing the pivot aspect (MultiIndex columns) by a CSV round-trip
Votes = pd.read_csv('Votes.csv', encoding='latin1')
Votes = Votes.drop([0,1])
Votes.columns = ['FIPS', 'Total_Votes_2000', 'Total_Votes_2004', 'Total_Votes_2008', 'Total_Votes_2012',\
                 'Total_Votes_2016', 'Frac_Dem_2000', 'Frac_Dem_2004', 'Frac_Dem_2008', 'Frac_Dem_2012', 'Frac_Dem_2016']
Votes.FIPS = Votes.FIPS.astype(str).astype(float).astype(int) #Rewriting the column values as integers
Votes = Votes.set_index('FIPS')
Votes.to_csv('Votes.csv')
# +
#Cleaning the Racial/Age Data
Age_Race = Age_Race.sort_values(by=['FIPS'])
#removing these percent/ratio values as these are poorly rounded, can be manually computed later
Age_Race = Age_Race[Age_Race.columns.drop(list(Age_Race.filter(regex='Percent')))]
Age_Race = Age_Race[Age_Race.columns.drop(list(Age_Race.filter(regex='ratio')))]
#Dropping unnecessary column-name prefixes
Age_Race = drop_prefix(Age_Race, 'Estimate!!')
Age_Race = drop_prefix(Age_Race, 'SEX AND AGE!!')
Age_Race = drop_prefix(Age_Race, 'RACE!!')
Age_Race = drop_prefix(Age_Race, 'Total population!!') #Changing column title names
Age_Race = drop_prefix(Age_Race, 'One race!!', 'Exclusively ')
Age_Race = drop_prefix(Age_Race, 'Two or more races!!', 'Interracial ')
Age_Race = drop_prefix(Age_Race, 'Race alone or in combination with one or more other races!!', 'Total ')
Age_Race = drop_prefix(Age_Race, 'HISPANIC OR LATINO AND ')
#Dropping unnecessary columns
Age_Race = drop_dup_col(Age_Race) #Removes duplicate columns
Age_Race = Age_Race[Age_Race.columns.drop(list(Age_Race.filter(regex='.1')))] #removes extra duplicate columns
Age_Race = Age_Race.drop(columns=['Geographic Area Name', 'Total Total population'])
Age_Race = Age_Race.replace('N', 0) #changing NaN-marker values to 0
#####################################
Age_Race = Age_Race.set_index('FIPS')
Age_Race.to_csv('Age_Race.csv')
# +
#Cleaning Education Data, removing state-level rows (FIPS multiples of 1000)
Educ_County = Educ[Educ['FIPS'] % 1000 != 0]
Educ_County = Educ_County.set_index('FIPS')
Educ_County.to_csv('Educ_County.csv')
# +
#Cleaning Density area Data
Density = Density.drop(columns=['County Name']) #Dropping unnecessary column
Density.columns = Density.columns.str.replace('County FIPS','FIPS')
Density = Density.set_index('FIPS')
Density.to_csv('Density.csv')
# +
#Cleaning Unemployment area Data
Unemp = Unemp.drop(columns=['State', 'Area_name']) #Dropping unnecessary columns
Unemp = Unemp.set_index('FIPS')
Unemp.to_csv('Unemp.csv')
# +
#Cleaning Poverty area Data
Poverty = Poverty.drop(columns=['Stabr', 'Area_name', 'Rural-urban_Continuum_Code_2013', 'Urban_Influence_Code_2013'])
#Dropping unnecessary columns
Poverty = Poverty.set_index('FIPS')
Poverty.to_csv('Poverty.csv')
# -
# Sanity check: row counts of each cleaned table.
print('Votes: ' + str(len(Votes)))
print('Age_Race: ' + str(len(Age_Race)))
print('Educ_County: ' + str(len(Educ_County)))
print('Density: ' + str(len(Density)))
print('Unemp: ' + str(len(Unemp)))
print('Poverty: ' + str(len(Poverty)))
print('Key: ' + str(len(Key)))
print('Pop_60: ' + str(len(Pop_60)))
# Eyeball the first rows of each table.
Votes.head()
Age_Race.head()
Educ_County.head()
Density.head()
Unemp.head()
Poverty.head()
Key.head()
Pop_60.head()
|
models/processing/USA/County_Based/Demographics_County.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementing Monroe's Fightin' Words metric
#
# This is an implementation of several plots from the Monroe et al. paper. The goal is to identify words that have significantly different frequency in one collection or another.
from collections import Counter
import regex
import numpy as np
from matplotlib import pyplot
word_pattern = regex.compile(r"\w+")
# Here's where we convert a long string into word counts. This is really about deciding what factors do and do not have relevance to my specific study.
#
# I noticed earlier that Hamilton seems to use "Executive" (capitalized) while Madison uses "executive". This distinction is lost when we lower-case everything. Is that important? It could be, but I'm saying that for today, it's not. We have to make decisions, the important thing is to realize that we're making decisions.
# +
# Per-author word frequencies.  Each line of documents.txt is tab-separated:
# id, author field, document text.
madison_counter = Counter()
hamilton_counter = Counter()
jay_counter = Counter()

with open("documents.txt") as reader:
    # was `for line in tqdm(reader):` -- tqdm is never imported in this
    # notebook, which raised a NameError; iterate the file directly.
    for line in reader:
        fields = line.rstrip().split("\t")
        if len(fields) < 3:
            continue
        tokens = word_pattern.findall(fields[2].lower())
        # An essay can credit multiple authors, so these are not elif's.
        if "Madison" in fields[1]:
            madison_counter.update(tokens)
        if "Hamilton" in fields[1]:
            hamilton_counter.update(tokens)
        if "Jay" in fields[1]:
            jay_counter.update(tokens)
# -
# Top-10 words per author (dominated by stopwords, as expected).
madison_counter.most_common(10)
hamilton_counter.most_common(10)
jay_counter.most_common(10)
# +
# The Counter class allows cool counter arithmetic: '+' sums counts per key
all_counter = madison_counter + hamilton_counter + jay_counter
# -
vocabulary = list(all_counter.keys())
len(vocabulary)
# +
# keys() returns words in the order they were added, which here starts with
# the Madison counter's insertion order. If we want to sort, use most_common()
print(vocabulary[:20], vocabulary[-20:])
# +
# another great thing about Counter vs dict: it doesn't
# freak out if you ask for something it doesn't know about,
# it just gives you 0
# a common pattern I use: use zeros() to allocate space for
# a matrix, then fill it.
# Columns: 0 = Madison, 1 = Hamilton, 2 = Jay.
word_counts = np.zeros((len(vocabulary), 3))
for row, word in enumerate(vocabulary):
    word_counts[row,0] = madison_counter[word]
    word_counts[row,1] = hamilton_counter[word]
    word_counts[row,2] = jay_counter[word]
# +
# Numpy by default uses scientific notation, I want decimals
np.set_printoptions(suppress=True)
# -
word_counts[:10,:]
# +
# .sum(axis=0) sums along columns (ie [row,col]) -> one total per author
author_sums = word_counts.sum(axis=0)
# Use np.newaxis to streeeeetch an array to the shape of
# another array (broadcasting the per-author totals across all rows)
word_proportions = word_counts / author_sums[np.newaxis,:]
# Hamilton writes more than Madison, Jay left to pursue other interests
author_sums
# +
# most probabilities are small, but note the hard 0s
word_proportions[:10,:]
# +
# What are the odds? p(w) / p(not w) = p(w) / (1-p(w))
word_odds = word_counts / (author_sums[np.newaxis,:] - word_counts)
word_odds[:10,:]
# +
# me verifying that the odds were being calculated correctly, for a
# slightly different vocab size
2 / 66375
# -
2 / 66373
# +
# get the *log* frequency of words to use as the x-axis for plots
# axis 0 in [row,col] is along rows, axis 1 is along columns
# to be clear: sum -> *along* -> columns to get the sum for each row
type_frequencies = word_counts.sum(axis=1)
log_type_frequencies = np.log(type_frequencies)
# -
def plot_words(y_value):
    """Scatter each word's score against its log frequency, labelling outliers.

    Words whose |score| exceeds two standard deviations of the score vector
    are annotated with their text; everything else stays an anonymous point.
    Relies on the module-level ``log_type_frequencies`` and ``vocabulary``.
    """
    threshold = 2 * y_value.std()
    pyplot.figure(figsize=(6,18))
    pyplot.scatter(log_type_frequencies, y_value, alpha=0.3)
    for idx, word in enumerate(vocabulary):
        if np.abs(y_value[idx]) > threshold:
            pyplot.text(log_type_frequencies[idx], y_value[idx], word)
    pyplot.show()
# +
# raw counts aren't very interesting -- Hamilton [1] writes more
# than Madison [0]
plot_words(word_counts[:,0] - word_counts[:,1])
# +
# the difference in proportion emphasizes high-frequency words
# (low-frequency words are always close to 0)
plot_words(word_proportions[:,0] - word_proportions[:,1])
# +
# odds are very close to proportions
plot_words(word_odds[:,0] - word_odds[:,1])
# +
# the log odds ratio blows up because we have 0s (log of 0 or inf)
plot_words(np.log(word_odds[:,0] / word_odds[:,1]))
# -
# ## Smoothing to avoid 0s
#
# When we make a distribution by dividing a set of counts by their sum, we risk adding 0s. One way to avoid this is to add a non-zero value to each element of the array and then divide by the new sum (including the added values). This makes distributions "smoother".
#
# We'll see a lot more about this, but for now note the effect of adding constants of different magnitudes on the resulting distribution.
#
# Orange = actual observations, Blue = smoothing. The total size of the bar is the smoothed distribution.
# +
# Demo: stacked bars show the smoothing constant (blue, bottom) under the
# observed counts (orange) for three magnitudes of smoothing.
sparse_array = np.array([1,0,1,4,0])
smoothing = 1.0 * np.array([1,1,1,1,1])
pyplot.bar(range(5), smoothing)
pyplot.bar(range(5), sparse_array, bottom=smoothing)
pyplot.show()
# Small smoothing barely perturbs the observed distribution...
smoothing = 0.05 * np.array([1,1,1,1,1])
pyplot.bar(range(5), smoothing)
pyplot.bar(range(5), sparse_array, bottom=smoothing)
pyplot.show()
# ...while large smoothing washes it out towards uniform.
smoothing = 10.0 * np.array([1,1,1,1,1])
pyplot.bar(range(5), smoothing)
pyplot.bar(range(5), sparse_array, bottom=smoothing)
pyplot.show()
# -
# Add-one smoothing removes the zeros so log-odds stay finite.
smoothed_word_counts = word_counts + 1.0
smoothed_author_sums = smoothed_word_counts.sum(axis=0)
smoothed_word_odds = smoothed_word_counts / (smoothed_author_sums[np.newaxis,:] - smoothed_word_counts)
smoothed_word_odds[:10,:]
# +
# Unlike proportions, log odds ratios emphasize rare words
plot_words(np.log(smoothed_word_odds[:,0] / smoothed_word_odds[:,1]))
# +
# For the final part of Fightin' Words, divide each log-odds-ratio by
# an estimate of its variance. (See derivation for details).
# Think about how this variance term affects rare words and more
# frequent words.
log_odds_difference = np.log(smoothed_word_odds[:,0] / smoothed_word_odds[:,1])
log_odds_variances = 1.0 / smoothed_word_counts[:,0] + 1.0 / smoothed_word_counts[:,1]
plot_words(log_odds_difference / np.sqrt(log_odds_variances))
# -
# Visualizing lots of words can be difficult. Notice which words your eye is drawn to and which you can't even make out. We often use list views for greater browsability.
def sort_words(scores, n=20):
    """Print the *n* lowest-scored and *n* highest-scored words.

    Each score is paired with its word from the module-level ``vocabulary``
    (same order as ``scores``); the pairs are ranked by score and the two
    extremes of the ranking are printed.
    """
    ranking = [word for _, word in sorted(zip(scores, vocabulary))]
    print("[most negative] ", ", ".join(ranking[:n]))
    print("...")
    print(", ".join(ranking[-n:]), " [most positive]")
sort_words(log_odds_difference / np.sqrt(log_odds_variances), 100)
|
notebooks/template_tokenization_6150.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# An introduction to open quantum systems
# ========================================
#
# ### 6.1 Qubit decoherence mechanisms: energy relaxation ($T_{1}$) and dephasing ($T_{2}$)
#
# #### <NAME>, IBM TJ Watson Research Center
#
# #### Abstract
# In this brief chapter, we promote our closed-system treatment of a quantum bit (qubit) to an open-system one. Here, you will learn the basic ideas of open-system evolution. Our focus is on unwanted, incoherent processes. These are unavoidable in a physical system and are thus necessary to understand for any practical quantum computation. A "real" qubit does not exist in isolation, but rather as a physical system embedded in an environment. Due to the unavoidable coupling with the environment (here, considered unmonitored) the qubit evolution is profoundly affected. Typically, the coupling results in two chief decoherence mechanisms that limit qubit performance: unwanted loss of energy and loss of coherence. These are characterized by the energy-relaxation lifetime ($T_{1}$) and the total dephasing time ($T_{2}$), respectively. We will explain these with a simple toy model based on the simplest possible quantum trajectory (used in the qiskit Aer simulator). We obtain the standard Linbdlad form of the master equation, which governs the evolution of the average quantum state (the density matrix). We explain how to characterize $T_{1}$ and $T_{2}$ noise. Finally, we comment on related notions such as $T_{2}^{*}$, $T_{2}^{E}$, and a hot, thermal environment. Finally, we will measure $T_{1}$ and $T_{2}$ on a real quantum device.
#
# <hr>
#
# #### Contents
#
#
# - [6.1 Qubit decoherence mechanisms: energy relaxation ($T_{1}$) and dephasing ($T_{2}$)](#61-qubit-decoherence-mechanisms-energy-relaxation-t_1-and-dephasing-t_2)
# - Introduction
# - [Qubit and environment.](#qubit-and-environment)
# - [Environmental noise model: microscopic origin.](#environmental-noise-model-microscopic-origin)
# - [Description of just the qubit system that accounts for the coupling to the environment.](#description-of-just-the-qubit-system-that-accounts-for-the-coupling-to-the-environment)
# - [Review of preliminaries: Bloch-vector representation.](#review-of-preliminaries-bloch-vector-representation)
# - [The bit-flip quantum channel: random kicks](#the-bit-flip-quantum-channel-random-kicks)
# - [Model: to kick or not to kick](#model-to-kick-or-not-to-kick)
# - [Average state evolution](#average-state-evolution)
# - [Bloch picture](#bloch-picture)
# - [Time evolution](#time-evolution)
# - [The amplitude damping channel: energy loss$\left(T_{1}\right)$](#the-amplitude-damping-channel-energy-lossleftt_1right)
# - [Phase damping $T_{\phi}$](#phase-damping-t_phi)
# - [Both amplitude $T_{1}$ and phase damping $T_{\phi}$](#both-amplitude-t_1-and-phase-damping-t_phi)
# - [ignis](#ignis)
#
#
# Open quantum system evolution: a simple model
# =============================================
#
# ###### Qubit and environment.
#
# For simplicity and definitiveness of example, let us begin by restricting our attention to the simplest possible quantum system one that has two levels. A qubit [^1] cannot exist in isolation. It is part of a larger world that it is unavoidably coupled to. The coupling is an interaction between the qubit degree of freedom and the rest of the world, which we will refer to as the *environment*. Sometimes, the environment is also referred to as the *bath* or *reservoir.* As it consists of the rest of the world, we consider it large, or *macroscopic* composed of many degrees of freedom, which for all practical purposes we have no knowledge of or control over.
#
# ###### Environmental noise model: microscopic origin.
#
# Let us begin with a semi-classical picture. If the environmental degrees of freedom are uncontrolled, we may imagine that each of them them jiggles about stochastically. However, as a degree jiggles, if it is coupled to the qubit, it effects a force on the qubit due to the system-environment coupling. The result is a stochastic force applied on the qubit that alters its states in an unknown manner. Even if the environmental degrees of freedom are very weakly coupled to the qubit, their multitude and continuous existence [^2] can lead to a signifiant alteration of the qubit dynamics. The coupling, which can serve to jiggle the qubit about stochastically, also acts in the reverse direction. Any energy (or information) stored in the system can leak out of the qubit into the environment [^3]. In summary, unavoidable coupling between our small qubit system and the *macroscopic* environment rapidly leads to loss of quantum coherence for the qubit a process known as *decoherence.*
#
# ###### Description of just the qubit system that accounts for the coupling to the environment.
#
# Since we cannot track the environment, we will focus on a description of just the qubit system. The effect of the environment will be factored in in an average sense — we will discuss the density matrix $\rho$ rather than the pure state $\left|\psi\right\rangle$. We could at this stage write down a quantum description of both the system and the environment, considered as an infinite number of quantum degrees of freedom, and proceed to trace out the environment under some assumptions. This canonical approach is explicated in many textbooks, including Nielsen and Chuang or Carmichael. Let us rather take a different tack. This is a slightly simpler approach that will lead to the same final result, while teaching us some new basic concepts along the way.
#
# ##### Review of preliminaries: Bloch-vector representation.
#
# Before proceeding to look at the ensemble, average behavior that results from the interaction with the environment, let us review the description of the density matrix in terms of the Bloch vector representation. Let us briefly review the Bloch vector of a qubit. It consists of three components, denoted $x,y$, and $z$, which find values in the range bounded by -1 and +1. Each corresponds to the expectation value of a Pauli operator $(X,Y,Z)$. For example $x=\operatorname{Tr}\left(X\rho\right).$ Note that $x$ is a scalar variable, while $X$ is a quantum operator. These three components represent all informationally orthogonal directions. Together they specify the quantum state fully. For convenience, they can be grouped into a vector $\vec{r}\left(t\right)=\left(x\left(t\right),y\left(t\right),z\left(t\right)\right)$. The density matrix expressed in terms of the Bloch vector components is $$\begin{aligned}
# \rho & =\frac{1}{2}\begin{pmatrix}1+z & x-iy\\
# x+iy & 1-z
# \end{pmatrix}\\
# & =\frac{1}{2}\left(I+xX+yY+zZ\right)\\
# & =\frac{1}{2}\left(I+\vec{r}\cdot\vec{\sigma}\right)\,,\end{aligned}$$ where in the last line, for notational convenience, we expressed the inner product between the vector of expectations $\vec{r}$ and the vector of Pauli operators $\vec{\sigma}=\left(X,Y,Z\right).$ Note that there is no global phase when discussing the density matrix. The Bloch vector length $\left|\vec{r}\right|_{2}=\sqrt{x^{2}+y^{2}+z^{2}}$ is a number between zero and one. One indicates that the state is pure, while zero indicates that the state is completely mixed. It is important to keep in mind that the ground state ($\left|g\right\rangle$) is denoted by $z=+1$, while the excited state ($\left|e\right\rangle$) is represented by $z=-1$. This may at first seem at adds with your intuition.
#
# #### The bit-flip quantum channel: random kicks
#
# ###### Model: to kick or not to kick
#
# Consider the following elementary model of the interaction between the qubit and the environment. Suppose that at each instant in time the environment can only perform one of two possible actions on the qubit either it does nothing ($I$) or it flips the qubit bit ($X$). Consider the case in which the environment does nothing we can think of this as the environment applying the identity gate $I$ to the qubit. This case occurs with a probability that we will denote $1-p$ [^4]. Denoting time by $t$ and the time increment by $\mathrm{d}t$, the quantum state $\rho$ at time $t+\mathrm{d}t$ (conditioned on the identity gate $I$ having been applied by the environment) is $$\rho_{I}\left(t+\mathrm{d}t\right)=I\rho\left(t\right)I\,,\qquad\mathbb{P}\left(I\right)=1-p\,,$$ where we have expressed the probability of the identity gate occurring as $\mathbb{P}\left(I\right)=1-p$. If $p=0$, then the environment always applies the identity and never performs a random action. This is the case of a closed quantum system. For all time, an initial pure state $\rho\left(0\right)=\left|\psi\right\rangle \left\langle \psi\right|$ will remain pure.
#
# What happens in the remainder of the instances, corresponding to the probability $\mathbb{P}\left(X\right)\equiv p$? Let's say the environment randomly applies the bit-flip gate $X$ to the qubit. The resulting state at time $t+\mathrm{d}t$ is now $$\rho_{X}\left(t+\mathrm{d}t\right)=X\rho\left(t\right)X\,,\qquad\mathbb{P}\left(X\right)=p\,.$$ If the qubit was initialized in $\left|0\right\rangle \left\langle 0\right|$, it will at the next time step be found in $\left|1\right\rangle \left\langle 1\right|$ with probability $p$.
#
# ##### Average state evolution
#
# Since we cannot keep track of the environment, we must average these various possible outcomes together to obtain the (unconditional) ensemble state $$\begin{aligned}
# \rho\left(t+\mathrm{d}t\right) & =\mathbb{P}\left(I\right)\rho_{I}\left(t+\mathrm{d}t\right)+\mathbb{P}\left(X\right)\rho_{X}\left(t+\mathrm{d}t\right)\\
# & =\left(1-p\right)\rho_{I}\left(t+\mathrm{d}t\right)+p\rho_{X}\left(t+\mathrm{d}t\right)\\
# & =\left(1-p\right)\rho\left(t\right)+pX\rho\left(t\right)X\,.\end{aligned}$$ Note the interpretation of the last line in terms of a null and non-null operation weighted by some probabilities that sum to one. We will see this form repeatedly.
#
# ###### [[A bit of nomenclature]{.upright}]{.medium}
#
# It is sometime useful to describe the environment operation on the density matrix as an operation called a *channel*. We can write it as $$\rho\left(t+\mathrm{d}t\right)=\mathcal{E}\left[\rho\left(t\right)\right]\,,$$ where the calligraphic symbol $\mathcal{E}$ denotes the channel map. Since it acts on density matrices, rather than wave-vectors, we call it, rather than an operator, a *superoperator*. Note that the channel, a map from $\rho\left(t\right)$ to $\rho\left(t+\mathrm{d}t\right)$, is linear in $\rho$.
#
# 
#
# Technically, what we wrote down in the preceding discussion is called a Kraus decomposition of a quantum channel. The channel here is the ensemble operation of the environment on the qubit. The Kraus operators are the probability-weighted identify and bit-flip gates. Let us not dwell on the technical aspect here, we will encounter it again soon enough. Rather, let us provide a more clear picture of the action of the above map.
#
# ##### Bloch picture
#
# In terms of the Bloch representation, see the section entitled "Review of preliminaries: Bloch-vector representation," the action of the environment is to perform either$I$ or $X$ on the qubit resulting in: $$\begin{aligned}
# \mathrm{I:}\qquad & \vec{r}_{I}\left(t+\mathrm{d}t\right)=\left(x\left(t\right),y\left(t\right),z\left(t\right)\right)\,,\\
# \mathrm{X:}\qquad & \vec{r}_{X}\left(t+\mathrm{d}t\right)=\left(x\left(t\right),-y\left(t\right),-z\left(t\right)\right)\,.\end{aligned}$$ The bit-flip environment randomly flips the $y$ and $z$ Bloch components. Thus, on average, $$\vec{r}\left(t+\mathrm{d}t\right)=\left(x\left(t\right),\left(1-2p\right)y\left(t\right),\left(1-2p\right)z\left(t\right)\right)\,.$$ We observe that the states along $x$ are unaffected. However, the states in the $y-z$ plane are subjected to a decay. [^5]
#
# ###### Time evolution
#
# What happens as time goes on? In other words, as we apply the quantum map repeatedly, and eventually continuously in time. First, let us divide time into $n$ even chunks of size $\mathrm{d}t$, where $n$ is an integer; $t=n\mathrm{d}t$. The evolution of the Bloch vector becomes (this follows directly from the display equation above) $$\vec{r}\left(t\right)=\left(x\left(0\right),\left(1-2p\right)^{n}y\left(0\right),\left(1-2p\right)^{n}z\left(0\right)\right)\,.$$ Equivalently, we could have written the density matrix in terms of the channel map, $\rho\left(t\right)=\mathcal{E}^{n}\left[\rho\left(0\right)\right]$, where $\mathcal{E}^{n}$ denotes the repeated application of the channel $n$ times.
#
# To take the continuum limit, we need to first express $p$ in terms of a time rate. Let $p=\gamma\mathrm{d}t$, where $\gamma$ is finite and is the bit-flip rate. Using the limit identity $\lim_{n\rightarrow\infty}\left(1-\frac{a}{n}\right)^{n}=e^{-a},$ one finds (try this exercise yourself) $$\vec{r}\left(t\right)=\left(x\left(0\right),e^{-2\gamma t}y\left(0\right),e^{-2\gamma t}z\left(0\right)\right)\,.$$ What does this equation mean? We observe the exponential decay [^6] of the initial information stored in the $y$ and $z$ Bloch components. The information is scrambled by the environment, until we no longer know what has happened to it. This is a type of decoherence. The decay of the $z$ component describes the loss of polarization of the qubit. The decay time of this component is called the $T_{1}$ time, i.e., in this simple model $T_{1}=\frac{1}{2\gamma}$.
#
# However, the simple bit-flip model we have examined so far is not quite the process that is typically observed in the lab. We will cover the standard $T_{1}$ energy loss process in the following section by repeating the above analysis.
#
# #### The amplitude damping channel: energy loss$\left(T_{1}\right)$
#
# Consider the physical case of the leakage (emission) of a photon from a qubit into the environment, and its detection by the environment. The total loss of a photon of energy from the qubit should annihilate the excited state. Thus instead of a bit-flip, in this model, the environment applies the annihilation (lowering) operator on the qubit, $$\sigma_{-}\equiv\left|0\right\rangle \left\langle 1\right|=\frac{1}{2}\left(X+iY\right)=\begin{pmatrix}0 & 1\\
# 0 & 0
# \end{pmatrix}\,.$$ Note that this can no longer occur with fixed probability. If the atom is in the ground state, there should be no possibility for the environment to have detected a leaked photon from the excited states. Hence the probability to apply $\sigma_{-}$ should be zero. In general, the probability to annihilate the excited state ($\left|1\right\rangle$) will dictated by the excited state population, namely $p_{\mathrm{click}}=p\mathrm{Tr}\left[\left|1\right\rangle \left\langle 1\right|\rho\right]=p\mathrm{Tr}\left[\sigma_{-}^{\dagger}.\sigma_{-}\rho\right]$, where $p$ is a proportionality factor between zero and one. One can show [^7] that the correct random operations applied by the environment on the qubit for this scenario are the following two Kraus maps $$A_{0}=\begin{pmatrix}1 & 0\\
# 0 & \sqrt{1-p}
# \end{pmatrix}\,,\qquad A_{1}=\sqrt{p}\sigma_{-}\,.$$ The probability of a click is thus succinctly expressed as $p_{\mathrm{click}}=\operatorname{Tr}\left[A_{1}^{\dagger}A_{1}\rho\right]=\frac{p}{2}\left(1-z\right)=\rho_{11}$, where $\frac{1}{2}\left(1-z\right)$ is the population of the $\left|1\right\rangle$ state, denoted $\rho_{11}$. The probability of no-click event is $p_{\mathrm{no-click}}=\operatorname{Tr}\left[A_{0}^{\dagger}A_{0}\rho\right]=1-\rho_{11}$. Combining these two weighted operators the amplitude damping ($T_{1})$ noise map is $$\mathcal{A}\left(\rho\right)=A_{0}\rho A_{0}^{\dagger}+A_{1}\rho A_{1}^{\dagger}\,,$$ which can be expressed in terms of its action on the Bloch vector components, $$\vec{r}\left(t+\mathrm{d}t\right)=\left(\sqrt{1-p}x\left(t\right),\sqrt{1-p}y\left(t\right),\left(1-p\right)z\left(t\right)+p\right)\,.$$ We leave it as an exercise to show that the repeated application of the noise map leads to $$\vec{r}\left(t+\mathrm{d}t\right)=\left(\left(1-p\right)^{n/2}x\left(t\right),\left(1-p\right)^{n/2}y\left(t\right),1+\left(1-p\right)^{n}z\left(t\right)\right)\,.$$ Using the limit results from the last section, you can show that in the continuous limit, setting $p=\frac{1}{T_{1}}\mathrm{d}t$, $$\vec{r}\left(t+\mathrm{d}t\right)=\left(e^{-t/2T_{1}}x\left(0\right),e^{-t/2T_{1}}y\left(0\right),1+e^{-t/T_{1}}z\left(0\right)\right)\,.$$ Observe the exponential decay of the population with a time constant $T_{1}$, called the energy relaxation time, to the ground state $\lim_{t\rightarrow\infty}z\left(t\right)=+1$. The coherence $x$ and $y$ Bloch components decay with a time constant $T_{2}=2T_{1}$.
#
# #### Phase damping $T_{\phi}$
#
# If the environment randomly flips the phase of the qubit, the Kraus operators would be [^8]
#
# $$A_{0}=\sqrt{1-p}I\,,\qquad A_{1}=\sqrt{p}Z\,,$$ in which case, we leave it as an exercise to the reader, one finds $$\vec{r}\left(t\right)=\left(e^{-t/T_{\phi}}x\left(0\right),e^{-t/T_{\phi}}y\left(0\right),z\left(0\right)\right)\,,$$ where $p=\mathrm{d}t/T_{\phi}$.
#
# #### Both amplitude $T_{1}$ and phase damping $T_{\phi}$
#
# Combining the above two processes, one finds (exercise), $$\vec{r}\left(t+\mathrm{d}t\right)=\left(e^{-t/T_{2}}x\left(0\right),e^{-t/T_{2}}y\left(0\right),1+e^{-t/T_{1}}z\left(0\right)\right)\,.$$ where $$\frac{1}{T_{2}}\equiv\frac{1}{T_{\phi}}+\frac{1}{2T_{1}}\,.$$
#
#
# Footnotes
# =========
#
# [^1]: A physical qubit might be an atom in free space or a superconducting circuit in a box
#
# [^2]: We imagine that the coupling to the environment is present at all times and in this discussion unchanging. This will be related when we discuss low-frequency noise and $T_{2}^{E}$.
#
# [^3]: Here, we assume that the environment is not monitored directly by the observer. Hence, we will specialize to an unconditioned monitoring. By 'unconditioned' we mean that we will throw away any measurement record and only talk about the average evolution of the quantum system, not that conditioned on the record.
#
# [^4]: Since we will label this outcome $I$, formally, we can write that the probability of the identity outcome is $\mathbb{P}\left(I\right)=1-p$.
#
# [^5]: Since $0\leq p\leq1$, if $p=0$ or $p=1$, the process is deterministic. The noise is maximal for $p=\frac{1}{2}$, a completely random coin.
#
# [^6]: The exponential decay is the result of the stochastic Poisson Markov process assumed in our model.
#
# [^7]: Carmichael, <NAME>., An Open Systems Approach to Quantum Optics (Springer, Berlin, Heidelberg, 1993).
#
# [^8]: Alternatively, $A_{0}=\sqrt{1-p}I\,,\qquad A_{1}=\sqrt{p}\frac{1}{2}\left(I+Z\right)=\sqrt{p}\left|g\vphantom{g}\right\rangle \left\langle \vphantom{g}g\right|,\qquad A_{2}=\sqrt{p}\frac{1}{2}\left(I-Z\right)=\sqrt{p}\left|e\vphantom{e}\right\rangle \left\langle \vphantom{e}e\right|$
#
# # II. Measuring $T_1$ and $T_2$: Qiskit ignis
# How to use ignis to measure
#
# + tags=["uses-hardware"]
# Importing standard Qiskit libraries and configuring account
from qiskit import QuantumCircuit, execute, Aer, IBMQ
from qiskit.compiler import transpile, assemble
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from scipy.optimize import curve_fit
from qutip import mesolve
# Loading your IBM Q account(s)
provider = IBMQ.load_account()
# Re-bind provider to the deployed hub; the open-access provider is kept
# below as a commented-out alternative.
provider = IBMQ.get_provider(hub='ibm-q', group='deployed', project='default')
#IBMQ.get_provider(hub='ibm-q', group='open', project='main')
# Single-qubit device used for the T1/T2 experiments in this chapter.
backend = provider.get_backend('ibmq_armonk')
backend_config = backend.configuration()
backend_defaults = backend.defaults()
inst_sched_map = backend_defaults.instruction_schedule_map
# Clear the parametric-pulse list so schedules are submitted as raw
# sampled pulses — presumably to send the hand-built pulses below
# unmodified; TODO confirm against the backend documentation.
backend.configuration().parametric_pulses = []
# + tags=["uses-hardware"]
# Inspect which measurement discriminators the backend supports; the
# 'quadratic_discriminator' requested later should appear in this list.
backend_config.discriminators
# + tags=["uses-hardware"]
# Exponential decay model used for the T1 fit.
def exponential(t, tau, a, offset):
    """Return ``a * exp(-t / tau) + offset`` (vectorized over ``t``)."""
    return a * np.exp(-t / tau) + offset

# Fit an exponential decay to data.
def fit_exponential(ts, values):
    """Fit ``exponential`` to ``(ts, values)``.

    Returns the ``(popt, pcov)`` pair from ``scipy.optimize.curve_fit``,
    with ``popt = [tau, a, offset]``.

    Initial guesses are derived from the data: the time constant starts at
    the mean of ``ts`` (as before), while the amplitude and offset are read
    off the first/last samples, so decays with negative amplitude or a
    non-zero baseline converge reliably (the previous fixed guesses
    ``a=1, offset=0`` assumed a unit-amplitude, zero-baseline decay).
    """
    values = np.asarray(values, dtype=float)
    p0 = [np.average(ts), values[0] - values[-1], values[-1]]
    return curve_fit(exponential, ts, values, p0=p0)
# Sinusoid with an exponentially decaying envelope (Ramsey fringe model).
def damped_sine(t, fs, Td, a, offs, ph):
    """Return ``a * exp(-t/Td) * sin(2*pi*fs*t + ph) + offs``."""
    envelope = a * np.exp(-t / Td)
    return envelope * np.sin(2 * np.pi * fs * t + ph) + offs

# Fit a damped sine to data.
def fit_damped_sine(ts, values):
    """Fit ``damped_sine`` to ``(ts, values)`` via ``curve_fit``.

    The starting point [fs, Td, a, offs, ph] is tuned to the scales of the
    Ramsey experiment in this notebook (tens of kHz, tens of microseconds).
    Returns the ``(popt, pcov)`` pair.
    """
    initial_guess = [5e4, 50e-6, .5, .5, 3 * np.pi / 2]
    return curve_fit(damped_sine, ts, values, p0=initial_guess)
# + tags=["uses-hardware"]
from qiskit import pulse # This is where we access all of our Pulse features!
from qiskit.pulse import Play, Acquire, Drag, GaussianSquare, Gaussian
import qiskit.pulse.library as pulse_lib
from qiskit.visualization import SchedStyle
import numpy as np
# Hardware sample time (seconds per dt unit), used to convert delays below.
dt=backend_config.dt
# Channels for qubit 0: drive, measurement stimulus, and acquisition.
drive_chan = pulse.DriveChannel(0)
meas_chan = pulse.MeasureChannel(0)
acq_chan = pulse.AcquireChannel(0)
discriminator = pulse.configuration.Discriminator('quadratic_discriminator')
# Calibrated pi and pi/2 pulses. The amplitude and DRAG beta are hard-coded
# — presumably from a prior calibration run on this device; TODO confirm
# against fresh Rabi data before re-running.
pi_pulse=Drag(duration=640, amp=(0.657857142719338), sigma=160, beta=-4.72912208204562)
pi_2_pulse=Gaussian(duration=640, amp=(0.657857142719338/2), sigma=160)
# Reusable measurement schedule: play the readout tone on the measurement
# channel and acquire into memory slot 0, classifying shots with the
# quadratic discriminator.
with pulse.build() as measure:
    pulse.play(GaussianSquare(duration=16000, amp=(0.605+0j), sigma=64, width=15744), meas_chan)
    pulse.acquire(16000, acq_chan, pulse.MemorySlot(0), discriminator=discriminator)
# + tags=["uses-hardware"]
drive_freq=backend_defaults.qubit_freq_est[0]
T1_delay_times=np.linspace(0,400e-6,61) #measurement time delays
# Build one schedule per delay: pi pulse (prepare |1>), wait, then measure.
qubit_decay_pulses = []
for delay in T1_delay_times:
    with pulse.build(name=f"decay delay = {delay * 1e6} us") as temp_decay_pulse:
        with pulse.align_sequential():
            pulse.play(pi_pulse, drive_chan)
            # Convert the delay from seconds to dt units. NOTE(review): the
            # delay is scheduled on meas_chan while the pi pulse is on
            # drive_chan; align_sequential presumably still pushes the
            # measurement after the delay — confirm on the drawn schedule.
            pulse.delay(int((delay)//dt), meas_chan)
            pulse.call(measure)
    qubit_decay_pulses.append(temp_decay_pulse)
# + tags=["uses-hardware"]
# Sanity-check the first (zero-delay) schedule visually.
qubit_decay_pulses[0].draw(style=SchedStyle(figsize=(7, 4)),scale=1)
# + tags=["uses-hardware"]
from qiskit import assemble
from qiskit.tools.monitor import job_monitor
# setting the readout frequency to the resonator frequency in the dispersive limit measured earlier
# setting the qubit frequency to the default value
los = [{drive_chan: drive_freq}]
num_shots = 4*1024
# Assemble all delay schedules into one job. meas_level=2 returns
# discriminated (counts) results; the same LO setting is repeated once per
# schedule.
qubit_decay_experiment = assemble(qubit_decay_pulses,
                                  backend=backend,
                                  meas_level=2,
                                  meas_return='avg',
                                  shots=num_shots,
                                  schedule_los= los * len(qubit_decay_pulses))
job_qubit_decay = backend.run(qubit_decay_experiment)
# Block until the job completes, printing queue status as it goes.
job_monitor(job_qubit_decay)
# -
# + tags=["uses-hardware"]
import matplotlib.pyplot as plt

# Reduce each delay point to the measured excited-state probability P('1')
# and fit an exponential decay; the fitted time constant is reported as T1.
qubit_decay_results = job_qubit_decay.result(timeout=120)
qubit_decay_values = []
for i in range(len(T1_delay_times)):
    counts = qubit_decay_results.get_counts(i)
    # .get avoids a KeyError on delay points where every shot returned '0'
    # (plausible at long delays, once the qubit has fully relaxed).
    qubit_decay_values.append(counts.get('1', 0) / sum(counts.values()))
decay_popt, _ = fit_exponential(T1_delay_times, qubit_decay_values)
T1 = decay_popt[0]
# Plot the data with the fit overlaid; times shown in microseconds.
plt.scatter(T1_delay_times*1e6, qubit_decay_values, color='black')
plt.plot(T1_delay_times*1e6,exponential(T1_delay_times,*decay_popt),'--',lw=2,color='red',label=r'$\tau$={:.1f} $\mu$s'.format(T1*1e6))
plt.title("$T_1$ Experiment", fontsize=15)
plt.xlabel('Delay before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Signal [a.u.]', fontsize=15)
plt.legend()
plt.show()
# + tags=["uses-hardware"]
# Display the fitted parameters [tau, amplitude, offset].
decay_popt
# + tags=["uses-hardware"]
# How many jobs may be queued on this backend at once.
backend.job_limit().maximum_jobs
# +
import qutip as qt
# Reference simulation: evolve |1> under a trivial (identity) Hamiltonian
# with a single collapse operator whose rate sqrt(1/T1) reproduces the
# fitted T1, tracking the excited-state population via the number operator.
# NOTE(review): qt.sigmap() is [[0,1],[0,0]], which maps basis(2,1) to
# basis(2,0) and thus lowers the num(2) expectation used here — confirm
# this matches the intended decay convention before reuse.
result=mesolve(qt.qeye(2),qt.basis(2,1),T1_delay_times,c_ops=[1/np.sqrt(T1)*qt.sigmap()], e_ops=[qt.num(2)])
# + tags=["uses-hardware"]
# Overlay the QuTiP simulation — scaled by the fitted amplitude and offset —
# on the measured decay data.
a=decay_popt[1]
c=decay_popt[2]
plt.scatter(T1_delay_times*1e6, qubit_decay_values, color='black')
plt.plot(T1_delay_times*1e6,a*result.expect[0]+c,'--',lw=2,color='red',label=r'$T1$={:.1f} $\mu$s'.format(T1*1e6))
#plt.plot(T1_delay_times*1e6,result.expect[0],'--',lw=2,color='red',label=r'$T1$={:.1f} $\mu$s'.format(T1*1e6))
plt.title("$T_1$ Experiment", fontsize=15)
plt.xlabel('Delay before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Signal [a.u.]', fontsize=15)
plt.legend()
plt.show()
# + tags=["uses-hardware"]
# Ramsey experiment: deliberately detune the drive 50 kHz from the
# estimated qubit frequency so the accumulated phase produces visible
# fringes versus delay.
drive_detuning=50e3
drive_freq=backend_defaults.qubit_freq_est[0] + drive_detuning
qubit_ramsey_pulses = []
T2_delay_times=np.linspace(0,100e-6,51) #measurement time delays
# One schedule per delay: pi/2 — wait — pi/2, then measure.
for delay in T2_delay_times:
    with pulse.build(name=f"decay delay = {delay * 1e6} us") as temp_decay_pulse:
        with pulse.align_sequential():
            pulse.play(pi_2_pulse, drive_chan)
            # Delay converted from seconds to dt units (same pattern as the
            # T1 sweep above).
            pulse.delay(int((delay)//dt), meas_chan)
            pulse.play(pi_2_pulse, drive_chan)
            pulse.call(measure)
    qubit_ramsey_pulses.append(temp_decay_pulse)
# + tags=["uses-hardware"]
# Sanity-check the second schedule (first non-zero delay) visually.
qubit_ramsey_pulses[1].draw()
# + tags=["uses-hardware"]
# Assemble and run the Ramsey sweep; discriminated counts, one LO entry
# per schedule.
los = [{drive_chan: drive_freq}]
num_shots = 1024
qubit_ramsey_experiment = assemble(qubit_ramsey_pulses,
                                   backend=backend,
                                   meas_level=2,
                                   meas_return='avg',
                                   shots=num_shots,
                                   schedule_los= los * len(qubit_ramsey_pulses))
job_qubit_ramsey = backend.run(qubit_ramsey_experiment)
job_monitor(job_qubit_ramsey)
# + tags=["uses-hardware"]
import matplotlib.pyplot as plt

# Reduce each Ramsey delay point to P('1') and fit a damped sine to the
# fringes; popt = [fs, Td, a, offs, ph].
qubit_ramsey_results = job_qubit_ramsey.result(timeout=120)
qubit_ramsey_values = []
for i in range(len(T2_delay_times)):
    counts = qubit_ramsey_results.get_counts(i)
    # .get avoids a KeyError on points where no shot returned '1'.
    qubit_ramsey_values.append(counts.get('1', 0) / sum(counts.values()))
# The first 5 points are excluded from the fit — presumably to keep early
# transients from biasing the envelope; confirm against the raw data.
ramsey_popt, _ = fit_damped_sine(T2_delay_times[5:], qubit_ramsey_values[5:])
T2 = ramsey_popt[1]              # envelope decay time
omega = 2*np.pi*ramsey_popt[0]   # fitted fringe (detuning) angular frequency
a = ramsey_popt[2]               # fitted amplitude
c = ramsey_popt[3]               # fitted offset
# Plot the fringes with the fit overlaid; times shown in microseconds.
plt.scatter(T2_delay_times*1e6, qubit_ramsey_values, color='black')
plt.plot(T2_delay_times*1e6,damped_sine(T2_delay_times,*ramsey_popt),'--',lw=2,color='red',label=r'$T2$={:.1f} $\mu$s'.format(T2*1e6))
plt.title("$T_2$ Experiment", fontsize=15)
plt.xlabel('Delay before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Signal [a.u.]', fontsize=15)
plt.legend()
plt.show()
# + tags=["uses-hardware"]
# Display the fitted Ramsey parameters [fs, Td, a, offs, ph].
ramsey_popt
# + tags=["uses-hardware"]
# Reference simulation of the Ramsey fringes: an equal superposition
# evolving under a sigma-z Hamiltonian at the fitted fringe frequency, with
# a sigma-z collapse operator for dephasing; track (1+<X>)/2.
# NOTE(review): with c_op = sigmaz/sqrt(T2) the Lindblad coherence decay
# rate is 2/T2, not 1/T2 — confirm the intended dephasing convention
# before comparing time constants quantitatively.
result_ramsey=mesolve(omega/2*qt.sigmaz(),(qt.basis(2,1)+qt.basis(2,0)).unit(),T2_delay_times,c_ops=[1/np.sqrt(T2)*qt.sigmaz()], e_ops=[(1+qt.sigmax())/2])
# + tags=["uses-hardware"]
plt.scatter(T2_delay_times*1e6, qubit_ramsey_values, color='black')
# Overlay the simulated fringes, shifted by the fitted offset and halved to
# match the data's scale.
plt.plot(T2_delay_times*1e6,(result_ramsey.expect[0]+c)/2,'--',lw=2,color='red',label=r'$T2$={:.1f} $\mu$s'.format(T2*1e6))
plt.title("$T_2$ Experiment", fontsize=15)
plt.xlabel('Delay before measurement [$\mu$s]', fontsize=15)
plt.ylabel('Signal [a.u.]', fontsize=15)
plt.legend()
plt.show()
# -
|
content/ch-quantum-hardware/Open-quantum-systems.ipynb
|
# ---
# title: "Display Scientific Notation As Floats"
# author: "<NAME>"
# date: 2017-12-20T11:53:49-07:00
# description: "How to display a number in scientific notation as a float."
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Create Values
# +
# A single number written in scientific notation.
value_scientific_notation = 6.32000000e-03

# A vector of numbers written in scientific notation.
vector_scientific_notation = [6.32000000e-03,
                              1.80000000e+01,
                              2.31000000e+00,
                              0.00000000e+00,
                              5.38000000e-01,
                              6.57500000e+00,
                              6.52000000e+01,
                              4.09000000e+00,
                              1.00000000e+00,
                              2.96000000e+02,
                              1.53000000e+01,
                              3.96900000e+02,
                              4.98000000e+00]
# -
# ## Display Values As Floats
# Render the single value in fixed-point notation.
format(value_scientific_notation, 'f')
# Render every element of the vector in fixed-point notation.
[format(x, 'f') for x in vector_scientific_notation]
|
docs/python/basics/display_scientific_notation_as_floats.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="ILrQGdyN08lj" outputId="d97c83c0-a1db-455d-cd95-2cdcbb9616f2"
from google.colab import drive
drive.mount('/content/drive')
# + id="a_dakmXj1jaC"
import pickle
import numpy as np
# Load the pre-built face dataset (features X, labels y) from Drive.
# SECURITY NOTE: pickle.load executes arbitrary code from the file — only
# open pickle files you created yourself.
# NOTE(review): the file handles are never closed; consider `with open(...)`.
pickle_in = open("/content/drive/My Drive/Dataset/face_X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("/content/drive/My Drive/Dataset/face_Y.pickle","rb")
y = pickle.load(pickle_in)
# Scale 8-bit pixel values into [0, 1] and coerce both to numpy arrays.
X = X/255.0
X=np.array(X)
y=np.array(y)
# + colab={"base_uri": "https://localhost:8080/"} id="IyWh3LpB09vh" outputId="f9c19d6e-9063-4a86-d9c1-043b2f001483"
from keras.layers import Dense, Input, Dropout, GlobalAveragePooling2D, Flatten, Conv2D, BatchNormalization, Activation, MaxPooling2D
from keras.models import Model, Sequential
from keras.optimizers import Adam
# number of possible label values
nb_classes = 7
# Initialising the CNN: four Conv -> BatchNorm -> ReLU -> MaxPool -> Dropout
# stages followed by two fully connected stages. Input is a 48x48
# single-channel image.
model = Sequential()
# 1 - Convolution
model.add(Conv2D(64,(3,3), padding='same', input_shape=(48, 48,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 2nd Convolution layer
model.add(Conv2D(128,(5,5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 3rd Convolution layer
model.add(Conv2D(512,(3,3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 4th Convolution layer
model.add(Conv2D(512,(3,3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Flattening
model.add(Flatten())
# Fully connected layer 1st layer
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
# Fully connected layer 2nd layer
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
# Softmax output over the nb_classes label values.
model.add(Dense(nb_classes, activation='softmax'))
print(model.summary())
# NOTE: `lr` is the legacy argument name; newer Keras spells it
# `learning_rate`.
opt = Adam(lr=0.0001)
# sparse_categorical_crossentropy expects integer class labels in y.
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="5gDdXWLX1K6v" outputId="56e5e018-0111-4187-e4b7-21ac69c3be2d"
# Train with 20% of the loaded arrays held out as a validation split.
history=model.fit(X, y, batch_size=32, epochs=40, validation_split=0.2)
# + colab={"base_uri": "https://localhost:8080/", "height": 590} id="LskfAA-j1-aq" outputId="5c02d01a-65bd-4f15-d05b-63cdf641db9a"
# Persist the trained model in both HDF5 and SavedModel formats.
model.save('/content/drive/My Drive/Model/face2.h5')
model.save('/content/drive/My Drive/Model/face2.model')
import matplotlib.pyplot as plt
# Learning curves: accuracy then loss, training vs. validation.
# NOTE(review): the legend labels the validation curve 'test'; it is the
# 20% validation split above, not a separate test set.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="6vqwe8l36IOK"
|
ARJUN/TUNEX_CNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Dependencies
# + _kg_hide-input=true
import gc
import numpy as np
import pandas as pd
from tokenizers import BertWordPieceTokenizer
# -
# # Parameters
# +
MAX_LEN = 512  # BERT's maximum sequence length
base_path = '/kaggle/input/bert-base-ml-cased-huggingface/bert_base_cased/'
config_path = base_path + 'bert-base-multilingual-cased-config.json'
vocab_path = base_path + 'bert-base-multilingual-cased-vocab.txt'
# File paths
x_train_bias_path = 'x_train_bias'
y_train_bias_path = 'y_train_bias'
# -
# ## Tokenizer
# Cased multilingual vocab, so keep original casing (lowercase=False).
tokenizer = BertWordPieceTokenizer(vocab_path, lowercase=False)
tokenizer.enable_truncation(max_length=MAX_LEN)
# NOTE(review): newer `tokenizers` releases renamed this keyword to `length`;
# `max_length` only works on older versions — confirm the pinned version.
tokenizer.enable_padding(max_length=MAX_LEN)
# # Train set (bias)
data_bias_size = 1902194  # total rows of jigsaw-unintended-bias-train.csv
chuncksize = 100000  # (sic) rows processed per chunk
# This notebook is "pt2": the loop covers the second half of the dataset.
for i in range((data_bias_size // chuncksize // 2 ), (data_bias_size // chuncksize)):
    print((i * chuncksize), '--------------------------------------------')
    # skiprows=range(1, ...) keeps row 0 (the header) and skips rows already done.
    train_bias = pd.read_csv("/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv",
                             usecols=['comment_text', 'toxic'], nrows=chuncksize, skiprows=range(1, i * chuncksize))
    print('Train samples %d' % len(train_bias))
    display(train_bias.head())  # `display` is an IPython/notebook builtin
    # Keep token ids only (attention masks are not saved).
    x_train_bias = [x.ids for x in tokenizer.encode_batch(train_bias['comment_text'].tolist())]
    y_train_bias = train_bias['toxic'].astype(np.float32).values.reshape(len(train_bias), 1)
    # Save
    np.save(x_train_bias_path + '_pt%d' % i, x_train_bias)
    np.save(y_train_bias_path + '_pt%d' % i, y_train_bias)
    print('x_train samples %d' % len(x_train_bias))
    print('y_train samples %d' % len(y_train_bias))
|
Datasets/jigsaw-dataset-bias-bert-cased-pt2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center> Анализ данных на Python </center>
#
# ## Семинар 4. Функции
# Renamed from `str`: shadowing the builtin breaks later cells that call
# str(...) (e.g. the print/return comparison cell below).
words = ["I", "am", "groot", ".", "you", "are", "not"]
list(reversed(words))
# <img src="images/python-function.svg" alt="drawing" width="900"/>
# ### Пример even_odd
#
# Напишите функцию, которая возвращает "even", если число четное и "odd" в противном случае
# +
def even_odd(integer):
    """Return "even" for even integers and "odd" otherwise."""
    return "even" if integer % 2 == 0 else "odd"
print(3, "is", even_odd(3))
print(10, "is", even_odd(10))
# -
# ### Лямбда функции
# Функции можно определять еще короче, если функция простая
#
#
# <img src="images/python-lambda-functions-new.png" alt="drawing" width="900"/>
# +
even_odd_lambda = lambda integer: "even" if integer % 2 == 0 else "odd"
print(3, "is", even_odd_lambda(3))
print(10, "is", even_odd_lambda(10))
# -
# ## Ремарка о print и return
#
# До этого момента, чтобы посмотреть на результат нашей программы мы использовали print. Из-за этого, как показывает практика, могло показаться, что print возвращает результат
#
# <img src="images/is_it_meme.png" alt="drawing" width="400"/>
# Но это не так. Сравните:
# +
def my_function_return(x):
    """Return x unchanged (demonstrates `return`)."""
    return x

def my_function_print(x):
    """Print x; implicitly returns None (demonstrates print vs return)."""
    print("print inside function:", x)

# print() converts its arguments itself, so the str() wrappers the original
# used were redundant — and they crash if an earlier cell shadowed the builtin
# `str` with a list. Output is identical ("67" and "None").
print("my_function_return:", my_function_return(67))
print("my_function_print:", my_function_print(67))
# -
# Поэтому, когда вы пишете функции:
# <img src="images/yes_no_meme.png" alt="drawing" width="300"/>
# ## Задачи
# simple tool for tests
def test_problem(func, test_data):
for inputs, true_answer in test_data:
answer = func(inputs)
assert answer == true_answer, f'Expected {true_answer}, got {answer}. Input: {inputs}'
print("OK!")
# ### Задача 0. Чтение и запись в файл
#
# Часто в задачах (не в этот раз) на вход вам будут подаваться файлы, и из них нужно уметь считывать данные.
# Иногда случается так, что файл со входными данными содержит лишние символы, пустые строки и тд. Давайте разберем несколько случаев.
FILENAME = "myfile.txt"
# #### Example 1
#
# The input file contains 4 numbers, one per line. Read them into `answer`.
# +
content = """
1
2
3
4
"""
answer = []
with open(FILENAME, "w") as out_file:
    out_file.write(content)
with open(FILENAME, "r") as input_file:
    for row in input_file:
        # The content begins and ends with a newline, so skip blank lines
        # and strip the trailing newline before converting to int.
        # (The original loop body was only a placeholder comment, which is
        # a syntax error, and `answer` was never filled.)
        stripped = row.strip()
        if stripped:
            answer.append(int(stripped))
assert answer == [1, 2, 3, 4]
# -
# #### Пример 2
#
# На вход программы поступает 4 числа разбитых пробелами. Считайте их из файла и положите в переменную answer
# +
content = "1 2 3 4"
answer = []
with open(FILENAME, "w") as out_file:
    out_file.write(content)
with open(FILENAME, "r") as input_file:
    row = next(input_file)
    # The original iterated the string character-by-character, which hits the
    # space characters and raises ValueError on int(' '). Split on whitespace
    # to get the number tokens instead.
    for token in row.split():
        answer.append(int(token))
assert answer == [1, 2, 3, 4]
# -
# ### Задача 1.Fizz Buzz
#
# Напишите программу, которая выводит строковое представление чисел от 1 до n.
#
# Но для чисел кратных трем он должен выводить «Fizz» вместо числа, а для кратных пяти - «Buzz». Для чисел, кратных трем и пяти, выведите «FizzBuzz».
#
def fizz_buzz(n):
    """Return FizzBuzz strings for 1..n.

    Multiples of 3 -> "Fizz", of 5 -> "Buzz", of both -> "FizzBuzz",
    otherwise the number as a string.
    """
    # Bug fixes vs. original: `answer` was an undeclared global (NameError on
    # a fresh kernel, and results accumulated across calls), and the 15-case
    # appended the typo "FizzzBuzz", failing the provided test data.
    answer = []
    for i in range(1, n + 1):
        if i % 15 == 0:
            answer.append("FizzBuzz")
        elif i % 3 == 0:
            answer.append("Fizz")
        elif i % 5 == 0:
            answer.append("Buzz")
        else:
            answer.append(str(i))
    return answer
FIZZ_BUZZ_TESTS_DATA = [
(1, ["1"]),
(2, ["1", "2"]),
(15, ["1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz", "Buzz", "11", "Fizz", "13", "14", "FizzBuzz"]),]
test_problem(fizz_buzz, FIZZ_BUZZ_TESTS_DATA)
# ### Задача 2. Fibonacci
#
# Напишите программу, которая выводить числа Фибоначчи.
# (Подсказка: последовательность Фибоначчи - это последовательность чисел, в которой каждое следующее число в последовательности - это сумма двух предыдущих чисел. Последовательность выглядит так: 1, 1, 2, 3, 5, 8, 13, …)
def fibonacci(n):
    """Return the n-th Fibonacci number (1-indexed: 1, 1, 2, 3, 5, ...).

    The original body was a "YOUR CODE HERE" placeholder returning the
    undefined name `answer` (NameError); implemented iteratively.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
FIBONACCI_TESTS_DATA = [
(1, 1),
(2, 1),
(3, 2),
(4, 3),
(5, 5),
(10, 55),
(40, 102334155),
]
test_problem(fibonacci, FIBONACCI_TESTS_DATA)
# ### Задача 3. Найти все дубли в списке
#
# В массиве целых чисел некоторые элементы появляются дважды, а другие - один раз.
#
# Найдите все элементы, которые встречаются дважды в этом массиве.
#
# Выводить елементы нужно в том же порядке, в котором они первый раз встретились в листе.
def find_duplicates(arr):
    """Return the elements that occur twice, in order of first appearance.

    Per the task statement, each element appears either once or twice.
    The original body was a "YOUR CODE HERE" placeholder returning the
    undefined name `answer`; implemented with a seen-set.
    """
    seen = set()
    duplicates = []
    for item in arr:
        if item in seen:
            # Second occurrence — record it (each element appears at most twice).
            duplicates.append(item)
        else:
            seen.add(item)
    return duplicates
FIND_DUPLICATES_TESTS_DATA = [
([1], []),
([1, 1], [1]),
([1, 1, 2, 2], [1, 2]),
]
test_problem(find_duplicates, FIND_DUPLICATES_TESTS_DATA)
# ### Задача 4. Палиндром
#
# Определите, является ли строка палиндромом. Палиндром - это число или текст, одинаково читающееся в обоих направлениях.
def is_palindrome(string):
    """Return True if `string` reads the same forwards and backwards.

    The original body was a "YOUR CODE HERE" placeholder returning the
    undefined name `answer`; the empty string is a palindrome.
    """
    return string == string[::-1]
IS_PALINDROME_TESTS_DATA = [
("", True),
("a", True),
("ab", False),
("aba", True),
("malayalam", True),
]
test_problem(is_palindrome, IS_PALINDROME_TESTS_DATA)
|
sem03_functions/sem03_functions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mengwangk/dl-projects/blob/master/lstm_s1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="221uNfzeTXax"
# # LSTM
# + [markdown] colab_type="text" id="A4ItXv-fTXaz"
# ## Data Preprocessing
# + colab_type="code" id="LLVivFIoTXa1" colab={}
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
# + colab_type="code" id="sA6EqaOOTXa4" colab={}
# path = Path('datasets/stocks/mbb.csv')
path = Path('mbb.csv')
# + colab_type="code" id="zMXcKuHgTXa7" colab={}
df = pd.read_csv(path)
# + colab_type="code" id="yxT4Crl2TXa9" outputId="815bb3cd-c5ec-4daf-bc92-6892b1eb6c0a" colab={"base_uri": "https://localhost:8080/", "height": 221}
df.info()
# + colab_type="code" id="HuaikXW2TXbB" outputId="37cd1052-d403-4461-d372-954a825acd85" colab={"base_uri": "https://localhost:8080/", "height": 359}
df.head(10)
# + colab_type="code" id="lv9LX3CVTXbE" colab={}
# Convert the date column
df['Date'] = pd.to_datetime(df['Date'])
# + colab_type="code" id="83CVuEXlTXbG" outputId="d74870a1-1802-41de-a18d-fc187a6ca943" colab={"base_uri": "https://localhost:8080/", "height": 359}
df.head(10)
# + colab_type="code" id="wxmfs9A8TXbJ" colab={}
# Sort date column ascending
df.sort_values(by=['Date'], inplace=True)
# + colab_type="code" id="Pk6l2BB8TXbL" outputId="a764e9c6-b25f-4468-e125-fbafe48ae432" colab={"base_uri": "https://localhost:8080/", "height": 979}
df.head(30)
# + [markdown] id="yqPfs0kVCHzN" colab_type="text"
# #### Split into monthly means
# + id="e05rXnv4CHzO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="0b854d90-c067-4b7f-c247-86da472d513e"
# df['Year'] = pd.DatetimeIndex(df["Date"]).year
# df['Month'] = pd.DatetimeIndex(df["Date"]).month
# Bucket every `number_of_days` consecutive trading rows into one group,
# then average each group (a crude N-day mean series).
df_temp = df
df_temp.reset_index(inplace=True, drop=True)
number_of_days = 5
# Bug fix: the original did
#   df_temp.apply(lambda x: df_temp['GroupId'] // number_of_days)
# which applies the lambda once per *column* and ignores its argument.
# Integer-divide the fresh 0..n-1 index directly instead.
df_temp['GroupId'] = df_temp.index // number_of_days
df_grouped_mean = df_temp.groupby(['GroupId'], as_index=False).mean()
print(df_grouped_mean.head(10), '\n\n', len(df_grouped_mean))
print(df.head(10), '\n\n', len(df))
# + id="m4JZbkuDCHzR" colab_type="code" colab={}
df = df_grouped_mean
# + id="gcRFFQNMCHzU" colab_type="code" colab={}
# + id="1UVPN274CHzW" colab_type="code" colab={}
# + id="wRgsUoLCCHzY" colab_type="code" colab={}
# + id="LnM11mygCHzb" colab_type="code" colab={}
# + colab_type="code" id="hbdgBaocTXbO" colab={}
# # train_test_split?
# Split to training and test set without shuffling
training_set, test_set = train_test_split(df, test_size=0.10, shuffle=False)
# + colab_type="code" id="oUi6b5g3TXbV" outputId="4395cc6c-d701-4249-e6d4-5b39d46581d4" colab={"base_uri": "https://localhost:8080/", "height": 376}
print(len(training_set));training_set.head(10)
# + colab_type="code" id="B_7O8Ur7TXbb" outputId="ec3fa289-4795-456f-a643-d8f9e041deb1" colab={"base_uri": "https://localhost:8080/", "height": 376}
print(len(test_set));test_set.head(10)
# + colab_type="code" id="mgKC1K_KTXbi" outputId="414fca67-1b86-40c0-8718-42318f653eb2" colab={"base_uri": "https://localhost:8080/", "height": 5814}
# Use the Open price
training_set = training_set.iloc[:,2:3].values
print(training_set)
# + colab_type="code" id="u6L54WmbTXbm" outputId="72800f90-40bd-4a7d-b377-525e36fea46f" colab={"base_uri": "https://localhost:8080/", "height": 5814}
# Feature scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
training_set = sc.fit_transform(training_set)
print(training_set)
# + colab_type="code" id="0HFDGSXBTXbp" outputId="54754404-fd83-4282-bda5-c0db51df477c" colab={"base_uri": "https://localhost:8080/", "height": 34}
n = len(training_set); n
# + colab_type="code" id="hCjzXeb_TXbu" outputId="58b2f305-e9a1-42af-c262-26be1f18562d" colab={"base_uri": "https://localhost:8080/", "height": 11611}
# Getting the inputs and the ouputs
# Restricting the input and output based on how LSTM functions.
# One-step-ahead supervision: X is the scaled price at t, y is the price at t+1.
X_train = training_set[0:n-1]
y_train = training_set[1:n]
print(X_train); print(len(X_train))
print(y_train); print(len(y_train))
# + colab_type="code" id="T_6r6SzRTXby" colab={}
# Reshaping - Adding time interval as a dimension for input.
# Keras LSTM expects (samples, timesteps, features) = (n-1, 1, 1).
X_train = np.reshape(X_train, (n-1, 1, 1))
# + colab_type="code" id="N3JMFOkUTXb1" outputId="e1ebb5f4-577a-4bbc-9acb-c2c46e1033f7" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(X_train.shape)
# + [markdown] colab_type="text" id="7xbyya1oTXb3"
# ## Build the Network
# + colab_type="code" id="ANldwJtDTXb4" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="58a0621e-cee4-4fdc-c3c3-8b410b46ae4b"
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# + colab_type="code" id="j3iIZNz7TXb6" colab={"base_uri": "https://localhost:8080/", "height": 88} outputId="ed76df86-055b-472a-8551-aca8fe4f19b0"
regressor = Sequential()
# 4 memory units, sigmoid activation function and (None time interval with 1 attribute as input)
regressor.add(LSTM(units = 4, activation = 'sigmoid', input_shape = (None, 1)))
# + colab_type="code" id="JEeGra5oTXb-" colab={}
# 1 neuron in the output layer for 1 dimensional output
regressor.add(Dense(units = 1))
# + colab_type="code" id="gukrzfxOTXcA" colab={}
# Regression objective: predict the next scaled price value.
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
# + colab_type="code" id="9X3da0U5TXcD" outputId="717ea315-02c8-44ed-e7e1-143f99b6ec17" colab={"base_uri": "https://localhost:8080/", "height": 3505}
# Train the one-step-ahead predictor on the scaled series.
regressor.fit(X_train, y_train, batch_size = 32, epochs = 100)
# + [markdown] colab_type="text" id="Ut9ya6h5TXcG"
#
# + [markdown] colab_type="text" id="waB2iBRNTXcH"
# ## Prediction
# + colab_type="code" id="Tz0XemoJTXcI" outputId="8ddef914-4726-4a47-922c-2e73a8fbd75a" colab={"base_uri": "https://localhost:8080/", "height": 34}
real_stock_price = test_set.iloc[:,2:3].values; len(real_stock_price)
# + colab_type="code" id="hsU9ZRZwTXcJ" outputId="2bec9ef0-d5d4-4d13-e812-d4284b63584e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Getting the predicted stock price of 2017
inputs = real_stock_price
inputs = sc.transform(inputs)
inputs = np.reshape(inputs, (len(real_stock_price), 1, 1))
inputs.shape
# + colab_type="code" id="XTl3YvyWTXcM" colab={}
predicted_stock_price = regressor.predict(inputs)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
# + [markdown] colab_type="text" id="cxhmMBZPTXcP"
# ## Visualize the results
# + colab_type="code" id="ht8Wl-fJTXcR" outputId="49e4a92d-2d67-456b-ceba-40220dd64102" colab={"base_uri": "https://localhost:8080/", "height": 411}
plt.figure(figsize=(16, 6))
plt.plot(real_stock_price, color = 'red', label = 'Real Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Stock Price')
plt.title('Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Stock Price')
plt.legend()
plt.show()
# + colab_type="code" id="ZLzg1cMjTXcT" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3c710739-198f-43dd-bde6-5b3388dde63f"
# Make predictions on entire dataset
training_set, test_set = train_test_split(df, test_size=0.10, shuffle=False)
real_stock_price_train = training_set.iloc[:,2:3].values
print(len(df))
# + colab_type="code" id="eRw3eZcPaK02" colab={}
predicted_stock_price_train = regressor.predict(X_train)
predicted_stock_price_train = sc.inverse_transform(predicted_stock_price_train)
# + colab_type="code" id="06gvP3eJaNMJ" outputId="9bde9050-e2d5-4e49-efdd-5f576a5de4df" colab={"base_uri": "https://localhost:8080/", "height": 411}
# Visualising the results
plt.figure(figsize=(16, 6))
plt.plot(real_stock_price_train, color = 'red', label = 'Real Stock Price')
plt.plot(predicted_stock_price_train, color = 'blue', label = 'Predicted Stock Price')
plt.title('Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Stock Price')
plt.legend()
plt.show()
# + colab_type="code" id="wZrGrXGBaOGK" colab={}
# + [markdown] colab_type="text" id="6N_fdqGcTXcV"
# ## Evaluation
# + colab_type="code" id="1ZRGl0c6TXcV" outputId="452de9ee-c213-4417-a500-dc3060460087" colab={"base_uri": "https://localhost:8080/", "height": 34}
import math
from sklearn.metrics import mean_squared_error
rmse = math.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))
print(rmse)
# + colab_type="code" id="9iNuW3A7TXcZ" colab={}
|
lstm_s1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Node Embeddings and Skip Gram Examples
#
# **Purpose:** - to explore the node embedding methods used for methods such as Word2Vec.
#
# **Introduction-** one of the key methods used in node classification actually draws inspiration from natural language processing. This based in the fact that one approach for natural language processing views the ordering of words in a manner similar to a graph since each n-gram has a set of words that follow it. Strategies that treat text this way are naturally amenable to domains where we are explicitly working on a network structure.
#
# Methods which employ node embeddings have several fundamental steps:
# 1. Create a "corpus" of node connections using a random walk.
# 2. Define a transformation on the list of node connections from **1** which groups node values that are close together with a high number, and nodes that have less of a relationship with a small number.
# 3. Run a standard machine learning method on the new set of factors from step **2**.
#
#
# ## Random Walks:
#
# Here we explore the first step in this process: The random choosing of node values in the graph structure. This step is taken to approximate the connections each node has as a list. This carries two advantages:
# 1. Each node similarity measure has both local (direct) connections, and also expresses higher order connections (indirect). This is known as **Expressivity**.
# 2. All node pairs don't need to be encoded; we don't have to worry about coding the zero probabilities. This is **Efficiency**.
#
# We will discuss some of the methods used for random walks in the sections below in reference to the paper where they were originally discussed.
#
# ### DeepWalk Method
#
# *DeepWalk: Online Learning of Social Representations* uses short random walks. In this case, we define a random walk starting at vertex $V_i$ as $W_i$. This random walk is a stochastic process composed of random variables $W_i^k$ where k denotes the step in the sequence of each random walk.
#
# For this method, a stream of random walks is created. This method has the added advantage of being easy to parallelize and is also less sensitive to changes in the underlying graph than using a larger length random walk.
#
# The implementation of the DeepWalk method is used in the function below:
import pandas as pd, numpy as np, os, random
from IPython.core.debugger import set_trace
np.random.seed(13)
dat = pd.read_csv("../Data/soc-sign-bitcoinalpha.csv", names = ["SOURCE", "TARGET", "RATING", "TIME"])
len(pd.unique(dat.SOURCE))
len(pd.unique(dat.TARGET) )
# +
#from_vals = pd.unique(dat.SOURCE)
#a = dat.TARGET[dat.SOURCE == from_vals[1]]
# Generate list comprehension using from values as a key; to values are saved as a list.
#node_lists = {x:dat.TARGET[dat.SOURCE == x].values for x in from_vals }
# Generate a step by selecting one value randomly from the list of "to" nodes:
def gen_step(key_val, dict_vals):
    """Pick a uniformly random successor of `key_val` from the adjacency dict."""
    successors = dict_vals[key_val]
    pick = random.randint(0, len(successors) - 1)
    return successors[pick]
def gen_walk(key_val, dict_vals, steps):
    """Unbiased (DeepWalk-style) random walk of `steps` nodes starting at key_val."""
    path = [key_val]
    while len(path) < steps:
        path.append(gen_step(path[-1], dict_vals))
    return path
def RW_DeepWalk(orig_nodes, to_vals, walk_length=3):
    """Generate one DeepWalk random walk per unique source node.

    Returns {source_node: [walk of walk_length nodes]} built from the
    edge list (orig_nodes[i] -> to_vals[i]).
    """
    sources = pd.unique(orig_nodes)
    adjacency = {src: to_vals[orig_nodes == src].values for src in sources}
    # Every key of the adjacency dict is a valid start node.
    return {src: gen_walk(key_val=src, dict_vals=adjacency, steps=walk_length)
            for src in adjacency}
# -
# In order to sort these values, we need to make a full list of "from" and "to" for the random walk. This is performed in the script below:
# Identify values in "to" column that might not be in the from column:
f = dat.SOURCE
t = dat.TARGET
unique_t = [x for x in pd.unique(t) if not(x in pd.unique(f))]
x_over = dat[dat['TARGET'].isin( unique_t)]
# Add entries from the "to" column to the from column; add corresponding entries from the "from" column. This way, we include mappings of nodes in the "to" column as part of the random walk.
# NOTE(review): Series.append was removed in pandas 2.0 — these two lines need
# pd.concat([f, x_over.TARGET]) etc. on modern pandas.
full_from = f.append(x_over.TARGET)
full_to = t.append(x_over.SOURCE)
random_walk = RW_DeepWalk( full_from, full_to, walk_length=10)
# An example of one of the arrays obtained using a random walk:
random_walk[1]
# The choice of the random walk method provides a way of representing the network that can be performed quickly. This method is also simple to parallelize. Finally, this method and the speed it can be used allows for a quick way to update calculations due to changes in the graph structure.
# ### Node2vec Method
#
# The paper "Scalable Feature Learning for Networks" uses a separate method called a "biased random walk".
#
#
# One of the points made in the paper is the type of sampling strategies that can be used to try to approximate the neighborhood around some node (this is denoted as $N_s$ in the paper). There are two extremes for sampling strategies that can be employed:
#
# * Breadth-first sampling (BFS) - The neighborhood is restricted to nodes which are immediate neighbors of the source node. For this, we define the neighborhood **only** with directly adjacent nodes.
# * Depth-first sampling (DFS) - The neighborhood consists of nodes sequentially sampled at increasing distances from the source node. This is represented in the random walk algorithm that was shown in the last section.
#
#
# A biased random walk as expressed by the authors is an interpolation between the two strategies mentioned above.
#
# Let $u$ be the source node, and $l$ be the length of the random walk. Let $c_i$ be the $i$th node in the walk where $c_0 = u$. Then, $c_i$ is generated as follows:
#
# $$ P(c_i = x | c_{i-1} =v) = \frac{\pi_{v,x} }{Z} $$ and 0 otherwise.
#
# Where $\pi_{v,x}$ is the unnormalized transition probability between nodes $v$ and $x$, and $Z$ is some constant that normalizes the probability between the two nodes. This is very similar to the formulation that was described earlier for DeepWalk.
#
# The simplest way to introduce bias to the random walks is to sample based on the static edge weights: $w_{v,x} = \pi_{v,x} $. In the case of an unweighted graph like the one used in the example above, $w_{v,x} =1$.
#
# We will define a $2$nd order random walk with parameters $p,q$. We will set the unnormalized transition probability to $\pi_{v,x} = \alpha_{p,q}(t,x) \cdot w_{v,x}$ where $\alpha_{p,q}(t,x)$ is defined as:
#
# \begin{equation}
# \alpha_{p,q}(t,x) =
# \begin{cases}
# \frac{1}{p} & \text{if $d_{t,x}=0$ }\\
# 1 & \text{if $d_{t,x}=1$ }\\
# \frac{1}{q} & \text{if $d_{t,x}=2$ }
# \end{cases}
# \end{equation}
#
# Where $d_{t,x}$ defines the shortest path distance between nodes $t$ and $x$ Also note that $d_{t,x} \in \{0,1,2\}$
#
# Changing parameters $p$ and $q$ will impact the speed that the walk leaves the current neighborhood. In the example provided in the paper, the authors consider a process which as just transitioned to node *v* from node *t*. It has three potential choices for its next step:
#
# * Transition back to *t* with the bias of $\alpha_{t,v} = \frac{1}{p}$ being applied.
# * Transition to a shared node with a bias of 1 being applied.
# * Transition to an unshared node with a bias of $\alpha_{t,v} = \frac{1}{q}$ being applied.
#
# Then - a lower q-value and higher p-value will increase the likelihood of leaving the initial neighborhood of *t*. At the extreme, you would get the original random walk implementation described above by letting $p =1$ and $q=1$.
#
# A higher q value will decrease the likelihood of the current step moving to a node that is not a neighbor of the previous node $t$, keeping the walk close to its starting neighborhood (BFS-like behavior).
#
from_vals = pd.unique(full_from)
node_lists = {x:full_to[full_from == x].values for x in from_vals}
node_lists
gen_step(430,node_lists)
# +
cur_node = gen_step(430,node_lists)
prev_node_list = node_lists[cur_node]
cur_node_list = node_lists[430]
shared_nodes = list(set(prev_node_list) & set(cur_node_list))
unshared_nodes = list(set(prev_node_list) ^ set(cur_node_list))
prev_node = 430
# -
def gen_biased_step(cur_val, prev_val,dict_vals,p = 1, q = 1):
# set_trace()
prev_node_list = node_lists[prev_val]
cur_node_list = node_lists[cur_val]
shared_nodes = list(set(prev_node_list) & set(cur_node_list))
unshared_nodes = list( set(prev_node_list) ^ set(cur_node_list)^set([prev_val]) )
all_nodes = shared_nodes + unshared_nodes + [prev_val]
shared_weights = [1/p]*len(shared_nodes)
unshared_weights = [1/q]*len(unshared_nodes)
all_weights = shared_weights +unshared_weights + [1]
# set_trace()
node_step = random.choices(all_nodes,all_weights)
return( node_step )
test = gen_biased_step(cur_val = 59, prev_val = 430,dict_vals = node_lists,p = 1, q = 1)
test
# +
def gen_walk_biased(key_val, dict_vals, steps, p=1, q=1):
    """Biased random walk of `steps` nodes starting at `key_val`.

    Delegates each step to gen_biased_step with the node2vec bias
    parameters p and q.
    """
    # Bug fix: the original always passed cur_val=key_val, so every step was
    # taken from the *start* node rather than the walk's current position,
    # and prev_val lagged incorrectly.
    walk_vals = [key_val]
    for _ in range(0, steps - 1):
        cur_val = walk_vals[-1]
        # On the very first step there is no earlier node; use the start itself.
        prev_val = walk_vals[-2] if len(walk_vals) > 1 else key_val
        walk_vals.append(
            gen_biased_step(cur_val=cur_val, prev_val=prev_val,
                            dict_vals=dict_vals, p=p, q=q)[0])
    return walk_vals
# Split the node values into three different groups
# Apply weightings to each edge to change the likelihood of leaving the neighborhood.
# A biased random walk as described in the node2vec paper. The p and q values are defaulted to 1 which will make this the same as the RW_DeepWalk paper described earlier.
# A biased random walk as described in the node2vec paper. The p and q values
# are defaulted to 1, which reduces it to the RW_DeepWalk behavior.
def RW_Biased(orig_nodes, to_vals, walk_length=3, p=1, q=1):
    """Run one node2vec-style biased walk per unique source node.

    Returns a list of walks (one per source), as the original did.
    """
    from_vals = pd.unique(orig_nodes)
    node_lists = {x: to_vals[orig_nodes == x].values for x in from_vals}
    start_nodes = [*node_lists]
    # Bug fix: the original called the unbiased gen_walk here, silently
    # ignoring the p and q parameters; delegate to gen_walk_biased instead.
    walks = [gen_walk_biased(key_val=x, dict_vals=node_lists,
                             steps=walk_length, p=p, q=q) for x in start_nodes]
    return walks
# -
full_from = full_from.astype(str )
full_to = full_to.astype(str )
test = RW_Biased(full_from, full_to,walk_length =10,p = .5, q = .7)
test
# ## Creating a Node Embedding
#
# Now that we've created a representation of the likelihood of getting to different nodes in each graph, we can the methods which we will use to represent the network as an embedding vector. Note that this is an alternative to other methods such as one-hot encoding of the results which are extremely memory/computation intensive. In principle, what we want to do is represent the "context" or relationship of each of these nodes to all other nodes by mapping each node into an $N$ dimensional vector space. The length of the vector is arbitrary; As it is increased the precision will rise while the speed of the computation will fall.Nodes which are in the immediate neighborhood of the current node will be heavily favored, second order connections, less so, and those that are completely unconnected, not at all. This method was first explored in [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf). The paper that was just mentioned provides two methods for natural language processing:
#
# 1. Continuous bag-of-words
# 2. Skip-gram models.
#
# Both methods are valid and have their strengths and weaknesses, but we will rely on skip-gram models in this discussion. For skip-gram models, the node embedding is generated using a simple neural network. We will step through an independent implementation of this below which leans on Tensorflow, but [Stellargraph](https://www.stellargraph.io/) provides a good straightforward interface to it as well.
#
# ### Step 1: Identify neighborhood for each node
#
# This is the step that we discussed above by implementing the biased random walk and the random walk methods. This has a key impact: The longer and more biased our random walk, the greater of range of connections we will identify, but we will possibly draw in more tenuous connections.
#
#
# ### Step 2: Map neighborhood values to one-hot autoencoders:
#
# The neighborhoods are used to generate vectors which encode the relationship of nodes. This includes a one-hot autoencoder for the target node, and a set of autoencoders for neighboring nodes.
#
# ### Step 3: Perform Optimization:
#
# The following procedure is used for each one-hot autoencoders:
#
# 1. The $1 \times N$ encoder multiplies an $N \times w $ matrix.
# 2.
#
#
def gen_auto_encoders(node_lists):
    # NOTE(review): this function looks unfinished — it ignores its
    # `node_lists` parameter, reads the module-level full_from/full_to
    # instead, and never returns `biased_rw_for_training` (the function
    # yields None). Also, RW_Biased returns a *list*, so calling
    # `.values()` on each result would raise AttributeError — confirm the
    # intended container type before using this.
    biased_rw_for_training =[list(x.values()) for x in [RW_Biased(full_from, full_to,walk_length =10,p = .5, q = 2) for i in range(10) ] ]
# +
biased_rw_for_training[0].append(biased_rw_for_training[1])
final_results = []
for i in biased_rw_for_training:
final_results = final_results + i
# -
biased_rw_for_training[0][0]
from stellargraph.data import BiasedRandomWalk
from stellargraph import StellarGraph
from stellargraph import datasets
from IPython.display import display, HTML
from gensim.models import Word2Vec
model = Word2Vec(biased_rw_for_training[0] , size=128, window=5, min_count=0, sg=1, workers=2, iter=1)
# ## References:
#
# 1. [NRL Tutorial Part 1](http://snap.stanford.edu/proj/embeddings-www/files/nrltutorial-part1-embeddings.pdf)
|
Notes/.ipynb_checkpoints/Node Embeddings and Skip Gram Examples-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pypcd
import pptk
import numpy as np
import pypcd.
pc_map = pypcd.PointCloud.from_path('nuscenes/nuscenes.pcd')
points = np.array([[point[0], point[1], point[2]] for point in pc_map.pc_data])
pptk.viewer(points)
w = _
print points.shape
e_vecs, e_vals = pptk.estimate_normals(points, k=-1, r=3.2, output_eigenvalues=True, output_all_eigenvectors=True)[:2]
vert = 1 - np.linalg.norm(np.cross( np.full(shape=(len(e_vecs),3),fill_value=np.array([0,0,1])) , e_vecs[:,0,:]), axis=1)
w.attributes(vert, vert<0.6)
w.set(point_size =0.025)
points_without_gnd = points[vert<0.6]
pptk.viewer(points_without_gnd)
arr = np.array([(point[0], point[1], point[2]) for point in points_without_gnd],
dtype=[('x', '<f4'), ('y', '<f4'), ('z', '<f4')])
print(arr.shape)
from pypcd import numpy_pc2
pc = pypcd.point_cloud_from_buffer(str(arr))
print type(pc)
pc = pypcd.make_xyz_point_cloud(points_without_gnd)
pypcd.point_cloud_to_path(pc, "filtered_map.pcd")
|
bags/map_process.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#load all packages
import GEOparse
import pandas as pd
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import seaborn as sns
from sklearn import preprocessing
# -
#load the data
gse = GEOparse.get_GEO(geo="GSE10245", destdir="./")
#load gene expression values to a dataframe
gse_table = gse.pivot_samples('VALUE')
#normalize the gene expression
normalized_data = preprocessing.normalize(gse_table, norm='l2')
normalized_data
#scale the normalized data
scale_data = scale(normalized_data)
#K implies the number of clusters? in this case, that being 2?
scale_data = scale_data.transpose()
n_samples, n_features = scale_data.shape
n_digits = 2
print(n_samples)
print(n_features)
print(n_digits)
#function for k-means - we may have to change it to avoid plagiarism or add reference.
#https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html#sphx-glr-auto-examples-cluster-plot-kmeans-digits-py
def bench_k_means(estimator, name, data):
    """Fit `estimator` on `data` and print fit time plus clustering metrics.

    Prints: name, seconds, inertia, homogeneity, completeness, v-measure,
    adjusted Rand, adjusted mutual information, and silhouette score.

    NOTE(review): relies on the module-level globals `labels` (reference
    labels for the supervised metrics) and `sample_size` (silhouette
    subsample), which are assigned in *later* cells — do not call this
    function before they exist.
    """
    t0 = time()
    estimator.fit(data)
    print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
          % (name, (time() - t0), estimator.inertia_,
             metrics.homogeneity_score(labels, estimator.labels_),
             metrics.completeness_score(labels, estimator.labels_),
             metrics.v_measure_score(labels, estimator.labels_),
             metrics.adjusted_rand_score(labels, estimator.labels_),
             metrics.adjusted_mutual_info_score(labels, estimator.labels_,
                                                average_method='arithmetic'),
             metrics.silhouette_score(data, estimator.labels_,
                                      metric='euclidean',
                                      sample_size=sample_size)))
# Sample names serve as "ground truth" for the supervised metrics.
# NOTE(review): these are unique per sample, so homogeneity/completeness are
# degenerate against them — confirm this is intended.
labels = gse_table.columns.values
sample_size = 58
# +
#scikit-learn current implementation of k-means only uses Euclidean distances.
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=scale_data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=scale_data)
# -
# PCA-seeded k-means: initialize the centroids with the principal components.
pca = PCA(n_components=n_digits).fit(scale_data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=scale_data)
print(82 * '_')
# Reduce to 2-D with PCA and re-cluster in the reduced space for plotting.
reduced_data = PCA(n_components=2).fit_transform(scale_data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# +
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point of a mesh covering the reduced data (plus a 1-unit margin).
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=10)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
# NOTE(review): title inherited from the sklearn digits example — this is
# GEO expression data, not digits.
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
# -
# Fit once and reuse the labels: the original refit k-means separately for the
# print and for the stored column, so (with random init) the printed labels
# and the stored labels could come from different fits / label permutations.
cluster_labels = kmeans.fit(reduced_data).labels_.tolist()
print(cluster_labels)
print(gse_table.columns.tolist())
# .copy() so the new column is added to an independent frame instead of a
# slice view of gse.phenotype_data (avoids SettingWithCopyWarning).
pheno_data = gse.phenotype_data[["title", "characteristics_ch1.0.disease state"]].copy()
pheno_data['ClustersfromPCA'] = cluster_labels
pheno_data
|
.ipynb_checkpoints/mrabbani_kmeans_normalized-checkpoint.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.0
# language: julia
# name: julia-1.3
# ---
# # Local Variational Inference for a Softmax Model
#
# The local variational method can be extended to multidimensional models by use of the softmax function (see e.g. Ahmed, 2013). In this demo we consider the following model:
#
# \begin{align*}
# x_t &\sim \mathcal{N}(A x_{t-1}, 0.01 I)\\
# y_t &\sim \mathcal{C}at(\sigma(x_t))\,,
# \end{align*}
#
# with $\sigma$ a softmax. We start by generating a dataset.
# +
using Random
using ForneyLab
using PyPlot
using LinearAlgebra
# Generate data set
Random.seed!(123)
n_samples = 40 # number of time steps to simulate
theta = 2*pi/40 # rotation angle per step (one full turn over the series)
A = [cos(theta) -sin(theta) 0.0; sin(theta) cos(theta) 0.0; 0.0 0.0 0.9] # Transition matrix
σ(x) = exp.(x)/sum(exp.(x)) # Softmax function
x_data = [] # hidden states
y_data = [] # one-hot categorical observations
x_prev = [2.0, 0.0, 2.0] # initial hidden state
for t=1:n_samples
    push!(x_data, A*x_prev + sqrt(0.01)*randn(3)) # State transition
    y_data_t = sample(ProbabilityDistribution(Categorical, p=σ(x_data[end])))
    push!(y_data, Vector(y_data_t)); # Observation
    x_prev = x_data[end]
end
# +
# Plot the generated hidden states (one trace per state component)
x_data_1 = [x_data_t[1] for x_data_t in x_data]
x_data_2 = [x_data_t[2] for x_data_t in x_data]
x_data_3 = [x_data_t[3] for x_data_t in x_data]
plot(1:n_samples, x_data_1, linestyle="--", color="red", label="State 1")
plot(1:n_samples, x_data_2, linestyle="--", color="green", label="State 2")
plot(1:n_samples, x_data_3, linestyle="--", color="blue", label="State 3")
grid("on")
xlabel("Time t")
xlim(1, n_samples)
legend()
;
# +
# Plot the generated data: which of the three outcomes fired at each time step
y_data_1 = findall([y_data_t == [1.0,0.0,0.0] for y_data_t in y_data])
y_data_2 = findall([y_data_t == [0.0,1.0,0.0] for y_data_t in y_data])
y_data_3 = findall([y_data_t == [0.0,0.0,1.0] for y_data_t in y_data])
scatter(y_data_1, ones(length(y_data_1)), color="red", marker="x", lw=2)
scatter(y_data_2, 2*ones(length(y_data_2)), color="green", marker="x", lw=2)
scatter(y_data_3, 3*ones(length(y_data_3)), color="blue", marker="x", lw=2)
grid("on")
xlabel("Time t")
ylabel("Outcomes y")
xlim(1, n_samples)
;
# -
# # Model specification
#
# The model specification includes local variational parameters `xi` and `a`, which are used to define an upper bound on the softmax (Bouchard, 2007).
# +
# Build the factor graph: linear-Gaussian state transitions with a Softmax
# (categorical) observation model; xi[t] and a[t] are the local variational
# parameters of the softmax bound.
g = FactorGraph()
# State prior
@RV x_0 ~ GaussianMeanVariance(ones(3), 100.0*diageye(3))
x = Vector{Variable}(undef, n_samples)
xi = Vector{Variable}(undef, n_samples)
a = Vector{Variable}(undef, n_samples)
y = Vector{Variable}(undef, n_samples)
x_t_min = x_0
for t = 1:n_samples
    @RV x[t] ~ GaussianMeanPrecision(A*x_t_min, 100.0*diageye(3))
    @RV xi[t]
    @RV a[t]
    @RV y[t] ~ Softmax(x[t], xi[t], a[t])
    # Data placeholder
    placeholder(y[t], :y, index=t, dims=(3,))
    # Reset state for next step
    x_t_min = x[t]
end
;
# -
# # Algorithm generation
#
# Since we are interested in optimizing the local variational parameters `xi`, `a` together with the hidden state sequence `x`, we construct an algorithm that also updates `xi` and `a`. However, because we upper-bound the softmax, the free energy is no longer guaranteed to be an upper bound on surprise. This is in contrast to local variational estimation for the logistic function, which is lower-bounded (see the corresponding demo).
# Mean-field posterior factorization over the state sequence, xi, and a;
# generate message-passing code that also updates xi and a.
pfz = PosteriorFactorization([x_0; x], xi, a, ids=[:X, :Xi, :A])
algo = messagePassingAlgorithm([xi; a])
source_code = algorithmSourceCode(algo);
# +
# println(source_code) # Uncomment to inspect algorithm code
# -
# # Execution
#
# For execution we initialize the local variational parameters and iterate the automatically derived algorithm.
# Compile the generated step functions (stepX!, stepXi!, stepA!) into this session.
eval(Meta.parse(source_code));
# +
# Pre-initialize marginals
marginals = Dict()
for t=1:n_samples
    marginals[:xi_*t] = ProbabilityDistribution(Multivariate, Function, mode=ones(3))
    marginals[:a_*t] = ProbabilityDistribution(Function, mode=1.0)
end
data = Dict(:y => y_data)
n_its = 20
for i = 1:n_its
    stepX!(data, marginals) # Update hidden state
    stepXi!(data, marginals) # Update local variational parameters
    stepA!(data, marginals)
end
# -
# # Results
#
# Results show that the algorithm accurately estimates the hidden state.
# +
# Extract posterior state statistics
m_x_1 = [mean(marginals[:x_*t])[1] for t = 1:n_samples]
v_x_1 = [cov(marginals[:x_*t])[1,1] for t = 1:n_samples]
m_x_2 = [mean(marginals[:x_*t])[2] for t = 1:n_samples]
v_x_2 = [cov(marginals[:x_*t])[2,2] for t = 1:n_samples]
m_x_3 = [mean(marginals[:x_*t])[3] for t = 1:n_samples]
v_x_3 = [cov(marginals[:x_*t])[3,3] for t = 1:n_samples]
# Posterior mean with a one-standard-deviation band per state component
plot(1:n_samples, m_x_1, color="red")
fill_between(1:n_samples, m_x_1-sqrt.(v_x_1), m_x_1+sqrt.(v_x_1), color="red", alpha=0.3);
plot(1:n_samples, m_x_2, color="green")
fill_between(1:n_samples, m_x_2-sqrt.(v_x_2), m_x_2+sqrt.(v_x_2), color="green", alpha=0.3);
plot(1:n_samples, m_x_3, color="blue")
fill_between(1:n_samples, m_x_3-sqrt.(v_x_3), m_x_3+sqrt.(v_x_3), color="blue", alpha=0.3);
# Plot true states
plot(1:n_samples, x_data_1, linestyle="--", color="red", label="State 1")
plot(1:n_samples, x_data_2, linestyle="--", color="green", label="State 2")
plot(1:n_samples, x_data_3, linestyle="--", color="blue", label="State 3")
xlim(1, n_samples)
grid("on")
xlabel("Time t")
legend(loc=3)
;
# -
# Because the softmax function is approximated by an upper bound (Bouchard, 2007), the free energy is no longer a bound on surprise, and no longer guaranteed to monotonically decrease over iterations.
#
# ### References
#
# Bouchard, 2007 "Efficient Bounds for the Softmax Function"
#
# Ahmed, 2013, "Bayesian Multicategorical Soft Data Fusion for Human-Robot Collaboration"
|
demo/local_variational_inference_softmax.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from matplotlib import rc
rc("font", family="serif", size=11)
# +
# Star catalogue: keep columns 1-3 (ID plus two summary columns).
output = pd.read_csv('../data/output_1000stars.csv', usecols=range(1,4))
ID = 9456478
#ID = random.choice(output['ID'])
star = output.loc[(output['ID'] == ID)]
star
# +
# Mode frequencies for the chosen star (plotted later with label l=0).
modes = pd.read_csv('../data/rgbmodes/modes_'+str(ID)+'.csv', usecols=['f0', 'f0_err', 'A0'])
modes = modes.sort_values(by=['f0'])
modes = modes.set_index(np.arange(0,len(modes),1))
# Central-difference estimate of the frequency spacing per mode:
# (f0[i+1] - f0[i-1]) / 2, aligned to row i (NaN at the ends).
modes['dnu'] = (modes['f0'].diff(2).shift(-1))/2
dnu_avg = np.mean(modes['dnu'])
# Approximate radial order n for each mode from frequency / average spacing.
n_min = int(modes['f0'].min() / dnu_avg)
n = np.arange(n_min, n_min+len(modes), 1)
modes.insert(loc=0, column='n', value=n)
modes
# +
dnu_avg = 2.5  # NOTE(review): overrides the value computed from the data above — confirm intentional
def model(n, dnu, nmax, epsilon, k, A, omega, phi, tau):
    """Mode-frequency model: linear term in (n + epsilon), quadratic
    curvature around nmax, and an exponentially damped sinusoid.

    Works elementwise for scalar or array ``n``; returns the same shape.
    """
    smooth = dnu * (n + epsilon) + k * (nmax - n) ** 2
    wiggle = A * np.exp(-n / tau) * np.sin(omega * n + phi)
    return smooth + wiggle
# Hand-tuned initial guesses for the model parameters; reused below as the
# Stan initialization and plotted against the data here.
nmax = 7
epsilon = 0.08
k = 0.03
A = 0.1
omega = 3
phi = 2
tau = 10
f = model(n, dnu_avg, nmax, epsilon, k, A, omega, phi, tau)
# Echelle-style diagram: frequency vs frequency modulo the average spacing.
plt.scatter(modes['f0'] % dnu_avg, modes['f0'], label=r'$l=$'+str(0))
plt.plot(f % dnu_avg, f, label='model')
plt.ylabel('Frequency')
plt.xlabel(r'Mod. Freq. Spacing ('+ str(u"%.3f" % dnu_avg) +') $\mu$Hz')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# -
code = '''
functions {
real model(real n, real dnu, real nmax, real epsilon, real k, real A, real omega, real phi, real tau){
return (dnu*(n+epsilon) + k*(nmax - n)^2 + A*sin(omega*n + phi)*exp(-n/tau));
}
}
data {
int N;
real n[N];
real freq[N];
real freq_err[N];
real dnu_guess;
}
parameters {
real<lower = 0> dnu;
real<lower = 0> nmax;
real epsilon;
real k;
real<lower = 0> A;
real<lower = 0> omega;
real<lower = -2.0*pi(), upper = 2.0*pi()> phi;
real<lower = 0> tau;
}
model {
real mod[N];
for (i in 1:N){
mod[i] = model(n[i], dnu, nmax, epsilon, k, A, omega, phi, tau);
}
mod ~ normal(freq, freq_err);
dnu ~ normal(dnu_guess, dnu_guess*0.001);
nmax ~ normal(11,2);
epsilon ~ normal(0.77, 0.1);
k ~ lognormal(log(0.03), 0.3);
A ~ lognormal(log(0.2), 0.3);
omega ~ normal(3, 0.5);
tau ~ normal(10,5);
// phi ~ normal(0, 1.5);
}
'''
import pystan
sm = pystan.StanModel(model_code=code)
# +
# Data passed to the compiled Stan model.
stan_data = {'N': len(modes['n'].values),
             'n': modes['n'].values,
             'freq': (modes['f0'].values),
             'freq_err': modes['f0_err'].values,
             'dnu_guess': dnu_avg
            }
# Initial parameter values (the hand-tuned guesses above), one copy per chain.
start = {'dnu': dnu_avg,
         'nmax': nmax,
         'epsilon': epsilon,
         'k': k,
         'A': A,
         'omega': omega,
         'phi': phi,
         'tau': tau
        }
nchains = 4
fit = sm.sampling(data=stan_data, iter=5000, chains=nchains, init=[start for n in range(nchains)],)
#control=dict(max_treedepth=15))
# -
print(fit)
fit.plot()
plt.show()
# Corner plot of the joint posterior samples.
import corner
data = np.vstack([fit['epsilon'], fit['k'], fit['dnu'], fit['nmax'],
                  fit['A'], fit['omega'], fit['phi'], fit['tau']]).T
corner.corner(data, labels=[r'$\epsilon$', r'$k$',r'$\Delta\nu$',r'$n_{max}$',
                            r'$A$', r'$\omega$', r'$\phi$', r'$\tau$'])
#, truths=[1.436, 0.07, 0.3, 2, 0])
plt.show()
# +
#n = np.arange(12,30,1)
# Compare the initial guess and the posterior-mean fit in the echelle diagram.
plt.figure(4)
plt.scatter(modes['f0'] % dnu_avg, modes['f0'], c='k', marker='x', label=r'$l=$'+str(0))
plt.errorbar(modes['f0'] % dnu_avg, modes['f0'], xerr=modes['f0_err'], zorder=0, fmt="none", label="none",
             c='k', capsize=2, markersize=4, elinewidth=1)
g = model(n, fit['dnu'].mean(), fit['nmax'].mean(), fit['epsilon'].mean(), fit['k'].mean(),
          fit['A'].mean(), fit['omega'].mean(), fit['phi'].mean(), fit['tau'].mean())
plt.plot(f % dnu_avg, f, ':', label='Guess')
plt.plot(g % fit['dnu'].mean(), g, label='Fit')
#plt.plot(g % dnu, g, label='Fit')
plt.ylabel('Frequency')
plt.xlabel(r'Mod. Freq. Spacing ('+ str(dnu_avg) +') $\mu$Hz')
#plt.xlim(58,68)
plt.legend()
plt.show()
# -
|
Dan_notebooks/development/rgdata-9456478.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inverted Pendulum
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy import signal
# %matplotlib notebook
# Constants
M = 10 # cart mass
m = 1 # pendulum mass
l = 1 # pendulum length
g = 9.81 # standard gravity
# +
# System Model
# NOTE(review): A and B contain only the integrator entries (all dynamics
# terms are zero, and none of M, m, l, g appear), so this looks like a
# placeholder for the linearized cart-pendulum model — confirm.
A = np.array([[0, 1, 0, 0],
              [0, 0, 0, 0],
              [0, 0, 0, 1],
              [0, 0, 0, 0]])
B = np.array([[0, 0, 0, 0]]).T
C = np.array([[1, 0, 0, 0],
              [0, 0, 1, 0]]) # Output the two displacement variables
D = np.array([[0, 0]]).T
sys = signal.StateSpace(A, B, C, D)
# +
# Animation
# ``matplotlib.animation`` is not made available by ``import matplotlib`` or
# by importing pyplot, so import the submodule explicitly before using it.
import matplotlib.animation
pend = matplotlib.lines.Line2D([0,0], [0,5], linewidth=2,markersize=2)
cart = matplotlib.patches.Rectangle([-0.5, 0], 1, 1)
fig, ax = plt.subplots()
ax.set_xlim(-np.pi, np.pi)
ax.set_ylim(-10, 10)
ax.grid()
ax.add_patch(cart)
ax.add_line(pend)
def animate(i):
    """Advance one frame: slide the cart (and the pendulum line) to x = sin(i)."""
    x = np.sin(i)
    cart.set_x(x - cart.get_width()/2)
    # The line has two vertices (ydata = [0, 5]); both x-coordinates must be
    # set so xdata and ydata keep the same length.
    pend.set_xdata([x, x])
    return cart, pend
ani = matplotlib.animation.FuncAnimation(fig,animate,np.linspace(0,2*np.pi, 1000),interval=5,blit=True)
plt.show()
# -
|
simulation/Inverted Pendulum.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=[]
# Use the Tello drone simulator
import tello_sim
drone = tello_sim.Simulator()
# + tags=[]
# Take off
drone.takeoff()
# + tags=[]
# Ascend
drone.up(9)
# -
# ## What can be stored in a variable
# + tags=[]
altura_actual = 90 # A value is stored in a variable using the equals sign
NuevaAltura = 194 # Upper- and lower-case letters can both be used in names
diferencia = NuevaAltura - altura_actual # The result of an operation can be stored too
print('La diferencia de la altura es', diferencia) # What value does the variable hold?
# + tags=[]
# Numbers
print(NuevaAltura)
# + tags=[]
# Text strings
pregunta = '¿Cual debe ser la nueva altura? ' # Text goes between quotes
print(pregunta)
# + tags=[]
# Lists
print(drone.altitude_data)
# + tags=[]
# Complex structures
print(drone.command_log)
# + tags=[]
# We can also ask whoever is using the program for the information.
NuevaAltura = input(pregunta)
print('La nueva altura tiene que ser', NuevaAltura)
# + tags=[]
# A numeric value can be of number type or of text type.
# To perform mathematical operations the data must be of a numeric type.
print('drone.altitude es de tipo', type(drone.altitude))
print('NuevaAltura es de tipo', type(NuevaAltura))
# NOTE(review): input() returns a str, so this subtraction raises a TypeError —
# presumably an intentional teaching example; confirm.
diferencia = drone.altitude - NuevaAltura
|
actividades/apd01/variables_b.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] internals={"slide_type": "subslide"} slideshow={"slide_type": "slide"}
# <div style="width:1000 px">
#
# <div style="float:right; width:98 px; height:98px;">
# <img src="https://raw.githubusercontent.com/Unidata/MetPy/master/src/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
# </div>
#
# <h1>Writing netCDF data</h1>
# <h3>Unidata Python Workshop</h3>
#
# <div style="clear:both"></div>
# </div>
#
# <hr style="height:2px;">
#
# <div style="float:right; width:250 px"><img src="https://www.unidata.ucar.edu/images/logos/netcdf-50x50.png" alt="netCDF logo" style="height: 100px;"></div>
#
# **Important Note**: when running this notebook interactively in a browser, you probably will not be able to execute individual cells out of order without getting an error. Instead, choose "Run All" from the Cell menu after you modify a cell.
# + internals={"frag_number": 1, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"}
from netCDF4 import Dataset # Note: python is case-sensitive!
import numpy as np
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 1, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Opening a file, creating a new Dataset
#
# Let's create a new, empty netCDF file named 'new.nc' in our project root `data` directory, opened for writing.
#
# Be careful, opening a file with 'w' will clobber any existing data (unless `clobber=False` is used, in which case an exception is raised if the file already exists).
#
# - `mode='r'` is the default.
# - `mode='a'` opens an existing file and allows for appending (does not clobber existing data)
# - `format` can be one of `NETCDF3_CLASSIC`, `NETCDF3_64BIT`, `NETCDF4_CLASSIC` or `NETCDF4` (default). `NETCDF4_CLASSIC` uses HDF5 for the underlying storage layer (as does `NETCDF4`) but enforces the classic netCDF 3 data model so data can be read with older clients.
# + internals={"frag_helper": "fragment_end", "frag_number": 3, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
# Just to be safe, make sure the dataset is not already open from a previous
# run.  Catch only the expected failures instead of a bare ``except``:
# NameError when ``ncfile`` is not defined yet, RuntimeError when the
# Dataset has already been closed.
try:
    ncfile.close()
except (NameError, RuntimeError):
    pass
# mode='w' clobbers any existing file; NETCDF4_CLASSIC stores via HDF5 but
# enforces the classic netCDF-3 data model for older clients.
ncfile = Dataset('../../../data/new.nc',mode='w',format='NETCDF4_CLASSIC')
print(ncfile)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 3, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Creating dimensions
#
# The **ncfile** object we created is a container for _dimensions_, _variables_, and _attributes_. First, let's create some dimensions using the [`createDimension`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createDimension) method.
#
# - Every dimension has a name and a length.
# - The name is a string that is used to specify the dimension to be used when creating a variable, and as a key to access the dimension object in the `ncfile.dimensions` dictionary.
#
# Setting the dimension length to `0` or `None` makes it unlimited, so it can grow.
#
# - For `NETCDF4` files, any variable's dimension can be unlimited.
# - For `NETCDF4_CLASSIC` and `NETCDF3*` files, only one per variable can be unlimited, and it must be the leftmost (slowest varying) dimension.
# + internals={"frag_helper": "fragment_end", "frag_number": 5, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
lat_dim = ncfile.createDimension('lat', 73) # latitude axis
lon_dim = ncfile.createDimension('lon', 144) # longitude axis
time_dim = ncfile.createDimension('time', None) # unlimited axis (can be appended to).
for dim in ncfile.dimensions.items():
print(dim)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 5, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Creating attributes
#
# netCDF attributes can be created just like you would for any python object.
#
# - Best to adhere to established conventions (like the [CF](http://cfconventions.org/) conventions)
# - We won't try to adhere to any specific convention here though.
# + internals={"frag_helper": "fragment_end", "frag_number": 7} slideshow={"slide_type": "fragment"} tags=[]
ncfile.title='My model data'
print(ncfile.title)
# + tags=[]
ncfile.subtitle="My model data subtitle"
print(ncfile.subtitle)
print(ncfile)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 8, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"}
# Try adding some more attributes...
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 8, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Creating variables
#
# Now let's add some variables and store some data in them.
#
# - A variable has a name, a type, a shape, and some data values.
# - The shape of a variable is specified by a tuple of dimension names.
# - A variable should also have some named attributes, such as 'units', that describe the data.
#
# The [`createVariable`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createVariable) method takes 3 mandatory args.
#
# - the 1st argument is the variable name (a string). This is used as the key to access the variable object from the `variables` dictionary.
# - the 2nd argument is the datatype (most numpy datatypes supported).
# - the third argument is a tuple containing the dimension names (the dimensions must be created first). Unless this is a `NETCDF4` file, any unlimited dimension must be the leftmost one.
# - there are lots of optional arguments (many of which are only relevant when `format='NETCDF4'`) to control compression, chunking, fill_value, etc.
#
# + internals={"frag_helper": "fragment_end", "frag_number": 10, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
# Define two variables with the same names as dimensions,
# a conventional way to define "coordinate variables".
lat = ncfile.createVariable('lat', np.float32, ('lat',))
lat.units = 'degrees_north'
lat.long_name = 'latitude'
lon = ncfile.createVariable('lon', np.float32, ('lon',))
lon.units = 'degrees_east'
lon.long_name = 'longitude'
# NOTE(review): the name ``time`` would shadow the stdlib ``time`` module if
# that were ever imported in this notebook.
time = ncfile.createVariable('time', np.float64, ('time',))
time.units = 'hours since 1800-01-01'
time.long_name = 'time'
# Define a 3D variable to hold the data
temp = ncfile.createVariable('temp',np.float64,('time','lat','lon')) # note: unlimited dimension is leftmost
temp.units = 'K' # degrees Kelvin
temp.standard_name = 'air_temperature' # this is a CF standard name
print(temp)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 10, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Pre-defined variable attributes (read only)
#
# The netCDF4 module provides some useful pre-defined Python attributes for netCDF variables, such as dimensions, shape, dtype, ndim.
#
# Note: since no data has been written yet, the length of the 'time' dimension is 0.
# + internals={"frag_helper": "fragment_end", "frag_number": 12, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
print("-- Some pre-defined attributes for variable temp:")
print("temp.dimensions:", temp.dimensions)
print("temp.shape:", temp.shape)
print("temp.dtype:", temp.dtype)
print("temp.ndim:", temp.ndim)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 12, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Writing data
#
# To write data to a netCDF variable object, just treat it like a numpy array and assign values to a slice.
# + internals={"frag_helper": "fragment_end", "frag_number": 14} slideshow={"slide_type": "fragment"} tags=[]
nlats = len(lat_dim); nlons = len(lon_dim); ntimes = 3
# Write latitudes, longitudes.
# Note: the ":" is necessary in these "write" statements
lat[:] = -90. + (180./nlats)*np.arange(nlats) # south pole to north pole
# NOTE(review): the longitude spacing reuses 180./nlats; 360./nlons would give
# full global coverage — confirm which was intended.
lon[:] = (180./nlats)*np.arange(nlons) # Greenwich meridian eastward
# create a 3D array of random numbers
data_arr = np.random.uniform(low=280,high=330,size=(ntimes,nlats,nlons))
# Write the data. This writes the whole 3D netCDF variable all at once.
temp[:,:,:] = data_arr # Appends data along unlimited dimension
print("-- Wrote data, temp.shape is now ", temp.shape)
# read data back from variable (by slicing it), print min and max
print("-- Min/Max values:", temp[:,:,:].min(), temp[:,:,:].max())
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 15, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"}
# - You can just treat a netCDF Variable object like a numpy array and assign values to it.
# - Variables automatically grow along unlimited dimensions (unlike numpy arrays)
# - The above writes the whole 3D variable all at once, but you can write it a slice at a time instead.
#
# Let's add another time slice....
#
# + internals={"frag_helper": "fragment_end", "frag_number": 15, "slide_type": "subslide"} slideshow={"slide_type": "slide"} tags=[]
# create a 2D array of random numbers
data_slice = np.random.uniform(low=280,high=330,size=(nlats,nlons))
temp[3,:,:] = data_slice # Appends the 4th time slice
print("-- Wrote more data, temp.shape is now ", temp.shape)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 17} slideshow={"slide_type": "fragment"}
# Note that we have not yet written any data to the time variable. It automatically grew as we appended data along the time dimension to the variable `temp`, but the data is missing.
# + internals={"frag_helper": "fragment_end", "frag_number": 18, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
print(time)
times_arr = time[:]
print(type(times_arr),times_arr) # dashes indicate masked values (where data has not yet been written)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 18, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# Let's add/write some data into the time variable.
#
# - Given a set of datetime instances, use date2num to convert to numeric time values and then write that data to the variable.
# + tags=[]
import datetime as dt
from netCDF4 import date2num,num2date
# 1st 4 days of October.
dates = [dt.datetime(2014,10,1,0),dt.datetime(2014,10,2,0),dt.datetime(2014,10,3,0),dt.datetime(2014,10,4,0)]
print(dates)
# + tags=[]
times = date2num(dates, time.units)
print(times, time.units) # numeric values
# + internals={"frag_helper": "fragment_end", "frag_number": 20, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
time[:] = times
# read time data back, convert to datetime instances, check values.
print(time[:])
print(time.units)
print(num2date(time[:],time.units))
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 20, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Closing a netCDF file
#
# It's **important** to close a netCDF file you opened for writing:
#
# - flushes buffers to make sure all data gets written
# - releases memory resources used by open netCDF files
# + internals={"frag_helper": "fragment_end", "frag_number": 22, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
# first print the Dataset object to see what we've got
print(ncfile)
# close the Dataset.
ncfile.close(); print('Dataset is closed!')
# + tags=[]
# !ncdump -h ../../../data/new.nc
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 22, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# # Advanced features
#
# So far we've only exercised features associated with the old netCDF version 3 data model. netCDF version 4 adds a lot of new functionality that comes with the more flexible HDF5 storage layer.
#
# Let's create a new file with `format='NETCDF4'` so we can try out some of these features.
# + internals={"frag_helper": "fragment_end", "frag_number": 25, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
ncfile = Dataset('../../../data/new2.nc','w',format='NETCDF4')
print(ncfile)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 25, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Creating Groups
#
# netCDF version 4 added support for organizing data in hierarchical groups.
#
# - analogous to directories in a filesystem.
# - Groups serve as containers for variables, dimensions and attributes, as well as other groups.
# - A `netCDF4.Dataset` creates a special group, called the 'root group', which is similar to the root directory in a unix filesystem.
#
# - groups are created using the [`createGroup`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createGroup) method.
# - takes a single argument (a string, which is the name of the Group instance). This string is used as a key to access the group instances in the `groups` dictionary.
#
# Here we create two groups to hold data for two different model runs.
# + internals={"frag_helper": "fragment_end", "frag_number": 27, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
grp1 = ncfile.createGroup('model_run1')
grp2 = ncfile.createGroup('model_run2')
for grp in ncfile.groups.items():
print(grp)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 27, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# Create some dimensions in the root group.
# + internals={"frag_helper": "fragment_end", "frag_number": 29} slideshow={"slide_type": "fragment"}
lat_dim = ncfile.createDimension('lat', 73) # latitude axis
lon_dim = ncfile.createDimension('lon', 144) # longitude axis
time_dim = ncfile.createDimension('time', None) # unlimited axis (can be appended to).
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 30} slideshow={"slide_type": "fragment"}
# Now create a variable in grp1 and grp2. The library will search recursively upwards in the group tree to find the dimensions (which in this case are defined one level up).
#
# - These variables are create with **zlib compression**, another nifty feature of netCDF 4.
# - The data are automatically compressed when data is written to the file, and uncompressed when the data is read.
# - This can really save disk space, especially when used in conjunction with the [**least_significant_digit**](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createVariable) keyword argument, which causes the data to be quantized (truncated) before compression. This makes the compression lossy, but more efficient.
# + internals={"frag_helper": "fragment_end", "frag_number": 31, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
temp1 = grp1.createVariable('temp',np.float64,('time','lat','lon'),zlib=True)
temp2 = grp2.createVariable('temp',np.float64,('time','lat','lon'),zlib=True)
for grp in ncfile.groups.items(): # shows that each group now contains 1 variable
print(grp)
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 31, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Creating a variable with a compound data type
#
# - Compound data types map directly to numpy structured (a.k.a 'record' arrays).
# - Structured arrays are akin to C structs, or derived types in Fortran.
# - They allow for the construction of table-like structures composed of combinations of other data types, including other compound types.
# - Might be useful for representing multiple parameter values at each point on a grid, or at each time and space location for scattered (point) data.
#
# Here we create a variable with a compound data type to represent complex data (there is no native complex data type in netCDF).
#
# - The compound data type is created with the [`createCompoundType`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createCompoundType) method.
# + internals={"frag_helper": "fragment_end", "frag_number": 33, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
# create complex128 numpy structured data type
# NOTE(review): this cell relies on `np`, `ncfile`, `grp1`, `lat_dim` and
# `lon_dim` having been defined in earlier notebook cells (not visible here).
complex128 = np.dtype([('real',np.float64),('imag',np.float64)])
# using this numpy dtype, create a netCDF compound data type object
# the string name can be used as a key to access the datatype from the cmptypes dictionary.
complex128_t = ncfile.createCompoundType(complex128,'complex128')
# create a variable with this data type, write some data to it.
cmplxvar = grp1.createVariable('cmplx_var',complex128_t,('time','lat','lon'))
# write some data to this variable
# first create some complex random data
nlats = len(lat_dim); nlons = len(lon_dim)
data_arr_cmplx = np.random.uniform(size=(nlats,nlons))+1.j*np.random.uniform(size=(nlats,nlons))
# write this complex data to a numpy complex128 structured array
# (separate 'real'/'imag' float64 fields, since netCDF has no native complex type)
data_arr = np.empty((nlats,nlons),complex128)
data_arr['real'] = data_arr_cmplx.real; data_arr['imag'] = data_arr_cmplx.imag
cmplxvar[0] = data_arr # write the data to the variable (appending to time dimension)
print(cmplxvar)
data_out = cmplxvar[0] # read one value of data back from variable
print(data_out.dtype, data_out.shape, data_out[0,0])
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 33, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# ## Creating a variable with a variable-length (vlen) data type
#
# netCDF 4 has support for variable-length or "ragged" arrays. These are arrays of variable length sequences having the same type.
#
# - To create a variable-length data type, use the [`createVLType`](http://unidata.github.io/netcdf4-python/netCDF4.Dataset-class.html#createVLType) method.
# - The numpy datatype of the variable-length sequences and the name of the new datatype must be specified.
# + internals={"frag_helper": "fragment_end", "frag_number": 35} slideshow={"slide_type": "fragment"}
# Create a variable-length (ragged) netCDF data type holding int64 sequences.
vlen_t = ncfile.createVLType(np.int64, 'phony_vlen')
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 36} slideshow={"slide_type": "fragment"}
# A new variable can then be created using this datatype.
# + internals={"frag_helper": "fragment_end", "frag_number": 37, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"}
# NOTE(review): `grp2` comes from an earlier cell outside this view.
vlvar = grp2.createVariable('phony_vlen_var', vlen_t, ('time','lat','lon'))
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 37, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# Since there is no native vlen datatype in numpy, vlen arrays are represented in python as object arrays (arrays of dtype `object`).
#
# - These are arrays whose elements are Python object pointers, and can contain any type of python object.
# - For this application, they must contain 1-D numpy arrays all of the same type but of varying length.
# - Fill with 1-D random numpy int64 arrays of random length between 1 and 10.
# + internals={"frag_helper": "fragment_end", "frag_number": 39, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
# Fill an object array with 1-D int64 arrays of random length, then write it.
vlen_data = np.empty((nlats,nlons),object)
for i in range(nlons):
    for j in range(nlats):
        size = np.random.randint(1,10,size=1) # random length of sequence
        vlen_data[j,i] = np.random.randint(0,10,size=size).astype(vlen_t.dtype)# generate random sequence
vlvar[0] = vlen_data # append along unlimited dimension (time)
print(vlvar)
print('data =\n',vlvar[:])
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 39, "slide_type": "subslide"} slideshow={"slide_type": "slide"}
# Close the Dataset and examine the contents with ncdump.
# + internals={"frag_helper": "fragment_end", "frag_number": 41, "slide_helper": "subslide_end"} slide_helper="slide_end" slideshow={"slide_type": "fragment"} tags=[]
ncfile.close()
# !ncdump -h ../../../data/new2.nc
# + [markdown] internals={"frag_helper": "fragment_end", "frag_number": 41, "slide_helper": "subslide_end", "slide_type": "subslide"} slide_helper="slide_end" slideshow={"slide_type": "slide"}
# ## Other interesting and useful projects using netcdf4-python
#
# - [xarray](http://xarray.pydata.org): N-dimensional variant of the core [pandas](http://pandas.pydata.org) data structure that can operate on netcdf variables.
# - [Iris](http://scitools.org.uk/iris/): a data model to create a data abstraction layer which isolates analysis and visualisation code from data format specifics. Uses netcdf4-python to access netcdf data (can also handle GRIB).
# - [Biggus](https://github.com/SciTools/biggus): Virtual large arrays (from netcdf variables) with lazy evaluation.
# - [cf-python](http://cfpython.bitbucket.org/): Implements the [CF](http://cfconventions.org) data model for the reading, writing and processing of data and metadata.
|
pages/workshop/Bonus/netCDF Writing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + run_control={"frozen": false, "read_only": false}
from jove.DotBashers import *
from jove.Def_DFA import *
from jove.Def_NFA import *
from jove.Def_RE2NFA import *
from jove.Def_NFA2RE import *
from jove.Def_md2mc import *
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Problem Set One
# + [markdown] run_control={"frozen": false, "read_only": false}
# 1) LOdd1Three0 : Set of strings over {0,1} with an odd # of 1s OR exactly three 0s.
#
# * Hint on how to arrive at the language:
#
# - develop NFAs for the two cases and perform their union. Obtain DFA
#
# - develop REs for the two cases and perform the union.
#
# - Testing the creations:
#
# . Come up with language for even # of 1s and separately for "other than three 0s".
#
# . Do two intersections.
#
# . Is the language empty?
#
#
# 2) Language of strings over {0,1} with exactly two occurrences of 0101 in it.
#
#     * Come up with it directly (take overlaps into account, i.e. 010101 has two occurrences of 0101 in it)
#
# * Come up in another way
#
# Notes:
#
#     * Most of the difficulty students will have in this course is in interpreting English (technical English)
#
# * So again, read the writeup at the beginning of Module6 (should be ready soon today) and work on using the tool.
#
#
#
#
# + [markdown] run_control={"frozen": false, "read_only": false}
# __Solutions__
#
# 1) LOdd1Three0 : Set of strings over {0,1} with an odd # of 1s OR exactly three 0s.
#
# * Hint on how to arrive at the language:
#
# - develop NFAs for the two cases and perform their union. Obtain DFA
#
# - develop REs for the two cases and perform the union.
#
# - Testing the creations:
#
# . Come up with language for even # of 1s and separately for "other than three 0s".
#
# . Do two intersections.
#
# . Is the language empty?
#
#
# 2) Language of strings over {0,1} with exactly two occurrences of 0101 in it.
#
#     * Come up with it directly (take overlaps into account, i.e. 010101 has two occurrences of 0101 in it)
#
# * Come up in another way
#
# Notes:
#
#     * Most of the difficulty students will have in this course is in interpreting English (technical English)
#
# * So again, read the writeup at the beginning of Module6 (should be ready soon today) and work on using the tool.
#
#
#
#
# + run_control={"frozen": false, "read_only": false}
# RE for "odd number of 1s"; build NFA, determinize, minimize, display.
# (re2nfa/nfa2dfa/min_dfa/dotObj_dfa come from the jove imports above.)
RE_Odd1s = "(0* 1 0* (1 0* 1 0*)*)"
NFA_Odd1s = re2nfa(RE_Odd1s)
DO_Odd1s = dotObj_dfa(min_dfa(nfa2dfa(NFA_Odd1s)))
DO_Odd1s
# + run_control={"frozen": false, "read_only": false}
# NOTE(review): despite the name Ex3z ("exactly three 0s"), this RE is the
# union "exactly three 0s + odd number of 1s" -- confirm the name is intended.
RE_Ex3z = "1* 0 1* 0 1* 0 1* + (0* 1 0* (1 0* 1 0*)*)"
NFA_Ex3z = re2nfa(RE_Ex3z)
DO_Ex3z = dotObj_dfa(min_dfa(nfa2dfa(NFA_Ex3z)))
DO_Ex3z
# + run_control={"frozen": false, "read_only": false}
# LOdd1Three0: odd # of 1s OR exactly three 0s; keep the minimal DFA for later checks.
RE_O13z = "0* 1 0* (1 0* 1 0*)* + 1* 0 1* 0 1* 0 1* "
NFA_O13z = re2nfa(RE_O13z)
MD_O13z = min_dfa(nfa2dfa(NFA_O13z))
DO_O13z = dotObj_dfa(MD_O13z)
DO_O13z
# + run_control={"frozen": false, "read_only": false}
# Same language, this time minimized with min_dfa_brz (Brzozowski-style entry point).
RE_O13z = "0* 1 0* (1 0* 1 0*)* + 1* 0 1* 0 1* 0 1* "
NFA_O13z = re2nfa(RE_O13z)
NMD_O13z = nfa2dfa(NFA_O13z)
MD_O13zB = min_dfa_brz(NMD_O13z)
DO_O13zB = dotObj_dfa(MD_O13zB)
DO_O13zB
# + run_control={"frozen": false, "read_only": false}
# The two minimized DFAs should be isomorphic.
iso_dfa(MD_O13z,MD_O13zB)
# + run_control={"frozen": false, "read_only": false}
# The unminimized DFA is language-equivalent to the minimal one ...
langeq_dfa(NMD_O13z,MD_O13z)
# + run_control={"frozen": false, "read_only": false}
# ... but may not be isomorphic to it.
iso_dfa(NMD_O13z, MD_O13z)
# + run_control={"frozen": false, "read_only": false}
# Minimal DFA for the RE denoting just the empty string.
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("''"))))
# + run_control={"frozen": false, "read_only": false}
# Minimal DFAs for the singleton languages {aa} and {bb}.
D1 = min_dfa(nfa2dfa(re2nfa("aa")))
dotObj_dfa(D1)
# + run_control={"frozen": false, "read_only": false}
D2 = min_dfa(nfa2dfa(re2nfa("bb")))
dotObj_dfa(D2)
# + run_control={"frozen": false, "read_only": false}
D1
# + run_control={"frozen": false, "read_only": false}
D2
# + run_control={"frozen": false, "read_only": false}
# Union of the two DFAs, then prune unreachable states for display.
D1or2 = min_dfa(union_dfa(D1,D2))
D1or2p = pruneUnreach(D1or2)
dotObj_dfa(D1or2)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(D1or2p)
# + run_control={"frozen": false, "read_only": false}
# Intersection of {aa} and {bb} (an empty language).
D1and2 = min_dfa(intersect_dfa(D1,D2))
D1and2p = pruneUnreach(D1and2)
dotObj_dfa(D1and2)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(D1and2p)
# + run_control={"frozen": false, "read_only": false}
# langeq_dfa with True asks for a witness when the languages differ.
d1=nfa2dfa(re2nfa("abcde"))
d2=nfa2dfa(re2nfa("abced"))
langeq_dfa(d1,d2,True)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d1)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d2)
# + run_control={"frozen": false, "read_only": false}
# aa*+bc versus a(a*+bc): different languages (witness expected).
d1a=nfa2dfa(re2nfa("aa*+bc"))
d2a=nfa2dfa(re2nfa("a(a*+bc)"))
langeq_dfa(d1a,d2a,True)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d1a)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d2a)
# + run_control={"frozen": false, "read_only": false}
# Distributed product of (aa*+bc)(aa*+bc) spelled out term by term.
d1b=nfa2dfa(re2nfa("aaa*+aa*bc+bcaa*+bcbc"))
d2b=nfa2dfa(re2nfa("(aa*+bc)(aa*+bc)"))
langeq_dfa(d1b,d2b,True)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d1b)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d2b)
# + run_control={"frozen": false, "read_only": false}
iso_dfa(d1b,d2b)
# + run_control={"frozen": false, "read_only": false}
# After minimization the two should become isomorphic.
d1c=min_dfa(d1b)
# + run_control={"frozen": false, "read_only": false}
d2c=min_dfa(d2b)
# + run_control={"frozen": false, "read_only": false}
iso_dfa(d1c,d2c)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d1c)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d2c)
# + run_control={"frozen": false, "read_only": false}
# A deliberately perturbed variant (extra 'a' in the third term).
d1d=nfa2dfa(re2nfa("aaa*+aa*bc+bcaaa*+bcbc"))
d2d=nfa2dfa(re2nfa("(aa*+bc)(aa*+bc)"))
langeq_dfa(d1d,d2d,True)
# + run_control={"frozen": false, "read_only": false}
# Same REs with spaces between symbols (spacing is ignored by re2nfa here).
d1d=nfa2dfa(re2nfa("a a a*+a a* b c+ b c a a a*+b c b c"))
d2d=nfa2dfa(re2nfa("(a a*+b c)(a a*+b c)"))
langeq_dfa(d1d,d2d,True)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d1d)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(d2d)
# + run_control={"frozen": false, "read_only": false}
d1d=nfa2dfa(re2nfa("james*+bond*"))
dotObj_dfa(d1d)
# + run_control={"frozen": false, "read_only": false}
d1d=nfa2dfa(re2nfa("ja mes*+bo nd*"))
dotObj_dfa(d1d)
# + run_control={"frozen": false, "read_only": false}
d1d=nfa2dfa(re2nfa("''"))
dotObj_dfa(d1d)
# + run_control={"frozen": false, "read_only": false}
help(md2mc)
# + run_control={"frozen": false, "read_only": false}
# Load machine descriptions from files shipped with the course materials.
test = md2mc(src="File", fname="machines/nfafiles/endsin0101.nfa")
dotObj_nfa(test)
# + run_control={"frozen": false, "read_only": false}
# NFA for 0101 within hamming dist of 2
nfamd1 = md2mc(src="File", fname="machines/nfafiles/nfa0101h2.nfa")
dotObj_nfa(nfamd1)
# + run_control={"frozen": false, "read_only": false}
dfamd1=nfa2dfa(nfamd1)
dotObj_dfa(dfamd1)
# + run_control={"frozen": false, "read_only": false}
# Minimize the same DFA two ways and compare.
m1=min_dfa(dfamd1)
# + run_control={"frozen": false, "read_only": false}
m2=min_dfa_brz(dfamd1)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(m1)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(m2)
# + run_control={"frozen": false, "read_only": false}
# + run_control={"frozen": false, "read_only": false}
iso_dfa(m1,m2)
# + run_control={"frozen": false, "read_only": false}
help(del_gnfa_states)
# + run_control={"frozen": false, "read_only": false}
# Convert the Hamming-distance NFA to a GNFA and eliminate states to get an RE.
gnfamd1=mk_gnfa(nfamd1)
dotObj_gnfa(gnfamd1)
# + run_control={"frozen": false, "read_only": false}
# del_gnfa_states returns the final GNFA, per-step drawings, and the RE string.
(Gfinal, dotObj_List, final_re_str) = del_gnfa_states(gnfamd1)
# + run_control={"frozen": false, "read_only": false}
final_re_str
# + run_control={"frozen": false, "read_only": false}
# Inspect a few of the intermediate elimination snapshots.
dotObj_List[0]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[1]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[2]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[3]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[4]
# + run_control={"frozen": false, "read_only": false}
len(dotObj_List)
# + run_control={"frozen": false, "read_only": false}
dotObj_List[11]
# + run_control={"frozen": false, "read_only": false}
final_re_str
# + run_control={"frozen": false, "read_only": false}
# Round-trip: the derived RE should denote the same language as the original NFA.
fullcircle=min_dfa(nfa2dfa(re2nfa(final_re_str)))
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(fullcircle)
# + run_control={"frozen": false, "read_only": false}
# Hand-written RE enumerating the Hamming-distance-2 perturbations of 0101.
h2_from_re = min_dfa(nfa2dfa(re2nfa("(0+1)(0+1)01 + (0+1)1(0+1)1 + (0+1)10(0+1) + 0(0+1)(0+1)1 + 0(0+1)0(0+1) + 01(0+1)(0+1)")))
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(h2_from_re)
# + run_control={"frozen": false, "read_only": false}
iso_dfa(fullcircle,h2_from_re)
# + run_control={"frozen": false, "read_only": false}
# Draw the Thompson-style NFAs produced by re2nfa for the basic RE operators
# (second argument True/False toggles a drawing option of dotObj_nfa).
epsre = dotObj_nfa(re2nfa("''"), True)
epsre.source
# + run_control={"frozen": false, "read_only": false}
epsre
# + run_control={"frozen": false, "read_only": false}
are = dotObj_nfa(re2nfa("a"), True)
are.source
# + run_control={"frozen": false, "read_only": false}
are
# + run_control={"frozen": false, "read_only": false}
aplusbre = dotObj_nfa(re2nfa("a+b"), True)
aplusbre.source
# + run_control={"frozen": false, "read_only": false}
aplusbre
# + run_control={"frozen": false, "read_only": false}
abre = dotObj_nfa(re2nfa("ab"), True)
abre.source
# + run_control={"frozen": false, "read_only": false}
abre
# + run_control={"frozen": false, "read_only": false}
arestar = dotObj_nfa(re2nfa("a*"), True)
arestar.source
# + run_control={"frozen": false, "read_only": false}
arestar
# + run_control={"frozen": false, "read_only": false}
aplusbstar = dotObj_nfa(re2nfa("(a+b)*"), True)
aplusbstar.source
# + run_control={"frozen": false, "read_only": false}
aplusbstar
# + run_control={"frozen": false, "read_only": false}
aplusb_aplusbstar = dotObj_nfa(re2nfa("(a+b)(a+b)*"), True)
aplusb_aplusbstar.source
# + run_control={"frozen": false, "read_only": false}
aplusb_aplusbstar
# + run_control={"frozen": false, "read_only": false}
aplusb_aplusb = dotObj_nfa(re2nfa("(a+b)(a+b)"), True)
# + run_control={"frozen": false, "read_only": false}
aplusb_aplusb
# + run_control={"frozen": false, "read_only": false}
aplusb_aplusb.source
# + run_control={"frozen": false, "read_only": false}
# NFA for "odd # of 1s OR exactly three 0s", drawn both ways.
DOodd1s_or_30s = dotObj_nfa(re2nfa("0* 1 0* (1 0* 1 0*)* + 1* 0 1* 0 1* 0 1* "), True)
# + run_control={"frozen": false, "read_only": false}
DOodd1s_or_30s
# + run_control={"frozen": false, "read_only": false}
DOodd1s_or_30s = dotObj_nfa(re2nfa("0* 1 0* (1 0* 1 0*)* + 1* 0 1* 0 1* 0 1* "), False)
# + run_control={"frozen": false, "read_only": false}
DOodd1s_or_30s
# + run_control={"frozen": false, "read_only": false}
DOodd1s_or_30s.source
# + run_control={"frozen": false, "read_only": false}
# + run_control={"frozen": false, "read_only": false}
DOodd1s_or_30s_mind = dotObj_dfa(min_dfa(nfa2dfa(re2nfa("0* 1 0* (1 0* 1 0*)* + 1* 0 1* 0 1* 0 1* "))))
DOodd1s_or_30s_mind
# + run_control={"frozen": false, "read_only": false}
DOodd1s_or_30s_mind.source
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Designing DFA that accept within a Hamming Distance
# + [markdown] run_control={"frozen": false, "read_only": false}
# Given a regular language, say (0+1)* 0101 (0+1)* (i.e., all bit-strings with an occurrence of 0101 in it), let us come up with
#
# 1. An RE that represents strings within a Hamming distance of 2 from strings in this language
#
# 2. An NFA that represents strings within a Hamming distance of 2 from strings in this language
#
# + run_control={"frozen": false, "read_only": false}
# RE for strings containing a Hamming-distance<=2 perturbation of 0101:
# the middle union lists every pattern with at most two flipped positions.
h2_0101_re = ("(0+1)* ( (0+1)(0+1)01 +" +
              " (0+1)1(0+1)1 +" +
              " (0+1)10(0+1) +" +
              " 0(0+1)(0+1)1 +" +
              " 0(0+1)0(0+1) +" +
              " 01(0+1)(0+1) )" +
              "(0+1)*")
# + run_control={"frozen": false, "read_only": false}
h2_0101_re
# + run_control={"frozen": false, "read_only": false}
minD_h2_0101_re = min_dfa(nfa2dfa(re2nfa(h2_0101_re)))
# + run_control={"frozen": false, "read_only": false}
DO_minD_h2_0101_re = dotObj_dfa(minD_h2_0101_re)
# + run_control={"frozen": false, "read_only": false}
DO_minD_h2_0101_re
# + run_control={"frozen": false, "read_only": false}
DO_minD_h2_0101_re.source
# + run_control={"frozen": false, "read_only": false}
# Hand-written NFA (md2mc markdown syntax) for the same language; the
# string content is the machine description and must not be altered.
h2_0101_nfa_md = '''
NFA
!!--------------------------------------------
!! We are supposed to process (0+1)*0101(0+1)*
!! with up to two "dings" allowed
!!
!! Approach: Silently error-correct, but remember
!! each "ding" in a new state name.
!! After two dings, do not error-correct anymore
!!--------------------------------------------
!!-- pattern for (0+1)* is the usual
!!-- no error-correction needed here :-)
I : 0 | 1 -> I
!!-- Now comes the opportunity to exit I via 0101
!!-- The state names are A,B,C,D with ding-count
!!-- Thus A0 is A with 0 dings
!!-- C2 is C with 2 dings; etc
!!-- Ding-less traversal -- how lucky!
I : 0 -> A0
A0 : 1 -> B0
B0 : 0 -> C0
C0 : 1 -> F
!!-- Phew, finally at F
F : 0 | 1 -> F
!!-- First ding in any of these cases
I : 1 -> A1
A0 : 0 -> B1
B0 : 1 -> C1
C0 : 0 -> F !!-- ding-recording un-nec.; just goto F
!!-- Second ding in any of these cases
A1 : 0 -> B2
B1 : 1 -> C2
C1 : 0 -> F !!-- ding-recording un-nec.; just goto F
!!-- No more dings allowed!
B2 : 0 -> C2
C2 : 1 -> F
!!-- Allow one-dingers to finish fine
A1 : 1 -> B1
B1 : 0 -> C1
C1 : 1 -> F
'''
# + run_control={"frozen": false, "read_only": false}
h2_0101_nfa = md2mc(h2_0101_nfa_md)
# + run_control={"frozen": false, "read_only": false}
DO_h2_0101_nfa = dotObj_nfa(h2_0101_nfa)
DO_h2_0101_nfa
# + run_control={"frozen": false, "read_only": false}
DO_h2_0101_nfa.source
# + run_control={"frozen": false, "read_only": false}
minD_h2_0101_nfa = min_dfa(nfa2dfa(h2_0101_nfa))
DO_minD_h2_0101_nfa = dotObj_dfa(minD_h2_0101_nfa)
DO_minD_h2_0101_nfa
# + run_control={"frozen": false, "read_only": false}
DO_minD_h2_0101_nfa.source
# + run_control={"frozen": false, "read_only": false}
# The RE-derived and NFA-derived minimal DFAs should be isomorphic.
iso_dfa(minD_h2_0101_re, minD_h2_0101_nfa)
# + run_control={"frozen": false, "read_only": false}
# Small example NFA; note the '' (epsilon) move I -> B, and the
# commented-out (!!) transition A : b -> I in the md2mc description.
nfaEx = md2mc('''NFA
I : '' -> B
I : a -> A
!!A : b -> I
A : q -> F
A : r -> B
B : s -> B
B : p -> F
F : t -> A
''')
DO_nfaEx = dotObj_nfa(nfaEx)
DO_nfaEx
# + run_control={"frozen": false, "read_only": false}
DO_nfaEx.source
# + run_control={"frozen": false, "read_only": false}
GNFA_nfaEx = mk_gnfa(nfaEx)
# + run_control={"frozen": false, "read_only": false}
dotObj_gnfa(GNFA_nfaEx).source
# + run_control={"frozen": false, "read_only": false}
help(del_gnfa_states)
# + run_control={"frozen": false, "read_only": false}
# Eliminate GNFA states, collecting per-step drawings and the resulting RE.
(Gfinal, do_list, final_re) = del_gnfa_states(GNFA_nfaEx)
# + run_control={"frozen": false, "read_only": false}
final_re
# + run_control={"frozen": false, "read_only": false}
final_re
# + run_control={"frozen": false, "read_only": false}
# + run_control={"frozen": false, "read_only": false}
do_list[0]
# + run_control={"frozen": false, "read_only": false}
do_list[0].source
# + run_control={"frozen": false, "read_only": false}
do_list[1]
# + run_control={"frozen": false, "read_only": false}
do_list[1].source
# + run_control={"frozen": false, "read_only": false}
do_list[1]
# + run_control={"frozen": false, "read_only": false}
# + run_control={"frozen": false, "read_only": false}
do_list[2]
# + run_control={"frozen": false, "read_only": false}
do_list[2].source
# + run_control={"frozen": false, "read_only": false}
do_list[3]
# + run_control={"frozen": false, "read_only": false}
do_list[3].source
# + run_control={"frozen": false, "read_only": false}
do_list[4]
# + run_control={"frozen": false, "read_only": false}
do_list[4].source
# + run_control={"frozen": false, "read_only": false}
# Round-trip check: RE-derived minimal DFA vs directly-minimized DFA.
re_mindfa = min_dfa(nfa2dfa(re2nfa(final_re)))
# + run_control={"frozen": false, "read_only": false}
dir_mindfa = min_dfa(nfa2dfa(nfaEx))
# + run_control={"frozen": false, "read_only": false}
iso_dfa(re_mindfa,dir_mindfa)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(dir_mindfa)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(dir_mindfa).source
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(re_mindfa)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(re_mindfa).source
# + run_control={"frozen": false, "read_only": false}
dot_san_str('""')
# + run_control={"frozen": false, "read_only": false}
# A ladder-shaped NFA with two parallel symbol tracks merging at ABk states.
nfaExp = md2mc('''NFA
I : a -> A1
I : n -> B1
A1 : b -> AB1
B1 : o -> AB1
AB1 : c -> A2
AB1 : p -> B2
A2 : d -> AB2
B2 : q -> AB2
AB2 : e -> A3
AB2 : r -> B3
A3 : f -> AB3
B3 : s -> AB3
AB3 : g -> A4
AB3 : t -> B4
A4 : h -> FAB4
B4 : u -> FAB4
''')
DO_nfaExp = dotObj_nfa(nfaExp)
DO_nfaExp
# + run_control={"frozen": false, "read_only": false}
gnfaExp = mk_gnfa(nfaExp)
DO_gnfaExp = dotObj_gnfa(gnfaExp)
DO_gnfaExp
# + run_control={"frozen": false, "read_only": false}
(Gfinal, dotObj_List, final_re_str) = del_gnfa_states(gnfaExp)
# + run_control={"frozen": false, "read_only": false}
final_re_str
# + run_control={"frozen": false, "read_only": false}
# NFA with multiple initial (I1-I3) and final (F1,F2) states funneled through X.
nfaExer = md2mc('''NFA
I1 : a -> X
I2 : b -> X
I3 : c -> X
X : p | q -> X
X : m -> F1
X : n -> F2
''')
DO_nfaExer = dotObj_nfa(nfaExer)
DO_nfaExer
gnfaExer = mk_gnfa(nfaExer)
DO_gnfaExer = dotObj_gnfa(gnfaExer)
DO_gnfaExer
(G, DO, RE) = del_gnfa_states(gnfaExer)
# + run_control={"frozen": false, "read_only": false}
RE
# + run_control={"frozen": false, "read_only": false}
DO_nfaExer
# + run_control={"frozen": false, "read_only": false}
DO_nfaExer.source
# + run_control={"frozen": false, "read_only": false}
DO[0]
# + run_control={"frozen": false, "read_only": false}
# + run_control={"frozen": false, "read_only": false}
DO[1]
# + run_control={"frozen": false, "read_only": false}
DO[2]
# + run_control={"frozen": false, "read_only": false}
DO[3]
# + run_control={"frozen": false, "read_only": false}
DO[4]
# + run_control={"frozen": false, "read_only": false}
DO[5]
# + run_control={"frozen": false, "read_only": false}
DO[6]
# + run_control={"frozen": false, "read_only": false}
# Same machine again, but this time forcing the state-elimination order
# via the DelList keyword argument of del_gnfa_states.
nfaExer = md2mc('''NFA
I1 : a -> X
I2 : b -> X
I3 : c -> X
X : p | q -> X
X : m -> F1
X : n -> F2
''')
DO_nfaExer = dotObj_nfa(nfaExer)
DO_nfaExer
gnfaExer = mk_gnfa(nfaExer)
DO_gnfaExer = dotObj_gnfa(gnfaExer)
DO_gnfaExer
(G, DO, RE) = del_gnfa_states(gnfaExer, DelList=["X", "I1", "I2","I3","F1","F2"])
# + run_control={"frozen": false, "read_only": false}
DO_gnfaExer
# + run_control={"frozen": false, "read_only": false}
len(DO)
# + run_control={"frozen": false, "read_only": false}
DO_gnfaExer.source
# + run_control={"frozen": false, "read_only": false}
RE
# + run_control={"frozen": false, "read_only": false}
DO[0]
# + run_control={"frozen": false, "read_only": false}
DO[1]
# + run_control={"frozen": false, "read_only": false}
DO[1].source
# + run_control={"frozen": false, "read_only": false}
DO[2]
# + run_control={"frozen": false, "read_only": false}
DO[3]
# + run_control={"frozen": false, "read_only": false}
DO[4]
# + run_control={"frozen": false, "read_only": false}
DO[5]
# + run_control={"frozen": false, "read_only": false}
DO[6]
# + run_control={"frozen": false, "read_only": false}
RE
# + run_control={"frozen": false, "read_only": false}
# + run_control={"frozen": false, "read_only": false}
# Unary "postage stamp" languages: (1^11 + 1^13)* etc.  The minimal-DFA
# state counts below are compared against p*q - p - q (Sylvester/Frobenius
# number) for coprime p, q.
sylv_11_13 = min_dfa(nfa2dfa(re2nfa("(11111111111+1111111111111)*")))
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(sylv_11_13)
# + run_control={"frozen": false, "read_only": false}
sylv_11_13
# + run_control={"frozen": false, "read_only": false}
sylv_3_5 = min_dfa(nfa2dfa(re2nfa("(111+11111)*")))
# + run_control={"frozen": false, "read_only": false}
# Compare the state count (minus 2) with 3*5-3-5 computed next.
len(sylv_3_5["Q"]) - 2
# + run_control={"frozen": false, "read_only": false}
3*5-3-5
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(sylv_3_5)
# + run_control={"frozen": false, "read_only": false}
dotObj_nfa(re2nfa("(111+11111)*"))
# + run_control={"frozen": false, "read_only": false}
nfa_3_5 = re2nfa("(111+11111)*")
# + run_control={"frozen": false, "read_only": false}
nfa_3_5
# + run_control={"frozen": false, "read_only": false}
dotObj_nfa(nfa_3_5)
# + run_control={"frozen": false, "read_only": false}
# GNFA elimination on the (111+11111)* machine, viewing every step.
Gnfa_3_5 = mk_gnfa(nfa_3_5)
# + run_control={"frozen": false, "read_only": false}
Gnfa_3_5
# + run_control={"frozen": false, "read_only": false}
dotObj_gnfa(Gnfa_3_5)
# + run_control={"frozen": false, "read_only": false}
(Gfinal, dotObj_List, final_re_str) = del_gnfa_states(Gnfa_3_5)
# + run_control={"frozen": false, "read_only": false}
len(dotObj_List)
# + run_control={"frozen": false, "read_only": false}
dotObj_List[0]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[1]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[2]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[3]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[4]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[5]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[6]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[7]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[8]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[9]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[10]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[11]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[12]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[13]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[14]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[15]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[16]
# + run_control={"frozen": false, "read_only": false}
dotObj_List[17]
# + run_control={"frozen": false, "read_only": false}
len(dotObj_List)
# + run_control={"frozen": false, "read_only": false}
final_re_str
# + run_control={"frozen": false, "read_only": false}
dotObj_gnfa(mk_gnfa(re2nfa("(111+11111)*")))
# + run_control={"frozen": false, "read_only": false}
# Round-trip the derived RE back to a minimal DFA.
minD_renfare = min_dfa(nfa2dfa(re2nfa(final_re_str)))
# + run_control={"frozen": false, "read_only": false}
DOminD_renfare = dotObj_dfa(minD_renfare)
# + run_control={"frozen": false, "read_only": false}
DOminD_renfare
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(nfa_3_5)))
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(111+11111)*"))))
# + run_control={"frozen": false, "read_only": false}
sylv_3_5 = min_dfa(nfa2dfa(re2nfa("(111+11111)*")))
# + run_control={"frozen": false, "read_only": false}
len(sylv_3_5["Q"]) - 1 - 1
# + run_control={"frozen": false, "read_only": false}
DO_sylv_3_5 = dotObj_dfa(sylv_3_5)
# + run_control={"frozen": false, "read_only": false}
DO_sylv_3_5.source
# + run_control={"frozen": false, "read_only": false}
# Non-coprime case (3 and 6) for contrast with the Sylvester examples.
non_sylv_3_6 = min_dfa(nfa2dfa(re2nfa("(111+111111)*")))
# + run_control={"frozen": false, "read_only": false}
DO_non_sylv_3_6 = dotObj_dfa(non_sylv_3_6)
# + run_control={"frozen": false, "read_only": false}
DO_non_sylv_3_6
# + run_control={"frozen": false, "read_only": false}
DO_non_sylv_3_6.source
# + run_control={"frozen": false, "read_only": false}
non_sylv_prefix_and_3_6 = min_dfa(nfa2dfa(re2nfa("111(111+111111)*")))
# + run_control={"frozen": false, "read_only": false}
DO_non = dotObj_dfa(non_sylv_prefix_and_3_6)
# + run_control={"frozen": false, "read_only": false}
DO_non
# + run_control={"frozen": false, "read_only": false}
DO_non.source
# + run_control={"frozen": false, "read_only": false}
stamp_3_5_7 = min_dfa(nfa2dfa(re2nfa("(111+11111+1111111)*")))
# + run_control={"frozen": false, "read_only": false}
DOstamp_3_5_7 = dotObj_dfa(stamp_3_5_7)
# + run_control={"frozen": false, "read_only": false}
DOstamp_3_5_7
# + run_control={"frozen": false, "read_only": false}
DOstamp_3_5_7.source
# + run_control={"frozen": false, "read_only": false}
len(min_dfa(nfa2dfa(re2nfa("(111+1111111111111)*")))["Q"]) - 2
# + run_control={"frozen": false, "read_only": false}
# DFA accepting strings that begin and end with the same symbol (epsilon too).
dfaBESame = md2mc('''
DFA !! Begins and ends with same; epsilon allowed
IF : 0 -> F0
IF : 1 -> F1
!!
F0 : 0 -> F0
F0 : 1 -> S01
S01 : 1 -> S01
S01 : 0 -> F0
!!
F1 : 1 -> F1
F1 : 0 -> S10
S10 : 0 -> S10
S10 : 1 -> F1
''')
DOdfaBESame = dotObj_dfa(dfaBESame)
DOdfaBESame
# + run_control={"frozen": false, "read_only": false}
# Apply the homomorphism mapping every symbol to '0'; result is an NFA.
nfaBESame = apply_h_dfa(dfaBESame, lambda x: '0')
# + run_control={"frozen": false, "read_only": false}
nfaBESame
# + run_control={"frozen": false, "read_only": false}
DONFABESame = dotObj_nfa(nfaBESame)
# + run_control={"frozen": false, "read_only": false}
DONFABESame
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(nfa2dfa(nfaBESame))
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(nfaBESame)))
# + run_control={"frozen": false, "read_only": false}
# A second DFA for the same kind of homomorphism experiment.
blimp = md2mc('''
DFA
I1 : a -> F2
I1 : b -> F3
F2 : a -> S8
F2 : b -> S5
F3 : a -> S7
F3 : b -> S4
S4 : a | b -> F6
S5 : a | b -> F6
F6 : a | b -> F6
S7 : a | b -> F6
S8 : a -> F6
S8 : b -> F9
F9 : a -> F9
F9 : b -> F6
''')
# + run_control={"frozen": false, "read_only": false}
# Collapse the alphabet to the single symbol 'a'.
blimpnfa = apply_h_dfa(blimp, lambda x: 'a')
# + run_control={"frozen": false, "read_only": false}
dotObj_nfa(blimpnfa)
# + run_control={"frozen": false, "read_only": false}
dotObj_nfa(blimpnfa).source
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(nfa2dfa(blimpnfa))
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(blimpnfa)))
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(blimpnfa))).source
# + run_control={"frozen": false, "read_only": false}
# Small DFA used to demonstrate drawing with an explicit black-hole state.
testdfa = md2mc('''DFA
I : 0 | 1 -> I
I : 2 -> F
''')
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa_w_bh(testdfa)
# + run_control={"frozen": false, "read_only": false}
help(dotObj_dfa_w_bh)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(testdfa, FuseEdges=True)
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(111+11111)*"))))
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(111+1111111111111)*"))))
# + run_control={"frozen": false, "read_only": false}
# ResetStNum resets jove's internal state-numbering counter, so the
# regenerated DFA below gets fresh state names.
ResetStNum()
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(111+1111111111111)*"))))
# + run_control={"frozen": false, "read_only": false}
ResetStNum()
# + run_control={"frozen": false, "read_only": false}
dotObj_dfa(min_dfa(nfa2dfa(re2nfa("(111+1111111111111)*"))))
# + run_control={"frozen": false, "read_only": false}
|
notebooks/driver/Drive_AllRegularOps.ipynb
|