code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CheckV
#
# Compare the quality of viral predictions with checkv.
#
# We ran checkv on every virus, and calculated its _quality_. Per [the CheckV paper](https://www.nature.com/articles/s41587-020-00774-7), the program classifies each sequence into one of five quality tiers ([Fig. 1](https://www.nature.com/articles/s41587-020-00774-7#Fig1))—complete, high quality (>90% completeness), medium quality (50–90% completeness), low quality (0–50% completeness) or undetermined quality (no completeness estimate available).
#
# The _not determined_ tier essentially means that the sequence has no similarity to either the 24,834 NCBI GenBank sequences or the 76,262 DTR Viral Contigs; it does not imply that the prediction is good or bad.
#
# +
# A lot of this is not used, but we import it so we have it later!
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import pandas as pd
import seaborn as sns
import numpy as np
import math
import re
from PhiSpyAnalysis import theils_u, DateConverter, printmd
from PhiSpyAnalysis import read_phages, read_gtdb, read_checkv, read_base_pp, read_categories, read_metadata, read_transposons
from scipy.stats import pearsonr, f_oneway
from sklearn.linear_model import LinearRegression
from sklearn import decomposition
from sklearn.ensemble import RandomForestClassifier
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.multicomp import pairwise_tukeyhsd, tukeyhsd, MultiComparison
from statsmodels.multivariate.manova import MANOVA
# -
# # Read the phage counts
#
# This is our generic reading of phage counts that we use each time.
# Column holding the NCBI GenBank assembly accession (used for joins below).
acccol = 'ncbi_genbank_assembly_accession'
# Read every phage prediction; maxcontigs=-1 presumably disables the
# contig-count filter — TODO confirm against read_phages' docstring.
phagesdf = read_phages(maxcontigs=-1)
phagesdf
# # Read the CheckV output
#
# We have `small` and `regular` datasets. Use the `small` for debugging!
checkv = read_checkv()
checkv
base_pp = read_base_pp()
base_pp
# ### Convert the checkv_quality into counts per genome so we can correlate it with other metrics
# start by merging the two dataframes on the prophage identifier
checkvb = pd.merge(base_pp, checkv, on='Prophage', how='inner')
checkvb
# ## Count the number of occurrences
#
# Group by (assembly, quality) and count rows, then pivot the five quality
# tiers into one column per tier so each genome becomes a single wide row.
acv = checkvb.groupby(['assembly_accession', 'checkv_quality']).agg({'checkv_quality': 'size'})
acvr = acv.rename({'checkv_quality': 'checkv_count'}, axis=1).reset_index()
# Seed the wide table with the Not-determined counts, then merge in each
# remaining tier one at a time.
tmp = acvr[acvr['checkv_quality'] == 'Not-determined'][['assembly_accession', 'checkv_count']].rename({'checkv_count': 'Not-determined'}, axis=1)
for t in ['Low-quality', 'Medium-quality', 'High-quality', 'Complete']:
    tmp2 = acvr[acvr['checkv_quality'] == t][['assembly_accession', 'checkv_count']].rename({'checkv_count': t}, axis=1)
    # outer merge keeps genomes that lack this quality tier entirely
    tmp = pd.merge(tmp, tmp2, on='assembly_accession', how='outer')
# Genomes with no prophage of a given tier get NaN from the outer merge; count those as 0.
countdf = tmp.fillna(0)
countdf
# Long-form (one row per assembly/quality pair) version of the same counts.
acv = checkvb.groupby(['assembly_accession', 'checkv_quality'], group_keys=False).agg({'checkv_quality': 'size'})
acvr = acv.rename({'checkv_quality': 'checkv_count'}, axis=1)
colcountdf = acvr.reset_index()
colcountdf
# ## Merge the phage counts and CheckV data
phagesdf
phagecv = pd.merge(phagesdf, countdf, on='assembly_accession', how='inner')
phagecv
# # Plot the quality of predictions against the length of the genome
#
# Here we just use _number of contigs_ as the proxy for "genome quality". We also explore N<sub>50</sub> below, but this is probably the most meaningful plot
# +
# Scatter of prediction counts vs. genome contig count, one layer per
# CheckV quality tier, all sharing one axes object.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(11, 8))
cols = ['Not-determined', "Low-quality", "Medium-quality", "High-quality", "Complete"]
for c in cols:
    ax = sns.scatterplot(x='Contigs', y=c, data=phagecv, label=c, ax=ax, alpha=0.8)
ax.set_ylabel('Number of predictions')
ax.set_xlabel('Number of contigs in genome')
# Anchor the axes at the origin and hide the top/right spines.
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
plt.tight_layout()
l = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# fig.savefig('images/contigs_checkvqual.svg')
# +
# NOTE(review): the original cell referenced `phagecvsm` before it was
# defined (it is only created in a later cell). Define it here so the cell
# is self-contained; the < 2000 contig cutoff mirrors the later line-plot
# cell — TODO confirm the intended cutoff for this figure.
phagecvsm = phagecv[phagecv['Contigs'] < 2000]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(11, 8))
cols = ['Not-determined', "Low-quality", "Medium-quality", "High-quality", "Complete"]
for c in cols:
    ax = sns.scatterplot(x='Contigs', y=c, data=phagecvsm, label=c, ax=ax, alpha=0.8)
ax.set_ylabel('Number of predicted prophage of each type')
ax.set_xlabel('Number of contigs in genome')
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.get_legend().set_title("CheckV quality")
l = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.tight_layout()
fig.savefig('images/contigs_checkvqualsm.png')
fig.savefig('images/contigs_checkvqualsm.svg')
# +
# Same data as lines instead of points, restricted to < 2,000 contigs.
phagecvsm = phagecv[phagecv['Contigs'] < 2000]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(11, 8))
cols = ['Not-determined', "Low-quality", "Medium-quality", "High-quality", "Complete"]
for c in cols:
    ax = sns.lineplot(x='Contigs', y=c, data=phagecvsm, label=c, ax=ax)
ax.set_ylabel('Number of predicted prophage')
ax.set_xlabel('Number of contigs in genome')
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_color('grey')
ax.spines['left'].set_color('grey')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
l = plt.legend(title="CheckV quality", bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.tight_layout()
fig.savefig('images/contigs_checkvqual_lines.png')
fig.savefig('images/contigs_checkvqual_lines.svg')
# +
# Zoom in on well-assembled genomes: fewer than 100 contigs.
phagecvsm = phagecv[phagecv['Contigs'] < 100]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(11, 8))
cols = ['Not-determined', "Low-quality", "Medium-quality", "High-quality", "Complete"]
for c in cols:
    ax = sns.lineplot(x='Contigs', y=c, data=phagecvsm, label=c, ax=ax)
ax.set_ylabel('Number of predicted prophage')
ax.set_xlabel('Number of contigs in genome')
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_color('grey')
ax.spines['left'].set_color('grey')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
l = plt.legend(title="CheckV quality", bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.tight_layout()
fig.savefig('images/contigs_checkvqualsm_lines.png')
fig.savefig('images/contigs_checkvqualsm_lines.svg')
# -
# ## Bin the number of contigs per genome
#
# This is another way of visualizing the same data. We create 100bp bins and use `pd.cut` to assign each _number of contigs_ to a bin, and then we can plot the data again
# +
bins = []
for i in range(0,2100,100):
bins.append(i)
labels = []
for i in range(len(bins)-1):
labels.append(i)
# labels.append(f"{bins[i]}-{bins[i+1]}")
phagecv['Contig Bins'] = pd.cut(phagecv['Contigs'], bins=bins, labels=labels)
phagecv
# -
# ### Plot all the phage quality scores
# +
# Long-form table: one row per (quality tier, contig bin, count), skipping
# NaN counts so the violin plot only sees real observations.
data = []
cols = ['Not-determined', "Low-quality", "Medium-quality", "High-quality", "Complete"]
for index, row in phagecv.iterrows():
    for c in cols:
        if not np.isnan(row[c]):
            data.append([c, row['Contig Bins'], row[c]])
ndf = pd.DataFrame(data, columns=['checkv_quality', 'Contig Bins', 'prophage count'])
sns.set(rc={'figure.figsize':(17.55,8.27)})
ax = sns.violinplot(x='Contig Bins', y='prophage count', hue='checkv_quality', scale='count', jitter=True, data=ndf)
ax.set_xlabel("Contig Bin Range (bp)")
l = ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
# -
# ### Group the phage qualities into Low and High and just plot those.
#
# Note that this emphasizes the _not determined_ group, but since CheckV doesn't know what they are, I don't think we should continue with them
# +
# Collapse the five CheckV tiers into two groups: Not-determined -> "Low",
# everything that got a completeness estimate -> "High".
data = []
cols = {'Not-determined' : "Low", "Low-quality" : "High", "Medium-quality" : "High", "High-quality" : "High", "Complete" : "High"}
for index, row in phagecv.iterrows():
    for c in cols:
        if not np.isnan(row[c]):
            data.append([cols[c], row['Contig Bins'], row[c]])
ndf = pd.DataFrame(data, columns=['checkv_quality', 'Contig Bins', 'prophage count'])
sns.set(rc={'figure.figsize':(17.55,8.27)})
ax = sns.violinplot(x='Contig Bins', y='prophage count', hue='checkv_quality', scale='count', jitter=True, data=ndf)
ax.set_xlabel("Contig Bin Range (bp)")
l = ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
# -
# ## Read the GTDB data to get N50 etc
#
# In the above panels, we used number of contigs as a proxy for genome quality. We can also use N<sub>50</sub> which is included in the GTDB data table. Here we read that and merge it with our phagecv data.
# Fetch the GTDB metadata table and attach each genome's contig N50 to the
# per-genome CheckV quality counts (inner join drops genomes absent from GTDB).
gtdb = read_gtdb()
contqual = pd.merge(phagecv, gtdb[['assembly_accession', 'n50_contigs']], on='assembly_accession', how='inner')
contqual
# ### Plot Qualities by N<sub>50</sub>
#
# Here we plot the N<sub>50</sub> on the x-axis, the number of prophages on the y-axis, and we colour by the quality of the predictions.
#
# Recall that a higher N<sub>50</sub> is _probably_ better, and a complete genome should have an N<sub>50</sub> between 2 and 5 Mbp!
# +
# Scatter of prediction counts vs. contig N50, one layer per quality tier.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(11, 8))
cols = ['Not-determined', "Low-quality", "Medium-quality", "High-quality", "Complete"]
for c in cols:
    ax = sns.scatterplot(x='n50_contigs', y=c, data=contqual, label=c, ax=ax)
ax.set_ylabel('Number of predictions')
ax.set_xlabel('Contig N50')
# Anchor the axes at the origin and hide the top/right spines.
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
plt.tight_layout()
l = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# -
# ### Bin the N<sub>50</sub>
#
# As before, we can create a set of bins and group the data by N<sub>50</sub> bin.
bins = []
labels = []
for i in range(9):
bins.append(i * 2e6)
labels.append(f"{i*2}-{(i+1)*2}")
#labels.append(f"{i/10}")
labels = labels[:-1]
contqual['n50bins'] = pd.cut(contqual['n50_contigs'], bins=bins, labels=labels)
contqual
# ### Convert the binned data into a new data frame
#
# Here we make a new data frame so we can group things for the violin plot.
# +
# Long-form table: one row per (quality tier, N50 bin, count), skipping NaNs.
data = []
cols = ['Not-determined', "Low-quality", "Medium-quality", "High-quality", "Complete"]
for index, row in contqual.iterrows():
    for c in cols:
        if not np.isnan(row[c]):
            data.append([c, row['n50bins'], row[c]])
ndf = pd.DataFrame(data, columns=['checkv_quality', 'N50 bin', 'prophage count'])
ndf
# -
# ### Create a new violin plot of the quality vs the N<sub>50</sub>
#
# This is mostly to please <NAME>.
# Violin plot of prophage counts per N50 bin, split by CheckV quality.
# NOTE(review): in newer seaborn, `scale=` was renamed `density_norm=` and
# `jitter` is not a violinplot argument — confirm against the pinned version.
sns.set(rc={'figure.figsize':(17.55,8.27)})
#ax = sns.violinplot(x='N50 bin', y='prophage count', hue='checkv_quality', inner="points", jitter=True, data=ndf)
ax = sns.violinplot(x='N50 bin', y='prophage count', hue='checkv_quality', scale='count', jitter=True, data=ndf)
ax.set_xlabel("N50 bin range (10^6 bp)")
# handles, labels = ax.get_legend_handles_labels()
# l = plt.legend(handles, labels, bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.)
# # Find good quality small phages
#
# This is for <NAME> who is interested in the sub 10-kb phages
# Summarise the sub-6 kb prophages by CheckV quality tier.
print(f"For the small phages (<6kb) so far, we have {checkv[checkv['contig_length'] < 6000].shape[0]:,} prophages, and these break down into the following checkv quality results:")
checkv[checkv['contig_length'] < 6000].groupby('checkv_quality').size()
# Flip to True to regenerate the small-phage export for the cluster job.
if False:
    checkv[(checkv['contig_length'] < 6000) & ((checkv['checkv_quality'] == 'High-quality') | (checkv['checkv_quality'] == 'Medium-quality'))].to_csv('../data/smallphages.txt', sep="\t")
    printmd("We updated **../data/smallphages.txt** and so now you can run `sbatch smallphages.slurm` on the cluster to make tarballs of those prophage genomes")
| DataAnalysis/CheckV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# ## AutoGluon Sample
# +
# # ! pip install --upgrade "mxnet<2.0.0"
# # ! pip install autogluon.tabular
# # ! pip install autogluon.core
# -
# NOTE(review): this uses the legacy AutoGluon `TabularPrediction` task API;
# modern releases expose `TabularPredictor` instead — confirm the pinned
# autogluon version before running.
import autogluon.core as ag
from autogluon.tabular import TabularPrediction as task
# Load the census "adult income" training data hosted by AutoGluon.
train_data = task.Dataset(file_path='https://autogluon.s3.amazonaws.com/datasets/Inc/train.csv')
subsample_size = 500 # subsample subset of data for faster demo, try setting this to much larger values
train_data = train_data.sample(n=subsample_size, random_state=0)
print(train_data.head())
# The column we want to predict.
label_column = 'class'
print("Summary of class variable: \n", train_data[label_column].describe())
# NOTE(review): `dir` shadows the builtin of the same name.
dir = 'agModels-predictClass' # specifies folder where to store trained models
predictor = task.fit(train_data=train_data, label=label_column, output_directory=dir)
test_data = task.Dataset(file_path='https://autogluon.s3.amazonaws.com/datasets/Inc/test.csv')
y_test = test_data[label_column] # values to predict
test_data_nolab = test_data.drop(labels=[label_column],axis=1) # delete label column to prove we're not cheating
print(test_data_nolab.head())
# +
predictor = task.load(dir) # unnecessary, just demonstrates how to load previously-trained predictor from file
y_pred = predictor.predict(test_data_nolab)
print("Predictions: ", y_pred)
perf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred, auxiliary_metrics=True)
# -
predictor.leaderboard(test_data, silent=True)
| brazil_ecommerce/working/food_delivery/autoglueon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Train a Mario-playing RL Agent
# ================
#
# Authors: `<NAME> <https://github.com/YuansongFeng>`__, `<NAME> <https://github.com/suraj813>`__, `<NAME> <https://github.com/hw26>`__, `<NAME> <https://github.com/GuoYuzhang>`__.
#
#
# This tutorial walks you through the fundamentals of Deep Reinforcement
# Learning. At the end, you will implement an AI-powered Mario (using
# `Double Deep Q-Networks <https://arxiv.org/pdf/1509.06461.pdf>`__) that
# can play the game by itself.
#
# Although no prior knowledge of RL is necessary for this tutorial, you
# can familiarize yourself with these RL
# `concepts <https://spinningup.openai.com/en/latest/spinningup/rl_intro.html>`__,
# and have this handy
# `cheatsheet <https://colab.research.google.com/drive/1eN33dPVtdPViiS1njTW_-r-IYCDTFU7N>`__
# as your companion. The full code is available
# `here <https://github.com/yuansongFeng/MadMario/>`__.
#
# .. figure:: /_static/img/mario.gif
# :alt: mario
#
# +
# # !pip install gym-super-mario-bros==7.3.0
import torch
from torch import nn
from torchvision import transforms as T
from PIL import Image
import numpy as np
from pathlib import Path
from collections import deque
import random, datetime, os, copy
# Gym is an OpenAI toolkit for RL
import gym
from gym.spaces import Box
from gym.wrappers import FrameStack
# NES Emulator for OpenAI Gym
from nes_py.wrappers import JoypadSpace
# Super Mario environment for OpenAI Gym
import gym_super_mario_bros
# -
# RL Definitions
# """"""""""""""""""
#
# **Environment** The world that an agent interacts with and learns from.
#
# **Action** $a$ : How the Agent responds to the Environment. The
# set of all possible Actions is called *action-space*.
#
# **State** $s$ : The current characteristic of the Environment. The
# set of all possible States the Environment can be in is called
# *state-space*.
#
# **Reward** $r$ : Reward is the key feedback from Environment to
# Agent. It is what drives the Agent to learn and to change its future
# action. An aggregation of rewards over multiple time steps is called
# **Return**.
#
# **Optimal Action-Value function** $Q^*(s,a)$ : Gives the expected
# return if you start in state $s$, take an arbitrary action
# $a$, and then for each future time step take the action that
# maximizes returns. $Q$ can be said to stand for the “quality” of
# the action in a state. We try to approximate this function.
#
#
#
# Environment
# """"""""""""""""
#
# Initialize Environment
# ------------------------
#
# In Mario, the environment consists of tubes, mushrooms and other
# components.
#
# When Mario makes an action, the environment responds with the changed
# (next) state, reward and other info.
#
#
#
# +
# Initialize Super Mario environment
# Initialize Super Mario environment
env = gym_super_mario_bros.make("SuperMarioBros-1-1-v0")
# Limit the action-space to
# 0. walk right
# 1. jump right
env = JoypadSpace(env, [["right"], ["right", "A"]])
env.reset()
# Take one step (action 0 = walk right) just to inspect the step() return values.
next_state, reward, done, info = env.step(action=0)
print(f"{next_state.shape},\n {reward},\n {done},\n {info}")
# -
# Preprocess Environment
# ------------------------
#
# Environment data is returned to the agent in ``next_state``. As you saw
# above, each state is represented by a ``[3, 240, 256]`` size array.
# Often that is more information than our agent needs; for instance,
# Mario’s actions do not depend on the color of the pipes or the sky!
#
# We use **Wrappers** to preprocess environment data before sending it to
# the agent.
#
# ``GrayScaleObservation`` is a common wrapper to transform an RGB image
# to grayscale; doing so reduces the size of the state representation
# without losing useful information. Now the size of each state:
# ``[1, 240, 256]``
#
# ``ResizeObservation`` downsamples each observation into a square image.
# New size: ``[1, 84, 84]``
#
# ``SkipFrame`` is a custom wrapper that inherits from ``gym.Wrapper`` and
# implements the ``step()`` function. Because consecutive frames don’t
# vary much, we can skip n-intermediate frames without losing much
# information. The n-th frame aggregates rewards accumulated over each
# skipped frame.
#
# ``FrameStack`` is a wrapper that allows us to squash consecutive frames
# of the environment into a single observation point to feed to our
# learning model. This way, we can identify if Mario was landing or
# jumping based on the direction of his movement in the previous several
# frames.
#
#
#
# +
class SkipFrame(gym.Wrapper):
    """Return only every `skip`-th frame, summing the rewards of the skipped frames."""

    def __init__(self, env, skip):
        """Return only every `skip`-th frame"""
        super().__init__(env)
        self._skip = skip  # number of consecutive frames each action is repeated for

    def step(self, action):
        """Repeat action, and sum reward"""
        total_reward = 0.0
        done = False
        for i in range(self._skip):
            # Accumulate reward and repeat the same action
            obs, reward, done, info = self.env.step(action)
            total_reward += reward
            if done:
                break
        # obs/info are those of the last executed frame.
        return obs, total_reward, done, info
class GrayScaleObservation(gym.ObservationWrapper):
    """Convert each RGB observation [H, W, C] into a grayscale tensor [1, H, W]."""

    def __init__(self, env):
        super().__init__(env)
        # Drop the channel dimension from the declared observation space.
        obs_shape = self.observation_space.shape[:2]
        self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)

    def permute_orientation(self, observation):
        # permute [H, W, C] array to [C, H, W] tensor
        observation = np.transpose(observation, (2, 0, 1))
        observation = torch.tensor(observation.copy(), dtype=torch.float)
        return observation

    def observation(self, observation):
        observation = self.permute_orientation(observation)
        transform = T.Grayscale()
        observation = transform(observation)
        return observation
class ResizeObservation(gym.ObservationWrapper):
    """Downsample each observation to `shape` (square when an int is given)."""

    def __init__(self, env, shape):
        super().__init__(env)
        if isinstance(shape, int):
            self.shape = (shape, shape)
        else:
            self.shape = tuple(shape)

        # Keep any trailing (channel) dimensions from the wrapped space.
        obs_shape = self.shape + self.observation_space.shape[2:]
        self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)

    def observation(self, observation):
        # Resize, then rescale pixel values into [0, 1] via Normalize(0, 255).
        transforms = T.Compose(
            [T.Resize(self.shape), T.Normalize(0, 255)]
        )
        observation = transforms(observation).squeeze(0)
        return observation
# Apply Wrappers to environment
env = SkipFrame(env, skip=4)
env = GrayScaleObservation(env)
env = ResizeObservation(env, shape=84)
# Stack the last 4 frames so the agent can infer motion direction.
env = FrameStack(env, num_stack=4)
# -
# After applying the above wrappers to the environment, the final wrapped
# state consists of 4 gray-scaled consecutive frames stacked together, as
# shown above in the image on the left. Each time Mario makes an action,
# the environment responds with a state of this structure. The structure
# is represented by a 3-D array of size ``[4, 84, 84]``.
#
# .. figure:: /_static/img/mario_env.png
# :alt: picture
#
#
#
#
# Agent
# """""""""
#
# We create a class ``Mario`` to represent our agent in the game. Mario
# should be able to:
#
# - **Act** according to the optimal action policy based on the current
# state (of the environment).
#
# - **Remember** experiences. Experience = (current state, current
# action, reward, next state). Mario *caches* and later *recalls* his
# experiences to update his action policy.
#
# - **Learn** a better action policy over time
#
#
#
class Mario:
    """Skeleton of the agent; each method is fleshed out in the following cells."""

    def __init__(self):
        # BUG FIX: the original `def __init__():` omitted `self`, so even
        # `Mario()` would raise a TypeError.
        pass

    def act(self, state):
        """Given a state, choose an epsilon-greedy action"""
        pass

    def cache(self, experience):
        """Add the experience to memory"""
        pass

    def recall(self):
        """Sample experiences from memory"""
        pass

    def learn(self):
        """Update online action value (Q) function with a batch of experiences"""
        pass
# In the following sections, we will populate Mario’s parameters and
# define his functions.
#
#
#
# Act
# --------------
#
# For any given state, an agent can choose to do the most optimal action
# (**exploit**) or a random action (**explore**).
#
# Mario randomly explores with a chance of ``self.exploration_rate``; when
# he chooses to exploit, he relies on ``MarioNet`` (implemented in
# ``Learn`` section) to provide the most optimal action.
#
#
#
class Mario:
    """The agent: acts epsilon-greedily, scoring actions with MarioNet."""

    def __init__(self, state_dim, action_dim, save_dir):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.save_dir = save_dir
        self.use_cuda = torch.cuda.is_available()

        # Mario's DNN to predict the most optimal action - we implement this in the Learn section
        self.net = MarioNet(self.state_dim, self.action_dim).float()
        if self.use_cuda:
            self.net = self.net.to(device="cuda")

        self.exploration_rate = 1
        self.exploration_rate_decay = 0.99999975
        self.exploration_rate_min = 0.1
        self.curr_step = 0

        self.save_every = 5e5  # no. of experiences between saving Mario Net

    def act(self, state):
        """
        Given a state, choose an epsilon-greedy action and update value of step.

        Inputs:
        state(LazyFrame): A single observation of the current state, dimension is (state_dim)
        Outputs:
        action_idx (int): An integer representing which action Mario will perform
        """
        # EXPLORE: with probability exploration_rate, pick a uniformly random action.
        if np.random.rand() < self.exploration_rate:
            action_idx = np.random.randint(self.action_dim)
        # EXPLOIT: pick the action with the highest online-network Q value.
        else:
            state = state.__array__()
            if self.use_cuda:
                state = torch.tensor(state).cuda()
            else:
                state = torch.tensor(state)
            state = state.unsqueeze(0)  # add a batch dimension
            action_values = self.net(state, model="online")
            action_idx = torch.argmax(action_values, axis=1).item()

        # decrease exploration_rate (floored at exploration_rate_min)
        self.exploration_rate *= self.exploration_rate_decay
        self.exploration_rate = max(self.exploration_rate_min, self.exploration_rate)

        # increment step
        self.curr_step += 1
        return action_idx
# Cache and Recall
# ----------------------
#
# These two functions serve as Mario’s “memory” process.
#
# ``cache()``: Each time Mario performs an action, he stores the
# ``experience`` to his memory. His experience includes the current
# *state*, *action* performed, *reward* from the action, the *next state*,
# and whether the game is *done*.
#
# ``recall()``: Mario randomly samples a batch of experiences from his
# memory, and uses that to learn the game.
#
#
#
class Mario(Mario):  # subclassing for continuity
    def __init__(self, state_dim, action_dim, save_dir):
        super().__init__(state_dim, action_dim, save_dir)
        self.memory = deque(maxlen=100000)  # replay buffer; oldest entries are evicted
        self.batch_size = 32

    def cache(self, state, next_state, action, reward, done):
        """
        Store the experience to self.memory (replay buffer)

        Inputs:
        state (LazyFrame),
        next_state (LazyFrame),
        action (int),
        reward (float),
        done(bool))
        """
        # LazyFrame -> ndarray before tensor conversion.
        state = state.__array__()
        next_state = next_state.__array__()

        if self.use_cuda:
            state = torch.tensor(state).cuda()
            next_state = torch.tensor(next_state).cuda()
            action = torch.tensor([action]).cuda()
            reward = torch.tensor([reward]).cuda()
            done = torch.tensor([done]).cuda()
        else:
            state = torch.tensor(state)
            next_state = torch.tensor(next_state)
            action = torch.tensor([action])
            reward = torch.tensor([reward])
            done = torch.tensor([done])

        self.memory.append((state, next_state, action, reward, done,))

    def recall(self):
        """
        Retrieve a batch of experiences from memory
        """
        batch = random.sample(self.memory, self.batch_size)
        # Stack each field across the batch, then drop the singleton dims
        # from the scalar fields.
        state, next_state, action, reward, done = map(torch.stack, zip(*batch))
        return state, next_state, action.squeeze(), reward.squeeze(), done.squeeze()
# Learn
# --------------
#
# Mario uses the `DDQN algorithm <https://arxiv.org/pdf/1509.06461>`__
# under the hood. DDQN uses two ConvNets - $Q_{online}$ and
# $Q_{target}$ - that independently approximate the optimal
# action-value function.
#
# In our implementation, we share feature generator ``features`` across
# $Q_{online}$ and $Q_{target}$, but maintain separate FC
# classifiers for each. $\theta_{target}$ (the parameters of
# $Q_{target}$) is frozen to prevent updation by backprop. Instead,
# it is periodically synced with $\theta_{online}$ (more on this
# later).
#
# Neural Network
# ~~~~~~~~~~~~~~~~~~
#
#
class MarioNet(nn.Module):
    """mini cnn structure
    input -> (conv2d + relu) x 3 -> flatten -> (dense + relu) x 2 -> output

    Holds two copies of the network: ``online`` (trained by backprop) and
    ``target`` (frozen; periodically synced from ``online``), as DDQN requires.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        c, h, w = input_dim

        # The 3136-unit flatten size below is only valid for 84x84 inputs,
        # so fail fast on anything else.
        if h != 84:
            raise ValueError(f"Expecting input height: 84, got: {h}")
        if w != 84:
            raise ValueError(f"Expecting input width: 84, got: {w}")

        self.online = nn.Sequential(
            nn.Conv2d(in_channels=c, out_channels=32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3136, 512),
            nn.ReLU(),
            nn.Linear(512, output_dim),
        )

        self.target = copy.deepcopy(self.online)
        # Q_target parameters are frozen; they change only via explicit syncing.
        for p in self.target.parameters():
            p.requires_grad = False

    def forward(self, input, model):
        """Run `input` through the requested copy ("online" or "target")."""
        if model == "online":
            return self.online(input)
        elif model == "target":
            return self.target(input)
# TD Estimate & TD Target
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Two values are involved in learning:
#
# **TD Estimate** - the predicted optimal $Q^*$ for a given state
# $s$
#
# \begin{align}{TD}_e = Q_{online}^*(s,a)\end{align}
#
# **TD Target** - aggregation of current reward and the estimated
# $Q^*$ in the next state $s'$
#
# \begin{align}a' = argmax_{a} Q_{online}(s', a)\end{align}
#
# \begin{align}{TD}_t = r + \gamma Q_{target}^*(s',a')\end{align}
#
# Because we don’t know what next action $a'$ will be, we use the
# action $a'$ maximizes $Q_{online}$ in the next state
# $s'$.
#
# Notice we use the
# `@torch.no_grad() <https://pytorch.org/docs/stable/generated/torch.no_grad.html#no-grad>`__
# decorator on ``td_target()`` to disable gradient calculations here
# (because we don’t need to backpropagate on $\theta_{target}$).
#
#
#
class Mario(Mario):
    def __init__(self, state_dim, action_dim, save_dir):
        super().__init__(state_dim, action_dim, save_dir)
        self.gamma = 0.9  # discount factor for future rewards

    def td_estimate(self, state, action):
        """Q_online(s, a) for each transition in the batch."""
        current_Q = self.net(state, model="online")[
            np.arange(0, self.batch_size), action
        ]  # Q_online(s,a)
        return current_Q

    @torch.no_grad()
    def td_target(self, reward, next_state, done):
        """DDQN target: r + gamma * Q_target(s', argmax_a Q_online(s', a)),
        zeroed for terminal transitions via (1 - done)."""
        next_state_Q = self.net(next_state, model="online")
        best_action = torch.argmax(next_state_Q, axis=1)
        next_Q = self.net(next_state, model="target")[
            np.arange(0, self.batch_size), best_action
        ]
        return (reward + (1 - done.float()) * self.gamma * next_Q).float()
# Updating the model
# ~~~~~~~~~~~~~~~~~~~~~~
#
# As Mario samples inputs from his replay buffer, we compute $TD_t$
# and $TD_e$ and backpropagate this loss down $Q_{online}$ to
# update its parameters $\theta_{online}$ ($\alpha$ is the
# learning rate ``lr`` passed to the ``optimizer``)
#
# \begin{align}\theta_{online} \leftarrow \theta_{online} + \alpha \nabla(TD_e - TD_t)\end{align}
#
# $\theta_{target}$ does not update through backpropagation.
# Instead, we periodically copy $\theta_{online}$ to
# $\theta_{target}$
#
# \begin{align}\theta_{target} \leftarrow \theta_{online}\end{align}
#
#
#
#
class Mario(Mario):
    def __init__(self, state_dim, action_dim, save_dir):
        super().__init__(state_dim, action_dim, save_dir)
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=0.00025)
        self.loss_fn = torch.nn.SmoothL1Loss()

    def update_Q_online(self, td_estimate, td_target):
        """One gradient step on theta_online; returns the scalar loss."""
        loss = self.loss_fn(td_estimate, td_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def sync_Q_target(self):
        """Copy theta_online into theta_target (the periodic DDQN sync)."""
        self.net.target.load_state_dict(self.net.online.state_dict())
# Save checkpoint
# ~~~~~~~~~~~~~~~~~~
#
#
#
class Mario(Mario):
    def save(self):
        """Checkpoint the network weights and current exploration rate.

        The filename encodes how many save intervals have elapsed.
        """
        save_path = (
            self.save_dir / f"mario_net_{int(self.curr_step // self.save_every)}.chkpt"
        )
        torch.save(
            dict(model=self.net.state_dict(), exploration_rate=self.exploration_rate),
            save_path,
        )
        print(f"MarioNet saved to {save_path} at step {self.curr_step}")
# Putting it all together
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
#
class Mario(Mario):
    def __init__(self, state_dim, action_dim, save_dir):
        super().__init__(state_dim, action_dim, save_dir)
        self.burnin = 1e4  # min. experiences before training
        self.learn_every = 3  # no. of experiences between updates to Q_online
        self.sync_every = 1e4  # no. of experiences between Q_target & Q_online sync

    def learn(self):
        """One learning tick; returns (mean TD estimate, loss) or (None, None)
        when this step is a sync/save/burn-in/skip step."""
        if self.curr_step % self.sync_every == 0:
            self.sync_Q_target()

        if self.curr_step % self.save_every == 0:
            self.save()

        if self.curr_step < self.burnin:
            return None, None

        if self.curr_step % self.learn_every != 0:
            return None, None

        # Sample from memory
        state, next_state, action, reward, done = self.recall()

        # Get TD Estimate
        td_est = self.td_estimate(state, action)

        # Get TD Target
        td_tgt = self.td_target(reward, next_state, done)

        # Backpropagate loss through Q_online
        loss = self.update_Q_online(td_est, td_tgt)

        return (td_est.mean().item(), loss)
# Logging
# --------------
#
#
#
# +
import numpy as np
import time, datetime
import matplotlib.pyplot as plt
class MetricLogger:
    """Accumulates per-episode reward/length/loss/Q metrics, appends
    100-episode moving averages to a text log, and refreshes their plots."""

    def __init__(self, save_dir):
        # save_dir is a pathlib.Path; the text log and plot images live inside it.
        self.save_log = save_dir / "log"
        with open(self.save_log, "w") as f:
            f.write(
                f"{'Episode':>8}{'Step':>8}{'Epsilon':>10}{'MeanReward':>15}"
                f"{'MeanLength':>15}{'MeanLoss':>15}{'MeanQValue':>15}"
                f"{'TimeDelta':>15}{'Time':>20}\n"
            )
        self.ep_rewards_plot = save_dir / "reward_plot.jpg"
        self.ep_lengths_plot = save_dir / "length_plot.jpg"
        self.ep_avg_losses_plot = save_dir / "loss_plot.jpg"
        self.ep_avg_qs_plot = save_dir / "q_plot.jpg"

        # History metrics
        self.ep_rewards = []
        self.ep_lengths = []
        self.ep_avg_losses = []
        self.ep_avg_qs = []

        # Moving averages, added for every call to record()
        self.moving_avg_ep_rewards = []
        self.moving_avg_ep_lengths = []
        self.moving_avg_ep_avg_losses = []
        self.moving_avg_ep_avg_qs = []

        # Current episode metric
        self.init_episode()

        # Timing
        self.record_time = time.time()

    def log_step(self, reward, loss, q):
        """Record one environment step; loss/q are None while not learning."""
        self.curr_ep_reward += reward
        self.curr_ep_length += 1
        # BUG FIX: the original tested `if loss:`, which also skipped a
        # legitimate loss of exactly 0.0; test for None explicitly.
        if loss is not None:
            self.curr_ep_loss += loss
            self.curr_ep_q += q
            self.curr_ep_loss_length += 1

    def log_episode(self):
        "Mark end of episode"
        self.ep_rewards.append(self.curr_ep_reward)
        self.ep_lengths.append(self.curr_ep_length)
        if self.curr_ep_loss_length == 0:
            # No learning steps happened this episode.
            ep_avg_loss = 0
            ep_avg_q = 0
        else:
            ep_avg_loss = np.round(self.curr_ep_loss / self.curr_ep_loss_length, 5)
            ep_avg_q = np.round(self.curr_ep_q / self.curr_ep_loss_length, 5)
        self.ep_avg_losses.append(ep_avg_loss)
        self.ep_avg_qs.append(ep_avg_q)
        self.init_episode()

    def init_episode(self):
        # Reset the running totals for a new episode.
        self.curr_ep_reward = 0.0
        self.curr_ep_length = 0
        self.curr_ep_loss = 0.0
        self.curr_ep_q = 0.0
        self.curr_ep_loss_length = 0

    def record(self, episode, epsilon, step):
        """Append 100-episode moving averages to the log file and refresh plots."""
        mean_ep_reward = np.round(np.mean(self.ep_rewards[-100:]), 3)
        mean_ep_length = np.round(np.mean(self.ep_lengths[-100:]), 3)
        mean_ep_loss = np.round(np.mean(self.ep_avg_losses[-100:]), 3)
        mean_ep_q = np.round(np.mean(self.ep_avg_qs[-100:]), 3)
        self.moving_avg_ep_rewards.append(mean_ep_reward)
        self.moving_avg_ep_lengths.append(mean_ep_length)
        self.moving_avg_ep_avg_losses.append(mean_ep_loss)
        self.moving_avg_ep_avg_qs.append(mean_ep_q)

        last_record_time = self.record_time
        self.record_time = time.time()
        time_since_last_record = np.round(self.record_time - last_record_time, 3)

        print(
            f"Episode {episode} - "
            f"Step {step} - "
            f"Epsilon {epsilon} - "
            f"Mean Reward {mean_ep_reward} - "
            f"Mean Length {mean_ep_length} - "
            f"Mean Loss {mean_ep_loss} - "
            f"Mean Q Value {mean_ep_q} - "
            f"Time Delta {time_since_last_record} - "
            f"Time {datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}"
        )

        with open(self.save_log, "a") as f:
            f.write(
                f"{episode:8d}{step:8d}{epsilon:10.3f}"
                f"{mean_ep_reward:15.3f}{mean_ep_length:15.3f}{mean_ep_loss:15.3f}{mean_ep_q:15.3f}"
                f"{time_since_last_record:15.3f}"
                f"{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):>20}\n"
            )

        for metric in ["ep_rewards", "ep_lengths", "ep_avg_losses", "ep_avg_qs"]:
            plt.plot(getattr(self, f"moving_avg_{metric}"))
            plt.savefig(getattr(self, f"{metric}_plot"))
            plt.clf()
# -
# Let’s play!
# """""""""""""""
#
# In this example we run the training loop for 10 episodes, but for Mario to truly learn the ways of
# his world, we suggest running the loop for at least 40,000 episodes!
#
#
#
# +
# Training driver: set up the agent, the metric logger, and a timestamped
# checkpoint directory, then run the episode loop.
use_cuda = torch.cuda.is_available()
print(f"Using CUDA: {use_cuda}")
print()

save_dir = Path("checkpoints") / datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
save_dir.mkdir(parents=True)

mario = Mario(state_dim=(4, 84, 84), action_dim=env.action_space.n, save_dir=save_dir)
logger = MetricLogger(save_dir)

episodes = 10
for e in range(episodes):
    state = env.reset()

    episode_over = False
    while not episode_over:
        action = mario.act(state)                             # pick an action
        next_state, reward, done, info = env.step(action)     # advance the game
        mario.cache(state, next_state, action, reward, done)  # store the transition
        q, loss = mario.learn()                               # maybe update Q_online
        logger.log_step(reward, loss, q)
        state = next_state
        # Episode ends when the game reports done or Mario reaches the flag.
        episode_over = done or info["flag_get"]

    logger.log_episode()
    if e % 20 == 0:
        logger.record(episode=e, epsilon=mario.exploration_rate, step=mario.curr_step)
# -
# Conclusion
# """""""""""""""
#
# In this tutorial, we saw how we can use PyTorch to train a game-playing AI. You can use the same methods
# to train an AI to play any of the games at the `OpenAI gym <https://gym.openai.com/>`__. Hope you enjoyed this tutorial, feel free to reach us at
# `our github <https://github.com/yuansongFeng/MadMario/>`__!
#
#
| docs/_downloads/1ceade89aedc4c99a944f752a51c6d35/mario_rl_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false tags=["df89053c-55da-4946-b602-c3baf619b53f"]
# ## Initialization
# -
# ## Load Data
# The dataset is stored in the `/datasets/faces/` folder, there you can find
# - The `final_files` folder with 7.6k photos
# - The `labels.csv` file with labels, with two columns: `file_name` and `real_age`
#
# Given the fact that the number of image files is rather high, it is advisable to avoid reading them all at once, which would greatly consume computational resources. We recommend you build a generator with the ImageDataGenerator generator. This method was explained in Chapter 3, Lesson 7 of this course.
#
# The label file can be loaded as a regular CSV file.
import pandas as pd
# Load the age labels (columns: file_name, real_age) and take a first look.
labels = pd.read_csv('/datasets/faces/labels.csv')
labels.info()
labels.describe()
# ## EDA
# Every file name should appear at most once.
labels['file_name'].duplicated().value_counts()
# Age distribution.
labels.plot(kind='hist')
# ### Findings
# Age is normally distributed around 30 years old, with a slight tail on the older side of the data. Depending on the objective of the project, one could consider dropping those older than 60 to help deal with outliers.
# ## Modelling
# Define the necessary functions to train your model on the GPU platform and build a single script containing all of them along with the initialization section.
#
# To make this task easier, you can define them in this notebook and run a ready code in the next section to automatically compose the script.
#
# The definitions below will be checked by project reviewers as well, so that they can understand how you built the model.
# +
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.resnet import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten
from tensorflow.keras.optimizers import Adam
# -
def load_train(path):
    """Return the training generator (75% of the labelled faces).

    flow_from_dataframe requires an actual DataFrame plus x_col/y_col —
    the original passed the string 'labels.csv' and no column names,
    which raises at runtime. class_mode='raw' yields the real-valued
    ages needed for regression instead of class indices.
    """
    labels = pd.read_csv(path + 'labels.csv')  # assumes path ends with '/' (e.g. '/datasets/faces/')
    train_datagen = ImageDataGenerator(validation_split=0.25,
                                       horizontal_flip=True,
                                       vertical_flip=True,  # NOTE(review): upside-down faces are unusual augmentation — confirm it helps
                                       rescale=1/255)
    train_data = train_datagen.flow_from_dataframe(dataframe=labels,
                                                   directory=path + 'final_files/',
                                                   x_col='file_name',
                                                   y_col='real_age',
                                                   target_size=(150, 150),
                                                   class_mode='raw',
                                                   batch_size=16,
                                                   subset='training',
                                                   seed=12345)
    return train_data
def load_test(path):
    """Return the validation generator (remaining 25%, no augmentation)."""
    labels = pd.read_csv(path + 'labels.csv')  # assumes path ends with '/'
    test_datagen = ImageDataGenerator(validation_split=0.25, rescale=1/255)
    # The original referenced train_datagen here — a NameError, since that
    # name is local to load_train; the freshly built test_datagen is meant.
    test_data = test_datagen.flow_from_dataframe(dataframe=labels,
                                                 directory=path + 'final_files/',
                                                 x_col='file_name',
                                                 y_col='real_age',
                                                 target_size=(150, 150),
                                                 class_mode='raw',
                                                 batch_size=16,
                                                 subset='validation',
                                                 seed=12345)
    return test_data
def create_model(input_shape=(150, 150, 3)):
    """ResNet50-backed regression model that predicts a single age value.

    The original head (Dense(12, softmax) + sparse categorical
    cross-entropy) is a 12-class classifier, which cannot represent
    real_age targets that range well beyond 12. A single ReLU unit with
    MSE loss (keeping the MAE metric) matches the age-regression task.
    """
    backbone = ResNet50(input_shape=input_shape, weights='imagenet', include_top=False)
    model = Sequential()
    model.add(backbone)
    model.add(GlobalAveragePooling2D())
    model.add(Dense(1, activation='relu'))  # single non-negative output: predicted age
    optimizer = Adam(learning_rate=.0001)   # 'lr' alias is deprecated in recent Keras
    model.compile(optimizer=optimizer, loss='mse', metrics=['mae'])
    return model
def train_model(model, train_data, test_data, batch_size=None, epochs=3, steps_per_epoch=None,
                validation_steps=None):
    """Fit *model* on the generators, defaulting the step counts to one
    full pass over each generator, and return the fitted model."""
    steps_per_epoch = len(train_data) if steps_per_epoch is None else steps_per_epoch
    validation_steps = len(test_data) if validation_steps is None else validation_steps
    model.fit(
        train_data,
        validation_data=test_data,
        batch_size=batch_size,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        verbose=2,
    )
    return model
# ### Prepare the Script to Run on the GPU Platform
# Given you've defined the necessary functions you can compose a script for the GPU platform, download it via the "File|Open..." menu, and to upload it later for running on the GPU platform.
#
# N.B.: The script should include the initialization section as well. An example of this is shown below.
# +
# prepare a script to run on the GPU platform
# The generated file is the import preamble followed by the source of each
# notebook-defined function, separated by blank lines.
init_str = """
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.resnet import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten
from tensorflow.keras.optimizers import Adam
"""

import inspect

parts = [init_str, '\n\n']
for fn in (load_train, load_test, create_model, train_model):
    parts.append(inspect.getsource(fn))
    parts.append('\n\n')

with open('run_model_on_gpu.py', 'w') as f:
    f.write(''.join(parts))
# -
# ### Output
# Place the output from the GPU platform in a Markdown cell here.
# ## Conclusions
| computer_vision/computer_vision.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
import os
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression, RidgeClassifier, RidgeClassifierCV
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, VotingClassifier
from sklearn import cross_validation
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score, classification_report
from gensim.models import LdaMulticore
from gensim import corpora
from gensim.matutils import sparse2full
from scipy.sparse import hstack
# For dockers + ensembles:
# joblib (used by sklearn's n_jobs parallelism) spills temporary arrays here.
os.environ['JOBLIB_TEMP_FOLDER'] = '../data/tmp/'
# ### Data Import
# Labelled IMDB reviews: id, sentiment (0/1), review text.
train = pd.read_csv('data/labeledTrainData.tsv', sep='\t')
print(train.shape)
train.head()
# Unlabelled reviews used for the final predictions.
test = pd.read_csv('data/testData.tsv', sep='\t')
print(test.shape)
# ### Vectorize Text
# Bag-of-words counts over uni- and bi-grams.
count_vec = CountVectorizer(ngram_range=(1, 2),
                            min_df=5,
                            max_df=0.9,
                            strip_accents='unicode',
                            max_features=None)
# TF-IDF over uni- and bi-grams with sublinear tf scaling.
tfidf_vec = TfidfVectorizer(ngram_range=(1, 2),
                            min_df=3,
                            #max_df=0.9,
                            strip_accents='unicode',
                            analyzer='word',
                            token_pattern=r'\w{1,}',
                            use_idf=1,
                            smooth_idf=1,
                            sublinear_tf=1,
                            max_features=None,
                            stop_words='english')
count_vec.fit(train["review"].fillna("").values)
X_count = count_vec.transform(train["review"].fillna("").values)
print(type(X_count))
print(X_count.shape)
tfidf_vec.fit(train["review"].fillna("").values)
X_tfidf = tfidf_vec.transform(train["review"].fillna("").values)
print(type(X_tfidf))
print(X_tfidf.shape)
# Series.as_matrix() was removed in pandas 1.0; to_numpy() is the supported
# replacement and returns the same ndarray.
y = train['sentiment'].to_numpy()
print(y.shape)
# ### Create Topic Vector
# Load the pretrained LDA topic model and its dictionary, then append a
# dense topic-distribution vector to every review (train and test).
dictionary = corpora.Dictionary.load('models/topic_dict.dict')
lda = LdaMulticore.load('models/topic_lda_model')
text_row_count = train['review'].shape[0]
vector_size = lda.num_topics
train['review_topic_vector'] = train['review'].apply(lambda x: lda[dictionary.doc2bow(x.lower().split(" "))])
# Series.as_matrix() was removed in pandas 1.0; to_numpy() is the supported replacement.
X_lda_matrix = np.reshape(np.concatenate(train['review_topic_vector']
                                         .apply(lambda x: sparse2full(x, vector_size))
                                         .to_numpy(), axis=0), (text_row_count, vector_size))
test['review_topic_vector'] = test['review'].apply(lambda x: lda[dictionary.doc2bow(x.lower().split(" "))])
X_lda_matrix_test = np.reshape(np.concatenate(test['review_topic_vector']
                                              .apply(lambda x: sparse2full(x, vector_size))
                                              .to_numpy(), axis=0), (test['review'].shape[0], vector_size))
# ### Concat Features
# Stack the sparse text features with the dense LDA topic matrix.
X_count_concat = hstack((X_count, X_lda_matrix))
X_tfidf_concat = hstack((X_tfidf, X_lda_matrix))
# NOTE(review): the count split uses the concatenated features, but the
# tfidf split uses plain X_tfidf (without topics), leaving X_tfidf_concat
# unused — confirm this asymmetry is intentional.
X_count_train, X_count_val, y_count_train, y_count_val = train_test_split(X_count_concat, y, test_size=0.1, random_state=2481632)
X_tfidf_train, X_tfidf_val, y_tfidf_train, y_tfidf_val = train_test_split(X_tfidf, y, test_size=0.1, random_state=2481632)
X_count_test = count_vec.transform(test['review'].fillna("").values)
X_tfidf_test = tfidf_vec.transform(test['review'].fillna("").values)
# Append the test-set topic vectors to match the training feature layout.
X_count_test = hstack((X_count_test, X_lda_matrix_test))
X_tfidf_test = hstack((X_tfidf_test, X_lda_matrix_test))
# ### Linear Models
# L2-regularised logistic regression on the TF-IDF features (the dual
# formulation suits n_features >> n_samples).
lm_logit = LogisticRegression(penalty='l2',
                              dual=True,
                              tol=0.0001,
                              C=1.0,
                              fit_intercept=True,
                              intercept_scaling=1.0,
                              class_weight=None,
                              random_state=42,
                              solver='liblinear',
                              max_iter=1000,
                              verbose=1,
                              n_jobs=-1)
lm_logit.fit(X_tfidf_train, y_tfidf_train)
y_val_hat = lm_logit.predict_proba(X_tfidf_val)[:, 1]  # P(positive sentiment)
print(accuracy_score(y_tfidf_val, y_val_hat > 0.5))
print(roc_auc_score(y_tfidf_val, y_val_hat))
print(confusion_matrix(y_tfidf_val, y_val_hat > 0.5))
print(classification_report(y_tfidf_val, y_val_hat > 0.5))
# sklearn.cross_validation was removed in scikit-learn 0.20; use the
# cross_val_score already imported from sklearn.model_selection.
print("20 Fold CV Score: {}".format(np.mean(cross_val_score(lm_logit, X_tfidf_train, y_tfidf_train, cv=20, scoring='roc_auc'))))
# Simpler logistic regression (default primal form) on the same TF-IDF split.
lm_logit_tfidf = LogisticRegression(penalty='l2',
                                    C=1.0,
                                    class_weight=None,
                                    random_state=42,
                                    solver='liblinear',
                                    max_iter=1000,
                                    verbose=1,
                                    n_jobs=-1)
lm_logit_tfidf.fit(X_tfidf_train, y_tfidf_train)
y_tfidf_val_hat = lm_logit_tfidf.predict(X_tfidf_val)
print(accuracy_score(y_tfidf_val, y_tfidf_val_hat))
print(confusion_matrix(y_tfidf_val, y_tfidf_val_hat))
print(classification_report(y_tfidf_val, y_tfidf_val_hat))
# Ridge classifier with built-in cross-validation over a small alpha grid,
# trained on the count + topic features.
lm_ridge = RidgeClassifierCV(alphas=(0.1, 0.5, 1.0, 5.0, 10.0),
                             cv=5,
                             class_weight=None)
lm_ridge.fit(X_count_train, y_count_train)
lm_ridge.alpha_  # regularisation strength selected by CV
y_ridge_val_hat = lm_ridge.predict(X_count_val)
print(accuracy_score(y_count_val, y_ridge_val_hat))
print(confusion_matrix(y_count_val, y_ridge_val_hat))
print(classification_report(y_count_val, y_ridge_val_hat))
# Single (unfitted) ridge model kept for the voting ensemble below;
# alpha matches the value favoured by the CV search above.
lm_ridge_single = RidgeClassifier(alpha=10.0)
# Multinomial naive Bayes baseline on the count features.
nb = MultinomialNB(alpha=0.5)
nb.fit(X_count_train, y_count_train)
y_nb_val_hat = nb.predict(X_count_val)
print(accuracy_score(y_count_val, y_nb_val_hat))
print(confusion_matrix(y_count_val, y_nb_val_hat))
print(classification_report(y_count_val, y_nb_val_hat))
# ### NB-SVM Model
# Relevant Paper: https://nlp.stanford.edu/pubs/sidaw12_simple_sentiment.pdf
# +
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_is_fitted
from sklearn.linear_model import LogisticRegression
from scipy import sparse
class NbSvmClassifier(BaseEstimator, ClassifierMixin):
    """Naive-Bayes-weighted logistic regression (NB-SVM).

    Scales features by the NB log-count ratio r and fits a logistic
    regression on x * r (Wang & Manning, 2012).
    """

    def __init__(self, C=1.0, dual=False, n_jobs=1):
        self.C = C
        self.dual = dual
        self.n_jobs = n_jobs
        self.coef_ = None

    def predict(self, x):
        # Verify that model has been fit
        check_is_fitted(self, ['_r', '_clf'])
        return self._clf.predict(x.multiply(self._r))

    def predict_proba(self, x):
        # Verify that model has been fit
        check_is_fitted(self, ['_r', '_clf'])
        return self._clf.predict_proba(x.multiply(self._r))

    def fit(self, x, y):
        # Accept either a pandas Series or a plain ndarray target: the
        # original unconditional `y = y.values` raised AttributeError for
        # ndarrays (e.g. the output of train_test_split on a numpy array).
        y = getattr(y, 'values', y)
        # Check that X and y have correct shape
        x, y = check_X_y(x, y, accept_sparse=True)

        def pr(x, y_i, y):
            # Smoothed per-feature count vector for class y_i.
            p = x[y == y_i].sum(0)
            return (p + 1) / ((y == y_i).sum() + 1)

        self._r = sparse.csr_matrix(np.log(pr(x, 1, y) / pr(x, 0, y)))
        x_nb = x.multiply(self._r)
        self._clf = LogisticRegression(C=self.C, dual=self.dual, n_jobs=self.n_jobs).fit(x_nb, y)
        self.coef_ = self._clf.coef_
        return self
# -
# Fit the NB-SVM on the count + topic training features. The original
# passed `x_nb`, a name that only exists locally inside
# NbSvmClassifier.fit — a NameError here; X_count_train is what's meant.
m = NbSvmClassifier(C=4, dual=True)
m.fit(X_count_train, y_count_train)
y_hat = m.predict(X_count_val)
print(accuracy_score(y_count_val, y_hat))
print(confusion_matrix(y_count_val, y_hat))
print(classification_report(y_count_val, y_hat))
# ### Ensemble Model
# Random forest baseline on the count features.
rf = RandomForestClassifier(n_estimators=300, n_jobs=-1)
rf.fit(X_count_train, y_count_train)
y_rf_hat = rf.predict(X_count_val)  # predict once instead of three times
print(accuracy_score(y_count_val, y_rf_hat))
print(confusion_matrix(y_count_val, y_rf_hat))
print(classification_report(y_count_val, y_rf_hat))
# Gradient boosting baseline.
gbc = GradientBoostingClassifier(n_estimators=200, verbose=1)
gbc.fit(X_count_train, y_count_train)
y_gbc_hat = gbc.predict(X_count_val)
print(accuracy_score(y_count_val, y_gbc_hat))
print(confusion_matrix(y_count_val, y_gbc_hat))
print(classification_report(y_count_val, y_gbc_hat))
# Hard-voting ensemble of the ridge, forest, and boosting models.
vote_m = VotingClassifier([('lm_ridge_single', lm_ridge_single), ('rf', rf), ('gbc', gbc)],
                          voting='hard',
                          n_jobs=-1)
vote_m.fit(X_count_train, y_count_train)
y_vote_hat = vote_m.predict(X_count_val)
print(accuracy_score(y_count_val, y_vote_hat))
print(confusion_matrix(y_count_val, y_vote_hat))
print(classification_report(y_count_val, y_vote_hat))
| 02_baseline_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: reco_pyspark
# language: python
# name: reco_pyspark
# ---
# <i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
#
# <i>Licensed under the MIT License.</i>
# # Building a Real-time Recommendation API
#
# This reference architecture shows the full lifecycle of building a recommendation system. It walks through the creation of appropriate azure resources, training a recommendation model using a Virtual Machine or Databricks, and deploying it as an API. It uses Azure Cosmos DB, Azure Machine Learning, and Azure Kubernetes Service.
#
# This architecture can be generalized for many recommendation engine scenarios, including recommendations for products, movies, and news.
# ### Architecture
# 
#
# **Scenario**: A media organization wants to provide movie or video recommendations to its users. By providing personalized recommendations, the organization meets several business goals, including increased click-through rates, increased engagement on site, and higher user satisfaction.
#
# In this reference, we train and deploy a real-time recommender service API that can provide the top 10 movie recommendations for a given user.
#
# ### Components
# This architecture consists of the following key components:
# * [Azure Databricks](https://docs.microsoft.com/en-us/azure/azure-databricks/what-is-azure-databricks)<sup>1)</sup> is used as a development environment to prepare input data and train the recommender model on a Spark cluster. Azure Databricks also provides an interactive workspace to run and collaborate on notebooks for any data processing or machine learning tasks.
# * [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/intro-kubernetes)(AKS) is used to deploy and operationalize a machine learning model service API on a Kubernetes cluster. AKS hosts the containerized model, providing scalability that meets throughput requirements, identity and access management, and logging and health monitoring.
# * [Azure Cosmos DB](https://docs.microsoft.com/en-us/azure/cosmos-db/introduction) is a globally distributed database service used to store the top 10 recommended movies for each user. Azure Cosmos DB is ideal for this scenario as it provides low latency (10 ms at 99th percentile) to read the top recommended items for a given user.
# * [Azure Machine Learning Service](https://docs.microsoft.com/en-us/azure/machine-learning/service/) is a service used to track and manage machine learning models, and then package and deploy these models to a scalable Azure Kubernetes Service environment.
#
# <sup>1) Here, we are just giving an example of using Azure Databricks. Any platforms listed in [SETUP](https://github.com/microsoft/recommenders/blob/master/SETUP.md) can be used as well.</sup>
#
#
# ### Table of Contents.
# 0. [File Imports](#0-File-Imports)
# 1. [Service Creation](#1-Service-Creation)
# 2. [Training and evaluation](#2-Training)
# 3. [Operationalization](#3.-Operationalize-the-Recommender-Service)
# ## Setup
# To run this notebook on Azure Databricks, you should setup Azure Databricks by following the appropriate sections in the repository [SETUP instructions](https://github.com/microsoft/recommenders/blob/master/SETUP.md) and import this notebook into your Azure Databricks Workspace (see instructions [here](https://docs.azuredatabricks.net/user-guide/notebooks/notebook-manage.html#import-a-notebook)).
#
# Please note: This notebook **REQUIRES** that you add the dependencies to support **operationalization**. See [SETUP](https://github.com/microsoft/recommenders/blob/master/SETUP.md) for details.
#
# ## 0 File Imports
# +
import os
import sys
sys.path.append("../../")
import urllib
from azure.common.client_factory import get_client_from_cli_profile
import azure.mgmt.cosmosdb
import azureml.core
from azureml.core import Workspace
from azureml.core.model import Model
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException
from azureml.core.webservice import Webservice, AksWebservice
from azureml.exceptions import WebserviceException
from azureml.core import Environment
from azureml.core.environment import CondaDependencies
from azureml.core.model import InferenceConfig
from azureml.core.environment import SparkPackage
import pydocumentdb.document_client as document_client
from pyspark.ml.recommendation import ALS
from pyspark.sql.types import StructType, StructField
from pyspark.sql.types import FloatType, IntegerType, LongType
from reco_utils.common.timer import Timer
from reco_utils.common.spark_utils import start_or_get_spark
from reco_utils.dataset import movielens
from reco_utils.dataset.cosmos_cli import find_collection, read_collection, read_database, find_database
from reco_utils.dataset.download_utils import maybe_download
from reco_utils.dataset.spark_splitters import spark_random_split
from reco_utils.evaluation.spark_evaluation import SparkRatingEvaluation, SparkRankingEvaluation
from reco_utils.common.notebook_utils import is_databricks
print("Azure SDK version:", azureml.core.VERSION)
# -
# Start spark session if needed
if not is_databricks():
    # Local run: fetch the Cosmos DB Spark connector jar so results can be
    # written to Cosmos DB later.
    cosmos_connector = (
        "https://search.maven.org/remotecontent?filepath=com/microsoft/azure/"
        "azure-cosmosdb-spark_2.3.0_2.11/1.3.3/azure-cosmosdb-spark_2.3.0_2.11-1.3.3-uber.jar"
    )
    jar_filepath = maybe_download(url=cosmos_connector, filename="cosmos.jar")
    spark = start_or_get_spark("ALS", memory="10g", jars=[jar_filepath])
# On Databricks a `spark` session is already provided.
sc = spark.sparkContext
display(sc)
# ## 1 Service Creation
# Modify the **Subscription ID** to the subscription you would like to deploy to and set the resource name variables.
#
# #### Services created by this notebook:
# 1. [Azure ML Service](https://azure.microsoft.com/en-us/services/machine-learning-service/)
# 1. [Azure ML Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-workspace)
# 1. [Azure Application Insights](https://azure.microsoft.com/en-us/services/monitor/)
# 1. [Azure Storage](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview)
# 1. [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/)
#
# 1. [Azure Cosmos DB](https://azure.microsoft.com/en-us/services/cosmos-db/)
# 1. [Azure Kubernetes Service (AKS)](https://azure.microsoft.com/en-us/services/kubernetes-service/)
# **Add your Azure subscription ID**
# +
# Add your subscription ID
subscription_id = ""
# Set your workspace name
workspace_name = "o16n-test"
# Resource group derived from the workspace name.
resource_group = "{}-rg".format(workspace_name)
# Set your region to deploy Azure ML workspace
location = "eastus"
# AzureML service and Azure Kubernetes Service prefix
service_name = "mvl-als"
# -
# Login for Azure CLI so that AzureML can use Azure CLI login credentials
# !az login
# Change subscription if needed
# !az account set --subscription {subscription_id}
# Check account
# !az account show
# +
# CosmosDB
# account_name for CosmosDB cannot have "_" and needs to be less than 31 chars
account_name = "{}-ds-sql".format(workspace_name).replace("_", "-")[:31]
cosmos_database = "recommendations"
cosmos_collection = "user_recommendations_als"
# AzureML resource names
model_name = "{}-reco.mml".format(service_name)
aks_name = "{}-aks".format(service_name)
# +
# top k items to recommend
TOP_K = 10
# Select MovieLens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
# +
# MovieLens column names and local train/test artifact paths used below.
userCol = "UserId"
itemCol = "MovieId"
ratingCol = "Rating"
train_data_path = "train"
test_data_path = "test"
# -
# ### 1.1 Import or create the AzureML Workspace.
# This command will check if the AzureML Workspace exists or not, and will create the workspace if it doesn't exist.
# Create (or attach to) the AzureML workspace; exist_ok makes this idempotent.
ws = Workspace.create(
    name=workspace_name,
    subscription_id=subscription_id,
    resource_group=resource_group,
    location=location,
    exist_ok=True
)
# ### 1.2 Create a Cosmos DB to store recommendation results
#
# This step will take some time to create CosmosDB resources.
# +
# explicitly pass subscription_id in case user has multiple subscriptions
client = get_client_from_cli_profile(
    azure.mgmt.cosmosdb.CosmosDB,
    subscription_id=subscription_id
)
# Provision the Cosmos DB account (long-running; .result() blocks until done).
async_cosmosdb_create = client.database_accounts.create_or_update(
    resource_group,
    account_name,
    {
        'location': location,
        'locations': [{
            'location_name': location
        }]
    }
)
account = async_cosmosdb_create.result()
# Retrieve the primary key and build the document endpoint URL.
my_keys = client.database_accounts.list_keys(resource_group, account_name)
master_key = my_keys.primary_master_key
endpoint = "https://" + account_name + ".documents.azure.com:443/"
# DB client
client = document_client.DocumentClient(endpoint, {'masterKey': master_key})
# Create the database only if it does not already exist.
if not find_database(client, cosmos_database):
    db = client.CreateDatabase({'id': cosmos_database})
    print("Database created")
else:
    db = read_database(client, cosmos_database)
    print("Database found")
# Create collection options
options = dict(offerThroughput=11000)
# Create a collection
collection_definition = {
    'id': cosmos_collection,
    'partitionKey': {'paths': ['/id'], 'kind': 'Hash'}
}
if not find_collection(client, cosmos_database, cosmos_collection):
    collection = client.CreateCollection(
        db['_self'],
        collection_definition,
        options
    )
    print("Collection created")
else:
    collection = read_collection(client, cosmos_database, cosmos_collection)
    print("Collection found")
# Connection settings reused by the Spark Cosmos DB connector and the
# scoring script below.
dbsecrets = dict(
    Endpoint=endpoint,
    Masterkey=master_key,
    Database=cosmos_database,
    Collection=cosmos_collection,
    Upsert=True
)
# -
# ## 2 Training
#
# Next, we train an [Alternating Least Squares model](https://spark.apache.org/docs/latest/ml-collaborative-filtering.html) on [MovieLens](https://grouplens.org/datasets/movielens/) dataset.
#
# ### 2.1 Download the MovieLens dataset
# +
# Note: The DataFrame-based API for ALS currently only supports integers for user and item ids.
schema = StructType(
    (
        StructField(userCol, IntegerType()),
        StructField(itemCol, IntegerType()),
        StructField(ratingCol, FloatType()),
    )
)
# Download MovieLens into a Spark DataFrame using the schema above.
data = movielens.load_spark_df(spark, size=MOVIELENS_DATA_SIZE, schema=schema)
data.show()
# -
# ### 2.2 Split the data into train, test
# There are several ways of splitting the data: random, chronological, stratified, etc., each of which favors a different real-world evaluation use case. We will split randomly in this example – for more details on which splitter to choose, consult [this guide](https://github.com/Microsoft/Recommenders/blob/master/notebooks/01_data/data_split.ipynb).
# 75/25 random split; the fixed seed keeps the split reproducible.
train, test = spark_random_split(data, ratio=0.75, seed=42)
print("N train", train.cache().count())
print("N test", test.cache().count())
# ### 2.3 Train the ALS model on the training data
#
# To predict movie ratings, we use the rating data in the training set as users' explicit feedback. The hyperparameters used to estimate the model are set based on [this page](http://mymedialite.net/examples/datasets.html).
#
# Under most circumstances, you would explore the hyperparameters and choose an optimal set based on some criteria. For additional details on this process, please see additional information in the deep dives [here](https://github.com/microsoft/recommenders/blob/master/notebooks/04_model_select_and_optimize/tuning_spark_als.ipynb).
# Explicit-feedback ALS. coldStartStrategy='drop' removes NaN predictions
# for users/items unseen during training at evaluation time.
als = ALS(
    rank=10,
    maxIter=15,
    implicitPrefs=False,
    alpha=0.1,
    regParam=0.05,
    coldStartStrategy='drop',
    nonnegative=True,
    userCol=userCol,
    itemCol=itemCol,
    ratingCol=ratingCol,
)
model = als.fit(train)
# ### 2.4 Get top-k recommendations for our testing data
#
# In the movie recommendation use case, recommending movies that have been rated by the users do not make sense. Therefore, the rated movies are removed from the recommended items.
#
# In order to achieve this, we recommend all movies to all users, and then remove the user-movie pairs that exist in the training dataset.
# Get the cross join of all user-item pairs and score them.
users = train.select(userCol).distinct()
items = train.select(itemCol).distinct()
user_item = users.crossJoin(items)
dfs_pred = model.transform(user_item)  # predicted rating for every pair
dfs_pred.show()
# +
# Remove seen items.
# Outer-join predictions with the training ratings; rows whose training
# rating is null were never rated by that user, i.e. genuinely new
# recommendations.
dfs_pred_exclude_train = dfs_pred.alias("pred").join(
    train.alias("train"),
    (dfs_pred[userCol] == train[userCol]) & (dfs_pred[itemCol] == train[itemCol]),
    how='outer'
)
top_all = dfs_pred_exclude_train.filter(dfs_pred_exclude_train["train." + ratingCol].isNull()) \
    .select("pred." + userCol, "pred." + itemCol, "pred.prediction")
top_all.show()
# -
# ### 2.5 Evaluate how well ALS performs
#
# Evaluate model performance using metrics such as Precision@K, Recall@K, [MAP@K](https://en.wikipedia.org/wiki/Evaluation_measures_\(information_retrieval\) or [nDCG@K](https://en.wikipedia.org/wiki/Discounted_cumulative_gain). For a full guide on what metrics to evaluate your recommender with, consult [this guide](https://github.com/Microsoft/Recommenders/blob/master/notebooks/03_evaluate/evaluation.ipynb).
# +
# Column-name mapping shared by the Spark evaluators below.
cols = {
    'col_user': userCol,
    'col_item': itemCol,
    'col_rating': ratingCol,
    'col_prediction': "prediction",
}
test.show()
# +
# Evaluate Ranking Metrics
# Score the filtered top-k recommendations against the held-out ratings.
rank_eval = SparkRankingEvaluation(
    test,
    top_all,
    k=TOP_K,
    **cols
)
print(
    "Model:\tALS",
    "Top K:\t%d" % rank_eval.k,
    "MAP:\t%f" % rank_eval.map_at_k(),
    "NDCG:\t%f" % rank_eval.ndcg_at_k(),
    "Precision@K:\t%f" % rank_eval.precision_at_k(),
    "Recall@K:\t%f" % rank_eval.recall_at_k(), sep='\n'
)
# +
# Evaluate Rating Metrics
# Predict ratings for the held-out pairs and score the point predictions.
prediction = model.transform(test)
rating_eval = SparkRatingEvaluation(
    test,
    prediction,
    **cols
)
print(
    "Model:\tALS rating prediction",
    "RMSE:\t%.2f" % rating_eval.rmse(),
    "MAE:\t%f" % rating_eval.mae(),
    "Explained variance:\t%f" % rating_eval.exp_var(),
    "R squared:\t%f" % rating_eval.rsquared(), sep='\n'
)
# -
# ### 2.6 Save the model
# Persist the trained ALS model locally so it can be registered with AzureML.
(model
 .write()
 .overwrite()
 .save(model_name))
# ## 3. Operationalize the Recommender Service
# Once the model is built with desirable performance, it will be operationalized to run as a REST endpoint to be utilized by a real time service. We will utilize [Azure Cosmos DB](https://azure.microsoft.com/en-us/services/cosmos-db/), [Azure Machine Learning Service](https://azure.microsoft.com/en-us/services/machine-learning-service/), and [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/intro-kubernetes) to operationalize the recommender service.
# ### 3.1 Create a look-up for Recommendations in Cosmos DB
#
# First, the Top-10 recommendations for each user as predicted by the model are stored as a lookup table in Cosmos DB. At runtime, the service will return the Top-10 recommendations as precomputed and stored in Cosmos DB:
# Precompute the top-10 recommendations per user; the user id is cast to a
# string "id" column, which Cosmos DB uses as the document/partition key.
recs = model.recommendForAllUsers(10)
recs_topk = recs.withColumn("id", recs[userCol].cast("string")) \
    .select("id", "recommendations." + itemCol)
recs_topk.show()
# Save data to CosmosDB
(recs_topk.coalesce(1)
 .write
 .format("com.microsoft.azure.cosmosdb.spark")
 .mode('overwrite')
 .options(**dbsecrets)
 .save())
# ### 3.2 Configure Azure Machine Learning
#
# Next, Azure Machine Learning Service is used to create a model scoring image and deploy it to Azure Kubernetes Service as a scalable containerized service. To achieve this, a **scoring script** should be created. In the script, we make a call to Cosmos DB to lookup the top 10 movies to recommend given an input User ID.
# +
# Scoring-script template: at request time it looks up the precomputed
# recommendations for the given user id directly from Cosmos DB. The
# {placeholders} are filled from dbsecrets via .format() below; the embedded
# source itself is runtime content and is left untouched.
score_sparkml = """
import json
import pydocumentdb.document_client as document_client
def init(local=False):
    global client, collection
    try:
        client = document_client.DocumentClient('{endpoint}', dict(masterKey='{key}'))
        collection = client.ReadCollection(collection_link='dbs/{database}/colls/{collection}')
    except Exception as e:
        collection = e
def run(input_json):
    try:
        # Query them in SQL
        id = str(json.loads(json.loads(input_json)[0])['id'])
        query = dict(query='SELECT * FROM c WHERE c.id = "' + id +'"')
        options = dict(partitionKey=str(id))
        document_link = 'dbs/{database}/colls/{collection}/docs/' + id
        result = client.ReadDocument(document_link, options);
    except Exception as e:
        result = str(e)
    return json.dumps(str(result))
""".format(key=dbsecrets['Masterkey'],
           endpoint=dbsecrets['Endpoint'],
           database=dbsecrets['Database'],
           collection=dbsecrets['Collection'])
# test validity of python string
exec(score_sparkml)
with open("score_sparkml.py", "w") as file:
    file.write(score_sparkml)
# -
# Register your model:
# +
# Register the saved ALS model with the AML workspace so it can be versioned
# and referenced by the deployment below.
mymodel = Model.register(
    model_path=model_name,  # this points to a local file
    model_name=model_name,  # this is the name the model is registered as
    description="AML trained model",
    workspace=ws
)
print(mymodel.name, mymodel.description, mymodel.version)
# -
# ### 3.3 Deploy the model as a Service on AKS
# #### 3.3.1 Create an Environment for your model:
# +
# Inference environment: mmlspark base image + pinned Python deps + Spark
# packages needed to load the ALS model and talk to Cosmos DB.
env = Environment(name='sparkmlenv')
# Specify a public image from microsoft/mmlspark as base image
env.docker.base_image="microsoft/mmlspark:0.15"
pip = [
    'azureml-defaults',
    'numpy==1.14.2',
    'scikit-learn==0.19.1',
    'pandas',
    'pydocumentdb'
]
# Add dependencies needed for inferencing
env.python.conda_dependencies = CondaDependencies.create(pip_packages=pip)
env.inferencing_stack_version = "latest"
# Add spark packages
env.spark.precache_packages = True
env.spark.repositories = ["https://mmlspark.azureedge.net/maven"]
env.spark.packages= [
    SparkPackage("com.microsoft.ml.spark", "mmlspark_2.11", "0.15"),
    SparkPackage("com.microsoft.azure", artifact="azure-storage", version="2.0.0"),
    SparkPackage(group="org.apache.hadoop", artifact="hadoop-azure", version="2.7.0")
]
# -
# #### 3.3.2 Create an AKS Cluster to run your container
# This may take 20 to 30 minutes depending on the cluster size.
# Verify that cluster does not exist already
# Reuse the AKS cluster if one with this name exists; otherwise provision a
# new one with default settings (EAFP: probing raises ComputeTargetException).
try:
    aks_target = ComputeTarget(workspace=ws, name=aks_name)
    print("Found existing cluster, use it.")
except ComputeTargetException:
    # Create the cluster using the default configuration (can also provide parameters to customize)
    prov_config = AksCompute.provisioning_configuration()
    aks_target = ComputeTarget.create(
        workspace=ws,
        name=aks_name,
        provisioning_configuration=prov_config
    )
    aks_target.wait_for_completion(show_output = True)
print(aks_target.provisioning_state)
# To check any error logs, print(aks_target.provisioning_errors)
# #### 3.3.3 Deploy the container image to AKS:
# +
# Create an Inferencing Configuration with your environment and scoring script
inference_config = InferenceConfig(
environment=env,
entry_script="score_sparkml.py"
)
# Set the web service configuration (using default here with app insights)
aks_config = AksWebservice.deploy_configuration(enable_app_insights=True)
# Webservice creation using single command
try:
aks_service = Model.deploy(
workspace=ws,
models=[mymodel],
name=service_name,
inference_config=inference_config,
deployment_config=aks_config,
deployment_target=aks_target
)
aks_service.wait_for_deployment(show_output=True)
except WebserviceException:
# Retrieve existing service.
aks_service = Webservice(ws, name=service_name)
print("Retrieved existing service")
# -
# ### 3.4 Call the AKS model service
# After the deployment, the service can be called with a user ID – the service will then look up the top 10 recommendations for that user in Cosmos DB and send back the results.
# The following script demonstrates how to call the recommendation service API and view the result for the given user ID:
# +
import json
scoring_url = aks_service.scoring_uri
service_key = aks_service.get_keys()[0]
# Payload is a JSON array holding one JSON-encoded string: '["{\"id\":\"496\"}"]'
input_data = '["{\\"id\\":\\"496\\"}"]'.encode()
req = urllib.request.Request(scoring_url, data=input_data)
req.add_header("Authorization","Bearer {}".format(service_key))
req.add_header("Content-Type","application/json")
with Timer() as t:
    with urllib.request.urlopen(req) as result:
        res = result.read()
        resj = json.loads(
            # Cleanup to parse into a json object: the service returns a
            # stringified Python repr, so strip escapes and swap quote styles.
            res.decode("utf-8")
            .replace("\\", "")
            .replace('"', "")
            .replace("'", '"')
        )
print(json.dumps(resj, indent=4))
print("Full run took %.2f seconds" % t.interval)
# -
# ## Appendix - Realtime scoring with AzureML
#
# In the previous cells, we utilized Cosmos DB to cache the recommendation results for realtime serving. Alternatively, we can generate recommendation results on demand by using the model we deployed. Following scripts load the registered model and use it for recommendation:
#
# * *score_sparkml.py*
# ```
# import json
# import os
# from pyspark.ml.recommendation import ALSModel
#
# # Note, set `model_name`, `userCol`, and `itemCol` defined earlier.
# model_name = "mvl-als-reco.mml"
# userCol = "UserId"
# itemCol = "MovieId"
#
# def init(local=False):
# global model
#
# # Load ALS model.
# model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_name)
# model = ALSModel.load(model_path)
#
# def run(input_json):
# js = json.loads(json.loads(input_json)[0])
# id = str(js['id'])
# k = js.get('k', 10)
#
# # Use the model to get recommendation.
# recs = model.recommendForAllUsers(k)
# recs_topk = recs.withColumn('id', recs[userCol].cast("string")).select(
# 'id', "recommendations." + itemCol
# )
# result = recs_topk[recs_topk.id==id].collect()[0].asDict()
#
# return json.dumps(str(result))
# ```
#
# * Call the AKS model service
# ```
# # Get a recommendation of 10 movies
# input_data = '["{\\"id\\":\\"496\\",\\"k\\":10}"]'.encode()
#
# req = urllib.request.Request(scoring_url, data=input_data)
# req.add_header("Authorization","Bearer {}".format(service_key))
# req.add_header("Content-Type","application/json")
#
# ...
# ```
| examples/05_operationalize/als_movie_o16n.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import re
import urllib
import pandas as pd
import numpy as np
# Widen pandas display so the very wide source spreadsheet can be inspected.
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_colwidth', 100)
# -
# ## Importing the data
# Load the raw Creative Europe export and rename columns to snake_case.
creativeeurope = pd.read_excel('input/CreativeEurope_Projects_Overview_2018-08-01.xls')
creativeeurope.shape
creativeeurope.head()
list(creativeeurope)
creativeeurope = creativeeurope.rename(columns={
    'Programme': 'funds',
    'Sub-programme': 'category',
    'Action': 'action',
    'Activity type': 'activity_type',
    'Call year': 'call_year',
    'Start date': 'start_date',
    'End date': 'end_date',
    'Project Number': 'project_number',
    'Project Title': 'project',
    'Project Summary': 'summary',
    'Project Status': 'project_status',
    "EU Grant award in euros (This amount represents the grant awarded after the selection stage and is indicative. Please note that any changes made during or after the project's lifetime will not be reflected here.)": 'max_contribution_eur',
    'Is Success Story': 'is_success',
    'Project Website': 'project_url',
    'Results Available': 'results_available',
    'Results Platform Project Card': 'results_url',
    'Participating countries': 'participating_countries',
    "Coordinator's name": 'coord_name',
    'Coordinator organisation type': 'coord_org_type',
    "Coordinator's address": 'coord_address',
    "Coordinator's region": 'coord_region',
    "Coordinator's country": 'coord_country',
    "Coordinator's website": 'coord_website'
}).copy()
creativeeurope.head()
# ### Unnamed Column
#
# Apparently a placeholder for projects with more than 36 partners.
[creativeeurope.shape, creativeeurope['Unnamed: 251'].isna().sum()]
creativeeurope['Unnamed: 251'][~creativeeurope['Unnamed: 251'].isna()]
creativeeurope.rename(columns={'Unnamed: 251': 'extra_partners'}, inplace=True)
# ### Project Number
#
# Fortunately, this looks to be an ID.
# Sanity checks: project_number has no NaNs, no stray whitespace, and is
# unique (also case-insensitively) — safe to use as the primary key.
creativeeurope.project_number.isna().sum()
(creativeeurope.project_number.str.strip() != creativeeurope.project_number).sum()
[
    creativeeurope.shape,
    creativeeurope.project_number.nunique(),
    creativeeurope.project_number.str.upper().nunique()
]
# ## Extract Projects from Partners and Coordinators
# Project-level columns only; partner/coordinator columns are handled below.
projects = creativeeurope[[
    'project_number', 'funds', 'category', 'action', 'activity_type',
    'call_year', 'start_date', 'end_date',
    'project', 'summary', 'project_status',
    'max_contribution_eur', 'is_success', 'project_url',
    'results_available', 'results_url',
    'participating_countries', 'extra_partners'
]].copy()
projects.shape
# ### Funds
#
# Always the same.
projects.funds.isna().sum()
projects.funds.unique()
# ### Category
projects.category.isna().sum()
projects.category.unique()
# ### Action
projects.action.isna().sum()
projects.action.unique()
# ### Activity Type
projects.activity_type.isna().sum()
projects.activity_type[~projects.activity_type.isna()].sort_values().unique()[0:10]
# ### Call Year
projects.call_year.isna().sum()
projects.call_year.unique()
projects.call_year = projects.call_year.astype('int32')
projects.call_year.describe()
# ### Start and End Dates
# NaN counts and dtypes for both date columns, then verify start < end.
[projects.start_date.isna().sum(), projects.start_date.dtype]
# Fixed copy-paste bug: this line originally re-checked start_date.isna()
# instead of end_date.isna(), so missing end dates went unverified.
[projects.end_date.isna().sum(), projects.end_date.dtype]
(projects.start_date >= projects.end_date).sum()
projects.start_date.describe()
projects.end_date.describe()
# ### Project
projects.project.isna().sum()
(projects.project != projects.project.str.strip()).sum()
projects.project = projects.project.str.strip()
# ### Summary
projects.summary.isna().sum()
projects.summary[projects.summary.str.strip() != projects.summary] # lots
projects.summary = projects.summary.str.strip()
# ### Project Status
projects.project_status.isna().sum()
projects.project_status.unique()
# ### EU Investment
#
projects.max_contribution_eur.isna().sum()
projects.max_contribution_eur = projects.max_contribution_eur.map(str).str.strip()
# Flag any value containing a character that is not a digit or decimal point.
max_contribution_eur_bad = projects.max_contribution_eur.str.match(re.compile(r'.*[^0-9.].*'))
projects.max_contribution_eur[max_contribution_eur_bad]
projects.max_contribution_eur = projects.max_contribution_eur.astype('float')
projects.max_contribution_eur.describe()
# ### Is Success
projects.is_success.isna().sum()
projects.is_success.unique()
(projects.is_success == 'Yes').sum()
# ### Project URL
(~projects.project_url.isna()).sum()
projects.project_url[~projects.project_url.isna()].head()
# +
def is_valid_url(url):
    """Return True iff *url* parses with both a scheme and a host part."""
    parts = urllib.parse.urlparse(str(url))
    has_scheme = bool(parts.scheme)
    has_host = bool(parts.netloc)
    return has_scheme and has_host
# Count non-null project URLs that fail basic scheme+host validation.
(~projects.project_url.isna() & ~projects.project_url.apply(is_valid_url)).sum()
# -
# ### Results Available
projects.results_available.isna().sum()
projects.results_available.unique()
(projects.results_available == 'Yes').sum()
# ### Results URL
#
# It looks like every project has a page. Some projects have extra results uploaded on that page.
projects.results_url.isna().sum()
projects.results_url[projects.results_available == 'Yes'].values[0:5]
(~projects.results_url.isna() & ~projects.results_url.apply(is_valid_url)).sum()
# ### Participating Countries
projects.participating_countries.isna().sum()
projects.participating_countries.head()
# ## Extract Coordinators
#
# The coordinator is like a special partner, so make the names consistent, and we can treat partners and coordinators the same for cleaning purposes.
# Coordinator columns, renamed to match the partner column names so both
# frames can be cleaned and concatenated uniformly.
coordinators = creativeeurope[[
    'project_number',
    'coord_name',
    'coord_org_type',
    'coord_address',
    'coord_region',
    'coord_country',
    'coord_website'
]].copy()
coordinators.shape
coordinators.rename(columns={
    'coord_name': 'name',
    'coord_org_type': 'type',
    'coord_address': 'address',
    'coord_region': 'region',
    'coord_country': 'country',
    'coord_website': 'website',
}, inplace=True)
coordinators['coordinator'] = True
coordinators.head()
coordinators.count()
# ### Name
(coordinators.name.str.strip() != coordinators.name).sum()
coordinators.name = coordinators.name.str.strip()
coordinators.name.unique().shape
# ### Type
coordinators.type.isna().sum()
(coordinators.type[~coordinators.type.isna()] != coordinators.type[~coordinators.type.isna()].str.strip()).sum()
coordinators[~coordinators.type.isna()].type.sort_values().unique()[0:10]
# ### Country
coordinators.country.isna().sum()
[
    coordinators.shape[0],
    (coordinators.country != coordinators.country.str.strip()).sum(),
    (coordinators.country != coordinators.country.str.upper()).sum(),
    (coordinators.country.str.match('[A-Z]{2}')).sum()
]
# ### Website
(~coordinators.website.isna() & ~coordinators.website.apply(is_valid_url)).sum()
[
    coordinators.website.str.startswith('http').sum(),
    (~coordinators.website.isna() & coordinators.website.apply(is_valid_url)).sum()
]
# Prefix bare domains with http:// so they validate.
coordinators.loc[
    ~coordinators.website.isna() &
    ~coordinators.website.apply(is_valid_url), 'website'] = 'http://' + coordinators.website
(~coordinators.website.isna() & ~coordinators.website.apply(is_valid_url)).sum()
coordinators.website.head()
# ### Postcodes for UK Coordinators
#
# They are embedded in the addresses. Use the regex from [Wikipedia](https://en.wikipedia.org/w/index.php?title=Postcodes_in_the_United_Kingdom&oldid=855238661). Note: the page was recently edited with a different regex, but it seems to work OK.
coordinators_uk = coordinators[coordinators.country == 'UK'].copy()
[coordinators_uk.shape[0], coordinators.shape[0]]
ukpostcodes = pd.read_csv('../postcodes/input/ukpostcodes.csv.gz')
ukpostcodes.shape
VALID_POSTCODE_RE = re.compile(
    r'([A-Za-z][A-Ha-hJ-Yj-y]?[0-9][A-Za-z0-9]? ?[0-9][A-Za-z]{2}|[Gg][Ii][Rr] ?0[Aa]{2})'
)
assert ukpostcodes.postcode.str.match(VALID_POSTCODE_RE).sum() == ukpostcodes.shape[0]
coordinators_uk['raw_postcode'] = \
    coordinators_uk.address.str.extract(VALID_POSTCODE_RE)[0]
coordinators_uk.raw_postcode.head()
coordinators_uk[coordinators_uk.raw_postcode.isna()]
# It appears to be missing for that one.
coordinators_uk.raw_postcode.isin(ukpostcodes.postcode).sum()
# +
def find_postcode_from_raw_postcode(raw_postcode):
    """Normalise a Series of raw UK postcodes to canonical 'OUTCODE INCODE' form.

    Upper-cases, strips, removes every non-alphanumeric character, then
    re-inserts the single space before the 3-character inward code.
    """
    # regex=True is required: pandas >= 2.0 treats str.replace patterns as
    # literal text by default, which would silently break both steps here
    # (the character class and the capture-group rewrite).
    return (raw_postcode
            .str.upper()
            .str.strip()
            .str.replace(r'[^A-Z0-9]', '', regex=True)
            .str.replace(r'^(\S+)([0-9][A-Z]{2})$', r'\1 \2', regex=True))
# Normalise the extracted postcodes and keep only coordinators whose
# postcode appears in the reference list.
coordinators_uk['postcode'] = find_postcode_from_raw_postcode(coordinators_uk.raw_postcode)
coordinators_uk.postcode.isin(ukpostcodes.postcode).sum()
# -
coordinators_uk.postcode[~coordinators_uk.postcode.isin(ukpostcodes.postcode)].unique()
coordinators_uk[~coordinators_uk.postcode.isin(ukpostcodes.postcode)]
clean_coordinators_uk = coordinators_uk[
    coordinators_uk.postcode.isin(ukpostcodes.postcode)
].copy()
clean_coordinators_uk.drop('raw_postcode', axis=1, inplace=True)
clean_coordinators_uk.shape
# ## Extract Partners
# Rename 'Partner N field' columns to 'Partner_field_N' so pd.wide_to_long
# can reshape them (stub names separated from the partner number by sep='_').
creativeeurope.columns = [
    re.sub(r'^Partner (\d+) (.+)$', r'Partner_\2_\1', column)
    for column in creativeeurope.columns
]
creativeeurope.head()
partner_columns = [
    column for column in creativeeurope.columns
    if column.startswith('Partner_')
]
partners_wide = creativeeurope[['project_number'] + partner_columns]
partners_wide.head()
partners = pd.wide_to_long(
    partners_wide,
    ['Partner_name','Partner_organisation type', 'Partner_address', 'Partner_country', 'Partner_region', 'Partner_website'],
    'project_number', 'partner_number',
    sep='_'
)
partners.head()
partners = partners.rename(columns={
    'Partner_name': 'name',
    'Partner_organisation type': 'type',
    'Partner_address': 'address',
    'Partner_country': 'country',
    'Partner_region': 'region',
    'Partner_website': 'website'
}).copy()
partners['coordinator'] = False
partners.head()
partners.count()
# Most partner slots are empty (wide format reserved 36+ slots) — drop them.
partners = partners[~partners.name.isna()].copy()
partners.count()
# ### Name
(partners.name.str.strip() != partners.name).sum()
partners.name = partners.name.str.strip()
partners.name.unique().shape
# ### Type
partners.type.isna().sum()
(partners.type[~partners.type.isna()] != partners.type[~partners.type.isna()].str.strip()).sum()
partners[~partners.type.isna()].type.sort_values().unique()[0:10]
# ### Country
partners.country.isna().sum()
[
    partners.shape[0],
    (partners.country != partners.country.str.strip()).sum(),
    (partners.country != partners.country.str.upper()).sum(),
    (partners.country.str.match('[A-Z]{2}')).sum()
]
# ### Website
(~partners.website.isna() & ~partners.website.apply(is_valid_url)).sum()
[
    partners.website.str.startswith('http').sum(),
    (~partners.website.isna() & partners.website.apply(is_valid_url)).sum()
]
partners.website[
    partners.website.str.startswith('http') &
    ~partners.website.apply(is_valid_url)]
# Fix the 'http//:' typo; NOTE(review): under pandas >= 2.0 str.replace
# defaults to literal matching — harmless here since the pattern has no
# regex metacharacters, but worth confirming on upgrade.
partners.website = partners.website.str.replace(r'http//:', 'http://')
partners.loc[
    ~partners.website.isna() &
    ~partners.website.apply(is_valid_url), 'website'] = 'http://' + partners.website
(~partners.website.isna() & ~partners.website.apply(is_valid_url)).sum()
partners.website.head()
# ### Separating out UK partners
partners_uk = partners[partners.country == 'UK'].copy()
[partners_uk.shape, partners.shape]
partners_uk['raw_postcode'] = \
    partners_uk.address.str.extract(VALID_POSTCODE_RE)[0]
partners_uk.raw_postcode.head()
partners_uk[partners_uk.raw_postcode.isna()]
# It looks like it should be 4AA.
partners_uk.raw_postcode.isin(ukpostcodes.postcode).sum()
partners_uk['postcode'] = find_postcode_from_raw_postcode(partners_uk.raw_postcode)
partners_uk.postcode.isin(ukpostcodes.postcode).sum()
partners_uk.postcode[~partners_uk.postcode.isin(ukpostcodes.postcode)].unique()
partners_uk[~partners_uk.postcode.isin(ukpostcodes.postcode)]
clean_partners_uk = partners_uk[partners_uk.postcode.isin(ukpostcodes.postcode)].copy()
clean_partners_uk.drop('raw_postcode', axis=1, inplace=True)
clean_partners_uk.reset_index(inplace=True)
clean_partners_uk.shape
# ## Count Organisations and Countries
#
# It is useful to know the total number of organisations and the number of countries involved, to deal with cases where the contribution of each organisation is unknown.
# All organisations (partners + coordinators) keyed by project, used to
# derive per-project organisation and country counts.
organisations = pd.concat([
    partners.reset_index()[['project_number', 'country']],
    coordinators.reset_index()[['project_number', 'country']]
])
organisations.shape
project_num_organisations = organisations.groupby('project_number').\
    country.count().reset_index().rename(columns={'country': 'num_organisations'})
[projects.shape[0], project_num_organisations.shape]
# Cross-check with partner numbers:
# Partner numbers are 0-based slots, so max + 1 = partner count + coordinator.
project_num_organisations_check = \
    (partners.reset_index().groupby('project_number').partner_number.max() + 1).\
    reset_index().rename(columns={'partner_number': 'num_organisations'})
[projects.shape[0], project_num_organisations_check.shape]
def compare_project_num_organisations():
    """Count projects where the two organisation-count derivations disagree."""
    merged = project_num_organisations.merge(
        project_num_organisations_check, on='project_number', how='left')
    # Projects absent from the check frame have no partners, i.e. exactly
    # one organisation (the coordinator).
    expected = merged.num_organisations_y.fillna(1)
    return (merged.num_organisations_x != expected).sum()
# Expect 0: both derivations of the organisation count must agree.
compare_project_num_organisations()
project_num_countries = organisations.groupby('project_number').\
    country.nunique().reset_index().rename(columns={'country': 'num_countries'})
[projects.shape[0], project_num_countries.shape]
project_num_organisations_and_countries = pd.merge(
    project_num_countries, project_num_organisations,
    on='project_number', validate='1:1'
)
project_num_organisations_and_countries.shape
project_num_organisations_and_countries.head()
# validate='1:1' guards against accidental key duplication on either side.
projects = pd.merge(projects, project_num_organisations_and_countries,
                    on='project_number', validate='1:1')
# ## Save Data
# ### Organisations
# Combine cleaned UK coordinators and partners into one organisations table.
organisations_uk = pd.concat([clean_coordinators_uk, clean_partners_uk], sort=True)
[
    organisations_uk.shape,
    clean_coordinators_uk.shape,
    clean_partners_uk.shape
]
organisations_uk.rename(columns={
    'name': 'organisation_name',
    'type': 'organisation_type',
    'address': 'organisation_address',
    'country': 'organisation_country',
    'region': 'organisation_region',
    'website': 'organisation_website',
    'coordinator': 'organisation_coordinator'
}, inplace=True)
organisations_uk
organisations_uk.project_number.unique().shape
organisations_uk.to_pickle('output/creative_europe_organisations.pkl.gz')
# ### Projects in the UK
projects_uk_full = pd.merge(projects, organisations_uk, on='project_number', validate='1:m')
projects_uk_full.shape
projects_uk_full.head()
# Projects with at least one UK organisation.
projects_uk = projects[projects.project_number.isin(organisations_uk.project_number)].copy()
projects_uk.shape
# #### Convert to GBP
eur_gbp = pd.read_pickle('../exchange_rates/output/exchange_rates.pkl.gz')
eur_gbp.tail()
# +
def find_average_eur_gbp_rate(row):
    """Day-weighted average EUR/GBP rate over a project's [start, end) interval.

    Builds one row per day, groups days into months, and averages the monthly
    rate from `eur_gbp` weighted by the fraction of project days in each month.
    """
    # create timeseries from start to end
    # NOTE(review): `closed='left'` was removed in pandas 2.0 in favour of
    # `inclusive='left'` — this call needs updating before a pandas upgrade.
    days = pd.date_range(row.start_date, row.end_date, closed='left')
    daily = pd.DataFrame({
        'month_start': days,
        'weight': 1.0 / days.shape[0]
    })
    monthly = daily.resample('MS', on='month_start').sum()
    monthly = pd.merge(monthly, eur_gbp, on='month_start', validate='1:1')
    return (monthly.weight * monthly.rate).sum()
projects_uk['eur_gbp'] = projects_uk.apply(
    find_average_eur_gbp_rate, axis=1, result_type='reduce')
# -
projects_uk.head()
projects_uk.to_pickle('output/creative_europe_projects.pkl.gz')
| data/creative/creative_clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the credit-card default dataset and drop all-zero (empty) rows.
df_orig = pd.read_excel('default_of_credit_card_clients.xls')
df_zero_mask = df_orig == 0
# Rows where every feature (all columns except ID) is zero are blank records.
feature_zero_mask = df_zero_mask.iloc[:,1:].all(axis=1)
sum(feature_zero_mask)
df_clean = df_orig.loc[~feature_zero_mask,:].copy()
df_clean.shape
df_clean['ID'].nunique()
# Collapse undocumented EDUCATION codes (0, 5, 6) into 4 = "other",
# and undocumented MARRIAGE code 0 into 3 = "other".
df_clean['EDUCATION'].replace(to_replace=[0, 5, 6], value=4, inplace=True)
df_clean['MARRIAGE'].replace(to_replace=0, value=3, inplace=True)
df_clean['PAY_1'].value_counts()
missing_pay_1_mask = df_clean['PAY_1'] == 'Not available'
sum(missing_pay_1_mask)
df_missing_pay_1 = df_clean.loc[missing_pay_1_mask,:].copy()
df = pd.read_csv('cleaned_data.csv')
df.columns
features_response = df.columns.tolist()
items_to_remove = ['ID', 'SEX', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6',
                   'EDUCATION_CAT', 'graduate school', 'high school', 'none',
                   'others', 'university']
features_response = [item for item in features_response if item not in items_to_remove]
features_response
import numpy as np
from sklearn.model_selection import train_test_split
# 80/20 split; the last entry of features_response is the response column.
X_train, X_test, y_train, y_test = \
train_test_split(df[features_response[:-1]].values, df['default payment next month'].values,
test_size=0.2, random_state=24)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
features_response[4]
np.median(X_train[:,4])
np.random.seed(seed=1)
# Two imputation candidates for PAY_1: the mode (0) and a random draw from
# the observed distribution (3021 = number of missing PAY_1 rows).
fill_values = [0, np.random.choice(X_train[:,4], size=(3021,), replace=True)]
fill_strategy = ['mode', 'random']
fill_values[-1]
import matplotlib.pyplot as plt #plotting package
# %matplotlib inline
fig, axs = plt.subplots(1,2, figsize=(8,3))
bin_edges = np.arange(-2,9)
axs[0].hist(X_train[:,4], bins=bin_edges, align='left')
axs[0].set_xticks(bin_edges)
axs[0].set_title('Non-missing values of PAY_1')
axs[1].hist(fill_values[-1], bins=bin_edges, align='left')
axs[1].set_xticks(bin_edges)
axs[1].set_title('Random selection for imputation')
plt.tight_layout()
from sklearn.model_selection import KFold
k_folds = KFold(n_splits=4, shuffle=True, random_state=1)
from sklearn.model_selection import cross_validate
from sklearn.ensemble import RandomForestClassifier
# NOTE(review): min_impurity_split and max_features='auto' are removed in
# modern scikit-learn — this pins an older sklearn version.
rf = RandomForestClassifier(
    n_estimators=200, criterion='gini', max_depth=9,
    min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0,
    max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0,
    min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None,
    random_state=4, verbose=1, warm_start=False, class_weight=None)
# Compare the two PAY_1 imputation strategies by 4-fold cross-validated AUC.
for counter in range(len(fill_values)):
    #Copy the data frame with missing PAY_1 and assign imputed values
    df_fill_pay_1_filled = df_missing_pay_1.copy()
    df_fill_pay_1_filled['PAY_1'] = fill_values[counter]
    #Split imputed data in to training and testing, using the same
    #80/20 split we have used for the data with non-missing PAY_1
    X_fill_pay_1_train, X_fill_pay_1_test, y_fill_pay_1_train, y_fill_pay_1_test = \
    train_test_split(
        df_fill_pay_1_filled[features_response[:-1]].values,
        df_fill_pay_1_filled['default payment next month'].values,
    test_size=0.2, random_state=24)
    #Concatenate the imputed data with the array of non-missing data
    X_train_all = np.concatenate((X_train, X_fill_pay_1_train), axis=0)
    y_train_all = np.concatenate((y_train, y_fill_pay_1_train), axis=0)
    #Use the KFolds splitter and the random forest model to get
    #4-fold cross-validation scores for both imputation methods
    imputation_compare_cv = cross_validate(rf, X_train_all, y_train_all, scoring='roc_auc',
                            cv=k_folds, n_jobs=-1, verbose=1,
                            return_train_score=True, return_estimator=True,
                            error_score='raise-deprecating')
    test_score = imputation_compare_cv['test_score']
    print(fill_strategy[counter] + ' imputation: ' +
          'mean testing score ' + str(np.mean(test_score)) +
          ', std ' + str(np.std(test_score)))
# Model-based imputation: train a classifier to predict PAY_1 from the
# other features, then fill the missing values with its predictions.
pay_1_df = df.copy()
features_for_imputation = pay_1_df.columns.tolist()
items_to_remove = ['ID', 'SEX', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6',
                   'EDUCATION_CAT', 'graduate school', 'high school', 'none',
                   'others', 'university', 'default payment next month', 'PAY_1']
features_for_imputation = [item for item in features_for_imputation if item not in items_to_remove]
features_for_imputation
X_impute_train, X_impute_test, y_impute_train, y_impute_test = \
train_test_split(
    pay_1_df[features_for_imputation].values,
    pay_1_df['PAY_1'].values,
test_size=0.2, random_state=24)
rf_impute_params = {'max_depth':[3, 6, 9, 12],
             'n_estimators':[10, 50, 100, 200]}
from sklearn.model_selection import GridSearchCV
# NOTE(review): iid= was removed in scikit-learn 0.24 — version-pinned code.
cv_rf_impute = GridSearchCV(rf, param_grid=rf_impute_params, scoring='accuracy', n_jobs=-1, iid=False, refit=True,
                            cv=4, verbose=2, error_score=np.nan, return_train_score=True)
cv_rf_impute.fit(X_impute_train, y_impute_train)
cv_rf_impute.best_params_
cv_rf_impute.best_score_
pay_1_value_counts = pay_1_df['PAY_1'].value_counts().sort_index()
pay_1_value_counts
# Class proportions — the baseline an imputer must beat.
pay_1_value_counts/pay_1_value_counts.sum()
y_impute_predict = cv_rf_impute.predict(X_impute_test)
from sklearn import metrics
metrics.accuracy_score(y_impute_test, y_impute_predict)
# Compare the distribution of predicted PAY_1 values with the true ones.
fig, axs = plt.subplots(1,2, figsize=(8,3))
axs[0].hist(y_impute_test, bins=bin_edges, align='left')
axs[0].set_xticks(bin_edges)
axs[0].set_title('Non-missing values of PAY_1')
axs[1].hist(y_impute_predict, bins=bin_edges, align='left')
axs[1].set_xticks(bin_edges)
axs[1].set_title('Model-based imputation')
plt.tight_layout()
# Refit the imputer on all labelled data with the tuned hyperparameters.
X_impute_all = pay_1_df[features_for_imputation].values
y_impute_all = pay_1_df['PAY_1'].values
rf_impute = RandomForestClassifier(n_estimators=100, max_depth=12)
rf_impute.fit(X_impute_all, y_impute_all)
df_fill_pay_1_model = df_missing_pay_1.copy()
df_fill_pay_1_model['PAY_1'].head()
df_fill_pay_1_model['PAY_1'] = rf_impute.predict(df_fill_pay_1_model[features_for_imputation].values)
df_fill_pay_1_model['PAY_1'].head()
df_fill_pay_1_model['PAY_1'].value_counts().sort_index()
X_fill_pay_1_train, X_fill_pay_1_test, y_fill_pay_1_train, y_fill_pay_1_test = \
train_test_split(
    df_fill_pay_1_model[features_response[:-1]].values,
    df_fill_pay_1_model['default payment next month'].values,
test_size=0.2, random_state=24)
X_train_all = np.concatenate((X_train, X_fill_pay_1_train), axis=0)
y_train_all = np.concatenate((y_train, y_fill_pay_1_train), axis=0)
rf
imputation_compare_cv = cross_validate(rf, X_train_all, y_train_all, scoring='roc_auc',
                        cv=k_folds, n_jobs=-1, verbose=1,
                        return_train_score=True, return_estimator=True,
                        error_score='raise-deprecating')
np.mean(imputation_compare_cv['test_score'])
# Final choice: impute PAY_1 with zeros (the mode), rebuild the full
# train/test arrays, fit, and score on the held-out data.
df_fill_pay_1_model['PAY_1'] = np.zeros_like(df_fill_pay_1_model['PAY_1'].values)
df_fill_pay_1_model['PAY_1'].unique()
X_fill_pay_1_train, X_fill_pay_1_test, y_fill_pay_1_train, y_fill_pay_1_test = \
train_test_split(
    df_fill_pay_1_model[features_response[:-1]].values,
    df_fill_pay_1_model['default payment next month'].values,
test_size=0.2, random_state=24)
X_train_all = np.concatenate((X_train, X_fill_pay_1_train), axis=0)
X_test_all = np.concatenate((X_test, X_fill_pay_1_test), axis=0)
y_train_all = np.concatenate((y_train, y_fill_pay_1_train), axis=0)
y_test_all = np.concatenate((y_test, y_fill_pay_1_test), axis=0)
imputation_compare_cv = cross_validate(rf, X_train_all, y_train_all, scoring='roc_auc',
                        cv=k_folds, n_jobs=-1, verbose=1,
                        return_train_score=True, return_estimator=True,
                        error_score='raise-deprecating')
np.mean(imputation_compare_cv['test_score'])
rf.fit(X_train_all, y_train_all)
y_test_all_predict_proba = rf.predict_proba(X_test_all)
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test_all, y_test_all_predict_proba[:,1])
# Financial-analysis parameters: column 5 is the credit limit (LIMIT_BAL);
# its mean is used as the savings from preventing one default.
thresholds = np.linspace(0, 1, 101)
df[features_response[:-1]].columns[5]
savings_per_default = np.mean(X_test_all[:, 5])
savings_per_default
cost_per_counseling = 7500
effectiveness = 0.70
# Sweep the classification threshold and compute the counseling cost and
# default savings at each one.
n_pos_pred = np.empty_like(thresholds)
cost_of_all_counselings = np.empty_like(thresholds)
n_true_pos = np.empty_like(thresholds)
savings_of_all_counselings = np.empty_like(thresholds)
# (Removed `savings_based_on_balances`: it was allocated but never populated
# or used anywhere below.)
# enumerate replaces the original manual `counter = 0 ... counter += 1`.
for counter, threshold in enumerate(thresholds):
    # Accounts flagged for counseling at this threshold.
    pos_pred = y_test_all_predict_proba[:,1]>threshold
    n_pos_pred[counter] = sum(pos_pred)
    cost_of_all_counselings[counter] = n_pos_pred[counter] * cost_per_counseling
    # Flagged accounts that would actually have defaulted.
    true_pos = pos_pred & y_test_all.astype(bool)
    n_true_pos[counter] = sum(true_pos)
    savings_of_all_counselings[counter] = n_true_pos[counter] * savings_per_default * effectiveness
net_savings = savings_of_all_counselings - cost_of_all_counselings
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 400
# Net savings as a function of the decision threshold.
plt.plot(thresholds, net_savings)
plt.xlabel('Threshold')
plt.ylabel('Net savings (NT$)')
plt.xticks(np.linspace(0,1,11))
plt.grid(True)
# Threshold that maximises the net savings, and the savings achieved there.
max_savings_ix = np.argmax(net_savings)
thresholds[max_savings_ix]
net_savings[max_savings_ix]
plt.hist(y_test_all_predict_proba[:,1], bins=30)
plt.xlabel('Predicted probability of default')
plt.ylabel('Number of accounts')
| internship-problem 7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Fundamental libraries for math and stats processing
import numpy as np
import numpy.random as nr
import scipy.stats as ss
import math
#data preparation
import pandas as pd
#ML preprocessing
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import sklearn.model_selection as ms
from sklearn.pipeline import make_pipeline
from sklearn import feature_selection as fs
# ML algorithms models
from sklearn import linear_model as lm
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor
from xgboost import XGBRegressor
# ML Evaluations
import sklearn.metrics as sklm
from sklearn import metrics
from sklearn.model_selection import cross_validate, train_test_split
#Ploting
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# +
import requests
# Download the FiveThirtyEight NBA Elo dataset to a local CSV.
download_url = "https://raw.githubusercontent.com/fivethirtyeight/data/master/nba-elo/nbaallelo.csv"
target_csv_path = "nba_all_elo.csv"
response = requests.get(download_url)
response.raise_for_status()  # Check that the request was successful
with open(target_csv_path, "wb") as f:
    f.write(response.content)
print("Download ready.")
# -
nba = pd.read_csv("nba_all_elo.csv")
nba.head()
type(nba),nba.shape
pd.set_option("display.max.columns", None)
pd.set_option("display.precision", 2)
nba.tail()
| Woman_big_Data/stack_wxmen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bar Charts and Heatmaps
#Import libraries
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# # Loading The Data
# Load the monthly average arrival-delay table (one column per airline code).
flight_path = pd.read_csv("flight_delays.csv")
flight_path
# # Examine Data
flight_path.head()
# Re-read with Month as the index so plots are labeled by month.
df = pd.read_csv('flight_delays.csv', index_col="Month")
df
# # Bar chart
# 'NK' is Spirit Airlines' carrier code.
plt.figure(figsize=(10,6))
plt.title("Average Arrival Delay for Spirit Airlines Flights, by Month")
sns.barplot(x=df.index, y=df['NK'])
plt.ylabel("Arrival delay (in minutes)")
# # Heatmap
# One cell per (month, airline) pair; annot=True prints the value in each cell.
plt.figure(figsize=(14,7))
plt.title("Average Arrival Delay for Each Airline, by Month")
sns.heatmap(data=df, annot=True)
plt.xlabel("Airline")
| Data Visualization/Bar Charts and Heatmaps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import numpy as np
import pandas as pd
import re
import nltk
import string
from nltk.tokenize import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
from sklearn.utils.multiclass import unique_labels
from zipfile import ZipFile
# +
# Unpack the zipped dataset and inspect the raw tweets and their labels.
# NOTE(review): the context-manager variable shadows the builtin `zip`.
with ZipFile("gbv.zip","r") as zip:
    zip.extractall()
df = pd.read_csv("Train.csv")
print(df["tweet"])
print(unique_labels(df["type"]))  # the distinct violence-type categories
# +
def clean_text(x):
    """Normalize a raw tweet for tokenization.

    Lower-cases the text, drops non-ASCII characters, and strips URLs,
    @mentions, #hashtags, apostrophe suffixes, punctuation, and any word
    containing a digit, then collapses runs of whitespace to one space.

    Parameters
    ----------
    x : str
        Raw tweet text.

    Returns
    -------
    str
        Cleaned text (may retain a leading/trailing space from substitutions).
    """
    x = x.lower()
    # Drop any character outside ASCII (emoji, accented letters, ...).
    x = x.encode("ascii","ignore").decode()
    # Raw strings below fix the invalid-escape SyntaxWarnings the original
    # non-raw patterns ("\S", "\w", "\d", "\s") emit on modern Python.
    x = re.sub(r"https*\S+"," ",x)   # URLs (matches http and https)
    x = re.sub(r"@\S+"," ",x)        # @mentions
    x = re.sub(r"#\S+"," ",x)        # #hashtags
    x = re.sub(r"\'\w+","",x)        # apostrophe suffixes, e.g. "don't" -> "don"
    x = re.sub("[%s]" % re.escape(string.punctuation)," ",x)  # punctuation
    x = re.sub(r"\w*\d+\w*","",x)    # words containing digits
    x = re.sub(r"\s{2,}"," ",x)      # collapse repeated whitespace
    return x
# Clean every tweet, tokenize it with NLTK, then detokenize back into a
# normalized sentence (the round-trip standardizes spacing).
data_to_list = df["tweet"]
temp = [clean_text(data_to_list[i]) for i in range(len(data_to_list))]

def tokenize(y):
    """Yield the NLTK word-token list for each text in *y*."""
    for x in y:
        yield word_tokenize(str(x))

data_words = list(tokenize(temp))

def detokenize(txt):
    """Join a token list back into a single string."""
    return TreebankWordDetokenizer().detokenize(txt)

final_data = [detokenize(tokens) for tokens in data_words]
print(final_data[:5])
final_data = np.array(final_data)
# -
import pickle
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
# +
# Vocabulary size and fixed sequence length for the tokenizer.
max_words = 16000
max_len = 200

# Fit a Keras tokenizer on the cleaned tweets and pad every sequence so
# they can be batched together.
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(final_data)
sequences = tokenizer.texts_to_sequences(final_data)
tweets = pad_sequences(sequences,maxlen=max_len)

# Persist the fitted tokenizer for reuse at inference time.
with open("tokenizer.pickle","wb") as handle:
    pickle.dump(tokenizer,handle,protocol=pickle.HIGHEST_PROTOCOL)
print(tweets)

# Map each violence-type label to an integer class id.
# Renamed from `dict` to `label_map`: the original name shadowed the builtin.
label_map = {"Harmful_Traditional_practice":0,"Physical_violence":1,
             "economic_violence":2,"emotional_violence":3,
             "sexual_violence":4}
df["labels"] = ""
df["labels"] = df["type"].map(label_map)
labels = df["labels"]

# 75/25 train/test split, then carve a validation set out of the train part.
x_train,x_test,y_train,y_test = train_test_split(tweets,labels,random_state=42)
x_train,x_val,y_train,y_val = train_test_split(x_train,y_train,test_size=0.25,random_state=42)
# +
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Embedding, GRU, Dense

def model(y):
    """Build a 2-layer GRU classifier over embedded token ids.

    *y* is the symbolic Keras input tensor; returns a Model ending in a
    5-way softmax head (one unit per violence-type class).
    """
    hidden = Embedding(max_words, 128)(y)
    hidden = GRU(64, return_sequences=True)(hidden)
    hidden = GRU(64)(hidden)
    class_probs = Dense(5, activation="softmax")(hidden)
    net = Model(y, class_probs)
    return net

# NOTE: this rebinds the name `model` from the builder function to the built
# network, so the builder cannot be called again afterwards.
model = model(Input(shape=(None,),dtype="int32"))
model.summary()
# +
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import SparseCategoricalCrossentropy

# Train with integer labels (sparse categorical loss), checkpointing the
# weights with the best validation accuracy, then evaluate that best
# checkpoint on the held-out test split.
if __name__=="__main__":
    model.compile(Adam(),SparseCategoricalCrossentropy(),metrics=["accuracy"])
    # save_best_only keeps only the epoch with the highest val_accuracy.
    checkpoint = ModelCheckpoint("gbv.h5",monitor="val_accuracy",save_best_only=True,save_weights_only=False)
    model.fit(x_train,y_train,batch_size=32,epochs=4,validation_data=(x_val,y_val),callbacks=[checkpoint])
    best = load_model("gbv.h5")
    loss,acc = best.evaluate(x_test,y_test,verbose=2)
    print("\nTest acc: {:.2f} %".format(100*acc))
    # NOTE(review): loss is not a percentage; scaling it by 100 is misleading.
    print("Test loss: {:.2f} %".format(100*loss))
| GBV/gbv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/zevan07/DS-Unit-2-Tree-Ensembles/blob/master/DS_Sprint_Challenge_7_Classification_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="7NQhCv9N9NCo"
# _Lambda School Data Science, Unit 2_
#
# # Sprint Challenge: Predict Steph Curry's shots 🏀
#
# For your Sprint Challenge, you'll use a dataset with all Steph Curry's NBA field goal attempts. (Regular season and playoff games, from October 28, 2009, through June 5, 2019.)
#
# You'll use information about the shot and the game to predict whether the shot was made. This is hard to predict! Try for an accuracy score in the high 50's or low 60's. The dataset was collected with the [nba_api](https://github.com/swar/nba_api) Python library.
# + [markdown] colab_type="text" id="gWRX4uaY9NCw"
# This Sprint Challenge has two parts. To demonstrate mastery on each part, do all the required, numbered instructions. To earn a score of "3" for the part, also do the stretch goals.
#
# ## Part 1. Prepare to model
#
# ### Required
#
# 1. **Do train/validate/test split.** Use the 2009-10 season through 2016-17 season to train, the 2017-18 season to validate, and the 2018-19 season to test. NBA seasons begin in October and end in June. You'll know you've split the data correctly when your train set has 11081 observations, your validation set has 1168 observations, and your test set has 1709 observations.
# 2. **Begin with baselines for classification.** Your target to predict is `shot_made_flag`. What is the baseline accuracy for the validation set, if you guessed the majority class for every prediction?
# 3. **Use Ordinal Encoding _or_ One-Hot Encoding,** for the categorical features you select.
# 4. **Train a Random Forest _or_ Logistic Regression** with the features you select.
#
# ### Stretch goals
# Engineer at least 4 of these 5 features:
#
# - **Homecourt Advantage**: Is the home team (`htm`) the Golden State Warriors (`GSW`) ?
# - **Opponent**: Who is the other team playing the Golden State Warriors?
# - **Seconds remaining in the period**: Combine minutes remaining with seconds remaining, to get the total number of seconds remaining in the period.
# - **Seconds remaining in the game**: Combine period, and seconds remaining in the period, to get the total number of seconds remaining in the game. A basketball game has 4 periods, each 12 minutes long.
# - **Made previous shot**: Was Steph Curry's previous shot successful?
#
#
# ## Part 2. Evaluate models
#
# ### Required
# 1. Get your model's **validation accuracy.** (Multiple times if you try multiple iterations.)
# 2. Get your model's **test accuracy.** (One time, at the end.)
# 3. Get and plot your Random Forest's **feature importances** _or_ your Logistic Regression's **coefficients.**
# 4. Imagine this is the confusion matrix for a binary classification model. **Calculate accuracy, precision, and recall for this confusion matrix:**
#
# <table>
# <tr>
# <td colspan="2" rowspan="2"></td>
# <td colspan="2">Predicted</td>
# </tr>
# <tr>
# <td>Negative</td>
# <td>Positive</td>
# </tr>
# <tr>
# <td rowspan="2">Actual</td>
# <td>Negative</td>
# <td style="border: solid">85</td>
# <td style="border: solid">58</td>
# </tr>
# <tr>
# <td>Positive</td>
# <td style="border: solid">8</td>
# <td style="border: solid"> 36</td>
# </tr>
# </table>
#
#
# ### Stretch goals
# - Calculate F1 score for the provided, imaginary confusion matrix.
# - Plot a real confusion matrix for your basketball model, with row and column labels.
# - Print the classification report for your model.
# + id="EwNdoimxwa1o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 334} outputId="d8509b24-e6f2-4446-ce8a-6499b6da34dd"
# !pip install category_encoders
# + colab_type="code" id="5RDEWI9c9NCr" colab={}
import pandas as pd
# Load all of Steph Curry's field-goal attempts (2009-10 season through
# 2018-19), parsing game_date as a datetime up front.
url = 'https://drive.google.com/uc?export=download&id=1fL7KPyxgGYfQDsuJoBWHIWwCAf-HTFpX'
df = pd.read_csv(url, parse_dates=['game_date'])
assert df.shape == (13958, 20)  # sanity-check the download
# + id="DQ7uh0iM8-nN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 374} outputId="2a78c2e9-5efb-4035-8e2b-159ecb136f26"
# check for null values
df.isnull().sum()
# + id="kW489OtGwn4c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 314} outputId="9b30d361-0a7a-4fcb-f77c-78fb6d3fb64b"
df.head()
# + id="0GlorhPdw4YZ" colab_type="code" colab={}
# Date-derived features. (game_date was already parsed by read_csv, so this
# to_datetime call is a harmless no-op.)
df['game_date'] = pd.to_datetime(df['game_date'], infer_datetime_format=True)
df['game_month'] = df['game_date'].dt.month
df['game_day'] = df['game_date'].dt.day
df['game_year'] = df['game_date'].dt.year
# add home advantage
df['home_adv'] = df['htm'] == 'GSW'  # True when Golden State is the home team
# more feature engineering
# Opponent = whichever of visitor/home team is not GSW; replacing 'GSW' with
# '' in both columns leaves exactly the other team's code after concatenation.
df['opponent'] = (df['vtm'].replace('GSW', '') + df['htm'].replace('GSW', ''))
df['sec_remain_period'] = (df['minutes_remaining'] * 60) + df['seconds_remaining']
# NOTE(review): assumes 4 regulation periods of 12 minutes; for overtime rows
# (period > 4) this goes negative -- confirm whether OT shots matter here.
df['sec_remain_game'] = (df['minutes_remaining'] * 60) + df['seconds_remaining'] + ((4-df['period']) * 12 * 60)
# + id="9hFxq_omxEGY" colab_type="code" colab={}
# train / val / test split by season, per the assignment spec:
#   train = 2009-10 through 2016-17, validate = 2017-18, test = 2018-19.
# Fix: the original assigned the 2017-18 season to `test` and 2018-19 to
# `val`, the opposite of the spec (which expects val=1168 rows, test=1709).
train = df[(df['game_date'] >= '2009-10-1') & (df['game_date'] <= '2017-6-30')]
val = df[(df['game_date'] >= '2017-10-1') & (df['game_date'] <= '2018-6-30')]
test = df[(df['game_date'] >= '2018-10-1') & (df['game_date'] <= '2019-6-30')]
# + id="9YKLJr-T2VsY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="77a154f4-cf9a-4a31-9912-c0d1d185749c"
# Expect 11081 / 1168 / 1709 rows respectively.
train.shape, val.shape, test.shape
# + id="zasJxJ2V_GSO" colab_type="code" colab={}
# The binary prediction target: 1 if the shot was made.
target = 'shot_made_flag'
# + id="DmCTiRQU65XP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="e09d0de4-c956-4666-ea61-6c96c1740d6e"
# Class balance of the training target.
y_train = train[target]
y_train.value_counts(normalize=True)
# + id="BFjdf_4a8JIZ" colab_type="code" colab={}
# Majority-class baseline: always predict the most common outcome.
majority_class = y_train.mode()[0]
y_pred = [majority_class] * len(y_train)
# + id="f-Uq1c3U8Z5b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="38f86228-a19b-40ef-a251-c6d7ecf1e073"
from sklearn.metrics import accuracy_score
accuracy_score(y_train, y_pred)
# + id="qdM0YDcN9tmo" colab_type="code" colab={}
# Start from every column, then drop identifiers and the target itself.
features = df.columns.tolist()
# + id="jDShsFqu-s90" colab_type="code" colab={}
features.remove('player_name')
features.remove('game_date')
features.remove('shot_made_flag')
# + id="bFmHgCGu_VnI" colab_type="code" colab={}
from sklearn.pipeline import make_pipeline
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
# + id="vp--QbEs8fZr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cd9be016-3f5c-47f9-d8cb-6af44de06deb"
# Assemble feature/target matrices for each split.
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# Ordinal-encode the categorical columns, then fit a 100-tree random forest.
pipeline = make_pipeline(ce.OrdinalEncoder()
                         , RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=1))
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + [markdown] id="eC-2nbjZBqr2" colab_type="text"
# ## Part 2
# + id="UeObMq-U9ra7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ccdfe240-7c96-42b1-8838-e8909e2ea5a5"
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + id="ZzVWCRVyB__9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cd44ade2-dd6d-42fb-f144-1464b1e54299"
# Final, one-time evaluation on the held-out test season.
y_test = test[target]
print('Test Accuracy', pipeline.score(X_test, y_test))
# + id="uRuAeNZzD8hm" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
# + id="kwyZO-3PC__w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 680} outputId="731c3f12-375a-4540-a1b4-eb57f97823fb"
# get feature importances
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, X_train.columns)
# Plot the 10 most and 10 least important features as horizontal bars.
n = 10
plt.figure(figsize = (10, n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey')
n = 10
plt.figure(figsize = (10, n/2))
plt.title(f'Bottom {n} features')
importances.sort_values()[:n].plot.barh(color='grey');
# + id="xO_xZbCAD6ow" colab_type="code" colab={}
# + [markdown] id="t2AcrFEVFPoj" colab_type="text"
# ### Confusion Matrix
# <table>
# <tr>
# <td colspan="2" rowspan="2"></td>
# <td colspan="2">Predicted</td>
# </tr>
# <tr>
# <td>Negative</td>
# <td>Positive</td>
# </tr>
# <tr>
# <td rowspan="2">Actual</td>
# <td>Negative</td>
# <td style="border: solid">85</td>
# <td style="border: solid">58</td>
# </tr>
# <tr>
# <td>Positive</td>
# <td style="border: solid">8</td>
# <td style="border: solid"> 36</td>
# </tr>
# </table>
# + id="rhguZezxFk4E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="ecf47116-3f11-4d44-f0a7-bc4d6a01c326"
# Metrics for the imaginary confusion matrix above:
#   TN=85  FP=58  FN=8  TP=36
tn, fp, fn, tp = 85, 58, 8, 36
total = tn + fp + fn + tp
print('Accuracy:', (tp + tn) / total)
precision = tp / (tp + fp)
print('Precision:', precision)
recall = tp / (tp + fn)
print('Recall:', recall)
# F1 is the harmonic mean of precision and recall.
print('F1:', 2 * (recall * precision) / (recall + precision))
# + id="LsOtG9hdOn1m" colab_type="code" colab={}
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
# + id="FyfCBpJYPPVz" colab_type="code" colab={}
y_pred = pipeline.predict(X_test)
# + id="6VEwvvg-OXqY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="7ade3d64-2e6a-4eea-ee2c-ebd59b100a34"
def plot_confusion_matrix(y_true, y_pred):
    """Render a labeled confusion-matrix heatmap for the given predictions."""
    labels = unique_labels(y_true)
    columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
    table = pd.DataFrame(confusion_matrix(y_true, y_pred),
                         columns=columns, index=index)
    # annot=True prints the raw count in each cell; fmt='d' keeps them integers.
    return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
plot_confusion_matrix(y_test, y_pred);
# + id="geEtDV1lOlVZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="35fea9b7-3332-4303-a7ad-7da127b1918a"
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
# + id="5fBptshAQusw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6d955c0e-c480-4175-8ce6-d129a6106240"
from sklearn.linear_model import LogisticRegression
# Alternative model: one-hot encoding + logistic regression, for comparison
# with the ordinal-encoded random forest above.
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
pipeline = make_pipeline(ce.OneHotEncoder()
                         , LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=500))
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + id="RJ6pzhRkRf4Y" colab_type="code" colab={}
| DS_Sprint_Challenge_7_Classification_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SNES verification
# Each test problem is a critical assembly and should have $k_\mathrm{eff} = 1$.
#
# Each problem is run using a diamond-difference code version (yellow circles) and a linear-discontinuous finite-element version (blue diamonds).
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20,10)
# + magic_args="--out keffs" language="bash"
# grep 'K[ ]EFF' qa/*.outs | tr -s ' ' | cut -d' ' -f4 # Read the diamond-difference results
# + magic_args="--out keffl" language="bash"
# grep 'K[ ]EFF' qa/*.outl | tr -s ' ' | cut -d' ' -f4 # Read the linear-discontinuous results
# -
# Parse the k_eff values captured by the bash cells above.
snes = [float(s) for s in keffs.split()]  # diamond-difference results
snel = [float(s) for s in keffl.split()]  # linear-discontinuous results
n = len(snes)
x = range(1, n+1)
xc = [x for x in range(n+2)]  # Critical curve x-values (extends past the data)
# Fix: the original read `[1 for i in xw]`, but `xw` is undefined (NameError);
# the intended iterable is `xc` so yc has one point per xc entry.
yc = [1 for i in xc]  # Critical curve: k_eff = 1 everywhere
plt.plot(x, snes, 'o', color='orange', markersize=26, label="Diamond-difference")
plt.plot(x, snel, 'd', color='indigo', markersize=18, label="Linear discontinuous")
# Raw strings avoid invalid-escape warnings from the `\m` in \mathrm.
plt.plot(xc, yc, 'k--', label=r"Critical ($k_\mathrm{eff}=1$)")
plt.xlim([0.5, n+0.5])
plt.ylim([0.5, 1.1])
plt.xlabel("Test problem number", fontsize=20)
plt.ylabel(r"$k_\mathrm{eff}$", fontsize=20)
plt.title(r"$k_\mathrm{eff}$ results", fontsize=30)
plt.legend(fontsize=18, shadow=True, borderpad=1.0, labelspacing=1.2)
plt.grid()
plt.xticks(x)
plt.show()
| verification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Discrete Markov Chains and Stationary Distributions
#
# This is BONUS content related to Day 17, where we introduce Markov models for discrete random variables.
#
#
# ## Random variables
#
# * $z_1, z_2, \ldots z_T$, where $t \in \{1, 2, \ldots T$ indexes the discrete timestep
# * Each is a discrete random variable: $z_t \in \{1, 2, \ldots K\}$
#
# ## Parameters
#
# * $\pi$ is the $K$-length initial probability vector. $\pi \in \Delta^K$ (non-negative, sums to one)
# * $A$ is a $K \times K$ transition matrix. For each row $j$, we have $A_j \in \Delta^K$ (all entries are non-negative and each *row* sums to one).
#
# ## Probability mass function
#
# * $p(z_1) = \text{CatPMF}(z_1 | \pi_1, \pi_2, \ldots \pi_K)$, which implies $p(z_1 = k) = \pi_k$
# * $p(z_t | z_{t-1}=j) = \text{CatPMF}(z_t | A_{j1}, A_{j2}, \ldots A_{jK})$, which implies $p(z_t = k | z_{t-1}=j) = A_{jk}$
#
# ## Key takeaways
#
# * New concept: 'Stationary distribution' or 'Equilibrium distribution', the limiting distribution of $p(z_t)$ as $t \rightarrow \infty$
#
# * When does the stationary distribution exist? As long it is "ergodic", which (intuitively) means each state has *some* probability of reaching every other state.
#
# * Below, we'll see 3 ways to compute a stationary distribution:
# * * Manually compute the marginal $p(z_t)$ at each timestep $t \in 1, 2, \ldots T$. Observe it become stationary
# * * Look at limits of multiplying the transition matrix: $A \cdot A \cdot A \cdot \ldots$. Eventually, will converge to a matrix where rows are the stationary distribution.
# * * Look at eigenvectors of $A^T$. Find the eigenvector corresponding to eigenvalue 1 and renormalize it so it sums to one.
#
#
# ## Things to remember
#
# In the code below, we need to use zero-based indexing (like python always does).
#
# So, the "first" timestep is t=0.
#
import numpy as np
## Number of states
K = 2
## Transition probabilities
# Row j holds p(z[t] = k | z[t-1] = j); each row sums to one.
A_KK = np.asarray([[0.9, 0.1], [0.2, 0.8]])
print(A_KK)
# # What happens to p(z[t]) after many timesteps? Converge to one distribution
# Repeatedly apply the transition matrix: p(z[t]) = p(z[t-1]) A.
pi_K = np.asarray([0.5, 0.5])
for t in range(100):
    if t == 0:
        proba_K = 1.0 * pi_K  # multiply by 1.0 to copy, so pi_K is untouched
    elif t > 0:
        proba_K = np.dot(proba_K, A_KK)
    print("after t=%3d steps: p(z[t]) = Cat(%.4f, %.4f)" % (t, proba_K[0], proba_K[1]))
# # What about starting from pi_K = [0.01, 0.99]? Converge to same distribution
# Same iteration from a very different start; the limit is identical.
pi_K = np.asarray([0.01, 0.99])
for t in range(100):
    if t == 0:
        proba_K = 1.0 * pi_K
    elif t > 0:
        proba_K = np.dot(proba_K, A_KK)
    print("after t=%3d steps: p(z[t]) = Cat(%.4f, %.4f)" % (t, proba_K[0], proba_K[1]))
# # What about starting from pi_K = [0.99, 0.01]? Converge to same distribution
# And from the mirrored start -- again the same stationary distribution.
pi_K = np.asarray([0.99, 0.01])
for t in range(100):
    if t == 0:
        proba_K = 1.0 * pi_K
    elif t > 0:
        proba_K = np.dot(proba_K, A_KK)
    print("after t=%3d steps: p(z[t]) = Cat(%.4f, %.4f)" % (t, proba_K[0], proba_K[1]))
# # What is the limit of many products of the transition matrix?
#
#
np.set_printoptions(precision=2)
# Repeatedly multiply A by itself; the rows of A^n converge to the
# stationary distribution.
val_KK = A_KK
for t in range(100):
    val_KK = np.dot(val_KK, A_KK)
    # NOTE(review): at this point val_KK holds A^(t+2) (it starts at A and is
    # multiplied before printing), so the "after t steps" label undercounts.
    msg = "--- after %3d steps: A_KK=\n%s" % (t, str(val_KK))
    print(msg)
# Third method: the stationary distribution is the eigenvector of A^T with
# eigenvalue 1, renormalized to sum to one.
lam_K, V_KK = np.linalg.eig(A_KK.T)
lam_K
V_KK
# Assumes the eigenvalue-1 eigenvector is in column 0 -- true here since
# numpy returned the eigenvalues with 1.0 first; verify for other matrices.
V_KK[:,0] / np.sum(V_KK[:,0])
| notebooks/MarkovChainsAndStationaryDistributions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image features exercise
# *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
#
# We have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels.
#
# All of your work for this exercise will be done in this notebook.
# +
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# -
# ## Load data
# Similar to previous exercises, we will load CIFAR-10 data from disk.
# +
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """Load CIFAR-10 from disk and split it into train/val/test subsets.

    The validation set is the slice of raw training data immediately after
    the first `num_training` rows; the test split is the first `num_test`
    rows of the raw test set.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train_raw, y_train_raw, X_test_raw, y_test_raw = load_CIFAR10(cifar10_dir)

    # Subsample each split via contiguous index ranges.
    val_idx = list(range(num_training, num_training + num_validation))
    X_val = X_train_raw[val_idx]
    y_val = y_train_raw[val_idx]
    train_idx = list(range(num_training))
    X_train = X_train_raw[train_idx]
    y_train = y_train_raw[train_idx]
    test_idx = list(range(num_test))
    X_test = X_test_raw[test_idx]
    y_test = y_test_raw[test_idx]

    return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
# -
# ## Extract Features
# For each image we will compute a Histogram of Oriented
# Gradients (HOG) as well as a color histogram using the hue channel in HSV
# color space. We form our final feature vector for each image by concatenating
# the HOG and color histogram feature vectors.
#
# Roughly speaking, HOG should capture the texture of the image while ignoring
# color information, and the color histogram represents the color of the input
# image while ignoring texture. As a result, we expect that using both together
# ought to work better than using either alone. Verifying this assumption would
# be a good thing to try for the bonus section.
#
# The `hog_feature` and `color_histogram_hsv` functions both operate on a single
# image and return a feature vector for that image. The extract_features
# function takes a set of images and a list of feature functions and evaluates
# each feature function on each image, storing the results in a matrix where
# each column is the concatenation of all feature vectors for a single image.
# +
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
# Each feature function maps one image to a 1-D vector; HOG captures texture,
# the HSV hue histogram captures color. The two are concatenated per image.
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
# (mean and std are computed on train only, then applied to val/test).
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
# -
# ## Train SVM on features
# Using the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.
# +
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [5e4, 5e5, 5e6]
results = {}
best_val = -1
best_svm = None
################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained classifer in best_svm. You might also want to play          #
# with different numbers of bins in the color histogram. If you are careful    #
# you should be able to get accuracy of near 0.44 on the validation set.       #
################################################################################
# Grid search over (learning rate, regularization): train an SVM per pair,
# record train/val accuracy, and keep the model with the best val accuracy.
for lr in learning_rates:
    for reg in regularization_strengths:
        local_svm = LinearSVM()
        local_svm.train(X_train_feats, y_train, learning_rate=lr, reg=reg, num_iters=1500)
        acc_train = np.mean(y_train == local_svm.predict(X_train_feats))
        acc_val = np.mean(y_val == local_svm.predict(X_val_feats))
        results[(lr, reg)] = (acc_train, acc_val)
        if acc_val > best_val:
            best_val = acc_val
            best_svm = local_svm
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# -
# Evaluate your trained SVM on the test set
# Evaluate the best SVM on the held-out test features.
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print(test_accuracy)
# +
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
    # Images predicted as `cls` whose true label differs (false positives).
    idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
    idxs = np.random.choice(idxs, examples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
        plt.imshow(X_test[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls_name)
plt.show()
# -
# ### Inline question 1:
# Describe the misclassification results that you see. Do they make sense?
#
# **Your answer:** Yes, for example under 'horse' category, there is a cat in it, which makes sense because (1) the picture has similar colour to what an image to a horse would have, and (2) the shape of a cat in this image is similar to the shape of a horse as well. So the misclassifications make sense.
# ## Neural Network on image features
# Earlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels.
#
# For completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.
print(X_train_feats.shape)
# +
from cs231n.classifiers.neural_net import TwoLayerNet
# Network dimensions: input is the feature-vector length (incl. bias column).
input_dim = X_train_feats.shape[1]
hidden_dim = 1000
num_classes = 10
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
# Grid search over learning rate / regularization / LR decay (each list holds
# the single best value found in earlier experiments), keeping the net with
# the highest validation accuracy and plotting each run's training curves.
best_val = -1
learning_rates = [.6] # 1
learning_rate_decays = [.995]
regularization_strengths = [2e-3] # 2e-3
result = {}
for lr in learning_rates:
    for reg in regularization_strengths:
        for lr_decay in learning_rate_decays:
            # Train the network
            print(lr, reg, lr_decay)
            local_net = TwoLayerNet(input_dim, hidden_dim, num_classes)
            local_stat = local_net.train(X_train_feats, y_train, X_val_feats, y_val,
                                         num_iters=2500, batch_size=500,
                                         learning_rate=lr, learning_rate_decay=lr_decay,
                                         reg=reg, verbose=True)
            # Score the run by its best validation accuracy over training.
            acc = np.max(local_stat['val_acc_history'])
            result[(lr, reg, lr_decay)] = acc
            if acc > best_val:
                best_val = acc
                best_net = local_net
            # Loss curve (top) and train/val accuracy curves (bottom).
            plt.subplot(2, 1, 1)
            plt.plot(local_stat['loss_history'])
            plt.title('Loss history')
            plt.xlabel('Iteration')
            plt.ylabel('Loss')
            plt.subplot(2, 1, 2)
            plt.plot(local_stat['train_acc_history'], label='train')
            plt.plot(local_stat['val_acc_history'], label='val')
            plt.title('Classification accuracy history')
            plt.xlabel('Epoch')
            plt.ylabel('Clasification accuracy')
            plt.show()
print('best validation accuracy achieved during cross-validation: %f' % best_val)
print('Details :-')
for lr, reg, decay in result:
    print('lr: %f; reg: %f; decay: %f: %f' % (lr, reg, decay, result[(lr, reg, decay)]) )
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
# +
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (best_net.predict(X_test_feats) == y_test).mean()
print(test_acc)
# -
# # Bonus: Design your own features!
#
# You have seen that simple image features can improve classification performance. So far we have tried HOG and color histograms, but other types of features may be able to achieve even better classification performance.
#
# For bonus points, design and implement a new type of feature and use it for image classification on CIFAR-10. Explain how your feature works and why you expect it to be useful for image classification. Implement it in this notebook, cross-validate any hyperparameters, and compare its performance to the HOG + Color histogram baseline.
# # Bonus: Do something extra!
# Use the material and code we have presented in this assignment to do something interesting. Was there another question we should have asked? Did any cool ideas pop into your head as you were working on the assignment? This is your chance to show off!
# # Bonus 1: Consider only either HoG or Color Histogram in training data
# ## (a) HoG
# +
### THIS IS BOUNS PART ###
### In this part, I would like to investigate the accuracy of network if I only considered either HoG or color histogram
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
# feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
# HoG only: same normalization pipeline as before, but without the color
# histogram, stored in *_hog variables so the combined features stay intact.
feature_fns = [hog_feature]
X_train_feats_hog = extract_features(X_train, feature_fns, verbose=True)
X_val_feats_hog = extract_features(X_val, feature_fns)
X_test_feats_hog = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
# (train-set statistics applied to all three splits).
mean_feat_hog = np.mean(X_train_feats_hog, axis=0, keepdims=True)
X_train_feats_hog -= mean_feat_hog
X_val_feats_hog -= mean_feat_hog
X_test_feats_hog -= mean_feat_hog
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat_hog = np.std(X_train_feats_hog, axis=0, keepdims=True)
X_train_feats_hog /= std_feat_hog
X_val_feats_hog /= std_feat_hog
X_test_feats_hog /= std_feat_hog
# Preprocessing: Add a bias dimension
X_train_feats_hog = np.hstack([X_train_feats_hog, np.ones((X_train_feats_hog.shape[0], 1))])
X_val_feats_hog = np.hstack([X_val_feats_hog, np.ones((X_val_feats_hog.shape[0], 1))])
X_test_feats_hog = np.hstack([X_test_feats_hog, np.ones((X_test_feats_hog.shape[0], 1))])
# +
from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats_hog.shape[1]
hidden_dim = 1000
num_classes = 10
best_net_hog = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
best_val_hog = -1
learning_rates = [.6]  # 1
learning_rate_decays = [.995]
regularization_strengths = [2e-3]  # 2e-3
result_hog = {}
# Grid-search over (learning rate, regularization, decay); remember the net
# with the best validation accuracy.
for lr in learning_rates:
    for reg in regularization_strengths:
        for lr_decay in learning_rate_decays:
            # Train the network
            print(lr, reg, lr_decay)
            local_net = TwoLayerNet(input_dim, hidden_dim, num_classes)
            local_stat_hog = local_net.train(X_train_feats_hog, y_train, X_val_feats_hog, y_val,
                                             num_iters=1500, batch_size=500,
                                             learning_rate=lr, learning_rate_decay=lr_decay,
                                             reg=reg, verbose=True)
            acc = np.max(local_stat_hog['val_acc_history'])
            result_hog[(lr, reg, lr_decay)] = acc
            if acc > best_val_hog:
                best_val_hog = acc
                best_net_hog = local_net
plt.subplot(2, 1, 1)
plt.plot(local_stat_hog['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(local_stat_hog['train_acc_history'], label='train')
plt.plot(local_stat_hog['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
# Fixed typo in the axis label ('Clasification' -> 'Classification').
plt.ylabel('Classification accuracy')
plt.show()
print('best validation accuracy achieved during cross-validation: %f' % best_val_hog)
print('Details :-')
for lr, reg, decay in result_hog:
    print('lr: %f; reg: %f; decay: %f: %f' % (lr, reg, decay, result_hog[(lr, reg, decay)]))
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
# +
# Evaluate the HoG-only network on the test split; the target is > 55% accuracy.
hog_predictions = best_net_hog.predict(X_test_feats_hog)
test_acc_hog = np.mean(hog_predictions == y_test)
print(test_acc_hog)
# -
# ## (b) Color Histogram
# +
from cs231n.features import *

num_color_bins = 10  # Number of bins in the color histogram
feature_fns = [lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats_hsv = extract_features(X_train, feature_fns, verbose=True)
X_val_feats_hsv = extract_features(X_val, feature_fns)
X_test_feats_hsv = extract_features(X_test, feature_fns)

# Preprocessing: center every split with the training-set mean.
mean_feat_hsv = np.mean(X_train_feats_hsv, axis=0, keepdims=True)
for split in (X_train_feats_hsv, X_val_feats_hsv, X_test_feats_hsv):
    split -= mean_feat_hsv

# Preprocessing: scale every split by the training-set standard deviation so
# each feature has roughly the same scale.
std_feat_hsv = np.std(X_train_feats_hsv, axis=0, keepdims=True)
for split in (X_train_feats_hsv, X_val_feats_hsv, X_test_feats_hsv):
    split /= std_feat_hsv

# Preprocessing: append a constant bias column to each split.
X_train_feats_hsv = np.hstack([X_train_feats_hsv, np.ones((X_train_feats_hsv.shape[0], 1))])
X_val_feats_hsv = np.hstack([X_val_feats_hsv, np.ones((X_val_feats_hsv.shape[0], 1))])
X_test_feats_hsv = np.hstack([X_test_feats_hsv, np.ones((X_test_feats_hsv.shape[0], 1))])
# +
from cs231n.classifiers.neural_net import TwoLayerNet

input_dim = X_train_feats_hsv.shape[1]
hidden_dim = 1000
num_classes = 10
best_net_hsv = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to    #
# cross-validate various parameters as in previous sections. Store your best   #
# model in the best_net variable.                                              #
################################################################################
best_val_hsv = -1
learning_rates = [.6]  # 1
learning_rate_decays = [1]
regularization_strengths = [2e-3]  # 2e-3
result_hsv = {}
# Grid-search over (learning rate, regularization, decay); remember the net
# with the best validation accuracy.
for lr in learning_rates:
    for reg in regularization_strengths:
        for lr_decay in learning_rate_decays:
            # Train the network
            print(lr, reg, lr_decay)
            local_net = TwoLayerNet(input_dim, hidden_dim, num_classes)
            local_stat_hsv = local_net.train(X_train_feats_hsv, y_train, X_val_feats_hsv, y_val,
                                             num_iters=1500, batch_size=500,
                                             learning_rate=lr, learning_rate_decay=lr_decay,
                                             reg=reg, verbose=True)
            acc = np.max(local_stat_hsv['val_acc_history'])
            result_hsv[(lr, reg, lr_decay)] = acc
            if acc > best_val_hsv:
                best_val_hsv = acc
                best_net_hsv = local_net
plt.subplot(2, 1, 1)
plt.plot(local_stat_hsv['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(local_stat_hsv['train_acc_history'], label='train')
plt.plot(local_stat_hsv['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
# Fixed typo in the axis label ('Clasification' -> 'Classification').
plt.ylabel('Classification accuracy')
plt.show()
print('best validation accuracy achieved during cross-validation: %f' % best_val_hsv)
print('Details :-')
for lr, reg, decay in result_hsv:
    print('lr: %f; reg: %f; decay: %f: %f' % (lr, reg, decay, result_hsv[(lr, reg, decay)]))
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
# +
# Evaluate the color-histogram-only network on the test split; the target is
# > 55% accuracy.
hsv_predictions = best_net_hsv.predict(X_test_feats_hsv)
test_acc_hsv = np.mean(hsv_predictions == y_test)
print(test_acc_hsv)
# -
# It seems that using only HoG I can get a similar result to the one using both features. However, when using only the color histogram, I had a hard time reducing the loss below 2. I think the color histogram alone is not a good feature, because multiple objects can have similar color distributions (as seen in the KNN python notebook), so this model is more difficult to converge.
# +
plt.figure(figsize=(20, 10))
plt.subplot(2, 1, 1)
# Overlay the training losses of the three feature sets for comparison.
plt.plot(local_stat['loss_history'], 'r', label='hog+hsv')
plt.plot(local_stat_hog['loss_history'], 'g', label='hog')
plt.plot(local_stat_hsv['loss_history'], 'b', label='hsv')
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
# Dashed lines: training accuracy; solid lines: validation accuracy.
plt.plot(local_stat['train_acc_history'], 'r--', label='hog+hsv train')
plt.plot(local_stat['val_acc_history'], 'r', label='hog+hsv val')
plt.plot(local_stat_hog['train_acc_history'], 'g--', label='hog train')
plt.plot(local_stat_hog['val_acc_history'], 'g', label='hog val')
plt.plot(local_stat_hsv['train_acc_history'], 'b--', label='hsv train')
plt.plot(local_stat_hsv['val_acc_history'], 'b', label='hsv val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
# Fixed typo in the axis label ('Clasification' -> 'Classification').
plt.ylabel('Classification accuracy')
plt.legend()
plt.show()
# -
# # Bonus 2: Using TwoLayerNet to train MNIST
# This is one of the challenges I encountered in CodeIT 2018 the weekend before this assignment submission deadline. Since I completed that challenge by TwoLayerNet in this assignment, I might as well just post my code here. tensorflow is used here just for downloading the MNIST pack.
#
# FYI: The challenge requires an accuracy of 90%+, and a timeout of 10 seconds. So instead of training network on the spot after receiving data, I trained the network, saved down the parameters, and just loaded them up whenever they were needed.
# +
import tensorflow as tf
import numpy as np

tf.keras.backend.clear_session()
mnist = tf.keras.datasets.mnist
# Download data -- only place where tf is called
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize pixel data to [0, 1]
x_train, x_test = x_train / 255.0, x_test / 255.0
# Flatten images to vectors
x_train = x_train.reshape([x_train.shape[0], -1])
# Hold out the first 1000 images as a validation set
x_val = x_train[:1000]
y_val = y_train[:1000]
x_train = x_train[1000:]
y_train = y_train[1000:]
# Flatten
x_test = x_test.reshape([x_test.shape[0], -1])

from cs231n.classifiers.neural_net import TwoLayerNet

result = {}
best_val = -1
# Bug fix: the best-performing network was scored but never stored, so the
# final evaluation used whichever net happened to train last. Track it here
# (the previous unused `best_softmax` placeholder is removed).
best_net = None
learning_rates = [1e-1]
learning_rate_decays = [.94]
regularization_strengths = [1e-2]
for lr in learning_rates:
    for reg in regularization_strengths:
        for lr_decay in learning_rate_decays:
            # Train the network
            print(lr, reg, lr_decay)
            local_net = TwoLayerNet(28*28, 256, 10)  # Input: 28x28, Hidden Size: 256, Output: 10 categories
            local_stat = local_net.train(x_train, y_train, x_val, y_val,
                                         num_iters=1500, batch_size=500,
                                         learning_rate=lr, learning_rate_decay=lr_decay,
                                         reg=reg, verbose=True)
            acc = np.max(local_stat['val_acc_history'])
            result[(lr, reg, lr_decay)] = acc
            if acc > best_val:
                best_val = acc
                best_net = local_net  # remember the model, not just its score
# Print out results.
for lr, reg, decay in result:
    print('lr: %f; reg: %f; decay: %f: %f' % (lr, reg, decay, result[(lr, reg, decay)]))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Evaluate with the best network found during the search.
eva = best_net.predict(x_test)
print('Accuracy:', np.mean(eva == y_test))
# -
num_rows = 6
num_cols = 6
num_images = num_rows * num_cols
plt.figure(figsize=(2*2*num_cols, 2*2*num_rows))
# Show the first num_rows*num_cols test digits with their true and predicted
# labels; titles are blue when the prediction is correct and red otherwise.
for idx in range(num_images):
    plt.subplot(num_rows, num_cols, idx + 1)
    plt.axis('off')
    plt.imshow(x_test[idx].reshape(28, 28))
    correct = y_test[idx] == eva[idx]
    plt.title('Label: ' + str(y_test[idx]) + ' Predicted:' + str(eva[idx]),
              color='blue' if correct else 'red')
# The incorrect cases are when the integers are more illegible, hence, the model is predicting very well.
| PA1/features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Writing your own libraries (& Command-Line Programs)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Overview:
# - **Teaching:** 15 min
# - **Exercises:** 45 min
#
# **Questions**
# - How can I write Python programs that will work like Unix command-line tools?
#
# **Objectives**
# - Use the values of command-line arguments in a program.
# - Handle flags and files separately in a command-line program.
# - Read data from standard input in a program so that it can be used in a pipeline.
# + [markdown] slideshow={"slide_type": "slide"}
# Interactive tools such as Jupyter notebooks and Ipython are great for prototyping code and exploring data, but sooner or later if we want to re-use our codes, or demonstrate reproducible workflows, we will want to use our program in a pipeline or run it in a shell script to process thousands of data files. In order to do that, we need to make our programs work like other Unix command-line tools. For example, we may want a program that reads a dataset and prints the average inflammation per patient.
#
# You have several ways to choose from to run this episode. If you are comfortable using linux editors and the terminal you are welcome to do so. Otherwise you can create a file directly in notebooks from the menu page where you create a new notebook, instead of selecting Python3.6 select **Text File**. Once the new file has opened click on Untitled.txt and change it's name as you are instructed. notebooks allows you to edit the file in a number of different modes replicating more advanced editors which you should explore if you want to use notebooks for regular development.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Information: Switching to Shell Commands
# In this lesson we are switching from typing commands that comprise our program in a Python interpreter to typing commands in a program file or script. We will then run or import the program from within a Jupyter notebook. When you see a `%run` in a code cell this is `Python` magic, which is loading and running a script through the Python interpreter.
# + [markdown] slideshow={"slide_type": "subslide"}
# As you might now expect our first task will be to produce a program that sends a greeting to the world.
#
# Let's start by creating a new **Text File** rename it `hello_world.py` and enter:
#
# ```python
# print("Hello world!")
# ```
#
# Create a new notebook in the same folder as your program and run it with
# ```python
# # # %run hello_world.py
# ```
#
# Verify that this gives the output you would expect.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Passing arguments from the command line
#
# Often we will want to pass information into the program from the command line. Fortunately Python has a standard library that allows us to do this. Copy your `hello_world.py` program to a new file `hello.py` and edit the file as follows:
#
# ```python
# import sys
#
# print("Hello",sys.argv)
# ```
#
# If we run our new program with the argument, `James` we should see the following output:
# ```bash
# # # %run hello.py James
# Hello ['./hello.py', 'James']
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# `sys.argv` means system **arg**ument **v**alues. The first argument is the name of the program and the full set of values are presented as a list, we don't want to say hello to the name of the program, and generally we will want to ignore this argument so let's modify our program to just consider the rest of the list:
#
# ```python
# import sys
#
# names = sys.argv[1:]
#
# for name in names:
# print("Hello",name)
# ```
#
# Make sure that you understand what we have done here, and why, discuss with your neighbours to make sure everyone is following.
# + [markdown] slideshow={"slide_type": "slide"}
# We can now re-run our new program with the same command as before:
#
# ```bash
# # # %run hello.py James
# Hello James
# ```
#
# Because we have generalised the program to operate on all arguments passed to it we can also run
#
# ```bash
# # # %run hello.py <NAME> <NAME>
# Hello Alan
# Hello Bob
# Hello Carl
# Hello Dave
# ```
#
# so we already have a way to generalise the script to perform the same task on a number of arguments.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise: What would the following do?
#
# This exercise requires a working knowledge of bash/linux.
#
# ```python
# # # %run hello.py ../*
# ```
#
# Remember that in bash `*` represents the wildcard match any characters of any length.
# + [markdown] slideshow={"slide_type": "slide"}
# We will next make some small changes to our program to encapsulate the main part of our program in its own function, and then tell Python that this is what it should run when the program is executed:
#
# ```python
# import sys
#
# def main():
# '''
# We can also add a docstring to remind our future selves that this program:
#
# Takes a list of arguments and say hello to each of them.
# '''
#
# names = sys.argv[1:]
#
# for name in names:
# print("Hello",name)
#
# if __name__ == '__main__':
# main()
# ```
#
# Run your program with the same arguments as before to check that you have not changed its behaviour. Note that we can also add a 'docstring' to our `main` function to explain what it does.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Information: Running versus Importing
#
# If the program behaves in the same way, why have we changed it? The reason is that running a Python script in bash is very similar to importing that file in Python. The biggest difference is that we don’t expect anything to happen when we import a file, whereas when running a script, we expect to see some output printed to the console.
#
# In order for a Python script to work as expected when imported or when run as a script, we typically put the part of the script that produces output in the following if statement:
#
# ```python
# if __name__ == '__main__':
# main() # Or whatever function produces output
# ```
#
# When you `import` a Python file, `__name__` is a special variable which holds the name of that file (e.g., when importing `readings.py`, `__name__` is `'readings'`). However, when running a script in bash, `__name__` is always set to `'__main__'` in that script so that you can determine if the file is being imported or run as a script.
#
# By adopting the practice of encapsulating the script part of our code in a `main` function we are making sure that we can safely `import` our code in other programs to safely reuse the fantastic functions we write.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Exercise: The Right Way to Do It
# If you want to create programs which can take complex parameters or multiple filenames, we shouldn’t handle `sys.argv` directly. Instead, we should use Python’s `argparse` library, which handles common cases in a systematic way, and also makes it easy for us to provide sensible error messages for our users. We will not cover this module in this lesson but you can go to Tshepang Lekhonkhobe’s [Argparse tutorial](http://docs.python.org/dev/howto/argparse.html) that is part of Python’s Official Documentation.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Information:
#
# The [Software Carpentry material](http://swcarpentry.github.io/python-novice-inflammation/10-cmdline/index.html) that this episode is based on makes use of the 'code' files you downloaded at the beginning of the episode. If you wish to explore these files further you are encouraged to do so as these also explore some of the functionality of the `numpy` library. Note that they take a slightly different approach to running their programs from the way we have looked at.
#
#
# We will instead explore an example that builds on what we did in the preceding episode more directly.
#
# First of all you will need to unzip files in the **data** folder, if you are comfortable using the terminal feel free to launch a terminal and extract the files. Alternatively you can create a new notebook in the data folder and unzip the following in a code cell, if you have not already done so.
#
# ```python
# # # !unzip RS50001/python-novice-inflammation-data.zip
# # # !unzip RS50001/python-novice-inflammation-code.zip
# ```
#
# As we were with **%** executing *magic* the **!** runs standard bash commands rather than Python.
#
# Now open the data folder that this command should create, ask a demonstrator if nothing happens. You will need to move the programs you create and a notebook to run them in this folder.
# + [markdown] slideshow={"slide_type": "slide"}
# Let's say we want to find the mean inflammation of each of the patients in the inflammation data we read in during the previous lesson.
#
# First copy our template `hello.py` to `inflammation_mean.py`. Open `inflammation_mean.py` and edit it as follows:
# ```python
# import sys
#
# def main():
# '''
# We can also add a docstring to remind our future selves that this program:
#
# Takes a list of files, and find and print the mean of each line of data:
# '''
#
# filenames = sys.argv[1:]
#
# for filename in filenames:
# data = read_csv_to_floats(filename)
# count=0
# for line in data:
# count += 1
# print("File: ", filename, "patient: ", count, "average inflammation", mean(line))
#
# if __name__ == '__main__':
# main()
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# We will now add the solution to the second exercise at the end of the last episode to our new program between `import sys` and `def main():`
#
# ```python
# def read_csv_to_floats(filename):
# '''
# Take string parameters as csv filename
# Read in and process file, converting all elements to floats.
#
# Return as 2D list (list of lists)
# '''
#
# with open(filename) as file:
# data_string = file.readlines()
# data_floats = []
# for line in data_string:
# tmp_floats = []
# tmp_list = line.split(',')
# for item in tmp_list:
# tmp_floats.append( float(item) )
# data_floats.append(tmp_floats)
# return data_floats
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# Now we need to add the `mean(sample)` function that we considered in episode 7, add this between `read_csv_to_floats()` and `main`:
# ```python
# def mean(sample):
# '''
# Takes a list of numbers, sample
#
# and returns the mean.
# '''
# sample_sum = 0
# for value in sample:
# sample_sum += value
#
# sample_mean = sample_sum / len(sample)
# return sample_mean
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# Now run your program with:
#
# ```bash
# # # %run inflammation_mean.py inflammation-01.csv
# ```
#
# Your output should look something like:
#
# ```brainfuck
# File: ../inflammation-01.csv patient: 1 average inflammation 5.45
# File: ../inflammation-01.csv patient: 2 average inflammation 5.425
# File: ../inflammation-01.csv patient: 3 average inflammation 6.1
# File: ../inflammation-01.csv patient: 4 average inflammation 5.9
# File: ../inflammation-01.csv patient: 5 average inflammation 5.55
# File: ../inflammation-01.csv patient: 6 average inflammation 6.225
# File: ../inflammation-01.csv patient: 7 average inflammation 5.975
# File: ../inflammation-01.csv patient: 8 average inflammation 6.65
# File: ../inflammation-01.csv patient: 9 average inflammation 6.625
# File: ../inflammation-01.csv patient: 10 average inflammation 6.525
# File: ../inflammation-01.csv patient: 11 average inflammation 6.775
# File: ../inflammation-01.csv patient: 12 average inflammation 5.8
# File: ../inflammation-01.csv patient: 13 average inflammation 6.225
# File: ../inflammation-01.csv patient: 14 average inflammation 5.75
# File: ../inflammation-01.csv patient: 15 average inflammation 5.225
# File: ../inflammation-01.csv patient: 16 average inflammation 6.3
# File: ../inflammation-01.csv patient: 17 average inflammation 6.55
# File: ../inflammation-01.csv patient: 18 average inflammation 5.7
# File: ../inflammation-01.csv patient: 19 average inflammation 5.85
# File: ../inflammation-01.csv patient: 20 average inflammation 6.55
# File: ../inflammation-01.csv patient: 21 average inflammation 5.775
# File: ../inflammation-01.csv patient: 22 average inflammation 5.825
# File: ../inflammation-01.csv patient: 23 average inflammation 6.175
# File: ../inflammation-01.csv patient: 24 average inflammation 6.1
# File: ../inflammation-01.csv patient: 25 average inflammation 5.8
# File: ../inflammation-01.csv patient: 26 average inflammation 6.425
# File: ../inflammation-01.csv patient: 27 average inflammation 6.05
# File: ../inflammation-01.csv patient: 28 average inflammation 6.025
# File: ../inflammation-01.csv patient: 29 average inflammation 6.175
# File: ../inflammation-01.csv patient: 30 average inflammation 6.55
# File: ../inflammation-01.csv patient: 31 average inflammation 6.175
# File: ../inflammation-01.csv patient: 32 average inflammation 6.35
# File: ../inflammation-01.csv patient: 33 average inflammation 6.725
# File: ../inflammation-01.csv patient: 34 average inflammation 6.125
# File: ../inflammation-01.csv patient: 35 average inflammation 7.075
# File: ../inflammation-01.csv patient: 36 average inflammation 5.725
# File: ../inflammation-01.csv patient: 37 average inflammation 5.925
# File: ../inflammation-01.csv patient: 38 average inflammation 6.15
# File: ../inflammation-01.csv patient: 39 average inflammation 6.075
# File: ../inflammation-01.csv patient: 40 average inflammation 5.75
# File: ../inflammation-01.csv patient: 41 average inflammation 5.975
# File: ../inflammation-01.csv patient: 42 average inflammation 5.725
# File: ../inflammation-01.csv patient: 43 average inflammation 6.3
# File: ../inflammation-01.csv patient: 44 average inflammation 5.9
# File: ../inflammation-01.csv patient: 45 average inflammation 6.75
# File: ../inflammation-01.csv patient: 46 average inflammation 5.925
# File: ../inflammation-01.csv patient: 47 average inflammation 7.225
# File: ../inflammation-01.csv patient: 48 average inflammation 6.15
# File: ../inflammation-01.csv patient: 49 average inflammation 5.95
# File: ../inflammation-01.csv patient: 50 average inflammation 6.275
# File: ../inflammation-01.csv patient: 51 average inflammation 5.7
# File: ../inflammation-01.csv patient: 52 average inflammation 6.1
# File: ../inflammation-01.csv patient: 53 average inflammation 6.825
# File: ../inflammation-01.csv patient: 54 average inflammation 5.975
# File: ../inflammation-01.csv patient: 55 average inflammation 6.725
# File: ../inflammation-01.csv patient: 56 average inflammation 5.7
# File: ../inflammation-01.csv patient: 57 average inflammation 6.25
# File: ../inflammation-01.csv patient: 58 average inflammation 6.4
# File: ../inflammation-01.csv patient: 59 average inflammation 7.05
# File: ../inflammation-01.csv patient: 60 average inflammation 5.9
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# We can also run our program with all the inflammation data:
#
# ```bash
# # # %run inflammation_mean.py inflammation-*.csv
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# We may also want to output our data to a file. In order to do this modify your `main` function as follows:
# ```python
# def main():
# '''
# We can also add a docstring to remind our future selves that this program:
#
# Takes a list of files, and find and print the mean of each line of data:
# '''
#
# filenames = sys.argv[1:]
#
# output_filename = "my_data.txt"
#
# output_file = open(output_filename, 'w')
#
# for filename in filenames:
# data = read_csv_to_floats(filename)
# count=0
# for line in data:
# count += 1
# output_file.write("File: "+filename+"patient: "+str(count)+"average inflammation: "+str(mean(line))+"\n")
# output_file.close()
# ```
#
# + [markdown] slideshow={"slide_type": "slide"}
# Note that we as with reading from files we have to `open` and `close` the file. Also the function `file.write()` can only take a single str as its parameter, so the `write` line is a little different to our `print` statement before, we also have to add a explicit new line at the end of the line which is the reason for the `"\n"`.
#
# Run your program and `cat` the file `my_data.txt` to verify that it has worked as intended.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise: Import your code
#
# Verify that you can also `import` your library and access the functions it defines, remember that as with undefined variables, if your function is not found, the library has not been correctly read in. Repeat the 'analysis' in the main function by explicitly assigning values to `filename` and calling your `read_csv_to_floats` and `mean` functions.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Exercise: Arithmetic on the Command Line
# Write a python program that does addition and subtraction:
# ```bash
# # # %run arith.py add 1 2
# 3
# # # %run arith.py subtract 3 4
# -1
# ```
#
# [Solution]()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Solution: Arithmetic on the Command Line
#
# ```python
# import sys
#
#
# def do_arithmetic(operand1, operator, operand2):
#
# if operator == 'add':
# value = operand1 + operand2
# elif operator == 'subtract':
# value = operand1 - operand2
# elif operator == 'multiply':
# value = operand1 * operand2
# elif operator == 'divide':
# value = operand1 / operand2
# print(value)
#
# def main():
# assert len(sys.argv) == 4, 'Need exactly 3 arguments'
#
# operator = sys.argv[1]
# assert operator in ['add', 'subtract', 'multiply', 'divide'], \
# 'Operator is not one of add, subtract, multiply, or divide: bailing out'
# try:
# operand1, operand2 = float(sys.argv[2]), float(sys.argv[3])
# except ValueError:
# print('cannot convert input to a number: bailing out')
# return
#
# do_arithmetic(operand1, operator, operand2)
#
# if __name__ == '__main__':
# main()
# ```
#
# Check that your solution works by trying the different components and a few different values.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Exercise: Counting Lines
#
# By modifying `inflammation_mean.py` or otherwise, write a program called `line_count.py` that counts the number of lines in files that are passed as arguments and at the end the total number of lines in all the files added together, for those familiar with bash it works like the Unix `wc` command. Your program should:
#
# 1. If no filenames are given inform the user to provide a filename(s).
# 2. If one or more filenames are given, it reports the number of lines in each, followed by the total number of lines.
#
#
# [Solution]()
# -
# ## Solution: Counting Lines
#
# ```python
# # #!/usr/bin/env python3
# import sys
#
# def main():
# '''print each input filename and the number of lines in it,
# and print the sum of the number of lines'''
# filenames = sys.argv[1:]
# sum_nlines = 0 #initialize counting variable
#
# if len(filenames) == 0: # no filenames, advise usage
# print("Please provide filenames in order to count number of lines")
# else:
# for f in filenames:
# n = count_file(f)
# print('%s %d' % (f, n))
# sum_nlines += n
# print('total: %d' % sum_nlines)
#
# def count_file(filename):
# '''count the number of lines in a file'''
# f = open(filename,'r')
# nlines = len(f.readlines())
# f.close()
# return(nlines)
#
# if __name__ == '__main__':
# main()
# ```
# If you have used inflammation_mean.py as the basis for this program then you will have a very different solution.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Key Points:
# - The `sys` library connects a Python program to the system it is running on.
# - The list `sys.argv` contains the command-line arguments that a program was run with.
# - Avoid silent failures, try to inform your user what is going on.
| nbplain/10_episode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction-to-Quantum-Mechanics" data-toc-modified-id="Introduction-to-Quantum-Mechanics-1"><span class="toc-item-num">1 </span>Introduction to Quantum Mechanics</a></span><ul class="toc-item"><li><span><a href="#🎯-Objective¶" data-toc-modified-id="🎯-Objective¶-1.1"><span class="toc-item-num">1.1 </span>🎯 Objective¶</a></span></li><li><span><a href="#📜-Instructions" data-toc-modified-id="📜-Instructions-1.2"><span class="toc-item-num">1.2 </span>📜 Instructions</a></span></li><li><span><a href="#Wave-Particle-Duality" data-toc-modified-id="Wave-Particle-Duality-1.3"><span class="toc-item-num">1.3 </span>Wave-Particle Duality</a></span><ul class="toc-item"><li><span><a href="#🎲-Particle-like-features-of-light." data-toc-modified-id="🎲-Particle-like-features-of-light.-1.3.1"><span class="toc-item-num">1.3.1 </span>🎲 Particle-like features of light.</a></span></li><li><span><a href="#🖩-Properties-of-photons" data-toc-modified-id="🖩-Properties-of-photons-1.3.2"><span class="toc-item-num">1.3.2 </span>🖩 Properties of photons</a></span></li><li><span><a href="#🪙-Properties-of-photons" data-toc-modified-id="🪙-Properties-of-photons-1.3.3"><span class="toc-item-num">1.3.3 </span>🪙 Properties of photons</a></span></li><li><span><a href="#🪙-Properties-of-photons" data-toc-modified-id="🪙-Properties-of-photons-1.3.4"><span class="toc-item-num">1.3.4 </span>🪙 Properties of photons</a></span></li><li><span><a href="#🖩-Properties-of-photons" data-toc-modified-id="🖩-Properties-of-photons-1.3.5"><span class="toc-item-num">1.3.5 </span>🖩 Properties of photons</a></span></li><li><span><a href="#🎲-Properties-of-photons" data-toc-modified-id="🎲-Properties-of-photons-1.3.6"><span class="toc-item-num">1.3.6 </span>🎲 Properties of photons</a></span></li><li><span><a href="#🖩-Momentum-from-a-green-laser-pointer" data-toc-modified-id="🖩-Momentum-from-a-green-laser-pointer-1.3.7"><span class="toc-item-num">1.3.7 </span>🖩 Momentum 
from a green laser pointer</a></span></li><li><span><a href="#🖩-Wavelength-emitted-by-a-radiopharmaceutical" data-toc-modified-id="🖩-Wavelength-emitted-by-a-radiopharmaceutical-1.3.8"><span class="toc-item-num">1.3.8 </span>🖩 Wavelength emitted by a radiopharmaceutical</a></span></li><li><span><a href="#🪙-Davisson-Germer-experiment" data-toc-modified-id="🪙-Davisson-Germer-experiment-1.3.9"><span class="toc-item-num">1.3.9 </span>🪙 Davisson-Germer experiment</a></span></li><li><span><a href="#🎲-Davisson-Germer-experiment" data-toc-modified-id="🎲-Davisson-Germer-experiment-1.3.10"><span class="toc-item-num">1.3.10 </span>🎲 Davisson-Germer experiment</a></span></li><li><span><a href="#🖩-Properties-of-photons" data-toc-modified-id="🖩-Properties-of-photons-1.3.11"><span class="toc-item-num">1.3.11 </span>🖩 Properties of photons</a></span></li><li><span><a href="#🖩-Rydberg's-Law" data-toc-modified-id="🖩-Rydberg's-Law-1.3.12"><span class="toc-item-num">1.3.12 </span>🖩 Rydberg's Law</a></span></li><li><span><a href="#🎲-Wave-properties-of-particles" data-toc-modified-id="🎲-Wave-properties-of-particles-1.3.13"><span class="toc-item-num">1.3.13 </span>🎲 Wave properties of particles</a></span></li><li><span><a href="#🎲-Particle-properties-of-waves" data-toc-modified-id="🎲-Particle-properties-of-waves-1.3.14"><span class="toc-item-num">1.3.14 </span>🎲 Particle properties of waves</a></span></li><li><span><a href="#🖩-Properties-of-photons" data-toc-modified-id="🖩-Properties-of-photons-1.3.15"><span class="toc-item-num">1.3.15 </span>🖩 Properties of photons</a></span></li><li><span><a href="#🖩-Properties-of-photons" data-toc-modified-id="🖩-Properties-of-photons-1.3.16"><span class="toc-item-num">1.3.16 </span>🖩 Properties of photons</a></span></li><li><span><a href="#🖩-De-Broglie-wavelength-of-a-baseball" data-toc-modified-id="🖩-De-Broglie-wavelength-of-a-baseball-1.3.17"><span class="toc-item-num">1.3.17 </span>🖩 De Broglie wavelength of a 
baseball</a></span></li></ul></li><li><span><a href="#The-Schrödinger-Equation" data-toc-modified-id="The-Schrödinger-Equation-1.4"><span class="toc-item-num">1.4 </span>The Schrödinger Equation</a></span><ul class="toc-item"><li><span><a href="#✍️-Time-Dependent-Schrödinger-Equation" data-toc-modified-id="✍️-Time-Dependent-Schrödinger-Equation-1.4.1"><span class="toc-item-num">1.4.1 </span>✍️ Time-Dependent Schrödinger Equation</a></span></li><li><span><a href="#🎲-Hamiltonian-operator" data-toc-modified-id="🎲-Hamiltonian-operator-1.4.2"><span class="toc-item-num">1.4.2 </span>🎲 Hamiltonian operator</a></span></li></ul></li><li><span><a href="#Mathematics" data-toc-modified-id="Mathematics-1.5"><span class="toc-item-num">1.5 </span>Mathematics</a></span><ul class="toc-item"><li><span><a href="#🪙-Mathematical-Properties-of-the-wavefunction" data-toc-modified-id="🪙-Mathematical-Properties-of-the-wavefunction-1.5.1"><span class="toc-item-num">1.5.1 </span>🪙 Mathematical Properties of the wavefunction</a></span></li><li><span><a href="#🎲-Complex-Conjugation" data-toc-modified-id="🎲-Complex-Conjugation-1.5.2"><span class="toc-item-num">1.5.2 </span>🎲 Complex Conjugation</a></span></li><li><span><a href="#✍️-Complex-Conjugation" data-toc-modified-id="✍️-Complex-Conjugation-1.5.3"><span class="toc-item-num">1.5.3 </span>✍️ Complex Conjugation</a></span></li><li><span><a href="#🪙-Eigenfunctions-of-the-kinetic-energy-operator" data-toc-modified-id="🪙-Eigenfunctions-of-the-kinetic-energy-operator-1.5.4"><span class="toc-item-num">1.5.4 </span>🪙 Eigenfunctions of the kinetic-energy operator</a></span></li></ul></li></ul></li></ul></div>
# + [markdown] nbgrader={"grade": false, "grade_id": "preamble", "locked": true, "schema_version": 3, "solution": false, "task": false}
# # Introduction to Quantum Mechanics
#
# ## 🎯 Objective¶
# To review basic aspects of quantum mechanics.
#
# ## 📜 Instructions
# Before you turn this problem in, make sure everything runs as expected. First, restart the kernel (in the menubar, select Kernel → Restart) and then run all cells (in the menubar, select Cell → Run All).
#
# Make sure you fill in any place that says YOUR CODE HERE or "YOUR ANSWER HERE", as well as your name, username (the prefix to your @university.ext e-mail), and student ID number in the cell below
# + nbgrader={"grade": true, "grade_id": "cell-347e783dba403114", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false}
# Student identification -- replace the placeholders with your own details.
Name = "<NAME>"
email_user_name = "username"
ID_number = 1234567
# It's useful to import these libraries.
# You can import others or not even use these, though.
import numpy as np
import scipy
from scipy import constants  # CODATA physical constants (h, c, hbar, N_A, ...)
# + [markdown] nbgrader={"grade": false, "grade_id": "preambleDuality", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Wave-Particle Duality
# + [markdown] nbgrader={"grade": false, "grade_id": "quDuality1", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🎲 Particle-like features of light.
# Which of the following phenomena are strongly associated with the particle-like nature of light? <br>
# **A**. Blackbody radiation <br>
# **B**. Compton Scattering <br>
# **C**. Electron Diffraction <br>
# **D**. Stern-Gerlach Experiment <br>
# **E**. Photoelectric effect
# + nbgrader={"grade": false, "grade_id": "ansDuality1", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a list, tuple, or set containing the correct answers.
# For example, if the answers are A and C, then ad1 = ["A", "C"].
# I've initialized the answer to the empty list.
ad1 = []
### BEGIN SOLUTION
# Blackbody radiation, Compton scattering, and the photoelectric effect all
# require light to behave as quantized particles (photons); electron
# diffraction and the Stern-Gerlach experiment probe matter, not light.
ad1 = ["A", "B", "E"]
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "testDuality1", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
print("The following phenomena are associated with the particle-like nature of light:", ad1)
assert(isinstance(ad1,set) or isinstance(ad1,list) or isinstance(ad1,tuple))
assert(len(ad1) > 0)
### BEGIN HIDDEN TESTS
# casefold makes the letter comparison case-insensitive.
assert(set(map(str.casefold,ad1)) == {"a","b","e"})
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": true, "grade_id": "explainDuality1", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
#
# More information about this question can be found in section 3 of the notes [From Newton to Schrodinger](https://paulwayers.github.io/IntroQChem/notes/html/History.html "See especially section 3").
#
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "quDuality2", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 Properties of photons
# What is the frequency of light in Hz ($s^{-1}$) of light with wavelength 500 nm?
# + nbgrader={"grade": false, "grade_id": "ansDuality2", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Work the calculation here, then leave the final answer in ad2 as a float.
# ansDuality2 = float. I've initialized the answer to None.
ad2 = None
### BEGIN SOLUTION
# Frequency and wavelength of light are related by c = lambda * nu,
# so nu = c / lambda, with the wavelength expressed in metres.
wavelength_m = 500e-9
ad2 = constants.c/wavelength_m
print("the frequency of light with wavelength 500 nm is {0:.3e} Hz".format(ad2))
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "testDuality2", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert isinstance(ad2, float)
### BEGIN HIDDEN TESTS
assert np.isclose(ad2, constants.c/500e-9, rtol=1e-3)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": true, "grade_id": "explainDuality2", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
#
# The key equations for this problem are:
# $$\lambda \nu = c$$
# so
# $$\nu = \frac{c}{\lambda}$$
#
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "Q_frequency_doubles", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🪙 Properties of photons
#
# Doubling the wavelength of radiation doubles its frequency. (True/False)
#
# + nbgrader={"grade": false, "grade_id": "A_frequency_doubles", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a Boolean, so freq_double = True or freq_double = False
freq_double = None
### BEGIN SOLUTION
# False: nu = c/lambda, so doubling the wavelength *halves* the frequency.
freq_double = False
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "T_frequency_doubles", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert(isinstance(freq_double,bool))
print("It is", freq_double, "that when the wavelength of radiation doubles its frequency does also.")
### BEGIN HIDDEN TESTS
assert(freq_double == False)
# The frequency halves, because frequency = c/wavelength
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "Q_speed_halves", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🪙 Properties of photons
#
# Doubling the wavelength of radiation halves its speed. (True/False)
# + nbgrader={"grade": false, "grade_id": "A_speed_halves", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a Boolean, so speed_halves = True or speed_halves = False
speed_halves = None
### BEGIN SOLUTION
# False: in vacuum, all wavelengths of light travel at the same speed c.
speed_halves = False
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "T_speed_halves", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert(isinstance(speed_halves,bool))
print("It is", speed_halves, "that when the wavelength of radiation doubles its speed halves.")
### BEGIN HIDDEN TESTS
assert(speed_halves == False)
# The speed of light is a constant and does not depend on its wavelength.
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "Q_HeNe", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 Properties of photons
# A helium-neon laser emits light at 632.8 nm. What is the energy of the photons generated by this laser, in Joules?
# + solution2="hidden"
# Work the calculation here; report the photon energy as a float in E_HeNe.
# E_HeNe = float. I've initialized the answer to None.
E_HeNe = None
### BEGIN SOLUTION
# Planck relation: E = h*nu, and nu = c/lambda, so E = h*c/lambda.
laser_wavelength = 632.8e-9
E_HeNe = constants.h * constants.c/laser_wavelength
print("the energy of light with wavelength 632.8 nm is {0:.3e} J".format(E_HeNe))
### END SOLUTION
# + solution2="hidden"
assert isinstance(E_HeNe, float)
### BEGIN HIDDEN TESTS
assert np.isclose(E_HeNe, 3.1391e-19, rtol=1e-3)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "Q_energy_doubles", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🎲 Properties of photons
#
# Which of the following changes would double the energy of a photon: <br>
# **A**. Doubling its frequency <br>
# **B**. Doubling its wavelength <br>
# **C**. Doubling its momentum <br>
# **D**. Doubling its speed <br>
# **E**. Doubling its effective (relativistic) mass <br>
# **F**. Doubling its wavenumber.
# + nbgrader={"grade": false, "grade_id": "A_energy_doubles", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a list, tuple, or set containing the correct answers.
# For example, if the answers are A and C, then e_doubles = ["A", "C"].
# I've initialized the answer to the empty list.
e_doubles = []
### BEGIN SOLUTION
# E = h*nu (A), E = p*c (C), E = (relativistic mass)*c^2 (E), and
# E = h*c*(wavenumber) (F) all scale linearly with the doubled quantity.
# Doubling the wavelength (B) *halves* E, and the speed (D) is fixed at c.
e_doubles = ["A","C","E","F"]
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "T_energy_doubles", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
print("Ways you can double the energy of a photon include", e_doubles)
assert(isinstance(e_doubles,set) or isinstance(e_doubles,list) or isinstance(e_doubles,tuple))
assert(len(e_doubles) > 0)
### BEGIN HIDDEN TESTS
assert(set(map(str.casefold,e_doubles)) == {"a","c","e","f"})
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "Qgreenlaser", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 Momentum from a green laser pointer
# I have a high-powered green laser pointer (532 nm wavelength, 100 mW power) that I use for astronomical starspotting. If I shine this laser pointer on you, how much momentum, per second, will be transferred to you? Report your answer in SI units of kg m/s.
# + nbgrader={"grade": false, "grade_id": "Agreenlaser", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Work the calculation here; report the answer as a float.
# p_greenlaser = float. I've initialized the answer to None.
p_greenlaser = None
### BEGIN SOLUTION
# Each 532 nm photon carries momentum p = h/lambda and energy E = h*c/lambda.
photon_momentum = constants.h/532e-9 #in kg m/s
photon_energy = constants.h*constants.c/532e-9 #in Joules
# A 100 mW beam delivers 0.1 J/s, i.e. 0.1/E photons every second, so the
# momentum transferred per second is (photons per second) * (momentum each).
photons_per_second = .1/photon_energy
p_greenlaser = photon_momentum * photons_per_second
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tgreenlaser", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert isinstance(p_greenlaser, float)
print("the momentum transfered per second is {0:.3e} kg m/s".format(p_greenlaser))
### BEGIN HIDDEN TESTS
assert np.isclose(p_greenlaser, 3.336e-10, rtol=1e-3)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "QCo60", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 Wavelength emitted by a radiopharmaceutical
# The radioactive isotope Cobalt-60 is used in nuclear medicine to treat cancer. The energy emitted by Cobalt-60 is $1.29 \times 10^{11}$ J/mol. What is the wavelength of the emitted $\gamma$ rays?
# + nbgrader={"grade": false, "grade_id": "cell-66e8bdad8b507666", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Use the code box as a calculator, but report your answer as a float.
# wlength_Co60 = float. I've initialized the answer to None.
wlength_Co60 = None
### BEGIN SOLUTION
# The energy is given in Joules per mole, so let's first compute the energy of a single photon,
E_photonCo60 = 1.29e11/constants.N_A
# The wavelength is then determined from E = h*frequency = hc/wavelength
# BUG FIX: the result was previously stored in a misspelled name (wlength_C60),
# so the documented variable wlength_Co60 was never assigned and a student
# following the instructions would fail the grader; use one name throughout.
wlength_Co60 = constants.h * constants.c/E_photonCo60
print("the wavelength emitted by the radioactive isotope Co60 is {0:.3e} m".format(wlength_Co60))
### END SOLUTION
# + solution2="hidden"
assert(isinstance(wlength_Co60,float))
### BEGIN HIDDEN TESTS
assert(np.isclose(wlength_Co60,9.273e-13,rtol=1e-3))
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "Qduality3", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🪙 Davisson-Germer experiment
# The Davisson-Germer experiment was among the first explicit verifications of the wave-like nature of electrons, and was foundational for modern electron diffraction methods. (True/False)
# + nbgrader={"grade": false, "grade_id": "Adualit3", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a Boolean, so ad3 = True or ad3 = False
ad3 = None
### BEGIN SOLUTION
# True: Davisson and Germer observed Bragg-like diffraction of electrons
# scattered from a nickel crystal, confirming De Broglie's matter waves.
ad3 = True
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tdualit3", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert(isinstance(ad3,bool))
print("The answer is:", ad3)
### BEGIN HIDDEN TESTS
assert(ad3 == True)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": true, "grade_id": "Edualit3", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
#
# You can find more details about the Davisson-Germer experiment in section 3.3 of the [notes on the Introduction to Quantum Mechanics](https://paulwayers.github.io/IntroQChem/notes/html/History.html).
#
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "Qduality3b", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🎲 Davisson-Germer experiment
# The Davisson-Germer experiment demonstrated that if you shine a beam of electrons on a metal crystal, the result is <br>
# **A**. the electrons are absorbed at “critical energies” similar to the optical (light) absorption spectrum. <br>
# **B**. the electrons scatter according to the Bragg law for X-ray scattering. <br>
# **C**. the electrons go right through the metal. <br>
# **D**. the metal gets very hot and becomes a dull red color (stimulated blackbody emission of radiation). <br>
# + nbgrader={"grade": false, "grade_id": "Aduality3b", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a list, tuple, or set containing the correct answers.
# For example, if the answers are A and C, then ad3b = ["A", "C"].
# I've initialized the answer to the empty list.
ad3b = []
### BEGIN SOLUTION
# B: the scattered electrons satisfied the Bragg condition, just like X-rays.
ad3b = ["B"]
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tduality3b", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
print("In the Davisson-Germer experiment", ad3b)
assert(isinstance(ad3b,set) or isinstance(ad3b,list) or isinstance(ad3b,tuple))
assert(len(ad3b) == 1)
### BEGIN HIDDEN TESTS
assert(set(map(str.casefold,ad3b)) == {"b"})
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "Qduality4", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 Properties of photons
# What is the momentum of a $\gamma$-ray photon with a wavelength of $10^{-13}$ m in SI units of ${\frac{\text{m} \cdot \text{kg}}{\text{s}}}$?
# + nbgrader={"grade": false, "grade_id": "Aduality4", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Work the calculation here; report the answer as a float.
# ad4 = float. I've initialized the answer to None.
ad4 = None
### BEGIN SOLUTION
# De Broglie / Compton relation for a photon: p = h/lambda.
gamma_wavelength = 1e-13
ad4 = constants.h/gamma_wavelength
print("the momentum of a photon with a wavelength of 1e-13 m is {0:.3e} m kg/s".format(ad4))
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tduality4", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert isinstance(ad4, float)
### BEGIN HIDDEN TESTS
assert np.isclose(ad4, constants.h/1e-13, rtol=1e-3)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": true, "grade_id": "Eduality4", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
# The momentum of a photon can be computed from the De Broglie relation (here, better credited to Compton):
# $$ p = \frac{h}{\lambda} $$
#
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "Qduality5", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 Rydberg's Law
# Rydberg's law says that the wavenumber for the absorptions for a one-electron atom/ion with atomic number Z is given by the expression
# $$ \tilde{\nu} = \left( 1.0974 \cdot 10^7 m^{-1}\right) Z^2
# \left( \frac{1}{n_1^2} - \frac{1}{n_2^2} \right) $$
# where $1 \le n_1 < n_2 < \infty$. Suppose you are given the Hydrogen atom in its ground state, $n_1=1$. What is the lowest absorption frequency?
# + nbgrader={"grade": false, "grade_id": "Aduality5", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Work the calculation here; report the answer as a float.
# ad5 = float. I've initialized the answer to None.
ad5 = None
### BEGIN SOLUTION
# Lowest-energy absorption from the ground state: n1 = 1 -> n2 = 2, Z = 1.
nu_tilde = 1.0974e7 * 1 * (1 - 1./4) #Rydberg wavenumber, in 1/m
# Convert wavenumber to frequency: nu = c * nu_tilde (nu_tilde = 1/lambda).
ad5 = constants.c*nu_tilde
print("the lowest absorption frequency for the hydrogen atom is {0:.3e} Hz".format(ad5))
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tduality5", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert isinstance(ad5, float)
### BEGIN HIDDEN TESTS
assert np.isclose(ad5, 2.467e15, rtol=1e-3)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": true, "grade_id": "Eduality5", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
#
# The lowest absorption frequency will correspond to exciting from the ground state to the lowest excited state, so $n_2 = 2$. Using this, we can compute the wavelength from:
# $$\tilde{\nu} = (1.0974\cdot 10^7)(1^2)\left(\frac{1}{1^2} - \frac{1}{2^2} \right) $$
# and then convert the wavelength to frequency using
# $$ \nu = c\tilde{\nu} $$
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "Qduality6", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🎲 Wave properties of particles
# Which of the following experimental results are often cited as examples of the wave-likeness of particles like electrons?
# **A**. blackbody radiation <br>
# **B**. discrete emission lines in the hydrogen spectrum <br>
# **C**. photoelectric effect <br>
# **D**. Compton scattering of light by a particle <br>
# **E**. Electron scattering from a crystal
# + nbgrader={"grade": false, "grade_id": "Aduality6", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a list, tuple, or set containing the correct answers.
# For example, if the answers are A and C, then ad6 = ["A", "C"].
# I've initialized the answer to the empty list.
ad6 = []
### BEGIN SOLUTION
# E: diffraction of electrons from a crystal is the classic evidence
# that matter exhibits wave-like behavior.
ad6 = ["E"]
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tduality6", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
print("The following phenomena are associated with the wave-like nature of electrons:", ad6)
assert(isinstance(ad6,set) or isinstance(ad6,list) or isinstance(ad6,tuple))
assert(len(ad6) > 0)
### BEGIN HIDDEN TESTS
assert(set(map(str.casefold,ad6)) == {"e"} or set(map(str.casefold,ad6)) == {"b","e"})
### END HIDDEN TESTS
# B is a reasonable answer from the viewpoint of the Bohr model of the Hydrogen atom, but is less obvious than
# E (electron scattering).
# + [markdown] nbgrader={"grade": false, "grade_id": "Qduality7", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🎲 Particle properties of waves
# Which of the following experimental results are often cited as examples of the particle-likeness of radiation (light)?
# **A**. blackbody radiation <br>
# **B**. discrete emission lines in the hydrogen spectrum <br>
# **C**. photoelectric effect <br>
# **D**. Compton scattering of light by a particle <br>
# **E**. Electron scattering from a crystal <br>
# + nbgrader={"grade": false, "grade_id": "Aduality7", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a list, tuple, or set containing the correct answers.
# For example, if the answers are A and C, then ad7 = ["A", "C"].
# I've initialized the answer to the empty list.
ad7 = []
### BEGIN SOLUTION
# A, C, and D are explained by quantized light (photons); B concerns atomic
# energy levels and E concerns the wave nature of electrons, not of light.
ad7 = ["A","C","D"]
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tdualilty7", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
print("The following phenomena are associated with the particle-like nature of light:", ad7)
assert(isinstance(ad7,set) or isinstance(ad7,list) or isinstance(ad7,tuple))
assert(len(ad7) > 0)
### BEGIN HIDDEN TESTS
assert(set(map(str.casefold,ad7)) == {"a","c","d"})
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "Qduality9", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 Properties of photons
# Suppose you are given a photon with an energy of 2 eV. What is its momentum in
# $\frac{\text{m} \cdot \text{kg}}{\text{s}}$? What is its frequency in Hz?
# + nbgrader={"grade": false, "grade_id": "Aduality9", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Work the calculation here; report the answers as floats.
# I've initialized the answers to None.
momentum_d9 = None
frequency_d9 = None
### BEGIN SOLUTION
# Planck relation: nu = E/h. Using h in eV*s avoids converting 2 eV to Joules.
planck_eVs = scipy.constants.value("Planck constant in eV/Hz")
frequency_d9 = 2.0/planck_eVs
# For a photon, p = h*nu/c (equivalently p = E/c), with h back in SI units.
momentum_d9 = constants.h*frequency_d9/constants.c
print("the frequency of a photon with an energy of 2 eV is {0:.3e} Hz".format(frequency_d9))
print("the momentum of a photon with an energy of 2 eV is {0:.3e} m kg/s".format(momentum_d9))
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tduality9", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert isinstance(momentum_d9, float)
assert isinstance(frequency_d9, float)
### BEGIN HIDDEN TESTS
assert np.isclose(momentum_d9, 1.069e-27, rtol=1e-3)
assert np.isclose(frequency_d9, 4.836e+14, rtol=1e-3)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": true, "grade_id": "Eduality9", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
#
# First we can compute the frequency of the photon as:
# $$ \nu = \frac{E}{h} $$
# but there is the slight complication that the energy was given in electron-volts. Fortunately we have this constant built into to scipy.constants.
#
# The momentum of the photon can be computed from the De Broglie relation,
# $$ p = \frac{h}{\lambda} = \frac{h}{\tfrac{c}{\nu}} = \frac{h \nu}{c} = \frac{E}{c} $$
# Where the last formula, which was proposed long ago by Einstein and Compton and appeared in the notes, could have been used directly had you remembered it. However, because our energy is in electron-volts, it's a bit easier to use the next-to-last formula.
#
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "Q_Ep_from_k", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 Properties of photons
# What is the momentum and energy of a photon with angular wavenumber $k=10^7 \text{m}^{-1}$?
# + nbgrader={"grade": false, "grade_id": "A_Ep_from_K", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Work the calculation here; report the answers as floats.
# I've initialized the answers to None.
p_from_k = None #momentum of the photon
E_from_k = None #Energy of the photon
### BEGIN SOLUTION
# For a photon of angular wavenumber k: p = hbar*k, and E = p*c.
k = 1e7
p_from_k = constants.hbar * k
E_from_k = constants.c * p_from_k
print("the momentum of a photon with an angular wavenumber of 1e7 1/m is {0:.3e} m kg/s.".format(p_from_k))
print("the energy of a photon with an angular wavenumber of 1e7 1/m is {0:.3e} J.".format(E_from_k))
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "T_Ep_from_k", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert isinstance(p_from_k, float)
assert isinstance(E_from_k, float)
### BEGIN HIDDEN TESTS
assert np.isclose(p_from_k, 1.055e-27, rtol=1e-3)
assert np.isclose(E_from_k, 3.162e-19, rtol=1e-3)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": true, "grade_id": "E_Ep_from_k", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
#
# We can start with the equation for the momentum, which is easy:
# $$ p = \hbar k $$.
# The equation for the energy can deduced directly as $E = pc$, but if you forgot this, then,
# $$ E = h \nu = \frac{hc}{\lambda} = p c $$
# using the Planck relation (first equality) and the De Broglie relation (last equality).
#
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "Q_baseball", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🖩 De Broglie wavelength of a baseball
# During departmental baseball games, your instructor insists that the only reason he strikes out is because of the De Broglie wavelength of the baseball means that even though he swings in the correct location, he still misses. Suppose that the opposing major-league-quality hurler throws the baseball (mass = 145 g) at 100 miles-per-hour (45 m/s). What is the De Broglie wavelength of the baseball?
# + nbgrader={"grade": false, "grade_id": "A_baseball", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Work the calculation here; report the answer as a float.
# I've initialized the answer to None.
wl_baseball = None #wavelength of the baseball.
### BEGIN SOLUTION
# De Broglie: lambda = h/p, with p = m*v = 0.145 kg * 45 m/s.
p_baseball = .145*45
wl_baseball = constants.h/p_baseball
print("the wavelength of the baseball is {0:.3e} m!".format(wl_baseball))
### END SOLUTION
# + solution2="hidden"
assert isinstance(wl_baseball, float)
### BEGIN HIDDEN TESTS
assert np.isclose(wl_baseball, 1.e-34, rtol=1e-2)
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "preambleSE", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## The Schrödinger Equation
# + [markdown] nbgrader={"grade": false, "grade_id": "Qse_1", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### ✍️ Time-Dependent Schrödinger Equation
# What is the time-dependent Schrödinger equation for the complex conjugate of the wavefunction, $\Psi^*$?
# Put your answer in the markdown cell below. You can drag and drop an attachment (of most types) to this cell also.
# + [markdown] nbgrader={"grade": true, "grade_id": "Ase_1", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
#
# Taking the complex-conjugate of the time-dependent Schrödinger equation gives:
# $$ -i \hbar \frac{\partial \Psi^*(x,t)}{\partial t} = - \frac{\hbar^2}{2m} \frac{\partial^2 \Psi^*(x,t)}{\partial x^2} + V(x,t)\Psi^*(x,t) $$
#
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "Qse_2", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🎲 Hamiltonian operator
# The Hamiltonian operator corresponds to which observable property of a quantum system? <br>
# **A**. Action <br>
# **B**. Momentum <br>
# **C**. Kinetic Energy <br>
# **D**. De Broglie Wavelength <br>
# **E**. Total Energy <br>
# **F**. Angular Momentum <br>
# **G**. Entropy <br>
# **H**. Planck Mass
# + nbgrader={"grade": false, "grade_id": "Ase_2", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a list, tuple, or set containing the correct answers.
# For example, if the answers are A and C, then ansSE2 = ["A", "C"].
# I've initialized the answer to the empty list.
ansSE2 = []
### BEGIN SOLUTION
# E: the Hamiltonian H = T + V is the operator for the total energy.
ansSE2 = ["E"]
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tse_2", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
print("The Hamiltonian is the quantum-mechanical operator for:", ansSE2)
assert(isinstance(ansSE2,set) or isinstance(ansSE2,list) or isinstance(ansSE2,tuple))
assert(len(ansSE2) == 1)
### BEGIN HIDDEN TESTS
assert(set(map(str.casefold,ansSE2)) == {"e"})
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "preambleMath", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## Mathematics
# + [markdown] nbgrader={"grade": false, "grade_id": "QProb_canbe_negative", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🪙 Mathematical Properties of the wavefunction
# A probability density can be negative. (True/False)
# + nbgrader={"grade": false, "grade_id": "AProb_canbe_negative", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Answer: True or False.
prob_canbe_negative = None
### BEGIN SOLUTION
prob_canbe_negative = False
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "TProb_canbe_negative", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
assert isinstance(prob_canbe_negative, bool)
print("It is", prob_canbe_negative, "that a probability density can be negative.")
### BEGIN HIDDEN TESTS
# A probability density is |psi|^2 >= 0, so the correct answer is False.
assert prob_canbe_negative is False
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "Qzzstar", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🎲 Complex Conjugation
# Let $z$ be a complex number. If $w$ is the product of $z$ and its complex
# conjugate, $w = z z^*$, which of the following is **always** true about $w$: <br>
# **A**. w is an imaginary number. <br>
# **B**. w is a complex number. <br>
# **C**. w is nonzero real number. <br>
# **D**. w is a nonnegative real number. <br>
# **E**. w is a nonzero complex number. <br>
# **F**. None of the above
# + nbgrader={"grade": false, "grade_id": "Azzstar", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a list, tuple, or set containing the correct answers.
# For example, if the answers are A and C, then zzstar = ["A", "C"].
# I've initialized the answer to the empty list.
zzstar = []
### BEGIN SOLUTION
# z z* = |z|^2 is always a nonnegative real number (and every real number is
# also a complex number), so both B and D are always true. It can be zero
# (z = 0), which rules out C and E.
zzstar = ["B","D"]
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "Tzzstar", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false} solution2="hidden"
print("The product of a number and its complex conjugate is always", zzstar)
# Visible sanity checks: a nonempty collection was supplied.
assert(isinstance(zzstar,set) or isinstance(zzstar,list) or isinstance(zzstar,tuple))
assert(len(zzstar) > 0)
### BEGIN HIDDEN TESTS
# Case-insensitive comparison against the answer key {B, D}.
assert(set(map(str.casefold,zzstar)) == {"b","d"})
### END HIDDEN TESTS
# + [markdown] nbgrader={"grade": false, "grade_id": "qMath1", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### ✍️ Complex Conjugation
# What is the complex conjugate of
# $$ \Psi(x,t) = A e^{(a+bi)(kx - \omega t)} $$
# + [markdown] nbgrader={"grade": true, "grade_id": "aMath1", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# === BEGIN MARK SCHEME ===
# The complex conjugate is obtained by replacing $i$ with $-i$. So
# $$ \Psi^*(x,t) = A e^{(a-bi)(kx - \omega t)} $$
# I would accept an answer where it was not assumed that the constants in the expression were real, e.g.,
# $$ \Psi^*(x,t) = A^* e^{(a^*-b^*i)(k^*x - \omega^* t)} $$
# === END MARK SCHEME ===
# + [markdown] nbgrader={"grade": false, "grade_id": "Q_ke_eigenfunction", "locked": true, "schema_version": 3, "solution": false, "task": false} solution2="hidden" solution2_first=true
# ### 🪙 Eigenfunctions of the kinetic-energy operator
# *Every* eigenfunction of the momentum operator is also an eigenfunction of the kinetic-energy operator. (True/False)
# + nbgrader={"grade": false, "grade_id": "A_ke_eigenfunction", "locked": false, "schema_version": 3, "solution": true, "task": false} solution2="hidden"
# Report your answer as a Boolean, so is_also_eigenfunction = True or = False
is_also_eigenfunction = None
### BEGIN SOLUTION
# True: T = p^2 / 2m, so applying T to a momentum eigenfunction with eigenvalue
# lambda just multiplies it by lambda^2 / 2m.
is_also_eigenfunction = True
### END SOLUTION
# + solution2="hidden"
assert(isinstance(is_also_eigenfunction,bool))
print("The answer is:", is_also_eigenfunction)
### BEGIN HIDDEN TESTS
assert(is_also_eigenfunction == True)
### END HIDDEN TESTS
# + [markdown] solution2="hidden"
# $$\hat{p} \psi(x) = \lambda \psi(x) $$
# $$ \hat{T} = \frac{\hat{p}^2}{2m} $$
# $$ \hat{T} \psi(x) = \tfrac{1}{2m} \hat{p} \hat{p} \psi(x) = \tfrac{1}{2m} \hat{p} \lambda \psi(x) = \tfrac{1}{2m} \lambda^2 \psi(x) $$
#
# The reverse is more subtle, and not strictly true. One might try to use the fact that $\hat{p} = \sqrt{2m \hat{T}}$, but this fails in general; for example, $\cos ax$ and $\sin ax$ are eigenfunctions of the kinetic energy but not of the momentum. The general result is that, given an operator, $\hat{Q}$, which has eigenfunctions
# $$ \hat{Q} \psi_k(x) = \theta_k \psi_k(x) $$
# then any (analytic) function of $\hat{Q}$, has the same eigenfunctions, and the values are:
# $$ f(\hat{Q}) \psi_k(x) = f(\theta_k) \psi_k(x) $$
#
# -
# 📝
# 🔀
| ipynb/IntroQMkey.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from array import array
import cPickle as pickle
from scipy.stats import ks_2samp
import numpy as np
import pandas as pd
import datetime
import math
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
import numpy as np
import math
import pickle
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
trainFraction = 0.7
classifier = MLPClassifier(activation='tanh', alpha=1e-05, batch_size='auto',
beta_1=0.9, beta_2=0.999, early_stopping=False,
epsilon=1e-08, hidden_layer_sizes=(25, 20), learning_rate='adaptive',
learning_rate_init=0.001, max_iter=200, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
warm_start=False)
print 'Loading signal data file...'
sig_data1 = pd.read_pickle('../data/signal_data.p')
sig_data = pd.DataFrame(data=sig_data1)
print 'Loading background data file...'
bkg_data1 = pd.read_pickle('../data/background_data.p')
bkg_data = pd.DataFrame(data=bkg_data1)
#
cutIndex = int(trainFraction * len(sig_data))
#
print ' '
for i in range(6):
for j in range(6):
if j > i :
print "For features at index ",i," and ",j," :"
sigTrain = sig_data.iloc[0:cutIndex,[i,j]]
sigTest = sig_data.iloc[cutIndex: ,[i,j]]
bgTrain = bkg_data.iloc[0:cutIndex,[i,j]]
bgTest = bkg_data.iloc[cutIndex: ,[i,j]]
# Create the scaler to preprocess the data
scaler = StandardScaler(copy=True, with_mean=True, with_std=True).fit(sigTrain)
# transform the training sameple
sigTrain = scaler.transform(sigTrain)
# do the same to the test data
sigTest = scaler.transform(sigTest)
# do the same to the test data
bgTrain = scaler.transform(bgTrain)
# do the same to the test data
bgTest = scaler.transform(bgTest)
print 'Learning...'
train = np.append(sigTrain, bgTrain, axis=0)
target = [-1] * len(sigTrain) + [1] * len(bgTrain)
classifier.fit(train, target)
trainingSample = []
for entry in sigTrain:
probability = float(classifier.predict_proba([entry])[0][0])
trainingSample.append(probability)
z = []
testSample = []
for entry in sigTest:
probability = float(classifier.predict_proba([entry])[0][0])
testSample.append(probability)
q = int(classifier.predict([entry]))
z.append(q);
print "Signal", ks_2samp(trainingSample, testSample)
trainingSample = []
for entry in bgTrain:
probability = float(classifier.predict_proba([entry])[0][0])
trainingSample.append(probability)
testSample = []
for entry in bgTest:
probability = float(classifier.predict_proba([entry])[0][0])
testSample.append(probability)
q = int(classifier.predict([entry]))
z.append(q);
print "Background", ks_2samp(trainingSample, testSample)
print "calculating F1 Score , Precision , Accuracy , Recall : "
target_test = [-1] * len(sigTest) + [1] * len(bgTest)
ab = precision_score(target_test, z, labels=None, pos_label=1)
ac = recall_score(target_test, z, labels=None, pos_label=1)
ad = accuracy_score(target_test,z)
v = f1_score(target_test, z,pos_label=1,labels=None)
print "F1 score: ",v
print "Accuracy: ",ad
print "Precision: ",ab
print "Recall: ",ac
print " "
# -
# cd
# cd project1/HEPDrone/sklearn-classifiers
# +
# #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from array import array
import cPickle as pickle
from scipy.stats import ks_2samp
import numpy as np
import pandas as pd
import datetime
import math
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
import numpy as np
import math
import pickle
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
# Fraction of each sample used for training; the remainder is held out.
trainFraction = 0.7
# Two-hidden-layer MLP (tanh units, L-BFGS solver, fixed random seed).
classifier = MLPClassifier(activation='tanh', alpha=1e-05, batch_size='auto',
beta_1=0.9, beta_2=0.999, early_stopping=False,
epsilon=1e-08, hidden_layer_sizes=(25, 20), learning_rate='adaptive',
learning_rate_init=0.001, max_iter=200, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
warm_start=False)
print 'Loading signal data file...'
sig_data1 = pd.read_pickle('../data/signal_data.p')
sig_data = pd.DataFrame(data=sig_data1)
print 'Loading background data file...'
bkg_data1 = pd.read_pickle('../data/background_data.p')
bkg_data = pd.DataFrame(data=bkg_data1)
#
# Row index separating the training rows from the test rows.
# NOTE(review): the same cut index is reused for the background sample, which
# assumes both pickles have compatible lengths -- confirm.
cutIndex = int(trainFraction * len(sig_data))
#
print ' '
# Train on features at index 2 and 3 only (the pair chosen from the scan above).
i=2
j=3
print "For features at index ",i," and ",j," :"
sigTrain = sig_data.iloc[0:cutIndex,[i,j]]
sigTest = sig_data.iloc[cutIndex: ,[i,j]]
bgTrain = bkg_data.iloc[0:cutIndex,[i,j]]
bgTest = bkg_data.iloc[cutIndex: ,[i,j]]
# Create the scaler to preprocess the data (fit on the signal training sample)
scaler = StandardScaler(copy=True, with_mean=True, with_std=True).fit(sigTrain)
# transform the training sample
sigTrain = scaler.transform(sigTrain)
# apply the same transform to the signal test data
sigTest = scaler.transform(sigTest)
# ... and to the background training data
bgTrain = scaler.transform(bgTrain)
# ... and to the background test data
bgTest = scaler.transform(bgTest)
print 'Learning...'
# Signal is labelled -1 and background +1.
train = np.append(sigTrain, bgTrain, axis=0)
target = [-1] * len(sigTrain) + [1] * len(bgTrain)
classifier.fit(train, target)
# Persist the trained network and its input scaler for later use.
joblib.dump(classifier, 'classifier_jindal.pkl')
joblib.dump(scaler, 'scaler_jindal.pkl')
print 'Classifier saved to file'
# -
| archive/GSoC/Jatin_Jindal/training_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <img align="right" src="images/tf-small.png" width="128"/>
# <img align="right" src="images/etcbc.png"/>
# <img align="right" src="images/dans-small.png"/>
#
# You might want to consider the [start](search.ipynb) of this tutorial.
#
# Short introductions to other TF datasets:
#
# * [Dead Sea Scrolls](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/dss.ipynb),
# * [Old Babylonian Letters](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/oldbabylonian.ipynb),
# or the
# * [Q'uran](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/quran.ipynb)
#
# %load_ext autoreload
# %autoreload 2
from tf.app import use
VERSION = "2021"
A = use('bhsa', hoist=globals())
# A = use("bhsa:clone", checkout="clone", hoist=globals())
# ## Rough edges
#
# It might be helpful to peek under the hood, especially when exploring searches that go slow.
#
# If you went through the previous parts of the tutorial you have encountered cases where things come
# to a grinding halt.
#
# Yet we can get a hunch of what is going on, even in those cases.
# For that, we use the lower-level search api `S` of Text-Fabric, and not the
# wrappers that the high level `A` api provides.
#
# The main difference is, that `S.search()` returns a *generator* of the results,
# whereas `A.search()` returns a list of the results.
# In fact, `A.search()` calls the generator function delivered by `S.search()` as often as needed.
#
# For some queries, the fetching of results is quite costly, so costly that we do not want to fetch
# all results up-front. Rather we want to fetch a few, to see how it goes.
# In these cases, directly using `S.search()` is preferred over `A.search()`.
# Search template: every occurrence of the lexeme >LHJM/ ("Elohim") inside an
# undetermined (det=und) phrase, together with its book/chapter/verse context.
query = """
book
chapter
verse
phrase det=und
word lex=>LHJM/
"""
# ### Study
#
# First we call `S.study(query)`.
#
# The syntax will be checked, features loaded, the search space will be set up, narrowed down,
# and the fetching of results will be prepared, but not yet executed.
#
# In order to make the query a bit more interesting, we lift the constraint that the results must be in Genesis 1-2.
S.study(query)
# Before we rush to the results, lets have a look at the *plan*.
S.showPlan()
# Here you see already what your results will look like.
# Each result `r` is a *tuple* of nodes:
# ```
# (R0, R1, R2, R3, R4)
# ```
# that instantiate the objects in your template.
#
# In case you are curious, you can get details about the search space as well:
S.showPlan(details=True)
# The part about the *nodes* shows you how many possible instantiations for each object in your template
# has been found.
# These are not results yet, because only combinations of instantiations
# that satisfy all constraints are results.
#
# The constraints come from the relations between the objects that you specified.
# In this case, there is only an implicit relation: embedding `[[`.
# Later on we'll examine all
# [spatial relations](https://annotation.github.io/text-fabric/tf/about/searchusage.html#relational-operators).
#
# The part about the *edges* shows you the constraints,
# and in what order they will be computed when stitching results together.
# In this case the order is exactly the order by which the relations appear in the template,
# but that will not always be the case.
# Text-Fabric spends some time and ingenuity to find out an optimal *stitch plan*.
# Fetching results is like selecting a node, stitching it to another node with an edge,
# and so on, until a full stitch of nodes intersects with all the node sets from which they
# must be chosen (the yarns).
#
# Fetching results may take time.
#
# For some queries, it can take a large amount of time to walk through all results.
# Even worse, it may happen that it takes a large amount of time before getting the *first* result.
# During stitching, many stitchings will be tried and fail before they can be completed.
#
# This has to do with search strategies on the one hand,
# and the very likely possibility to encounter *pathological* search patterns,
# which have billions of results, mostly unintended.
# For example, a simple query that asks for 5 words in the Hebrew Bible without further constraints,
# will have 425,000 to the power of 5 results.
# That is 10-e28 (a one with 28 zeros),
# roughly the number of molecules in a few hundred liters of air.
# That may not sound much, but it is 10,000 times the amount of bytes
# that can be currently stored on the whole Internet.
#
# Text-Fabric search is not yet done with finding optimal search strategies,
# and I hope to refine its arsenal of methods in the future, depending on what you report.
# ### Counting results
# It is always a good idea to get a feel for the amount of results, before you dive into them head-on.
S.count(progress=1, limit=5)
# We asked for 5 results in total, with a progress message for every one.
# That was a bit conservative.
S.count(progress=100, limit=500)
# Still pretty quick, now we want to count all results.
S.count(progress=200, limit=-1)
# ### Fetching results
#
# It is time to see something of those results.
S.fetch(limit=10)
# Not very informative.
# Just a quick observation: look at the last column.
# These are the result nodes for the `word` part in the query, indicated as `R7` by `showPlan()` before.
# And indeed, they are all below 425,000, the number of words in the Hebrew Bible.
#
# Nevertheless, we want to glean a bit more information off them.
for r in S.fetch(limit=10):
print(S.glean(r))
# ##### Caution
# > It is not possible to do `len(S.fetch())`.
# Because `fetch()` is a *generator*, not a list.
# It will deliver a result every time it is being asked and for as long as there are results,
# but it does not know in advance how many there will be.
#
# >Fetching a result can be costly, because due to the constraints, a lot of possibilities
# may have to be tried and rejected before a the next result is found.
#
# > That is why you often see results coming in at varying speeds when counting them.
# We can also use `A.table()` to make a list of results.
# This function is part of the `Bhsa` API, not of the generic Text-Fabric machinery, as opposed to `S.glean()`.
#
# So, you can use `S.glean()` for every Text-Fabric corpus, but the output is still not very nice.
# `A.table()` gives much nicer output.
A.table(S.fetch(limit=5))
# ## Slow queries
#
# The search template above has some pretty tight constraints on one of its objects,
# so the amount of data to deal with is pretty limited.
#
# If the constraints are weak, search may become slow.
#
# For example, here is a query that looks for pairs of phrases in the same clause in such a way that
# one is engulfed by the other.
# Search template: within one clause, two phrases p1 and p2 such that p2 lies
# between two words (w1 ... w3) of p1 -- i.e. one phrase engulfed by the other.
# The explicit <, > and || operators supply all ordering/disjointness
# constraints, since order in the template itself is not significant.
query = """
% test
% verse book=Genesis chapter=2 verse=25
verse
clause
p1:phrase
w1:word
w3:word
w1 < w3
p2:phrase
w2:word
w1 < w2
w3 > w2
p1 < p2
"""
# A couple of remarks you may have encountered before.
#
# * some objects have got a name
# * there are additional relations specified between named objects
# * `<` means: *comes before*, and `>`: *comes after* in the canonical order for nodes,
# which for words means: comes textually before/after, but for other nodes the meaning
# is explained [here](https://annotation.github.io/text-fabric/tf/core/nodes.html)
# * later on we describe those relations in more detail
#
# > **Note on order**
# Look at the words `w1` and `w3` below phrase `p1`.
# Although in the template `w1` comes before `w3`, this is not
# translated in a search constraint of the same nature.
#
# > Order between objects in a template is never significant, only embedding is.
#
# Because order is not significant, you have to specify order yourself, using relations.
#
# It turns out that this is better than the other way around.
# In MQL order *is* significant, and it is very difficult to
# search for `w1` and `w2` in any order.
# Especially if your are looking for more than 2 complex objects with lots of feature
# conditions, your search template would explode if you had to spell out all
# possible permutations. See the example of Reinoud Oosting below.
#
# > **Note on gaps**
# Look at the phrases `p1` and `p2`.
# We do not specify an order here, only that they are different.
# In order to prevent duplicated searches with `p1` and `p2` interchanged, we even
# stipulate that `p1 < p2`.
# There are many spatial relationships possible between different objects.
# In many cases, neither the one comes before the other, nor vice versa.
# They can overlap, one can occur in a gap of the other, they can be completely disjoint
# and interleaved, etc.
# +
# ignore this
# S.tweakPerformance(yarnRatio=2)
# -
S.study(query)
# Text-Fabric knows that narrowing down the search space in this case would take ages,
# without resulting in a significantly shrunken space.
# So it skips doing so for most constraints.
#
# Let us see the plan, with details.
S.showPlan(details=True)
# As you see, we have a hefty search space here.
# Let us play with the `count()` function.
S.count(progress=10, limit=100)
# We can be bolder than this!
S.count(progress=100, limit=1000)
# OK, not too bad, but note that it takes a big fraction of a second to get just 100 results.
#
# Now let us go for all of them by the thousand.
S.count(progress=1000, limit=-1)
# See? This is substantial work.
A.table(S.fetch(limit=5))
# ## Hand-coding
#
# As a check, here is some code that looks for basically the same phenomenon:
# a phrase within the gap of another phrase.
# It does not use search, and it gets a bit more focused results, in half the time compared
# to the search with the template.
#
# > **Hint**
# If you are comfortable with programming, and what you look for is fairly generic,
# you may be better off without search, provided you can translate your insight in the
# data into an effective procedure within Text-Fabric.
# But wait till we are completely done with this example!
# Hand-coded scan for "gapped" phrases: a phrase p2 whose word range lies
# strictly inside the word range of another phrase p1 while sharing no word
# slots with it -- i.e. p2 sits in a gap of p1.
TF.indent(reset=True)
TF.info("Getting gapped phrases")
results = []
for verse in F.otype.s("verse"):
    for clause in L.d(verse, otype="clause"):
        phrases = L.d(clause, otype="phrase")
        # Index each phrase by (first word, last word, set of word slots).
        boundaries = {}
        for ph in phrases:
            words = L.d(ph, otype="word")
            boundaries[ph] = (words[0], words[-1], set(words))
        for p1 in phrases:
            f1, l1, s1 = boundaries[p1]
            for p2 in phrases:
                # Examine each pair once; p2 == p1 would fail the overlap
                # test below anyway, so it is safe to skip it here too.
                if p2 <= p1:
                    continue
                f2, l2, s2 = boundaries[p2]
                if s1 & s2:
                    continue  # phrases share word slots: not a gap
                if f1 < f2 and l2 < l1:
                    results.append((verse, clause, p1, p2, f1, f2, l2, l1))
TF.info("{} results".format(len(results)))
# ## Pretty printing
#
# We can use the pretty printing of `A.table()` and `A.show()` here as well, even though we have
# not used search!
#
# Not that you can show the node numbers. In this case it helps to see where the gaps are.
A.table(results, withNodes=True, end=5)
A.show(results, start=1, end=1)
# **NB**
# Gaps are a tricky phenomenon. In [gaps](searchGaps.ipynb) we will deal with them cruelly.
# # Performance tuning
#
# Here is an example by <NAME> (2018-09-21).
query = """
c:clause
PreGap:phrase_atom
LastPhrase:phrase_atom
:=
Gap:clause_atom
:: word
PreGap < Gap
Gap < LastPhrase
c || Gap
"""
# Here are the current settings of the performance parameters:
S.tweakPerformance()
S.study(query)
S.showPlan(details=True)
S.count(progress=1, limit=3)
# Can we do better?
#
# The performance parameter `yarnRatio` can be used to increase the amount of preprocessing, and we can
# increase to number of random samples that we make by `tryLimitFrom` and `tryLimitTo`.
#
# We start with increasing the amount of up-front edge-spinning.
S.tweakPerformance(yarnRatio=0.2, tryLimitFrom=10000, tryLimitTo=10000)
S.study(query)
S.showPlan(details=True)
# It seems to be the same plan.
S.count(progress=1, limit=3)
# No improvement.
# What if we decrease the amount of edge spinning?
S.tweakPerformance(yarnRatio=5, tryLimitFrom=10000, tryLimitTo=10000)
S.study(query)
S.showPlan(details=True)
S.count(progress=1, limit=3)
# Again, no improvement.
# We'll look for queries where the parameters matter more in the future.
# Here is how to reset the performance parameters:
S.tweakPerformance(yarnRatio=None, tryLimitFrom=None, tryLimitTo=None)
# # Next
#
# You have seen cases where the implementation is to blame.
#
# Now I want to point to gaps in your understanding:
# [gaps](searchGaps.ipynb)
#
# ---
#
# [basic](search.ipynb)
# [advanced](searchAdvanced.ipynb)
# [sets](searchSets.ipynb)
# [relations](searchRelations.ipynb)
# [quantifiers](searchQuantifiers.ipynb)
# rough
# [gaps](searchGaps.ipynb)
# # All steps
#
# * **[start](start.ipynb)** your first step in mastering the bible computationally
# * **[display](display.ipynb)** become an expert in creating pretty displays of your text structures
# * **[search](search.ipynb)** turbo charge your hand-coding with search templates
#
# ---
#
# [advanced](searchAdvanced.ipynb)
# [sets](searchSets.ipynb)
# [relations](searchRelations.ipynb)
# [quantifiers](searchQuantifiers.ipynb)
# [fromMQL](searchFromMQL.ipynb)
# rough
#
# You have seen cases where the implementation is to blame.
#
# Now I want to point to gaps in your understanding:
#
# [gaps](searchGaps.ipynb)
#
# ---
#
# * **[exportExcel](exportExcel.ipynb)** make tailor-made spreadsheets out of your results
# * **[share](share.ipynb)** draw in other people's data and let them use yours
# * **[export](export.ipynb)** export your dataset as an Emdros database
# * **[annotate](annotate.ipynb)** annotate plain text by means of other tools and import the annotations as TF features
# * **[volumes](volumes.ipynb)** work with selected books only
# * **[trees](trees.ipynb)** work with the BHSA data as syntax trees
#
# CC-BY <NAME>
| bhsa/searchRough.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Variational Quantum Eigensolver VQE
# It is a typical quantum-classical hybrid algorithm that runs on modern quantum computers and uses both quantum and classical computers.
# ## What we learn
# 1. Learn the basic theory of VQE
# 2. Implementing Quantum Variational Circuits in Blueqat
# ## Install Blueqat
# Install Blueqat from pip.
# !pip install blueqat
# ---
# ## Overview
# In addition to a universal algorithm that assumes an ideal error-correcting quantum computer, there is a variational algorithm for the NISQ of current quantum computers.
#
# 1. Universal (Grover, Shor, Phase Estimation, Quantum Fourier Transform, HHL, Quantum Support Vector Machine, etc.)
# 2. variational (VQE,QAOA)
#
# VQE is an algorithm that uses short quantum circuits to solve problems in a hybrid way with existing computers.
# ## Eigenvalue problem
# VQE calculates the expectation value of the eigenvalue of a given matrix (Hamiltonian). Various problems can be solved by solving eigenvalue problems. If the eigenvalue is $E_0$ and the eigenvector is $\mid \psi \rangle$, the eigenvalue equation is
#
# $$
# H \mid \psi \rangle = E_0 \mid \psi \rangle
# $$
#
# The purpose is to find $E_0$.
# ## Hamiltonian and expectations
#
# The problem is passed in an Hermitian matrix called the Hamiltonian $H$. The Hamiltonian consists of a Pauli matrix and a unit matrix, which takes the form of a complex matrix.
# +
from blueqat.pauli import X, Y, Z, I

# Example Hamiltonian: a real linear combination of the identity and Pauli
# operators on qubit 0; to_matrix() renders it as an explicit complex matrix.
h = 1.23 * I - 4.56 * X(0) + 2.45 * Y(0) + 2.34 * Z(0)
h.to_matrix()
# -
# Expectation value of Hamiltonian is,
#
# $$
# \langle \psi \mid H \mid \psi \rangle
# $$
#
# And, the Hamiltonian expectation can be decomposed because it is a linear combination of unitary matrices.
#
# $$
# \langle \psi \mid aH_1 + bH_2 \mid \psi \rangle \\ = \langle \psi \mid aH_1 \mid \psi \rangle + \langle \psi \mid bH_2 \mid \psi \rangle \\ = a\langle \psi \mid H_1 \mid \psi \rangle + b\langle \psi \mid H_2 \mid \psi \rangle
# $$
#
# For example,
#
# $$
# H = 1.2 X_0 Z_2 + 2.5 Z_0 X_1 Y_2 - 3.4 Z_2 X_1
# $$
#
# The expected value of this formula can be obtained as follows.
#
# $$
# \langle \psi \mid 1.2 X_0 Z_2 + 2.5 Z_0 X_1 Y_2 - 3.4 Z_2 X_1 \mid \psi \rangle\\
# = 1.2\langle \psi \mid X_0 Z_2 \mid \psi \rangle + 2.5 \langle \psi \mid Z_0 X_1 Y_2\mid \psi \rangle - 3.4 \langle \psi \mid Z_2 X_1 \mid \psi \rangle
# $$
# ## Hamiltonian expectations and sampling
# The expected value of the Hamiltonian can be obtained from the sampling of the calculation results. The expected value for the Hamiltonian $H=Z$ is as follows.
#
# $$
# \langle \psi \mid Z \mid \psi \rangle =
# \begin{bmatrix}
# \alpha^* & \beta^*
# \end{bmatrix}
# \begin{bmatrix}
# 1&0\\
# 0&-1
# \end{bmatrix}
# \begin{bmatrix}
# \alpha\\
# \beta
# \end{bmatrix}
# = |\alpha|^2 - |\beta|^2
# $$
#
# $|alpha|^2$ and $|\beta|^2$ are the probabilities of 0 and 1, respectively. Perform multiple calculations and find the expected value from that sample value.
# Normally, when the Hamiltonian is X or Y, it is not possible to find the expected value from the sample value. In this case, the rotation of each axis is used to adjust the sample so that it can be taken.
#
# For $X$ we use $X = HZH$
#
# $$
# \langle \psi \mid X \mid \psi \rangle \\
# = \langle \psi \mid HZH \mid \psi \rangle\\
# = \langle \psi' \mid Z \mid \psi' \rangle
# $$
#
# For $Y$ we use $Y = RX(-\pi/2) Z RX(\pi/2)$
#
# $$
# \langle \psi \mid Y \mid \psi \rangle \\
# = \langle \psi \mid RX(-\pi/2) Z RX(\pi/2) \mid \psi \rangle\\
# = \langle \psi'' \mid Z \mid \psi'' \rangle
# $$
#
# In this case, the corresponding rotating gate is inserted just before the measurement.
# ## Quantum Variational Principle
# In an arbitrary state vector $\psi(\theta)$, the Hamiltonian expectation satisfies the following
#
# $$
# \langle \psi (\theta) \mid H \mid \psi (\theta) \rangle \geq E_0
# $$
#
# VQE takes advantage of this quantum variational principle and uses an optimization algorithm from an existing computer to find a minimum value that is as close to $E_0$ as possible while changing the state vector with the angle parameter $\theta$.
#
# ## Ansatz
# The quantum circuit for efficiently finding the minimum value is called Ansatz. Currently, efficient quantum circuits such as UCC Ansatz for quantum chemistry and QAOA Ansatz for combinatorial optimization problems have been found and are expected to be an application. Ansatz has rules for each field to some extent and is written according to the rules.
# ## Example.
# Finally, let's do an example.
#
# 1. Create an Ansatz with the angle of the rotating gate as a parameter. (Quantum)
# 2. Compute the following from the results of the run, $\langle \psi (\theta) \mid H \mid \psi (\theta) \rangle$ (Classical)
# 3. Try the next suggested angle parameters from the classical optimizer.
# This time, as the Ansatz, use an arbitrary one.
#
# ```
# rx(a)[0].rz(b)[0]
# ```
#
# I would like to calculate one qubit using two angles, a and b. For the Hamiltonian, I'll use the example that came up above. Finally, let's compare the results of VQE's calculations with those of the numerical library numpy.
# +
import numpy as np
from blueqat import Circuit
from blueqat.pauli import X, Y, Z, I
from blueqat.vqe import AnsatzBase, Vqe
class OneQubitAnsatz(AnsatzBase):
    """A minimal one-qubit variational ansatz with two rotation angles."""

    def __init__(self, hamiltonian):
        # Two variational parameters: the rx angle and the rz angle.
        super().__init__(hamiltonian.to_expr(), 2)
        self.step = 1

    def get_circuit(self, params):
        """Build the circuit rx(angle_x) followed by rz(angle_z) on qubit 0."""
        angle_x, angle_z = params
        circuit = Circuit()
        return circuit.rx(angle_x)[0].rz(angle_z)[0]
# Hamiltonian whose lowest eigenvalue we want (same operator as above).
h = 1.23 * I - 4.56 * X(0) + 2.45 * Y(0) + 2.34 * Z(0)
# Run VQE: the classical optimizer varies the two ansatz angles to minimize
# the sampled energy expectation value.
runner = Vqe(OneQubitAnsatz(h))
result = runner.run()
print('Result by VQE')
print(runner.ansatz.get_energy_sparse(result.circuit))
# Cross-check: the exact lowest eigenvalue from dense diagonalization.
mat = h.to_matrix()
print('Result by numpy')
print(np.linalg.eigh(mat)[0][0])
# -
# Now you did VQE
| tutorial/200_vqe_en.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pro-tips: the challenge here is that with the NetCDF files in their original configuration it was not possible to read all the data in to downsample it. So instead we read in each file, swap the time and obs dimensions, and keep only the minute-averaged bottom-pressure values.
# 1. Create a directory in you data files for downsampled .ncdfs to live
import xarray as xr
import pandas as pd
import dask.dataframe as dd
import re
import requests
import pickle as pk
import os
import gc
# #!pip install netcdf4 == 1.5.0
# make the output directory for the downsampled netCDF files
new_dir = '/home/jovyan/data/botpt/minute_mean_dataD/'
# exist_ok=True makes creation idempotent, replacing the manual
# isdir/try/except-OSError dance. Note: unlike the old code this still
# raises if a non-directory file occupies the path, which would have
# broken the later ds.to_netcdf() writes anyway.
os.makedirs(new_dir, exist_ok=True)
# Alternative sensor catalogs (swap the url below to process a different sensor):
# Sensor E: url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/dax.soule@<EMAIL>/20181104T104012-RS03ECAL-MJ03E-06-BOTPTA302-streamed-botpt_nano_sample/catalog.html'
# Sensor B url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/dax.soule@<EMAIL>/20181020T213838-RS03ASHS-MJ03B-09-BOTPTA304-streamed-botpt_nano_sample/catalog.html'
# Sensor C url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/dax.soule@<EMAIL>/20181104T041943-RS03CCAL-MJ03F-05-BOTPTA301-streamed-botpt_nano_sample/catalog.html'
url = 'https://opendap.oceanobservatories.org/thredds/catalog/ooi/dax.soule@<EMAIL>/20191015T040958136Z-RS03INT2-MJ03D-06-BOTPTA303-streamed-botpt_nano_sample/catalog.html'
tds_url = 'https://opendap.oceanobservatories.org/thredds/dodsC'
# Scrape the THREDDS catalog HTML for dataset links.
datasets = requests.get(url).text
urls = re.findall(r'href=[\'"]?([^\'" >]+)', datasets)
x = re.findall(r'(ooi/.*?.nc)', datasets)

def _is_data_file(name):
    """True for .nc files whose 4th-from-last character is numeric
    (filters out ancillary/aggregate entries in the catalog)."""
    if not name.endswith('.nc'):
        return False
    try:
        float(name[-4])
    except (ValueError, IndexError):
        return False
    return True

# BUG FIX: the original called x.remove(i) while iterating over x, which
# skips the element following each removal and can leave bad entries in
# the list. Filtering into a new list is both correct and clearer.
x = [name for name in x if _is_data_file(name)]
datasets = [os.path.join(tds_url, i + '#fillmismatch') for i in x]
#datasets
# # pd routine
# %%time
# Downsample each remote dataset to 1-minute mean bottom pressure and
# write the result to its own small netCDF file in new_dir.
for i in datasets:
    ds = xr.open_dataset(i)
    # Make 'time' the indexing dimension so pandas can resample on it.
    ds = ds.swap_dims({'obs': 'time'})
    pressure_min = pd.DataFrame()
    pressure_min['bottom_pressure'] = ds['bottom_pressure'].to_pandas().resample('T').mean()
    # BUG FIX: `del pressure_min.index.name` raises AttributeError on
    # pandas >= 1.0; assigning None is the supported way to clear it.
    pressure_min.index.name = None
    pressure_min = pressure_min.dropna()
    # Release the remote file handle before opening the next dataset.
    ds.close()
    # Reuse new_dir (defined above) instead of re-hardcoding the path.
    out = new_dir + i.split('/')[-1][:-3] + '_resampled' + '.nc'
    ds = xr.Dataset.from_dataframe(pressure_min)
    ds.to_netcdf(out)
# # Build Dask Cluster
# 1. Use gui interface to create a new cluster with ~10 workers
# 2. Use < > to insert an "import Client" statement. This is critical because it is how your script knows to use your cluster.
# 3. Execute client cell
# 4. Execute your Dask Cell. Note that once you have a cluster running you do not need to re-import the client.
# 5. When finished, always shut down your cluster.
# +
from dask.distributed import Client
# Attach to the already-running cluster's scheduler (address from the Dask GUI).
client = Client("tcp://10.0.129.130:36559")
client
# -
# Lazily open every resampled file as a single dataset, concatenated along
# the shared time coordinate; `parallel=True` opens files via dask workers.
ds = xr.open_mfdataset('/home/jovyan/data/botpt/minute_mean_dataD/*_resampled.nc', parallel=True
,combine='by_coords')
ds
df = ds.to_dask_dataframe()
df.head()
df.tail()
| notebooks/jaz_dask_botpt_resample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import glob
from collections import defaultdict
# Strip the 'max_norms' entry from each pickled purchase dict, rewriting
# every matching file in place.
for file in glob.glob("../../data/purchase/purchase_std_b*.b"):
    # FIX: the original passed open(...) straight to pickle.load/dump,
    # leaking the file handles; context managers close them deterministically.
    with open(file, 'rb') as fh:
        fd = pickle.load(fh)
    # Keep the defaultdict type so readers of the rewritten files see the
    # same object type as before.
    nfd = defaultdict(list)
    for k, v in fd.items():
        if k != 'max_norms':
            nfd[k] = v
    print(file)
    with open(file, 'wb') as fh:
        pickle.dump(nfd, fh)
| experiments/immediate_sensitivity/remove_max_norms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stats
import statsmodels.api as sm
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
# + jupyter={"source_hidden": true}
# if necessary, download 'US' library dependency
# #!pip install US
from clean_data import *
# +
# Helper functions
def summary_model(X, y, label='scatter'):
    """Fit an OLS regression of y on X (with an intercept term added) and
    return the fitted model together with its text summary.

    `label` is accepted for interface compatibility but unused here.
    """
    design = sm.add_constant(X)
    fitted = sm.OLS(y, design).fit()
    return fitted, fitted.summary()
def plot_model(X, y, label='Residual Plot'):
    """Scatter studentized residuals against fitted values for an OLS fit.

    NOTE(review): unlike summary_model, this refits WITHOUT sm.add_constant,
    so the model has no intercept -- confirm whether that is intentional.
    """
    model = sm.OLS(y, X).fit()
    # Studentized residuals highlight outliers better than raw residuals.
    student_resids = model.outlier_test()['student_resid']
    y_hats = model.predict(X)
    plt.scatter(y_hats, student_resids, alpha = .35, label=label)
    plt.legend()
    plt.show()
# -
# Load in training data
df = pd.read_csv('data/Train.csv', low_memory=False)
# Clean the data with the helper script.
# FIX: bind the result to a distinct name -- the original
# `clean_df = clean_df(df)` shadowed the imported clean_df() function with
# its own return value, destroying the function reference after one call.
df_clean = clean_df(df)
df_clean.head()
# +
# Split up data between features (X) and SalePrice, i.e. the target values (y)
X = df_clean.drop(columns=['SalePrice'])
y = df_clean['SalePrice']
# Display the OLS summary in the notebook output (cell's last expression).
summary_model(X, y)
# -
model, summary = summary_model(X, y)
# Plotting the histogram for the target (y)
y.hist(bins=100)
plt.show()
# +
def rmsle(actual, predictions):
    """Root mean squared logarithmic error of predictions against actual.

    Uses np.log1p, which evaluates log(1 + x) accurately for values near
    zero, instead of the two-step np.log(x + 1), which loses precision
    for small x.
    """
    log_diff = np.log1p(predictions) - np.log1p(actual)
    return np.sqrt(np.mean(log_diff**2))
def rmse(actual, predictions):
    """Root mean squared error of predictions against actual values."""
    residuals = predictions - actual
    return np.sqrt(np.mean(np.square(residuals)))
# +
# 10-fold cross-validation of a plain linear regression on log(SalePrice).
n_folds = 10
kf = KFold(n_splits=n_folds, shuffle=True)
test_cv_errors, train_cv_errors = np.empty(n_folds), np.empty(n_folds)
X_array = np.array(X)
# Model the log of the target; predictions come back on the log scale.
y_array = np.log(np.array(y))
for idx, (train, test) in enumerate(kf.split(X)):
    model = LinearRegression()
    model.fit(X_array[train], y_array[train])
    y_hat = model.predict(X_array[test])
    y_train = model.predict(X_array[train])
    # NOTE(review): rmsle applies another log transform to values that are
    # already on the log scale -- confirm this double transform is intended.
    train_cv_errors[idx] = rmsle(y_array[train], y_train)
    test_cv_errors[idx] = rmsle(y_array[test], y_hat)
train_cv_errors, test_cv_errors
| linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="XComJvcOscah"
# !pip install pandas numpy sklearn mlrose_hiive mlrose seaborn
# + id="rruu_OABW_WB"
import six
import sys
sys.modules['sklearn.externals.six'] = six
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import mlrose_hiive as mlrose
import mlrose as mlrose_old
from sklearn.metrics import accuracy_score, f1_score
import seaborn as sns
import time
from mlrose_hiive.algorithms.decay import GeomDecay
#Random State
# Fixed seed shared by the train/test split and the seeded optimiser runs,
# so results are reproducible across notebook executions.
rs = 614
# + id="NNjUXpNWXGLP"
class Data():
    """Helpers for loading the CSV dataset and producing train/test splits."""

    def dataAllocation(self, path):
        """Read *path* as CSV and return (features, target), where the
        target is the final column and the features are all the others."""
        frame = pd.read_csv(path)
        features = frame.iloc[:, :-1]
        target = frame.iloc[:, -1]
        return features, target

    def trainSets(self, x_data, y_data):
        """Shuffled 70/30 train/test split using the module-level seed ``rs``."""
        split = train_test_split(x_data, y_data, test_size=0.3,
                                 random_state=rs, shuffle=True)
        x_train, x_test, y_train, y_test = split
        return x_train, x_test, y_train, y_test
# Load the diabetes data, split 70/30, then standardise the features.
# The scaler is fitted on the training fold only to avoid data leakage.
dataset = Data()
x1_data,y1_data = dataset.dataAllocation('pima-indians-diabetes.csv')
x1_train, x1_test, y1_train, y1_test = dataset.trainSets(x1_data,y1_data)
scaler = StandardScaler()
scaled_x1_train = scaler.fit_transform(x1_train)
scaled_x1_test = scaler.transform(x1_test)
# + id="RniIiRgrXQr2"
# + id="RniIiRgrXQr2"
# The three randomised-optimisation weight-training algorithms compared below.
algorithms = ['random_hill_climb', 'simulated_annealing', 'genetic_alg']
algorithm = algorithms[0]
# + colab={"base_uri": "https://localhost:8080/"} id="vulxqbQeXYIz" outputId="0cbdcbcb-abab-4c25-9fc4-6df42a9ab914"
# Learning curve for random hill climbing: train/test accuracy and F1
# as the iteration budget grows (1, 101, 201, ..., 5001).
results = []
for i in range(1, 5002, 100):
    model = mlrose.NeuralNetwork(hidden_nodes=[4], activation='relu',
                                 algorithm=algorithm, max_iters=i,
                                 bias=True, is_classifier=True, learning_rate=0.1,
                                 early_stopping=True, clip_max=5, max_attempts=100,
                                 random_state=rs)
    model.fit(scaled_x1_train, y1_train)
    y_train_pred = model.predict(scaled_x1_train)
    y_train_accuracy = accuracy_score(y1_train, y_train_pred)
    y_test_pred = model.predict(scaled_x1_test)
    y_test_accuracy = accuracy_score(y1_test, y_test_pred)
    f1score = f1_score(y1_test, y_test_pred)
    results.append([i, algorithm, y_train_accuracy, y_test_accuracy, f1score])
    print([i, algorithm, y_train_accuracy, y_test_accuracy, f1score])
# + id="DBL-Iu17XcLT"
df = pd.DataFrame(results, columns=["Iterations", "Algorithm", "Train Accuracy", "Test Accuracy", "F1 Score"])
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="RunCyzu0XdVc" outputId="71d948ea-a175-4423-a566-c636b9220846"
sns.lineplot(data=df, x="Iterations", y="F1 Score", hue="Algorithm")
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="TXkpI0Y3Xe9N" outputId="fa201e07-2221-4d27-a421-ed80ce8cc875"
sns.lineplot(data=df, x="Iterations", y="Test Accuracy", hue="Algorithm")
# + colab={"base_uri": "https://localhost:8080/"} id="o5XcGpnjadOj" outputId="22d65e3b-11a9-436e-fd8f-e6b2df9815dd"
# + colab={"base_uri": "https://localhost:8080/"} id="o5XcGpnjadOj" outputId="22d65e3b-11a9-436e-fd8f-e6b2df9815dd"
# Ten independent runs at a fixed iteration budget; no random_state is set,
# so each run's 10 restarts explore different starting weights, giving a
# picture of run-to-run variance.
results = []
for i in range(1, 11):
    model = mlrose.NeuralNetwork(hidden_nodes=[4], activation='relu',
                                 algorithm=algorithm, max_iters=3000,
                                 bias=True, is_classifier=True, learning_rate=0.1,
                                 early_stopping=True, clip_max=5, max_attempts=100,
                                 restarts=10)
    model.fit(scaled_x1_train, y1_train)
    y_train_pred = model.predict(scaled_x1_train)
    y_train_accuracy = accuracy_score(y1_train, y_train_pred)
    y_test_pred = model.predict(scaled_x1_test)
    y_test_accuracy = accuracy_score(y1_test, y_test_pred)
    f1score = f1_score(y1_test, y_test_pred)
    results.append([i, algorithm, y_train_accuracy, y_test_accuracy, f1score])
    print([i, algorithm, y_train_accuracy, y_test_accuracy, f1score])
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="NLOxxv3Dboy5" outputId="c8b56902-da5c-4ece-c795-8e17bd6224da"
runs = pd.DataFrame(results, columns=["Runs", "Algorithm", "Train Accuracy", "Test Accuracy", "F1 Score"])
sns.lineplot(data=runs, x="Runs", y="F1 Score")
# + id="L6t310xMbs91"
# Persist both experiments for the write-up.
df.to_csv("nn_sa_ex1.csv", index=False)
# + id="DDZpBWyehfCf"
runs.to_csv("nn_sa_ex2.csv", index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="EtsYRestjL5E" outputId="e774c746-6edc-4cc5-de37-eab1a3fba373"
algorithms[1]
# + colab={"base_uri": "https://localhost:8080/"} id="tIQ8VcqKhnhv" outputId="cea3e612-740e-4487-c357-b974f1ea5849"
# + colab={"base_uri": "https://localhost:8080/"} id="tIQ8VcqKhnhv" outputId="cea3e612-740e-4487-c357-b974f1ea5849"
# Learning curve for simulated annealing; it converges more slowly than
# RHC, hence the larger budget (1, 1001, ..., 19001).
results = []
algorithm='simulated_annealing'
for i in range(1, 20000, 1000):
    model = mlrose.NeuralNetwork(hidden_nodes=[4], activation='relu',
                                 algorithm=algorithm, max_iters=i,
                                 bias=True, is_classifier=True, learning_rate=0.1,
                                 early_stopping=True, clip_max=5, max_attempts=100,
                                 random_state=rs)
    model.fit(scaled_x1_train, y1_train)
    y_train_pred = model.predict(scaled_x1_train)
    y_train_accuracy = accuracy_score(y1_train, y_train_pred)
    y_test_pred = model.predict(scaled_x1_test)
    y_test_accuracy = accuracy_score(y1_test, y_test_pred)
    f1score = f1_score(y1_test, y_test_pred)
    results.append([i, algorithm, y_train_accuracy, y_test_accuracy, f1score])
    print([i, algorithm, y_train_accuracy, y_test_accuracy, f1score])
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="XLNE0DVsiGZ5" outputId="2074f223-7aa9-49a7-fdc5-f7fc3f56eb9c"
df_sa = pd.DataFrame(results, columns=["Iterations", "Algorithm", "Train Accuracy", "Test Accuracy", "F1 Score"])
sns.lineplot(data=df_sa, x="Iterations", y="F1 Score", hue="Algorithm")
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="h0-NjHdGiSdi" outputId="d7773d31-fb56-4358-c4b9-cbd3a5748184"
sns.lineplot(data=df_sa, x="Iterations", y="Test Accuracy", hue="Algorithm")
# + colab={"base_uri": "https://localhost:8080/"} id="ceXOjtM4mZD4" outputId="a26f8563-6381-4f43-d295-f5df11d73994"
# + colab={"base_uri": "https://localhost:8080/"} id="ceXOjtM4mZD4" outputId="a26f8563-6381-4f43-d295-f5df11d73994"
# Learning curve for the genetic algorithm; each iteration is expensive
# (population of 200), so the budget is much smaller (1, 101, ..., 701).
results = []
algorithm = algorithms[2]
for i in range(1, 800, 100):
    model = mlrose.NeuralNetwork(hidden_nodes=[4], activation='relu',
                                 algorithm=algorithm, max_iters=i,
                                 bias=True, is_classifier=True, learning_rate=0.1,
                                 early_stopping=True, clip_max=5, max_attempts=100,
                                 random_state=rs, pop_size=200, mutation_prob = 0.1)
    model.fit(scaled_x1_train, y1_train)
    y_train_pred = model.predict(scaled_x1_train)
    y_train_accuracy = accuracy_score(y1_train, y_train_pred)
    y_test_pred = model.predict(scaled_x1_test)
    y_test_accuracy = accuracy_score(y1_test, y_test_pred)
    f1score = f1_score(y1_test, y_test_pred)
    results.append([i, algorithm, y_train_accuracy, y_test_accuracy, f1score])
    print([i, algorithm, y_train_accuracy, y_test_accuracy, f1score])
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="QO4ZYyxirw7I" outputId="8f8d8701-7a18-430d-f6a2-5ba6e2639434"
df_ga = pd.DataFrame(results, columns=["Iterations", "Algorithm", "Train Accuracy", "Test Accuracy", "F1 Score"])
sns.lineplot(data=df_ga, x="Iterations", y="F1 Score", hue="Algorithm")
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="ikGhe2R0ujsg" outputId="7374e9c1-1c02-4cae-b091-42f42ce575ac"
sns.lineplot(data=df_ga, x="Iterations", y="Test Accuracy", hue="Algorithm")
# + id="IdVak-Lcuk59"
# + id="IdVak-Lcuk59"
# Run time (train and test) for all optimizers
# + colab={"base_uri": "https://localhost:8080/"} id="IiA76Nc4ywVQ" outputId="349a7c3e-78e5-44ea-cc5d-5fa0c7ce9f4b"
# The three measurements below share identical logic, so they are driven
# from a table of per-algorithm settings instead of three copy-pasted
# stanzas. The kwargs mirror the earlier experiment cells exactly:
#   - RHC: max_iters=3000, no fixed seed (as in the original)
#   - SA:  max_iters=3000, seeded
#   - GA:  max_iters=500, seeded, pop_size=200, mutation_prob=0.1
_timing_configs = [
    (algorithms[0], {'max_iters': 3000}),
    (algorithms[1], {'max_iters': 3000, 'random_state': rs}),
    (algorithms[2], {'max_iters': 500, 'random_state': rs,
                     'pop_size': 200, 'mutation_prob': 0.1}),
]
times = []
for algo_name, extra_kwargs in _timing_configs:
    model = mlrose.NeuralNetwork(hidden_nodes=[4], activation='relu',
                                 algorithm=algo_name,
                                 bias=True, is_classifier=True, learning_rate=0.1,
                                 early_stopping=True, clip_max=5, max_attempts=100,
                                 **extra_kwargs)
    start = time.time()
    model.fit(scaled_x1_train, y1_train)
    traintime = time.time() - start
    # Time prediction on the training set, as in the original cells.
    start = time.time()
    y_train_pred = model.predict(scaled_x1_train)
    testime = time.time() - start
    # (train accuracy and F1 were computed but never used in `times`,
    # so those dead computations have been dropped.)
    y_test_pred = model.predict(scaled_x1_test)
    y_test_accuracy = accuracy_score(y1_test, y_test_pred)
    times.append([algo_name, traintime, y_test_accuracy, testime])
print(times)
# + id="ZBtcQQ-RzPJP"
# + id="ZBtcQQ-RzPJP"
df_times = pd.DataFrame(times, columns=["Algorithm", "Training Time", "Test Accuracy", "Testing Time"])
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="RWYqhpbF1rj5" outputId="3d40b63b-5d68-46fc-f90d-58b22277eaf4"
df_times.head()
# + id="vSGsppf_2JfX"
# Reshape to long format (one row per algorithm/metric pair) so seaborn
# can filter and colour by metric.
df_times = df_times.melt(id_vars=["Algorithm"],
                         var_name=["Type"],
                         value_name="Value")
# + colab={"base_uri": "https://localhost:8080/", "height": 328} id="EAX2MWnH2oYP" outputId="fbeea764-7824-4dc4-85f6-3582616c6afd"
df_times.head(20)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="GprIvwnq1szK" outputId="2b46c5f2-eb64-49fa-ca69-522bd24ae449"
sns.barplot(data=df_times[df_times["Type"]=="Training Time"], x="Type", y="Value", hue="Algorithm")
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="XLRimYlR2lMU" outputId="8c064866-1d1d-4571-d471-3b6a46b6bdd5"
sns.barplot(data=df_times[df_times["Type"]=="Testing Time"], x="Type", y="Value", hue="Algorithm")
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="cdC1kvXy3Ahj" outputId="e41e956a-07a3-4708-d6fd-f9eff1074d2e"
sns.barplot(data=df_times[df_times["Type"]=="Test Accuracy"], x="Type", y="Value", hue="Algorithm")
# + id="ag1b71cQ48So"
| part1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Confusion Matrix
# * Compares the predicted values with the true values in a tabular way; if the predictions are 100% correct, all non-zero values in the matrix lie on the diagonal from top left to bottom right.
# > Compute confusion matrix to evaluate the accuracy of a classification.
#
# ```
# sklearn.metrics.confusion_matrix()
# ```
import numpy as np
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
# Toy example: 14 binary labels with a few deliberate mismatches so the
# off-diagonal cells of the matrix are populated.
y_true = np.array([1., 0., 1, 1, 0, 0, 1, 1., 0., 1, 1, 0, 0, 1])
y_pred = np.array([1., 1., 1., 0., 0. ,1, 0, 1., 0., 1, 1, 0, 0, 1])
# ### Using `scikit-learn` to generate the confusion matrix.
#
matrix = confusion_matrix(y_true, y_pred)
print(matrix)
# > Correct predictions appear down the diagonal **_(from top left to bottom right)_**.
# ### Plotting the confusion matrix with a custom helper
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(y_true, y_pred, classes=None, figsize=(5, 5), text_size=16):
    """Render a labelled confusion-matrix heatmap for y_true vs y_pred.

    classes: optional sequence of display labels; defaults to 0..n-1.
    Each cell shows the raw count plus its percentage of the true class.
    """
    cm = confusion_matrix(y_true, y_pred)
    # Row-normalise so every cell can also be reported as a share of its true class.
    cm_norm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    n_classes = cm.shape[0]
    print("BINARY CLASSIFICTAION" if n_classes == 2 else "MULTICLASS CLASSIFICATION")
    # Draw the heatmap with a colourbar.
    fig, ax = plt.subplots(figsize=figsize)
    heatmap = ax.matshow(cm, cmap=plt.cm.Blues)
    fig.colorbar(heatmap)
    labels = classes if classes else np.arange(cm.shape[0])
    ax.set(title="Confusion Matrix",
           xlabel="Predicted label",
           ylabel="True label",
           xticks=np.arange(n_classes),
           yticks=np.arange(n_classes),
           xticklabels=labels,
           yticklabels=labels,
           )
    ax.xaxis.set_label_position("bottom")
    ax.xaxis.tick_bottom()
    # Midpoint of the count range decides white-vs-black text per cell.
    threshold = (cm.max() + cm.min()) / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, f"{cm[row, col]} ({cm_norm[row, col]*100:.1f}%)",
                 horizontalalignment="center",
                 color="white" if cm[row, col] > threshold else "black",
                 size=text_size)
plot_confusion_matrix(y_true, y_pred)
| 04_Evaluation_Methods/04_Confusion Matrix/Confusion_Matrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: obidam36
# language: python
# name: obidam36
# ---
# # Launch Dask Cluster from your computer (v1)
# +
from dask.distributed import Client
# with default:
# client = Client(processes=False) # run workers in your same process
# or using some tuned parameters:
# Spawn a local cluster in one call: 10 worker processes, 4 threads each.
client = Client(threads_per_worker=4, n_workers=10)
# Display info
client
# -
# # Launch Dask Cluster from your computer (v2: LocalCluster)
from dask.distributed import Client, LocalCluster
# Create the cluster object explicitly, then attach a client to it.
cluster = LocalCluster()
client = Client(cluster)
cluster
## Read the cluster scheduler_address:
cluster.scheduler_address
# #### then connect to the local cluster from notebooks
#
# From any notebook, you can now access the cluster through:
from dask.distributed import Client
# scheduler_address = 'tcp://10.48.44.5:42209'
# NOTE: this address belongs to one specific cluster instance -- replace
# it with the scheduler_address printed by your own cluster above.
scheduler_address = 'tcp://127.0.0.1:57702'
client = Client(scheduler_address)
client
# # Launch Dask Cluster From pangeo/binder (KubeCluster)
# +
from dask.distributed import Client, progress
from dask_kubernetes import KubeCluster
# Ask Kubernetes for a 10-worker Dask cluster (requires a pangeo/binder-style deployment).
cluster = KubeCluster(n_workers=10)
cluster
# -
## Read the cluster scheduler_address:
cluster.scheduler_address
# ### Connect Jupyterlab Dask extension
# To access the cluster dashboard from the jupyterlab dask extension, enter the following path:
#
# /user/obidam-m2poc2019-<BINDER_INSTANCE_CODE>/proxy/8787
# ### Connect to Kube cluster from notebooks
#
# From any notebook, you can now access the cluster through:
from dask.distributed import Client
# NOTE: this address is specific to one past cluster instance -- replace
# it with the scheduler_address printed by your own cluster.
scheduler_address = 'tcp://10.48.44.5:42209'
client = Client(scheduler_address)
client
# # Launch Dask Cluster from Jupyterlab Dask extension
#
# See the Dask icon on the left-side menu bar and simply click on +NEW button !
#
# Once the cluster is set-up, you have direct access to the dashboard through the overlying buttons
#
# You can connect to the cluster from any notebook using the scheduler address:
from dask.distributed import Client
scheduler_address = 'tcp://10.48.44.5:42209'
client = Client(scheduler_address)
client
# # Cluster and client shutdown
# Always close the client before the cluster it is attached to.
client.close()
cluster.close()
| practice/environment/Launch Dask Cluster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# language: python
# name: python37664bitbaseconda6aa881468f1e4805adf3aff94fc544cc
# ---
# # Linear Algebra
# ## Vector
# +
from typing import List
# A vector is represented as a plain list of floats in this chapter.
Vector = List[float]
height_weight_age = [70,  # inches,
                     170, # pounds,
                     40 ] # years
grades = [95,  # exam1
          80,  # exam2
          75,  # exam3
          62 ] # exam4
def add(v: Vector, w: Vector) -> Vector:
    """Element-wise sum of two equal-length vectors."""
    assert len(v) == len(w), "vectors must be the same length"
    return [a + b for a, b in zip(v, w)]
assert add([1, 2, 3], [4, 5, 6]) == [5, 7, 9]
def subtract(v: Vector, w: Vector) -> Vector:
    """Element-wise difference v - w of two equal-length vectors."""
    assert len(v) == len(w), "vectors must be the same length"
    return [a - b for a, b in zip(v, w)]
assert subtract([5, 7, 9], [4, 5, 6]) == [1, 2, 3]
def vector_sum(vectors: List[Vector]) -> Vector:
    """Component-wise sum of a non-empty list of equal-length vectors."""
    assert vectors, "no vectors provided!"
    # All vectors must share the length of the first one.
    length = len(vectors[0])
    assert all(len(v) == length for v in vectors), "different sizes!"
    # Component i of the result sums component i of every input vector.
    return [sum(vec[i] for vec in vectors) for i in range(length)]
assert vector_sum([[1, 2], [3, 4], [5, 6], [7, 8]]) == [16, 20]
def scalar_multiply(c: float, v: Vector) -> Vector:
    """Scale each component of v by the scalar c."""
    return [c * component for component in v]
assert scalar_multiply(2, [1, 2, 3]) == [2, 4, 6]
def vector_mean(vectors: List[Vector]) -> Vector:
    """Component-wise average of a list of vectors."""
    count = len(vectors)
    total = vector_sum(vectors)
    return scalar_multiply(1 / count, total)
assert vector_mean([[1, 2], [3, 4], [5, 6]]) == [3, 4]
def dot(v: Vector, w: Vector) -> float:
    """Inner product: the sum of the pairwise component products."""
    assert len(v) == len(w), "vectors must be same length"
    return sum(a * b for a, b in zip(v, w))
assert dot([1, 2, 3], [4, 5, 6]) == 32  # 1 * 4 + 2 * 5 + 3 * 6
def sum_of_squares(v: Vector) -> float:
    """v . v: the sum of each component squared."""
    return dot(v, v)
assert sum_of_squares([1, 2, 3]) == 14  # 1*1 + 2*2 + 3*3
import math
def magnitude(v: Vector) -> float:
    """Euclidean length of v: the square root of v . v."""
    return math.sqrt(sum_of_squares(v))
assert magnitude([3, 4]) == 5
def distance(v: Vector, w: Vector) -> float:
    """Euclidean distance between v and w: the length of their difference."""
    difference = subtract(v, w)
    return magnitude(difference)
# ## Matrices
# +
# Another type alias
# A matrix is represented as a list of rows, each row a list of floats.
Matrix = List[List[float]]
A = [[1, 2, 3],  # A has 2 rows and 3 columns
     [4, 5, 6]]
B = [[1, 2],     # B has 3 rows and 2 columns
     [3, 4],
     [5, 6]]
# -
from typing import Tuple
def shape(A: Matrix) -> Tuple[int, int]:
    """Return (row count, column count) of the matrix A."""
    rows = len(A)
    # An empty matrix has zero columns; otherwise count the first row.
    cols = len(A[0]) if A else 0
    return rows, cols
assert shape([[1, 2, 3], [4, 5, 6]]) == (2, 3)  # 2 rows, 3 columns
def get_row(A: Matrix, i: int) -> Vector:
    """Row i of A (the stored list itself, not a copy)."""
    return A[i]

def get_column(A: Matrix, j: int) -> Vector:
    """Column j of A, gathered from the j-th element of every row."""
    return [row[j] for row in A]
from typing import Callable
def make_matrix(num_rows: int, num_cols: int, entry_fn: Callable[[int, int], float]) -> Matrix:
    """Build a num_rows x num_cols matrix whose (i, j) entry is entry_fn(i, j)."""
    return [
        [entry_fn(i, j) for j in range(num_cols)]  # one list per row i
        for i in range(num_rows)
    ]
def identity_matrix(n: int) -> Matrix:
    """The n x n identity matrix: ones on the diagonal, zeros elsewhere."""
    def diagonal_indicator(i: int, j: int) -> float:
        return 1 if i == j else 0
    return make_matrix(n, n, diagonal_indicator)
assert identity_matrix(5) == [[1, 0, 0, 0, 0],
                              [0, 1, 0, 0, 0],
                              [0, 0, 1, 0, 0],
                              [0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 1]]
| Chapter 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# # Data Conversion Challenge
# Challenge to automate the conversion of raw data into a specified format of data to make it more usable.
#
# **Important note**: The data used in this notebook has been randomised and all names have been masked so they can be used for training purposes. No data is committed to the project repo. This notebook is for development purposes only.
#
# This notebook is available in the following locations. These versions are kept in sync *manually* - there should not be discrepancies, but it is possible.
# - On Kaggle: <https://www.kaggle.com/btw78jt/data-conversion-challenge-202004>
# - In the GitHub project repo: <https://github.com/A-Breeze/premierconverter>. See the `README.md` for further instructions, and the associated `simulate_dummy_data` notebook to generate the dummy data that is used for this notebook.
# + [markdown] _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# <!-- This table of contents is updated *manually* -->
# # Contents
# 1. [Setup](#Setup): Import packages, Config variables
# 1. [Variables](#Variables): Raw data structure, Inputs
# 1. [Workflow](#Workflow): Load raw data, Remove unwanted extra values, Stem section, Factor sets, Output to CSV, Load expected output to check it is as expected
# 1. [Using the functions](#Using-the-functions): Default arguments, Limited rows
# -
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
#
# # Setup
# Set warning messages
import warnings
# Show all warnings in IPython
warnings.filterwarnings('always')
# Ignore specific numpy warnings (as per <https://github.com/numpy/numpy/issues/11788#issuecomment-422846396>)
# -- these are binary-compatibility noise from compiled extensions, not real problems.
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# Other warnings that sometimes occur
warnings.filterwarnings("ignore", message="unclosed file <_io.Buffered")
# +
# Determine whether this notebook is running on Kaggle
from pathlib import Path
ON_KAGGLE = False
print("Current working directory: " + str(Path('.').absolute()))
# Kaggle kernels always execute from /kaggle/working, so use the cwd as the signal.
if str(Path('.').absolute()) == '/kaggle/working':
    ON_KAGGLE = True
# +
# Import built-in modules
import sys
import platform
import os
import io
# Import external modules
from IPython import __version__ as IPy_version
import numpy as np
import pandas as pd
from click import __version__ as click_version
# Import project modules
if not ON_KAGGLE:
    from pyprojroot import here
    root_dir_path = here()
    # Allow modules to be imported relative to the project root directory
    if not sys.path[0] == root_dir_path:
        sys.path.insert(0, str(root_dir_path))
    import premierconverter as PCon
    # Re-load the project module that we are working on
    # %load_ext autoreload
    # %aimport premierconverter
    # %autoreload 1
# Check they have loaded and the versions are as expected.
# NOTE(review): `assert` statements are stripped under `python -O`; that is
# acceptable here because these are interactive environment checks only.
assert platform.python_version_tuple() == ('3', '6', '6')
print(f"Python version:\t\t\t{sys.version}")
assert IPy_version == '7.13.0'
print(f'IPython version:\t\t{IPy_version}')
assert np.__version__ == '1.18.2'
print(f'numpy version:\t\t\t{np.__version__}')
assert pd.__version__ == '0.25.3'
print(f'pandas version:\t\t\t{pd.__version__}')
assert click_version == '7.1.1'
print(f'click version:\t\t\t{click_version}')
print(f'premierconverter version:\t{PCon.__version__}')
# -
# Output exact environment specification, in case it is needed later
if ON_KAGGLE:
    print("Capturing full package environment spec")
    print("(But note that not all these packages are required)")
    # !pip freeze > requirements_snapshot.txt
    # !jupyter --version > jupyter_versions_snapshot.txt
# Configuration variables
# Locate the dummy raw data: a Kaggle dataset mount when running on Kaggle,
# otherwise the example-data path configured in the local project.
if ON_KAGGLE:
    raw_data_folder_path = Path('/kaggle/input') / 'dummy-premier-data-raw'
else:
    import proj_config
    raw_data_folder_path = proj_config.example_data_dir_path
assert raw_data_folder_path.is_dir()
print("Correct: All locations are available as expected")
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
#
# # Variables
# ## Raw data structure
# +
# Configuration variables for the expected format and structure of the data
ACCEPTED_FILE_EXTENSIONS = ['.csv', '', '.txt']
INPUT_FILE_ENCODINGS = ['utf-8', 'latin-1', 'ISO-8859-1']
INPUT_SEPARATOR = ","
RAW_STRUCT = {
'stop_row_at': 'Total Peril Premium',
'stem': {
'ncols': 5,
'chosen_cols': [0,1],
'col_names': ['Premier_Test_Status', 'Total_Premium'],
'col_types': [np.dtype('object'), np.dtype('float')],
},
'f_set': {
'include_Test_Status': ['Ok'],
'ncols': 4,
'col_names': ['Peril_Factor', 'Relativity', 'Premium_increment', 'Premium_cumulative'],
'col_types': [np.dtype('object')] + [np.dtype('float')] * 3,
},
'bp_name': 'Base Premium',
}
TRUNC_AFTER_REGEX = r",\s*{}.*".format(RAW_STRUCT['stop_row_at'])
# Output variables, considered to be constants
# Column name of the row IDs
ROW_ID_NAME = "Ref_num"
OUTPUT_DEFAULTS = {
'pf_sep': ' ',
'file_delimiter': ','
}
# -
# ## Parameters
# +
# Include Factors which are not found in the data
include_factors = None
if include_factors is None:
include_factors = []
# Maximum number of rows to read in
nrows = None
# +
# Input file location
in_filepath = raw_data_folder_path / 'minimal_input_adj.csv'
# Checks the file exists and has a recognised extension
in_filepath = Path(in_filepath)
if not in_filepath.is_file():
raise FileNotFoundError(
"\n\tin_filepath: There is no file at the input location:"
f"\n\t'{in_filepath.absolute()}'"
"\n\tCannot read the input data"
)
if not in_filepath.suffix.lower() in ACCEPTED_FILE_EXTENSIONS:
warnings.warn(
f"in_filepath: The input file extension '{in_filepath.suffix}' "
f"is not one of the recognised file extensions {ACCEPTED_FILE_EXTENSIONS}"
)
print("Correct: Input file exists and has a recognised extension")
# -
# View the first n raw CSV lines (without loading into a DataFrame)
nlines = 2
lines = []
with in_filepath.open() as f:
for line_num in range(nlines):
lines.append(f.readline())
print(''.join(lines))
# +
# Output file location
out_filepath = 'formatted_dummy_data1.csv'
force_overwrite = False
# Checks
out_filepath = Path(out_filepath)
if not out_filepath.parent.is_dir():
raise FileNotFoundError(
f"\n\tout_filepath: The folder of the output file does not exist"
f"Folder path: '{out_filepath.parent}'"
"\n\tCreate the output folder before running this command"
)
if out_filepath.is_file() and not force_overwrite:
raise FileExistsError(
"\n\tOutput options: File already exists at the output location:"
f"\n\t'{out_filepath.absolute()}'"
"\n\tIf you want to overwrite it, re-run with `force_overwrite = True`"
)
else:
if not out_filepath.suffix in ACCEPTED_FILE_EXTENSIONS:
warnings.warn(
f"out_filepath: The output file extension '{out_filepath.suffix}' "
f"is not one of the recognised file extensions {ACCEPTED_FILE_EXTENSIONS}",
)
print("Correct: A suitable location for output has been chosen")
# -
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
#
# # Workflow
# ## Load raw data
# +
# Load the CSV lines truncated as required
in_lines_trunc_df = None
for encoding in INPUT_FILE_ENCODINGS:
try:
in_lines_trunc_df = pd.read_csv(
in_filepath, header=None, index_col=False,
nrows=nrows, sep=TRUNC_AFTER_REGEX,
engine='python', encoding=encoding,
)
# print(f"'{encoding}': Success") # Used for debugging only
break
except UnicodeDecodeError:
# print(f"'{encoding}': Fail") # Used for debugging only
pass
if in_lines_trunc_df is None:
raise IOError(
"\n\tread_input_lines: pandas.read_csv() failed."
f"\n\tFile cannot be read with any of the encodings: {INPUT_FILE_ENCODINGS}"
)
in_lines_trunc_df.head()
# -
# Check it worked and is not malformed
if in_lines_trunc_df.shape[0] <= 1:
warnings.warn(
"Raw data lines: Only one row of data has been read. "
"Are you sure you have specified the correct file? "
"Are rows of data split into lines of the file?"
)
if not ((
in_lines_trunc_df.shape[1] == 1
) or (
in_lines_trunc_df.iloc[:, 1].isna().sum() == in_lines_trunc_df.shape[0]
)):
warnings.warn(
"Raw data lines: A line in the input data has more than one match "
f"to the regex pattern \"{TRUNC_AFTER_REGEX}\". "
"Are you sure you have specified the correct file?"
)
# +
# Convert to DataFrame
with warnings.catch_warnings():
# Ignore dtype warnings at this point, because we check them later on (after casting)
warnings.filterwarnings(
"ignore", message='.*Specify dtype option on import or set low_memory=False',
category=pd.errors.DtypeWarning,
)
with io.StringIO('\n'.join(in_lines_trunc_df[0])) as in_lines_trunc_stream:
df_trimmed = pd.read_csv(
in_lines_trunc_stream, header=None, index_col=0, sep=INPUT_SEPARATOR,
names=range(in_lines_trunc_df[0].str.count(INPUT_SEPARATOR).max() + 1),
).rename_axis(index=PCon.ROW_ID_NAME)
df_trimmed.head()
# -
# Check it is as expected and not malformed
if not df_trimmed.index.is_unique:
warnings.warn(
f"Trimmed data: Row identifiers '{ROW_ID_NAME}' are not unique. "
"This may lead to unexpected results."
)
if not (
# At least the stem columns and one factor set column
df_trimmed.shape[1] >=
RAW_STRUCT['stem']['ncols'] + 1 * RAW_STRUCT['f_set']['ncols']
) or not (
# Stem columns plus a multiple of factor set columns
(df_trimmed.shape[1] - RAW_STRUCT['stem']['ncols'])
% RAW_STRUCT['f_set']['ncols'] == 0
):
warnings.warn(
"Trimmed data: Incorrect number of columns with relevant data: "
f"{df_trimmed.shape[1] + 1}"
"\n\tThere should be: 1 for index, "
f"{RAW_STRUCT['stem']['ncols']} for stem section, "
f"and by a multiple of {RAW_STRUCT['f_set']['ncols']} for factor sets"
)
# ## Stem section
# +
# Get the stem section of columns
df_stem = df_trimmed.iloc[
:, RAW_STRUCT['stem']['chosen_cols']
].pipe( # Rename the columns
lambda df: df.rename(columns=dict(zip(
df.columns,
RAW_STRUCT['stem']['col_names']
)))
)
df_stem.head()
# -
# Checks
# Warn (rather than fail) if the stem columns do not have the expected dtypes
if not (
    df_stem.dtypes == RAW_STRUCT['stem']['col_types']
).all():
    warnings.warn(
        "Stem columns: Unexpected column data types"
        # BUG FIX: message previously read "Exepcted"
        f"\n\tExpected: {RAW_STRUCT['stem']['col_types']}"
        f"\n\tActual: {df_stem.dtypes.tolist()}"
    )
# ## Factor sets
# +
# Combine the rest of the DataFrame into one
df_fsets = pd.concat([
# For each of the factor sets of columns
df_trimmed.loc[ # Filter to only the valid rows
df_trimmed[1].isin(RAW_STRUCT['f_set']['include_Test_Status'])
].iloc[ # Select the columns
:, fset_start_col:(fset_start_col + RAW_STRUCT['f_set']['ncols'])
].dropna( # Remove rows that have all missing values
how="all"
).pipe(lambda df: df.rename(columns=dict(zip( # Rename columns
df.columns, RAW_STRUCT['f_set']['col_names']
)))).reset_index() # Get row_ID as a column
for fset_start_col in range(
RAW_STRUCT['stem']['ncols'], df_trimmed.shape[1], RAW_STRUCT['f_set']['ncols']
)
], sort=False).apply( # Where possible, convert object columns to numeric dtype
pd.to_numeric, errors='ignore'
).reset_index(drop=True) # Best practice to ensure a unique index
df_fsets.head()
# -
# Checks
if not (
df_fsets[RAW_STRUCT['f_set']['col_names']].dtypes ==
RAW_STRUCT['f_set']['col_types']
).all():
warnings.warn(
"Factor sets columns: Unexpected column data types"
f"\n\tExpected: {RAW_STRUCT['f_set']['col_types']}"
f"\n\tActual: {df_fsets[RAW_STRUCT['f_set']['col_names']].dtypes.tolist()}"
)
# +
perils_implied = df_fsets.Peril_Factor.drop_duplicates( # Get only unique 'Peril_Factor' combinations
).to_frame().pipe(lambda df: df.loc[ # Filter to leave only 'Base Premium' occurences
df.Peril_Factor.str.contains(RAW_STRUCT['bp_name']), :
]).assign(
# Get the 'Peril' part of 'Peril_Factor'
Peril=lambda df: df.Peril_Factor.str.replace(RAW_STRUCT['bp_name'], "").str.strip()
).Peril.sort_values().to_list()
perils_implied
# -
# Check that every 'Peril_Factor' starts with a Peril
if not df_fsets.Peril_Factor.str.startswith(
tuple(perils_implied)
).all():
warnings.warn(
"Implied perils: Not every Peril_Factor starts with a Peril. "
"Suggests the raw data format is not as expected."
)
if '' in perils_implied:
warnings.warn(
"Implied perils: Empty string has been implied. "
"Suggests the raw data format is not as expected."
)
# +
# Split out Peril_Factor
df_fsets_split = df_fsets.assign(
# Split the Peril_Factor column into two
Factor=lambda df: df.Peril_Factor.str.replace(
'|'.join(perils_implied), ""
).str.strip(),
Peril=lambda df: df.apply(
lambda row: row.Peril_Factor.replace(row.Factor, "").strip()
, axis=1
)
).drop(columns='Peril_Factor')
df_fsets_split.head()
# +
# Get the Base Premiums for all row_IDs and Perils
df_base_prems = df_fsets_split.query(
    # Get only the Base Premium rows
f"Factor == '{RAW_STRUCT['bp_name']}'"
).assign(
# Create Peril_Factor combination for column names
Peril_Factor=lambda df: df.Peril + OUTPUT_DEFAULTS['pf_sep'] + df.Factor,
Custom_order=0, # Will be used later to ensure desired column order
).pivot_table(
# Pivot to 'Peril_Factor' columns and one row per row_ID
index=ROW_ID_NAME,
columns=['Peril', 'Custom_order', 'Peril_Factor'],
values='Premium_cumulative'
)
df_base_prems.head()
# -
# Warning if the data set is not complete
if df_base_prems.isna().sum().sum() > 0:
warnings.warn(
"Base Premiums: Base Premium is missing for some rows and Perils."
"Suggests the raw data format is not as expected."
)
# +
# Ensure every row_ID has a row for every Peril, Factor combination
# Get the Relativity for all row_ID, Perils and Factors
df_factors = df_fsets_split.query(
# Get only the Factor rows
f"Factor != '{RAW_STRUCT['bp_name']}'"
).drop(
columns=['Premium_increment', 'Premium_cumulative']
).set_index(
# Ensure there is one row for every combination of row_ID, Peril, Factor
[ROW_ID_NAME, 'Peril', 'Factor']
).pipe(lambda df: df.reindex(index=pd.MultiIndex.from_product([
df.index.get_level_values(ROW_ID_NAME).unique(),
df.index.get_level_values('Peril').unique(),
# Include additional factors if desired from the inputs
set(df.index.get_level_values('Factor').tolist() + include_factors),
], names = df.index.names
))).sort_index().fillna({ # Any new rows need to have Relativity of 1
'Relativity': 1.,
}).reset_index().assign(
# Create Peril_Factor combination for column names
Peril_Factor=lambda df: df.Peril + OUTPUT_DEFAULTS['pf_sep'] + df.Factor,
Custom_order=1
).pivot_table(
# Pivot to 'Peril_Factor' columns and one row per row_ID
index=ROW_ID_NAME,
columns=['Peril', 'Custom_order', 'Peril_Factor'],
values='Relativity'
)
df_factors.head()
# -
# Checks
if not df_factors.apply(lambda col: (col > 0)).all().all():
warnings.warn(
"Factor relativities: At least one relativity is below zero."
)
# +
# Combine Base Premium and Factors columns
df_base_factors = df_base_prems.merge(
df_factors,
how='inner', left_index=True, right_index=True
).pipe(
# Sort columns (uses 'Custom_order')
lambda df: df[df.columns.sort_values()]
)
# Drop unwanted levels of the column MultiIndex
# Possible to do the following in a chain, but much too complicated
# See 'Chained drop a column MultiIndex level' in 'Unused rough work'
df_base_factors.columns = df_base_factors.columns.get_level_values('Peril_Factor')
df_base_factors.head()
# +
# Join back on to stem section
df_formatted = df_stem.merge(
df_base_factors,
how='left', left_index=True, right_index=True
).fillna(0.) # The only mising values are from 'error' rows
df_formatted.iloc[:10,:20]
# -
# ## Output to CSV
# Save it
df_formatted.to_csv(
out_filepath, sep=OUTPUT_DEFAULTS['file_delimiter'], index=True
)
print("Output saved")
# ### Reload the spreadsheet to check it worked
# +
# Check it worked
df_reload = pd.read_csv(
out_filepath, index_col=0, sep=OUTPUT_DEFAULTS['file_delimiter'],
)
df_reload.head()
# -
assert (df_formatted.dtypes == df_reload.dtypes).all()
assert df_reload.shape == df_formatted.shape
assert (df_formatted.index == df_reload.index).all()
assert df_formatted.iloc[:,1:].apply(
lambda col: np.abs(col - df_reload[col.name]) < 1e-10
).all().all()
print("Correct: The reloaded values are equal, up to floating point tolerance")
# ## Load expected output to check it is as expected
# Location of sheet of expected results
expected_filepath = raw_data_folder_path / 'minimal_expected_output_5.csv'
# +
df_expected = None
for encoding in INPUT_FILE_ENCODINGS:
try:
df_expected = pd.read_csv(
expected_filepath,
index_col=0, sep=OUTPUT_DEFAULTS['file_delimiter'],
encoding=encoding
).apply(lambda col: (
col.astype('float')
if np.issubdtype(col.dtype, np.number)
else col
))
# print(f"'{encoding}': Success") # Used for debugging only
break
except UnicodeDecodeError:
# print(f"'{encoding}': Fail") # Used for debugging only
pass
if df_expected is None:
raise IOError(
"\n\tload_formatted_file: pandas.read_csv() failed."
f"\n\tFile cannot be read with any of the encodings: {INPUT_FILE_ENCODINGS}"
)
df_expected.head()
# -
assert (df_formatted.dtypes == df_expected.dtypes).all()
assert df_expected.shape == df_formatted.shape
assert (df_formatted.index == df_expected.index).all()
assert df_formatted.iloc[:,1:].apply(
lambda col: np.abs(col - df_expected[col.name]) < 1e-10
).all().all()
print("Correct: The reloaded values are equal, up to floating point tolerance")
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
#
# # Using the functions
# ## Default arguments
help(PCon.convert)
#in_filepath = raw_data_folder_path / 'minimal_input_adj.csv'
out_filepath = 'formatted_data.csv'
res_filepath = PCon.convert(in_filepath, out_filepath)
# +
# Run the pipeline manually to check
# Load raw data
in_lines_trunc_df = PCon.read_input_lines(in_filepath)
PCon.validate_input_lines_trunc(in_lines_trunc_df)
df_trimmed = PCon.split_lines_to_df(in_lines_trunc_df)
# Get converted DataFrame
df_formatted = PCon.convert_df(df_trimmed)
df_formatted.head()
# +
# Reload resulting data from workbook
df_reload = PCon.load_formatted_file(res_filepath)
# Check it matches expectations
if PCon.formatted_dfs_are_equal(df_formatted, df_reload):
print("Correct: The reloaded values are equal, up to floating point tolerance")
# +
# Check against expected output from manually created worksheet
expected_filepath = raw_data_folder_path / 'minimal_expected_output_5.csv'
df_expected = PCon.load_formatted_file(expected_filepath)
# Check it matches expectations
if PCon.formatted_dfs_are_equal(df_reload, df_expected):
print("Correct: The reloaded values are equal, up to floating point tolerance")
# -
# Delete the results file
res_filepath.unlink()
print("Workspace restored")
# ## Limited rows
# +
nrows = 2 # Choose a specific number for which the expected results have been created: 2, 4 or 5
in_filepath = raw_data_folder_path / 'minimal_input_adj.csv'
out_filepath = f'formatted_data_{nrows}.csv'
res_filepath = PCon.convert(in_filepath, out_filepath, nrows = nrows)
# Check against expected output from manually created worksheet
expected_filepath = raw_data_folder_path / f'minimal_expected_output_{nrows}.csv'
df_expected = PCon.load_formatted_file(expected_filepath)
df_reload = PCon.load_formatted_file(res_filepath)
# Check it matches expectations
if PCon.formatted_dfs_are_equal(df_reload, df_expected):
print("Correct: The reloaded values are equal, up to floating point tolerance")
# Delete the results file
res_filepath.unlink()
print("Workspace restored")
# -
# ## Limited rows with included factors
# +
nrows = 2
include_factors = ['NewFact', 'SomeFact']
in_filepath = raw_data_folder_path / 'minimal_input_adj.csv'
out_filepath = f'formatted_data_2_all_facts.csv'
res_filepath = PCon.convert(in_filepath, out_filepath, nrows=nrows, include_factors=include_factors)
# Check against expected output from manually created worksheet
expected_filepath = raw_data_folder_path / 'minimal_expected_output_2_all_facts.csv' # Specifically created for this test
df_expected = PCon.load_formatted_file(expected_filepath)
df_reload = PCon.load_formatted_file(res_filepath)
# Check it matches expectations
if PCon.formatted_dfs_are_equal(df_reload, df_expected):
print("Correct: The reloaded values are equal, up to floating point tolerance")
# Delete the results file
res_filepath.unlink()
print("Workspace restored")
# -
# Further combinations are tested in the package's automated test suite.
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
| development/compiled/data-conversion-challenge-202004.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .venv
# language: python
# name: .venv
# ---
# +
import pandas as pd
import os
import json
import requests as req
import datetime
import math
from matplotlib import pyplot as plt
import matplotlib.ticker as mpticker
import mplfinance as mpf
#read in the finnhub.io token
with open('local_settings.txt') as f:
json_local = json.load(f)
finn_token = json_local["finn_token"]
#Set variables for the dataset
end_time = math.floor(datetime.datetime.utcnow().timestamp())
start_time = math.floor(end_time - datetime.timedelta(weeks=52).total_seconds())
# #testing on Apple Stock
ticker = "AAPL"
# finn_url = f"https://finnhub.io/api/v1/stock/candle?symbol={ticker}&resolution=D&from={start_time}&to={end_time}&token={finn_token}"
# print(finn_url)
# r = req.get(finn_url)
# data = r.json()
# df = pd.read_json(finn_url, orient='records')
# df['t'] = df['t'].apply(lambda x: datetime.datetime.utcfromtimestamp(x).date())
# df.rename(columns={"c": "close", "h": "high", "l":"low", "o":"open", "v":"volume", "t":"date"}, inplace=True)
# #df.set_index('date', inplace=True)
# df.set_index('date', inplace=True)
# df.index = pd.to_datetime(df.index)
# print(df)
# df.to_pickle("aapl.p")
daily = pd.read_pickle('aapl.p')
daily = daily.loc['2020-08-01':]
print(daily.head())
mpf.plot(daily,type='candle',mav=(3,6,9),volume=True,show_nontrading=True)
# +
smoothed_closes = list([math.floor(x) for x in daily['close']])
#print(smoothed_closes)
bins = [10,25,50,100, 200, 1000]
for bin in bins:
plt.hist(smoothed_closes, bins=bin)
plt.show()
# -
| Jupyter Books/StockPlots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# Generating random data points w/ a seed
# +
#sets RNG seed
np.random.seed(119)
#sets number of data points
npoints = 50
#set x
x = np.linspace(0,10,npoints)
#setting slope, intercept, uncertainty
m = 2.0
b = 1.0
sigma = 2.0
#generating y points
y = m*x + b + np.random.normal(scale=sigma, size=npoints)
y_err = np.full(npoints, sigma)
# -
plt.errorbar(x,y,sigma,fmt='.')
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
# +
m_fit, b_fit = np.poly1d(np.polyfit(x,y,1, w=1./y_err)) #weight with uncertainties
print(m_fit, b_fit)
y_fit = m_fit*x + b_fit
# -
plt.errorbar(x, y, yerr = y_err, fmt='.', label = 'fake data')
plt.plot(x, y_fit, label = 'fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=2, frameon=0)
# +
#import optimize from scipy
from scipy import optimize
#define a function to fit
def f_line(x, m, b):
    """Straight-line model y = m*x + b used as the target for curve fitting."""
    return b + m * x
#perform the fit (cov is covariance)
params, params_cov = optimize.curve_fit(f_line, x, y, sigma= y_err)
m_fit = params[0]
b_fit = params[1]
print(m_fit, b_fit)
# +
# optimize.curve_fit??
# -
plt.figure(figsize=(7,7))
plt.errorbar(x, y, yerr = y_err, fmt='.', label = 'fake data')
plt.plot(x, y_fit, label = 'fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=2, frameon=0)
# +
#redefine x and y
npoints = 50
x = np.linspace(0,2*np.pi,npoints)
#make y a complicated function
a = 3.4
b = 2.1
c = 0.27
d = -1.3
sig = 0.6
y = a*np.sin(b*x + c) + d + np.random.normal(scale = sig, size= npoints)
y_err = np.full(npoints, sig)
plt.figure(figsize=(7,7))
plt.errorbar(x, y, yerr=y_err, fmt = '.')
plt.xlabel('x')
plt.ylabel('y')
# +
def g_sin(x, a, b, c, d):
    """Sinusoidal model a*sin(b*x + c) + d used as the target for curve fitting."""
    phase = b * x + c
    return d + a * np.sin(phase)
#perform the fit
#perform the fit (cov is covariance)
params, params_cov = optimize.curve_fit(g_sin, x, y, sigma= y_err, p0=[1,2.,0.1,-0.1])
a_fit = params[0]
b_fit = params[1]
c_fit = params[2]
d_fit = params[3]
print(a_fit, b_fit, c_fit, d_fit)
y_fit = a_fit * np.sin(b_fit*x + c_fit) + d_fit
# -
plt.figure(figsize=(7,7))
plt.errorbar(x, y, yerr = y_err, fmt='.', label = 'fake data')
plt.plot(x, y_fit, label = 'fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=2, frameon=0)
# ## Algorithm for Bisection Method of Root Finding
# 1. Declare variables
#
# 2. Set maximum number of iterations to perform
#
# 3. Set tolerance to a small value (eg. 1.0e-6)
#
# 4. Set the two inital bracket values
# a) Check that the values bracket a root or singularity
# b) Determine value of function fnct at the two bracket values
# c) Make sure product of functional values is less than 0.0
# d)
#
#
| Sessions/session_7/Session_7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Section Schedule
#
# Schedule students to sections based on their preferences.
# ## Setup
#
# Import the required packages into the namespace.
# +
import difflib
import os
import numpy as np
import pandas as pd
import itertools
from typing import NamedTuple
# -
CAPACITIES = {
'cs61a': 4,
'cs61b': 6,
'cs70' : 6,
'ee16a': 6,
}
COURSE = 'cs61a'
def path(filename, directory=COURSE):
    """Return the path to *filename* inside *directory* (defaults to the current course folder)."""
    return os.path.join(directory, filename)
SEED = sum(ord(c) for c in 'Computer Science Mentors')
# ## Section schedule
CODE = 'Code'
EMAIL = 'Email Address'
COURSE = 'Course'
ROOM = 'Room'
CAP = 'Capacity'
TIME = 'Time'
# Import an existing schedule, if it exists.
#
# Section data should be specified in the format,
#
# ```
# 'Email Address', 'Course', 'Room', 'Capacity', 'Time'
# ```
#
# Capacity is important as we need to determine how many students can enroll into that section. If no capacity for a room is provided (or a non-integer capacity), then we will use the default capacity specified above per course.
# ### Using an existing schedule
section_schedule = pd.read_csv(path('section-schedule.csv'), dtype=str).set_index(CODE)
section_schedule.head()
# ### Generating control codes
def generate_control_code(row, length=6):
    """Return a control code of the desired length, zero-padded as necessary.

    BUG FIX: the original used the builtin ``hash``, which is salted per
    interpreter process for strings, so control codes changed between runs
    (defeating the notebook's reproducibility SEED). A SHA-256 digest of the
    row's values gives a stable, deterministic code.
    """
    import hashlib  # local import keeps this notebook cell self-contained
    digest = hashlib.sha256(repr(tuple(row)).encode('utf-8')).hexdigest()
    # Take the leading decimal digits of the digest, zero-padded to `length`
    return str(int(digest, 16))[:length].zfill(length)
section_schedule = pd.read_csv(path('room-schedule.csv'))
section_schedule[CODE] = section_schedule.apply(generate_control_code, axis=1, raw=True)
section_schedule = section_schedule.set_index(CODE)
section_schedule.head()
# #### Manually define a few sections
# +
manual_schedule = pd.DataFrame.from_records(
[
# ('Email', 'Course', 'Room', Capacity, 'Time'),
],
columns=[EMAIL, COURSE, ROOM, CAP, TIME]
)
manual_schedule[CODE] = manual_schedule.apply(generate_control_code, axis=1, raw=True)
manual_schedule = manual_schedule.set_index(CODE)
manual_schedule
# -
# #### Export schedule
section_schedule[CODE].to_csv(path('control-codes.csv'), index=False)
section_schedule.to_csv(path('section-schedule.csv'))
# ## Input data
#
# Load student preferences from a Google Form.
#
# **The data must be downloaded directly from the Form, rather than a linked Google Sheet so that data is properly quoted.**
# +
EMAIL = 'Username'
COURSE = 'Course'
FIRST = 'First option'
SECOND = 'Second option'
THIRD = 'Third option'
BACKUP = 'Backup options'
RANKS = [FIRST, SECOND, THIRD]
SPLIT_ON = r', ?'
# -
preferences = pd.read_csv(path('preferences.csv'), dtype=str)
preferences = pd.concat([
preferences[[EMAIL, COURSE] + RANKS],
preferences[BACKUP].str.split(SPLIT_ON, expand=True).fillna('').astype(str)
], axis=1).rename(columns=str).set_index(EMAIL)
preferences.head()
# ### Enrollment priority
#
# Give enrollment priority to a subset of the students.
EMAIL = 'Email Address'
PREF = 'Preferred'
priority = pd.read_csv(path('priority.csv'), dtype=str)['Email']
preferences.insert(1, PREF, preferences.index.isin(priority))
preferences[preferences[PREF] == True].head()
# ## Greedy algorithm
#
# Solve the problem using a simple greedy algorithm with randomized restarts.
# +
class Solution(NamedTuple):
    """Result of an assignment run: section assignments plus rank statistics."""
    assignments: dict  # Assignment -> section control code
    stats: dict  # rank name -> number of students who received that rank
    def metric(self, weights={FIRST: 3, SECOND: 2, THIRD: 1}):
        """Score the solution by weighting how many students got each rank."""
        total = 0
        for rank, count in self.stats.items():
            total += weights[rank] * count
        return total
class Assignment(NamedTuple):
    """A (student email, course) pair used as the key for section assignments."""
    email: str  # student's email address (the preferences index)
    course: str  # course code the student wants a section for
def generate_preference_slice(preferences, first=FIRST, index=1):
    """Return a slice selecting the preference columns of *preferences*.

    The slice starts *index* positions after the (lower-cased) *first*
    column and runs to the end of the columns.
    """
    columns = pd.Index(preferences.columns)
    start = columns.get_loc(first.lower()) + index
    return slice(start, len(preferences.columns))
# -
# ### Validate the solution
# +
def validate(preferences, schedule, ranks=RANKS, preference_slice=None):
    """Validate the preferences to check for errors in student input.

    Prints a line per problem and returns a DataFrame of the invalid entries
    with the closest matching section code from the schedule.
    """
    preferences = preferences.rename(columns=str.lower)
    schedule = schedule.rename(columns=str.lower)
    if preference_slice is None:
        preference_slice = generate_preference_slice(preferences)
    def closest(key):
        # Best fuzzy match among known section codes, or the key unchanged
        match = difflib.get_close_matches(key, schedule.index, n=1)
        return match[0] if match else key
    invalid = []
    for row in preferences.itertuples():
        for rank, preference in itertools.zip_longest(ranks, row[preference_slice]):
            if not preference:
                continue
            elif preference not in schedule.index:
                print(f'{row.Index}: {preference} not found in schedule')
                invalid += [(row.Index, preference, closest(preference))]
            elif row.course != schedule.loc[preference].course:
                # BUG FIX: original referenced an undefined name `course` here,
                # raising NameError whenever a course mismatch was found
                print(f'{row.Index}: {preference} does not match course {row.course}')
                invalid += [(row.Index, preference, closest(preference))]
    return pd.DataFrame.from_records(invalid, columns=['Email', 'Input', 'Match'])
# TODO: Write a function to replace invalid entries in the preferences with their match.
# -
validate(preferences, section_schedule)
LIMIT = 1000
rand = np.random.RandomState(SEED)
# +
def greedy(preferences, schedule, ranks=RANKS, default_cap=CAPACITIES[COURSE],
           preference_slice=None):
    """Return a naive greedy algorithm for assigning each student in the preferences list
    to a section in the schedule based on the ranks.

    Students are processed in order and get the highest-ranked preference that
    matches their course and still has space.

    NOTE(review): COURSE is rebound to the column label 'Course' earlier in
    this notebook, so ``CAPACITIES[COURSE]`` raises KeyError when this cell
    runs -- confirm the intended default (e.g. ``CAPACITIES['cs61a']``).
    """
    preferences = preferences.rename(columns=str.lower)
    schedule = schedule.rename(columns=str.lower)
    if preference_slice is None:
        preference_slice = generate_preference_slice(preferences)
    # Seed per-section capacities. BUG FIX: the original tested
    # `CAP in schedule.index` (row labels are control codes, so it was never
    # true) and hard-coded 4 instead of honouring `default_cap`.
    cap_col = CAP.lower()
    if cap_col in schedule.columns:
        enrolled = {}
        for code, capacity in schedule[[cap_col]].itertuples():
            try:
                enrolled[code] = int(capacity)
            except (TypeError, ValueError):
                # Missing / non-integer capacity falls back to the default
                enrolled[code] = default_cap
    else:
        enrolled = {code: default_cap for code in schedule.index}
    assignments = {}
    stats = {rank: 0 for rank in ranks}
    for row in preferences.itertuples():
        assignment = Assignment(row.Index, row.course)
        if assignment not in assignments:
            for rank, preference in itertools.zip_longest(ranks, row[preference_slice]):
                if (preference in schedule.index
                        and row.course == schedule.loc[preference].course
                        and enrolled[preference] > 0):
                    # Make an assignment if the preference exists, matches the
                    # course, and there is space still left in the section
                    assignments[assignment] = preference
                    if rank in stats:
                        stats[rank] += 1
                    enrolled[preference] -= 1
                    break
    return Solution(assignments, stats)
def sample(preferences, priority=None):
    """Resample the preferences, prioritizing by True/False column value.

    With no *priority* column, returns a simple shuffle; otherwise the rows
    where the column is True are shuffled first, followed by the rest.
    """
    if priority is None:
        return preferences.sample(frac=1, random_state=rand)
    # BUG FIX: DataFrame.append was removed in pandas 2.0; use pd.concat
    prioritized = preferences[preferences[priority]].sample(frac=1, random_state=rand)
    remainder = preferences[~preferences[priority]].sample(frac=1, random_state=rand)
    return pd.concat([prioritized, remainder])
# -
best = max((greedy(sample(preferences, priority=PREF), section_schedule)
for _ in range(LIMIT)), key=Solution.metric)
best.stats
len(best.assignments)
# ## Simulated Annealing
#
# Implement a simulated annealing algorithm to improve upon the best greedy solution.
# +
# TODO: Implement simulated annealing algorithm
# -
# ### Export schedule
schedule = pd.DataFrame.from_records((
(assignment.email, section) + tuple(section_schedule.loc[section])
for assignment, section in best.assignments.items()
), columns=['Student Email', 'Section', 'Mentor Email','Course', 'Room', 'Capacity', 'Time'])
schedule.to_csv(path('schedule.csv'), index=False)
| students/section_schedule.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# Nessie Iceberg/Hive SQL Demo with NBA Dataset
# ============================
# This demo showcases how to use Nessie Python API along with Hive from Iceberg
#
# Initialize PyHive
# ----------------------------------------------
# To get started, we will first have to do a few setup steps that give us everything we need
# to get started with Nessie. In case you're interested in the detailed setup steps for Hive, you can check out the [docs](https://projectnessie.org/tools/iceberg/hive/)
#
# The Binder server has downloaded Hive, Hadoop and some data for us as well as started a Nessie server in the background. All we have to do is to connect to Hive session.
#
# The below cell starts a local Hive session with parameters needed to configure Nessie. Each config option is followed by a comment explaining its purpose.
# + pycharm={"name": "#%%\n"} tags=[]
import os
from pyhive import hive
from pynessie import init
# where we will store our data
warehouse = "file://" + os.path.join(os.getcwd(), "nessie_warehouse")
# where our datasets are located
datasets_path = "file://" + os.path.join(os.path.dirname(os.getcwd()), "datasets")
nessie_client = init()
def create_ref_catalog(ref):
    """
    Create a branch and switch the current ref to the created branch

    If *ref* is already the default branch, nothing is created. Returns a
    Hive cursor configured to use the branch (see switch_ref_catalog).
    """
    default_branch = nessie_client.get_default_branch()
    if ref != default_branch:
        # Fork the new branch from the default branch at its current hash
        default_branch_hash = nessie_client.get_reference(default_branch).hash_
        nessie_client.create_branch(ref, ref=default_branch, hash_on_ref=default_branch_hash)
    return switch_ref_catalog(ref)
def switch_ref_catalog(ref):
    """Point the Hive session at the Nessie branch *ref*.

    Hive only reads the branch from the connection configuration, so switching
    branches means opening a fresh connection and returning its cursor.
    """
    # catalog-impl: which Iceberg catalog to use, in this case NessieCatalog
    # uri: the location of the nessie server
    # ref: the Nessie ref/branch we want to use
    # warehouse: the location this catalog should store its data
    catalog_settings = {
        "iceberg.catalog.dev_catalog.catalog-impl": "org.apache.iceberg.nessie.NessieCatalog",
        "iceberg.catalog.dev_catalog.uri": "http://localhost:19120/api/v1",
        "iceberg.catalog.dev_catalog.ref": ref,
        "iceberg.catalog.dev_catalog.warehouse": warehouse,
    }
    connection = hive.connect("localhost", configuration=catalog_settings)
    return connection.cursor()
print("\n\nHive running\n\n\n")
# -
# Solving Data Engineering problems with Nessie
# ============================
#
# In this Demo we are a data engineer working at a fictional sports analytics blog. In order for the authors to write articles they have to have access to the relevant data. They need to be able to retrieve data quickly and be able to create charts with it.
#
# We have been asked to collect and expose some information about basketball players. We have located some data sources and are now ready to start ingesting data into our data lakehouse. We will perform the ingestion steps on a Nessie branch to test and validate the data before exposing to the analysts.
# Set up Nessie branches (via Nessie CLI)
# ----------------------------
# Once all dependencies are configured, we can get started with ingesting our basketball data into `Nessie` with the following steps:
#
# - Create a new branch named `dev`
# - List all branches
#
# It is worth mentioning that we don't have to explicitly create a `main` branch, since it's the default branch.
current_ref = create_ref_catalog("dev")
# + [markdown] pycharm={"name": "#%% md\n"}
# We have created the branch `dev` and we can see the branch with the Nessie `hash` it's currently pointing to.
#
# Below we list all branches. Note that the auto created `main` branch already exists and both branches point at the same empty `hash` initially
# + pycharm={"name": "#%%\n"}
# !nessie --verbose branch
# -
# Create tables under dev branch
# -------------------------------------
# Once we created the `dev` branch and verified that it exists, we can create some tables and add some data.
#
# We create two tables under the `dev` branch:
# - `salaries`
# - `totals_stats`
#
# These tables list the salaries per player per year and their stats per year.
#
# To create the data we:
#
# 1. switch our branch context to dev
# 2. create the table
# 3. insert the data from an existing csv file. This csv file is already stored locally on the demo machine. A production use case would likely take feeds from official data sources
# +
# Creating our demo schema
current_ref.execute("CREATE SCHEMA IF NOT EXISTS nba")
print("\nCreated schema nba\n")
print("\nCreating tables nba.salaries and nba.totals_stats....\n")
# Creating `salaries` table
current_ref.execute(
f"""CREATE TABLE IF NOT EXISTS nba.salaries (Season STRING,
Team STRING, Salary STRING, Player STRING)
STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
LOCATION '{warehouse}/nba/salaries'
TBLPROPERTIES ('iceberg.catalog'='dev_catalog', 'write.format.default'='parquet',
'iceberg.mr.in.memory.data.model'='GENERIC')"""
)
## We create a temporary table to load data into our target table since
## is not possible to load data directly from CSV into non-native table.
current_ref.execute(
"""CREATE TABLE IF NOT EXISTS nba.salaries_temp (Season STRING,
Team STRING, Salary STRING, Player STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','"""
)
current_ref.execute(f'LOAD DATA LOCAL INPATH "{datasets_path}/nba/salaries.csv" OVERWRITE INTO TABLE nba.salaries_temp')
current_ref.execute("INSERT OVERWRITE TABLE nba.salaries SELECT * FROM nba.salaries_temp")
print("\nCreated and inserted data into table nba.salaries from dataset salaries\n")
# Creating `totals_stats` table
current_ref.execute(
f"""CREATE TABLE IF NOT EXISTS nba.totals_stats (
Season STRING, Age STRING, Team STRING, ORB STRING,
DRB STRING, TRB STRING, AST STRING, STL STRING,
BLK STRING, TOV STRING, PTS STRING, Player STRING, RSorPO STRING)
STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
LOCATION '{warehouse}/nba/totals_stats'
TBLPROPERTIES ('iceberg.catalog'='dev_catalog', 'write.format.default'='parquet',
'iceberg.mr.in.memory.data.model'='GENERIC')"""
)
## We create a temporary table to load data into our target table since
## is not possible to load data directly from CSV into non-native table.
current_ref.execute(
"""CREATE TABLE IF NOT EXISTS nba.totals_stats_temp (
Season STRING, Age STRING, Team STRING, ORB STRING,
DRB STRING, TRB STRING, AST STRING, STL STRING,
BLK STRING, TOV STRING, PTS STRING, Player STRING, RSorPO STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','"""
)
current_ref.execute(
f'LOAD DATA LOCAL INPATH "{datasets_path}/nba/totals_stats.csv" OVERWRITE INTO TABLE nba.totals_stats_temp'
)
current_ref.execute("INSERT OVERWRITE TABLE nba.totals_stats SELECT * FROM nba.totals_stats_temp")
print("\nCreated and inserted data into table nba.totals_stats from dataset totals_stats\n")
# + [markdown] jupyter={"outputs_hidden": false} pycharm={"name": "#%% md\n"}
# Now we count the rows in our tables to ensure they are the same number as the csv files. Unlike Spark and Flink demos, we can't use the notation of `table@branch` (see the github issue [here](https://github.com/projectnessie/nessie/issues/1985). Therefore, we just set Nessie ref settings through Hive setting `SET iceberg.catalog.{catalog}.ref = {branch}` whenever we want to work on a specific branch.
# + pycharm={"name": "#%%\n"} tags=[]
# We make sure we are still in dev branch
current_ref = switch_ref_catalog("dev")
print("\nCounting rows in nba.salaries\n")
# We count now
current_ref.execute("SELECT COUNT(*) FROM nba.salaries")
table_count = current_ref.fetchone()[0]
current_ref.execute("SELECT COUNT(*) FROM nba.salaries_temp")
csv_count = current_ref.fetchone()[0]
assert table_count == csv_count
print(table_count)
print("\nCounting rows in nba.totals_stats\n")
current_ref.execute("SELECT COUNT(*) FROM nba.totals_stats")
table_count = current_ref.fetchone()[0]
current_ref.execute("SELECT COUNT(*) FROM nba.totals_stats_temp")
csv_count = current_ref.fetchone()[0]
assert table_count == csv_count
print(table_count)
# -
# Check generated tables
# ----------------------------
# Since we have been working solely on the `dev` branch, where we created 2 tables and added some data,
# let's verify that the `main` branch was not altered by our changes.
# + pycharm={"name": "#%%\n"}
# !nessie content list
# + [markdown] jupyter={"outputs_hidden": false} pycharm={"name": "#%% md\n"}
# And on the `dev` branch we expect to see two tables
# -
# !nessie content list --ref dev
# + [markdown] pycharm={"name": "#%% md\n"}
# We can also verify that the `dev` and `main` branches point to different commits
# -
# !nessie --verbose branch
# Dev promotion into main
# -----------------------
# Once we are done with our changes on the `dev` branch, we would like to merge those changes into `main`.
# We merge `dev` into `main` via the command line `merge` command.
# Both branches should be at the same revision after merging/promotion.
# !nessie merge dev -b main --force
# + [markdown] pycharm={"name": "#%% md\n"}
# We can verify that the `main` branch now contains the expected tables and row counts.
#
# The tables are now on `main` and ready for consumption by our blog authors and analysts!
# -
# !nessie --verbose branch
# + pycharm={"name": "#%%\n"}
# !nessie content list
# + pycharm={"name": "#%%\n"}
# We switch to main branch
current_ref = switch_ref_catalog("main")
print("\nCounting rows in nba.salaries\n")
# We count now
current_ref.execute("SELECT COUNT(*) FROM nba.salaries")
table_count = current_ref.fetchone()[0]
current_ref.execute("SELECT COUNT(*) FROM nba.salaries_temp")
csv_count = current_ref.fetchone()[0]
assert table_count == csv_count
print(table_count)
print("\nCounting rows in nba.totals_stats\n")
current_ref.execute("SELECT COUNT(*) FROM nba.totals_stats")
table_count = current_ref.fetchone()[0]
current_ref.execute("SELECT COUNT(*) FROM nba.totals_stats_temp")
csv_count = current_ref.fetchone()[0]
assert table_count == csv_count
print(table_count)
# + [markdown] pycharm={"name": "#%% md\n"}
# Perform regular ETL on the new tables
# -------------------
# Our analysts are happy with the data and we want to now regularly ingest data to keep things up to date. Our first ETL job consists of the following:
#
# 1. Update the salaries table to add new data
# 2. We have decided the `Age` column isn't required in the `totals_stats` table so we will drop the column
# 3. We create a new table to hold information about the players appearances in all star games
#
# As always we will do this work on a branch and verify the results. This ETL job can then be set up to run nightly with new stats and salary information.
# -
current_ref = create_ref_catalog("etl")
# + pycharm={"name": "#%%\n"}
# add some salaries for <NAME>
current_ref.execute(
"""INSERT INTO nba.salaries
VALUES ('2017-18', 'Golden State Warriors', '$25000000', '<NAME>'),
('2018-19', 'Golden State Warriors', '$30000000', '<NAME>'),
('2019-20', 'Brooklyn Nets', '$37199000', '<NAME>'),
('2020-21', 'Brooklyn Nets', '$39058950', '<NAME>')"""
)
# +
print("\nCreating table nba.allstar_games_stats\n")
# Creating `allstar_games_stats` table
current_ref.execute(
f"""CREATE TABLE IF NOT EXISTS nba.allstar_games_stats (
Season STRING, Age STRING, Team STRING, ORB STRING,
TRB STRING, AST STRING, STL STRING, BLK STRING,
TOV STRING, PF STRING, PTS STRING, Player STRING)
STORED BY 'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler'
LOCATION '{warehouse}/nba/allstar_games_stats'
TBLPROPERTIES ('iceberg.catalog'='dev_catalog', 'write.format.default'='parquet',
'iceberg.mr.in.memory.data.model'='GENERIC')"""
)
## We create a temporary table to load data into our target table since
## is not possible to load data directly from CSV into non-native table.
current_ref.execute(
"""CREATE TABLE IF NOT EXISTS nba.allstar_table_temp (
Season STRING, Age STRING, Team STRING, ORB STRING, TRB STRING,
AST STRING, STL STRING, BLK STRING,
TOV STRING, PF STRING, PTS STRING, Player STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','"""
)
current_ref.execute(
f'LOAD DATA LOCAL INPATH "{datasets_path}/nba/allstar_games_stats.csv" OVERWRITE INTO TABLE nba.allstar_table_temp'
)
current_ref.execute("INSERT OVERWRITE TABLE nba.allstar_games_stats SELECT * FROM nba.allstar_table_temp")
print("\nCreated and inserted data into table nba.allstar_table_temp from dataset allstar_games_stats\n")
print("\nCounting rows in nba.allstar_games_stats\n")
# Since we can't do 'table@branch'
current_ref = switch_ref_catalog("etl")
current_ref.execute("SELECT COUNT(*) FROM nba.allstar_games_stats")
print(current_ref.fetchone()[0])
# -
# We can verify that the new table isn't on the `main` branch but is present on the etl branch
# Since we have been working on the `etl` branch, the `allstar_games_stats` table is not on the `main` branch
# !nessie content list
# We should see the new `allstar_games_stats` table on the `etl` branch
# !nessie content list --ref etl
# Now that we are happy with the data we can again merge it into `main`
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# !nessie merge etl -b main --force
# + [markdown] jupyter={"outputs_hidden": false} pycharm={"name": "#%% md\n"}
# Now lets verify that the changes exist on the `main` branch
# + pycharm={"name": "#%%\n"}
# !nessie content list
# + pycharm={"name": "#%%\n"}
# !nessie --verbose branch
# + pycharm={"name": "#%%\n"}
# We switch to the main branch
current_ref = switch_ref_catalog("main")
print("\nCounting rows in nba.allstar_games_stats\n")
# We count now
current_ref.execute("SELECT COUNT(*) FROM nba.allstar_games_stats")
table_count = current_ref.fetchone()[0]
current_ref.execute("SELECT COUNT(*) FROM nba.allstar_table_temp")
csv_count = current_ref.fetchone()[0]
assert table_count == csv_count
print(table_count)
# -
# Create `experiment` branch
# --------------------------------
# As a data analyst we might want to carry out some experiments with some data, without affecting `main` in any way.
# As in the previous examples, we can just get started by creating an `experiment` branch off of `main`
# and carry out our experiment, which could consist of the following steps:
# - drop `totals_stats` table
# - add data to `salaries` table
# - compare `experiment` and `main` tables
current_ref = create_ref_catalog("experiment")
# Drop the `totals_stats` table on the `experiment` branch
current_ref.execute("DROP TABLE nba.totals_stats")
# add some salaries for <NAME>
current_ref.execute(
"""INSERT INTO nba.salaries VALUES
('2015-16', 'Dallas Mavericks', '$8333333', 'Dirk Nowitzki'),
('2016-17', 'Dallas Mavericks', '$25000000', 'Dirk Nowitzki'),
('2017-18', 'Dallas Mavericks', '$5000000', 'Dirk Nowitzki'),
('2018-19', 'Dallas Mavericks', '$5000000', 'Dir<NAME>')"""
)
# We should see the `salaries` and `allstar_games_stats` tables only (since we just dropped `totals_stats`)
# !nessie content list --ref experiment
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# `main` hasn't been changed and still has the `totals_stats` table
# !nessie content list
# -
# Let's take a look at the contents of the `salaries` table on the `experiment` branch.
# +
current_ref = switch_ref_catalog("experiment")
print("\nCounting rows in nba.salaries\n")
current_ref.execute("SELECT COUNT(*) FROM nba.salaries")
print(current_ref.fetchone()[0])
# -
# and compare to the contents of the `salaries` table on the `main` branch.
# + pycharm={"name": "#%%\n"}
current_ref = switch_ref_catalog("main")
# the following INSERT is a workaround for https://github.com/apache/iceberg/pull/4509 until iceberg 0.13.2 is released
# add a single salary for <NAME> (so we expect 3 less total rows)
current_ref.execute(
"""INSERT INTO nba.salaries VALUES
('2018-19', 'Dallas Mavericks', '$5000000', '<NAME>')"""
)
print("\nCounting rows in nba.salaries\n")
current_ref.execute("SELECT COUNT(*) FROM nba.salaries")
print(current_ref.fetchone()[0])
# -
# And finally lets clean up after ourselves
# !nessie branch --delete dev
# !nessie branch --delete etl
# !nessie branch --delete experiment
| notebooks/nessie-iceberg-hive-demo-nba.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Summary
#
# The dataset for this project was collected from [kaggle](https://www.kaggle.com/shashwatwork/impact-of-covid19-pandemic-on-the-global-economy) and originates from Mendeley Data: [The Impact of Covid-19 Pandemic on the Global Economy: Emphasis on Poverty Alleviation and Economic Growth](https://data.mendeley.com/datasets/b2wvnbnpj9/1). The data I investigate here consists of records on the impact of covid-19 on the global economy including 210 countries.
#
# Main objective of the analysis is to focus on prediction. In this project, we will employ linear regression algorithms to find relationship between common GDP and human development index and total number of death. We will then choose the best candidate algorithm from preliminary results. The goal with this implementation is to construct a model that accurately predicts how the global economy of each country is affected.
#
# # Exploratory Data Analysis
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.linear_model import LinearRegression, Lasso, Ridge, RidgeCV, LassoCV, ElasticNetCV
from sklearn.pipeline import Pipeline
# Mute the sklearn warning about regularization
import warnings
warnings.filterwarnings('ignore', module='sklearn')
# Load the Kaggle/Mendeley COVID-economy dataset and shorten the long HDI
# column name for convenience.
data = pd.read_csv('./raw_data.csv', sep=',')
data = data.rename(columns={'human_development_index':'hdi'})
data.head()
# -
# Quick structural overview: size, columns, distinct countries, missingness.
data.info()
print('The total number of records: '+str(len(data.index)))
print('Column names: '+str(data.columns.tolist()))
print('Number of countries: '+str(len(data['location'].unique())))
print('Number of missing values: \n' + str(data.isnull().sum()))
# ## Featureset Exploration
#
# **iso_code**: country code
#
# **location**: name of the country
#
# **date**
#
# **total_cases**: number of COVID19 cases
#
# **total_deaths**
#
# **stringency_index**: The Stringency Index provides a computable parameter to evaluate the effectiveness of the nationwide lockdown. It is used by the Oxford COVID-19 Government Response Tracker with a database of 17 indicators of government response such as school and workplace closings, public events, public transport, stay-at-home policies. The Stringency Index is a number from 0 to 100 that reflects these indicators. A higher index score indicates a higher level of stringency.
#
# **population**
#
# **gdp_per_capita**: A country's GDP or gross domestic product is calculated by taking into account the monetary worth of a nation's goods and services after a certain period of time, usually one year. It's a measure of economic activity.
#
# **hdi**: The HDI was created to emphasize that people and their capabilities should be the ultimate criteria for assessing the development of a country, not economic growth alone. The Human Development Index (HDI) is a summary measure of average achievement in key dimensions of human development: a long and healthy life, being knowledgeable and have a decent standard of living. The HDI is the geometric mean of normalized indices for each of the three dimensions.
# ## Preparing the Data
#
# The following columns contain missing values: total_cases, total_deaths, stringency_index, population, gdp_per_capita, hdi. I decided to drop the rows with missing data as we would still have enough data(31518) to train our models.
#drop the irrelevant columns
# NOTE(review): the 'Unnamed: 9'..'Unnamed: 13' columns are presumably empty
# spreadsheet artifacts from the CSV export — confirm against raw_data.csv.
data = data.drop(['iso_code', 'Unnamed: 9', 'Unnamed: 10', 'Unnamed: 11', 'Unnamed: 12', 'Unnamed: 13'], axis = 1)
# Drop rows with any missing value; enough complete rows remain for training.
data = data.dropna(axis = 0)
data.isnull().sum()
len(data)
# Let's look at the correlation coefficient. A coefficient close to 1 means that there’s a very strong positive correlation between the two variables. The diagonal line is the correlation of the variables to themselves, that's why they are 1.
#
# In our case we can quickly see that:
# The Human Development Index (HDI) is strongly correlated with GDP per capita, and the total number of cases is strongly correlated with the number of deaths.
# The population also has a strong correlation to the number of total cases and deaths. This is what we expected. A high population will have a higher number of cases and deaths.
# What we are looking for is the relationship between GDP per capita(or HDI) and total number of cases or deaths.
# Pairwise Pearson correlations between the numeric columns, shown as an
# annotated heatmap (diagonal is trivially 1).
corr = data.corr(method='pearson')
fig = plt.subplots(figsize = (10, 4))
sns.heatmap(corr,
            xticklabels=corr.columns,
            yticklabels=corr.columns,
            cmap='YlOrBr',
            annot=True,
            linewidth=0.5)
# From the heatmap it seems that **GDP** and **HDI** are both more affected by the number of deaths than the number of cases.
# Log-transform the skewed features
# log(x + 1) keeps zero values defined while compressing the heavy right tails.
gdp_transformed = data['gdp_per_capita'].apply(lambda x: np.log(x + 1))
total_deaths_transformed = data['total_deaths'].apply(lambda x: np.log(x + 1))
# +
# Before/after histograms to visually confirm the transform reduced skew.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (15, 4))
ax1.hist(data['gdp_per_capita'])
ax2.hist(gdp_transformed)
ax1.set_title("GDP per Capita before log transform")
ax2.set_title("GDP per Capita after log transform")
ax1.set_ylabel("GDP")
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (15, 4))
ax1.hist(data['total_deaths'])
ax2.hist(total_deaths_transformed)
ax1.set_title("Total Number of Deaths before log transform")
ax2.set_title("Total Number of Deaths after log transform")
ax1.set_ylabel("Total Number of Deaths")
# -
# Replace the raw columns with their log-transformed versions.
data['gdp_per_capita'] = gdp_transformed
data['total_deaths'] = total_deaths_transformed
data.head()
# Apply scaler to normalise data. This ensures that each feature is treated equally when applying supervised learners.
scaler = MinMaxScaler()
numerical = ['total_deaths', 'gdp_per_capita']
features_log_minmax_transform = pd.DataFrame(data = data)
features_log_minmax_transform[numerical] = scaler.fit_transform(data[numerical])
features_log_minmax_transform
# After one-hot encoding the location column I found that the model was overfitting hence I decided to drop that column as it's not necessary for the learning algorithm.
# +
from sklearn.model_selection import train_test_split
# Predict (scaled, log) GDP per capita from case/death counts, stringency,
# population and HDI; location was dropped earlier to avoid overfitting.
X_data = features_log_minmax_transform[['total_cases','total_deaths','stringency_index','population','hdi']]
y_data = features_log_minmax_transform['gdp_per_capita']
# Split the data into training and testing sets (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size = 0.3, random_state = 42)
# Show the results of the split
print("Training set has {} samples.".format(X_train.shape[0]))
print("Testing set has {} samples.".format(X_test.shape[0]))
# -
# # Train models
#
# - Train the following models: Vanilla Linear, Ridge, Lasso, RidgeCV, LassoCV, Elastic Net
# - Compare accuracy scores
# - Compare root-mean square errors
# - Plot the results: prediction vs actual
kf = KFold(shuffle=True, random_state=72018, n_splits=3)
# +
# vanilla regression and K-fold cross validation
s = StandardScaler()
lr = LinearRegression()
X_train_s = s.fit_transform(X_train)
lr.fit(X_train_s, y_train)
# BUG FIX: keep the standardized copy in a separate variable. The original
# cell overwrote X_test with s.transform(X_test), so every later cell that
# called model.predict(X_test) fed standardized features to models fit on
# unscaled data.
X_test_s = s.transform(X_test)
y_pred = lr.predict(X_test_s)
score = r2_score(y_test.values, y_pred)
# with pipeline: the Pipeline handles scaling internally, so it receives the
# raw (unscaled) training frame.
estimator = Pipeline([("scaler", s),("regression", lr)])
predictions_lr = cross_val_predict(estimator, X_train, y_train, cv=kf)
linear_score = r2_score(y_train, predictions_lr)
linear_score, score #almost identical
# -
# lasso regression and K-fold cross validation
# Sweep 20 log-spaced alphas, scoring each by cross-validated R^2.
s = StandardScaler()
# NOTE(review): degree=3 polynomial features are used for the sweep here, but
# the "best" estimator in the next cell uses degree=2 — confirm intended.
pf = PolynomialFeatures(degree=3)
kf = KFold(shuffle=True, random_state=72018, n_splits=3)
scores = []
alphas = np.geomspace(0.06, 6.0, 20)
predictions_lsr = []
for alpha in alphas:
    las = Lasso(alpha=alpha, max_iter=100000)
    estimator = Pipeline([
        ("scaler", s),
        ("make_higher_degree", pf),
        ("lasso_regression", las)])
    predictions_lsr = cross_val_predict(estimator, X_train, y_train, cv = kf)
    score = r2_score(y_train, predictions_lsr)
    scores.append(score)
# R^2 as a function of alpha on a log x-axis.
plt.semilogx(alphas, scores, '-o', color='purple')
plt.title('Lasso Regression')
plt.xlabel('$\\alpha$')
plt.ylabel('$R^2$');
# +
# Refit a single "best" lasso pipeline and score it.
# NOTE(review): these are train-set scores (fit and score on X_train), so they
# are optimistic compared to the cross-validated scores above.
best_estimator = Pipeline([
    ("scaler", s),
    ("make_higher_degree", PolynomialFeatures(degree=2)),
    ("lasso_regression", Lasso(alpha=0.03))])
best_estimator.fit(X_train, y_train)
lasso_score = best_estimator.score(X_train, y_train)
# +
# ridge regression and K-fold cross validation
pf = PolynomialFeatures(degree=2)
alphas = np.geomspace(4, 20, 20)
scores=[]
predictions_rr = []
for alpha in alphas:
    ridge = Ridge(alpha=alpha, max_iter=100000)
    estimator = Pipeline([
        ("scaler", s),
        ("polynomial_features", pf),
        ("ridge_regression", ridge)])
    predictions_rr = cross_val_predict(estimator, X_train, y_train, cv = kf)
    score = r2_score(y_train, predictions_rr)
    scores.append(score)
plt.plot(alphas, scores, '-o', color='purple')
plt.title('Ridge Regression')
plt.xlabel('$\\alpha$')
plt.ylabel('$R^2$');
# +
# NOTE(review): alpha=0.03 is outside the 4–20 grid searched above — verify
# this is the intended final ridge penalty.
best_estimator = Pipeline([
    ("scaler", s),
    ("make_higher_degree", PolynomialFeatures(degree=2)),
    ("ridge_regression", Ridge(alpha=0.03))])
best_estimator.fit(X_train, y_train)
ridge_score = best_estimator.score(X_train, y_train)
# -
# comparing scores
pd.DataFrame([[linear_score, lasso_score, ridge_score]],columns=['linear', 'lasso', 'ridge'], index=['score'])
# **Conclusion**: Both Lasso and Ridge with proper hyperparameter tuning give better results than plain Linear Regression!
# +
def rmse(ytrue, ypredicted):
    """Root-mean-square error between true and predicted values.

    Computed directly with numpy, which is numerically identical to
    ``np.sqrt(mean_squared_error(...))`` for 1-D numeric inputs while keeping
    this small helper free of sklearn's input-validation overhead.
    """
    ytrue = np.asarray(ytrue, dtype=float)
    ypredicted = np.asarray(ypredicted, dtype=float)
    return np.sqrt(np.mean((ytrue - ypredicted) ** 2))
# Fit a basic linear regression model
linearRegression = LinearRegression().fit(X_train, y_train)
linearRegression_rmse = rmse(y_test, linearRegression.predict(X_test))
# Fit a Ridge model, choosing alpha by cross validation (RidgeCV with cv=4;
# the original comment incorrectly called this "non-cross validated")
alphas = [0.005, 0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 80]
ridgeCV = RidgeCV(alphas=alphas, cv=4).fit(X_train, y_train)
ridgeCV_rmse = rmse(y_test, ridgeCV.predict(X_test))
# Fit a Lasso model using cross validation and determine the optimum value for 𝛼
alphas2 = np.array([1e-5, 5e-5, 0.0001, 0.0005])
lassoCV = LassoCV(alphas=alphas2,
                  max_iter=5e4,
                  cv=3).fit(X_train, y_train)
lassoCV_rmse = rmse(y_test, lassoCV.predict(X_test))
# Fit elastic net with the same set of alphas as lasso
l1_ratios = np.linspace(0.1, 0.9, 9)
elasticNetCV = ElasticNetCV(alphas=alphas2,
                            l1_ratio=l1_ratios,
                            max_iter=1e4).fit(X_train, y_train)
elasticNetCV_rmse = rmse(y_test, elasticNetCV.predict(X_test))
rmse_vals = [linearRegression_rmse, ridgeCV_rmse, lassoCV_rmse, elasticNetCV_rmse]
# BUG FIX: labels must follow the order of rmse_vals (linear, ridge, lasso,
# elastic net). The original list also dropped a comma, so 'Ridge' 'ElasticNet'
# silently concatenated into the single string 'RidgeElasticNet'.
labels = ['Linear', 'Ridge', 'Lasso', 'ElasticNet']
# BUG FIX: column names reordered to match the value order — previously the
# ridge RMSE was displayed under 'Lasso' and the lasso RMSE under 'Ridge'.
rmse_df = pd.DataFrame([[linearRegression_rmse, ridgeCV_rmse, lassoCV_rmse, elasticNetCV_rmse]],columns=['Linear', 'Ridge', 'Lasso', 'ElasticNet'], index=['rmse'])
rmse_df
# +
# Scatter each model's predictions against the actual test targets; points on
# the diagonal indicate perfect predictions.
f = plt.figure(figsize=(6,6))
ax = plt.axes()
labels, models = ['Linear', 'Ridge', 'Lasso', 'ElasticNet'], [linearRegression, ridgeCV, lassoCV, elasticNetCV]
for mod, label in zip(models, labels):
    ax.plot(y_test, mod.predict(X_test), marker='o', ls='', ms=3.0, label=label, alpha=1)
leg = plt.legend(frameon=True)
leg.get_frame().set_edgecolor('black')
leg.get_frame().set_linewidth(1.0)
ax.set(xlabel='Actual', ylabel='Predicted', title='Linear Regression Results')
# -
# **Conclusion 2**: Lasso gives the smallest root-mean-square error; however, the differences in scores and errors across models are small and the results are almost identical. Based on the root-mean-square error and score results, the best candidate is Lasso regression, so we recommend LassoCV as the final model that best fits the data in terms of accuracy.
# # Next Steps
#
# We could further try optimize Lasso using GridSearchCV.
#
# To predict the effect on GDP for an individual country, we could one-hot encode the location or iso_code columns and use that for training our models. Perhaps collecting more frequent records on specific countries would help achieve more accurate results.
| supervised-learning-regression/Supervised Learning - Regression Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Notebook settings
# %load_ext autoreload
# %autoreload 2
# %config Completer.use_jedi = False
# # SpikeInterface pipeline for Movshon Lab - OpenEphys
# +
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
import pytz
import spikeextractors as se
import spiketoolkit as st
import spikesorters as ss
import spikecomparison as sc
import spikewidgets as sw
# -
# %matplotlib notebook
# ## 1) Load recordings, compute LFP, and inspect signals
# +
# Data files directory
# dir_path = Path('/home/luiz/storage/taufferconsulting/client_ben/project_movshon/movshon_data/oephys')
# dir_path = Path('/home/luiz/storage/taufferconsulting/client_ben/project_movshon/movshon_data/expo/exampledata/expo_openephys/m666l3#7/openephys')
# NOTE(review): machine-specific absolute path — update for your environment.
dir_path = Path('/Users/abuccino/Documents/Data/catalyst/movshon/m666l3#7/openephys')
# Spikeinterface directory (created if absent) for all pipeline outputs
dir_spikeinterface = dir_path / "spikeinterface"
dir_spikeinterface.mkdir(parents=True, exist_ok=True)
print(dir_spikeinterface)
# Create recording extractor
recording_0 = se.OpenEphysRecordingExtractor(folder_path=dir_path)
# Load probe info from .prb file (attaches channel locations/groups)
probe_file = Path("..") / "probe_files" / 'A32_ACDC.prb'
recording = recording_0.load_probe_file(probe_file=probe_file)
print()
print(f"Num channels: {recording.get_num_channels()}")
print(f"Sampling rate: {recording.get_sampling_frequency()}")
print(f"Duration (s): {recording.get_num_frames() / recording.get_sampling_frequency()}")
# -
# Visualize the probe layout loaded from the .prb file
fig = plt.figure(figsize=(12, 6))
sw.plot_electrode_geometry(recording, figure=fig)
# ### Compute LFP
# +
# LFP band: 1-300 Hz, resampled down to 1 kHz
freq_min_lfp = 1
freq_max_lfp = 300
freq_resample_lfp = 1000.
# Apply bandpass filter
recording_lfp = st.preprocessing.bandpass_filter(
    recording=recording,
    freq_min=freq_min_lfp,
    freq_max=freq_max_lfp
)
# Resample lfp (reduces data rate for downstream LFP analysis)
recording_lfp = st.preprocessing.resample(
    recording=recording_lfp,
    resample_rate=freq_resample_lfp,
)
print(f"Sampling frequency Raw: {recording.get_sampling_frequency()}")
print(f"Sampling frequency LF: {recording_lfp.get_sampling_frequency()}")
# -
# ### Inspect signals
# First 5 seconds of the raw and LFP traces
fig = plt.figure(figsize=(16, 8))
w_ts_raw = sw.plot_timeseries(recording, trange=[0, 5], figure=fig)
fig = plt.figure(figsize=(16, 8))
w_ts_lf = sw.plot_timeseries(recording_lfp, trange=[0, 5], figure=fig)
# ## 2) Pre-processing
# - Filters
# - Common-reference removal
# - Remove bad channels
# - Remove stimulation artifacts
#
# Ref: https://spikeinterface.readthedocs.io/en/latest/modules/toolkit/plot_1_preprocessing.html#preprocessing-tutorial
# Preprocessing toggles: high-pass band for spikes, optional common reference
apply_filter = True
apply_cmr = True
freq_min_hp = 300
freq_max_hp = 3000
# +
# Filtered recordings
if apply_filter:
    recording_processed = st.preprocessing.bandpass_filter(recording, freq_min=freq_min_hp, freq_max=freq_max_hp)
else:
    recording_processed = recording
if apply_cmr:
    recording_processed = st.preprocessing.common_reference(recording_processed)
# Stub recording for fast testing; set to False for running processing pipeline on entire data
stub_test = True
nsec_stub = 30
# NOTE(review): this keeps channel ids 1..31 — one channel of the 32-channel
# probe is excluded; confirm this is intentional (e.g. a known bad channel).
subr_ids = [i + 1 for i in range(31)]
if stub_test:
    recording_processed = se.SubRecordingExtractor(
        parent_recording=recording_processed,
        channel_ids=subr_ids,
        end_frame=int(nsec_stub*recording_processed.get_sampling_frequency())
    )
    recording_lfp = se.SubRecordingExtractor(recording_lfp, end_frame=int(nsec_stub*recording_lfp.get_sampling_frequency()))
print(f"Original signal length: {recording.get_num_frames()}")
print(f"Processed signal length: {recording_processed.get_num_frames()}")
# -
fig = plt.figure(figsize=(16, 8))
w_ts_processed = sw.plot_timeseries(recording_processed, trange=[0, 5], figure=fig)
# ## 3) Run spike sorters
#
# Ref: https://spikeinterface.readthedocs.io/en/latest/sortersinfo.html
# List the sorters available in this environment
ss.installed_sorters()
# +
sorter_list = ['klusta', 'herdingspikes']
sorter_params = dict()
# Inspect sorter-specific parameters and defaults
for sorter in sorter_list:
    print(f"\n\n{sorter} params description:\n")
    pprint(ss.get_params_description(sorter))
    print("Default params:")
    sorter_params[sorter] = ss.get_default_params(sorter)
    pprint(sorter_params)
    print()
# +
# Choose which recording to use for sorting
rec_to_sort = recording_processed #se.CacheRecordingExtractor(recording_processed, "cache.dat")
# Sorting parameters - Herdingspikes
# filter=False because the recording is already bandpass-filtered above
sorter_params['herdingspikes'] = {
    'filter': False,
    'filter_duplicates': False,
    'probe_neighbor_radius': 200,
}
# Run each sorter and collect its SortingExtractor output
sorting_outputs = {}
for sorter_name in sorter_list:
    # run sorter
    sorting = ss.run_sorter(
        sorter_name_or_class=sorter_name,
        recording=rec_to_sort,
        output_folder=dir_spikeinterface / "si_output" / sorter_name,
        **sorter_params[sorter_name],
        verbose=False
    )
    sorting_outputs[sorter_name] = sorting
for sorter_name in sorter_list:
    print(f'Sorter {sorter_name} identified {len(sorting_outputs[sorter_name].get_unit_ids())} units')
# -
# ## 4) Post-processing
# - Compute spike waveforms
# - Compute unit templates
# - Compute extracellular features
#
# Ref: https://spikeinterface.readthedocs.io/en/latest/modules/toolkit/plot_2_postprocessing.html
# Post-processing params
postprocessing_params = st.postprocessing.get_common_params()
pprint(postprocessing_params)
# (optional) change parameters
postprocessing_params['max_spikes_per_unit'] = 1000 # with None, all waveforms are extracted
postprocessing_params['recompute_info'] = False # reset to True to recompute
postprocessing_params["verbose"] = False
# +
# Choose sorter to postprocess
sorting = sorting_outputs['klusta']
# Local scratch directory for waveform extraction intermediates.
tmp_folder = dir_spikeinterface / 'tmp' / 'klusta'
tmp_folder.mkdir(parents=True, exist_ok=True)
# set local tmp folder
sorting.set_tmp_folder(tmp_folder)
# compute waveforms (per-spike snippets; limited by max_spikes_per_unit above)
waveforms = st.postprocessing.get_unit_waveforms(rec_to_sort, sorting, **postprocessing_params)
# compute templates (per-unit average waveforms)
templates = st.postprocessing.get_unit_templates(rec_to_sort, sorting, **postprocessing_params)
# -
# Visualize spike template waveforms
fig = plt.figure(figsize=(16, 8))
sw.plot_unit_templates(rec_to_sort, sorting, unit_ids=[22, 31, 41], figure=fig)
# Extracellular features
ec_list = st.postprocessing.get_template_features_list()
print(f"Available EC features: {ec_list}")
# +
# (optional) define subset of ec
ec_list = ["peak_to_valley", "halfwidth"]
# compute extracellular (EC) features
ec = st.postprocessing.compute_unit_template_features(
rec_to_sort,
sorting,
feature_names=ec_list,
as_dataframe=True
)
ec.head(10)
# -
# # 5) Curating spike sorting outputs
#
# ### 5a) Manual curation with Phy
# +
# export to phy
phy_folder = dir_spikeinterface / 'phy' / 'klusta'
phy_folder.mkdir(parents=True, exist_ok=True)
print("Exporting to phy")
# here setting save_property_or_features to False so that Phy doesn't overwrite existing wfs, templates
st.postprocessing.export_to_phy(
rec_to_sort,
sorting,
phy_folder,
save_property_or_features=False,
verbose=True
)
# -
# Reload curated output from phy
sorting_manual_curated = se.PhySortingExtractor(phy_folder, exclude_cluster_groups=["noise"])
# ### 5b) Automatic curation - quality metrics
#
# You can automatically curate the spike sorting output using the quality metrics.
#
# Ref: https://spikeinterface.readthedocs.io/en/latest/modules/toolkit/plot_4_curation.html
# Quality metrics
qc_list = st.validation.get_quality_metrics_list()
print(f"Available quality metrics: {qc_list}")
# (optional) define subset of qc
qc_list = ["snr", "isi_violation", "firing_rate"]
# +
# compute quality metrics
qc = st.validation.compute_quality_metrics(
sorting=sorting,
recording=rec_to_sort,
metric_names=qc_list,
as_dataframe=True
)
qc.head(10)
# -
# # 5) Curating spike sorting outputs
#
# ### 5a) Manual curation with Phy
# NOTE(review): this cell repeats the "export to phy" step already performed in
# section 5a above (same folder, same arguments) — confirm the duplication is
# intentional before removing either copy.
# export to phy
phy_folder = dir_spikeinterface / 'phy' / 'klusta'
phy_folder.mkdir(parents=True, exist_ok=True)
print("Exporting to phy")
# here setting save_property_or_features to False so that Phy doesn't overwrite existing wfs, templates
st.postprocessing.export_to_phy(rec_to_sort, sorting, phy_folder, save_property_or_features=False,
                                verbose=True)
# ### Reload curated output from phy
# define curators and thresholds
# Units below this firing rate are dropped — presumably in Hz; verify against
# the spiketoolkit curation docs.
firing_rate_threshold = 0.1
# Units with an ISI-violation metric above this value are dropped.
isi_violation_threshold = 0.6
# Units with signal-to-noise ratio below this value are dropped.
snr_threshold = 4
# +
num_frames = rec_to_sort.get_num_frames()
# firing rate threshold: remove units firing LESS than the threshold
sorting_curated = st.curation.threshold_firing_rates(
    sorting,
    duration_in_frames=num_frames,
    threshold=firing_rate_threshold,
    threshold_sign='less'
)
# isi violation threshold: remove units violating the ISI criterion MORE than allowed
sorting_curated = st.curation.threshold_isi_violations(
    sorting_curated,
    duration_in_frames=num_frames,
    threshold=isi_violation_threshold,
    threshold_sign='greater'
)
# snr threshold: remove units whose SNR is LESS than the threshold
sorting_curated = st.curation.threshold_snrs(
    sorting_curated,
    recording=rec_to_sort,
    threshold=snr_threshold,
    threshold_sign='less'
)
# -
print(f'Number of sorted units before curation: {len(sorting.get_unit_ids())}')
print(f'Number of sorted units after curation: {len(sorting_curated.get_unit_ids())}')
# ### 5c) Multisorting comparison - get consensus-based ensemble results
# Ref: https://spikeinterface.readthedocs.io/en/latest/modules/comparison/plot_2_compare_multiple_sorters.html?highlight=get_agreement_sorting#consensus-based-method
if len(sorter_list) > 1:
    # run multisorting comparison: matches units across sorters so that
    # agreement between independent algorithms can be quantified
    mcmp = sc.compare_multiple_sorters(
        sorting_list=list(sorting_outputs.values()),
        name_list=sorter_list
    )
    # plot agreement results
    fig = plt.figure(figsize=(12, 7))
    w_agr = sw.plot_multicomp_agreement(
        multi_sorting_comparison=mcmp,
        plot_type='pie', #'bar'
        figure=fig
    )
    # extract ensemble sorting: keep only units found by at least 2 sorters
    sorting_ensemble = mcmp.get_agreement_sorting(minimum_agreement_count=2)
    print(f"Ensemble sorting among {sorter_list} found: {len(sorting_ensemble.get_unit_ids())} units")
# # 6) Quick save to NWB
#
# To complete the full conversion with other types of data and extended options, use a NWB Converter class.
#
# NWBFile metadata reference: https://pynwb.readthedocs.io/en/stable/pynwb.file.html#pynwb.file.NWBFile
# +
from nwb_conversion_tools.utils.spike_interface import write_recording, write_sorting
output_nwb = 'openephys_si_results.nwb'
# Add customized Metadata info - Optional
session_start_time = recording_0._fileobj.experiments[0].datetime
session_start_time_tzaware = pytz.timezone('EST').localize(session_start_time)
metadata = se.NwbRecordingExtractor.get_nwb_metadata(recording=rec_to_sort)
metadata['NWBFile'].update(
session_start_time=session_start_time_tzaware,
session_description='a detailed description of this experimental session...',
institution='NYU',
lab='Movshon lab',
pharmacology='Description of drugs used',
experimenter=['Person1', 'Person2'],
keywords=['openephys', 'tutorial', 'etc']
)
metadata['Ecephys']['Device'][0].update(description='a detailed description of this device')
# Write voltage traces data
write_recording(
recording=rec_to_sort,
save_path=output_nwb,
overwrite=True,
metadata=metadata
)
# Write spiking data
write_sorting(
sorting=sorting,
save_path=output_nwb,
overwrite=False
)
# +
# Check NWB file with widgets
from pynwb import NWBFile, NWBHDF5IO
from nwbwidgets import nwb2widget
output_nwb = 'openephys_si_results.nwb'
io = NWBHDF5IO(output_nwb, 'r')
nwbfile = io.read()
nwb2widget(nwbfile)
# -
io.close()
# # 7) Include Expo trials with NWB conversion tools
from movshon_lab_to_nwb import MovshonExpoNWBConverter
# +
# Source data
base_path = Path('/home/luiz/storage/taufferconsulting/client_ben/project_movshon/movshon_data/expo/exampledata/expo_openephys/m666l3#7')
expo_file = base_path / 'm666l3#7[ori16].xml'
ttl_file = base_path / 'openephys/100_ADC1.continuous'
source_data = dict(
ExpoDataInterface=dict(
expo_file=str(expo_file),
ttl_file=str(ttl_file)
)
)
# Initialize converter
converter = MovshonExpoNWBConverter(source_data=source_data)
# Conversion options
conversion_options = dict(
ExpoDataInterface=dict(convert_expo=True)
)
converter.run_conversion(
nwbfile_path=output_nwb,
overwrite=False,
conversion_options=conversion_options
)
# -
io = NWBHDF5IO(output_nwb, 'r')
nwbfile = io.read()
nwb2widget(nwbfile)
io.close()
nwbfile.units
| tutorials/openephys_spikeinterface_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Advanced topic: Heat transport decomposition
#
# This notebook is part of [The Climate Laboratory](https://brian-rose.github.io/ClimateLaboratoryBook) by [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.
# -
# *This notebook is an extension of the [Heat transport notes](https://brian-rose.github.io/ClimateLaboratoryBook/courseware/heat-transport.html) containing some additional advanced material on the decomposition of total heat transport in components. The advanced notes are unfinished but may be useful.*
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section1'></a>
#
# ## 1. Spatial patterns of insolation and surface temperature
# ____________
#
# Let's take a look at seasonal and spatial pattern of insolation and compare this to the zonal average surface temperatures.
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import climlab
from climlab import constants as const
# -
# Calculate daily average insolation as function of latitude and time of year
lat = np.linspace( -90., 90., 500 )
days = np.linspace(0, const.days_per_year, 365 )
Q = climlab.solar.insolation.daily_insolation( lat, days )
# + slideshow={"slide_type": "slide"}
## daily surface temperature from NCEP reanalysis
ncep_url = "http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis.derived/"
ncep_temp = xr.open_dataset( ncep_url + "surface_gauss/skt.sfc.day.1981-2010.ltm.nc", decode_times=False)
#url = 'http://apdrc.soest.hawaii.edu:80/dods/public_data/Reanalysis_Data/NCEP/NCEP/clima/'
#skt_path = 'surface_gauss/skt'
#ncep_temp = xr.open_dataset(url+skt_path)
ncep_temp_zon = ncep_temp.skt.mean(dim='lon')
# +
fig = plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(121)
CS = ax1.contour( days, lat, Q , levels = np.arange(0., 600., 50.) )
ax1.clabel(CS, CS.levels, inline=True, fmt='%r', fontsize=10)
ax1.set_title('Daily average insolation', fontsize=18 )
ax1.contourf ( days, lat, Q, levels=[-100., 0.], colors='k' )
ax2 = fig.add_subplot(122)
CS = ax2.contour( (ncep_temp.time - ncep_temp.time[0])/const.hours_per_day, ncep_temp.lat,
ncep_temp_zon.T, levels=np.arange(210., 310., 10. ) )
ax2.clabel(CS, CS.levels, inline=True, fmt='%r', fontsize=10)
ax2.set_title('Observed zonal average surface temperature', fontsize=18 )
for ax in [ax1,ax2]:
ax.set_xlabel('Days since January 1', fontsize=16 )
ax.set_ylabel('Latitude', fontsize=16 )
ax.set_yticks([-90,-60,-30,0,30,60,90])
ax.grid()
# + [markdown] slideshow={"slide_type": "slide"}
# This figure reveals something fairly obvious, but still worth thinking about:
#
# **Warm temperatures are correlated with high insolation**. It's warm where the sun shines.
#
# More specifically, we can see a few interesting details here:
#
# - The seasonal cycle is weakest in the tropics and strongest in the high latitudes.
# - The warmest temperatures occur slightly NORTH of the equator
# - The highest insolation occurs at the poles at summer solstice.
# + [markdown] slideshow={"slide_type": "slide"}
# The local surface temperature does not correlate perfectly with local insolation for two reasons:
#
# - the climate system has heat capacity, which buffers some of the seasonal variations
# - the climate system moves energy around in space!
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section2'></a>
#
# ## 2. Calculating Radiative-Convective Equilibrium as a function of latitude
# ____________
#
# As a first step to understanding the effects of **heat transport by fluid motions** in the atmosphere and ocean, we can calculate **what the surface temperature would be without any motion**.
#
# Let's calculate a **radiative-convective equilibrium** state for every latitude band.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Putting realistic insolation into an RCM
#
# This code demonstrates how to create a model with both latitude and vertical dimensions.
# + slideshow={"slide_type": "slide"}
# A two-dimensional domain
state = climlab.column_state(num_lev=30, num_lat=40, water_depth=10.)
# Specified relative humidity distribution
h2o = climlab.radiation.ManabeWaterVapor(name='Fixed Relative Humidity', state=state)
# Hard convective adjustment
conv = climlab.convection.ConvectiveAdjustment(name='Convective Adjustment', state=state, adj_lapse_rate=6.5)
# Daily insolation as a function of latitude and time of year
sun = climlab.radiation.DailyInsolation(name='Insolation', domains=state['Ts'].domain)
# Couple the radiation to insolation and water vapor processes
rad = climlab.radiation.RRTMG(name='Radiation',
state=state,
specific_humidity=h2o.q,
albedo=0.125,
insolation=sun.insolation,
coszen=sun.coszen)
model = climlab.couple([rad,sun,h2o,conv], name='RCM')
print( model)
# -
model.compute_diagnostics()
# + slideshow={"slide_type": "-"}
fig, ax = plt.subplots()
ax.plot(model.lat, model.insolation)
ax.set_xlabel('Latitude')
ax.set_ylabel('Insolation (W/m2)');
# + [markdown] slideshow={"slide_type": "slide"}
# This new insolation process uses the same code we've already been working with to compute realistic distributions of insolation. Here we are using
# ```
# climlab.radiation.DailyInsolation
# ```
# but there is also
#
# ```
# climlab.radiation.AnnualMeanInsolation
# ```
# for models in which you prefer to suppress the seasonal cycle and prescribe a time-invariant insolation.
# + [markdown] slideshow={"slide_type": "slide"}
# The following code will just integrate the model forward in four steps in order to get snapshots of insolation at the solstices and equinoxes.
# +
# model is initialized on Jan. 1
# integrate forward just under 1/4 year... should get about to the NH spring equinox
model.integrate_days(31+28+22)
Q_spring = model.insolation.copy()
# Then forward to NH summer solstice
model.integrate_days(31+30+31)
Q_summer = model.insolation.copy()
# and on to autumnal equinox
model.integrate_days(30+31+33)
Q_fall = model.insolation.copy()
# and finally to NH winter solstice
model.integrate_days(30+31+30)
Q_winter = model.insolation.copy()
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
ax.plot(model.lat, Q_spring, label='Spring')
ax.plot(model.lat, Q_summer, label='Summer')
ax.plot(model.lat, Q_fall, label='Fall')
ax.plot(model.lat, Q_winter, label='Winter')
ax.legend()
ax.set_xlabel('Latitude')
ax.set_ylabel('Insolation (W/m2)');
# + [markdown] slideshow={"slide_type": "slide"}
# This just serves to demonstrate that the `DailyInsolation` process is doing something sensible.
# -
# Note that we could also pass different orbital parameters to this subprocess. They default to present-day values, which is what we are using here.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Find the steady seasonal cycle of temperature in radiative-convective equilibrium
# -
model.integrate_years(4.)
model.integrate_years(1.)
# + [markdown] slideshow={"slide_type": "slide"}
# All climlab `Process` objects have an attribute called `timeave`.
#
# This is a dictionary of time-averaged diagnostics, which are automatically calculated during the most recent call to `integrate_years()` or `integrate_days()`.
# -
model.timeave.keys()
# + [markdown] slideshow={"slide_type": "slide"}
# Here we use the `timeave['insolation']` to plot the annual mean insolation.
#
# (We know it is the *annual* average because the last call to `model.integrate_years` was for exactly 1 year)
# -
fig, ax = plt.subplots()
ax.plot(model.lat, model.timeave['insolation'])
ax.set_xlabel('Latitude')
ax.set_ylabel('Insolation (W/m2)')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Compare annual average temperature in RCE to the zonal-, annual mean observations.
# -
# Plot annual mean surface temperature in the model,
# compare to observed annual mean surface temperatures
fig, ax = plt.subplots()
ax.plot(model.lat, model.timeave['Ts'], label='RCE')
ax.plot(ncep_temp_zon.lat, ncep_temp_zon.mean(dim='time'), label='obs')
ax.set_xticks(range(-90,100,30))
ax.grid(); ax.legend();
# + [markdown] slideshow={"slide_type": "slide"}
# Our modeled RCE state is **far too warm in the tropics**, and **too cold in the mid- to high latitudes.**
# + [markdown] slideshow={"slide_type": "slide"}
# ### Vertical structure of temperature: comparing RCE to observations
# -
# Observed air temperature from NCEP reanalysis
## The NOAA ESRL server is shutdown! January 2019
ncep_air = xr.open_dataset( ncep_url + "pressure/air.mon.1981-2010.ltm.nc", decode_times=False)
#air = xr.open_dataset(url+'pressure/air')
#ncep_air = air.rename({'lev':'level'})
level_ncep_air = ncep_air.level
lat_ncep_air = ncep_air.lat
Tzon = ncep_air.air.mean(dim=('time','lon'))
# + slideshow={"slide_type": "skip"}
# Compare temperature profiles in RCE and observations
# Shared contour levels (K) so the two panels are directly comparable.
contours = np.arange(180., 350., 15.)
fig = plt.figure(figsize=(14,6))
# Left panel: observed zonal-mean temperature (NCEP reanalysis, converted C -> K)
ax1 = fig.add_subplot(1,2,1)
cax1 = ax1.contourf(lat_ncep_air, level_ncep_air, Tzon+const.tempCtoK, levels=contours)
fig.colorbar(cax1)
ax1.set_title('Observed temperature (K)')
# Right panel: modeled time-mean air temperature from the RCE run
ax2 = fig.add_subplot(1,2,2)
field = model.timeave['Tatm'].transpose()
cax2 = ax2.contourf(model.lat, model.lev, field, levels=contours)
fig.colorbar(cax2)
ax2.set_title('RCE temperature (K)')
# Invert the y axis so pressure decreases upward (surface at the bottom).
for ax in [ax1, ax2]:
    ax.invert_yaxis()
    ax.set_xlim(-90,90)
    ax.set_xticks([-90, -60, -30, 0, 30, 60, 90])
# + [markdown] slideshow={"slide_type": "slide"}
# Again, this plot reveals temperatures that are too warm in the tropics, too cold at the poles throughout the troposphere.
#
# Note however that the **vertical temperature gradients** are largely dictated by the convective adjustment in our model. We have parameterized this gradient, and so we can change it by changing our parameter for the adjustment.
#
# We have (as yet) no parameterization for the **horizontal** redistribution of energy in the climate system.
# + [markdown] slideshow={"slide_type": "slide"}
# ### TOA energy budget in RCE equilibrium
#
# Because there is no horizontal energy transport in this model, the TOA radiation budget should be closed (net flux is zero) at all latitudes.
#
# Let's check this by plotting time-averaged shortwave and longwave radiation:
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
ax.plot(model.lat, model.timeave['ASR'], label='ASR')
ax.plot(model.lat, model.timeave['OLR'], label='OLR')
ax.set_xlabel('Latitude')
ax.set_ylabel('W/m2')
ax.legend(); ax.grid()
# -
# Indeed, the budget is (very nearly) closed everywhere. Each latitude is in energy balance, independent of every other column.
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section3'></a>
#
# ## 3. Observed and modeled TOA radiation budget
# ____________
#
# + [markdown] slideshow={"slide_type": "slide"}
# We are going to look at the (time average) TOA budget as a function of latitude to see how it differs from the RCE state we just plotted.
#
# Ideally we would look at actual satellite observations of SW and LW fluxes. Instead, here we will use the NCEP Reanalysis for convenience.
#
# But bear in mind that the radiative fluxes in the reanalysis are a model-generated product, they are not really observations.
# + [markdown] slideshow={"slide_type": "slide"}
# ### TOA budget from NCEP Reanalysis
# -
# Get TOA radiative flux data from NCEP reanalysis
# downwelling SW
dswrf = xr.open_dataset(ncep_url + '/other_gauss/dswrf.ntat.mon.1981-2010.ltm.nc', decode_times=False)
#dswrf = xr.open_dataset(url + 'other_gauss/dswrf')
# upwelling SW
uswrf = xr.open_dataset(ncep_url + '/other_gauss/uswrf.ntat.mon.1981-2010.ltm.nc', decode_times=False)
#uswrf = xr.open_dataset(url + 'other_gauss/uswrf')
# upwelling LW
ulwrf = xr.open_dataset(ncep_url + '/other_gauss/ulwrf.ntat.mon.1981-2010.ltm.nc', decode_times=False)
#ulwrf = xr.open_dataset(url + 'other_gauss/ulwrf')
# + slideshow={"slide_type": "slide"}
ASR = dswrf.dswrf - uswrf.uswrf
OLR = ulwrf.ulwrf
# -
ASRzon = ASR.mean(dim=('time','lon'))
OLRzon = OLR.mean(dim=('time','lon'))
# + slideshow={"slide_type": "slide"}
ticks = [-90, -60, -30, 0, 30, 60, 90]
fig, ax = plt.subplots()
ax.plot(ASRzon.lat, ASRzon, label='ASR')
ax.plot(OLRzon.lat, OLRzon, label='OLR')
ax.set_ylabel('W/m2')
ax.set_xlabel('Latitude')
ax.set_xlim(-90,90); ax.set_ylim(50,310)
ax.set_xticks(ticks);
ax.set_title('Observed annual mean radiation at TOA')
ax.legend(); ax.grid();
# + [markdown] slideshow={"slide_type": "slide"}
# We find that ASR does NOT balance OLR in most locations.
#
# Across the tropics the absorbed solar radiation exceeds the longwave emission to space. The tropics have a **net gain of energy by radiation**.
#
# The opposite is true in mid- to high latitudes: **the Earth is losing energy by net radiation to space** at these latitudes.
# + [markdown] slideshow={"slide_type": "slide"}
# ### TOA budget from the control CESM simulation
#
# Load data from the fully coupled CESM control simulation that we've used before.
# +
casenames = {'cpl_control': 'cpl_1850_f19',
'cpl_CO2ramp': 'cpl_CO2ramp_f19',
'som_control': 'som_1850_f19',
'som_2xCO2': 'som_1850_2xCO2',
}
# The path to the THREDDS server, should work from anywhere
basepath = 'http://thredds.atmos.albany.edu:8080/thredds/dodsC/CESMA/'
# For better performance if you can access the roselab_rit filesystem (e.g. from JupyterHub)
#basepath = '/roselab_rit/cesm_archive/'
casepaths = {}
for name in casenames:
casepaths[name] = basepath + casenames[name] + '/concatenated/'
# make a dictionary of all the CAM atmosphere output
atm = {}
for name in casenames:
path = casepaths[name] + casenames[name] + '.cam.h0.nc'
print('Attempting to open the dataset ', path)
atm[name] = xr.open_dataset(path)
# -
lat_cesm = atm['cpl_control'].lat
ASR_cesm = atm['cpl_control'].FSNT
OLR_cesm = atm['cpl_control'].FLNT
# +
# extract the last 10 years from the slab ocean control simulation
# and the last 20 years from the coupled control
nyears_slab = 10
nyears_cpl = 20
clim_slice_slab = slice(-(nyears_slab*12),None)
clim_slice_cpl = slice(-(nyears_cpl*12),None)
# For now we're just working with the coupled control simulation
# Take the time and zonal average
ASR_cesm_zon = ASR_cesm.isel(time=clim_slice_slab).mean(dim=('lon','time'))
OLR_cesm_zon = OLR_cesm.isel(time=clim_slice_slab).mean(dim=('lon','time'))
# -
# Now we can make the same plot of ASR and OLR that we made for the observations above.
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
ax.plot(lat_cesm, ASR_cesm_zon, label='ASR')
ax.plot(lat_cesm, OLR_cesm_zon, label='OLR')
ax.set_ylabel('W/m2')
ax.set_xlabel('Latitude')
ax.set_xlim(-90,90); ax.set_ylim(50,310)
ax.set_xticks(ticks);
ax.set_title('CESM control simulation: Annual mean radiation at TOA')
ax.legend(); ax.grid();
# + [markdown] slideshow={"slide_type": "slide"}
# Essentially the same story as the reanalysis data: there is a **surplus of energy across the tropics** and a net **energy deficit in mid- to high latitudes**.
#
# There are two locations where ASR = OLR, near about 35º in both hemispheres.
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
#
# ## 4. The energy budget for a zonal band
# ____________
# -
# ### The basic idea
#
# Through most of the previous notes we have been thinking about **global averages**.
#
# We've been working with an energy budget that looks something like this:
#
# 
# When we start thinking about regional climates, we need to modify our budget to account for the **additional heating or cooling** due to **transport** in and out of the column:
#
# 
# Conceptually, the additional energy source is the difference between what's coming in and what's going out:
#
# $$ h = \mathcal{H}_{in} - \mathcal{H}_{out} $$
#
# where $h$ is a **dynamic heating rate** in W m$^{-2}$.
# ### A more careful budget
#
# Let’s now consider a thin band of the climate system, of width $\delta \phi$ , and write down a careful energy budget for it.
# <img src='../images/ZonalEnergyBudget_sketch.png' width=400>
# + [markdown] slideshow={"slide_type": "slide"}
# Let $\mathcal{H}(\phi)$ be the total rate of northward energy transport across the latitude line $\phi$, measured in Watts (usually PW).
#
# Let $T(\phi,t)$ be the zonal average surface temperature ("zonal average" = average around latitude circle).
# + [markdown] slideshow={"slide_type": "slide"}
# We can write the energy budget as
#
# $$ \frac{\partial E}{\partial t} = \text{energy in} - \text{energy out} $$
#
# where $E$ is the total energy content of the column, which is useful to write as
#
# $$ E = \int_{bottom}^{top} \rho ~ e ~ dz $$
#
# and $e$ is the local **enthalpy** of the fluid, in units of J kg$^{-1}$. The integral energy content $E$ thus has units of J m$^{-2}$.
# + [markdown] slideshow={"slide_type": "slide"}
# We have written the time tendency as a partial derivative now because $E$ varies in both space and time.
# + [markdown] slideshow={"slide_type": "slide"}
# Now there are two energy sources and two energy sinks to think about:
# Radiation and dynamics (horizontal transport)
#
# $$ \frac{\partial E}{\partial t} = R_{TOA} - (\text{transport out} - \text{transport in})~/ ~\text{area of band} $$
#
# where we define the net incoming radiation at the top of atmosphere as
#
# $$ R_{TOA} = \text{ASR} - \text{OLR} = (1-\alpha) Q - \text{OLR} $$
# + [markdown] slideshow={"slide_type": "slide"}
# The surface area of the latitude band is
#
# $$ A = \text{Circumference} ~\times ~ \text{north-south width} $$
#
# $$ A = 2 \pi a \cos \phi ~ \times ~ a \delta \phi $$
#
# $$ A = 2 \pi a^2 \cos\phi ~ \delta\phi $$
# + [markdown] slideshow={"slide_type": "slide"}
# We will denote the energy transport in and out of the band respectively as $\mathcal{H}(\phi), \mathcal{H}(\phi + \delta\phi)$
#
# Then the budget can be written
#
# $$ \frac{\partial E}{\partial t} = \text{ASR} - \text{OLR} - \frac{1}{2 \pi a^2 \cos\phi ~ \delta\phi} \Big( \mathcal{H}(\phi + \delta\phi) - \mathcal{H}(\phi) \Big) $$
# + [markdown] slideshow={"slide_type": "slide"}
# For thin bands where $\delta\phi$ is very small, we can write
#
# $$ \frac{1}{\delta\phi} \Big( \mathcal{H}(\phi + \delta\phi) - \mathcal{H}(\phi) \Big) = \frac{\partial \mathcal{H}}{\partial \phi} $$
# + [markdown] slideshow={"slide_type": "fragment"}
# So the local budget at any latitude $\phi$ is
#
# $$ \frac{\partial E}{\partial t} = \text{ASR} - \text{OLR} - \frac{1}{2 \pi a^2 \cos\phi } \frac{\partial \mathcal{H}}{\partial \phi} $$
# + [markdown] slideshow={"slide_type": "slide"}
# The **dynamical heating rate** in W m$^{-2}$ is thus
#
# $$ h = - \frac{1}{2 \pi a^2 \cos\phi } \frac{\partial \mathcal{H}}{\partial \phi} $$
#
# which is the **convergence of energy transport** into this latitude band: the difference between what's coming in and what's going out.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Calculating heat transport from the steady-state energy budget
#
# Notice that if the above budget is in **equilibrium** then $\partial E/ \partial t = 0$ and the budget says that **divergence of heat transport balances the net radiative heating** at every latitude.
#
# If we can **assume that the budget is balanced**, i.e. assume that the system is at equilibrium and there is negligible heat storage, then we can use the budget to infer $\mathcal{H}$ from a measured (or modeled) TOA radiation imbalance.
# + [markdown] slideshow={"slide_type": "slide"}
# Setting $\partial E/ \partial t = 0$ and rearranging:
#
# $$ \frac{\partial \mathcal{H}}{\partial \phi} = 2 \pi ~a^2 \cos\phi ~ R_{TOA} $$
# -
# Now integrate from the South Pole ($\phi = -\pi/2$):
#
# $$ \int_{-\pi/2}^{\phi} \frac{\partial \mathcal{H}}{\partial \phi^\prime} d\phi^\prime = 2 \pi ~a^2 \int_{-\pi/2}^{\phi} \cos\phi^\prime ~ R_{TOA} d\phi^\prime $$
#
# $$ \mathcal{H}(\phi) - \mathcal{H}(-\pi/2) = 2 \pi ~a^2 \int_{-\pi/2}^{\phi} \cos\phi^\prime ~ R_{TOA} d\phi^\prime $$
# + [markdown] slideshow={"slide_type": "slide"}
# Our boundary condition is that the transport must go to zero at the pole. We therefore have a formula for calculating the heat transport at any latitude, by integrating the imbalance from the South Pole:
#
# $$ \mathcal{H}(\phi) = 2 \pi ~a^2 \int_{-\pi/2}^{\phi} \cos\phi^\prime ~ R_{TOA} d\phi^\prime $$
# + [markdown] slideshow={"slide_type": "slide"}
# What about the boundary condition at the other pole? We must have $\mathcal{H}(\pi/2) = 0$ as well, because a non-zero transport at the pole is not physically meaningful.
#
# Notice that if we apply the above formula and integrate all the way to the other pole, we then have
#
# $$ \mathcal{H}(\pi/2) = 2 \pi ~a^2 \int_{-\pi/2}^{\pi/2} \cos\phi^\prime ~ R_{TOA} d\phi^\prime $$
# + [markdown] slideshow={"slide_type": "slide"}
# This is an integral of the radiation imbalance weighted by cosine of latitude. In other words, this is **proportional to the area-weighted global average energy imbalance**.
#
# We started by assuming that this imbalance is zero.
#
# If the **global budget is balanced**, then the physical boundary condition of no-flux at the poles is satisfied.
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section5'></a>
#
# ## 5. Observed and modeled poleward heat transport
# ____________
#
#
# + [markdown] slideshow={"slide_type": "-"}
# Here we will code up a function that performs the above integration.
# + slideshow={"slide_type": "slide"}
def inferred_heat_transport(energy_in, lat=None, latax=None):
    '''Compute northward heat transport as the integral of the local energy imbalance.

    Required input:
        energy_in: energy imbalance in W/m2, positive in to domain
            As either numpy array or xarray.DataArray
    If using plain numpy, need to supply these arguments:
        lat: latitude in degrees
        latax: axis number corresponding to latitude in the data
            (axis over which to integrate)

    Returns the heat transport in PW.
    Will attempt to return data in xarray.DataArray if possible.

    Raises ValueError if latitude (or the latitude axis) cannot be
    determined from the input and is not supplied explicitly.
    '''
    from scipy import integrate
    from climlab import constants as const
    if lat is None:
        # xarray input is self-describing; plain arrays must supply lat explicitly
        try:
            lat = energy_in.lat
        except AttributeError:
            raise ValueError('Need to supply latitude array if input data is not self-describing.')
    lat_rad = np.deg2rad(lat)
    coslat = np.cos(lat_rad)
    # Area weighting: each band's contribution scales with cos(latitude).
    field = coslat*energy_in
    if latax is None:
        # get_axis_num exists only on xarray objects (AttributeError on numpy
        # arrays); it raises KeyError if there is no 'lat' dimension.
        try:
            latax = field.get_axis_num('lat')
        except (AttributeError, KeyError):
            raise ValueError('Need to supply axis number for integral over latitude.')
    # Cumulative integral from the South Pole (transport = 0 there).
    # `cumtrapz` was renamed `cumulative_trapezoid` in SciPy 1.6 and removed
    # in SciPy 1.14; fall back for older SciPy versions.
    _cumtrapz = getattr(integrate, 'cumulative_trapezoid', None) or integrate.cumtrapz
    integral = _cumtrapz(field, x=lat_rad, initial=0., axis=latax)
    # H(phi) = 2*pi*a^2 * integral of cos(phi')*R_TOA dphi'; 1e-15 converts W -> PW.
    # (np.pi replaces np.math.pi, which was removed in NumPy 2.0.)
    result = (1E-15 * 2 * np.pi * const.a**2 * integral)
    if isinstance(field, xr.DataArray):
        # Preserve coordinates/metadata by writing values into a copy of the input.
        result_xarray = field.copy()
        result_xarray.values = result
        return result_xarray
    else:
        return result
# + [markdown] slideshow={"slide_type": "slide"}
# Let's now use this to calculate the total northward heat transport from our control simulation with the CESM:
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
ax.plot(lat_cesm, inferred_heat_transport(ASR_cesm_zon - OLR_cesm_zon))
ax.set_ylabel('PW')
ax.set_xticks(ticks)
ax.grid()
ax.set_title('Total northward heat transport inferred from CESM control simulation')
# + [markdown] slideshow={"slide_type": "slide"}
# The total heat transport is very nearly symmetric about the equator, with poleward transport of about 5 to 6 PW in both hemispheres.
#
# The transport peaks in magnitude near 35º latitude, the same latitude where we found that ASR = OLR. This is no coincidence!
#
# Equatorward of 35º (across the tropics) there is **net heating by radiation** and **net cooling by dynamics**. The opposite is true poleward of 35º.
# + [markdown] slideshow={"slide_type": "slide"}
# What about the "observations", i.e. the reanalysis data?
#
# We can try to do the same calculation.
# + slideshow={"slide_type": "slide"}
# Need to flip the arrays because we want to start from the south pole
Rtoa_ncep = ASRzon-OLRzon
lat_ncep = ASRzon.lat
fig, ax = plt.subplots()
ax.plot(lat_ncep, inferred_heat_transport(Rtoa_ncep))
ax.set_ylabel('PW')
ax.set_xticks(ticks)
ax.grid()
ax.set_title('Total northward heat transport inferred from NCEP reanalysis')
# + [markdown] slideshow={"slide_type": "slide"}
# Our integral **does NOT go to zero at the North Pole!**. This means that the global energy budget is NOT balanced in the reanalysis data.
#
# Let's look at the global imbalance:
# + slideshow={"slide_type": "fragment"}
# global average of TOA radiation in reanalysis data
# cos(lat) weights, normalized so the weighted mean is a true area average
weight_ncep = np.cos(np.deg2rad(lat_ncep)) / np.cos(np.deg2rad(lat_ncep)).mean(dim='lat')
imbal_ncep = (Rtoa_ncep * weight_ncep).mean(dim='lat')
print( 'The net downward TOA radiation flux in NCEP reanalysis data is %0.1f W/m2.' %imbal_ncep)
# + [markdown] slideshow={"slide_type": "slide"}
# Evidently there is a substantial net flux out to space in this dataset.
#
# Before we can compute heat transport from this data, we need to **balance the global data**.
#
# To do this requires making assumptions about the spatial distribution of the imbalance.
#
# The simplest assumption we can make is that the imbalance is uniform across the Earth.
# + slideshow={"slide_type": "slide"}
Rtoa_ncep_balanced = Rtoa_ncep - imbal_ncep
newimbalance = float((Rtoa_ncep_balanced * weight_ncep).mean(dim='lat'))
print( 'The net downward TOA radiation flux after balancing the data is %0.2e W/m2.' %newimbalance)
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
ax.plot(lat_ncep, inferred_heat_transport(Rtoa_ncep_balanced))
ax.set_ylabel('PW')
ax.set_xticks(ticks)
ax.grid()
ax.set_title('Total northward heat transport inferred from NCEP reanalysis (after global balancing)')
# + [markdown] slideshow={"slide_type": "slide"}
# We now get a physically sensible result (zero at both poles).
#
# The heat transport is poleward everywhere, and very nearly anti-symmetric across the equator. The shape is very similar to what we found from the CESM simulation, with peaks near 35º.
# + [markdown] slideshow={"slide_type": "slide"}
# However the magnitude of the peaks is substantially smaller. **Does this indicate a shortcoming of the CESM simulation?**
#
# **Probably not!**
#
# It turns out that our result here is **very sensitive to the details** of how we balance the radiation data.
#
# As an exercise, you might try applying different corrections other than the globally uniform correction we used above. E.g. try weighting the tropics or the mid-latitudes more strongly.
# + [markdown] slideshow={"slide_type": "slide"}
# ### An example of a recently published observational estimate of meridional heat transport
# -
# <img src='../images/Fasullo_Trenberth_2008b_Fig7.jpg'>
# + [markdown] slideshow={"slide_type": "-"}
# > The ERBE period zonal mean annual cycle of the meridional energy transport in PW by (a) the atmosphere and ocean as inferred from ERBE $R_T$, NRA $\delta A_E/\delta t$, and GODAS $\delta O_E/\delta t$; (b) the atmosphere based on NRA; and (c) by the ocean as implied by ERBE + NRA $F_S$ and GODAS $\delta O_E/\delta t$. Stippling and hatching in (a)–(c) represent regions and times of year in which the standard deviation of the monthly mean values among estimates, some of which include the CERES period (see text), exceeds 0.5 and 1.0 PW, respectively. (d) The median annual mean transport by latitude for the total (gray), atmosphere (red), and ocean (blue) accompanied with the associated $\pm2\sigma$ range (shaded).
#
# This is a reproduction of Figure 7 from Fasullo and Trenberth (2008), "The Annual Cycle of the Energy Budget. Part II: Meridional Structures and Poleward Transports", J. Climate 21, doi:10.1175/2007JCLI1936.1
# + [markdown] slideshow={"slide_type": "slide"}
# This figure shows the breakdown of the heat transport by **season** as well as the **partition between the atmosphere and ocean**.
#
# Focussing just on the total, annual transport in panel (d) (black curve), we see that it is quite consistent with what we computed from the CESM simulation.
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section6'></a>
#
# ## 6. Energy budgets for the atmosphere and ocean
# ____________
# + [markdown] slideshow={"slide_type": "slide"}
# The total transport (which we have been inferring from the TOA radiation imbalance) includes contributions from both the **atmosphere** and the **ocean**:
#
# $$ \mathcal{H} = \mathcal{H}_{a} + \mathcal{H}_{o} $$
# + [markdown] slideshow={"slide_type": "slide"}
# We have used the **TOA imbalance** to infer the total transport because TOA radiation is the only significant energy source / sink to the climate system as a whole.
#
# However, if we want to study (or model) the individual contributions from the atmosphere and ocean, we need to consider the energy budgets for **each individual domain**.
#
# We will therefore need to broaden our discussion to include the **net surface heat flux**, i.e. the total flux of energy between the surface and the atmosphere.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Surface fluxes
#
# Let's denote the **net upward energy flux at the surface** as $F_S$.
# + [markdown] slideshow={"slide_type": "slide"}
# There are four principal contributions to $F_S$:
#
# 1. Shortwave radiation
# 2. Longwave radiation
# 3. Sensible heat flux
# 4. Evaporation or latent heat flux
#
# Sensible and latent heat fluxes involve turbulent exchanges in the planetary boundary layer. We will look at these in more detail later.
# + slideshow={"slide_type": "slide"}
# monthly climatologies for surface flux data from reanalysis
# all defined as positive UP
ncep_nswrs = xr.open_dataset(ncep_url + "surface_gauss/nswrs.sfc.mon.1981-2010.ltm.nc", decode_times=False)
ncep_nlwrs = xr.open_dataset(ncep_url + "surface_gauss/nlwrs.sfc.mon.1981-2010.ltm.nc", decode_times=False)
ncep_shtfl = xr.open_dataset(ncep_url + "surface_gauss/shtfl.sfc.mon.1981-2010.ltm.nc", decode_times=False)
ncep_lhtfl = xr.open_dataset(ncep_url + "surface_gauss/lhtfl.sfc.mon.1981-2010.ltm.nc", decode_times=False)
#ncep_nswrs = xr.open_dataset(url + 'surface_gauss/nswrs')
#ncep_nlwrs = xr.open_dataset(url + 'surface_gauss/nlwrs')
#ncep_shtfl = xr.open_dataset(url + 'surface_gauss/shtfl')
#ncep_lhtfl = xr.open_dataset(url + 'surface_gauss/lhtfl')
# + slideshow={"slide_type": "slide"}
# Calculate ANNUAL AVERAGE net upward surface flux
# Sum of longwave, shortwave, sensible and latent components, then time-mean
ncep_net_surface_up = (ncep_nlwrs.nlwrs
                       + ncep_nswrs.nswrs
                       + ncep_shtfl.shtfl
                       + ncep_lhtfl.lhtfl
                      ).mean(dim='time')
# + slideshow={"slide_type": "slide"}
lon_ncep = ncep_net_surface_up.lon
fig, ax = plt.subplots()
# Diverging colormap centred on zero flux, clipped at +/-200 W/m2
cax = ax.pcolormesh(lon_ncep, lat_ncep, ncep_net_surface_up,
                    cmap=plt.cm.seismic, vmin=-200., vmax=200. )
fig.colorbar(cax, ax=ax)
ax.set_title('Net upward surface energy flux in NCEP Reanalysis data')
# -
# Discuss... Large net fluxes over ocean, not over land.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Energy budget for the ocean
#
# Using exactly the same reasoning we used for the whole climate system, we can write a budget for the OCEAN ONLY:
#
# $$ \frac{\partial E_o}{\partial t} = -F_S - \frac{1}{2 \pi a^2 \cos\phi } \frac{\partial \mathcal{H_o}}{\partial \phi} $$
# -
# In principle it is possible to calculate $\mathcal{H}_o$ from this budget, analogously to how we calculated the total $\mathcal{H}$.
# + [markdown] slideshow={"slide_type": "slide"}
# Assuming that
#
# - surface fluxes are well-known
# - the ocean heat storage is negligible (a big assumption!)
#
# we can write
#
# $$ \mathcal{H}_o(\phi) = 2 \pi ~a^2 \int_{-\pi/2}^{\phi} - \cos\phi^\prime ~ F_S d\phi^\prime $$
#
# where the minus sign accounts for the fact that we defined $F_S$ as **positive up** (out of the ocean).
# + [markdown] slideshow={"slide_type": "slide"}
# ### Energy budget for the atmosphere
#
# The net energy source to the atmosphere is the sum of the TOA flux and the surface flux. Thus we can write
#
# $$ \frac{\partial E_a}{\partial t} = R_{TOA} + F_S - \frac{1}{2 \pi a^2 \cos\phi } \frac{\partial \mathcal{H_a}}{\partial \phi} $$
# + [markdown] slideshow={"slide_type": "slide"}
# and we can similarly integrate to get the transport:
#
# $$ \mathcal{H}_a(\phi) = 2 \pi ~a^2 \int_{-\pi/2}^{\phi} \cos\phi^\prime ~ \big( R_{TOA} + F_S \big) d\phi^\prime $$
#
# Note that these formulas ensure that $\mathcal{H} = \mathcal{H}_a + \mathcal{H}_o$.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Atmospheric water budget and latent heat transport
#
# Water vapor contributes to the atmospheric energy transport because **energy consumed through evaporation** is converted back to **sensible heat** wherever the vapor subsequently condenses.
#
# If the evaporation and the condensation occur at different latitudes then there is a net transport of energy due to the **movement of water vapor**.
# + [markdown] slideshow={"slide_type": "slide"}
# We can use the same kind of budget reasoning to compute this **latent heat transport**. But this time we will make a budget for water vapor only.
# + [markdown] slideshow={"slide_type": "slide"}
# The only sources and sinks of water vapor to the atmosphere are surface evaporation and precipitation:
#
# $$ L_v \frac{\partial Q}{\partial t} = L_v \big( Evap - Precip \big) - \frac{1}{2 \pi a^2 \cos\phi } \frac{\partial \mathcal{H}_{LH}}{\partial \phi} $$
# + [markdown] slideshow={"slide_type": "slide"}
# Here we are using
#
# - $Q$ is the depth-integrated water vapor (the "precipitable water") in kg m$^{-2}$
# - Evap and Precip are in kg m$^{-2}$ s$^{-1}$ (equivalent to mm/s)
# - $L_v = 2.5 \times 10^6$ J kg$^{-1}$ is the latent heat of vaporization
# - $\mathcal{H}_{LH}$ is the northward latent heat transport
#
# All terms in the above equation thus have units of W m$^{-2}$.
# + [markdown] slideshow={"slide_type": "slide"}
# Using the now-familiar equilibrium reasoning, we can use this water balance to compute the latent heat transport from the net surface evaporation minus precipitation:
#
# $$ \mathcal{H}_{LH}(\phi) = 2 \pi ~a^2 \int_{-\pi/2}^{\phi} \cos\phi^\prime ~ L_v ~\big( Evap - Precip \big) d\phi^\prime $$
# + [markdown] slideshow={"slide_type": "slide"}
# From this we can then infer all the energy transport associated with the motion of dry air as a residual:
#
# $$\mathcal{H}_{Dry} = \mathcal{H}_a - \mathcal{H}_{LH} $$
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section7'></a>
#
# ## 7. Calculating the partitioning of poleward energy transport into different components
# ____________
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# This function implements the above formulas to calculate the following quantities from CESM simulation output:
#
# - Total heat transport, $\mathcal{H}$
# - Ocean heat transport, $\mathcal{H}_o$
# - Atmospheric heat transport, $\mathcal{H}_a$
# - Atmospheric latent heat transport, $\mathcal{H}_{LH}$
# - Atmospheric dry heat transport, $\mathcal{H}_{Dry}$
# + slideshow={"slide_type": "slide"}
def CESM_heat_transport(run, timeslice=clim_slice_cpl):
    """Compute the partition of northward heat transport from CESM output.

    Parameters
    ----------
    run : xarray.Dataset
        CESM atmosphere output containing the fields in ``fieldlist``.
    timeslice : slice, optional
        Time indices to average over; defaults to the coupled-run
        climatology slice ``clim_slice_cpl`` defined earlier in the notebook.

    Returns
    -------
    dict
        Keys 'total', 'atm', 'ocean', 'latent', 'dse', each the inferred
        northward heat transport on the model latitude grid.
    """
    # Take zonal and time averages of the necessary input fields
    fieldlist = ['FLNT','FSNT','LHFLX','SHFLX','FLNS','FSNS','PRECSC','PRECSL','QFLX','PRECC','PRECL']
    zon = run[fieldlist].isel(time=timeslice).mean(dim=('lon','time'))
    OLR = zon.FLNT
    ASR = zon.FSNT
    Rtoa = ASR - OLR  # net downwelling radiation
    # surface energy budget terms, all defined as POSITIVE UP
    # (from ocean to atmosphere)
    LHF = zon.LHFLX
    SHF = zon.SHFLX
    LWsfc = zon.FLNS
    SWsfc = -zon.FSNS
    # latent heat carried away by snowfall (snow rate * density * heat of fusion)
    SnowFlux = ((zon.PRECSC + zon.PRECSL) *
                const.rho_w * const.Lhfus)
    # net upward radiation from surface
    SurfaceRadiation = LWsfc + SWsfc
    # net upward surface heat flux
    SurfaceHeatFlux = SurfaceRadiation + LHF + SHF + SnowFlux
    # net heat flux into atmosphere
    Fatmin = Rtoa + SurfaceHeatFlux
    # hydrological cycle, all terms in kg/m2/s or mm/s
    Evap = zon.QFLX
    Precip = (zon.PRECC + zon.PRECL) * const.rho_w
    EminusP = Evap - Precip
    # heat transport terms
    HT = {}
    HT['total'] = inferred_heat_transport(Rtoa)
    HT['atm'] = inferred_heat_transport(Fatmin)
    # minus sign: SurfaceHeatFlux is positive up, i.e. OUT of the ocean
    HT['ocean'] = inferred_heat_transport(-SurfaceHeatFlux)
    HT['latent'] = inferred_heat_transport(EminusP*const.Lhvap)  # atm. latent heat transport from moisture imbal.
    HT['dse'] = HT['atm'] - HT['latent']  # dry static energy transport as residual
    return HT
# + slideshow={"slide_type": "slide"}
# Compute heat transport partition for both control and 2xCO2 simulations
# (both use the default climatological time slice)
HT_control = CESM_heat_transport(atm['cpl_control'])
HT_2xCO2 = CESM_heat_transport(atm['cpl_CO2ramp'])
# +
fig = plt.figure(figsize=(16,6))
# The two experiments to compare, side by side
runs = [HT_control, HT_2xCO2]
N = len(runs)
# Iterate `runs` itself; previously the list was rebuilt inline and `runs`
# was never used, inviting the two lists to drift apart.
for n, HT in enumerate(runs):
    ax = fig.add_subplot(1, N, n+1)
    ax.plot(lat_cesm, HT['total'], 'k-', label='total', linewidth=2)
    ax.plot(lat_cesm, HT['atm'], 'r-', label='atm', linewidth=2)
    ax.plot(lat_cesm, HT['dse'], 'r--', label='dry')
    ax.plot(lat_cesm, HT['latent'], 'r:', label='latent')
    ax.plot(lat_cesm, HT['ocean'], 'b-', label='ocean', linewidth=2)
    ax.set_xlim(-90,90)
    ax.set_xticks(ticks)
    ax.legend(loc='upper left')
    ax.grid()
# -
# Discuss the shape of these curves, before and after the global warming.
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section8'></a>
#
# ## 8. Mechanisms of heat transport
# ____________
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# Energy is transported across latitude lines whenever there is an exchange of fluids with different energy content: e.g. warm fluid moving northward while colder fluid moves southward.
#
# Thus energy transport always involves **correlations between northward component of velocity $v$ and energy $e$**
# + [markdown] slideshow={"slide_type": "slide"}
# The transport is an integral of these correlations, around a latitude circle and over the depth of the fluid:
#
# $$ \mathcal{H} = \int_0^{2\pi} \int_{\text{bottom}}^{\text{top}} \rho ~ v ~ e ~ dz ~ a \cos\phi ~ d\lambda$$
# + [markdown] slideshow={"slide_type": "slide"}
# The total transport (which we have been inferring from the TOA radiation imbalance) includes contributions from both the **atmosphere** and the **ocean**:
#
# $$ \mathcal{H} = \mathcal{H}_{a} + \mathcal{H}_{o} $$
# + [markdown] slideshow={"slide_type": "slide"}
# We can apply the above definition to both fluids (with appropriate values for bottom and top in the depth integral).
#
# The appropriate measure of energy content is different for the atmosphere and ocean.
# + [markdown] slideshow={"slide_type": "slide"}
# For the ocean, we usually use the **enthalpy for an incompressible fluid**:
#
# $$ e_o \approx c_w ~ T $$
#
# where $c_w \approx 4.2 \times 10^{3}$ J kg$^{-1}$ K$^{-1}$ is the specific heat for seawater.
# + [markdown] slideshow={"slide_type": "slide"}
# For the atmosphere, it's a bit more complicated. We need to account for both the compressibility of air, and for its water vapor content. This is because of the latent energy associated with evaporation and condensation of vapor.
# + [markdown] slideshow={"slide_type": "slide"}
# It is convenient to define the **moist static energy** for the atmosphere:
#
# $$ MSE = c_p ~T + g~ Z + L_v ~q $$
#
# whose terms are respectively the internal energy, the potential energy, and the latent heat of water vapor (see texts on atmospheric thermodynamics for details).
# + [markdown] slideshow={"slide_type": "slide"}
# We will assume that $MSE$ is a good approximation to the total energy content of the atmosphere, so
#
# $$ e_a \approx MSE $$
# + [markdown] slideshow={"slide_type": "slide"}
# Note that in both cases we have **neglected the kinetic energy** from this budget.
#
# The kinetic energy per unit mass is $e_k = |\vec{v}|^2/2$, where $\vec{v} = (u,v,w)$ is the velocity vector.
#
# In practice it is a very small component of the total energy content of the fluid and is usually neglected in analyses of poleward energy transport.
# + [markdown] slideshow={"slide_type": "slide"}
# As we have seen, we can further divide the atmospheric transport into transports due to the movement of **dry air** (the transport of **dry static energy**) and transport associated with evaporation and condensation of **water vapor** (the **latent heat transport**).
# + [markdown] slideshow={"slide_type": "slide"}
# ### Mechanisms of energy transport in the ocean
#
# Assuming the ocean extends from $z=-H$ to $z=0$ we can then write
#
# $$ \mathcal{H}_o \approx a \cos\phi \int_0^{2\pi} \int_{-H}^{0} c_w ~\rho ~ v ~ T ~ dz ~ d\lambda$$
#
# setting $v ~ T = 0$ at all land locations around the latitude circle.
# + [markdown] slideshow={"slide_type": "slide"}
# The northward transport $\mathcal{H}_o$ is positive if there is a net northward flow of warm water and southward flow of cold water.
# + [markdown] slideshow={"slide_type": "slide"}
# This can occur due to **horizontal** differences in currents and temperatures.
#
# The classic example is flow in the subtropical gyres and western boundary currents. In the subtropical North Atlantic, there is rapid northward flow of warm water in the Gulf Stream. This is compensated by a slow southward flow of cooler water across the interior of the basin.
#
# **Because the water masses are at different temperatures, equal and opposite north-south exchanges of mass result in net northward transport of energy.**
# + [markdown] slideshow={"slide_type": "slide"}
# Energy transport can also result from **vertical** structure of the currents.
#
# There is a large-scale **overturning circulation** in the Atlantic that involves near-surface northward flow of warmer water, compensated by deeper southward flow of colder water.
#
# Again, equal exchange of water but net transport of energy.
# + [markdown] slideshow={"slide_type": "skip"}
# ### *These notes are not really finished...*
# + [markdown] slideshow={"slide_type": "skip"}
# ____________
#
# ## Credits
#
# This notebook is part of [The Climate Laboratory](https://brian-rose.github.io/ClimateLaboratoryBook), an open-source textbook developed and maintained by [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.
#
# It is licensed for free and open consumption under the
# [Creative Commons Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) license.
#
# Development of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to Brian Rose. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation.
# ____________
# + slideshow={"slide_type": "skip"}
| content/courseware/advanced-heat-transport.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center> Radon microlocal </center>
# In this notebook we present the microlocal canonical relation of the Radon transform,
# from the paper _"An introduction to X-Ray tomography and Radon transform"_ by <NAME> (Section 2.2, _"Wavefront sets and singularity detection"_).
#
# ## Definition of Ray Transform
#
# With coordinates on the unit sphere $\mathbb{S}^1$, so to each angle $\phi\in[0,2\pi]$, with the unit vector in direction $\phi$ denoted as $\theta$ and $\theta^{\perp}$ as the unit vector $\pi/2$ units counterclock wise:
#
# $$
# \theta = \theta(\phi) = (\cos\phi, \sin\phi)\text{,}\quad \theta^{\perp} = \theta^{\perp}(\phi)= (-\sin\phi,\cos\phi)
# $$
#
# Then $[0,2\pi]$ can be identified as $\mathbb{S}^1$ with coordinates $\phi$. Let us define
#
# $$
# L(\phi,s)=\{x\in\mathbb{R}^2 \mid x\cdot\theta(\phi)=s\}
# $$
#
# the line perpendicular to $\theta$ at distance $s$ from the origin.
#
# The Radon transform $f\in L^1(\mathbb{R}^2)$ is given by:
#
# $$
# \mathcal{R}f(\phi,s)=\int_{x\in L(\phi,s)}f(x)dx_L = \int_{t=-\infty}^{\infty} f(s\theta+t\theta^{\perp})dt
# $$
#
# The Radon transform is a pseudodifferential operator; using microlocal analysis, we can relate the singularities of a function to the singularities of its Radon transform. We name this correspondence _the canonical relation_, and it reads as follows.
#
# ## The canonical relation
#
# Let $f\in \epsilon^1(\mathbb{R}^2)$ be a two-dimensional distribution of compact support. Let $x_0\in L(\phi,s_0)$, $\theta_0 = \theta(\phi_0)$, $\eta_0 = ds - (x_0\cdot\theta_0^{\perp})d\phi$ and $a\neq 0$. The __canonical relation__ is the wavefront set correspondence
#
# $$
# (x_0; a\theta_0 dx)\in WF^{\alpha} (f) \Longleftrightarrow (\phi_0,s_0; a\eta_0)\in WF^{\alpha+1/2}(\mathcal{R}f)
# $$
#
# ## The digital canonical relation
#
# In the digital case the wavefront set is parametrized by the values $(x_0;\phi_0)$, where $x_0 = (x_{01}, x_{02})\in [0,N]^2$ and $\phi_0\in\{0,\ldots,180\}$, therefore the forward and inverse operator that implements the canonical relation can be defined as follows.
#
# ### Primal canonical relation
#
# $$
# \mathbb{Can}^d: WF^d(f)\longrightarrow WF^d(\mathcal{R}f)
# $$
#
# Let $f\in \ell^2([0,N]^2)$, and $((x_{01},x_{02});\phi_0)\in WF^d(f)$, where $x_0\in [0,N]^2$ and $\phi_0\in\{0,\ldots,180\}$, therefore
#
# $$
# \mathbb{Can}^d(((x_{01}, x_{02});\phi_0)) = ((s_0,\phi_0+90); \varphi_0)
# $$
#
# where
#
# $$
# s_0 = x_0\cdot\theta(\phi_0+90) =
# \begin{cases}
# -x_{01}\sin\phi_0+x_{02}\cos\phi_0 & \text{if } \phi_0\leq 90\\
# x_{01}\sin\phi_0-x_{02}\cos\phi_0 & \text{if } \phi_0 > 90
# \end{cases}
# $$
#
# and
#
# $$
# \varphi_0 = \arctan(-x_0\cdot\theta^{\perp}(\phi_0+90))=
# \begin{cases}
# -x_{01}\cos\phi_0-x_{02}\sin\phi_0 & \phi_0\leq 90\\
# x_{01}\cos\phi_0+x_{02}\sin\phi_0 & \phi_0>90
# \end{cases}
# $$
#
#
#
#
# ### Dual canonical relation
#
# $$
# (\mathbb{Can}^d)^{-1}: WF^d(\mathcal{R}f)\longrightarrow WF^d(f)
# $$
#
# Let $f\in\ell^2([0,N]^2)$ and $((s_0,\phi_0);\varphi_0)\in WF^d(\mathcal{R}f)$, where $s\in [0,N]$ and $\phi_0\in\{ 0,\ldots,180\}$, and $\varphi_0\in\{0,\ldots,180\}$, therefore
#
# $$
# (\mathbb{Can}^d)^{-1}((s_0,\phi_0);\varphi_0) = ((x_{01},x_{02}); \phi_0-90)
# $$
#
# where
#
# $$
# x_{01} = s_0\cos\phi_0-\tan\varphi_0\sin\phi_0
# $$
#
# and
#
# $$
# x_{02} = s_0\sin\phi_0+\tan\varphi_0\cos\phi_0
# $$
# ## Implementation
import sys
sys.path.append("../../WF_inpaint/")
from ellipse.ellipseWF_factory import random_phantom, plot_WF, WFupdate
import matplotlib.pyplot as plt
import numpy.random as rnd
import numpy as np
# %matplotlib inline
import odl
# ## Radon transform
size = 256
# Create ODL data structures
# Reconstruction space: size x size pixels centred on the origin
space = odl.uniform_discr([-int(size/2), -int(size/2)], [int(size/2), int(size/2)], [size, size],
                          dtype='float32')
num_angles = 180
geometry = odl.tomo.parallel_beam_geometry(space, num_angles=num_angles)
operator = odl.tomo.RayTransform(space, geometry)
# Ensure operator has fixed operator norm for scale invariance
opnorm = odl.power_method_opnorm(operator)
operator = (1 / opnorm) * operator
# ## Primal canonical relation
def point_img2sino(x, phi, size, sinogram_shape):
    """Map one image-domain wavefront-set point to sinogram coordinates.

    Parameters
    ----------
    x : array-like, length 2
        Pixel position (row, col) of the wavefront-set point.
    phi : array-like, length 1
        Orientation in degrees (0-based), wrapped in a length-1 array.
    size : int
        Side length of the square image.
    sinogram_shape : tuple
        Shape of the sinogram, (num_angles, detector_size).

    Returns
    -------
    np.ndarray
        [s_0, (phi_0 + 90) % 180]: detector coordinate and sinogram angle.
    """
    # Extract the scalar orientation once: the original compared the raw
    # length-1 array against 90 (implicit array truth value) and returned a
    # ragged [scalar, length-1 array] pair, which newer NumPy versions reject.
    phi0 = phi[0]
    # Compute the angle in radians
    rad_phi = (phi0 * np.pi) / 180
    # In the images the coordinates are switched and moved
    x01, x02 = x[1] - int(size / 2), x[0] - int(size / 2)
    # Compute the distance to center component
    if phi0 <= 90:
        dist_center = -x01 * np.sin(rad_phi) + x02 * np.cos(rad_phi) + int(sinogram_shape[1] / 2)
    else:
        dist_center = x01 * np.sin(rad_phi) - x02 * np.cos(rad_phi) + int(sinogram_shape[1] / 2)
    return np.array([dist_center, (phi0 + 90) % 180])
def class_img2sino(x, phi, size, sinogram_shape):
    """Compute the sinogram-domain orientation class for one wavefront point.

    Returns a length-1 array holding the orientation in degrees (1-based)
    of the corresponding singularity in the sinogram.
    """
    # Use the scalar orientation throughout instead of comparing the raw
    # length-1 array against 90 (implicit array truth value is fragile).
    phi0 = phi[0]
    # Compute the angle in radians
    rad_phi = (phi0 * np.pi) / 180
    # In the images the coordinates are switched and moved
    x01, x02 = x[1] - int(size / 2), x[0] - int(size / 2)
    # Compute the class
    if phi0 <= 90:
        classe = -x01 * np.cos(rad_phi) - x02 * np.sin(rad_phi)
    else:
        classe = x01 * np.cos(rad_phi) + x02 * np.sin(rad_phi)
    # arctan gives (-90, 90) degrees; wrap into [0, 180) and shift to 1-based
    return np.array([((np.arctan(classe)) * 180 / np.pi) % 180 + 1])
def CanRel_img2sino(WFpoints, WFclasses, size, sinogram_shape, num_angles):
    """Apply the digital primal canonical relation: image WF set -> sinogram WF set."""
    angle_step = int(180 / num_angles)
    sino_points = []
    sino_classes = []
    for idx in range(WFpoints.shape[0]):
        # Only orientations that fall on the sampled angular grid survive
        if WFclasses[idx][0] % angle_step != 0:
            continue
        zero_based = WFclasses[idx] - 1
        sino_points.append(point_img2sino(WFpoints[idx], zero_based, size, sinogram_shape))
        sino_classes.append(class_img2sino(WFpoints[idx], zero_based, size, sinogram_shape))
    return np.array(sino_points), sino_classes
def WFupdate_sino(WFpoints, WFclasses, WFimage):
    """Write wavefront-set classes into a sinogram-shaped label image.

    WFpoints stores (x, y) = (column, row) pairs, so the row index comes
    from column 1 and the column index from column 0. WFimage is modified
    in place and also returned. (Removed the unused local ``size``.)
    """
    rows = WFpoints.astype(int)[:, 1]
    cols = WFpoints.astype(int)[:, 0]
    WFimage[rows, cols] = np.array(WFclasses)[:, 0]
    return WFimage
# ## Dual canonical relation
def point_sino2img(y, varphi, size, sinogram_shape):
    """Apply the dual canonical relation to one sinogram point (s, phi)."""
    s_val, phi_val = y
    # Measure the detector coordinate from the centre of the detector row
    s_val = s_val - sinogram_shape[1] / 2
    rad_varphi = ((varphi[0]) * np.pi) / 180
    rad_phi = (phi_val * np.pi) / 180
    tan_term = np.tan(rad_varphi)
    x01 = s_val * np.cos(rad_phi) - tan_term * np.sin(rad_phi)
    x02 = s_val * np.sin(rad_phi) + tan_term * np.cos(rad_phi)
    half = int(size / 2)
    # Image coordinates come back in (row, col) order, shifted to the centre
    return np.array([x02 + half, x01 + half])
def CanRel_sino2img(WFpoints_sino, WFclasses_sino, size, sinogram_shape, num_angles):
    """Apply the digital dual canonical relation: sinogram WF set -> image WF set."""
    img_points = []
    img_classes = []
    n_points = WFpoints_sino.shape[0]
    for idx in range(n_points):
        sino_pt = WFpoints_sino[idx]
        img_points.append(point_sino2img(sino_pt, WFclasses_sino[idx] - 1, size, sinogram_shape))
        # Image-domain orientation is the sinogram angle rotated back by 90 degrees
        img_classes.append(np.array([(sino_pt[1] - 90) % 180]))
    return np.array(img_points), img_classes
# ## Load data
# Phantom parameters: image size, ellipse count, orientation bias and class count
size = 256
nEllipses = 10
dirBias = 65
nClasses = 180
phantom, WFpoints, WFclasses, WFimage = random_phantom(size, nEllipses, dirBias, nClasses)
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(phantom, cmap = 'gray')
#plt.plot(WFpoints[:,0], WFpoints[:,1], 'bo', markersize = 0.5)
plt.figure(figsize = (6,6))
plt.axis("off")
plot_WF(WFimage)
# Computing sinogram
sinogram = np.array(operator(space.element(phantom)))
sinogram_shape = sinogram.shape
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(sinogram, cmap = 'gray')
# ## Primal canonical relation
WFpoints_sino, WFclasses_sino = CanRel_img2sino(WFpoints, WFclasses, size, sinogram_shape, num_angles)
# Empty label image in the sinogram domain, then burn in the WF classes
WFimage_sino = np.zeros(sinogram.shape)
WFimage_sino = WFupdate_sino(WFpoints_sino, WFclasses_sino, WFimage_sino)
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(sinogram, cmap = 'gray')
plt.plot(WFpoints_sino[:,0],WFpoints_sino[:,1], 'bo', markersize = 0.5)
plt.figure(figsize = (6,6))
plt.axis("off")
plot_WF(WFimage_sino)
# ## Dual canonical relation
# Map the sinogram WF set back to the image domain and overlay on the phantom
WFpoints_canon, WFclasses_canon = CanRel_sino2img(WFpoints_sino, WFclasses_sino, size, sinogram_shape, num_angles)
WFimage_canon = np.zeros([size,size])
WFimage_canon = WFupdate(WFpoints_canon, WFclasses_canon, WFimage_canon)
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(phantom, cmap = 'gray')
plt.plot(WFpoints_canon[:,0],WFpoints_canon[:,1], 'bo', markersize = 0.5)
plt.figure(figsize = (6,6))
plt.axis("off")
plot_WF(WFimage_canon)
# ## Low dose
size = 256
# Create ODL data structures
space = odl.uniform_discr([-int(size/2), -int(size/2)], [int(size/2), int(size/2)], [size, size],
                          dtype='float32')
# Only 60 projection angles instead of 180 (low-dose acquisition)
num_angles = 60
geometry = odl.tomo.parallel_beam_geometry(space, num_angles=num_angles)
operator = odl.tomo.RayTransform(space, geometry)
# Ensure operator has fixed operator norm for scale invariance
opnorm = odl.power_method_opnorm(operator)
operator = (1 / opnorm) * operator
sinogram = np.array(operator(space.element(phantom)))
sinogram_shape = sinogram.shape
plt.figure(figsize = (6,6))
plt.axis("off")
plt.imshow(sinogram, cmap = 'gray')
sinogram_shape
# ## Low dose primal canonical relation
# Low dose version
# Angles retained by the low-dose acquisition (every int(180/num_angles)-th degree)
angles_lowd = np.array([i for i in range(0,180,int(180/num_angles))])
# Extracting the wavefront set orientations in the low dose
angles_primal = (np.array(WFclasses).astype(int)[:,0]-1)
# Boolean mask: which WF orientations remain visible under angular subsampling
angles_primal_lowd = np.array([angle in angles_lowd for angle in angles_primal])
# Generating the new WFpoints and classes
WFpoints_primal_lowd = WFpoints[angles_primal_lowd]
WFclasses_primal_lowd = list(np.array(WFclasses)[angles_primal_lowd])
# Generating the low dose WFimage
WFimage_lowd = np.zeros([size,size])
WFimage_lowd = WFupdate(WFpoints_primal_lowd, WFclasses_primal_lowd, WFimage_lowd)
plt.figure(figsize=(6,6))
plt.axis('off')
plot_WF(WFimage_lowd)
WFpoints_sino_lowd, WFclasses_sino_lowd = CanRel_img2sino(WFpoints_primal_lowd, WFclasses_primal_lowd, size, sinogram_shape, num_angles = 180)
# Rescale the angular coordinate from the 180-angle grid to the low-dose grid
WFpoints_sino_lowd[:,1] = num_angles* WFpoints_sino_lowd[:,1]/180
WFimage_sino_lowd = np.zeros(sinogram.shape)
# Bug fix: update the freshly created low-dose image. The original passed the
# full-dose WFimage_sino here, so the zeros array above was silently discarded.
WFimage_sino_lowd = WFupdate_sino(WFpoints_sino_lowd, WFclasses_sino_lowd, WFimage_sino_lowd)
plt.figure(figsize = (6,6))
plt.axis("off")
plot_WF(WFimage_sino_lowd)
# ## Low dose dual canonical relation
## Upscaling WFpoints_sino_lowd
# Undo the angular rescaling so the dual relation sees full-resolution angles
WFpoints_sino_lowd_up = WFpoints_sino_lowd.copy()
WFpoints_sino_lowd_up[:,1] = WFpoints_sino_lowd_up[:,1]*180/num_angles
WFclasses_sino_lowd_up = WFclasses_sino_lowd
WFpoints_canon, WFclasses_canon = CanRel_sino2img(WFpoints_sino_lowd_up, WFclasses_sino_lowd_up, size, (180,sinogram_shape[1]) , num_angles = 60)
WFimage_canon = np.zeros([size,size])
WFimage_canon = WFupdate(WFpoints_canon, WFclasses_canon, WFimage_canon)
plt.figure(figsize=(6,6))
plt.axis('off')
plot_WF(WFimage_canon)
| Microcanonical_NN/Numpy_microlocal/Radon_microlocal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="bOChJSNXtC9g"
# # PyTorch
# + [markdown] colab_type="text" id="OLIxEDq6VhvZ"
# In this lesson we'll learn about PyTorch which is a machine learning library used to build dynamic neural networks. We'll learn about the basics, like creating and using Tensors, in this lesson but we'll be making models with it in the next lesson.
#
# <img src="figures/pytorch.png" width=300>
# + [markdown] colab_type="text" id="VoMq0eFRvugb"
# # Tensor basics
# + colab={} colab_type="code" id="rX7Vs1JxL9wX"
# Let's make sure the libraries are installed
# #!pip install numpy
# #!pip install torch
# Now import the libraries
import numpy as np
import torch
import warnings
warnings.filterwarnings('ignore')
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="Nv0xryLkKujV" outputId="d46d5e58-2195-40a8-841c-26b627541a83"
# NOTE: torch.Tensor(3, 4) allocates an *uninitialized* tensor (whatever
# bytes the memory held), not zeros — use torch.zeros(3, 4) for actual zeros.
x = torch.Tensor(3, 4)
print("Type: {}".format(x.type()))
print("Size: {}".format(x.shape))
print("Values: \n{}".format(x))
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="vnyzY4PHL7c5" outputId="70ed373d-e7e0-43cd-e732-51be86377721"
# Creating a random tensor
x = torch.randn(2, 3) # normal distribution (rand(2,3) -> uniform distribution)
print (x)
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="DVwGNeKxMXI8" outputId="6a185aa3-96f2-4e29-b116-3de3025cff4d"
# Zero and Ones tensor
x = torch.zeros(2, 3)
print (x)
x = torch.ones(2, 3)
print (x)
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="BPjHnDmCMXLm" outputId="c14c494e-b714-4983-eb90-665064830a14"
# List → Tensor
x = torch.Tensor([[1, 2, 3],[4, 5, 6]])
print("Size: {}".format(x.shape))
print("Values: \n{}".format(x))
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="mG4-CHkgMXOE" outputId="2b9ed2e5-9862-480e-d0ce-d231676d7f49"
# NumPy array → Tensor
# NOTE: torch.from_numpy shares memory with the NumPy array (no copy is made)
x = torch.from_numpy(np.random.rand(2, 3))
print("Size: {}".format(x.shape))
print("Values: \n{}".format(x))
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="L8X2-5cqMXRA" outputId="af1c82ab-b8d7-4ea6-e142-7f8ed50fda40"
# Changing tensor type
# torch.Tensor(3, 4) starts as an uninitialized FloatTensor; .long() casts to int64
x = torch.Tensor(3, 4)
print("Type: {}".format(x.type()))
x = x.long()
print("Type: {}".format(x.type()))
# + [markdown] colab_type="text" id="S2BRPaMvPbe3"
# # Tensor operations
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="Xrn8I76TMXT1" outputId="556b9d7f-79da-415c-f85d-648c5394e3a3"
# Addition
x = torch.randn(2, 3)
y = torch.randn(2, 3)
z = x + y
print("Size: {}".format(z.shape))
print("Values: \n{}".format(z))
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="157fC9WsMXWf" outputId="a6890b43-4c74-42c6-d654-f62b8c130403"
# Dot product
# NOTE: torch.mm is 2-D matrix multiplication, so (2,3) @ (3,2) -> (2,2)
x = torch.randn(2, 3)
y = torch.randn(3, 2)
z = torch.mm(x, y)
print("Size: {}".format(z.shape))
print("Values: \n{}".format(z))
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="G6316lAmMXZG" outputId="3dce79e7-1b9f-4218-84cd-afbb16af7dd4"
# Transpose
x = torch.randn(2, 3)
print("Size: {}".format(x.shape))
print("Values: \n{}".format(x))
y = torch.t(x)
print("Size: {}".format(y.shape))
print("Values: \n{}".format(y))
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="FCgDCOCjMXcF" outputId="ff1e16f5-bcd9-407f-9c99-361a0b7f27f6"
# Reshape
# view() shares storage with x and requires a contiguous tensor
z = x.view(3, 2)
print("Size: {}".format(z.shape))
print("Values: \n{}".format(z))
# + colab={"base_uri": "https://localhost:8080/", "height": 561} colab_type="code" id="T3-6nGgvECH9" outputId="9599adaf-1feb-4a42-d4b5-af23f1de5b2d"
# Dangers of reshaping (unintended consequences)
x = torch.tensor([
    [[1,1,1,1], [2,2,2,2], [3,3,3,3]],
    [[10,10,10,10], [20,20,20,20], [30,30,30,30]]
])
print("Size: {}".format(x.shape))
print("Values: \n{}\n".format(x))
# view() keeps the raw storage order, so this mixes the two outer "batches"
a = x.view(x.size(1), -1)
print("Size: {}".format(a.shape))
print("Values: \n{}\n".format(a))
# transpose first (then make contiguous so view() is legal) to get the intended layout
b = x.transpose(0,1).contiguous()
print("Size: {}".format(b.shape))
print("Values: \n{}\n".format(b))
c = b.view(b.size(0), -1)
print("Size: {}".format(c.shape))
print("Values: \n{}".format(c))
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="hRtG5LShMXew" outputId="b54e520a-8cd5-40a9-8b38-64919574dce0"
# Dimensional operations
x = torch.randn(2, 3)
print("Values: \n{}".format(x))
y = torch.sum(x, dim=0) # add each row's value for every column
print("Values: \n{}".format(y))
z = torch.sum(x, dim=1) # add each column's value for every row
print("Values: \n{}".format(z))
# + [markdown] colab_type="text" id="zI0ZV45PrYmw"
# # Indexing, Splicing and Joining
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="iM3UFrs0MXhL" outputId="bfcbbf13-d8a1-4fc1-f244-fd54068ca74b"
x = torch.randn(3, 4)
print("x: \n{}".format(x))
print ("x[:1]: \n{}".format(x[:1]))
print ("x[:1, 1:3]: \n{}".format(x[:1, 1:3]))
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="_tbpwGxcMXj0" outputId="678e805f-f5ec-49fe-d8d6-0986a3c41672"
# Select with dimensional indices
x = torch.randn(2, 3)
print("Values: \n{}".format(x))
col_indices = torch.LongTensor([0, 2])
chosen = torch.index_select(x, dim=1, index=col_indices) # values from column 0 & 2
print("Values: \n{}".format(chosen))
row_indices = torch.LongTensor([0, 1])
chosen = x[row_indices, col_indices] # values from (0, 0) & (1, 2): rows [0,1] paired with cols [0,2]
print("Values: \n{}".format(chosen))
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="tMeqSQtuMXmH" outputId="9fa99c82-78d9-41f8-d070-710cf1b045c7"
# Concatenation
x = torch.randn(2, 3)
print("Values: \n{}".format(x))
y = torch.cat([x, x], dim=0) # stack by rows (dim=1 to stack by columns)
print("Values: \n{}".format(y))
# + [markdown] colab_type="text" id="JqiDuIC-ByvO"
# # Gradients
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="qxpGB7-VL7fs" outputId="a7964762-60d4-4e0e-bed2-b2d392804494"
# Tensors with gradient bookkeeping
x = torch.rand(3, 4, requires_grad=True)
y = 3*x + 2
z = y.mean()
z.backward() # z has to be scalar
print("Values: \n{}".format(x))
# dz/dx = 3/N = 3/12 = 0.25 for every element (N = 12 elements in the mean)
print("x.grad: \n", x.grad)
# + [markdown] colab_type="text" id="uf7htaAMDcRV"
# * $ y = 3x + 2 $
# * $ z = \sum{y}/N $
# * $ \frac{\partial(z)}{\partial(x)} = \frac{\partial(z)}{\partial(y)} \frac{\partial(y)}{\partial(x)} = \frac{1}{N} * 3 = \frac{1}{12} * 3 = 0.25 $
# + [markdown] colab_type="text" id="<KEY>"
# # CUDA tensors
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="E_C3en05L7iT" outputId="01b0eddc-db28-4786-ae48-a1004c838186"
# Is CUDA available? (False means a CPU-only runtime)
print (torch.cuda.is_available())
# + [markdown] colab_type="text" id="za47KWEJ6en2"
# If the code above returns False, then go to `Runtime` → `Change runtime type` and select `GPU` under `Hardware accelerator`.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="BY2DdN3j6ZxO" outputId="ec0ac0bd-461d-4b45-e131-cbf1d19c955b"
# Create an *uninitialized* tensor explicitly on the CPU
# (torch.Tensor(3, 4) does not zero its memory)
x = torch.Tensor(3, 4).to("cpu")
print("Type: {}".format(x.type()))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="EcmdTggzEFPi" outputId="0e3326db-8d3d-40aa-accd-b31ab841b572"
# Create an *uninitialized* tensor and move it to the GPU
x = torch.Tensor(3, 4).to("cuda")
print("Type: {}".format(x.type()))
| notebooks/10-steps-to-DS/05_PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import sys, os
# Make the project root and the vendored MiDaS checkout importable
sys.path.append(os.path.normpath(os.path.join(os.getcwd(), '..')))
sys.path.append(os.path.normpath(os.path.join(os.getcwd(), '..', 'external', 'MiDaS')))
from midas_hkrm.utils import construct_config
from detectron2.modeling import build_model
# NOTE(review): imported for its side effects — presumably registers custom
# model components with detectron2; confirm before removing
import midas_hkrm.objects
# Build the model from the default config to verify the setup works end to end
cfg = construct_config()
build_model(cfg)
| notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ISM Week 04 Test
#
# This test will ask you questions very similar to the ones we worked through in week 02. It will cover:
#
# * Holding period returns
# * Arithmetic and Geometric Means
# * Variance
# * Standard Deviation
# * Skewness
# * Kurtosis
# * Value at Risk
#
# importing necessary libraries
import pandas as pd
import numpy as np
# ### Question 1
#
# Consider stock A with closing prices starting at time 0 over the next 4 months as given to you.
#
# Tasks:
# 1. Calculate the monthly holding period returns (regular, not log returns). Name the container/variable/vector containing the holding period returns "returns".
# 2. Calculate the 3-month holding period return by using the monthly returns in part 1. Name it return_3m_rtns.
# 3. Calculate the 3-month holding period return by using the relevant starting and ending prices. Name it return_3m_prcs.
# copy-paste your closing prices below like so and run the cell: lst = [10, 11.20, 13.44, 9.41, 10.35]
lst = []
prices = pd.DataFrame(lst)
prices
# include your code for task 1. here
# include your code for task 2. here
# include your code for task 3. here
# ### Question 2
#
# Use the return calculations you performed in Question 1 to perform the following tasks.
#
# Tasks:
# 1. Calculate the arithmetic mean of stock A, name it mean_a
# 2. Calculate the geometric mean of the returns of Stock A, name it mean_g
# 3. Answer the question that follows based on your calculations
#
# include your code for task 1. here
# include your code for task 2. here
# + active=""
# Task 3
#
# Which of the following statements is correct regarding your calculations above?
#
# I. The arithmetic average tells me what I expect to earn if I buy the stock today and hold it for a month.
# II. The geometric mean tells me what average monthly return I earned over the last 3 months.
# III. The arithmetic mean is always greater than the geometric mean
#
# Option 1: I only
# Option 2: II only
# Option 3: I and II
# Option 4: I, II and III
# -
# enter your answer below by replacing 0 with your choice among options 1, 2, 3 or 4
test1 = 0
# ### Question 3
#
# Use the return calculations you performed in Question 1 to perform the following tasks.
#
# Tasks:
#
# 1. Calculate the variance of the returns of stock A, name it var.
# 2. Calculate the standard deviation of the returns of stock A, name it std.
# 3. Calculate the skewness of the returns of stock A, name it skew.
# 4. Calculate the kurtosis of the returns of stock A, name it kurt.
#
# + nbgrader={"grade": false, "grade_id": "cell-28c5d6c906515adf", "locked": false, "schema_version": 3, "solution": true, "task": false}
# include your code for task 1. here
# + nbgrader={"grade": false, "grade_id": "cell-2ff1f284adbe68e0", "locked": false, "schema_version": 3, "solution": true, "task": false}
# include your code for task 2. here
# + nbgrader={"grade": false, "grade_id": "cell-ee827369b16e8f25", "locked": false, "schema_version": 3, "solution": true, "task": false}
# include your code for task 3. here
# + nbgrader={"grade": false, "grade_id": "cell-98702e013c228887", "locked": false, "schema_version": 3, "solution": true, "task": false}
# include your code for task 4. here
# -
# ### Question 4
#
# You have a csv file of monthly historical prices on an exchange traded fund of US exploration&production oil&gas companies. It has been read in for you.
#
# Tasks:
# 1. Calculate the returns, name the container rets.
# 2. Calculate the 5% VaR on a monthly basis assuming normality per unit of currency invested in the fund, name this val_risk_norm.
# 3. Calculate the 5% VaR on a monthly basis not assuming normality per unit of currency invested in the fund, name this val_risk_non_norm.
# 4. Answer the question regarding your calculations.
prices_fund = pd.read_csv("fund_monthly.csv")
prices_fund.head(5)
prices_fund = pd.read_csv("fund_monthly.csv")
new_header = prices_fund.date
prices_fund = prices_fund.prices
prices_fund.head(5)
# include your code for task 1. here
# include your code for task 2. here
# include your code for task 3. here
# + active=""
# Task 4
#
# Which of the following statements is correct regarding your calculations above?
#
# I. The VaR assuming normality is more conservative than the VaR not assuming normality.
# II. The VaR assuming normality is overestimating the loss compared to the VaR not assuming normality.
# III. At any given month between 19.39% and 30.73% of the value of the fund could be lost in the best case of the 5% worst possible outcomes
#
# Option 1: I and III
# Option 2: II and III
# Option 3: I and II
# Option 4: I, II and III
# -
# enter your answer below by replacing 0 with your choice among options 1, 2, 3 or 4
test2 = 0
| tests_sol/week_04_test_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 5 - EfficientNet and Custom Pretrained Models
# ## Lesson Video:
#hide_input
from IPython.lib.display import YouTubeVideo
from datetime import timedelta
start = int(timedelta(minutes=36, seconds=43).total_seconds())
YouTubeVideo('4w3sEgqDvSo', start=start)
#hide
#Run once per session
# !pip install fastai wwf timm -q --upgrade
#hide_input
from wwf.utils import state_versions
state_versions(['fastai', 'fastcore', 'wwf', 'timm'])
# This notebook will cover:
# * Using a `PyTorch` model
# * Using pre-trained weights for transfer learning
# * Setting up a `cnn_learner` style `Learner`
# ## The Problem:
#
# The problem today will be a familiar one, `PETs`, as we are going to focus on the `Learner` instead
from fastai.vision.all import *
# Below you will find the exact imports for everything we use today
# +
from fastcore.xtras import Path
from fastai.callback.hook import summary
from fastai.callback.progress import ProgressCallback
from fastai.callback.schedule import lr_find, fit_one_cycle
from fastai.data.block import DataBlock, CategoryBlock
from fastai.data.core import Datasets
from fastai.data.external import untar_data, URLs
from fastai.data.transforms import get_image_files, Normalize, RandomSplitter, GrandparentSplitter, RegexLabeller, ToTensor, IntToFloatTensor, Categorize, parent_label
from fastai.learner import Learner
from fastai.losses import LabelSmoothingCrossEntropy
from fastai.metrics import error_rate, accuracy
from fastai.vision.augment import aug_transforms, RandomResizedCrop, Resize, FlipItem
from fastai.vision.core import PILImage, imagenet_stats
from fastai.vision.data import ImageBlock
from fastai.vision.learner import cnn_learner, create_head, create_body, num_features_model, default_split, has_pool_type, apply_init
import torch
from torch import nn
from torchvision.models.resnet import resnet18
from timm import create_model
# -
# Let's make our usual dataloaders real quick
path = untar_data(URLs.PETS)/'images'
fnames = get_image_files(path)
# Pattern extracting the breed label from filenames like 'great_pyrenees_173.jpg'
pat = r'/([^/]+)_\d+.*'
batch_tfms = [*aug_transforms(size=224, max_warp=0), Normalize.from_stats(*imagenet_stats)]
item_tfms = RandomResizedCrop(460, min_scale=0.75, ratio=(1.,1.))
bs=64
pets = DataBlock(blocks=(ImageBlock, CategoryBlock),
                 get_items=get_image_files,
                 splitter=RandomSplitter(),
                 get_y=RegexLabeller(pat=pat),  # reuse `pat` instead of duplicating the regex
                 item_tfms=item_tfms,
                 batch_tfms=batch_tfms)
dls = pets.dataloaders(path, bs=bs)
dls.show_batch(max_n=9, figsize=(6,7))
# Now let's focus on our `EfficentNet` model. We'll be working out of Ross Wightman's repository [here](https://github.com/rwightman/pytorch-image-models). Included in this repository is tons of pretrained models for almost every major model in Computer Vision. All were for 224x224 training and validation size. Let's install it
# !pip install timm
# Now we can then use his weights one of two ways. First we'll show the direct way to load it in, then we'll load in the weights ourselves
from timm import create_model
net = create_model('efficientnet_b3a', pretrained=True)
# Now let's take a look at our downloaded model, so we know how to modify it for transfer learning. With fastai models we can do something like so:
learn = cnn_learner(dls, resnet18)
learn.model[-1]
# And we see this head of our model! Let's see if we can do this for our `EfficientNet`
net[-1]
# No! Why?
len(learn.model)
len(net)
# We can see that our `fastai` model was **split** into two different layer groups:
#
# * Group 1: Our encoder, which is everything but the last layer of our original model
# * Group 2: Our head, which is a `fastai` version of a `Linear` layer plus a few extra bits
create_head(2048, 10)
# How do we do this for our model? Let's take a look at it:
net
# We can see that our `Pooling` layer and our `Linear` layer is the last two layers of our model. Let's pop those off
# Now if we use the original `fastai` `create_body` function, we'll get an error:
body = create_body(net, pretrained=False, cut=-1)
# Why? Let's take a look
def create_body(arch, pretrained=True, cut=None):
    """Cut off the body of a typically pretrained `arch` as determined by `cut`.

    `arch` is a callable (e.g. a torchvision constructor) accepting a
    `pretrained` keyword and returning the full model.  `cut` may be:
      * None     -- cut just before the last pooling layer (auto-detected)
      * int      -- keep the first `cut` child modules
      * callable -- applied to the model; its return value is the body
    """
    model = arch(pretrained=pretrained)
    if cut is None:
        # find the index of the last pooling layer; everything before it is the body
        ll = list(enumerate(model.children()))
        cut = next(i for i,o in reversed(ll) if has_pool_type(o))
    if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
    elif callable(cut): return cut(model)
    # fixed: original raised undefined `NamedError`; use NameError to match create_timm_body
    else: raise NameError("cut must be either integer or a function")
# We can see that arch needs to be a **generator**. Let's try to make a function to help us with specifically his library
def create_timm_body(arch:str, pretrained=True, cut=None):
    "Build a headless body from the `timm` model named `arch`, cut as determined by `cut`."
    net = create_model(arch, pretrained=pretrained)
    if cut is None:
        # default: cut just before the last pooling layer in the model
        children = list(enumerate(net.children()))
        cut = next(idx for idx, layer in reversed(children) if has_pool_type(layer))
    if isinstance(cut, int):
        return nn.Sequential(*list(net.children())[:cut])
    if callable(cut):
        return cut(net)
    raise NameError("cut must be either integer or function")
# Let's try it out!
body = create_timm_body('efficientnet_b3a', pretrained=True)
len(body)
# Now we can see that we have seven separate groups
body
# But we've popped off the last layers we don't need! Let's move on to the head of the model. We know the input should be `3072` (we can see this in the last linear layer of the original model), and we need to double it because of our `AdaptiveConcatPooling`. We want it to have an output matching our number of classes. But what if we don't know that input size?
nf = num_features_model(nn.Sequential(*body.children())) * (2); nf
head = create_head(nf, dls.c)
head
# Now finally we need to wrap it together
model = nn.Sequential(body, head)
# And then we initialize our new head of our model
apply_init(model[1], nn.init.kaiming_normal_)
# Now we have our two layer-long model! What's next?
len(model)
# Let's try making a `Learner`
learn = Learner(dls, model, loss_func=LabelSmoothingCrossEntropy())
learn.summary()
# Oh no! It isn't frozen, what do we do? We never split the model! Since we have it set to where `model[0]` is the first group and `model[1]` is the second group, we can use the `default_split` splitter. Let's try again
learn = Learner(dls, model, loss_func=LabelSmoothingCrossEntropy(),
splitter=default_split, metrics=error_rate)
learn.freeze()
learn.summary()
# That looks much better. Let's train!
learn.lr_find()
learn.fit_one_cycle(5, slice(3e-2))
learn.save('stage_1')
# Then we can unfreeze it and train a little more
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(5, 1e-4)
learn.save('model_2')
# One of the hardest parts about training the `EfficientNet` models is figuring out how to find the right learning rate that won't break everything, so choose cautiously and always a bit lower than what you may want to use after unfreezing
# We barely under-matched our Resnet34, **but** we're using a model that is **57%** the size of the Resnet34!
# ## Pretrained PKL File
#
# Let's say we have a different situation:
#
# * No "pretrained=True"
# * Have downloaded weights
#
# * Possible scenarios:
# * Experimenting with a model
# * Transfer learning twice
#
# How do we do it? Let's take our Cats and Dogs model and train ImageWoof off of it
path = untar_data(URLs.IMAGEWOOF)
tfms = [[PILImage.create], [parent_label, Categorize()]]
item_tfms = [ToTensor(), Resize(128)]
batch_tfms = [FlipItem(), RandomResizedCrop(128, min_scale=0.35),
IntToFloatTensor(), Normalize.from_stats(*imagenet_stats)]
items = get_image_files(path)
split_idx = GrandparentSplitter(valid_name='val')(items)
dsets = Datasets(items, tfms, splits=split_idx)
dls = dsets.dataloaders(after_item=item_tfms, after_batch=batch_tfms, bs=64)
dls.show_batch()
# Let's walk through how we would do that. First let's grab our model and make our `Learner` like we did before, with **everything** but the pretraining!
body = create_timm_body('efficientnet_b3a', pretrained=False)
head = create_head(3072, dls.c)
model = nn.Sequential(body, head)
apply_init(model[1], nn.init.kaiming_normal_)
learn = Learner(dls, model, loss_func=LabelSmoothingCrossEntropy(),
splitter=default_split, metrics=accuracy)
# Now, remember these are all random weights right now. How do we change this? We look at the `state_dict`
learn.model.state_dict()
# The state dict tells us the parameters and weights at **every** layer. Now to work with it, we'll borrow some code from [viraat](https://forums.fast.ai/t/loading-pretrained-weights-that-are-not-from-imagenet/28821/3). Basically what we want to do is:
# 1. Keep two state_dict's, one of our new model and one of the old
# 2. If a layer matches, copy the weights
# 3. Move until there are no more layers
# 4. Finally load the new state_dict generated
learn.model_dir
def transfer_learn(learn:Learner, name:Path, device:torch.device=None):
    """Load weights `name` from `learn.model_dir` into `learn.model`, using `device`
    (defaulting to `learn.dls.device`).

    Unlike `Learner.load`, layers whose names or shapes do not match the
    checkpoint are skipped (with a message) instead of raising, which makes
    this usable for transfer learning between architectures that only
    partially overlap.  Returns the (mutated) `learn`.
    """
    if device is None: device = learn.dls.device
    learn.model_dir = Path(learn.model_dir)
    # Prefer a '.pth' file inside model_dir; otherwise treat `name` as a direct path
    if (learn.model_dir/name).with_suffix('.pth').exists(): model_path = (learn.model_dir/name).with_suffix('.pth')
    else: model_path = name
    new_state_dict = torch.load(model_path, map_location=device)['model']
    learn_state_dict = learn.model.state_dict()
    # fixed: use a distinct loop variable — the original reused `name`,
    # clobbering the function argument inside the loop
    for layer_name, param in learn_state_dict.items():
        if layer_name in new_state_dict:
            input_param = new_state_dict[layer_name]
            if input_param.shape == param.shape:
                param.copy_(input_param)
            else:
                print('Shape mismatch at:', layer_name, 'skipping')
        else:
            print(f'{layer_name} weight of the model not in pretrained weights')
    learn.model.load_state_dict(learn_state_dict)
    return learn
# Now let's load some in!
learn = transfer_learn(learn, 'stage_1')
learn.model[1][8]
# And we can see the only weight that wasn't loaded in was our new layer! Let's freeze and train our model
learn.freeze()
# Let's see if it worked. We'll do a comparison test, 5 epochs without our `transfer_learn` and five with
learn.fit_one_cycle(5, 3e-3)
# And now let's try on a regular non-transfered learner (at the same learning rate, frozen, etc)
body = create_timm_body('efficientnet_b3a', pretrained=False)
head = create_head(3072, dls.c)
model = nn.Sequential(body, head)
apply_init(model[1], nn.init.kaiming_normal_)
learn = Learner(dls, model, loss_func=LabelSmoothingCrossEntropy(),
splitter=default_split, metrics=accuracy)
learn.freeze()
learn.fit_one_cycle(5, 3e-3)
| nbs/course2020/vision/05_EfficientNet_and_Custom_Weights.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Pyspark (local)
# language: python
# name: pyspark_local
# ---
# ## Her skal det være en relevant overskrift.
# #### Innhenter verktøy fra bibliotek
# Import-stegene henter inn bibliotek med kode og funksjoner utviklet ekstern.
from datetime import datetime
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql import SQLContext
import pyspark.sql.functions as F
# #### Kjører metoden read.path for å få oversikt over hvilke parquet datasett som er tilrettelagt i tilknytning til veilederens lagringsområde i sky. Oversikt blir lest inn i egen dataframe - df_datasets.
# Aktuelt lagringsområde blir lagt inn som parameter (string objekt som vi definerer selv) INNDATA_PATH.
INNDATA_PATH = '/user/matz.ivan.faldmo/*'
df_datasets = spark.read.path(INNDATA_PATH)
# df_datsets skrives ut i output vindu.
df_datasets.show(100, False)
# #### Leser inn parquet datasett
# Leser inn parquet datsett df_sammensatt_pyspark, selekterer de variable vi skal bruke og etablerer dataframen df
df = spark.read.path('/user/matz.ivan.faldmo/df_sammensatt_pyspark').select('Land', 'Areal', 'BNP', 'Innbyggerantall')
df.show()
# #### Kjører ut statistiske størrelser
# Kjører ut statistiske størrelser fra dataframe df med metoden describe. Resultat fra metoden blir lagt i dataframen df_stat.
df_stat = df.describe()
df_stat.show()
# Transformerer df_stat (med bl.a. pivot funksjonen) slik at rader i df_stat blir kolonner i dataframe df_stat_piv. Formål er å gjøre output mer oversiktlig ved at alle størrelser på en gitt variabel blir liggende i samme rad. Dette kan være hensiktsmessig i de tilfeller antall "statistikkvaiable" er høyt (dvs langt høyere enn de fire som inngår i eksempel.
# Pivot df_stat so each statistics column becomes one row in df_stat_piv.
# fixed: the per-column pivot result no longer rebinds `df` — the original
# loop clobbered the source dataframe, breaking the median cell below
# (df.approxQuantile on 'Areal'/'Innbyggerantall'/'BNP').
var_lst = list(df_stat.columns)
var_lst.remove('summary')
df_stat_piv = None
for col in var_lst:
    # one row per variable: the 'summary' statistics become columns, plus the variable name
    df_piv = df_stat.groupBy().pivot('summary').agg(F.sum(col)).withColumn('variabel', F.lit(col))
    df_stat_piv = df_piv if df_stat_piv is None else df_stat_piv.unionByName(df_piv)
df_stat_piv.show()
# Kjører ut median (ikke fullført)
liste_kolonner_median = ['Areal', 'Innbyggerantall', 'BNP']
df_median = df.approxQuantile(liste_kolonner_median, [0.5], 0)
print(df_median)
| utforske/mif_pyspark_utforsk2.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++11
// language: C++11
// name: xeus-cling-cpp11
// ---
// # Sort for fun! #
#include <vector>
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdlib>
// just use rand to get some random values
std::vector<int> v(10);
for (auto &val:v) val = std::rand()%20; // use ranged-for
v
std::sort(v.begin(), v.end());
v
// for user data
// A minimal record type used to demonstrate sorting of user-defined data.
struct Student {
    int id;
    std::string name;
    // member-wise swap; the free swap() below delegates to it
    void swap(Student &other) {
        std::swap(id, other.id);
        std::swap(name, other.name);
    }
    // note that for jupyter, it seems like
    // we need to use member function for
    // operators
    // ordering is by id only, so std::sort arranges students by id
    bool operator<(const Student &other) const {
        return id<other.id;
    }
};
// free version
// found via argument-dependent lookup, so generic code that swaps
// unqualified (`swap(a, b)`) picks up this efficient overload
void swap(Student &l, Student &r) { l.swap(r); }
// note that we can use {} to indicate raw struct
std::vector<Student> stdts;
stdts.push_back({2, "Joe"});
stdts.push_back({1, "Janne"});
stdts.push_back({3, "Chow"});
std::cout << "before sorting the student database, the list is\n";
for (const auto &stdt:stdts)
std::cout << "name: " << stdt.name
<< ", id: " << stdt.id << '\n';
std::sort(stdts.begin(), stdts.end());
std::cout << "after sorting the student database, the list is\n";
for (const auto &stdt:stdts)
std::cout << "name: " << stdt.name
<< ", id: " << stdt.id << '\n';
| notebooks/9/sort.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 3: Regridding
# **Prerequisite**: Minimal xarray knowledge in [Chapter01_NetCDF_xarray.ipynb](./Chapter01_NetCDF_xarray.ipynb).
#
# One of the most difficult thing in Earth science data processing is regridding. But it is no longer difficult with the [xESMF](http://xesmf.readthedocs.io) package I wrote.
#
#
# **Windows users** [need some additional tricks](http://xesmf.readthedocs.io/en/latest/installation.html#notes-for-windows-users) in order to use xESMF on your local computer. Besides the Binder cloud, using a Linux server might be the easiest way. If you don't have a Linux server at all, try my [GEOS-Chem on cloud project](http://cloud-gc.readthedocs.io/)
# +
# those modules are almost always imported when working with model data
# %matplotlib inline
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import numpy as np
import xarray as xr
from gamap_colormap import WhGrYlRd
import xesmf as xe # the regridding tool !
# -
# # The first example
# We use the restart file as in Chapter 1. A common need is to regrid a global low-resolution restart file to a regional high-resolution grid.
ds = xr.open_dataset("initial_GEOSChem_rst.4x5_tropchem.nc")
# We first focus on a single variable O3:
ds['TRC_O3']
# Let's regrid it to the [0.5 x 0.625 NA nested grid](http://wiki.seas.harvard.edu/geos-chem/index.php/GEOS-Chem_horizontal_grids#0.5_x_0.625_NA_nested_grid). First compute the target grid.
nested_grid = {'lon': np.linspace(-140, -40, 161),
'lat': np.linspace(10, 70, 121)
}
nested_grid
# xESMF contains [multiple regridding algorithms](http://xesmf.readthedocs.io/en/latest/Compare_algorithms.html). Try the simplest bilinear scheme first.
regridder_bilinear = xe.Regridder(ds, nested_grid, method='bilinear')
# Apply this "regridder" our Ozone field:
dr_nested = regridder_bilinear(ds['TRC_O3'])
dr_nested
# xESMF preserves xarray metadata (coordinate names, etc...), so you can use xarray's quick plotting method.
dr_nested.isel(time=0, lev=0).plot(cmap=WhGrYlRd)
# Compare it against the original global field:
# +
fig, axes = plt.subplots(1, 2, figsize=[10, 4], subplot_kw={'projection': ccrs.PlateCarree()})
# Plot global data on the left side
ds['TRC_O3'].isel(time=0, lev=0).plot(ax=axes[0], cmap=WhGrYlRd,
cbar_kwargs={'shrink': 0.5, 'label': 'mol/mol'})
axes[0].set_title('Global 4x5')
# Plot nested on on the right side
dr_nested.isel(time=0, lev=0).plot(ax=axes[1], cmap=WhGrYlRd,
cbar_kwargs={'shrink': 0.5, 'label': 'mol/mol'})
axes[1].set_title('Nested NA0.5 x 0.625')
for ax in axes:
ax.coastlines()
ax.gridlines(linestyle='--')
# -
# # Conservative regridding
# `bilinear` is not the best algorithm for regridding tracer field since it does not conserve total mass. A better algorithm is `conservative`.
#
# To use `conservative` regridding, you need to specify the cell boundaries, since the boundary information is needed for calculating overlapping areas (thus ensures total mass conservation). Boundary positions cannot be automatically computed from cell center positions, due to many special cases (e.g. GEOS-Chem's half-polar cells, GCHP's cubed-sphere grids), so it is safer to re-calculate it manually!
# +
nested_grid_with_bounds = {'lon': np.linspace(-140, -40, 161),
'lat': np.linspace(10, 70, 121),
'lon_b': np.linspace(-140-0.625/2, -40+0.625/2, 162),
'lat_b': np.linspace(10-0.5/2, 70+0.5/2, 122),
}
global_grid_with_bounds = {'lon': ds['lon'].values,
'lat': ds['lat'].values,
'lon_b': np.linspace(-180-5/2, 175+5/2, 73),
'lat_b': np.linspace(-92, 92, 47).clip(-90, 90), # fix half-polar cells
}
# -
# The clip() function prevents the polar cell from exceeding -90/90
global_grid_with_bounds['lat_b']
# Make a new regridder with `conservative` option and apply it to our data.
regridder_conserve = xe.Regridder(global_grid_with_bounds, nested_grid_with_bounds, method='conservative')
dr_conserve = regridder_conserve(ds['TRC_O3'])
dr_conserve
# Comparing two algorithms:
# +
fig, axes = plt.subplots(1, 2, figsize=[10, 4], subplot_kw={'projection': ccrs.PlateCarree()})
# Plot global data on the left side
dr_conserve.isel(time=0, lev=0).plot(ax=axes[0], cmap=WhGrYlRd,
cbar_kwargs={'shrink': 0.5, 'label': 'mol/mol'})
axes[0].set_title('Conservative regridding')
# Plot nested on on the right side
dr_nested.isel(time=0, lev=0).plot(ax=axes[1], cmap=WhGrYlRd,
cbar_kwargs={'shrink': 0.5, 'label': 'mol/mol'})
axes[1].set_title('Bilinear regridding')
for ax in axes:
ax.coastlines()
ax.gridlines(linestyle='--')
# -
# You can see that conservative regridding preserves the coarse-grid structure, while the bilinear algorithm produces smooth results.
# To more rigorously conserve mass, you would scale the mixing ratio field by air density before regridding, and then scale the regridding result back to mixing ratio. But let's stick to this simple method for now.
# # Loop over all variables
# Here's how you loop over all variables in an xarray `Dataset`.
for varname, dr in ds.data_vars.items():
# You have access to each DataArray `dr` inside the for loop,
# but here we just print the variable name and go to the next variable
print(varname, end=', ')
# We can regrid each variable one by one and merge them together.
# +
# %%time
result_list = [] # an empty list to hold the regridding results
for varname, dr in ds.data_vars.items():
dr_temp = regridder_conserve(dr) # temporary variable for the current tracer
result_list.append(dr_temp) # add the current result to the list
ds_result = xr.merge(result_list) # merge a list of DataArray to a single Dataset
# NOTE: The next version of xESMF (v0.2) will be able to directly regrid a Dataset,
# so you will not need those additional code. But it is a nice coding exercise anyway.
# -
# Notice how fast it is! On my Mac, it takes less than a second to regrid the entire file. This is orders of magnitude faster than IDL/gamap, and can save a lot of research time. In the past, I could spend an entire morning just to create a nested restart file (by regridding), because IDL was too slow.
ds_result # it contains all variables, on the nested grid.
# Finally, write the results into an NC file with just one line of code.
ds_result.to_netcdf('regridded_nested_NA.nc')
# !ncdump -h regridded_nested_NA.nc | head -n 30 # only print the first several lines
# Clean-up: remove the regridder "cache" if you don't need it next time
regridder_bilinear.clean_weight_file()
regridder_conserve.clean_weight_file()
| Chapter03_regridding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# # 13.15.1 Self Check
# **1. _(Fill-In)_** The folium classes `________` and `________` enable you to mark locations on a map and add text that displays when the user clicks a marked location.
# **Answer:** `Marker`, `Popup`.
#
# **2. _(Fill-In)_** Pandas DataFrame method `________` creates an iterator for accessing the rows of a DataFrame as tuples.
# **Answer:** `itertuples`.
#
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
| examples/ch13/snippets_ipynb/13_15_01selfcheck.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:packt]
# language: python
# name: conda-env-packt-py
# ---
# ## Original Text Rank
import numpy as np
import pandas as pd
import nltk
nltk.download('punkt') # one time execution
import re
df = pd.read_csv("data/tennis_articles_v4.csv")
df.head()
df['article_text'][0]
df['article_text'][1]
| Lesson6/OriginalTextRank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: Python 3.8.1 64-bit
# name: python381jvsc74a57bd0c1a071b906091fcb496007d9183923f945442b6fb9de9e58e6d35971571326e8
# ---
# # Analyzing Receipts with Form Recognizer
#
# 
#
# In the artificial intelligence (AI) field of computer vision, optical character recognition (OCR) is commonly used to read printed or handwritten documents. Often, the text is simply extracted from the documents into a format that can be used for further processing or analysis.
#
# A more advanced OCR scenario is the extraction of information from forms, such as purchase orders or invoices, with a semantic understanding of what the fields in the form represent. The **Form Recognizer** service is specifically designed for this kind of AI problem.
#
# ## View a receipt
#
# In this example, you'll use the Form Recognizer's built-in model for analyzing receipts.
#
# Click the **Run cell** (▷) button (to the left of the cell) below to run it and see an example of a receipt that you'll use Form Recognizer to analyze.
# + gather={"logged": 1599694427893}
import matplotlib.pyplot as plt
from PIL import Image
import os
# %matplotlib inline
# Load and display a receipt image
fig = plt.figure(figsize=(6, 6))
image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
img = Image.open(image_path)
plt.axis('off')
plt.imshow(img)
# -
# ## Create a Form Recognizer resource
#
# >**Note:** You can either use a Cognitive Service resource or a Form Recognizer resource to access Form Recognizer services.
#
# To create a Form Recognizer resource in your Azure subscription:
#
# 1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
# 2. Select **+ Create a resource**, and search for *Form Recognizer*.
# 3. In the list of services, select **Form Recognizer**.
# 4. In the **Form Recognizer** blade, select **Create**.
# 5. In the **Create** blade, enter the following details and select **Create**
# - **Name**: A unique name for your service
# - **Subscription**: Your Azure subscription
# - **Region**: Any available region
# - **Pricing tier**: F0
# - **Resource Group**: The existing resource group you used previously
# - **I confirm I have read and understood the notice below**: Selected.
# 6. Wait for the service to be created.
# 7. View your newly created Form Recognizer service in the Azure portal and on the **Keys and Endpoint** page, copy the **Key1** and **Endpoint** values and paste them in the code cell below, replacing **YOUR_FORM_KEY** and **YOUR_FORM_ENDPOINT**.
# + gather={"logged": 1599694505850}
form_key = '<KEY>'
form_endpoint = 'https://csvisionx.cognitiveservices.azure.com/'
print('Ready to use form recognizer at {} using key {}'.format(form_endpoint, form_key))
# -
# ## Analyze a receipt
#
# Now you're ready to use Form Recognizer to analyze a receipt.
# + gather={"logged": 1599694525404}
import os
from azure.ai.formrecognizer import FormRecognizerClient
from azure.core.credentials import AzureKeyCredential
# Create a client for the form recognizer service using the key/endpoint set above
form_recognizer_client = FormRecognizerClient(endpoint=form_endpoint, credential=AzureKeyCredential(form_key))
try:
    print("Analyzing receipt...")
    # Get the receipt image file
    image_path = os.path.join('data', 'form-receipt', 'receipt.jpg')
    # Submit the file data to form recognizer; begin_recognize_receipts returns a poller
    with open(image_path, "rb") as f:
        analyze_receipt = form_recognizer_client.begin_recognize_receipts(receipt=f)
    # Block until the asynchronous analysis completes
    receipt_data = analyze_receipt.result()
    # Print the extracted data for the first (and only) receipt
    receipt = receipt_data[0]
    receipt_type = receipt.fields.get("ReceiptType")
    if receipt_type:
        print("Receipt Type: {}".format(receipt_type.value))
    merchant_address = receipt.fields.get("MerchantAddress")
    if merchant_address:
        print("Merchant Address: {}".format(merchant_address.value))
    merchant_phone = receipt.fields.get("MerchantPhoneNumber")
    if merchant_phone:
        print("Merchant Phone: {}".format(merchant_phone.value))
    transaction_date = receipt.fields.get("TransactionDate")
    if transaction_date:
        print("Transaction Date: {}".format(transaction_date.value))
    print("Receipt items:")
    items = receipt.fields.get("Items")
    if items:
        # FIX: reuse the field fetched above instead of calling
        # receipt.fields.get("Items") a second time
        for idx, item in enumerate(items.value):
            print("\tItem #{}".format(idx+1))
            item_name = item.value.get("Name")
            if item_name:
                print("\t - Name: {}".format(item_name.value))
            item_total_price = item.value.get("TotalPrice")
            if item_total_price:
                print("\t - Price: {}".format(item_total_price.value))
    subtotal = receipt.fields.get("Subtotal")
    if subtotal:
        print("Subtotal: {} ".format(subtotal.value))
    tax = receipt.fields.get("Tax")
    if tax:
        print("Tax: {}".format(tax.value))
    total = receipt.fields.get("Total")
    if total:
        print("Total: {}".format(total.value))
except Exception as ex:
    # Broad catch is acceptable at this top-level demo boundary: report and continue
    print('Error:', ex)
# -
# Note that Form Recognizer is able to interpret the data in the form, correctly identifying the merchant address and phone number, and the transaction date and time, as well as the line items, subtotal, tax, and total amounts.
#
# ## More Information
#
# For more information about the Form Recognizer service, see [the Form Recognizer documentation](https://docs.microsoft.com/en-us/azure/cognitive-services/form-recognizer/index)
| 06 - Receipts with Form Recognizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Use Paddle Quantum on GPU
#
# <em> Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved. </em>
# ## Introduction
#
# > Note that this tutorial is time-sensitive. And different computers will have individual differences. This tutorial does not guarantee that all computers can install it successfully.
#
# In deep learning, people usually use GPU for neural network model training because GPU has significant advantages in floating-point operations compared with CPU. Therefore, using GPU to train neural network models has gradually become a common choice. In Paddle Quantum, our quantum states and quantum gates are also represented by complex numbers based on floating-point numbers. If our model can be deployed on GPU for training, it will also significantly increase the training speed.
# ## GPU selection
#
# Here, we choose Nvidia's hardware devices, and its CUDA (Compute Unified Device Architecture) supports deep learning framework better. PaddlePaddle can also be easily installed on CUDA.
#
# ## Configure CUDA environment
#
# ### Install CUDA
#
# Here, we introduce how to configure the CUDA environment in Windows 10 on the x64 platform. First, check on [CUDA GPUs | NVIDIA Developer](https://developer.nvidia.com/cuda-gpus) to see if your GPU support the CUDA environment. Then, download the latest version of your graphics card driver from [NVIDIA Driver Download](https://www.nvidia.cn/Download/index.aspx?lang=cn) and install it on your computer.
#
# In [PaddlePaddle Installation Steps](https://www.paddlepaddle.org.cn/install/quick), we found that **PaddlePaddle only supports CUDA 9.0/10.0/10.1/10.2/11.0 single card mode under Windows**, so we install CUDA 10.2 here. Find the download link of CUDA 10.2 in [CUDA Toolkit Archive | NVIDIA Developer](https://developer.nvidia.com/cuda-toolkit-archive): [CUDA Toolkit 10.2 Archive | NVIDIA Developer](https://developer.nvidia.com/cuda-10.2-download-archive). After downloading CUDA, run the installation.
#
# During the installation process, select **Custom Installation** in the CUDA options, check all the boxes except for Visual Studio Integration (unless you are familiar with it). Then check CUDA option only. Then select the default location for the installation location (please pay attention to the installation location of your CUDA, you need to set environment variables later), and wait for the installation to complete.
#
# After the installation is complete, open the Windows command line and enter `nvcc -V`. If you see the version information, the CUDA installation is successful.
#
# ### Install cuDNN
#
# Download cuDNN in [NVIDIA cuDNN | NVIDIA Developer](https://developer.nvidia.com/cudnn), according to [PaddlePaddle Installation Steps](https://www.paddlepaddle.org.cn/install/quick) requirements, we **need to use cuDNN 7.6.5+**, so we can download the version 7.6.5 of cuDNN that supports CUDA 10.2. After downloading cuDNN, unzip it. Assuming the installation path of our CUDA is `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2`. After decompressing cuDNN, we take the files in `bin`, `include` and `lib` and replace the corresponding original files in the CUDA installation path (if the file already exists, replace it, if it does not exist, paste it directly into the corresponding directory). At this point, cuDNN has been installed.
#
# ### Configure environment variables
#
# Next, you need to configure environment variables. Right-click "This PC" on the desktop of the computer (or "This PC" in the left column of "File Explorer"), select "Properties", and then select "Advanced System Settings" on the left, under the "Advanced" column Select "Environmental Variables".
#
# Now you enter the setting page of environment variables, select `Path` in the `System variables`, and click `Edit`. In the page that appears, check if there are two addresses `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\bin` and `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\libnvvp` (the prefix `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2` should be your CUDA installation location), if not, please add them manually.
#
# ### Verify that the installation is successful
#
# Open the command line and enter `cd C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\extras\demo_suite` to enter the CUDA installation path (this should also be your CUDA installation location). Then execute `.\bandwidthTest.exe` and `.\deviceQuery.exe` respectively. If both `Result = PASS` appear, the installation is successful.
#
#
# ## Install PaddlePaddle on CUDA environment
#
# According to the instructions in [PaddlePaddle Installation Steps](https://www.paddlepaddle.org.cn/install/quick), we first need to make sure our python environment is correct and use `python --version` to check the python version. Ensure that the **python version is 3.5.1+/3.6+/3.7/3.8**, and use `python -m ensurepip` and `python -m pip --version` to check the pip version, **confirm it is 20.2.2+**. Then, use `python -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple` to install the GPU version of PaddlePaddle.
#
# ## Install Paddle Quantum
#
# Download the Paddle Quantum installation package, modify `setup.py` and `requirements.txt`, change `paddlepaddle` to `paddlepaddle-gpu`, and then execute `pip install -e .` according to the installation guide of Paddle Quantum from source code.
#
# > If you have installed paddlepaddle-gpu and paddle_quantum in a new python environment, please also install jupyter in the new python environment, and reopen this tutorial under the new jupyter notebook and run it.
#
# ## Check if the installation is successful
#
# Open the new environment where we installed the GPU version of PaddlePaddle and execute the following command. If the output is `True`, it means that the current PaddlePaddle framework can run on the GPU.
#
#
# Sanity check: prints True if this PaddlePaddle build was compiled with CUDA
# support (i.e. it can run on the GPU).
import paddle
print(paddle.is_compiled_with_cuda())
# ## Use tutorials and examples
#
# In Paddle Quantum, we use the dynamic graph mode to define and train our parameterized quantum circuits. Here, we still use the dynamic graph mode and only need to define the GPU core where we run the dynamic graph mode.
# ```python
# # 0 means to use GPU number 0
# paddle.set_device('gpu:0')
# # build and train your quantum circuit model
# ```
# If we want to run on CPU, pretty much the same, define the running device as CPU:
# ```python
# paddle.set_device('cpu')
# # build and train your quantum circuit model
# ```
# We can enter `nvidia-smi` in the command line to view the usage of the GPU, including which programs are running on which GPUs, and its memory usage.
#
# Here, we take [VQE](../tutorial/quantum_simulation/VQE_EN.ipynb) as an example to illustrate how we should use GPU. First, import the related packages and define some variables and functions.
# +
import os
import numpy
from numpy import concatenate
from numpy import pi as PI
from numpy import savez, zeros
from paddle import matmul, transpose
from paddle_quantum.circuit import UAnsatz
import matplotlib.pyplot as plt
import numpy
def H2_generator():
    """Return the H2 molecule Hamiltonian and the qubit count.

    Returns:
        (hamiltonian, n_qubits): ``hamiltonian`` is a list of
        ``[coefficient, pauli_string]`` pairs (e.g. ``[0.177..., 'z0']``)
        and ``n_qubits`` is 4.
    """
    coefficients = [
        -0.04207897647782277,
        0.17771287465139946,
        0.1777128746513994,
        -0.2427428051314046,
        -0.24274280513140462,
        0.17059738328801055,
        0.04475014401535163,
        -0.04475014401535163,
        -0.04475014401535163,
        0.04475014401535163,
        0.12293305056183797,
        0.1676831945771896,
        0.1676831945771896,
        0.12293305056183797,
        0.1762764080431959,
    ]
    pauli_strings = [
        'i0', 'z0', 'z1', 'z2', 'z3', 'z0,z1',
        'y0,x1,x2,y3', 'y0,y1,x2,x3', 'x0,x1,y2,y3', 'x0,y1,y2,x3',
        'z0,z2', 'z0,z3', 'z1,z2', 'z1,z3', 'z2,z3',
    ]
    hamiltonian = [[c, p] for c, p in zip(coefficients, pauli_strings)]
    return hamiltonian, 4
Hamiltonian, N = H2_generator()
def U_theta(theta, Hamiltonian, N, D):
    """
    Quantum Neural Network

    Args:
        theta: trainable rotation angles, indexed as theta[layer][qubit][0]
            (shape [D + 1, N, 1] as built by StateNet below).
        Hamiltonian: list of [coefficient, pauli_string] terms to measure.
        N: number of qubits (circuit width).
        D: depth of the repeated entangled-layer block.

    Returns:
        Expectation value of `Hamiltonian` in the state prepared by the circuit.
    """
    # Initialize the quantum neural network according to the number of qubits/network width
    cir = UAnsatz(N)
    # Built-in {R_y + CNOT} circuit template
    cir.real_entangled_layer(theta[:D], D)
    # Add in the last row a layer of R_y rotation gates
    for i in range(N):
        cir.ry(theta=theta[D][i][0], which_qubit=i)
    # The quantum neural network acts on the default initial state |0000>
    cir.run_state_vector()
    # Calculate the expected value of a given Hamiltonian
    expectation_val = cir.expecval(Hamiltonian)
    return expectation_val
class StateNet(paddle.nn.Layer):
    """
    Construct the model net

    Wraps the trainable circuit parameters `theta` as a Paddle layer so a
    standard Paddle optimizer can minimize the Hamiltonian expectation value.
    """
    def __init__(self, shape, dtype="float64"):
        super(StateNet, self).__init__()
        # Initialize the theta parameter list and fill the initial value with the uniform distribution of [0, 2*pi]
        self.theta = self.create_parameter(
            shape=shape,
            default_initializer=paddle.nn.initializer.Uniform(low=0.0, high=2 * PI),
            dtype=dtype,
            is_bias=False)
    # Define loss function and forward propagation mechanism
    def forward(self, Hamiltonian, N, D):
        # Calculate loss function/expected value (the energy to minimize)
        loss = U_theta(self.theta, Hamiltonian, N, D)
        return loss
ITR = 80 # Set the total number of training iterations
LR = 0.2 # Set the learning rate
D = 2 # Set the depth of the repeated calculation module in the neural network
# -
# If you want to use GPU to train, run the following program:
# +
# 0 means to use GPU number 0
paddle.set_device('gpu:0')
# Determine the parameter dimension of the network
net = StateNet(shape=[D + 1, N, 1])
# Generally speaking, we use Adam optimizer to get relatively good convergence
# Of course, you can change to SGD or RMSProp
opt = paddle.optimizer.Adam(learning_rate=LR, parameters=net.parameters())
# Record optimization results
summary_iter, summary_loss = [], []
# Optimization cycle: minimize the Hamiltonian expectation (ground-state energy)
for itr in range(1, ITR + 1):
    # Forward propagation to calculate loss function
    loss = net(Hamiltonian, N, D)
    # Under the dynamic graph mechanism, back propagation minimizes the loss function
    loss.backward()
    opt.minimize(loss)
    opt.clear_grad()
    # Update optimization results
    summary_loss.append(loss.numpy())
    summary_iter.append(itr)
    # Print results every 20 iterations
    if itr% 20 == 0:
        print("iter:", itr, "loss:", "%.4f"% loss.numpy())
        print("iter:", itr, "Ground state energy:",
              "%.4f Ha"% loss.numpy())
# -
# If you want to use CPU to train, run the following program:
# +
# Use CPU (identical training loop to the GPU cell above, for comparison)
paddle.set_device("cpu")
# Determine the parameter dimension of the network
net = StateNet(shape=[D + 1, N, 1])
# Generally speaking, we use Adam optimizer to get relatively good convergence
# Of course you can change to SGD or RMSProp
opt = paddle.optimizer.Adam(learning_rate=LR, parameters=net.parameters())
# Record optimization results
summary_iter, summary_loss = [], []
# Optimization cycle
for itr in range(1, ITR + 1):
    # Forward propagation to calculate loss function
    loss = net(Hamiltonian, N, D)
    # Under the dynamic graph mechanism, back propagation minimizes the loss function
    loss.backward()
    opt.minimize(loss)
    opt.clear_grad()
    # Update optimization results
    summary_loss.append(loss.numpy())
    summary_iter.append(itr)
    # Print results every 20 iterations
    if itr% 20 == 0:
        print("iter:", itr, "loss:", "%.4f"% loss.numpy())
        print("iter:", itr, "Ground state energy:",
              "%.4f Ha"% loss.numpy())
# -
# ## Summary
#
# According to our test, the current version of paddle_quantum can run under GPU, but it needs better GPU resources to show sufficient acceleration. In future versions, we will continue to optimize the performance of Paddle Quantum under GPU.
#
# _______
#
# ## Reference
#
# [1] [Installation Guide Windows :: CUDA Toolkit Documentation](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html)
#
# [2] [Installation Guide :: NVIDIA Deep Learning cuDNN Documentation](https://docs.nvidia.com/deeplearning/cudnn/install-guide/index.html#installwindows)
#
# [3] [Getting Started PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick)
#
#
| introduction/PaddleQuantum_GPU_EN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Lesson 6 - Taking Input, Reading and Writing Files, Functions
#
# ### Readings
#
# * Shaw: [Exercises 11-26](https://learnpythonthehardway.org/python3/ex11.html)
# * Lutz: Chapters 9, 14-17
#
# ### Table of Contents
#
# * [Taking Input](#input)
# * [Reading Files](#reading)
# * [Writing Files](#writing)
# * [Functions](#functions)
# <a id="input"></a>
#
# ### Taking Input
# In Shaw's _Learn Python The Hard Way_, he uses `input()` and `argv` to take input from the user. These don't work very well with Jupyter notebooks, but we will cover them because they can be useful in Python scripts.
# #### `input()`
#
# In Python 2, this command was `raw_input()`; in Python 3, it's been renamed `input()`. Newer versions of Jupyter Notebook support this kind of input, but it's kind of weird. Better to just 'hard code' the value for a variable
# input() blocks until the user types a line; in notebooks it is usually
# more convenient to hard-code a value, as with y below.
x = input()
y = 6
# #### `argv`
#
# When you import the `argv` special variable, it allows you to pass strings, numbers, and filenames to your python code. It doesn't work in Jupyter notebooks, however, so you'll have to use a workaround. We can comment out the `argv` calls and hard code the values we would have passed. Later, when we select "Download as > Python (.py)", we can open up that .py file and uncomment the `argv` calls. Still, it's a good idea to define all your variables and file paths at the start of your notebook.
# +
#from sys import argv
script = 'something.py' #argv[0]
value1 = 5 #argv[1]
value2 = 6 #argv[2]
value3 = 'hello' #argv[3]
print("script: %s\nfirst: %s\nsecond: %s\nthird: %s" % (script, value1, value2, value3))
# -
# In the Python script, we would uncomment the argv calls like so:
#
# ```python
# from sys import argv
#
# script = argv[0]
# value1 = argv[1]
# value2 = argv[2]
# value3 = argv[3]
#
# print("script: %s\nfirst: %s\nsecond: %s\nthird: %s" % (
# script, value1, value2, value3))
# ```
# Adding code to the beginning of your script lets you check whether the correct number of arguments was entered, and exit if not:
# ```python
# from sys import argv
# from sys import exit
#
# if len(argv) != 4:
# print("Usage: python ex.py value1 value2 value3")
# exit(1)
#
# script = argv[0]
# value1 = argv[1]
# value2 = argv[2]
# value3 = argv[3]
#
# print("script: %s\nfirst: %s\nsecond: %s\nthird: %s" % (
# script, value1, value2, value3))
# ```
# #### `click`
# Click is a conda- and pip-installable package that simplifies command-line interfaces and handling of arguments. Read the documentation here: http://click.pocoo.org/5/.
#
# Here is an example of a simple Click program:
#
# ```python
# import click
#
# @click.command()
# @click.option('--count', default=1, help='Number of greetings.')
# @click.option('--name', prompt='Your name',
# help='The person to greet.')
# def hello(count, name):
# """Simple program that greets NAME for a total of COUNT times."""
# for x in range(count):
# click.echo('Hello %s!' % name)
#
# if __name__ == '__main__':
# hello()
# ```
#
# And what it looks like when run:
#
# ```
# $ python hello.py --count=3
# Your name: John
# Hello John!
# Hello John!
# Hello John!
# ```
#
# It automatically generates nicely formatted help pages:
#
# ```
# $ python hello.py --help
# Usage: hello.py [OPTIONS]
#
# Simple program that greets NAME for a total of COUNT times.
#
# Options:
# --count INTEGER Number of greetings.
# --name TEXT The person to greet.
# --help Show this message and exit.
# ```
# #### Beginning your notebook or script
# Without using argv or click, a typical script or IPython notebook might begin like this:
# import required packages
import pandas as pd
import numpy as np
# define file paths and variables up front, so the notebook reads like a
# script with a clear configuration section
path_input_file = '~/sio209/input.txt'
path_output_file = '~/sio209/output.txt'
iterations = 10
evalue = 1e-5
color = 'dark blue'
title = 'My plot'
# <a id="reading"></a>
#
# ### Reading Files
# We can read in a text file using `open()` and then print or use it all at once or one line at a time. Note that when we read the lines of a file, the lines are removed from the file handle object (called a `TextIOWrapper`).
# NOTE(review): path is relative to the notebook location -- confirm the file exists.
filename = '../data/woodchuck.txt'
# #### Read all at once
# +
txt = open(filename)
print("Content of file %r:" % filename)
print(txt.read())
txt.close()
# +
txt = open(filename)
txt.read()
# -
txt.close()
type(txt)
# #### Read one line at a time
# +
txt = open(filename)
txt.readline()
# -
# each readline() consumes one more line from the file handle
txt.readline()
txt.readline()
txt.readline()
txt.close()
# #### Read lines as a list
# +
txt = open(filename)
txt.readlines()
# -
txt.close()
# #### Open in a `with` block. Then use `for` loop, `read()`, `readline()`, or `readlines()`.
with open(filename, 'r') as f:
    for line in f:
        line = line.rstrip()
        print(line)
with open(filename, 'r') as f:
    lines = f.read()
lines
with open(filename, 'r') as f:
    line = f.readline()
line
with open(filename, 'r') as f:
    lines = f.readlines()
lines
with open(filename, 'r') as f:
    lines = [line.rstrip() for line in f.readlines()]
lines
# #### Pandas can also read files, but it's better with tables.
df = pd.read_csv(filename, header=None)
df
# <a id="writing"></a>
#
# ### Writing Files
# We can write files using `write()`.
outfile = 'limerick.txt'
# some text to write (a limerick by Edward Lear)
line1 = "There was an Old Man with a beard\nWho said, 'It is just as I feared!"
line2 = "Two Owls and a Hen\nFour Larks and a Wren,"
line3 = "Have all built their nests in my beard!'"
# #### Write the most basic way
# +
target = open(outfile, 'w')
target.write(line1)
target.write('\n')
target.write(line2)
target.write('\n')
target.write(line3)
target.write('\n')
target.close()
# -
type(target)
# #### Write in a `with` block
#
# Again, we can use `with` to simplify things (avoid having to `close()` the file).
with open(outfile, 'w') as target:
    target.write(line1)
    target.write('\n')
    target.write(line2)
    target.write('\n')
    target.write(line3)
    target.write('\n')
# #### Write with Pandas to comma-separated values or tab-separated values
#
# The dataframe `df` contains the woodchuck text from above.
df.to_csv('woodchuck_pandas.csv')
df.to_csv('woodchuck_pandas.tsv', sep='\t')
# <a id="functions"></a>
#
# ### Functions
# Functions allow you to carry out the same task multiple times. This reduces the amount of code you write, reduces mistakes, and makes your code easier to read.
# #### Printing
def say_hello():
    """Print a fixed greeting (takes no arguments, returns None)."""
    print('Hello, world!')
say_hello()
def print_a_string(foo):
    """Print the %s-formatted (string) form of *foo*."""
    print('%s' % foo)
print_a_string('Here is a string.')
x = 'A string saved as a variable.'
print_a_string(x)
y = 300
print_a_string(y)
def print_two_things(one, two):
    """Print two values separated by ' AND '."""
    print('%s AND %s' % (one, two))
x = 'yes'
y = 10
print_two_things(x, y)
def print_three_things(*blob):
    """Print exactly three positional arguments, comma-separated.

    Raises ValueError if called with any other number of arguments
    (the tuple unpacking below requires exactly three).
    """
    v1, v2, v3 = blob
    print('%s, %s, %s' % (v1, v2, v3))
print_three_things('a', 31, ['x', 'y', 'z'])
def add_two(num1, num2):
    """Print (not return) num1 + num2; works for any '+'-compatible types."""
    print(num1 + num2)
add_two(10, 5)
add_two(1.3, 4.4)
add_two('AAA', 'bbb')
# #### Returning
def return_sum(a, b):
    """Return the sum of *a* and *b* (any '+'-compatible types)."""
    total = a + b
    return total
return_sum(5, 8)
x = return_sum(8, 13)
x
x * 2
def combine_with_commas(*blob):
    """Join the string form of every argument with commas.

    Returns an empty string when called with no arguments.
    """
    return ','.join(str(element) for element in blob)
combine_with_commas(40, 50, 60)
combine_with_commas(40, 50, 60, 70, 'hello')
# we have to redefine this function to return instead of print
def add_two(num1, num2):
    """Return num1 + num2; works for numbers, strings, and other '+' types."""
    return num1 + num2
x = 100
y = 100
z = add_two(x, y)
z
type(z)
a = '100'
b = '100'
c = add_two(a, b)
c
type(c)
a = '100'
b = '100'
c = add_two(int(a), int(b))
c
type(c)
def sum_product_exponent(v1, v2):
    """Return the tuple (v1 + v2, v1 * v2, v1 ** v2)."""
    return v1 + v2, v1 * v2, v1 ** v2
sum_product_exponent(2, 5)
my_sum, my_product, my_exponent = sum_product_exponent(2, 5)
my_sum
my_product
my_exponent
| lessons/lesson06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# !pip install pycapnp
# +
# %load_ext autoreload
# %autoreload 2
# # %load_ext line_profiler
import numpy as np
import pandas as pd
# import torch
# import gc
import syft as sy
from syft.core.adp.entity import Entity
from syft.core.adp.entity_list import EntityList
# from pympler.asizeof import asizeof #pip install pympler
from syft import serialize
from syft import deserialize
import timeit
# from syft.core.tensor.autodp.dp_tensor_converter import convert_to_gamma_tensor
# from functools import reduce
import time
import pyarrow as pa
from syft.core.tensor.autodp.row_entity_phi import RowEntityPhiTensor as REPT
# -
# Load the sample dataset; assumes columns 'impressions' and 'publication_title'
# exist in the parquet file -- TODO confirm schema against the data source.
df = pd.read_parquet("1M_rows_dataset_sample.parquet")
print("Number of Rows: ",df.shape[0])
df.head()
print(df.shape[0])
# +
# factor = 1
# df = pd.concat([df] * factor, ignore_index=True)
# +
# df.shape[0] / 1e6
# + tags=[]
# # Let's login into the domain node
# domain_node = sy.login(email="<EMAIL>", password="<PASSWORD>", port=8081)
# -
# %%time
name = "Tweets- 100000 rows dataset "
impressions = ((np.array(list(df['impressions'])))).astype(np.int32)
publication_title = ((list(df['publication_title'])))
type(publication_title[0])
# %%time
# Build one Entity per row (O(n) object creation -- the slow step timed here)
entities = list()
for i in range(len(publication_title)):
    entities.append(Entity(name=publication_title[i]))
t0 = time.time()
# Wrap impressions as a private syft tensor with per-row entity ownership
tweets_data = sy.Tensor(impressions).private(min_val=0, max_val=30, entities = entities)
tf = time.time() - t0
print(tf)
def extend_tweet_data(data, multiplier: int):
    """Return a copy of `data` with its rows repeated `multiplier` times.

    NOTE(review): this reaches into private attributes (`child.child`,
    `child._entities`) and replaces all entities with a single "ishan"
    entity repeated to match the new length -- the per-row entity
    information from the original tensor is discarded; confirm intended.
    """
    new_data = data.copy()
    new_data.child.child = new_data.child.child.repeat(multiplier)
    new_data.child._entities = EntityList.from_objs(np.array(["ishan"] * multiplier * len(new_data.child._entities.entities_indexed)))
    return new_data
def benchmark(data):
    """Arrow-serialize ``data.child``, print the elapsed wall-clock seconds,
    and return the serialized payload."""
    t0 = time.time()
    payload = data.child.arrow_serialize()
    t1 = time.time()
    print(t1 - t0)
    return payload
# Round-trip check: serialize the tensor with Arrow and verify that
# deserialization reproduces the data, bounds, and entity metadata.
mil = tweets_data
len(mil)
b = benchmark(mil)
type(b)
x = REPT.arrow_deserialize(b)
x
len(mil.child)
len(x.child)
(mil.child.child == x.child).all()
mil.child._min_vals == x._min_vals
mil.child._max_vals == x._max_vals
mil.child._entities.entities_indexed == x._entities.entities_indexed
mil.child._entities.one_hot_lookup == x._entities.one_hot_lookup
# NOTE(review): the line below duplicates the previous comparison
mil.child._entities.one_hot_lookup == x._entities.one_hot_lookup
mil.child._entities.one_hot_lookup
x._entities.one_hot_lookup
# Scale up by repeating rows (10x and 100x) and re-time serialization
ten_mil = extend_tweet_data(mil, 10)
b = benchmark(ten_mil)
hundred_mil = extend_tweet_data(mil, 100)
len(hundred_mil)
b = benchmark(hundred_mil)
# +
# pyarrow record batch
# benchmark(hundred_mil)
# step 1: 11.958800077438354
# Total Size: 900.042533
# step 2: 0.91678786277771
# 13.002313137054443
# -
benchmark(hundred_mil)
bill = extend_tweet_data(mil, 1000)
benchmark(bill)
# +
# len(tweets_data.child.child)/1e6
# +
# len(tweets_data.child._entities)/1e6
# +
# # %lprun -f tweets_data.child.child[0].entity.simple_assets_for_serde tweets_data.child.child[0].entity.simple_assets_for_serde()
# +
# # %lprun -f tweets_data.child.arrow_serialize tweets_data.child.arrow_serialize()
# + tags=[]
# -
benchmark(bill)
# + tags=[]
# tweets_data.child.child[0].scalar_manager.prime2symbol
# -
# Baseline: one large pyarrow serialize vs. one million tiny serializes.
# NOTE(review): pa.serialize is deprecated in newer pyarrow releases -- confirm version.
# %%time
pa.serialize(np.random.rand(1_000_000))
# %%time
for i in range(1_000_000):
    pa.serialize(np.random.rand(1))
# %%time
serialize(tweets_data.child.child[0],to_bytes=True)
# %%time
result = tweets_data.sum()
tweets_data.child.serde_concurrency= 1
# +
#measuring size of objects in python is not easy,
#as there might be several references in a user defined class
#we pympler library to measure the size of an object.
#pymler does not give an exact size on disk, but a close accurate instead of sys.getsizeof()
# NOTE(review): asizeof is referenced here but its import
# (`from pympler.asizeof import asizeof`) is commented out near the top of
# the notebook -- this cell raises NameError unless it is re-enabled.
print("Size of Twitter Tensor Data(MB) ====> " , asizeof(tweets_data)/(1024*1024))
# + tags=[]
#Twitter data serialization.
start = timeit.default_timer()
serialized_data = serialize(tweets_data,to_bytes=True)
end = timeit.default_timer()
print("Serialization Time =====>" , end-start," seconds" )
print("Twitter Serialized Data Size ",len(serialized_data)/(1024*1024))
# + tags=[]
# %lprun -f tweets_data.child._object2proto tweets_data.child._object2proto()
# -
#Twitter data derserialization
start = timeit.default_timer()
deserialized_data = deserialize(serialized_data,from_bytes=True)
end = timeit.default_timer()
print("Deserialization Time =====>" , end-start," seconds" )
print("Twitter Deserialized Data Size ",asizeof(deserialized_data)/(1024*1024))
# + tags=[]
# %%time
#Uploading Dataset....
# NOTE(review): domain_node is only defined by the sy.login(...) call that is
# commented out earlier in the notebook -- re-enable it before running this cell.
domain_node.load_dataset(
    assets={"tweets": tweets_data},
    name=name,
    description="""Tweets- 1M rows """
)
#The main bottleneck is the serialization as uploading dataset is a blocking call(immediate_msg_with_reply),
#bottleneck = serialization at client + DeSerialization at node
# -
domain_node.datasets
dataset = domain_node.datasets[0]["tweets"]
# %%time
# Remote differentially-private sum, then publish with noise sigma=0.1
res = dataset.sum()
res.block
res.exists
res = res.publish(sigma=0.1)
res.get()
# Inspect a few individual tensor elements
l = tweets_data.child.child
a = l[0]
b= l[50611]
c = l[83931]
print(a)
print(b)
print(c)
# Group tensor elements into sublists by entity, preserving first-seen order.
# NOTE(review): `c` here clobbers the tensor element assigned to `c` in the
# previous cell -- it is reused as the group counter.
split_lst=[]
d = {}
c=0
for i in l:
    if i.entity not in d:
        d[i.entity]=c
        split_lst.append([i])
        c+=1
    else:
        split_lst[d[i.entity]].append(i)
# + tags=[]
# Assumes at least four distinct entities were found -- IndexError otherwise.
first= split_lst[0]
second = split_lst[1]
third = split_lst[2]
fourth = split_lst[3]
# -
def list_sum(lst):
    """Return the '+'-fold of a non-empty list (raises IndexError if empty)."""
    total = lst[0]
    for item in lst[1:]:
        total += item
    return total
# Manual accumulation over the first entity group (same fold as list_sum above)
s=first[0]
for i in first[1::]:
    s=s+i
print("final: ",s)
# FIX: `reduce` is used below but its import was commented out at the top of
# the notebook, causing a NameError -- import it here so the cell is self-contained.
from functools import reduce
def lst_sum(a,b):
    """Binary '+' helper for reduce()."""
    return a+b
# Fold each entity group down to a single tensor value
t1 = reduce(lst_sum,first)
t2 = reduce(lst_sum,second)
t3 = reduce(lst_sum,third)
t4 = reduce(lst_sum,fourth)
# %%time
# Pairwise combination of the per-group sums
r1 = t1+t2
r2 = t3+t4
tweets_data.child.child[1].dtype
# Scratch experiments: Entity equality vs identity, and numpy <-> pyarrow conversion
from syft.core.adp.entity import Entity
a = Entity("a")
b = Entity("a")
a == b
# Entities with the same name still get distinct ids
a.id
b.id
a = ["a", "b", "c"]
x = np.array(a)
x
y = pa.array(x)
y
x
apache_arrow = pa.Tensor.from_numpy(obj=x)
rb = pa.RecordBatch.from_pylist([{"entity":x}])
rb
# Convert the arrow column back to numpy (strings require a copy)
y = rb[0].to_numpy(zero_copy_only=False)
y
| notebooks/Experimental/Rasswanth/Data Upload-Rasswanth-Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homogeneous Transformation
# 
import math
def homogeneusTransformation(x_particle, y_particle, theta, x_obs, y_obs):
    """Transform an observation from the particle's frame to map coordinates.

    Applies a 2-D rotation by `theta` followed by a translation to the
    particle position, then rounds each coordinate to the nearest int.

    Returns:
        (x_map, y_map) integer map coordinates.
    """
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    x_map = int(round(x_particle + cos_t * x_obs - sin_t * y_obs, 0))
    y_map = int(round(y_particle + sin_t * x_obs + cos_t * y_obs, 0))
    return x_map, y_map
## Coordinates OBS1 -> transforms to map coordinates (6, 3)
x_part = 4
y_part = 5
x_obs = 2
y_obs = 2
theta = -math.pi/2
homogeneusTransformation(x_part,y_part,theta,x_obs,y_obs)
## Coordinates OBS2 -> transforms to map coordinates (2, 2)
x_part = 4
y_part = 5
x_obs = 3
y_obs = -2
theta = -math.pi/2
homogeneusTransformation(x_part,y_part,theta,x_obs,y_obs)
## Coordinates OBS3 -> transforms to map coordinates (0, 5)
x_part = 4
y_part = 5
x_obs = 0
y_obs = -4
theta = -math.pi/2
homogeneusTransformation(x_part,y_part,theta,x_obs,y_obs)
# # Associations
# Now that observations have been transformed into the map's coordinate space, the next step is to associate each transformed observation with a land mark identifier.
# 
# In the map exercise above we have 5 total landmarks each identified as L1, L2, L3, L4, L5, and each with a known map location. We need to associate each transformed observation TOBS1, TOBS2, TOBS3 with one of these 5 identifiers. To do this we must associate the closest landmark to each transformed observation.
# # Calculating the Particle's Final Weight
# Now we that we have done the measurement transformations and associations, we have all the pieces we need to calculate the particle's final weight. The particles final weight will be calculated as the product of each measurement's Multivariate-Gaussian probability density.
#
# The Multivariate-Gaussian probability density has two dimensions, x and y. The mean of the Multivariate-Gaussian is the measurement's associated landmark position and the Multivariate-Gaussian's standard deviation is described by our initial uncertainty in the x and y ranges. The Multivariate-Gaussian is evaluated at the point of the transformed measurement's position. The formula for the Multivariate-Gaussian can be seen below.
# 
# To complete the next set of quizzes, calculate each measurement's Multivariate-Gaussian probability density using the formula above and the previously calculated values. In this example the standard deviation for both x and y is 0.3.
#
# Note that $x$ and $y$ are the observations in map coordinates from the landmarks quiz, and $\mu_x$, $\mu_y$ are the coordinates of the nearest landmarks. These should correspond to the correct responses from previous quizzes.
def multiv_prob(sig_x, sig_y, x_obs, y_obs, mu_x, mu_y):
    """Evaluate the 2-D (axis-aligned) multivariate Gaussian density.

    The mean is the associated landmark position (mu_x, mu_y), the standard
    deviations are (sig_x, sig_y), and the density is evaluated at the
    transformed observation (x_obs, y_obs).
    """
    norm = 1 / (2 * math.pi * sig_x * sig_y)
    dx = x_obs - mu_x
    dy = y_obs - mu_y
    exponent = dx * dx / (2 * sig_x ** 2) + dy * dy / (2 * sig_y ** 2)
    return norm * math.exp(-exponent)
## OBS1 values (trailing semicolons are harmless C++ carry-over in Python)
sig_x = 0.3;
sig_y = 0.3;
x_obs = 6;
y_obs = 3;
mu_x = 5;
mu_y = 3;
weight1 = multiv_prob(sig_x, sig_y, x_obs, y_obs, mu_x, mu_y)
print('{:0.2e}'.format(weight1))
## OBS2 values
sig_x = 0.3;
sig_y = 0.3;
x_obs = 2;
y_obs = 2;
mu_x = 2;
mu_y = 1;
weight2 = multiv_prob(sig_x, sig_y, x_obs, y_obs, mu_x, mu_y)
print('{:0.2e}'.format(weight2))
## OBS3 values
sig_x = 0.3;
sig_y = 0.3;
x_obs = 0;
y_obs = 5;
mu_x = 2;
mu_y = 1;
weight3 = multiv_prob(sig_x, sig_y, x_obs, y_obs, mu_x, mu_y)
print('{:0.2e}'.format(weight3))
## Output final weight: the particle's weight is the product of the
## per-observation densities
final_weight = weight1 * weight2 * weight3
print('Final weight: ' + str('{:0.2e}'.format(final_weight)))
print('Weight1: ' + str('{:0.5e}'.format(weight1)))
print('Weight2: ' + str('{:0.5e}'.format(weight2)))
print('Weight3: ' + str('{:0.5e}'.format(weight3)))
print('Final weight: ' + str('{:0.5e}'.format(final_weight)))
from IPython.display import HTML
# youtu.be share links refuse to render inside an <iframe> (frame-blocking);
# the embeddable form is youtube.com/embed/<video-id>.
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/3VRp4chnPE4" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
| Particle_Filter_Implementation/particleFilterImplementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Custom URL prefix with Seldon and Ambassador
#
# This notebook shows how you can deploy Seldon Deployments with custom Ambassador configuration.
# +
from IPython.core.magic import register_line_cell_magic


@register_line_cell_magic
def writetemplate(line, cell):
    """Cell magic: render the cell body via str.format against globals()
    and write the result to the file named on the magic line."""
    rendered = cell.format(**globals())
    with open(line, "w") as out:
        out.write(rendered)
# -
# VERSION=!cat ../../../version.txt
# VERSION is populated by the shell magic above (commented out by the
# jupytext conversion); it returns a list of output lines, so take line 0.
VERSION=VERSION[0]
VERSION
# ## Setup Seldon Core
#
# Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Install Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core). Instructions [also online](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).
# ## Launch main model
#
# We will create a very simple Seldon Deployment with a dummy model image `seldonio/mock_classifier:1.0`. This deployment is named `example`. We will add custom Ambassador config which sets the Ambassador prefix to `/mycompany/ml`
#
# We must ensure we set the correct service endpoint. Seldon Core creates an endpoint of the form:
#
# `<spec.name>-<predictor.name>.<namespace>:<port>`
#
# Where
#
# * `<spec-name>` is the name you give to the Seldon Deployment spec: `example` below
# * `<predictor.name>` is the predictor name in the Seldon Deployment: `single` below
# * `<namespace>` is the namespace your Seldon Deployment is deployed to
# * `<port>` is the port either 8000 for REST or 5000 for gRPC
#
# This will allow you to set the `service` value in the Ambassador config you create. So for the example below we have:
#
# ```
# service: production-model-example.seldon:8000
# ```
#
#
# %%writetemplate model_custom_ambassador.yaml
apiVersion: machinelearning.seldon.io/v1alpha2
kind: SeldonDeployment
metadata:
labels:
app: seldon
name: example-custom
spec:
annotations:
seldon.io/ambassador-config: 'apiVersion: ambassador/v2
kind: Mapping
name: seldon_example_rest_mapping
prefix: /mycompany/ml/
service: example-custom-single.seldon:8000
timeout_ms: 3000'
name: production-model
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/mock_classifier:{VERSION}
imagePullPolicy: IfNotPresent
name: classifier
terminationGracePeriodSeconds: 1
graph:
children: []
endpoint:
type: REST
name: classifier
type: MODEL
name: single
replicas: 1
# !kubectl create -f model_custom_ambassador.yaml
# !kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=example-custom -o jsonpath='{.items[0].metadata.name}')
# ### Get predictions
# +
from seldon_core.seldon_client import SeldonClient

# Client bound to the SeldonDeployment created above (namespace "seldon").
sc = SeldonClient(deployment_name="example-custom", namespace="seldon")
# -
# #### REST Request
# Route the request through Ambassador using the custom URL prefix.
r = sc.predict(gateway="ambassador", transport="rest", gateway_prefix="/mycompany/ml")
# `== True` was redundant; assert on truthiness directly.
assert r.success
print(r)
# !kubectl delete -f model_custom_ambassador.yaml
| examples/ambassador/custom/ambassador_custom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: learn-env
# language: python
# name: learn-env
# ---
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
# NOTE: GridSearchCV was imported twice in the original; deduplicated and
# the sklearn imports grouped by submodule.
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
# English Premier League match data, one CSV per season.
df = pd.read_csv('E0 (1).csv')
df2 = pd.read_csv('E0 (2).csv')
df3 = pd.read_csv('E0 (3).csv')
df4 = pd.read_csv('E0 (4).csv')
df5 = pd.read_csv('E0 (5).csv')
pd.set_option('display.expand_frame_repr', False)
df.tail()
# DataFrame.append() is deprecated and removed in pandas 2.0; concatenate
# all seasons in a single call instead of chaining appends.
new_df = pd.concat([df, df2, df3, df4, df5])
# Keep only the match-stat and Pinnacle-odds columns used downstream.
new_df = new_df[['Div', 'Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'HTHG',
                 'HTAG', 'HTR', 'Referee', 'HS', 'AS', 'HST', 'AST', 'HF', 'AF', 'HC',
                 'AC', 'HY', 'AY', 'HR', 'AR', 'PSH', 'PSD', 'PSA']]
new_df.drop(columns = 'Div', inplace = True)
# +
# Duplicate every match — once from the home side's perspective and once
# from the away side's — so each row carries a 'Team' and an 'Opponent'.
new_df2 = new_df.copy()
new_df['Team'] = new_df.HomeTeam
new_df['Opponent'] = new_df.AwayTeam
new_df2['Team'] = new_df.AwayTeam
new_df2['Opponent'] = new_df.HomeTeam
# df3 now has two rows per match (home view + away view).
df3 = pd.concat([new_df, new_df2])
# -
def home_away_numerical(df):
    """
    Add a numerical 'HomeAway' column: 1 when the team played at home, 0 when away.

    Iterates over every home team, flags that team's games, and concatenates the
    per-team chunks back into a single DataFrame (index reset). Note: rows whose
    'Team' never appears in 'HomeTeam' are dropped, as in the original.
    """
    team_list = df.HomeTeam.unique()
    chunks = []
    for team in team_list:
        # .copy() fixes the original's assignment into a view of `df`
        # (SettingWithCopyWarning / silently-lost writes in newer pandas).
        all_games = df[df['Team'] == team].copy()
        all_games['HomeAway'] = 1
        all_games.loc[all_games['AwayTeam'] == team, 'HomeAway'] = 0
        chunks.append(all_games)
    dataframe = pd.concat(chunks, ignore_index=True)
    return dataframe
df3 = home_away_numerical(df3) # generates home dummy variable
df3.HomeAway.unique()
# Trim to the columns used for modelling; df4 reorders them Team-first.
df3 = df3[[ 'Date', 'HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR', 'HTHG',
       'HTAG', 'HTR', 'Referee','HST', 'AST', 'HC',
       'AC', 'HR', 'AR', 'PSH', 'PSD', 'PSA', 'Team', 'Opponent', 'HomeAway' ]]
df4 = df3[['Team', 'Opponent', 'HomeAway', 'Date', 'FTHG', 'FTAG', 'FTR', 'HTHG',
       'HTAG', 'HTR', 'Referee','HST', 'AST', 'HC',
       'AC', 'HR', 'AR', 'PSH', 'PSD', 'PSA', ]].copy()
# +
# Per-team averages of the match statistics, restricted to home or away
# games as appropriate. The original repeated the same loop ten times (once
# per statistic) and rebuilt team_list before every one; a single helper
# builds each {team: mean} lookup instead. Dict names and values are
# unchanged, so all downstream .map() calls keep working.
team_list = df4.Team.unique().tolist()


def _avg_by_team(col, home_away):
    """Return {team: mean of `col`} over that team's home (1) or away (0) games."""
    return {
        team: df4.loc[(df4.Team == team) & (df4.HomeAway == home_away), col].mean()
        for team in team_list
    }


average_hthg = _avg_by_team('HTHG', 1)  # half-time home goals
average_htag = _avg_by_team('HTAG', 0)  # half-time away goals
average_fthg = _avg_by_team('FTHG', 1)  # full-time home goals
average_ftag = _avg_by_team('FTAG', 0)  # full-time away goals
average_hst = _avg_by_team('HST', 1)    # home shots on target
average_ast = _avg_by_team('AST', 0)    # away shots on target
average_hc = _avg_by_team('HC', 1)      # home corners
average_ac = _avg_by_team('AC', 0)      # away corners
average_hr = _avg_by_team('HR', 1)      # home red cards
average_ar = _avg_by_team('AR', 0)      # away red cards
# +
# Attach each team's pre-computed averages as feature columns on df4.
df4['average_hthg'] = df4['Team'].map(average_hthg)
df4['average_htag'] = df4['Team'].map(average_htag)
df4['average_fthg'] = df4['Team'].map(average_fthg)
df4['average_ftag'] = df4['Team'].map(average_ftag)
df4['average_hst'] = df4['Team'].map(average_hst)
df4['average_ast'] = df4['Team'].map(average_ast)
df4['average_hc'] = df4['Team'].map(average_hc)
df4['average_ac'] = df4['Team'].map(average_ac)
df4['average_hr'] = df4['Team'].map(average_hr)
df4['average_ar'] = df4['Team'].map(average_ar)
# -
# Keep identifiers, odds, the per-team average features, and the target (FTR).
df5 = df4[['Team', 'Opponent', 'HomeAway', 'Date', 'Referee', 'PSH',
       'PSD', 'PSA', 'average_hthg', 'average_htag',
       'average_fthg', 'average_ftag', 'average_hst', 'average_ast',
       'average_hc', 'average_ac', 'average_hr', 'average_ar', 'FTR']].copy()
df5.Date = pd.to_datetime(df5.Date)
df5 = df5.sort_values(by='Date', ascending = True)
df_odds = df5[['PSH', 'PSD', 'PSA']]
# Drop the odds columns by name. The original passed the df_odds DataFrame
# itself to .drop(), which only works because iterating a DataFrame yields
# column names; naming the columns is explicit and version-safe.
df5 = df5.drop(columns=['PSH', 'PSD', 'PSA'])
df6 = df5.drop(columns = 'Referee')
# +
# Target is the full-time result (FTR); Date is kept out of the features
# and used only for chronological ordering.
y = df6.FTR
x = df6.drop(columns = ['FTR', 'Date'])
saved = df6.drop(columns = ['FTR', 'Date'])
# -
# One-hot encode the categorical columns (Team, Opponent).
x= pd.get_dummies(x)
new_df.Date = pd.to_datetime(new_df.Date)
df_final = new_df.sort_values(by='Date', ascending = True)
df_final.shape
len(df_final)
table = pd.pivot_table(df_final, index=['HomeTeam', 'AwayTeam'])
table2 = pd.pivot_table(df_final, index=['AwayTeam', 'HomeTeam'])
# NOTE(review): this cell recomputes team_list and average_hthg exactly as
# earlier in the notebook, then applies the same .map() repeated in the next
# cell — redundant but harmless.
team_list = df4.Team.unique()
team_list = team_list.tolist()
list_of_hthg = []
for team in team_list:
    dfnew = df4.loc[(df4.Team==team) & (df4.HomeAway == 1)]['HTHG'].mean()
    list_of_hthg.append(dfnew)
average_hthg = dict(zip(team_list, list_of_hthg))
df_final['average_halftimehomegoals'] = df_final['HomeTeam'].map(average_hthg)
# +
# Map the per-team averages onto each match, home team vs away team.
df_final['average_halftimehomegoals'] = df_final['HomeTeam'].map(average_hthg)
df_final['average_halftimeawaygoals'] = df_final['AwayTeam'].map(average_htag)
df_final['average_fulltimehomegoals'] = df_final['HomeTeam'].map(average_fthg)
df_final['average_fulltimeawaygoals'] = df_final['AwayTeam'].map(average_ftag)
df_final['average_hometeamshotsontarget'] = df_final['HomeTeam'].map(average_hst)
df_final['average_awayteamshotsontarget'] = df_final['AwayTeam'].map(average_ast)
df_final['average_homecorners'] = df_final['HomeTeam'].map(average_hc)
df_final['average_awaycorners'] = df_final['AwayTeam'].map(average_ac)
df_final['average_homereds'] = df_final['HomeTeam'].map(average_hr)
df_final['average_awayreds'] = df_final['AwayTeam'].map(average_ar)
# -
table = pd.pivot_table(df_final, index=['HomeTeam', 'AwayTeam'])
table.head()
# +
# Home-vs-away ratio features. NOTE(review): a zero away-side average
# produces inf here (pandas division) — confirm downstream models tolerate it.
df_final['avg_half_timegoals_homevsawayratio'] = df_final['average_halftimehomegoals']/df_final['average_halftimeawaygoals']
df_final['avg_full_timegoals_homevsawayratio'] = df_final['average_fulltimehomegoals']/df_final['average_fulltimeawaygoals']
df_final['avg_full_timeshotsontarget_homevsawayratio'] = df_final['average_hometeamshotsontarget']/df_final['average_awayteamshotsontarget']
df_final['avg_corners_homevsawayratio'] = df_final['average_homecorners']/df_final['average_awaycorners']
df_final['avg_reds_homevsawayratio'] = df_final['average_homereds']/df_final['average_awayreds']
# -
pd.options.display.max_rows = 1670
df_final.columns
# Feature matrix: team identities plus the engineered average/ratio features.
x = df_final[['Date', 'HomeTeam', 'AwayTeam', 'average_halftimehomegoals', 'average_halftimeawaygoals',
       'average_fulltimehomegoals', 'average_fulltimeawaygoals',
       'average_hometeamshotsontarget', 'average_awayteamshotsontarget',
       'average_homecorners', 'average_awaycorners', 'average_homereds',
       'average_awayreds', 'avg_half_timegoals_homevsawayratio',
       'avg_full_timegoals_homevsawayratio',
       'avg_full_timeshotsontarget_homevsawayratio',
       'avg_corners_homevsawayratio', 'avg_reds_homevsawayratio']]
y = df_final[['FTR']]
# Chronological train/test split on the first 1510 matches. The original
# used iloc[1511:] for the test sets, silently dropping row 1510 from both
# train and test.
x_train = x.iloc[:1510]
x_test = x.iloc[1510:]
y_train = y.iloc[:1510]
y_test = y.iloc[1510:]
new_df = new_df.sort_values(by = 'Date')
odds_df = new_df[['PSH', 'PSD', 'PSA']]
new_df.tail()
x_train.head()
| footballpredictor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Get ID & its parent ID pairs using annotation ontology data
# This notebook prepares ID & its parent ID pairs in csv format.
#
# - input
# - 1_VC_pruned_segmented_VC.json
# - output
# - ID_parentID_pairs.csv
# # Set variables
# +
# Input/output locations for the ontology-to-CSV conversion.
dir_data = 'data'
fn_input_AO = 'AObase.json'
fn_output_csv = 'ID_parentID_AObase.csv'
# -
import os
import json
import pandas as pd
from collections import OrderedDict
from jsonpath_rw import jsonpath, parse
# # Load data
# object_pairs_hook=OrderedDict preserves the JSON key order of the ontology.
with open(os.path.join(dir_data, fn_input_AO)) as f:
    df_AO = json.load(f, object_pairs_hook=OrderedDict)
# # Get pairs of ID and its parent ID using an annotation ontology file
# Prepare a pandas dataframe of [id, parent id] pairs
# (columns: original ID, parent ID)
# Walk the ontology tree depth-first (pre-order), pairing each structure id
# with its parent_structure_id. This replaces the original jsonpath + eval()
# approach, which rebuilt an attribute path as a string and eval()'d it —
# fragile and needlessly unsafe. NOTE(review): pre-order matches jsonpath's
# document-order traversal — verify the CSV row order against previous output.
def _collect_id_pairs(node, pairs):
    """Append [id, parent_structure_id] for `node`, then recurse into its children."""
    pairs.append([node['id'], node['parent_structure_id']])
    for child in node.get('children', []):
        _collect_id_pairs(child, pairs)


IDparentID_list = []
_collect_id_pairs(df_AO['msg'][0], IDparentID_list)
IDparentID = pd.DataFrame(IDparentID_list, columns = ['ID', 'parentID'])
# # Save a csv file of ID-parentID pairs
IDparentID.to_csv(os.path.join(dir_data, fn_output_csv), index=False)
# # Check data
IDparentID.head()
| notebooks/Get_ID_parentID_pairs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 5 - Welicome to Sandbox
#
# In the last tutorials, we've been initializing our hook and all of our workers by hand every time. This can be a bit annoying when you're just playing around / learning about the interfaces. So, from here on out we'll be creating all these same variables using a special convenience function.
#
#
# Person wey translate am:
# - <NAME> - Twitter: [@techie991](https://twitter.com/techie991)
#
import torch
import syft as sy
# create_sandbox populates the calling namespace (globals()) with hooked
# virtual workers (bob, alice, ...) and preloaded demo datasets.
sy.create_sandbox(globals())
# ### Wetin sandbox go give us?
#
# As you don see sey we create several virtual workers and we come load am for plenti test dataset, to dey do privacy preserving techniques such as Federated Learning we go distribute am to various workers.
#
# We don create six workers....
workers
# We go add sometin for the global variables wey we go use right away!
hook
bob
# # Part 2: Worker Search Functionality
#
# To get abiliti to search for datasets on a remote machine na one important aspect of doing remote data science. Make we think of research lab wey dey ask hospital for "radio" datasets.
torch.Tensor([1,2,3,4,5])
# Tag and describe tensors so remote workers can find them by keyword search.
x = torch.tensor([1,2,3,4,5]).tag("#fun", "#boston", "#housing").describe("The input datapoints to the boston housing dataset.")
y = torch.tensor([1,2,3,4,5]).tag("#fun", "#boston", "#housing").describe("The input datapoints to the boston housing dataset.")
z = torch.tensor([1,2,3,4,5]).tag("#fun", "#mnist",).describe("The images in the MNIST training dataset.")
x
# +
# Send the tensors to bob; the local variables become pointer tensors.
x = x.send(bob)
y = y.send(bob)
z = z.send(bob)
# search for an exact match on these tags (or on the description)
results = bob.search(["#boston", "#housing"])
# -
results
print(results[0].description)
# # Part 3: Virtual Grid
#
# A Grid na collection of workers wey go fit give convenience functions for when you want put dataset togeda.
# A PrivateGridNetwork searches across all sandbox workers at once.
grid = sy.PrivateGridNetwork(*workers)
results = grid.search("#boston")
# Narrow the search with multiple tags (data vs target tensors).
boston_data = grid.search("#boston","#data")
boston_target = grid.search("#boston","#target")
| examples/tutorials/translations/Pidgin/Part 05 - Welicome to Sandbox-Pidgin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Data Prerocessing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv('datasets/studentscores.csv')
dataset.head()
# First column is the feature (hours studied); second is the target (score).
X = dataset.iloc[ : , : 1 ].values
Y = dataset.iloc[ : , 1].values
from sklearn.model_selection import train_test_split
# 75/25 split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size = 1/4, random_state = 0)
# Fitting Simple Linear Regression Model to the train set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
# fit() trains in place and returns self, so the original's re-assignment
# (`regressor = regressor.fit(...)`) was redundant.
regressor.fit(X_train, Y_train)
# Predicting the result
Y_pred = regressor.predict(X_test)
Y_pred
# Visualize the training results: red = actual points, blue = fitted line.
plt.scatter(X_train, Y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
# Visualize the test results against the same fitted line.
plt.scatter(X_test, Y_test, color='red')
plt.plot(X_test, regressor.predict(X_test), color='blue')
| Code-ipynb/Simple Linear Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Understanding ROS Nodes
#
# This tutorial introduces ROS graph concepts and discusses the use of `roscore`, `rosnode`, and `rosrun` commandline tools.
#
# Source: [ROS Wiki](http://wiki.ros.org/ROS/Tutorials/UnderstandingNodes)
# ### Quick Overview of Graph Concepts
# * Nodes: A node is an executable that uses ROS to communicate with other nodes.
# * Messages: ROS data type used when subscribing or publishing to a topic.
# * Topics: Nodes can publish messages to a topic as well as subscribe to a topic to receive messages.
# * Master: Name service for ROS (i.e. helps nodes find each other)
# * rosout: ROS equivalent of stdout/stderr
# * roscore: Master + rosout + parameter server (parameter server will be introduced later)
# ### roscore
#
# `roscore` is the first thing you should run when using ROS.
# + magic_args="--bg" language="bash"
# roscore
# -
# ### Using `rosnode`
#
# `rosnode` displays information about the ROS nodes that are currently running. The `rosnode list` command lists these active nodes:
# + language="bash"
# rosnode list
# + language="bash"
# rosnode info rosout
# -
# ### Using `rosrun`
#
# `rosrun` allows you to use the package name to directly run a node within a package (without having to know the package path).
# + magic_args="--bg" language="bash"
# rosrun turtlesim turtlesim_node
# -
# NOTE: The turtle may look different in your turtlesim window. Don't worry about it - there are [many types of turtle](http://wiki.ros.org/Distributions#Current_Distribution_Releases) and yours is a surprise!
# + language="bash"
# rosnode list
# -
# One powerful feature of ROS is that you can reassign Names from the command-line.
#
# Close the turtlesim window to stop the node. Now let's re-run it, but this time use a [Remapping Argument](http://wiki.ros.org/Remapping%20Arguments) to change the node's name:
# + magic_args="--bg" language="bash"
# rosrun turtlesim turtlesim_node __name:=my_turtle
# -
# Now, if we go back and use `rosnode list`:
# + language="bash"
# rosnode list
# -
# ### Review
# What was covered:
#
# * roscore = ros+core : master (provides name service for ROS) + rosout (stdout/stderr) + parameter server (parameter server will be introduced later)
# * rosnode = ros+node : ROS tool to get information about a node.
# * rosrun = ros+run : runs a node from a given package.
#
# Now that you understand how ROS nodes work, let's look at how [ROS topics](ROS%20Topics.ipynb) work.
| ROS Nodes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hide
# %load_ext autoreload
# %autoreload 2
#hide
from time_series_segmentation.data import *
# # Time series segmentation
#
# > Univariate and multivariate time series segmentation. Including example data sets.
# This file will become your README and also the index of your documentation.
# ## How to use
# Fill me in please! Don't forget code examples:
# Liverpool ion-channel demo data: train/test frames plus a sample submission.
df_train, df_test, df_sample = get_liverpool_ion_data()
df_train.head()
# Hold out 10% of the data, splitting on seq_idx so sequences stay intact.
train_train, train_val = dataframe_split(df_train, fraction = 0.1, random_state = 42, sep_col='seq_idx')
# hide
from nbdev.export import notebook2script
notebook2script()
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Imports
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import sem
# +
# Raw data import: join clinical measurements with each mouse's drug arm.
ct_df = pd.read_csv("data/clinical_trial_data.csv")
md_df = pd.read_csv("data/mouse_drug_data.csv")
merged_df = pd.merge(ct_df, md_df, on="Mouse ID")
merged_df.head()
# +
# Data cleaning and engineering/grouping pre-process before visualization
# NOTE(review): the original evaluated a .loc[...] filter here (excluding
# Ceftamin/Naftisol/Propriva/Ramicane/Stelasyn/Zoniferol, with Naftisol
# listed twice) but never assigned the result, so it had no effect. The dead
# expression is removed; assigning it would have changed every downstream
# aggregate, so behavior is preserved as-is.
group_drug_df = merged_df.groupby(['Drug','Timepoint'])
# Mean tumor volume per drug per timepoint.
TumorVolumeMean_df = group_drug_df.mean()['Tumor Volume (mm3)']
TumorVolumeMean_df.head()
TVM_indexreset_df = TumorVolumeMean_df.reset_index()
TVM_indexreset_df.head()
# Per-drug series for the four treatments that get plotted below.
CapomulinMeanVolume = TVM_indexreset_df.loc[TVM_indexreset_df['Drug'] == 'Capomulin']
InfubinolMeanVolume = TVM_indexreset_df.loc[TVM_indexreset_df['Drug'] == 'Infubinol']
KetaprilMeanVolume = TVM_indexreset_df.loc[TVM_indexreset_df['Drug'] == 'Ketapril']
PlaceboMeanVolume = TVM_indexreset_df.loc[TVM_indexreset_df['Drug'] == 'Placebo']
# -
# # Tumor volume change over time per treatment
# +
fig, ax = plt.subplots()
plt.scatter(CapomulinMeanVolume['Timepoint'],CapomulinMeanVolume['Tumor Volume (mm3)'], label = 'Capomulin', marker="o", facecolors="red", edgecolors="black",alpha=0.75)
plt.scatter(InfubinolMeanVolume['Timepoint'],InfubinolMeanVolume['Tumor Volume (mm3)'], label = 'Infubinol',marker="^", facecolors="blue", edgecolors="black",alpha=0.75)
plt.scatter(KetaprilMeanVolume['Timepoint'],KetaprilMeanVolume['Tumor Volume (mm3)'], label = 'Ketapril',marker="s", facecolors="green", edgecolors="black",alpha=0.75)
plt.scatter(PlaceboMeanVolume['Timepoint'],PlaceboMeanVolume['Tumor Volume (mm3)'], label = 'Placebo',marker="D", facecolors="black", edgecolors="black",alpha=0.75)
plt.xlim(0, max(PlaceboMeanVolume['Timepoint']+1))
plt.title('Tumor Response To Treatment')
plt.xlabel("Timepoint (Days)")
plt.ylabel("Tumor Volume (mm3)")
plt.legend(loc="best")
plt.grid(b=True,axis='both')
plt.plot(CapomulinMeanVolume['Timepoint'],CapomulinMeanVolume['Tumor Volume (mm3)'], linestyle='--',linewidth=0.7, color="red")
plt.plot(InfubinolMeanVolume['Timepoint'],InfubinolMeanVolume['Tumor Volume (mm3)'], linestyle='--',linewidth=0.7,color="blue")
plt.plot(KetaprilMeanVolume['Timepoint'],KetaprilMeanVolume['Tumor Volume (mm3)'], linestyle='--',linewidth=0.7,color="green")
plt.plot(PlaceboMeanVolume['Timepoint'],PlaceboMeanVolume['Tumor Volume (mm3)'], linestyle='--',linewidth=0.7,color="black")
plt.errorbar(CapomulinMeanVolume['Timepoint'], CapomulinMeanVolume['Tumor Volume (mm3)'], yerr = sem(CapomulinMeanVolume['Tumor Volume (mm3)']),linestyle ="--",fmt = 'o',capsize=3,capthick=1,markeredgecolor='red')
plt.errorbar(InfubinolMeanVolume['Timepoint'],InfubinolMeanVolume['Tumor Volume (mm3)'], yerr = sem(InfubinolMeanVolume['Tumor Volume (mm3)']),linestyle ="--",fmt = 'o',capsize=3,capthick=1,markeredgecolor='blue')
plt.errorbar(KetaprilMeanVolume['Timepoint'],KetaprilMeanVolume['Tumor Volume (mm3)'], yerr = sem(KetaprilMeanVolume['Tumor Volume (mm3)']),linestyle ="--",fmt = 'o',capsize=3,capthick=1,markeredgecolor='green')
plt.errorbar(PlaceboMeanVolume['Timepoint'],PlaceboMeanVolume['Tumor Volume (mm3)'], yerr = sem(PlaceboMeanVolume['Tumor Volume (mm3)']),linestyle ="--",fmt = 'o',capsize=3,capthick=1,markeredgecolor='black')
plt.savefig('reports/figures/tumor_response.png')
plt.show()
# -
# # Metastatic site change over time
# +
fig, ax = plt.subplots()
metaStaticMean_df = group_drug_df.mean()['Metastatic Sites']
MSS_indexreset_df = metaStaticMean_df.reset_index()
MSS_indexreset_df.head()
CapomulinMeanVolumeMSS = MSS_indexreset_df.loc[MSS_indexreset_df['Drug'] == 'Capomulin']
InfubinolMeanVolumeMSS = MSS_indexreset_df.loc[MSS_indexreset_df['Drug'] == 'Infubinol']
KetaprilMeanVolumeMSS = MSS_indexreset_df.loc[MSS_indexreset_df['Drug'] == 'Ketapril']
PlaceboMeanVolumeMSS = MSS_indexreset_df.loc[MSS_indexreset_df['Drug'] == 'Placebo']
plt.scatter(CapomulinMeanVolumeMSS['Timepoint'],CapomulinMeanVolumeMSS['Metastatic Sites'], label = 'Capomulin', marker="o", facecolors="red", edgecolors="black",alpha=0.75)
plt.scatter(InfubinolMeanVolumeMSS['Timepoint'],InfubinolMeanVolumeMSS['Metastatic Sites'], label = 'Infubinol',marker="^", facecolors="blue", edgecolors="black",alpha=0.75)
plt.scatter(KetaprilMeanVolumeMSS['Timepoint'],KetaprilMeanVolumeMSS['Metastatic Sites'], label = 'Ketapril',marker="s", facecolors="green", edgecolors="black",alpha=0.75)
plt.scatter(PlaceboMeanVolumeMSS['Timepoint'],PlaceboMeanVolumeMSS['Metastatic Sites'], label = 'Placebo',marker="D", facecolors="black", edgecolors="black",alpha=0.75)
plt.xlim(0, max(PlaceboMeanVolumeMSS['Timepoint']+1))
plt.title('Metastatic Spread During Treatment')
plt.xlabel("Timepoint/Treatment Duration (Days)")
plt.ylabel("Metastatic Sites")
plt.legend(loc="best")
plt.grid(b=True,axis='both')
plt.plot(CapomulinMeanVolumeMSS['Timepoint'],CapomulinMeanVolumeMSS['Metastatic Sites'], linestyle='--',linewidth=0.7, color="red")
plt.plot(InfubinolMeanVolumeMSS['Timepoint'],InfubinolMeanVolumeMSS['Metastatic Sites'], linestyle='--',linewidth=0.7,color="blue")
plt.plot(KetaprilMeanVolumeMSS['Timepoint'],KetaprilMeanVolumeMSS['Metastatic Sites'], linestyle='--',linewidth=0.7,color="green")
plt.plot(PlaceboMeanVolumeMSS['Timepoint'],PlaceboMeanVolumeMSS['Metastatic Sites'], linestyle='--',linewidth=0.7,color="black")
plt.errorbar(CapomulinMeanVolumeMSS['Timepoint'], CapomulinMeanVolumeMSS['Metastatic Sites'], yerr = sem(CapomulinMeanVolumeMSS['Metastatic Sites']),linestyle ="--",fmt = 'o',capsize=3,capthick=1,markeredgecolor='red')
plt.errorbar(InfubinolMeanVolumeMSS['Timepoint'],InfubinolMeanVolumeMSS['Metastatic Sites'], yerr = sem(InfubinolMeanVolumeMSS['Metastatic Sites']),linestyle ="--",fmt = 'o',capsize=3,capthick=1,markeredgecolor='blue')
plt.errorbar(KetaprilMeanVolumeMSS['Timepoint'],KetaprilMeanVolumeMSS['Metastatic Sites'], yerr = sem(KetaprilMeanVolumeMSS['Metastatic Sites']),linestyle ="--",fmt = 'o',capsize=3,capthick=1,markeredgecolor='green')
plt.errorbar(PlaceboMeanVolumeMSS['Timepoint'],PlaceboMeanVolumeMSS['Metastatic Sites'], yerr = sem(PlaceboMeanVolumeMSS['Metastatic Sites']),linestyle ="--",fmt = 'o',capsize=3,capthick=1,markeredgecolor='black')
plt.savefig('reports/figures/metastatic_spread.png')
plt.show()
# -
# # Survival rate over time
# +
fig, ax = plt.subplots()
SR_df = merged_df.groupby(['Drug', 'Timepoint']).count()['Mouse ID']
SR_indexreset_df = SR_df.reset_index()
SR_indexreset_df.head()
CapomulinMeanVolumeSR = SR_indexreset_df.loc[SR_indexreset_df['Drug'] == 'Capomulin']
InfubinolMeanVolumeSR = SR_indexreset_df.loc[SR_indexreset_df['Drug'] == 'Infubinol']
KetaprilMeanVolumeSR = SR_indexreset_df.loc[SR_indexreset_df['Drug'] == 'Ketapril']
PlaceboMeanVolumeSR = SR_indexreset_df.loc[SR_indexreset_df['Drug'] == 'Placebo']
def testfunc(num1):
num1 = float(num1)
percentage = num1/26
return percentage
SR_indexreset_df= pd.pivot_table(SR_indexreset_df, index='Timepoint', columns='Drug', values='Mouse ID', aggfunc = testfunc)
SR_indexreset_df= SR_indexreset_df
plt.scatter(CapomulinMeanVolumeSR['Timepoint'],CapomulinMeanVolumeSR['Mouse ID'], label = 'Capomulin', marker="o", facecolors="red", edgecolors="black",alpha=0.75)
plt.scatter(InfubinolMeanVolumeSR['Timepoint'],InfubinolMeanVolumeSR['Mouse ID'], label = 'Infubinol',marker="^", facecolors="blue", edgecolors="black",alpha=0.75)
plt.scatter(KetaprilMeanVolumeSR['Timepoint'],KetaprilMeanVolumeSR['Mouse ID'], label = 'Ketapril',marker="s", facecolors="green", edgecolors="black",alpha=0.75)
plt.scatter(PlaceboMeanVolumeSR['Timepoint'],PlaceboMeanVolumeSR['Mouse ID'], label = 'Placebo',marker="D", facecolors="black", edgecolors="black",alpha=0.75)
plt.xlim(0, max(PlaceboMeanVolumeSR['Timepoint']+1))
plt.title('Survival Rate During Treatment')
plt.xlabel('Timepoint/Treatment Duration (Days)')
plt.ylabel("Survival Rate")
plt.legend(loc="best")
plt.plot(CapomulinMeanVolumeSR['Timepoint'],CapomulinMeanVolumeSR['Mouse ID'], linestyle='--',linewidth=0.7, color="red")
plt.plot(InfubinolMeanVolumeSR['Timepoint'],InfubinolMeanVolumeSR['Mouse ID'], linestyle='--',linewidth=0.7,color="blue")
plt.plot(KetaprilMeanVolumeSR['Timepoint'],KetaprilMeanVolumeSR['Mouse ID'], linestyle='--',linewidth=0.7,color="green")
plt.plot(PlaceboMeanVolumeSR['Timepoint'],PlaceboMeanVolumeSR['Mouse ID'], linestyle='--',linewidth=0.7,color="black")
vals = ax.get_yticks()
ax.set_yticklabels(['{:3.2f}%'.format(x*4) for x in vals])
SR_indexreset_df.head()
plt.savefig('reports/figures/survival_rate.png')
plt.show()
# -
# +
#Calcuate percentage change for tumor volume chart
def return_pc_45(df):
    """Percent drop in mean tumor volume from day 0 to day 45, rounded to 2 dp.

    Positive values mean the tumor shrank over the 45-day treatment.
    """
    start = df.loc[df['Timepoint'] == 0, 'Tumor Volume (mm3)'].values[0]
    end = df.loc[df['Timepoint'] == 45, 'Tumor Volume (mm3)'].values[0]
    return np.round((start - end) / start * 100, decimals=2)
print(
return_pc_45(CapomulinMeanVolume),
return_pc_45(PlaceboMeanVolume),
return_pc_45(InfubinolMeanVolume),
return_pc_45(KetaprilMeanVolume)
)
# +
pc_45_list = [return_pc_45(CapomulinMeanVolume),return_pc_45(PlaceboMeanVolume),\
return_pc_45(InfubinolMeanVolume),return_pc_45(KetaprilMeanVolume)]
print(pc_45_list)
#Switch negative and positive for chart
pc_45_list=np.negative(pc_45_list)
print(pc_45_list)
#Color list based upon value
colors = []
for value in pc_45_list:
if value < 0:
colors.append('red')
else:
colors.append('green')
print(colors)
# -
# # Tumor Change Over 45 Day Treatment
# +
# Bar graph comparing total % tumor volume change for each drug across the full 45 days
x=['Capomulin','Infubinol','Ketapril','Placebo']
y=pc_45_list
fig, ax = plt.subplots()
sns.set(rc={'figure.figsize':(6,6)})
sns.barplot(x,y,order=x, palette=colors, hue = colors)
ax.set_title("Tumor Change Over 45 Day Treatment")
ax.legend_.remove()
plt.grid(b=True,axis='both')
plt.axhline(y=0, color='b', linestyle='-')
plt.ylabel("% Tumor Volume Change")
plt.savefig('reports/figures/tumor_change.png')
plt.show()
# -
# +
#Debugging
#https://pandas.pydata.org/pandas-docs/stable/reshaping.html
#https://pandas.pydata.org/pandas-docs/stable/generated/pandas.pivot_table.html
def testfunc(num1):
num1 = float(num1)
percentage = num1/26
return percentage
# Count surviving mice per (Drug, Timepoint) pair.
SR_df = merged_df.groupby(['Drug', 'Timepoint']).count()['Mouse ID']
SR_indexreset_df = SR_df.reset_index()
# Pivot to Timepoint rows x Drug columns, converting counts to fractions
# of the cohort via testfunc.
SR_indexreset_df= pd.pivot_table(SR_indexreset_df, index='Timepoint', columns='Drug', values='Mouse ID', aggfunc = testfunc)
# NOTE(review): this self-assignment is a no-op left over from debugging.
SR_indexreset_df= SR_indexreset_df
SR_indexreset_df.head(45)
# +
# #https://matplotlib.org/1.2.1/examples/pylab_examples/errorbar_demo.html
# plt.scatter(CapomulinMeanVolume['Timepoint'],CapomulinMeanVolume['Tumor Volume (mm3)'], label = 'Capomulin', marker="o", facecolors="red", edgecolors="black",alpha=0.75)
# plt.scatter(InfubinolMeanVolume['Timepoint'],InfubinolMeanVolume['Tumor Volume (mm3)'], label = 'Infubinol',marker="^", facecolors="blue", edgecolors="black",alpha=0.75)
# plt.scatter(KetaprilMeanVolume['Timepoint'],KetaprilMeanVolume['Tumor Volume (mm3)'], label = 'Ketapril',marker="s", facecolors="green", edgecolors="black",alpha=0.75)
# plt.scatter(PlaceboMeanVolume['Timepoint'],PlaceboMeanVolume['Tumor Volume (mm3)'], label = 'Placebo',marker="D", facecolors="black", edgecolors="black",alpha=0.75)
# group_drug_df = merged_df.groupby(['Drug','Timepoint'])
# TumorVolumeMean_df = group_drug_df.mean()['Tumor Volume (mm3)']
# TumorVolumeMean_df.head()
# TVM_indexreset_df = TumorVolumeMean_df.reset_index()
# TVM_indexreset_df.head()
# CapomulinMeanVolume = TVM_indexreset_df.loc[TVM_indexreset_df['Drug'] == 'Capomulin']
# InfubinolMeanVolume = TVM_indexreset_df.loc[TVM_indexreset_df['Drug'] == 'Infubinol']
# KetaprilMeanVolume = TVM_indexreset_df.loc[TVM_indexreset_df['Drug'] == 'Ketapril']
# PlaceboMeanVolume = TVM_indexreset_df.loc[TVM_indexreset_df['Drug'] == 'Placebo']
# plt.grid(b=True,axis='both')
# plt.show()
# -
| pymaceuticals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# ! git clone https://github.com/singhnaveen098/Hamoye_capstone_project_smote.git
# Paths into the freshly cloned repository's train/val/test image folders.
train_path = 'Hamoye_capstone_project_smote/Data/train/'
val_path = 'Hamoye_capstone_project_smote/Data/val/'
test_path = 'Hamoye_capstone_project_smote/Data/test/'
# -
import tensorflow as tf
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
from tensorflow.keras.preprocessing import image_dataset_from_directory
import numpy as np
from tensorflow.keras import Model
# List the class sub-folders of the test split (one folder per class).
os.listdir('Hamoye_capstone_project_smote/Data/test/')
# Build tf.data pipelines with one-hot ('categorical') labels.
train_df = image_dataset_from_directory(train_path,label_mode='categorical',image_size=(256, 256))
# BUG FIX: shuffle=False on the evaluation splits. With the default
# shuffle=True the dataset is reshuffled on every iteration, so the labels
# gathered later via test_df.unbatch() would not line up with the order of
# model.predict() output, corrupting the f1 score and confusion matrix.
test_df = image_dataset_from_directory(test_path,label_mode='categorical', shuffle=False,image_size=(256, 256))
val_df = image_dataset_from_directory(val_path,label_mode='categorical', shuffle=False,image_size=(256, 256))
from tensorflow.keras.preprocessing import image
# Preview the first two Buffalo training images.
shown = 0
for fname in os.listdir(train_path + 'Buffalo/'):
    # NOTE: `img` is deliberately left at module level -- a later cell
    # reuses the last loaded image for a smoke-test prediction.
    img = image.load_img(train_path + 'Buffalo/' + fname, target_size=(480, 480))
    plt.imshow(img)
    plt.show()
    shown += 1
    if shown == 2:
        break
# ! pip install tf-nightly
from tensorflow.keras.applications.efficientnet_v2 import EfficientNetV2L
from tensorflow.keras.applications.efficientnet_v2 import decode_predictions
# +
# Smoke test: classify the last previewed image with stock ImageNet weights.
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add a batch dimension for predict()
base_model = EfficientNetV2L(weights='imagenet')
pred = base_model.predict(x)
print('Predicted:', decode_predictions(pred))
# +
# Transfer learning: frozen EfficientNetV2L backbone + a 4-class softmax head.
bmodel = EfficientNetV2L(weights='imagenet', include_top=False, input_shape=(256, 256, 3))
bmodel.trainable = False  # freeze backbone weights; train only the new head
from tensorflow.keras import layers
x = bmodel.output
x = layers.GlobalAveragePooling2D()(x)  # collapse spatial dims to a vector
outputs = layers.Dense(4, activation="softmax")(x)
model = Model(inputs=bmodel.input, outputs=outputs)
# +
# Keep only the best weights seen during training (save_best_only).
checkpoints = tf.keras.callbacks.ModelCheckpoint('EfficientNetV2L.h5',save_best_only=True, save_weights_only=True)
model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])
m_fit = model.fit(train_df, epochs=20, validation_data=val_df, callbacks=[checkpoints])
# -
result = m_fit.history
# +
# Plot the training curves: loss (left) and accuracy (right), one point per epoch.
# BUG FIX: the original called plt.grid(True) here, before the first subplot
# existed; that implicitly created a stray full-figure axes behind the two
# panels. Each panel now enables its own grid instead.
n_epochs = len(result['loss'])
plt.figure(figsize=[12,4])
plt.subplot(1,2,1)
plt.plot(range(1, n_epochs+1), result['loss'], label='Training')
plt.plot(range(1, n_epochs+1), result['val_loss'], label='Validation')
plt.xlabel('Epoch'); plt.ylabel('Loss'); plt.title('Loss')
plt.legend()
plt.grid(True)
plt.subplot(1,2,2)
plt.plot(range(1, n_epochs+1), result['accuracy'], label='Training')
plt.plot(range(1, n_epochs+1), result['val_accuracy'], label='Validation')
plt.xlabel('Epoch'); plt.ylabel('Accuracy'); plt.title('Accuracy')
plt.legend()
plt.grid(True)
plt.show()
# -
# Restore the best checkpointed weights before evaluating on the test split.
model.load_weights('EfficientNetV2L.h5')
pred = model.evaluate(test_df)
pred = model.predict(test_df)  # overwrite with per-class probabilities
pred
# save the index of the highest probability (predicted class per image)
pred = pred.argmax(axis=1)
print(pred)
# get the actual values.
# NOTE(review): y_true/pred alignment requires test_df to iterate in a
# deterministic order (shuffle=False); verify how test_df was created above.
test_images = list(test_df.unbatch().as_numpy_iterator())
y_true = np.array([i[1] for i in test_images])
y_true = y_true.argmax(axis=1)  # one-hot -> class index
print(y_true)
# calculate f1_score (macro: unweighted mean over the 4 classes)
from sklearn.metrics import f1_score
f1_score(y_true,pred,average='macro')
# get the confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_true,pred)
# %cd /kaggle/working
# !ls
from IPython.display import FileLink
FileLink(r'EfficientNetV2L.h5')  # download link for the trained weights
| Deep learning/EfficientNetV2L_0.249/EfficientNetV2L_0.249.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Exploring the Labyrinth
#
# Chapter 2 of [Real World Algorithms](https://mitpress.mit.edu/books/real-world-algorithms).
#
# ---
#
# > <NAME><br />
# > Athens University of Economics and Business
# + [markdown] slideshow={"slide_type": "slide"}
# # Graphs in Python
#
# The most common way to represent graphs in Python is with adjacency lists.
#
# The adjacency lists are put into a Python dictionary.
#
# The keys of the dictionary are the nodes, and the value for each node is its adjacency list.
#
# With a slight abuse of terminology, we could use other data structures instead of a list to represent an adjacency list: for example, a set is a sensible choice, as we don't care about the order of the items in the list, and checking for membership (i.e., checking if a node is a neighbor of another node) is much faster in a set than in a list. In a well-implemented set it takes constant time, while in a list the time is linear and depends on the length of the list.
# + [markdown] slideshow={"slide_type": "slide"}
# For example, here is a graph with 8 nodes (from 0 to 7) and its adjacency lists, represented as lists:
# + slideshow={"slide_type": "fragment"}
# A graph with 8 nodes (0-7) as a dict mapping each node to its adjacency list.
g = {
    0: [1, 2, 3],
    1: [0, 4],
    2: [0],
    3: [0, 5],
    4: [1, 5],
    5: [3, 4, 6, 7],
    6: [5],
    7: [5],
}
# print whole graph
print(g)
# print adjacency list of node 0
print(g[0])
# print adjacency list of node 5
print(g[5])
# + [markdown] slideshow={"slide_type": "slide"}
# Similarly, here is the same graph, but this time the nodes are strings (single-character strings, which are still strings in Python).
#
# Nodes can be anything: numbers, strings, or anything else that can be used as a key in a Python dictionary.
# + slideshow={"slide_type": "fragment"}
# The same graph, but with single-character string nodes; any hashable
# value can serve as a node (dictionary key).
g = {
    'a': ['b', 'c', 'd'],
    'b': ['a', 'e'],
    'c': ['a'],
    'd': ['a', 'f'],
    'e': ['b', 'f'],
    'f': ['d', 'e', 'g', 'h'],
    'g': ['f'],
    'h': ['f'],
}
# print whole graph
print(g)
# print adjacency list of node 'a'
print(g['a'])
# print adjacency list of node 'e'
print(g['e'])
# + [markdown] slideshow={"slide_type": "slide"}
# # Depth-first Search
#
# Suppose we have the following graph and we want to explore it depth-first.
#
# In depth-first search, we follow a path as far as we can; when we reach a dead-end, that is, a node with no unvisited neighbours, we backtrack to the most recent node that still has unvisited neighbours.
#
# <img width="300" src="example_graph_1.png"/>
# + [markdown] slideshow={"slide_type": "slide"}
# The graph is represented in Python as follows:
# + slideshow={"slide_type": "fragment"}
# Graph for the depth-first search examples; nodes 6 and 7 have empty
# adjacency lists (dead ends).
g = {
    0: [1, 2, 3],
    1: [0, 4],
    2: [0, 4],
    3: [0, 5],
    4: [5],
    5: [4, 6, 7],
    6: [],
    7: []
}
# + [markdown] slideshow={"slide_type": "slide"}
# The depth-first recursive search algorithm is then simply:
# + slideshow={"slide_type": "fragment"}
# Shared visited flags, indexed by node; dfs mutates this module-level list
# instead of passing it around (kept simple for exposition).
visited = [ False ] * len(g)

def dfs(g, node):
    """Recursively visit every node reachable from `node`, depth-first."""
    print("Visiting", node)
    visited[node] = True
    for v in g[node]:
        if not visited[v]:
            dfs(g, v)

dfs(g, 0)  # explore the demo graph starting from node 0
# + [markdown] slideshow={"slide_type": "slide"}
# It is possible to implement depth-first search without recursion.
#
# To do that, we have to emulate recursion ourselves, by using a stack.
# + slideshow={"slide_type": "fragment"}
def dfs_stack(g, node):
    """Iterative depth-first search from `node`.

    Emulates the recursion with an explicit stack and returns the list of
    visited flags, indexed by node. Note: a node may be pushed more than
    once (see the discussion below).
    """
    visited = [ False ] * len(g)
    pending = [node]
    while pending:
        print("Stack", pending)
        current = pending.pop()
        print("Visiting", current)
        visited[current] = True
        for neighbour in g[current]:
            if not visited[neighbour]:
                pending.append(neighbour)
    return visited
dfs_stack(g, 0)  # traverse the demo graph iteratively from node 0
# + [markdown] slideshow={"slide_type": "slide"}
# The stack-based depth-first search may insert a node in the stack multiple times.
#
# For example, consider the following graph:
# + [markdown] slideshow={"slide_type": "slide"}
# <img width="250" src="example_graph_2.png"/>
# + [markdown] slideshow={"slide_type": "slide"}
# The graph is represented as follows:
# + slideshow={"slide_type": "fragment"}
# Undirected demo graph in which some nodes are reachable via two paths,
# so the plain stack-based DFS pushes them more than once.
g2 = {
    0: [1, 2, 3],
    1: [0, 4],
    2: [0],
    3: [0, 5],
    4: [1, 5],
    5: [3, 4, 6, 7],
    6: [5],
    7: [5]
}
# + [markdown] slideshow={"slide_type": "slide"}
# Then we can traverse it with the stack-based version of depth-first search:
# + slideshow={"slide_type": "fragment"}
dfs_stack(g2, 0)
# + [markdown] slideshow={"slide_type": "slide"}
# You may notice that node 1 enters the stack twice.
#
# That does not affect the correctness of the algorithm, as the algorithm will explore the whole graph, but we can fix it anyway.
#
# One way to fix it would be to search in the stack and if the node is already there, we would not put it.
#
# However, searching in a list takes place in linear time, depending on the length of the list.
#
# It is faster to keep a separate structure in which we record if something is in the stack.
#
# That requires more space: an instance of speed-space trade-off.
# + slideshow={"slide_type": "slide"}
def dfs_nd_stack(g, node):
    """Iterative DFS that never stores a node on the stack twice.

    Keeps a parallel `on_stack` flag array (space traded for speed) so
    membership checks are O(1). Returns the visited flags, indexed by node.
    """
    visited = [ False ] * len(g)
    on_stack = [ False ] * len(g)
    stack = [node]
    on_stack[node] = True
    while stack:
        print("Stack", stack)
        current = stack.pop()
        on_stack[current] = False
        print("Visiting", current)
        visited[current] = True
        for neighbour in g[current]:
            if not (visited[neighbour] or on_stack[neighbour]):
                stack.append(neighbour)
                on_stack[neighbour] = True
    return visited
dfs_nd_stack(g2, 0)  # same traversal; node 1 now enters the stack only once
# + [markdown] slideshow={"slide_type": "slide"}
# # Breadth-first Search
#
# In breadth-first search we visit all neighbours of a node, then all the neighbours of the neighbours, and so on.
#
# The exploration is like a ripple spreading outwards.
#
# We can implement breadth-first search using a First-In First-Out (FIFO) queue; in Python this is provided by `collections.deque`.
# + slideshow={"slide_type": "slide"}
from collections import deque
# Demo graph for breadth-first search (same shape as the DFS example).
g = {
    0: [1, 2, 3],
    1: [0, 4],
    2: [0, 4],
    3: [0, 5],
    4: [5],
    5: [4, 6, 7],
    6: [],
    7: []
}
def bfs(g, node):
    """Breadth-first traversal of `g` starting from `node`.

    Uses a FIFO queue (collections.deque) plus an `inqueue` flag array so a
    node is never enqueued twice. Returns the list of visited flags, indexed
    by node -- added for consistency with dfs_stack / dfs_nd_stack above
    (backward compatible: existing callers ignore the return value).
    """
    q = deque()
    visited = [ False ] * len(g)
    inqueue = [ False ] * len(g)
    q.appendleft(node)
    inqueue[node] = True
    while not (len(q) == 0):
        print("Queue", q)
        c = q.pop()
        print("Visiting", c)
        inqueue[c] = False
        visited[c] = True
        for v in g[c]:
            if not visited[v] and not inqueue[v]:
                q.appendleft(v)
                inqueue[v] = True
    return visited
bfs(g, 0)  # ripple outwards from node 0
# + [markdown] slideshow={"slide_type": "slide"}
# # Reading a Graph from a File
#
# Usually we read graphs from files, typically text files.
#
# A common way to store graphs is in text files where each line contains a link between two nodes.
#
# For example, the file containing the first graph we saw would be:
# ```
# 0 1
# 0 2
# 0 3
# 1 0
# 1 4
# 2 0
# 2 4
# 3 0
# 3 5
# 4 5
# 5 4
# 5 6
# 5 7
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# To read this file we would go line-by-line.
#
# We would split each line on whitespace.
#
# We would then get the two parts and treat them as nodes.
#
# Note that we assume that the nodes are integers, so we convert the split pieces with `int(x)`. If nodes were strings, this would not be required.
#
# We also assume that the graph is directed.
# + [markdown] slideshow={"slide_type": "slide"}
# The following example will read file [example_graph_1.txt](example_graph_1.txt), the directed graph we used for the depth-first example, which has the following contents:
#
# ```
# 0 1
# 0 2
# 0 3
# 1 0
# 1 4
# 2 0
# 2 4
# 3 0
# 3 5
# 4 5
# 5 4
# 5 6
# 5 7
# ```
# + slideshow={"slide_type": "fragment"}
# Read a directed graph from a whitespace-separated edge-list file:
# one "from to" pair of integer node ids per line.
input_filename = "example_graph_1.txt"
g = {}
with open(input_filename) as graph_input:
    for line in graph_input:
        # Split the line and convert both endpoints to integers.
        endpoints = [int(token) for token in line.split()]
        # Skip blank or malformed lines.
        if len(endpoints) != 2:
            continue
        source, target = endpoints
        # First sighting of a node: give it an empty adjacency list.
        g.setdefault(source, [])
        g.setdefault(target, [])
        # Directed graph: record only source -> target.
        g[source].append(target)
print(g)
# + [markdown] slideshow={"slide_type": "slide"}
# Printing a graph like that is not very convenient.
#
# Python offers the ``pprint`` (pretty-print) library that can help us output stuff in a more meaningful manner.
# + slideshow={"slide_type": "fragment"}
import pprint
pprint.pprint(g)  # multi-line rendering: one adjacency list per line
# + [markdown] slideshow={"slide_type": "slide"}
# For undirected graphs, the code is pretty much the same; we only need to take care to enter the edge $(v, u)$ for every edge $(u, v)$ that we meet in the file.
#
# Here is the equivalent for the file [example_graph_2.txt](example_graph_2.txt), which is the undirected graph we used for depth-first search.
# + slideshow={"slide_type": "slide"}
# Read an undirected graph from an edge-list file: identical to the directed
# reader above, except each edge (u, v) is also recorded as (v, u).
input_filename = "example_graph_2.txt"
g = {}
with open(input_filename) as graph_input:
    for line in graph_input:
        # Split line and convert line parts to integers.
        nodes = [int(x) for x in line.split()]
        # Skip blank or malformed lines.
        if len(nodes) != 2:
            continue
        # If a node is not already in the graph
        # we must create a new empty list.
        if nodes[0] not in g:
            g[nodes[0]] = []
        if nodes[1] not in g:
            g[nodes[1]] = []
        # We need to append the "to" node
        # to the existing list for the "from" node.
        g[nodes[0]].append(nodes[1])
        # And also the other way round (undirected).
        g[nodes[1]].append(nodes[0])
pprint.pprint(g)
# + [markdown] slideshow={"slide_type": "slide"}
# # Real Graph Processing in Python
#
# In a real program we would not use our own hand-crafted code for handling graphs in Python.
#
# We would instead use a respected library that is used by many developers around the world and optimized over time.
#
# For that purpose, check the [NetworkX](https://networkx.github.io/) library.
| content/notebooks/chapter_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# !pip install nbformat==5.1.3
import configparser
import os
# +
# Load secret settings (credentials) and export each key both as a
# module-level variable and as an environment variable for the notebooks
# invoked below.
# BUG FIX: removed a stray bare `test` token left between these lines,
# which raised NameError before any configuration was read.
config = configparser.ConfigParser()
config.read('ibm-sql-query.secrets')
config = config['DEFAULT']
for key in config:
    # NOTE(review): exec() on config values executes arbitrary text from the
    # file -- acceptable only because this is a trusted local test harness.
    exec('{}="{}"'.format(key,config[key]))
    os.environ[key] = config[key]

# Load the non-secret configuration the same way.
config = configparser.ConfigParser()
config.read('ibm-sql-query.config')
config = config['DEFAULT']
for key in config:
    exec('{}="{}"'.format(key,config[key]))
    os.environ[key] = config[key]
# + tags=[] language="bash"
# # delete result if existing
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + tags=[]
# !ipython ibm-sql-query.ipynb
# + language="bash"
# # verify result existing
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -eq 0 ]; then
# echo 'creation failed'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + tags=[]
# !ipython ibm-sql-query.ipynb out_format='parquet'
# + tags=[] language="bash"
# # verify result existing
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -eq 0 ]; then
# echo 'creation failed'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + tags=[]
# !ipython ibm-sql-query.ipynb out_partition_columns='x,y,z'
# + tags=[] language="bash"
# # verify result existing and corrent number of partitions
# if [ `ipython cloud-object-store-housekeeping.ipynb operation="walk" path="result" 2> /dev/null |grep result |wc -l` -lt 1000 ]; then
# echo 'creation failed, number of partitions < 1000'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + tags=[]
# !ipython ibm-sql-query.ipynb out_partition_columns='x,y,z' out_format='parquet'
# + tags=[] language="bash"
# # verify result existing and corrent number of partitions
# if [ `ipython cloud-object-store-housekeeping.ipynb operation="walk" path="result" 2> /dev/null |grep result |wc -l` -lt 1000 ]; then
# echo 'creation failed, number of partitions < 1000'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + [markdown] tags=[]
# # TEST
# + tags=[]
# !ipython ibm-sql-query.ipynb out_format='parquet' out_partition_columns='x,y,z' out_number_of_objects="42"
# + tags=[] language="bash"
# # verify result existing and corrent number of partitions
# if [ `ipython cloud-object-store-housekeeping.ipynb operation="walk" path="result" 2> /dev/null |grep result |wc -l` -ne 2 ]; then
# echo 'creation failed, number of partitions <> 2'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + [markdown] tags=[]
# # TEST
# + tags=[]
# !ipython ibm-sql-query.ipynb out_format='parquet' out_partition_columns='x,y,z' out_number_of_objects="42" out_rows_per_object="23"
# + tags=[] language="bash"
# # verify result existing and corrent number of partitions
# if [ `ipython cloud-object-store-housekeeping.ipynb operation="walk" path="result" 2> /dev/null |grep result |wc -l` -ne 2 ]; then
# echo 'creation failed, number of partitions <> 2'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + [markdown] tags=[]
# # TEST
# + tags=[]
# !ipython ibm-sql-query.ipynb out_format='parquet' out_exact_name='True'
# + tags=[] language="bash"
# # verify result existing and corrent number of partitions
# if [ `ipython cloud-object-store-housekeeping.ipynb operation="walk" path="result" 2> /dev/null |grep result |wc -l` -ne 1 ]; then
# echo 'creation failed, number of results <> 1'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + [markdown] tags=[]
# # TEST
# + tags=[]
# !ipython ibm-sql-query.ipynb out_exact_name='True'
# + tags=[] language="bash"
# # verify result existing and corrent number of partitions
# if [ `ipython cloud-object-store-housekeeping.ipynb operation="walk" path="result" 2> /dev/null |grep result |wc -l` -ne 1 ]; then
# echo 'creation failed, number of results <> 1'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
# + [markdown] tags=[]
# # TEST
# + tags=[]
# !ipython ibm-sql-query.ipynb out_exact_name='True' out_no_jobid_folder='True'
# + tags=[]
# !ipython cloud-object-store-housekeeping.ipynb operation="walk" path="result"
# + tags=[] language="bash"
# # verify result existing and corrent number of partitions
# if [ `ipython cloud-object-store-housekeeping.ipynb operation="walk" path="result" 2> /dev/null |grep result |wc -l` -ne 1 ]; then
# echo 'creation failed, number of results <> 1'
# exit -1
# fi
# + tags=[] language="bash"
# # delete result
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# ipython cloud-object-store-housekeeping.ipynb operation="rm" path="result"
# fi
# + language="bash"
# # verify result deleted
# if [ `ipython cloud-object-store-housekeeping.ipynb 2> /dev/null | grep result |wc -l` -gt 0 ]; then
# echo 'deletion failed'
# exit -1
# fi
| component-library/transform/ibm-sql-query-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Train Models
#
# The central goal of machine learning is to train predictive models that can be used by applications. In Azure Machine Learning, you can use scripts to train models leveraging common machine learning frameworks like Scikit-Learn, Tensorflow, PyTorch, SparkML, and others. You can run these training scripts as experiments in order to track metrics and outputs, which include the trained models.
# ## Connect to your workspace
#
# To get started, connect to your workspace.
#
# > **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
# +
import azureml.core
from azureml.core import Workspace

# Load the workspace from the saved config file (the SDK's config.json;
# may prompt for interactive Azure authentication on first use).
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
# -
# ## Create a training script
#
# You're going to use a Python script to train a machine learning model based on the diabetes data, so let's start by creating a folder for the script and data files.
# +
import os, shutil

# Create a folder for the experiment files
training_folder = 'diabetes-training'
os.makedirs(training_folder, exist_ok=True)

# Copy the data file into the experiment folder so the remote run can read it.
shutil.copy('data/diabetes.csv', os.path.join(training_folder, "diabetes.csv"))
# -
# #### Extra note:
#
# Writing a script to train a model
#
# When using an experiment to train a model, your script should save the trained model in the outputs folder. For example, the following script trains a model using Scikit-Learn, and saves it in the outputs folder using the joblib package:
#
# To prepare for an experiment that trains a model, a script like this is created and saved in a folder. For example, you could save this script as training_script.py in a folder named training_folder. Since the script includes code to load training data from data.csv, this file should also be saved in the folder.
# Now you're ready to create the training script and save it in the folder.
#
# > **Note**: This code *creates* the script - it doesn't run it!
# +
# %%writefile $training_folder/diabetes_training.py
# Training script written to disk by the cell (not executed here): trains a
# logistic regression diabetes classifier inside an Azure ML run, logging
# metrics and saving the fitted model to the run's outputs folder.
# Import libraries
from azureml.core import Run
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve

# Get the experiment run context
run = Run.get_context()

# load the diabetes dataset
print("Loading Data...")
diabetes = pd.read_csv('diabetes.csv')

# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values

# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# Set regularization hyperparameter
reg = 0.01

# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
# BUG FIX: np.float was removed in NumPy 1.24 (deprecated since 1.20);
# the builtin float is the documented replacement.
run.log('Regularization Rate', float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)

# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', float(acc))

# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', float(auc))

# Save the trained model in the outputs folder
os.makedirs('outputs', exist_ok=True)
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')

run.complete()
# -
# ## Run the training script as an experiment
#
# Now you're ready to run the script as an experiment. Note that the default environment does not include the **scikit-learn** package, so you need to explicitly add that to the configuration. The conda environment is built on-demand the first time the experiment is run, and cached for future runs that use the same configuration; so the first run will take a little longer.
#
# #### Extra note:
#
# To run the script, create a ScriptRunConfig that references the folder and script file. You generally also need to define a Python (Conda) environment that includes any packages required by the script. In this example, the script uses Scikit-Learn so you must create an environment that includes that. The script also uses Azure Machine Learning to log metrics, so you need to remember to include the azureml-defaults package in the environment.
#
# #### Working with script arguments
#
# To use parameters in a script, you must use a library such as argparse to read the arguments passed to the script and assign them to variables. For example, the following script reads an argument named --reg-rate, which is used to set the regularization rate hyperparameter for the logistic regression algorithm used to train a model.
#
# #### Passing arguments to an experiment script
#
# To pass parameter values to a script being run in an experiment, you need to provide an arguments value containing a list of comma-separated arguments and their values to the ScriptRunConfig, like this:
#
# ##### Create a script config
# ```
# script_config = ScriptRunConfig(source_directory='training_folder',
# script='training.py',
# arguments = ['--reg-rate', 0.1],
# environment=sklearn_env)
# ```
# +
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.widgets import RunDetails

# Create a Python environment for the experiment
sklearn_env = Environment("sklearn-env")

# Ensure the required packages are installed (we need scikit-learn and Azure ML defaults)
packages = CondaDependencies.create(pip_packages=['scikit-learn','azureml-defaults'])
sklearn_env.python.conda_dependencies = packages

# Create a script config pointing at the folder and script written above.
script_config = ScriptRunConfig(source_directory=training_folder,
                                script='diabetes_training.py',
                                environment=sklearn_env)

# submit the experiment run (the conda env is built on first use, then cached)
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)

# Show the running experiment run in the notebook widget
RunDetails(run).show()

# Block until the experiment run has completed
run.wait_for_completion()
# -
# You can retrieve the metrics and outputs from the **Run** object.
# Get logged metrics and files produced by the completed run.
metrics = run.get_metrics()
for key in metrics.keys():
    print(key, metrics.get(key))
print('\n')
for file in run.get_file_names():
    print(file)
# ## Register the trained model
#
# Note that the outputs of the experiment include the trained model file (**diabetes_model.pkl**). You can register this model in your Azure Machine Learning workspace, making it possible to track model versions and retrieve them later.
#
# #### Extra note:
#
# #### Retrieving model files
#
# After an experiment run has completed, you can use the run objects get_file_names method to list the files generated. Standard practice is for scripts that train models to save them in the run's outputs folder.
#
# You can also use the run object's download_file and download_files methods to download output files to the local file system.
#
# #### Registering a model
#
# Model registration enables you to track multiple versions of a model, and retrieve models for inferencing (predicting label values from new data). When you register a model, you can specify a name, description, tags, framework (such as Scikit-Learn or PyTorch), framework version, custom properties, and other useful metadata. Registering a model with the same name as an existing model automatically creates a new version of the model, starting with 1 and increasing in units of 1.
#
# To register a model from a local file, you can use the register method of the Model object as shown here:
#
# ```
# from azureml.core import Model
#
# model = Model.register(workspace=ws,
# model_name='classification_model',
# model_path='model.pkl', # local path
# description='A classification model',
# tags={'data-format': 'CSV'},
# model_framework=Model.Framework.SCIKITLEARN,
# model_framework_version='0.20.3')
# ```
#
# Alternatively, if you have a reference to the Run used to train the model, you can use its register_model method as shown here:
#
# ```
# run.register_model( model_name='classification_model',
# model_path='outputs/model.pkl', # run outputs path
# description='A classification model',
# tags={'data-format': 'CSV'},
# model_framework=Model.Framework.SCIKITLEARN,
# model_framework_version='0.20.3')
# ```
# +
from azureml.core import Model
# Register the model produced by the script run.
# Fetch the logged metrics once up front instead of calling the workspace
# service twice (the original called run.get_metrics() once per property).
metrics = run.get_metrics()
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
                   tags={'Training context': 'Script'},
                   properties={'AUC': metrics['AUC'], 'Accuracy': metrics['Accuracy']})
# List registered models, with every tag and property of each version.
for model in Model.list(ws):
    print(model.name, 'version:', model.version)
    for tag_name in model.tags:
        tag = model.tags[tag_name]
        print('\t', tag_name, ':', tag)
    for prop_name in model.properties:
        prop = model.properties[prop_name]
        print('\t', prop_name, ':', prop)
    print('\n')
# -
# ## Create a parameterized training script
#
# You can increase the flexibility of your training experiment by adding parameters to your script, enabling you to repeat the same training experiment with different settings. In this case, you'll add a parameter for the regularization rate used by the logistic regression algorithm when training the model.
#
# Again, lets start by creating a folder for the parameterized script and the training data.
# +
# Stage a local folder that will be uploaded as the experiment's source
# directory: the training script and its data file must live side by side.
import os, shutil
# Create a folder for the experiment files
training_folder = 'diabetes-training-params'
os.makedirs(training_folder, exist_ok=True)
# Copy the data file into the experiment folder
shutil.copy('data/diabetes.csv', os.path.join(training_folder, "diabetes.csv"))
# -
# Now let's create a script with an argument for the regularization rate hyperparameter. The argument is read using a Python **argparse.ArgumentParser** object.
# +
# %%writefile $training_folder/diabetes_training.py
# Parameterized training script: trains a logistic-regression classifier on
# the diabetes dataset, logging the regularization rate, accuracy and AUC to
# the Azure ML run, and saving the model to the run's outputs folder.
# Import libraries
from azureml.core import Run
import pandas as pd
import numpy as np
import joblib
import os
import argparse
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# Get the experiment run context
run = Run.get_context()
# Set regularization hyperparameter (supplied as --reg_rate by the submitter)
parser = argparse.ArgumentParser()
parser.add_argument('--reg_rate', type=float, dest='reg', default=0.01)
args = parser.parse_args()
reg = args.reg
# load the diabetes dataset
print("Loading Data...")
diabetes = pd.read_csv('diabetes.csv')
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
# FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# float is the documented replacement and behaves identically here.
run.log('Regularization Rate', float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test, y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', float(auc))
# Save the trained model into outputs/ so it is uploaded with the run
os.makedirs('outputs', exist_ok=True)
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete()
# -
# ## Run the script with arguments
#
# You run the script as an experiment like you did previously, reusing the environment you created; but this time you must provide the **--reg_rate** parameter that the script expects as an argument.
# +
# Create a script config
# NOTE(review): ScriptRunConfig, Experiment, ws, sklearn_env and RunDetails
# are defined in earlier cells of this notebook; this cell reuses them.
script_config = ScriptRunConfig(source_directory=training_folder,
                                script='diabetes_training.py',
                                arguments = ['--reg_rate', 0.1],
                                environment=sklearn_env)
# submit the experiment, then show live details and block until it finishes
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
# -
# Once again, we can get the metrics and outputs from the completed run.
# Get logged metrics
metrics = run.get_metrics()
for key in metrics.keys():
    # Print each metric name alongside its logged value.
    print(key, metrics.get(key))
print('\n')
# List every file the run produced (outputs/ plus system logs).
for file in run.get_file_names():
    print(file)
# ## Register a new version of the model
#
# Now that you've trained a new model, you can register it as a new version in the workspace.
# +
from azureml.core import Model
# Register a new version of the model from the parameterized run.
# Fetch the logged metrics once up front instead of calling the workspace
# service twice (the original called run.get_metrics() once per property).
metrics = run.get_metrics()
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
                   tags={'Training context': 'Parameterized script'},
                   properties={'AUC': metrics['AUC'], 'Accuracy': metrics['Accuracy']})
# List registered models, with every tag and property of each version.
for model in Model.list(ws):
    print(model.name, 'version:', model.version)
    for tag_name in model.tags:
        tag = model.tags[tag_name]
        print('\t', tag_name, ':', tag)
    for prop_name in model.properties:
        prop = model.properties[prop_name]
        print('\t', prop_name, ':', prop)
    print('\n')
# -
# You can also view registered models in your workspace on the **Models** page in [Azure Machine Learning studio](https://ml.azure.com).
#
# If you've finished exploring, you can close this notebook and shut down your compute instance.
# --- end of "05 - Train Models.ipynb" (concatenation artifact; cell separator lost) ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
## Reimports graph data to nx v 2.1
# +
## Course Data Camp notes
# datasets used in tutorial cannot be imported possibly due to
# nx version was < 1.9, running in python v e.g. 3.1, which is not supported in conda,
# and higher python versions are not supported in nx
## SOLUTION:
## nx ver 1.11 -> py 3.6 compatible
# conda create -name nx11py36 python=3.6
# conda install matplotlib
# conda install networkx=1.11
# conda config --add channels conda-forge
# conda install nxviz
# +
## Data loading issues:
# https://networkx.github.io/documentation/stable/release/migration_guide_from_1.x_to_2.0.html
# V1 and V2 not compatible
# Using Pickle with v1 and v2
# The Pickle protocol does not store class methods, only the data. So if you write a pickle file with v1 you should not expect to read it into a v2 Graph. If this happens to you, read it in with v1 installed and write a file with the node and edge information. You can read that into a config with v2 installed and then add those nodes and edges to a fresh graph. Try something similar to this:
# >>> # in v1.x
# >>> pickle.dump([G.nodes(data=True), G.edges(data=True)], file)
# >>> # then in v2.x
# >>> nodes, edges = pickle.load(file)
# >>> G = nx.Graph()
# >>> G.add_nodes_from(nodes)
# >>> G.add_edges_from(edges)
# +
# Quick search plotting tools:
# plotly
# https://plot.ly/python/3d-network-graph/
# networkx
# https://python-graph-gallery.com/network-chart/
# https://python-graph-gallery.com/chord-diagram/
# Network analysis in Python 1 - overview of important information
# <NAME>
# has also other interesting tutorials
# https://github.com/ericmjl/Network-Analysis-Made-Simple
# and video tutorials for this
# shorter (2.5h): https://www.youtube.com/watch?v=E4VKzFmByhE
# longer (6.5h): https://www.youtube.com/watch?v=HkbMUrgzwMs, https://www.youtube.com/watch?v=MRCLwmYTVpc
# Connected necessary content:
# https://www.datacamp.com/community/tutorials/python-list-comprehension
# +
import matplotlib.pyplot as plt
import networkx as nx
import nxviz as nv
from nxviz import MatrixPlot
from nxviz import ArcPlot
from nxviz import CircosPlot
from itertools import combinations
from collections import defaultdict
import pickle
from datetime import date
import pandas as pd
import numpy as np
# -
# Confirm which NetworkX version is active (cells below mix 1.x and 2.x APIs).
nx.__version__
# +
# download files from datacamp
# T = pickle.load(open('ego-twitter.p','rb'))
# G = pickle.load(open('github_users.p','rb'))
# load nodelist and edgelist saved in Networks_pytohn-datasets-19_7_18.ipynb
# gives error
#T = nx.read_edgelist('ego-twitter_edgelist.tsv', comments='#', delimiter='\t', create_using=nx.DiGraph(), nodetype=None, data=True, edgetype=None, encoding='utf-8')
# load lists to dataframes and from those build networks
# NOTE(review): the TSV files are expected next to the notebook (relative paths).
G_node_list = pd.read_csv('github_users_nodelist.tsv',sep='\t')
G_edge_list = pd.read_csv('github_users_edgelist.tsv',sep='\t')
T_node_list = pd.read_csv('ego-twitter_nodelist.tsv',sep='\t')
T_edge_list = pd.read_csv('ego-twitter_edgelist.tsv',sep='\t')
# -
# +
## function build graph from data frame
# NOTE(review): G_node_list[1] selects the DataFrame column labelled 1 (the
# second unnamed column of the TSV) — confirm this is the node-id column.
G = nx.Graph()
G.add_nodes_from(G_node_list[1].values)
for n in G.nodes():  # FIX: the original line was missing the trailing colon (SyntaxError)
    # Tag every node with the same bipartite-set marker.
    G.nodes[n]['bipartite'] = 714
## make function to make data frame from nx graph
# TODO: vectorization
def frames_to_graph(T):
    """Convert a NetworkX graph `T` into two pandas DataFrames.

    NOTE(review): despite the name, this goes graph -> frames. It returns
    (node_frame, edge_frame): node_frame has one row per node (a 'node'
    column plus one column per node-attribute key); edge_frame has one row
    per edge ('node1', 'node2' plus one column per edge-attribute key).

    NOTE(review): the attribute keys are read from element [1] — the SECOND
    node/edge — so this assumes at least two nodes/edges and that all of
    them carry the same attribute keys. TODO confirm against the data.
    """
    # nodes
    T_node_list = list(T.nodes(data=True))
    T_node_list_pd = pd.DataFrame(T_node_list)
    data_keys = list(T_node_list[1][1].keys())
    if (len(data_keys) > 0):
        # Create one empty column per attribute key ...
        for ii in range(len(data_keys)):
            T_node_list_pd[data_keys[ii]] = ""
        # ... then fill it cell by cell from each node's attribute dict.
        for i in range(len(T_node_list)):
            for ii in range(len(data_keys)):
                T_node_list_pd.loc[i,data_keys[ii]] = T_node_list[i][1][data_keys[ii]]
    # Drop the raw attribute-dict column and name the node-id column.
    T_node_list_pd = T_node_list_pd.drop(columns=1).rename(columns={0: "node"})
    # edges (same pattern; attribute dict is the third tuple element)
    T_edge_list = list(T.edges(data=True))
    T_edge_list_pd = pd.DataFrame(T_edge_list)
    data_keys = list(T_edge_list[1][2].keys())
    if (len(data_keys) > 0):
        for ii in range(len(data_keys)):
            T_edge_list_pd[data_keys[ii]] = ""
        for i in range(len(T_edge_list)):
            for ii in range(len(data_keys)):
                T_edge_list_pd.loc[i,data_keys[ii]] = T_edge_list[i][2][data_keys[ii]]
    T_edge_list_pd = T_edge_list_pd.drop(columns=2).rename(columns={0: "node1", 1:"node2"})
    return T_node_list_pd, T_edge_list_pd
# -
# Inspect the raw edge columns and a sample node attribute.
[G_edge_list[1].values,G_edge_list[2].values]
tmp = G_node_list.iloc[0,2]
## test drawing
# NOTE(review): T is only defined if one of the loading cells above was run.
nx.draw(T)
plt.show()
# +
### Chapter 1
# Network visualization
# matrix
# arc
# circos
# nxviz plotting tool developed by lecturer
# +
# Basics of NetworkX API
# What is the size of the graph T, the type of T.nodes(), and the data structure of the
# third element of the last entry of T.edges(data=True)? The len() and type() functions will be useful here.
# To access the last entry of T.edges(data=True), you can use T.edges(data=True)[-1].
# Draw the graph to screen
# nx.draw(T_sub)
# plt.show()
T
# -
#T.nodes(data=True)
# +
#T.edges(data=True)
# +
# Queries on a graph
# Now that you know some basic properties of the graph and have practiced using NetworkX's drawing facilities to visualize components of it, it's time to explore how you can query it for nodes and edges. Specifically, you're going to look for "nodes of interest" and "edges of interest". To achieve this, you'll make use of the .nodes() and .edges() methods that Eric went over in the video. The .nodes() method returns a list of nodes, while the .edges() method returns a list of tuples, in which each tuple shows the nodes that are present on that edge. Recall that passing in the keyword argument data=True in these methods retrieves the corresponding metadata associated with the nodes and edges as well.
# You'll write list comprehensions to effectively build these queries in one line. For a refresher on list comprehensions, refer to Part 2 of DataCamp's Python Data Science Toolbox course. Here's the recipe for a list comprehension:
# [ output expression for iterator variable in iterable if predicate expression ].
# You have to fill in the _iterable_ and the _predicate expression_. Feel free to prototype your answer by exploring the graph in the IPython Shell before submitting your solution.
# Use a list comprehension to get the nodes of interest: noi
# Twitter users whose 'occupation' metadata is 'scientist'.
noi = [n for n, d in T.nodes(data=True) if d['occupation'] == 'scientist']
# Use a list comprehension to get the edges of interest: eoi
# Edges whose 'date' attribute predates 2010-01-01 (datetime.date comparison).
eoi = [(u, v) for u, v, d in T.edges(data=True) if d['date'] < date(2010,1,1)]
# +
# Specifying a weight on edges
# Weights can be added to edges in a graph, typically indicating the "strength" of an edge. In NetworkX, the weight is indicated by the 'weight' key in the metadata dictionary.
# Before attempting the exercise, use the IPython Shell to access the dictionary metadata of T and explore it, for instance by running the commands T.edge[1][10] and then T.edge[10][1]. Note how there's only one field, and now you're going to add another field, called 'weight'.
# Set the weight of the edge
# FIX: Graph.edge was removed in NetworkX 2.x; the adjacency view T[u][v]
# reads/writes edge attributes identically in both 1.x and 2.x.
T[1][10]['weight'] = 2
# Iterate over all the edges (with metadata)
for u, v, d in T.edges(data=True):
    # Check if node 293 is involved
    if 293 in [u, v]:
        # Set the weight to 1.1
        T[u][v]['weight'] = 1.1
# +
# Checking whether there are self-loops in the graph
# As Eric discussed, NetworkX also allows edges that begin and end on the same node; while this would be non-intuitive for a social network graph, it is useful to model data such as trip networks, in which individuals begin at one location and end in another.
# It is useful to check for this before proceeding with further analyses, and NetworkX graphs provide a method for this purpose: .number_of_selfloops().
# In this exercise as well as later ones, you'll find the assert statement useful. An assertion checks whether the statement placed after it evaluates to True, otherwise it will raise an AssertionError.
# To begin, use the .number_of_selfloops() method on T in the IPython Shell to get the number of edges that begin and end on the same node. A number of self-loops have been synthetically added to the graph. Your job in this exercise is to write a function that returns these edges.
# Define find_selfloop_nodes()
def find_selfloop_nodes(G):
    """Return the list of nodes that have a self-loop (an edge u-u) in graph G."""
    # A self-loop is an edge whose two endpoints coincide; collect the
    # endpoint of every such edge, preserving edge order.
    return [endpoint for endpoint, other in G.edges() if endpoint == other]
# Check whether number of self loops equals the number of nodes in self loops
# NOTE(review): Graph.number_of_selfloops() exists in NetworkX 1.x/2.x but was
# removed in 3.x (use nx.number_of_selfloops(T) there).
assert T.number_of_selfloops() == len(find_selfloop_nodes(T))
# +
# Convert T to a matrix format: A
A = nx.to_numpy_matrix(T)
# Convert A back to the NetworkX form as a directed graph: T_conv
T_conv = nx.from_numpy_matrix(A, create_using=nx.DiGraph())
# Check that the `category` metadata field is lost from each node
# (only the edge 'weight' survives the round trip through the matrix).
for n, d in T_conv.nodes(data=True):
    assert 'category' not in d.keys()
# +
# Visualizing using nxviz
# It is time to try your first "fancy" graph visualization method: a matrix plot. To do this, nxviz provides a MatrixPlot object.
# nxviz is a package for visualizing graphs in a rational fashion. Under the hood, the MatrixPlot utilizes nx.to_numpy_matrix(G), which returns the matrix form of the graph. Here, each node is one column and one row, and an edge between the two nodes is indicated by the value 1. In doing so, however, only the weight metadata is preserved; all other metadata is lost, as you'll verify using an assert statement.
# A corresponding nx.from_numpy_matrix(A) allows one to quickly create a graph from a NumPy matrix. The default graph type is Graph(); if you want to make it a DiGraph(), that has to be specified using the create_using keyword argument, e.g. (nx.from_numpy_matrix(A, create_using=nx.DiGraph)).
# One final note, matplotlib.pyplot and networkx have already been imported as plt and nx, respectively, and the graph T has been pre-loaded. For simplicity and speed, we have sub-sampled only 100 edges from the network.
# Create the MatrixPlot object: m
m = nv.MatrixPlot(T)
# Draw m to the screen
m.draw()
# Display the plot
plt.show()
# +
# Create the CircosPlot object: c
c = CircosPlot(T)
# Draw c to the screen
c.draw()
# Display the plot
plt.show()
# +
# Create the un-customized ArcPlot object: a
a = ArcPlot(T)
# Draw a to the screen
a.draw()
# Display the plot
plt.show()
# +
# Create the customized ArcPlot object: a2
# Nodes are ordered and colored by their 'category' metadata attribute.
a2 = ArcPlot(T, node_order = 'category', node_color = 'category')
# Draw a2 to the screen
a2.draw()
# Display the plot
plt.show()
# +
### Chapter 2
# neighbors
# degree
# betweenness
# centrality
# +
# Compute number of neighbors for each node
# How do you evaluate whether a node is an important one or not? There are a few ways to do so, and here, you're going to look at one metric: the number of neighbors that a node has.
# Every NetworkX graph G exposes a .neighbors(n) method that returns a list of nodes that are the neighbors of the node n. To begin, use this method in the IPython Shell on the Twitter network T to get the neighbors of of node 1. This will get you familiar with how the function works. Then, your job in this exercise is to write a function that returns all nodes that have m neighbors.
# Define nodes_with_m_nbrs()
def nodes_with_m_nbrs(G, m):
    """
    Returns the set of all nodes in graph G that have exactly m neighbors.

    FIX: G.neighbors(n) returns a list in NetworkX 1.x but an iterator in
    2.x, where len() on it raises TypeError; materializing with list()
    works identically under both versions.
    """
    nodes = set()
    # Iterate over all nodes in G
    for n in G.nodes():
        # Check if the number of neighbors of n matches m
        if len(list(G.neighbors(n))) == m:
            # Add the node n to the set
            nodes.add(n)
    # Return the nodes with m neighbors
    return nodes
# Compute and print all nodes in T that have 6 neighbors
# (a set of node ids; empty if no node has exactly six neighbors).
six_nbrs = nodes_with_m_nbrs(T, 6)
print(six_nbrs)
# +
# Compute degree distribution
# The number of neighbors that a node has is called its "degree", and it's possible to compute the degree distribution across the entire graph. In this exercise, your job is to compute the degree distribution across T.
# Compute the degree of every node: degrees
# FIX: list() materializes the NetworkX 2.x neighbors iterator so len()
# works under both 1.x (list) and 2.x (generator).
degrees = [len(list(T.neighbors(n))) for n in T.nodes()]
# Print the degrees
print(degrees)
# +
# Degree centrality distribution
# The degree of a node is the number of neighbors that it has. The degree centrality is the number of neighbors divided by all possible neighbors that it could have. Depending on whether self-loops are allowed, the set of possible neighbors a node could have could also include the node itself.
# The nx.degree_centrality(G) function returns a dictionary, where the keys are the nodes and the values are their degree centrality values.
# Compute the degree centrality of the Twitter network: deg_cent
deg_cent = nx.degree_centrality(T)
# Plot a histogram of the degree centrality distribution of the graph.
plt.figure()
plt.hist(list(deg_cent.values()))
plt.show()
# Plot a histogram of the degree distribution of the graph
plt.figure()
plt.hist(degrees)
plt.show()
# Plot a scatter plot of the centrality distribution and the degree distribution
# (these should fall on a line: degree centrality = degree / (N - 1)).
plt.figure()
plt.scatter(degrees,list(deg_cent.values()))
plt.show()
# +
# Shortest Path
def path_exists(G, node1, node2):
    """
    Checks whether a path exists between two nodes (node1, node2) in graph G
    using a breadth-first search. Prints a message and returns True/False.
    """
    visited_nodes = set()
    queue = [node1]
    for node in queue:
        # FIX: materialize the neighbors. In NetworkX 2.x G.neighbors()
        # returns a generator, and the membership test below would exhaust
        # it before queue.extend() ran, silently dropping nodes from the
        # search. (Also removed the unreachable `break` after `return`.)
        neighbors = list(G.neighbors(node))
        if node2 in neighbors:
            print('Path exists between nodes {0} and {1}'.format(node1, node2))
            return True
        else:
            visited_nodes.add(node)
            queue.extend([n for n in neighbors if n not in visited_nodes])
        # Check to see if the final element of the queue has been reached
        if node == queue[-1]:
            print('Path does not exist between nodes {0} and {1}'.format(node1, node2))
            return False
# +
# NetworkX betweenness centrality on a social network
# Betweenness centrality is a node importance metric that uses information about the shortest paths in a network. It is defined as the fraction of all possible shortest paths between any pair of nodes that pass through the node.
# NetworkX provides the nx.betweenness_centrality(G) function for computing the betweenness centrality of every node in a graph, and it returns a dictionary where the keys are the nodes and the values are their betweenness centrality measures.
# Compute the betweenness centrality of T: bet_cen
bet_cen = nx.betweenness_centrality(T)
# Compute the degree centrality of T: deg_cen
deg_cen = nx.degree_centrality(T)
# Create a scatter plot of betweenness centrality and degree centrality
# (both dicts are keyed by node, so the value lists are aligned).
plt.scatter(list(bet_cen.values()),list(deg_cen.values()))
# Display the plot
plt.show()
# +
# Deep dive - Twitter network
# You're going to now take a deep dive into a Twitter network, which will help reinforce what you've learned earlier. First, you're going to find the nodes that can broadcast messages very efficiently to lots of people one degree of separation away.
# NetworkX has been pre-imported for you as nx.
# Define find_nodes_with_highest_deg_cent()
def find_nodes_with_highest_deg_cent(G):
    """Return the set of nodes in G with the highest degree centrality."""
    # Degree centrality of every node, keyed by node.
    centrality = nx.degree_centrality(G)
    # The largest centrality value present in the graph.
    top_value = max(centrality.values())
    # Every node whose centrality ties for that maximum.
    return {node for node, value in centrality.items() if value == top_value}
# Find the node(s) that has the highest degree centrality in T: top_dc
top_dc = find_nodes_with_highest_deg_cent(T)
print(top_dc)
# Write the assertion statement
# Every returned node must tie for the maximum degree centrality.
for node in top_dc:
    assert nx.degree_centrality(T)[node] == max(nx.degree_centrality(T).values())
# Deep dive - Twitter network part II
# Next, you're going to do an analogous deep dive on betweenness centrality! Just a few hints to help you along: remember that betweenness centrality is computed using nx.betweenness_centrality(G).
# Define find_node_with_highest_bet_cent()
def find_node_with_highest_bet_cent(G):
    """Return the set of nodes in G with the highest betweenness centrality."""
    # Betweenness centrality of every node, keyed by node.
    centrality = nx.betweenness_centrality(G)
    # The largest centrality value present in the graph.
    top_value = max(centrality.values())
    # Every node whose centrality ties for that maximum.
    return {node for node, value in centrality.items() if value == top_value}
# Use that function to find the node(s) that has the highest betweenness centrality in the network: top_bc
top_bc = find_node_with_highest_bet_cent(T)
# Write an assertion statement that checks that the node(s) is/are correctly identified.
# Every returned node must tie for the maximum betweenness centrality.
for node in top_bc:
    assert nx.betweenness_centrality(T)[node] == max(nx.betweenness_centrality(T).values())
# +
### Chapter 3
# cliques (simple = edge, simplest complex = triangle)
# maximal clique (maximal fully connected subgraph)
# community
# subgraph
# +
# Identifying triangle relationships
# Now that you've learned about cliques, it's time to try leveraging what you know to find structures in a network. Triangles are what you'll go for first. We may be interested in triangles because they're the simplest complex clique. Let's write a few functions; these exercises will bring you through the fundamental logic behind network algorithms.
# In the Twitter network, each node has an 'occupation' label associated with it, in which the Twitter user's work occupation is divided into celebrity, politician and scientist. One potential application of triangle-finding algorithms is to find out whether users that have similar occupations are more likely to be in a clique with one another.
# Define is_in_triangle()
def is_in_triangle(G, n):
    """
    Checks whether a node `n` in graph `G` is in a triangle relationship or not.

    Returns a boolean.
    """
    # n closes a triangle iff some pair of its neighbors is itself connected;
    # any() short-circuits on the first such pair, like the original break.
    return any(G.has_edge(a, b) for a, b in combinations(G.neighbors(n), 2))
# +
# Finding nodes involved in triangles
# NetworkX provides an API for counting the number of triangles that every node is involved in: nx.triangles(G). It returns a dictionary of nodes as the keys and number of triangles as the values. Your job in this exercise is to modify the function defined earlier to extract all of the nodes involved in a triangle relationship with a given node.
# Write a function that identifies all nodes in a triangle relationship with a given node.
def nodes_in_triangle(G, n):
    """
    Returns the nodes in a graph `G` that are involved in a triangle
    relationship with the node `n` (always including `n` itself).
    """
    triangle_nodes = set([n])
    # Any connected pair of n's neighbors closes a triangle with n;
    # record both endpoints of every such pair.
    for nbr_a, nbr_b in combinations(G.neighbors(n), 2):
        if G.has_edge(nbr_a, nbr_b):
            triangle_nodes.update((nbr_a, nbr_b))
    return triangle_nodes
# Write the assertion statement
# Node 1's triangle neighborhood in T (including node 1 itself) has 35 members.
assert len(nodes_in_triangle(T, 1)) == 35
# +
# Finding open triangles
# Let us now move on to finding open triangles! Recall that they form the basis of friend recommendation systems; if "A" knows "B" and "A" knows "C", then it's probable that "B" also knows "C".
# Define node_in_open_triangle()
def node_in_open_triangle(G, n):
    """
    Checks whether pairs of neighbors of node `n` in graph `G` are in an
    'open triangle' relationship with node `n`.
    """
    # An open triangle exists when some pair of n's neighbors is NOT
    # directly connected; any() short-circuits like the original break.
    return any(not G.has_edge(a, b) for a, b in combinations(G.neighbors(n), 2))
# Compute the number of open triangles in T
# (counts NODES at the apex of at least one open triangle, not triangles).
num_open_triangles = 0
# Iterate over all the nodes in T
for n in T.nodes():
    # Check if the current node is in an open triangle
    if node_in_open_triangle(T, n):
        # Increment num_open_triangles
        num_open_triangles += 1
print(num_open_triangles)
# +
# Finding all maximal cliques of size "n"
# Now that you've explored triangles (and open triangles), let's move on to the concept of maximal cliques. Maximal cliques are cliques that cannot be extended by adding an adjacent edge, and are a useful property of the graph when finding communities. NetworkX provides a function that allows you to identify the nodes involved in each maximal clique in a graph: nx.find_cliques(G). Play around with the function by using it on T in the IPython Shell, and then try answering the exercise.
# Define maximal_cliques()
def maximal_cliques(G, size):
    """
    Finds all maximal cliques in graph `G` that are of size `size`.
    """
    # nx.find_cliques yields each maximal clique as a list of nodes;
    # keep only those whose member count matches the requested size.
    return [clique for clique in nx.find_cliques(G) if len(clique) == size]
# Check that there are 33 maximal cliques of size 3 in the graph T
assert len(maximal_cliques(T, 3)) == 33
# +
# Subgraphs I
# There may be times when you just want to analyze a subset of nodes in a network. To do so, you can copy them out into another graph object using G.subgraph(nodes), which returns a new graph object (of the same type as the original graph) that is comprised of the iterable of nodes that was passed in.
# The three node ids whose neighborhoods are extracted and drawn below.
nodes_of_interest = [29, 38, 42]
# Define get_nodes_and_nbrs()
def get_nodes_and_nbrs(G, nodes_of_interest):
    """
    Returns a subgraph of the graph `G` with only the `nodes_of_interest`
    and their neighbors.
    """
    nodes_to_draw = []
    for node in nodes_of_interest:
        # Keep the node itself, followed by every one of its neighbors
        # (same order the original append loop produced).
        nodes_to_draw.append(node)
        nodes_to_draw.extend(G.neighbors(node))
    return G.subgraph(nodes_to_draw)
# Extract the subgraph with the nodes of interest: T_draw
T_draw = get_nodes_and_nbrs(T, nodes_of_interest)
# Draw the subgraph to the screen, labelling each node with its id
nx.draw(T_draw, with_labels=True)
plt.show()
# +
# Subgraphs II
# In the previous exercise, we gave you a list of nodes whose neighbors we asked you to extract.
# Let's try one more exercise in which you extract nodes that have a particular metadata property and their neighbors. This should hark back to what you've learned about using list comprehensions to find nodes. The exercise will also build your capacity to compose functions that you've already written before.
# Extract the nodes of interest: nodes
# All Twitter users whose 'occupation' metadata is 'celebrity'.
nodes = [n for n, d in T.nodes(data=True) if d['occupation'] == 'celebrity']
# Create the set of nodes: nodeset
nodeset = set(nodes)
# Iterate over nodes
for n in nodes:
    # Compute the neighbors of n: nbrs
    nbrs = T.neighbors(n)
    # Compute the union of nodeset and nbrs: nodeset
    # (set.union accepts any iterable, so this works with both the 1.x list
    # and the 2.x generator returned by T.neighbors)
    nodeset = nodeset.union(nbrs)
# Compute the subgraph using nodeset: T_sub
T_sub = T.subgraph(nodeset)
# Draw T_sub to the screen
nx.draw(T_sub, with_labels=True)
plt.show()
# +
### Chapter 4
# application of methods, analysis of networks
# +
# Characterizing the network
# Basic size stats of the GitHub collaboration graph.
len(G.nodes())
len(G.edges())
# Plot the degree centrality distribution of the GitHub collaboration network
plt.hist(list(nx.degree_centrality(G).values()))
plt.show()
# Plot the betweenness centrality distribution of the GitHub collaboration network
plt.hist(list(nx.betweenness_centrality(G).values()))
plt.show()
# +
# MatrixPlot
# Let's now practice making some visualizations. The first one will be the MatrixPlot. In a MatrixPlot, the matrix is the representation of the edges.
# Python's built-in sorted() function takes an iterable and returns a sorted list (in ascending order, by default). Therefore, to access the largest connected component subgraph, the statement is sliced with [-1].
# Calculate the largest connected component subgraph: largest_ccs
# NOTE(review): nx.connected_component_subgraphs() was removed in NetworkX
# 2.4; it exists in the 1.11/2.1 environments this notebook targets.
largest_ccs = sorted(nx.connected_component_subgraphs(G), key=lambda x: len(x))[-1]
# Create the customized MatrixPlot object: h, grouped by the 'grouping' attribute
h = MatrixPlot(graph=largest_ccs, node_grouping='grouping')
# Draw the MatrixPlot to the screen
h.draw()
plt.show()
# +
# ArcPlot
# Next up, let's use the ArcPlot to visualize the network. You're going to practice sorting the nodes in the graph as well.
# Iterate over all the nodes in G, including the metadata
for n, d in G.nodes(data=True):
    # Calculate the degree of each node: G.node[n]['degree']
    # NOTE(review): G.node is the 1.x/2.x node-attribute dict; it was
    # removed in NetworkX 3.x (use G.nodes[n] there).
    G.node[n]['degree'] = G.degree(n) #nx.degree(G,n)
# Create the ArcPlot object: a, ordering nodes by the degree just recorded
a = ArcPlot(graph=G, node_order='degree')
# Draw the ArcPlot to the screen
a.draw()
plt.show()
# +
# CircosPlot
# Iterate over all the nodes, including the metadata
for n, d in G.nodes(data=True):
    # Calculate the degree of each node: G.node[n]['degree']
    G.node[n]['degree'] = nx.degree(G,n)
# Create the CircosPlot object: c
# Nodes are ordered by degree and grouped/colored by their 'grouping' attribute.
c = CircosPlot(G, node_order='degree', node_grouping='grouping', node_color='grouping')
# Draw the CircosPlot object to the screen
c.draw()
plt.show()
# +
# Finding cliques
# You're now going to practice finding cliques in G. Recall that cliques are "groups of nodes that are fully connected to one another", while a maximal clique is a clique that cannot be extended by adding another node in the graph.
# Calculate the maximal cliques in G: cliques
cliques = nx.find_cliques(G)
# Count and print the number of maximal cliques in G
# NOTE(review): `cliques` above is never consumed; the count below
# recomputes the cliques from scratch with a second find_cliques call.
print(len(list(nx.find_cliques(G))))
#Great work! Let's continue by finding a particular maximal clique, and then plotting that clique.
# Import necessary modules
import networkx as nx
import matplotlib.pyplot as plt
# Find the author(s) that are part of the largest maximal clique: largest_clique
largest_clique = sorted(nx.find_cliques(G), key=lambda x:len(x))[-1]
# Create the subgraph of the largest_clique: G_lc
G_lc = G.subgraph(largest_clique)
# Create the CircosPlot object: c
c = CircosPlot(G_lc)
# Draw the CircosPlot to the screen
c.draw()
plt.show()
# +
# Finding important collaborators
# Almost there! You'll now look at important nodes once more. Here, you'll make use of the degree_centrality() and betweenness_centrality() functions in NetworkX to compute each of the respective centrality scores, and then use that information to find the "important nodes". In other words, your job in this exercise is to find the user(s) that have collaborated with the most number of users.
# Compute the degree centralities of G: deg_cent
deg_cent = nx.degree_centrality(G)
# Compute the maximum degree centrality: max_dc
max_dc = max(deg_cent.values())
# Find the user(s) that have collaborated the most: prolific_collaborators
prolific_collaborators = [n for n, dc in deg_cent.items() if dc == max_dc]
# Print the most prolific collaborator(s)
print(prolific_collaborators)
# +
# Characterizing editing communities
# You're now going to combine what you've learned about the BFS algorithm and concept of maximal cliques to visualize the network with an ArcPlot.
# The largest maximal clique in the Github user collaboration network has been assigned to the subgraph G_lmc.
# Identify the largest maximal clique: largest_max_clique
largest_max_clique = set(sorted(nx.find_cliques(G), key=lambda x: len(x))[-1])
# Create a subgraph from the largest_max_clique: G_lmc
G_lmc = G.subgraph(largest_max_clique)
# Go out 1 degree of separation
for node in G_lmc.nodes():
G_lmc.add_nodes_from(G.neighbors(node))
G_lmc.add_edges_from(zip([node]*len(G.neighbors(node)), G.neighbors(node)))
# Record each node's degree centrality score
for n in G_lmc.nodes():
G_lmc.node[n]['degree centrality'] = nx.degree_centrality(G_lmc)[n]
# Create the ArcPlot object: a
a = ArcPlot(graph=G_lmc, node_order='degree centrality')
# Draw the ArcPlot to the screen
a.draw()
plt.show()
# +
# Recommending co-editors who have yet to edit together
# Finally, you're going to leverage the concept of open triangles to recommend users on GitHub to collaborate!
# Initialize the defaultdict: recommended
recommended = defaultdict(int)
# Iterate over all the nodes in G
for n, d in G.nodes(data=True):
# Iterate over all possible triangle relationship combinations
for n1, n2 in combinations(G.neighbors(n), 2):
# Check whether n1 and n2 do not have an edge
if not G.has_edge(n1, n2):
# Increment recommended
recommended[(n1, n2)] += 1
# Identify the top 10 pairs of users
all_counts = sorted(recommended.values())
top10_pairs = [pair for pair, count in recommended.items() if count > all_counts[-10]]
print(top10_pairs)
# -
| datacamp_part1/Networks_python_part1-19_7_18.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys, os, re, csv, codecs, numpy as np, pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from sklearn import metrics
import seaborn as sns
# +
# load data
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# Drop the first column of test — presumably a saved index column; verify.
test = test.drop(test.columns[0], axis=1)
all_data = pd.concat([train, test])
# -
all_data
# +
# Data visualizations
# Total clean comments
#marking comments without any tags as "clean"
# NOTE(review): iloc[:,2:7] selects five columns; if the frame has the six
# usual tag columns (toxic ... identity_hate) this misses the last one — confirm
# the column layout of train.csv.
rowsums = all_data.iloc[:,2:7].sum(axis=1)
all_data['clean'] = np.logical_not(rowsums).astype('int')
x = all_data.iloc[:,2:].sum()
#count number of clean entries
print("Total comments ", len(all_data))
print("Total clean comments ", all_data['clean'].sum())
print("Total tags ", all_data.iloc[:,2:7].sum())
#plot
plt.figure(figsize=(8,4))
# NOTE(review): positional x/y arguments were removed from seaborn barplot in
# 0.12 — newer seaborn needs sns.barplot(x=x.index, y=x.values, alpha=0.8).
ax = sns.barplot(x.index, x.values, alpha=0.8)
plt.title("# per class")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Type ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
    height = rect.get_height()
    ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
# -
# The above visualization clearly shows that there is a class imbalance, even among the toxic types.
# check for nulls
train.isnull().any(),test.isnull().any()
# split data by class
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
y = train[list_classes].values
y_test = test[list_classes].values
list_sentences_train = train["comment_text"]
list_sentences_test = test["comment_text"]
# tokenize strings
max_features = 20000
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(list_sentences_train))
list_tokenized_train = tokenizer.texts_to_sequences(list_sentences_train)
list_tokenized_test = tokenizer.texts_to_sequences(list_sentences_test)
# pad tokens
maxlen = 100
X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)
X_te = pad_sequences(list_tokenized_test, maxlen=maxlen)
# visualize word distribution
totalNumWords = [len(one_comment) for one_comment in list_tokenized_train]
plt.hist(totalNumWords,bins = np.arange(0,410,10))#[0,50,100,150,200,250,300,350,400])#,450,500,550,600,650,700,750,800,850,900])
plt.show()
# +
# Define keras input layer
inp = Input(shape=(maxlen, )) #maxlen=100 as defined earlier
# define embedding layer
embed_size = 128
x = Embedding(max_features, embed_size)(inp)
# define lstm layer
x = LSTM(60, return_sequences=True,name='lstm_layer')(x)
# pooling & dropout
x = GlobalMaxPool1D()(x)
x = Dropout(0.1)(x)
# output layer
x = Dense(50, activation="relu")(x)
x = Dropout(0.1)(x)
# sigmoid for classification
# One independent sigmoid per label: multi-label, not softmax multi-class.
x = Dense(6, activation="sigmoid")(x)
# create model
model = Model(inputs=inp, outputs=x)
model.summary()
# -
# compile model
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
from keras.models import load_model
# NOTE(review): this discards the freshly-compiled model and resumes from the
# saved checkpoint, so the fit below continues training rather than starting
# fresh — confirm this is intended.
model = load_model('lstm_starter_model.h5')
# train model
batch_size = 1280
epochs = 2
model.fit(X_t,y, batch_size=batch_size, epochs=epochs, validation_split=0.1)
model.save('lstm_starter_model.h5')
batch_size = 1280
epochs = 2
predictions = model.predict(X_te, batch_size=batch_size, verbose=1)
# +
# NOTE(review): np.int is deprecated (removed in NumPy 1.24) — use int or
# np.int64 in the astype() calls below on newer NumPy.
print("roc auc score ", metrics.roc_auc_score(y_test, predictions))
print("accuracy ", metrics.accuracy_score(y_test, np.round(predictions).astype(np.int)))
print("precision score ", metrics.precision_score(y_test, np.round(predictions).astype(np.int), average='weighted'))
print("recall score ", metrics.recall_score(y_test, np.round(predictions).astype(np.int), average='weighted'))
print("f1 score ", metrics.f1_score(y_test, np.round(predictions).astype(np.int), average='weighted'))
print("coverage error ", metrics.coverage_error(y_test, predictions))
print("label ranking average precision score ", metrics.label_ranking_average_precision_score(y_test, predictions))
print("label ranking loss ", metrics.label_ranking_loss(y_test, predictions))
# -
# As we can see, the algorithm has a high AUROC but only average precision and recall, which may be due to the skewed data (the number of negative samples is much greater than the number of positive samples).
#
# Test with twitter data
# https://aaai.org/ocs/index.php/ICWSM/ICWSM17/paper/view/15665/14843
test_twitter = pd.read_csv('data/test_twitter.csv')
test_twitter.head()
list_twitter = test_twitter.tweet
# NOTE(review): re-fitting the tokenizer on the twitter text mutates the
# vocabulary learned from the training data, so token indices may no longer
# match what the model was trained on — confirm this is intended.
tokenizer.fit_on_texts(list(list_twitter))
list_tokenized_twitter = tokenizer.texts_to_sequences(list_twitter)
X_tw = pad_sequences(list_tokenized_twitter, maxlen=maxlen)
batch_size = 1280
epochs = 2
tweet_predictions = model.predict(X_tw, batch_size=batch_size, verbose=1)
# find the number of tweets tagged any type of toxic
toxic_tweets = list(filter(lambda x: any([j >= 0.5 for j in x]), tweet_predictions))
total_tweets = len(list_twitter)
toxic_percent = len(toxic_tweets) / total_tweets
# Collapse the six per-label scores to a single "any toxicity" score per tweet.
tweet_predictions_consolidated = [max(i) for i in tweet_predictions ]
test_classes = []
for i, r in test_twitter.iterrows():
    if r.hate_speech > r.neither or r.offensive_language > r.neither:
        test_classes.append(1)
    else:
        test_classes.append(0)
print("roc auc score ", metrics.roc_auc_score(test_classes, tweet_predictions_consolidated))
print("accuracy ", metrics.accuracy_score(test_classes, np.round(tweet_predictions_consolidated).astype(np.int)))
print("precision score ", metrics.precision_score(test_classes, np.round(tweet_predictions_consolidated).astype(np.int), average='weighted'))
print("recall score ", metrics.recall_score(test_classes, np.round(tweet_predictions_consolidated).astype(np.int), average='weighted'))
print("f1 score ", metrics.f1_score(test_classes, np.round(tweet_predictions_consolidated).astype(np.int), average='weighted'))
| Toxic comments - starter.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# # Automatic Differentiation (AD)
# We all know how to take derivatives.
# ```julia
# f(x) = 5*x^2 + 3
#
# df(x) = 10*x
#
# ddf(x) = 10
# ```
#
# The promise of AD is
#
# ```julia
# df(x) = derivative(f, x)
#
# ddf(x) = derivative(df, x)
# ```
# ### What AD is not
# (https://www.jmlr.org/papers/volume18/17-468/17-468.pdf)
# **Symbolic differentiation:** (at least not exactly)
# $$ \frac{d}{dx}x^n = n x^{n-1}. $$
#
# **Numerical differentiation:**
# $$ \frac{df}{dx} \approx \frac{f(x+h) - f(x)}{\Delta h} $$
# ## Forward mode AD
# Key to AD is the application of the chain rule
# $$\dfrac{d}{dx} f(g(x)) = \dfrac{df}{dg} \dfrac{dg}{dx}$$
#
# Consider the function $f(a,b) = \ln(ab + \sin(a))$.
# Example function f(a,b) = ln(ab + sin(a)) used throughout the AD demos.
f(a,b) = log(a*b + sin(a))
# Analytic ∂f/∂a (chain rule by hand) — the reference result the AD must match.
f_derivative(a,b) = 1/(a*b + sin(a)) * (b + cos(a))
a = 3.1
b = 2.4
f_derivative(a,b)
# Dividing the function into the elementary steps, it corresponds to the following "*computational graph*":
# <img src="imgs/comp_graph.svg" width=300px>
# Same computation as `f`, but spelled out as the elementary operations of the
# computational graph. Julia returns the last expression, i.e. log(ab + sin a).
function f_graph(a, b)
    product = a * b
    sine = sin(a)
    total = product + sine
    return log(total)
end
# Sanity check: the step-by-step graph version matches the one-line definition.
f(a,b) == f_graph(a,b)
# To calculate $\frac{\partial f}{\partial a}$ we have to apply the chain rule multiple times.
# $\dfrac{\partial f}{\partial a} = \dfrac{\partial f}{\partial c_4} \dfrac{\partial c_4}{\partial a} = \dfrac{\partial f}{\partial c_4} \left( \dfrac{\partial c_4}{\partial c_3} \dfrac{\partial c_3}{\partial a} \right) = \dfrac{\partial f}{\partial c_4} \left( \dfrac{\partial c_4}{\partial c_3} \left( \dfrac{\partial c_3}{\partial c_2} \dfrac{\partial c_2}{\partial a} + \dfrac{\partial c_3}{\partial c_1} \dfrac{\partial c_1}{\partial a}\right) \right)$
# Hand-written forward-mode pass through the graph of `f`: each elementary step
# carries its value together with its partial derivative w.r.t. `a` (the ϵ part).
# Returns the tuple (f(a,b), ∂f/∂a).
function f_graph_derivative(a, b)
    v1, d1 = a * b, b               # ∂(ab)/∂a = b
    v2, d2 = sin(a), cos(a)         # ∂sin(a)/∂a = cos(a)
    v3, d3 = v1 + v2, d1 + d2       # sum rule
    return log(v3), 1/v3 * d3       # chain rule through log
end
# Check the hand-written forward pass against the analytic derivative.
f_graph_derivative(a,b)[2] == f_derivative(a,b)
# **How can we automate this?**
# D for "dual number", invented by Clifford in 1873.
# Dual number: a value bundled with its derivative. Overloading arithmetic on D
# (below) makes derivatives propagate automatically — forward-mode AD.
struct D <: Number
x::Float64 # value
ϵ::Float64 # derivative component
end
# +
# Extend Base arithmetic to dual numbers: each operator applies the matching
# differentiation rule to the ϵ component.
import Base: +, *, /, -, sin, log, convert, promote_rule
a::D + b::D = D(a.x + b.x, a.ϵ + b.ϵ) # sum rule
a::D - b::D = D(a.x - b.x, a.ϵ - b.ϵ) # difference rule
a::D * b::D = D(a.x * b.x, a.x * b.ϵ + a.ϵ * b.x) # product rule
a::D / b::D = D(a.x / b.x, (b.x * a.ϵ - a.x * b.ϵ)/b.x^2) # quotient rule
sin(a::D) = D(sin(a.x), cos(a.x) * a.ϵ) # chain rule: (sin u)' = cos(u) u'
log(a::D) = D(log(a.x), 1/a.x * a.ϵ) # chain rule: (log u)' = u'/u
# Treat plain reals as constants (zero derivative) so mixed D/Number
# expressions like `a*b + sin(a)` promote and work transparently.
Base.convert(::Type{D}, x::Real) = D(x, zero(x))
Base.promote_rule(::Type{D}, ::Type{<:Number}) = D
# -
# Seed ∂/∂a by passing a as the dual a + 1ϵ; b stays a constant.
f(D(a,1), b)
# Boom! That was easy!
f(D(a,1), b).ϵ ≈ f_derivative(a,b)
# **How does this work?!**
# The trick of forward mode AD is to make the computer do the rewrite `f -> f_graph_derivative` for you (and then optimize the resulting code structure).
@code_typed f(D(a,1), b)
# While this is somewhat hard to parse, plugging these operations manually into each other we find that this code equals
#
# ```julia
# D.x = log(a.x*b + sin(a.x))
# D.ϵ = 1/(a.x*b + sin(a.x)) * (a.x*0 + (a.ϵ*b) + cos(a.x)*a.ϵ)
# ```
#
# which, if we drop `a.x*0`, set `a.ϵ = 1`, and rename `a.x` $\rightarrow$ `a`, reads
#
# ```julia
# D.x = log(a*b + sin(a))
# D.ϵ = 1/(a*b + sin(a)) * (b + cos(a))
# ```
# This precisely matches our definitions from above:
#
# ```julia
# f(a,b) = log(a*b + sin(a))
#
# f_derivative(a,b) = 1/(a*b + sin(a)) * (b + cos(a))
# ```
# Importantly, the compiler sees the entire "rewritten" code and can therefore apply optimizations. In this simple example, we find that the code produced by our simple Forward mode AD is essentially identical to the explicit implementation.
@code_llvm debuginfo=:none f_graph_derivative(a,b)
@code_llvm debuginfo=:none f(D(a,1), b)
# It's general:
# utility function for our small forward AD: evaluate f at x + 1ϵ and read off
# the ϵ (derivative) component.
derivative(f::Function, x::Number) = f(D(x, one(x))).ϵ
derivative(x->f(x,b), a)
derivative(x->3*x^2+4x+5, 2)
derivative(x->sin(x)*log(x), 3)
# Or as a function:
df(x) = derivative(a->f(a,b),x) # partial derivative wrt a
df(1.23)
# ## Taking derivatives of code: Babylonian sqrt
# > Repeat $t \leftarrow (t + x/t)/2$ until $t$ converges to $\sqrt{x}$.
# Babylonian (Heron's) method for √x: start from the mean of 1 and x, then
# iterate t ← (t + x/t)/2. N is the total iteration count (first guess included).
@inline function Babylonian(x; N = 10)
    t = (1 + x) / 2
    step = 2
    while step <= N
        t = (t + x / t) / 2
        step += 1
    end
    return t
end
Babylonian(2), √2
# +
using Plots
# Show convergence: each additional iteration hugs sqrt over a wider range.
xs = 0:0.01:49
p = plot(title = "Those Babylonians really knew what they were doing")
for i in 1:5
    plot!(p, xs, [Babylonian(x; N=i) for x in xs], label="Iteration $i")
end
plot!(p, xs, sqrt.(xs), label="sqrt", color=:black)
# -
# ## ... and now the derivative, automagically
# The same babylonian algorithm with no rewrite at all computes properly the derivative as the check shows.
Babylonian(D(5, 1))
√5, 0.5 / √5
# It just works and is efficient.
@code_native debuginfo=:none Babylonian(D(5, 1))
# Recursion? Works as well...
# Integer power by repeated multiplication; the literal 1 promotes to D for dual input.
pow(x, n) = n <= 0 ? 1 : x*pow(x, n-1)
derivative(x -> pow(x,3), 2)
# Build the m×m Vandermonde matrix of x, i.e. V[j, i] = x[j]^(i-1).
# Written generically over the element type T so exotic numbers (e.g. our dual
# number D) work too.
function vander_generic(x::AbstractVector{T}) where T
    m = length(x)
    V = Matrix{T}(undef, m, m)
    for row = 1:m
        V[row, 1] = one(x[row])       # zeroth power
        for col = 2:m
            V[row, col] = x[row] * V[row, col - 1]   # next power by multiplication
        end
    end
    return V
end
a, b, c, d = 2, 3, 4, 5
# Seeding each entry with ϵ=1 gives d(x^k)/dx for every matrix element.
V = vander_generic([D(a,1), D(b,1), D(c,1), D(d,1)])
# Extract just the derivative components.
(x->getfield(x, :ϵ)).(V)
# ## Symbolically (because we can)
# The below is mathematically equivalent, **though not exactly what the computation is doing**.
using SymPy
# +
x = symbols("x")
# display("Iterations as a function of x")
# for k = 1:5
#     display(simplify(Babylonian(x; N=k)))
# end
display("Derivatives as a function of x")
for k = 1:5
    display(simplify(diff(simplify(Babylonian(x; N=k)), x)))
end
# -
@code_native debuginfo=:none Babylonian(D(5, 1); N=5)
# ## ForwardDiff.jl
# Now that we have understood how forward AD works, we can use the more feature complete package [ForwardDiff.jl](https://github.com/JuliaDiff/ForwardDiff.jl).
using ForwardDiff
ForwardDiff.derivative(Babylonian, 2)
@edit ForwardDiff.derivative(Babylonian, 2)
# (Note: [DiffRules.jl](https://github.com/JuliaDiff/DiffRules.jl))
# ## Reverse mode AD
# Forward mode:
# $\dfrac{\partial f}{\partial x} = \dfrac{\partial f}{\partial c_4} \dfrac{\partial c_4}{\partial x} = \dfrac{\partial f}{\partial c_4} \left( \dfrac{\partial c_4}{\partial c_3} \dfrac{\partial c_3}{\partial x} \right) = \dfrac{\partial f}{\partial c_4} \left( \dfrac{\partial c_4}{\partial c_3} \left( \dfrac{\partial c_3}{\partial c_2} \dfrac{\partial c_2}{\partial x} + \dfrac{\partial c_3}{\partial c_1} \dfrac{\partial c_1}{\partial x}\right) \right)$
#
# Reverse mode:
# $\dfrac{\partial f}{\partial x} = \dfrac{\partial f}{\partial c_4} \dfrac{\partial c_4}{\partial x} = \left( \dfrac{\partial f}{\partial c_3}\dfrac{\partial c_3}{\partial c_4} \right) \dfrac{\partial c_4}{\partial x} = \left( \left( \dfrac{\partial f}{\partial c_2} \dfrac{\partial c_2}{\partial c_3} + \dfrac{\partial f}{\partial c_1} \dfrac{\partial c_1}{\partial c_3} \right) \dfrac{\partial c_3}{\partial c_4} \right) \dfrac{\partial c_4}{\partial x}$
# Forward mode AD requires $n$ passes in order to compute an $n$-dimensional
# gradient.
#
# Reverse mode AD requires only a single run in order to compute a complete gradient but requires two passes through the graph: a forward pass during which necessary intermediate values are computed and a backward pass which computes the gradient.
# *Rule of thumb:*
#
# Forward mode is good for $\mathbb{R} \rightarrow \mathbb{R}^n$ while reverse mode is good for $\mathbb{R}^n \rightarrow \mathbb{R}$.
# An efficient source-to-source reverse mode AD is implemented in [Zygote.jl](https://github.com/FluxML/Zygote.jl), the AD underlying [Flux.jl](https://fluxml.ai/) (since version 0.10).
using Zygote
# NOTE(review): this adds a one-argument method to the existing two-argument f
# defined above; it does not replace it.
f(x) = 5*x + 3
gradient(f, 5)
@code_llvm debuginfo=:none gradient(f,5)
# ## Some nice reads
# Lectures:
#
# * https://mitmath.github.io/18337/lecture8/automatic_differentiation.html
#
# Blog posts:
#
# * ML in Julia: https://julialang.org/blog/2018/12/ml-language-compiler
#
# * Nice example: https://fluxml.ai/2019/03/05/dp-vs-rl.html
#
# * Nice interactive examples: https://fluxml.ai/experiments/
#
# * Why Julia for ML? https://julialang.org/blog/2017/12/ml&pl
#
# * Neural networks with differential equation layers: https://julialang.org/blog/2019/01/fluxdiffeq
#
# * Implement Your Own Automatic Differentiation with Julia in ONE day : http://blog.rogerluo.me/2018/10/23/write-an-ad-in-one-day/
#
# * Implement Your Own Source To Source AD in ONE day!: http://blog.rogerluo.me/2019/07/27/yassad/
#
# Repositories:
#
# * AD flavors, like forward and reverse mode AD: https://github.com/MikeInnes/diff-zoo (Mike is one of the smartest Julia ML heads)
#
# Talks:
#
# * AD is a compiler problem: https://juliacomputing.com/assets/pdf/CGO_C4ML_talk.pdf
| Day2/5a_automatic_differentiation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Lecture 8
# +
import os
# Download the ThinkDSP helper module on first run.
# NOTE(review): the indented "# !wget" line is a jupytext-escaped notebook shell
# magic; as plain Python this if-statement has no body and would be a
# SyntaxError — this file must be run through Jupyter/jupytext.
if not os.path.exists('thinkdsp.py'):
    # !wget https://github.com/AllenDowney/ThinkDSP/raw/master/code/thinkdsp.py
from thinkdsp import CosSignal, SinSignal, SquareSignal, Wave
from thinkdsp import decorate
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
mpl.rcParams['figure.dpi'] = 150
# -
def h_omega_hat(omega_hat):
    """Frequency response of the 5-tap moving-average filter at omega_hat.

    omega_hat is the normalized angular frequency (radians/sample); scalars
    and numpy arrays both work. The exp(-2j*omega_hat) factor is the linear
    phase of the 2-sample delay; the cosine sum is the real amplitude term.
    """
    phase = np.exp(-2j * omega_hat)
    amplitude = 1 + 2 * np.cos(omega_hat) + 2 * np.cos(2 * omega_hat)
    return 0.2 * phase * amplitude
def h_freq(freq, sampling_freq=100):
    """Moving-average filter response at a physical frequency in Hz.

    Converts freq to the normalized angular frequency
    omega_hat = 2*pi*freq/sampling_freq and evaluates the same 5-tap
    moving-average response as h_omega_hat.

    Parameters
    ----------
    freq : float or array-like
        Frequency in Hz; scalars and numpy arrays both work.
    sampling_freq : float, optional
        Sampling rate in Hz. Defaults to 100, matching the lecture examples
        (previously this value was hard-coded).

    Returns
    -------
    complex or ndarray of complex
        H(e^{j omega_hat}) at the requested frequency/frequencies.
    """
    omega_hat = 2 * np.pi * freq / sampling_freq
    return 0.2 * np.exp(-1j * 2 * omega_hat) * (1 + 2 * np.cos(omega_hat) + 2 * np.cos(2 * omega_hat))
# h_omega_hat(10000)
# Magnitude and phase of the moving-average response over [0, pi].
omega_values = np.linspace(start=0, stop=np.pi, num=10000)
plt.plot(omega_values, np.abs(h_omega_hat(omega_values)))
plt.ylabel("magnitude")
plt.xlabel("omega_hat")
plt.show()
plt.plot(omega_values, np.angle(h_omega_hat(omega_values)))
plt.ylabel("phase")
plt.xlabel("omega-hat")
plt.show()
# "Actual frequency at 100 Hz sampling"
frequencies = np.linspace(start=0, stop=50, num=10000)
plt.plot(frequencies, np.abs(h_freq(frequencies)))
plt.ylabel("magnitude")
plt.xlabel("frequency")
plt.show()
# # Trying out the moving average filter
moving_average_filter_coeff = [0.2,0.2,0.2,0.2,0.2]
# Two-tone test signal: 5 Hz (kept) plus 25 Hz (a null of the filter at fs=100).
signal = SinSignal(freq=5)
signal2 = SinSignal(freq=25)
mix = signal + signal2
mix.plot()
wave = mix.make_wave(duration=2, framerate=100)
segment = wave.segment(duration=2)
spectrum = segment.make_spectrum()
spectrum.plot()
# mode="valid" drops the edge transients, so the output is 4 samples shorter
# than the input.
filtered = np.convolve(segment.ys, moving_average_filter_coeff, mode="valid")
# print(filtered)
new_wave = Wave(filtered, framerate=wave.framerate)
new_wave.plot()
filtered_spectrum = new_wave.make_spectrum()
filtered_spectrum.plot()
| Lecture 8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Conditional Operations
# Tutorial cells: each bare expression below is meant to echo its value in a
# notebook; as a plain script the results are discarded.
a=20
b=10
a == b
a != b
a > b
a < b
a >= b
a <=b
not True
not a == b
not a>b
# # Chained Comparison Operator
c = 5
a > b and c < a
a > b or c > a
1 > 10 and 2 < 3
1 < 10 and 2 < 10 # true AND true
1 < 10 or 2 < 10 # true OR true
1 < 10 and 2 > 10
1 < 10 or 2 > 10
1 >10 and 2 > 10 # false AND false
1 > 10 or 2 > 10
# # Python Statement
# - if else
# - if elif else
# - for
# - while
a = 3
b = 3
if a == b :
    print("Equal")
else:
    print ("Not equal")
a = 2
if(a%2 == 0) :
    print ("Even");
else :
    print ("Odd");
a = 2
if (type(a) != int):
    print ("Not int")
elif (a%2 == 0) :
    print ("Even");
else :
    print ("Odd");
type(a)
mark = 0
if( mark >= 75 ):
    print ("Above Average")
elif ( mark >= 50):
    print ("Average")
elif ( mark > 0):
    print ("Below Average")
else:
    print ("fail")
# # For Loops
my_list = [1,2,3,4,5,6,7,8,9,10]
for num in my_list:
    print (num)
my_list = [1,2,3,4,5,6,7,8,9,10]
for num in my_list:
    if(num %2 == 0):
        print ("Even:", num)
    else:
        print ("Odd:", num)
name="sumathi"
for c in name:
    print (c)
# +
tuples = (1,2,3)
for c in tuples:
    print (c)
list_of_tuples = [(1,2),(3,4),(5,6),(7,8)]
list_of_tuples.append((9,10))
# Tuple unpacking in the loop header splits each pair into t1 and t2.
for (t1,t2) in list_of_tuples:
    print(t1, " And ", t2)
# +
dictionary = {"cat":"animal", "sparrow":"bird",'spider':'insect'}
# Iterating a dict yields its keys.
for item in dictionary:
    print (item)
#Unpack dictionary
for key,item in dictionary.items():
    print (key , "is" ,item)
list(dictionary.keys())
sorted(dictionary.values())
# -
# NOTE(review): this assignment shadows the built-in set type for the rest of
# the session — e.g. set(...) as a constructor no longer works afterwards.
set = {1,3,4}
set.add(5)
# +
# Naive primality sieve by trial division over 2..num-1.
number = range(2,100)
for num in number:
    flag = 'prime'
    for i in range(2,num):
        if (num % i) == 0:
            #print(num,"is not a prime number")
            flag = 'not prime'
            break
    if flag == 'prime':
        print(num,"is a prime number")
# -
number
# # While Loop
my_list = [1,2,3,4,5,6,7,8,9,10]
# The for/else branch runs because the loop completes without a break.
for num in my_list:
    print (num)
else:
    print ("Completed")
print ("End")
x = 10
x = 0
while x < 10:
    pass
    x +=1
    if x == 3:
        #break
        continue
    print ("x is", x)
else :
    print ("Done")
while (x > 10):
    pass #No operation. Just place holder
# NOTE(review): this shadows the built-in list type (the commented call below
# would now fail), and naming a range object "list" is doubly confusing.
list = range(3,15)
#list(range(3,5))
for num in list :
    print(num)
min(list)
max(list)
from random import shuffle
# NOTE(review): random.shuffle requires a mutable sequence; calling it on a
# range object raises TypeError — confirm this cell was meant to shuffle a list.
shuffle(list)
from random import randint
randint(0,99)
input('Enter any number:')
int(input('Enter any number:'))
| Condition Operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ShesterG/Sentiment_Tools_Evaluation/blob/master/scratch_work/sentiment_analyzer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="fZUjoTsRzRj_" colab={"base_uri": "https://localhost:8080/"} outputId="c1fad473-2ce2-444e-a903-bf070c189ae7"
# Mount Google Drive so the tokenized reviews CSV is reachable.
from google.colab import drive
drive.mount("/content/drive", force_remount=True)
# + id="jowcDJCszwrI" colab={"base_uri": "https://localhost:8080/"} outputId="45fd4233-1e20-42f7-90dc-2762c472ba06"
# !pip3 install config
# + id="FxF8pmvZ0TvR" colab={"base_uri": "https://localhost:8080/"} outputId="a0b140c1-3bbe-43af-d27d-13766492de3e"
# !pip install vaderSentiment
# + id="ZIEFEXiozJTo"
import pandas as pd
import glob
import sys
sys.path.append('../../')
import config
import warnings
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
warnings.filterwarnings('ignore')
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from sklearn.metrics import confusion_matrix, accuracy_score,classification_report,f1_score
# + id="mEILAGGx0-dW"
DATA_FILE_PATH = '/content/drive/MyDrive/NLPGh/'
CLEAN_DATA_FILE_NAME = 'Book8Clean.csv'
SAVE_FILE = True
TOKENIZED_DATA_FILE_NAME = 'Book8Tokenized.csv'
# + id="Sr61DDSS1Eic"
reviews = pd.read_csv(DATA_FILE_PATH + TOKENIZED_DATA_FILE_NAME)
# + id="pguWdb9I1LvQ"
pd.set_option('display.max_colwidth', None)
# + id="BK0oRQmozJTx" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="20f4746f-51cd-4c38-e849-e05617a22e91"
reviews.head()
# + id="kPNIvfgUzJT2"
# Size of the rarest sentiment class — used below to balance the dataset.
minimum_value = reviews['Sentiment'].value_counts().min()
# + id="2lYlEErRzJT3"
def sample_minimum(reviews):
    """Down-sample one sentiment group to the size of the rarest class.

    Relies on the module-level ``minimum_value`` computed above.
    """
    return reviews.sample(n=minimum_value)
# + id="rNxP5xHozJT4"
# Balance the classes: down-sample every sentiment group to the rarest size.
g = reviews.groupby('Sentiment')
# + id="BSjdhC1pzJT4" colab={"base_uri": "https://localhost:8080/", "height": 765} outputId="020c1e83-b053-48f4-fea6-3570137851f6"
new_df = g.apply(sample_minimum).reset_index(drop=True)
new_df.head()
# + id="ZoWb5XY2zJT5" colab={"base_uri": "https://localhost:8080/"} outputId="6a6b24eb-d8c9-4233-b2f2-8b0bee130ca3"
new_df['Sentiment'].value_counts()
# + [markdown] id="LP1VlMv7zJT6"
# ### For VADERSentiment
# + id="V24g6E4hzJT7"
analyser = SentimentIntensityAnalyzer()
# + id="LwL29SjnzJT7"
def vader_sentiment(text):
    """Binary VADER sentiment: 1 if the compound score is positive, else -1.

    Uses the module-level ``analyser`` (SentimentIntensityAnalyzer).
    """
    compound = analyser.polarity_scores(text)['compound']
    return 1 if compound > 0 else -1
# + [markdown] id="KpU1ch3rzJT8"
# ### For TextBlob
# + id="r1zjKvRTzJT8"
def text_blob(text):
    """Binary TextBlob sentiment: 1 if polarity is positive, else -1."""
    polarity = TextBlob(text).sentiment.polarity
    return 1 if polarity > 0 else -1
# + [markdown] id="ZH-KMsF0zJT9"
# ### Analysis and Evaluation
#
# + id="dY4BI4enzJUA"
# Score every review with both analyzers.
new_df['vader_sent'] = new_df['tokens'].apply(vader_sentiment)
new_df['textblob_sent'] = new_df['tokens'].apply(text_blob)
# + id="L4BKlSGMzJUD"
col = ['vader_sent','textblob_sent']
def pd_score(df, col, target):
    """Accuracy of each prediction column in ``col`` against ``target``.

    Returns a pandas Series labelled VADER / TextBlob.
    NOTE(review): the index labels are hard-coded to two entries, so this
    assumes len(col) == 2 — confirm before adding more analyzers.
    """
    accuracies = [accuracy_score(target, df[name]) for name in col]
    return pd.Series(accuracies, index=['VADER', 'TextBlob'])
# + id="efknKG0_zJUF"
# Compare both analyzers against the labelled sentiment.
scores = pd_score(new_df,col,new_df['Sentiment'])
# + id="N_jfMTZuzJUG"
rate = accuracy_score(new_df['Sentiment'], new_df['vader_sent'])
# + id="VCLlKr2YzJUH" colab={"base_uri": "https://localhost:8080/"} outputId="cb438e7f-2e30-4f54-be4f-c3eb4f0c1c1d"
rate
# + id="0xy6WGWWzJUI"
rate = accuracy_score(new_df['Sentiment'], new_df['textblob_sent'])
# + id="1asYlZMyzJUI" colab={"base_uri": "https://localhost:8080/"} outputId="aaea2991-e752-4e42-9d00-28dbe9aeed14"
rate
# + id="FmNfhkFuzJUK" outputId="c70ca3ad-9828-4907-fe00-4441b3830061" colab={"base_uri": "https://localhost:8080/", "height": 1000}
new_df.head()
# + id="PfVMdB2mzJUN" colab={"base_uri": "https://localhost:8080/"} outputId="5d860d10-b76c-4202-b11d-b4b7d4224aa4"
confusion_matrix(new_df['Sentiment'], new_df['vader_sent'])
# + id="4MvKs38czJUO" colab={"base_uri": "https://localhost:8080/"} outputId="f86200d7-da90-4b84-c635-f19b9d0ca9a8"
confusion_matrix(new_df['Sentiment'], new_df['textblob_sent'])
# + id="jxkHRLO1zJUP" colab={"base_uri": "https://localhost:8080/"} outputId="42e62050-93ac-4f74-e3d1-a0f5963d4b9b"
print(classification_report(new_df['Sentiment'], new_df['vader_sent']))
# + id="rSib-YYmzJUP" colab={"base_uri": "https://localhost:8080/"} outputId="e02e5686-890b-42c8-f96c-395a0261f804"
print(classification_report(new_df['Sentiment'], new_df['textblob_sent']))
| scratch_work/sentiment_analyzer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["remove_cell"]
# This cell is tagged for removal
import os
import math
import pickle
from glob import glob
import numpy as np
import pandas as pd
from IPython import display as idisplay
from sklearn.metrics import r2_score
import aliases # important this goes first to configure PATH
from everest.window import Canvas, plot, raster, DataChannel as Channel, get_cmap
from everest.window.colourmaps import *
from everest.window import image, imop
from referencing import search
from myst_nb import glue
from analysis import isovisc, arrhenius, utilities, common, analysis, visualisation
# %matplotlib inline
osjoin = os.path.join
import warnings
warnings.filterwarnings("ignore",category=UserWarning)
# -
# # Simple isoviscous rheology
# Due to their relative simplicity and amenability to symbolic analysis, isoviscous models were among the earliest published mantle convection models {cite}`McKenzie1973-gt,McKenzie1974-wb,Jarvis1986-me,Blankenbach1989-li`, and they continue to be produced and discussed today {cite}`Zhong2005-lh,Weller2016-cc,Weller2016-nm,Vilella2018-il`.
#
# In an isoviscous model, the viscosity function (usually set to $\eta=1$) is constant throughout space and time. Though simple, it is nevertheless able to reproduce appropriate surface velocities, gravitational profiles, and even topographic wavelengths {cite}`McKenzie1973-gt,McKenzie1974-wb`. Though its parameters are few, there remain limitless possible variations through *Rayleigh* number, internal heat $H$, domain geometry, and choice of boundary condition - many of which boast long-term stability solutions with enough implicit nonlinearity to make purely analytical studies infeasible {cite}`Daly1980-xl`. Even within each parameter set, chaotic dynamics ensure that two nearly identical configurations may yet have wildly divergent outcomes {cite}`Stewart1989-os,Palymskiy2003-fq`. And while the isoviscous model is certainly the most computationally tractable of all mantle-like rheologies, it is only in the last decade that long-run simulations of appropriate scale for the Earth ($Ra>10^7$) have become possible {cite}`Vynnycky2013-wg,Trubitsyn2018-jo`; these have confirmed earlier intuitions that stable convective planforms may either not exist, or may never manifest, on planetary spatiotemporal scales {cite}`Huttig2011-jt`.
#
# Although the isoviscous model does belie considerable complexity, it is simple enough to make some solutions analytically attainable. Like all convecting systems, a 'critical' *Rayleigh* number $Ra_{cr}$ should exist below which convection ceases and conduction dominates (i.e. $Nu=1$), defining a 'supercritical $Ra$':
#
# $$ R \equiv \frac{Ra}{Ra_{cr}} $$
#
# At $R=1$, perturbations of a certain 'critical' wavelength are uniquely able to grow faster than the conductive geotherm and hence become unstable; increasing $R$ beyond $1$ makes more wavelengths available for convective growth, until at extreme values ($Ra >> 10^7$) even artificial heterogeneities introduced by random noise can grow, such that large-scale models become overwhelmingly time-dependent {cite}`Jarvis1984-xo`. For a plane domain of infinite horizontal extent, the critical wavelength $\lambda_{cr}$ should be exactly $\sqrt{2}$ {cite}`Chandrasekhar1961-ez`, corresponding to a $Ra_{cr}$ of exactly {cite}`Malkus1954-ee`:
#
# $$ Ra_{cr} = \frac{27\pi^4}{4} \approx 657.5 $$
#
# In any real system, however, $A$ cannot be infinite, and may be literally or effectively compressed such that the critical wavelength is no longer available. The effect of this is to create a dependency of $Ra_{cr}$ on $A$ {cite}`Chandrasekhar1961-ez`:
#
# $$ Ra_{cr} = \frac{\pi^4 \left( 1 + A^2 \right)^3}{A^4} $$
#
# At the unit aspect ratios typically modelled, for instance, $Ra_{cr}$ should instead approach ({cite}`Grover1968-wa`):
#
# $$ Ra_{cr} = 2^3\pi^4 \approx 779.3 $$
#
# A value which is borne out in laboratory testing {cite}`Whitehead2011-gs`.
#
# While heat may be transported by convection in the interior of the system, heat may only cross in or out of the system as a whole via conduction. This occurs across two thin layers at the outer and inner boundaries. Since we stipulate that these layers are purely conductive, a *Rayleigh* number defined only across each layer must be below the critical value for that layer: ${Ra}_{layer} < {{Ra}_{layer}}_{cr}$ {cite}`Olson1987-do`. This is the first observation of boundary layer theory, whence can be deduced the following fundamental power law relationship between the *Rayleigh* and *Nusselt* numbers {cite}`Schubert2001-ea`:
#
# $$ Nu \propto Ra^{\beta}, \quad \beta \to \frac{1}{3} $$
#
# Where $Nu$ is the Nusselt number. The coefficient of proportionality is theoretically $\approx 0.1941$ {cite}`Olson1987-do`, though it has been argued that its value will tend be dominated by uncertainty in practice {cite}`Lenardic2003-wd`; reported values have ranged between $0.25-0.27$ {cite}`Olson1987-do,Jarvis1989-qj`.
#
# An equivalent scaling {cite}`Jarvis1982-ua` has instead:
#
# $$ Nu \propto R^{\beta} $$
#
# Where $R$, again, is the proportion by which $Ra$ exceeds $Ra_{cr}$. Defining $Ra$ in this way preserves the value of $\beta$ insofar as $Ra^{cr}$ is independent of it, but allows the coefficient of proportionality to relate more strictly to non-thermal factors like the domain geometry - for example the aspect ratio, which (above a certain threshold) has been observed to stretch or compress the planform horizontally without changing the underlying boundary stability criteria {cite}`Jarvis1982-ua`.
#
# In any case, at the state where $Nu$ satisfies this scaling, the interior of each cell becomes a homogeneous region of uniform temperature $T^{cell}$ and variable but low velocities, with strong gradients and shears at the margins, and overall cell dimensions approaching an aspect ratio of $\sqrt{2}$. Because of the fixed temperature scale, the only way heat transport can be enhanced in such a system is by thinning the boundary layers, which in practice occurs by dripping/pluming until only the theoretical stable boundary thickness is left. For this reason, $Nu$ also functions as a useful proxy for boundary layer thickness when this is otherwise hard to define.
#
# The canonical *beta* scaling is seductive because it connects the relatively well-constrained fact of surface geothermal flux with the more mysterious thermal state of the mantle, and so allows parameterised thermal histories to be projected through deep time. The $\beta \to \frac{1}{3}$ limit itself ultimately derives from the *Rayleigh* number's dependence on length cubed, and while there is no *a priori* reason to believe that this analytical justification must be borne out in practice, it has been recognised as extremely suggestive for over half a century {cite}`Chan1971-xv`. Testing this scaling behaviour empirically was an early priority of computational geodynamics, with several studies producing estimates that converged on, but did not achieve, the theoretical $\frac{1}{3}$ scaling: the value has been reported as any of $0.313$ {cite}`Jarvis1982-ua`, $0.318$ {cite}`Jarvis1986-me`, $0.319$ {cite}`Schubert1985-sy`, $0.326$ {cite}`Jarvis1989-qj`, $0.36$ {cite}`Quareni1985-ff`, and $0.31$ {cite}`Niemela2000-cu`, using various methods both numerical and laboratory-based. The reason for the deviation is uncertain. One possibility is that the boundary layer instability theory is only valid in the limit $Ra\to\infty$ {cite}`Olson1987-do`. Alternatively, high $Ra$ values may witness transitions to alternate scaling logics altogether - perhaps lowering $\beta$. It was for a time suggested that, at very high *Rayleigh* numbers, an 'asymptotic regime' of $\beta \to \frac{1}{2}$ might emerge, but this has not yet been observed {cite}`Niemela2000-cu`.
#
# While the *beta* scaling strictly holds only for those isoviscous systems with purely basal (no volumetric) heating, Cartesian geometry, and free-slip boundaries, it has been found to hold for a wide range of systems if certain corrections are made.
| book/_build/html/_sources/content/chapter_04_isoviscous/background/simple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Table of Contents
#
#
# - **Getting Started**
# - Set Up Environment
# - Import Data
# - **ModelSearcher**
#     - Define Models & Hyper Parameters to Search
# - Create/Run the ModelSearcher object
# - Evaluate
# Note: this notebook is meant to be a demo of some of the capabilities of **`oo-learning`** (https://github.com/shane-kercheval/oo-learning); it is not meant to show the best approach to exploring/cleaning/modeling this particular dataset. Also, with most graphs (e.g. correlations/box-plots/etc.) I will spend very little time commenting on the significance of any interesting trends or patterns. Again, the intent is to show a demo, not a guide to data analysis.
#
#
# # Getting Started
#
# In this notebook, we'll see how to search across different models and hyper-parameters (or the same models and hyper-parameters with different transformations, for example) with the goal of finding the "best" or ideal model candidates for further tuning and optimization.
# ## Set Up Environment
# +
# # !pip install oolearning --upgrade
# +
from oolearning import *
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Show untruncated cell contents and all rows/columns when displaying DataFrames.
# NOTE(review): max_colwidth=-1 is deprecated in newer pandas (use None there) -- verify target version.
pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Default figure size with a ~4:3 aspect ratio.
width = 10
plt.rcParams['figure.figsize'] = [width, width/1.333]
# -
# ## Import Data
# `ExploreClassificationDataset` is a convenience class described in the [first notebook of this series](https://github.com/shane-kercheval/oo-learning/blob/master/examples/classification-titanic/1-Exploring%20the%20Titanic%20Dataset.ipynb).
# +
# Load the Titanic dataset and wrap it in an explorer object; the numeric
# target (0/1) is mapped to categoric labels so reports read naturally.
csv_file = '../data/titanic.csv'
target_variable = 'Survived'
target_mapping = {0: 'died', 1: 'lived'} # so we can convert from numeric to categoric
explore = ExploreClassificationDataset.from_csv(csv_file_path=csv_file,
                                                target_variable=target_variable,
                                                map_numeric_target=target_mapping)
# -
# Peek at the raw data.
explore.dataset.head()
# # `ModelSearcher`
# A "Searcher" searches across different models and hyper-parameters (or the same models and hyper-parameters with different transformations, for example) with the goal of finding the "best" or ideal model candidates for further tuning and optimization.
#
# We'll supply the searcher with ModelInfo objects; each one, for a given model, encapsulates a description, a model object, a hyper-param object, and a grid of hyper-parameter values to evaluate.
#
# The data is split (via a Splitter) into training and holdout sets. The training set will be used for selecting the "best" hyper parameters (via Tuner & Resampler) and then the model will be retrained and evaluated with the selected hyper parameters on the holdout set.
# ## Define Models & Hyper Parameters to Search
# Below we define the information necessary to search **`Logistic Regression`** and **`Random Forest`** models, across various hyper parameters.
#
# We also include two **`DummyClassifier`** objects. These objects are simple wrappers around **`sklearn.dummy.DummyClassifier`**. The first object (i.e. **`DummyClassifierStrategy.STRATIFIED`**) generates predictions by respecting the training set’s class distribution. The second `DummyClassifier` (i.e. **`DummyClassifierStrategy.MOST_FREQUENT`**) always predicts the most frequent label in the dataset.
#
# These dummy classifiers allow us to compare our results to various random guessing strategies, which is especially useful for datasets that have a high degree of class imbalance (which we do not have in the Titanic dataset).
# +
# define the transformations that will be applied to ALL models
global_transformations = [RemoveColumnsTransformer(['PassengerId', 'Name', 'Ticket', 'Cabin']),
                          CategoricConverterTransformer(['Pclass', 'SibSp', 'Parch']),
                          ImputationTransformer(),
                          DummyEncodeTransformer(CategoricalEncoding.ONE_HOT)]
# Logistic Regression Hyper-Param Grid
log_grid = HyperParamsGrid(params_dict=dict(penalty=['l1', 'l2'],
                                            regularization_inverse=[0.001, 0.01, 0.1, 1, 100, 1000]))
# get the expected columns at the time we do the training, based on the transformations
columns = TransformerPipeline.get_expected_columns(transformations=global_transformations,
                                                   data=explore.dataset.drop(columns=[target_variable]))
# Random Forest Hyper-Param Grid
# max_features candidates: sqrt(p), p/2, and p-1 where p is the number of
# post-transformation columns.
# NOTE(review): criterion is a scalar while the other params are lists --
# confirm HyperParamsGrid accepts scalar values.
rm_grid = HyperParamsGrid(params_dict=dict(criterion='gini',
                                           max_features=[int(round(len(columns) ** (1 / 2.0))),
                                                         int(round(len(columns) / 2)),
                                                         len(columns) - 1],
                                           n_estimators=[10, 100, 500],
                                           min_samples_leaf=[1, 50, 100]))
# define the models and hyper-parameters that we want to search through;
# the two DummyClassifiers provide random-guessing baselines.
infos = [ModelInfo(description='dummy_stratified',
                   model=DummyClassifier(DummyClassifierStrategy.STRATIFIED),
                   transformations=None,
                   hyper_params=None,
                   hyper_params_grid=None),
         ModelInfo(description='dummy_frequent',
                   model=DummyClassifier(DummyClassifierStrategy.MOST_FREQUENT),
                   transformations=None,
                   hyper_params=None,
                   hyper_params_grid=None),
         ModelInfo(description='Logistic Regression',
                   model=LogisticClassifier(),
                   # transformations specific to this model
                   transformations=[CenterScaleTransformer(),
                                    RemoveCorrelationsTransformer()],
                   hyper_params=LogisticClassifierHP(),
                   hyper_params_grid=log_grid),
         ModelInfo(description='Random Forest',
                   model=RandomForestClassifier(),
                   transformations=None,
                   hyper_params=RandomForestHP(),
                   hyper_params_grid=rm_grid)]
# -
# Here are the hyper-parameter combinations we will try out for the **`LogisticClassifier`**:
log_grid.params_grid
# Here are the hyper-parameter combinations we will try out for the **`RandomForestClassifier`**:
rm_grid.params_grid
# ## Create/Run the ModelSearcher object
# +
# define the Score objects, which will be used to choose the "best" hyper-parameters for a particular model,
# and compare the performance across model/hyper-params
score_list = [AucRocScore(positive_class='lived'),
              # the SensitivityScore needs a Converter,
              # which contains the logic necessary to convert the predicted values to a predicted class.
              SensitivityScore(converter=TwoClassThresholdConverter(threshold=0.5, positive_class='lived'))]
# create the ModelSearcher object: 25% of the data is held out; the rest is
# resampled with 5-fold cross-validation repeated 3 times per hyper-param combo.
searcher = ModelSearcher(global_transformations=global_transformations,
                         model_infos=infos,
                         splitter=ClassificationStratifiedDataSplitter(holdout_ratio=0.25),
                         resampler_function=lambda m, mt: RepeatedCrossValidationResampler(
                             model=m,
                             transformations=mt,
                             scores=score_list,
                             folds=5,
                             repeats=3))
searcher.search(data=explore.dataset, target_variable='Survived')
# -
# ## Evaluate
# We have access to the **model descriptions** and **model names**.
searcher.results.model_descriptions
searcher.results.model_names
# ### Resampling Scores
#
#
# The following plot shows the resampling (AUC) scores for the **best** hyper-parameter combination found, per model.
searcher.results.plot_resampled_scores(metric=Metric.AUC_ROC)
# The following plot shows the resampling (Sensitivity) scores for the **best** hyper-parameter combination found, per model.
searcher.results.plot_resampled_scores(metric=Metric.SENSITIVITY)
# ### Holdout Scores
#
# Once the best hyper-parameters for a given model are found via the Resampler, the model and hyper-params are fit on the entire training set and evaluated on the holdout set.
#
# We have access to these holdout scores via the following methods.
searcher.results.holdout_scores
searcher.results.plot_holdout_scores()
searcher.results.best_model_index
searcher.results.model_descriptions[searcher.results.best_model_index]
# Although the best model according to the resampled (AUC) scores is Random Forest, the "best" model according to the holdout set is Logistic Regression.
# ### Digging into Tuner & Resampler Results
# We can access the underlying tuner results for each model via the **`tuner_results`** property.
#
# This is useful because we will rarely pick the best hyper-parameters, so we'll want to see the areas where a particular model does better and try more granular hyper parameter values in those areas.
#
# Let's access the tuner results for the `Random Forest` model, and see how each hyper-parameter combination performed via the resampling results.
searcher.results.model_names
# index 3 corresponds to the Random Forest entry in `infos` above
rf_tuner_results = searcher.results.tuner_results[3]
rf_tuner_results.plot_resampled_scores(metric=Metric.AUC_ROC)
rf_tuner_results.plot_hyper_params_profile(metric=Metric.AUC_ROC,
                                           x_axis='max_features',
                                           line='n_estimators',
                                           grid='min_samples_leaf')
rf_tuner_results.plot_hyper_params_profile(metric=Metric.AUC_ROC, x_axis='max_features')
| examples/classification-titanic/5-Searching - obsolete.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modeling and Simulation in Python
#
# Rabbit example
#
# Copyright 2017 <NAME>
#
# License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
#
# +
# %matplotlib inline
from modsim import *
# -
# ## Rabbit Redux
#
# This notebook starts with a version of the rabbit population growth model and walks through some steps for extending it.
#
# In the original model, we treat all rabbits as adults; that is, we assume that a rabbit is able to breed in the season after it is born. In this notebook, we extend the model to include both juvenile and adult rabbits.
#
# As an example, let's assume that rabbits take 3 seasons to mature. We could model that process explicitly by counting the number of rabbits that are 1, 2, or 3 seasons old. As an alternative, we can model just two stages, juvenile and adult. In the simpler model, the maturation rate is 1/3 of the juveniles per season.
#
# To implement this model, make these changes in the System object:
#
# 0. Before you make any changes, run all cells and confirm your understand them.
#
# 1. Then, add a second initial population: `juvenile_pop0`, with value `0`.
#
# 2. Add an additional variable, `mature_rate`, with the value `0.33`.
# +
# Two-stage population model parameters: seasons t0..t_end, starting adult
# and juvenile counts, and per-season birth, death and maturation rates.
system = System(t0 = 0,
                t_end = 10,
                adult_pop0 = 10,
                birth_rate = 0.9,
                death_rate = 0.5,
                juvenile_pop0 = 0,
                mature_rate = 0.33)
system
# -
# Now update `run_simulation` with the following changes:
#
# 1. Add a second TimeSeries, named `juveniles`, to keep track of the juvenile population, and initialize it with `juvenile_pop0`.
#
# 2. Inside the for loop, compute the number of juveniles that mature during each time step.
#
# 3. Also inside the for loop, add a line that stores the number of juveniles in the new `TimeSeries`. For simplicity, let's assume that only adult rabbits die.
#
# 4. During each time step, subtract the number of maturations from the juvenile population and add it to the adult population.
#
# 5. After the for loop, store the `juveniles` `TimeSeries` as a variable in `System`.
def run_simulation(system):
    """Run a two-stage (juvenile/adult) proportional growth model.

    Each season, adults produce newborn juveniles, a fraction of the
    juveniles mature into adults, and a fraction of the adults die
    (only adults die in this model).

    Stores the resulting TimeSeries on `system` as `system.adults`
    and `system.juveniles`.

    system: System object with t0, t_end, adult_pop0, juvenile_pop0,
            birth_rate, death_rate and mature_rate
    """
    adult_series = TimeSeries()
    juvenile_series = TimeSeries()
    adult_series[system.t0] = system.adult_pop0
    juvenile_series[system.t0] = system.juvenile_pop0
    for season in linrange(system.t0, system.t_end):
        newborns = system.birth_rate * adult_series[season]
        adult_deaths = system.death_rate * adult_series[season]
        maturations = system.mature_rate * juvenile_series[season]
        juvenile_series[season + 1] = juvenile_series[season] + newborns - maturations
        adult_series[season + 1] = adult_series[season] + maturations - adult_deaths
    system.adults = adult_series
    system.juveniles = juvenile_series
# Test your changes in `run_simulation`:
run_simulation(system)
# Adult population by season.
system.adults
# Next, update `plot_results` to plot both the adult and juvenile `TimeSeries`.
def plot_results(system, title=None):
    """Plot the adult and juvenile population estimates.

    system: System object carrying `adults` and `juveniles` TimeSeries
    title: optional figure title
    """
    newfig()
    plot(system.adults, 'bo-', label='adults')
    plot(system.juveniles, 'co-', label='juvenile')
    decorate(xlabel='Season', ylabel='Rabbit population', title=title)
# And test your updated version of `plot_results`.
# Draws both population curves on a fresh figure.
plot_results(system, title='Proportional growth model')
# This notebook demonstrates the steps we recommend for starting your project:
#
# 1. Start with one of the examples from the book, either by copying a notebook or pasting code into a new notebook. Get the code working before you make any changes.
#
# 2. Make one small change, and run the code again.
#
# 3. Repeat step 2 until you have a basic implementation of your model.
#
# If you start with working code that you understand and make small changes, you can avoid spending a lot of time debugging.
#
# One you have a basic model working, you can think about what metrics to measure, what parameters to sweep, and how to use the model to predict, explain, or design.
# ### Bonus question
#
# Suppose you only have room for 30 adult rabbits. Whenever the adult population exceeds 30, you take any excess rabbits to market (as pets for kind children, of course). Modify `run_simulation` to model this strategy. What effect does it have on the behavior of the system? You might have to run for more than 10 seasons to see what happens.
# +
def run_simulation(system):
    """Run the two-stage growth model with a market cap on adults.

    Identical to the basic juvenile/adult model, except that any adult
    population in excess of 30 is removed (taken to market) each season.
    Stores the resulting TimeSeries on `system` as `system.adults` and
    `system.juveniles`.

    system: System object with t0, t_end, adult_pop0, juvenile_pop0,
            birth_rate, death_rate and mature_rate
    """
    adult_series = TimeSeries()
    juvenile_series = TimeSeries()
    adult_series[system.t0] = system.adult_pop0
    juvenile_series[system.t0] = system.juvenile_pop0
    for season in linrange(system.t0, system.t_end):
        newborns = system.birth_rate * adult_series[season]
        adult_deaths = system.death_rate * adult_series[season]
        maturations = system.mature_rate * juvenile_series[season]
        juvenile_series[season + 1] = juvenile_series[season] + newborns - maturations
        # cap the herd: any adults beyond 30 go to market
        adult_series[season + 1] = min(adult_series[season] + maturations - adult_deaths, 30)
    system.adults = adult_series
    system.juveniles = juvenile_series
# Re-run with the market-cap rule and inspect/plot the result.
run_simulation(system)
system.adults
plot_results(system, title='Proportional growth model')
# -
| code/rabbits2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Setup
# https://www.dataquest.io/blog/jupyter-notebook-tutorial/
#import the essential and important related library
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Use seaborn's dark-grid style for every plot in this notebook.
sns.set(style="darkgrid")
# -
#Load data using Pandas
df = pd.read_csv('dataset/fortune500.csv')
#Investigating our data set
#read and display first 5 rows of data
df.head()
#read and display last 5 rows of data
df.tail()
#rename columns so they are short and easy to remember
df.columns = ['year', 'rank', 'company', 'revenue', 'profit']
#Inspect the data: are values missing? Did pandas read it as expected?
len(df)
df.dtypes
# There is something wrong with the profit column: it should be float64 like the revenue column.
# let's look
# Mask of rows whose profit contains anything other than digits, '.' or '-'
# (i.e. non-numeric entries).
non_numberic_profits = df.profit.str.contains('[^0-9.-]')
df.loc[non_numberic_profits].head()
# Just as we suspected! Some of the values are strings.
set(df.profit[non_numberic_profits])
#check how many are missing
len(df.profit[non_numberic_profits])
# quick look at how the bad rows are distributed across years
bin_sizes, _, _ = plt.hist(df.year[non_numberic_profits], bins=range(1955, 2006))
# remove these rows and convert profit to a numeric dtype.
df = df.loc[~non_numberic_profits]
df.profit = df.profit.apply(pd.to_numeric)
len(df)
df.dtypes
# +
# Plotting with matplotlib
# Average revenue and profit per year across all Fortune 500 companies.
group_by_year = df.loc[:, ['year', 'revenue', 'profit']].groupby('year')
avgs = group_by_year.mean()
x = avgs.index
y1 = avgs.profit
def plot(x, y, ax, title, y_label):
    """Draw *y* against *x* on *ax* with a title and y-axis label, no margins."""
    ax.set(title=title, ylabel=y_label)
    ax.plot(x, y)
    ax.margins(x=0, y=0)
# -
# Mean profits over time.
fig, ax = plt.subplots()
plot(x, y1, ax, 'Increase in mean Fortune 500 company profits from 1955 to 2005', 'Profit (millions)')
# +
# Wow, that looks like an exponential, but it's got some huge dips. They must correspond to the early 1990s recession and the dot-com bubble. It's pretty interesting to see that in the data. But how come profits recovered to even higher levels post each recession?
# Maybe the revenues can tell us more.
y2 = avgs.revenue
fig, ax = plt.subplots()
plot(x, y2, ax, 'Increase in mean Fortune 500 company revenues from 1955 to 2005', 'Revenue (millions)')
# +
# With a little help from Stack Overflow, we can superimpose these plots with +/- their standard deviations.
def plot_with_std(x, y, stds, ax, title, y_label):
    """Like plot(), but shades a +/- one-standard-deviation band around *y*."""
    lower, upper = y - stds, y + stds
    ax.fill_between(x, lower, upper, alpha=0.2)
    plot(x, y, ax, title, y_label)
# Side-by-side mean +/- std plots for profits and revenues.
fig, (ax1, ax2) = plt.subplots(ncols=2)
title = 'Increase in mean and std Fortune 500 company %s from 1955 to 2005'
# FIX: Series.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
# .values is the equivalent accessor and works across all pandas versions.
stds1 = group_by_year.std().profit.values
stds2 = group_by_year.std().revenue.values
plot_with_std(x, y1.values, stds1, ax1, title % 'profits', 'Profit (millions)')
plot_with_std(x, y2.values, stds2, ax2, title % 'revenues', 'Revenue (millions)')
fig.set_size_inches(14, 4)
fig.tight_layout()
# -
| project-python-data-analysis-fortune500/data-analysis-fortune500.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # Predicting Bitcoins price using LSTM RNN
#
# As we have learned that the LSTM models are widely used for sequential datasets, that is dataset in which order matters. In this section, we will learn how can we use LSTM networks for performing time series analysis. We will learn how to predict bitcoin prices using LSTM network.
#
# # Import Libraries
#
# First, we import the required libraries as follows:
# +
import warnings
# Hide library deprecation chatter in the notebook output.
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
# %matplotlib inline
import tensorflow as tf
# Only show TensorFlow errors (suppress info/warning logs).
tf.logging.set_verbosity(tf.logging.ERROR)
# -
# ## Data Preparation
#
# We will see how can we prepare our dataset in a way that LSTM needs. First, we read the input dataset:
df = pd.read_csv('data/btc.csv')
# Display a few rows of the dataset as follows:
df.head()
# As shown in the above data frame, the column Close represents the closing price of bitcoins. We need only the column Close to make predictions, so we take that particular column alone as follows:
data = df['Close'].values
# Next, we standardize the data and bring it to the same scale.
# StandardScaler expects a 2-D array, hence the reshape to a single column.
scaler = StandardScaler()
data = scaler.fit_transform(data.reshape(-1, 1))
# Plot and observe the trend of how the bitcoin price changes. Since we scaled the price, the values are no longer large raw amounts:
plt.plot(data)
plt.xlabel('Days')
plt.ylabel('Price')
plt.grid()
# We define a function called get_data which generates the input and output. It takes the data and window_size as an input and generates the input and target column.
#
# What is window size? We move the x values window_size times ahead and get the y values. For instance, as shown in the below table with window_size = 1, Y values are just 1-time step ahead of x values.
#
#
# 
#
# The get_data() function is defined as follows:
def get_data(data, window_size):
    """Build sliding-window inputs and next-step targets for the LSTM.

    data: 1-D sequence of (scaled) prices
    window_size: number of past time steps used to predict the next one

    Returns (X, y) where X[i] is data[i:i+window_size] and y[i] is the
    value immediately following that window, data[i+window_size].
    """
    X = []
    y = []
    # iterate over every start index whose window plus one target fits in data
    # (replaces the original manual while/i += 1 loop with an equivalent range)
    for start in range(len(data) - window_size):
        X.append(data[start:start + window_size])
        y.append(data[start + window_size])
    assert len(X) == len(y)
    return X, y
# We choose window size as 7 and generate the input and output:
X, y = get_data(data, window_size = 7)
# Consider the first 1000 points as a train set and the rest of the points in the dataset as the test set:
# +
#train set
X_train = np.array(X[:1000])
y_train = np.array(y[:1000])
#test set (everything after the first 1000 windows)
X_test = np.array(X[1000:])
y_test = np.array(y[1000:])
# -
# The shape of X_train is shown as follows:
#
#
X_train.shape
#
# What does the preceding shape mean? It implies that (sample_size, time_steps, features). LSTM requires input exactly in this format, for example:
#
# * 1000 implies the number of data points (sample_size)
#
# * 7 specifies the window size (time_steps)
#
# * 1 specifies the dimension of our dataset (features)
# ## Defining Network Parameters
#
# Define the network parameters:
# Network hyper-parameters: batch size, input window length, hidden units, LR.
batch_size = 7
window_size = 7
hidden_layer = 256
learning_rate = 0.001
# ## Defining Placeholders
#
# Define the placeholders for our input and output:
# NOTE(review): `input` shadows the Python builtin; kept for consistency with
# the rest of the notebook.
input = tf.placeholder(tf.float32, [batch_size, window_size, 1])
target = tf.placeholder(tf.float32, [batch_size, 1])
# ## Defining weights
#
# Let's define all the weights we use in our LSTM cell. For each gate,
# U_* projects the 1-D input, W_* is the recurrent (hidden-to-hidden)
# matrix and b_* the bias.
#
# Weights of the input gate:
U_i = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))
W_i = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))
b_i = tf.Variable(tf.zeros([hidden_layer]))
# Weights of the forget gate:
U_f = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))
W_f = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))
b_f = tf.Variable(tf.zeros([hidden_layer]))
# Weights of the output gate:
U_o = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))
W_o = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))
b_o = tf.Variable(tf.zeros([hidden_layer]))
# Weights of the candidate state:
U_g = tf.Variable(tf.truncated_normal([1, hidden_layer], stddev=0.05))
W_g = tf.Variable(tf.truncated_normal([hidden_layer, hidden_layer], stddev=0.05))
b_g = tf.Variable(tf.zeros([hidden_layer]))
# Output layer weight (hidden state -> scalar price prediction):
V = tf.Variable(tf.truncated_normal([hidden_layer, 1], stddev=0.05))
b_v = tf.Variable(tf.zeros([1]))
# ## Define the LSTM cell
#
#
#
# We define the function called LSTM_cell, which returns the cell state and hidden state as an output. Recall the steps we saw in the forward propagation of LSTM:
#
# $$ i_t = \sigma ( U_i x_t + W_i h_{t-1} + b_i) $$
#
# $$ f_t = \sigma ( U_f x_t + W_f h_{t-1} + b_f) $$
#
# $$ o_t = \sigma (U_o x_t + W_o h_{t-1} + b_o) $$
#
# $$ g_t = tanh (U_g x_t + W_g h_{t-1} + b_g) $$
#
# $$ c_t = f_t c_{t-1} + i_t {g_t} $$
#
# $$ h_t = o_t tanh(c_t) $$
#
# The LSTM cell is implemented as shown in the following code. It takes the input, previous hidden state, and previous cell state as inputs, and returns the current cell state and current hidden state.
def LSTM_cell(input, prev_hidden_state, prev_cell_state):
    """Single LSTM step; returns (cell_state, hidden_state) for this time step."""
    # gate activations (sigmoid) and the tanh candidate state
    input_gate = tf.sigmoid(tf.matmul(input, U_i) + tf.matmul(prev_hidden_state, W_i) + b_i)
    forget_gate = tf.sigmoid(tf.matmul(input, U_f) + tf.matmul(prev_hidden_state, W_f) + b_f)
    output_gate = tf.sigmoid(tf.matmul(input, U_o) + tf.matmul(prev_hidden_state, W_o) + b_o)
    candidate = tf.tanh(tf.matmul(input, U_g) + tf.matmul(prev_hidden_state, W_g) + b_g)
    # new cell state mixes retained memory with the gated candidate
    cell_state = (prev_cell_state * forget_gate) + (input_gate * candidate)
    hidden_state = output_gate * tf.tanh(cell_state)
    return cell_state, hidden_state
# ## Defining forward propagation
#
#
# Now, we will perform forward propagation and predict the output, $\hat{y}_t$.
#
# $$ \hat{y}_t = Vh_t + b_v$$
# +
#initialize the list called y_hat for storing the predicted output
y_hat = []
#for each sample in the batch we compute the output and store it in the y_hat list
for i in range(batch_size):
    #initialize hidden state and cell state for each batch
    hidden_state = np.zeros([1, hidden_layer], dtype=np.float32)
    cell_state = np.zeros([1, hidden_layer], dtype=np.float32)
    #unroll the LSTM over the window: one cell step per time step
    for t in range(window_size):
        cell_state, hidden_state = LSTM_cell(tf.reshape(input[i][t], (-1, 1)), hidden_state, cell_state)
    #project the final hidden state to a scalar prediction and append it
    y_hat.append(tf.matmul(hidden_state, V) + b_v)
# -
# ## Defining backpropagation
#
# After performing forward propagation and predicting the output, we compute the loss. We use mean squared error as our loss function and the total loss is the sum of losses across all the time steps as follows:
# +
# Mean squared error per batch element, averaged into a single scalar loss.
losses = []
for i in range(len(y_hat)):
    losses.append(tf.losses.mean_squared_error(tf.reshape(target[i], (-1, 1)), y_hat[i]))
loss = tf.reduce_mean(losses)
# -
# To avoid the exploding gradient problem, we perform gradient clipping:
gradients = tf.gradients(loss, tf.trainable_variables())
clipped, _ = tf.clip_by_global_norm(gradients, 4.0)
# We use Adam optimizer and minimize our loss function.
# FIX: apply the *clipped* gradients -- the original applied the raw
# `gradients`, so the clipping computed above had no effect at all.
optimizer = tf.train.AdamOptimizer(learning_rate).apply_gradients(zip(clipped, tf.trainable_variables()))
# ## Training the LSTM model
#
#
# Start the TensorFlow session and initialize all the variables:
# Start a TF1 session and initialize all variables.
session = tf.Session()
session.run(tf.global_variables_initializer())
# Set the number of epochs:
epochs = 100
# NOTE: this notebook targets a Python 2 kernel (print statement below).
for i in range(epochs):
    train_predictions = []
    index = 0
    epoch_loss = []
    #Sample batches of data and train the network
    while(index + batch_size) <= len(X_train):
        #sample batch of data
        X_batch = X_train[index:index+batch_size]
        y_batch = y_train[index:index+batch_size]
        #predict the prices and compute loss; running `optimizer` applies the update
        predicted, loss_val, _ = session.run([y_hat, loss, optimizer], feed_dict={input:X_batch, target:y_batch})
        #store the loss
        epoch_loss.append(loss_val)
        #store the predictions
        train_predictions.append(predicted)
        index += batch_size
    #print the loss on every 10th epoch
    if (i % 10)== 0:
        print 'Epoch {}, Loss: {} '.format(i,np.mean(epoch_loss))
# ## Making predictions using the LSTM model
#
# Now, we will start making predictions on the test set.
# Slide over the test set in batch_size steps; a final partial batch is
# skipped because the graph expects exactly batch_size inputs.
predicted_output = []
i = 0
while i+batch_size <= len(X_test):
    # session.run([y_hat]) returns a one-element list, so each entry appended
    # below is nested as [list-of-per-sample-arrays]; it is flattened later.
    output = session.run([y_hat],feed_dict={input:X_test[i:i+batch_size]})
    i += batch_size
    predicted_output.append(output)
# Print the predicted output
predicted_output[0]
# As you can see above, the predicted values are in a nested list. So we will just flatten them.
# Flatten the nested [batch][0][sample] structure produced by session.run
# into one flat list of per-sample predictions.
predicted_values_test = []
for batch_result in predicted_output:
    predicted_values_test.extend(batch_result[0])
# Now if we print the predicted values, it is no longer in a nested list
predicted_values_test[0]
# As we took the first 1000 points as a training set, we make predictions for time step greater
# than 1000.
# The first 1000 points were used for training, so pad those positions with
# None and place the test-set predictions after them.
# NOTE(review): the offset is 1019, not 1000 -- for i in [1000, 1019) this
# indexes from the *end* of predicted_values_test (negative index); confirm
# the intended alignment against the train/test split.
predictions = [
    predicted_values_test[i - 1019] if i >= 1000 else None
    for i in range(1280)
]
# Plot and see how well the predicted value matches the actual value:
#
#
# Overlay the model's predictions on the actual price series.
fig_handle = plt.figure(figsize=(16, 7))
plt.plot(data, label='Actual')
plt.plot(predictions, label='Predicted')
plt.xlabel('Days')
plt.ylabel('Price')
plt.grid()
plt.legend()
plt.show()
# As you can see in the above plot, we are making predictions for time steps greater than 1000, and after time step 1000 the actual and predicted curves intersect and track each other closely, which implies that our model has predicted the actual values well.
#
# In the next section, we will learn about the GRU cell, which acts as a simplified version of the LSTM cell.
| Chapter05/5.06 Predicting Bitcoins price using LSTM RNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Experiment with pyglet
#
# http://pyglet.org/
#
# https://pyglet.readthedocs.io/en/latest/
#
# https://pyglet.readthedocs.io/en/latest/modules/image/index.html
#
# https://pythonhosted.org/pyglet/api/pyglet.image.Texture3D-class.html
#
# https://github.com/pyglet/pyglet
# ## Import, load & save
import pyglet

# Load a sample image; the bare expression below renders it inline in the notebook.
tornado = pyglet.image.load('tornado3d.jpg')
tornado
# Re-save it as a PNG.
# BUGFIX: the original wrote to 'torando3d.png' (transposed letters).
tornado.save('tornado3d.png')
# ## Let's create and save a 3D texture with pyglet
# Create one 2-D image slice per w-index k, building up a stack for a 3-D texture.
# NOTE(review): the original cell was an unfinished sketch with invalid syntax
# (`slice(k) = ...` assigns to a call, and `uvw_shape(2)` called the shape);
# this version assumes uvw_shape is an (nx, ny, nz) tuple -- confirm.
slices = []
for k in range(uvw_shape[2]):
    image_slice = pyglet.image.create(uvw_shape[0], uvw_shape[1])
    # set pixels in this slice
    for i in range(uvw_shape[0]):
        for j in range(uvw_shape[1]):
            # TODO: set the red/green/blue of pixel (i, j) to
            # u_norm[i, j, k], v_norm[i, j, k], w_norm[i, j, k]
            pass
    slices.append(image_slice)
from http.server import HTTPServer
from server import Server
| notebooks/Try pyglet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pyetrade

# One-time interactive OAuth handshake for the E*TRADE API: print the
# authorization URL, have the user paste back the verification code, and
# print the four credentials needed by the manager cell below.
# SECURITY(review): a real-looking consumer secret is hard-coded below -- it
# should be revoked and loaded from an environment variable or config file,
# never committed to source control.
consumer_key = "<KEY>"
consumer_secret = "c52c35186ec76bcbe37e308e2e37d942e179dc07921efb3c79094b525785dd25"
oauth = pyetrade.ETradeOAuth(consumer_key, consumer_secret)
print(oauth.get_request_token()) # Use the printed URL
verifier_code = input("Enter verification code: ")
tokens = oauth.get_access_token(verifier_code)
print("\nCopy the following 4 values into the manager's arguments:")
print(consumer_key)
print(consumer_secret)
print(tokens["oauth_token"])
print(tokens["oauth_token_secret"])
# +
import options_manager
import logging
import datetime

logging.basicConfig(level=logging.INFO)

# Build the manager with the four OAuth values produced by the cell above.
# BUGFIX: in the original the oauth_token string literal was never closed and
# had no trailing comma, making the cell a SyntaxError.
# SECURITY(review): credentials should be loaded from the environment, not
# hard-coded in the notebook.
manager = options_manager.OptionsManager(
    consumer_key="<KEY>",
    consumer_secret="c52c35186ec76bcbe37e308e2e37d942e179dc07921efb3c79094b525785dd25",
    oauth_token="<KEY>",
    oauth_secret="<KEY>mRkXw4WsA="
)
# -
# Quick smoke test of the API, then a single option-chain query.
manager.get_market_data("GOOG")
# NOTE(review): min_strike=30 with max_strike=20 looks inverted (min > max);
# the same values appear in helper() below, so confirm against
# options_manager's intended semantics before "fixing".
manager.get_options_info(
    ticker="LVS",
    min_strike=30,
    max_strike=20, # all arguments below have defaults and don't need to be passed
    increment=100,
    month_look_ahead=3,
    min_volume=0,
    min_open_interest=0,
    min_annualized_return=0.0,
    contracts_to_buy=1, # defaults to max available
)
# +
import pathlib
import pandas as pd
import threading
from multiprocessing.dummy import Pool as ThreadPool
from pprint import pprint
# Load the ticker universe and prepare a small thread pool for the scan.
sector_path = pathlib.Path("./data/sectors.csv")
sector_df = pd.read_csv(sector_path)
sector_df.fillna("",inplace=True)
# NOTE(review): options_df is never used in this cell -- presumably a leftover.
options_df = pd.DataFrame()
# Tickers known to error out; excluded from the scan.
skip = ["NVR", "KSU"]
thread_pool = ThreadPool(5)
tickers = [ticker for ticker in sector_df["Ticker"].unique() if ticker not in skip]
# Limit to the first 50 tickers to keep the demo fast.
tickers = tickers[:50]
def helper(ticker):
    """Fetch option-chain info for one ticker and flatten it into a DataFrame.

    Nested dict values returned by the API are dropped so every column is
    scalar and the per-ticker frames can be concatenated.
    """
    options_info = manager.get_options_info(
        ticker=ticker,
        min_strike=30,
        max_strike=20, # all arguments below have defaults and don't need to be passed
        increment=100,
        month_look_ahead=3,
        min_volume=0,
        min_open_interest=0,
        min_annualized_return=0.0,
        contracts_to_buy=1, # defaults to max available
    )
    rows = []
    for option in options_info:
        rows.append({key: value for key, value in option.items() if not isinstance(value, dict)})
    return pd.DataFrame(rows)
# Fan the per-ticker fetches out over the thread pool and stack the frames.
results = thread_pool.map(helper, tickers)
pd.concat(results).reset_index()
# -
| option_chains/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright 2020 Montvieux Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gym_plark.envs import plark_env_non_image_state
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
# +
# Build the non-image-state Plark environment from the easy-panther config,
# randomising the panther's start position each reset.
env = plark_env_non_image_state.PlarkEnvNonImageState(config_file_path='/Components/plark-game/plark_game/game_config/10x10/panther_easy.json',random_panther_start_position=True)
# -
import random
ob = env.reset()
print("Initial observation: {}".format(ob))
# Take 5 random actions (action ids 0-6) to sanity-check the env loop.
# `display` is the notebook builtin for rendering the returned frame.
for j in range(5):
    action = random.randrange(7)
    print("Taking action: {}".format(action))
    ob, reward, done, info = env.step(action)
    print("Got obs: {}".format(ob))
    display(env.render())
# Inspect the observation space size and a fresh reset observation.
observation_space = env.observation_space
observation_space.shape[0]
obs = env.reset()
len(obs)
# +
import helper
import datetime
import os
from stable_baselines import PPO2
from stable_baselines.common.env_checker import check_env

# Validate the env against the stable-baselines API contract, then train a
# fresh PPO2 agent briefly (200 steps -- a smoke test, not real training).
env.reset()
check_env(env)
#model = helper.make_new_model("PPO2", "MlpPolicy", env)
model = PPO2('MlpPolicy', env)
# -
model.learn(200)
# Render a rollout of the trained model to an mp4 for inline playback below.
video_path = '/data/test_video/'
os.makedirs(video_path, exist_ok=True)
video_path = os.path.join(video_path, 'test_non_image_train.mp4')
basewidth,hsize = helper.make_video(model,env,video_path)
import io
import base64
from IPython.display import HTML

# Read the rendered video and embed it inline as a base64 data URI.
# BUGFIX: the original leaked a file handle via io.open(video_path, 'r+b')
# with no close; a read-only context manager is all that's needed.
with open(video_path, 'rb') as video_file:
    video = video_file.read()
encoded = base64.b64encode(video)
HTML(data='''<video alt="test" width="'''+str(basewidth)+'''" height="'''+str(hsize)+'''" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
| Components/agent-training/agent_training/non_image_state.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.append('..')
import numpy as np
from dezero import Variable
def f(x):
    """Quartic double-well test function: y = x**4 - 2*x**2."""
    y = x ** 4 - 2 * x ** 2
    return y
# First derivative: y = x^4 - 2x^2, so dy/dx = 4x^3 - 4x = 24 at x = 2.
x = Variable(np.array(2.0))
y = f(x)
y.backward(create_graph=True)
print(x.grad)
# Second backward WITHOUT clearing x.grad first: the new gradient is
# presumably accumulated on top of the old one, so this prints a mixed value
# rather than the pure second derivative -- contrast with the cell below.
gx = x.grad
gx.backward()
print(x.grad)
# Same computation, but x.cleargrad() is called before the second backward,
# so this prints the second derivative d2y/dx2 = 12x^2 - 4 = 44 at x = 2.
x = Variable(np.array(2.0))
y = f(x)
y.backward(create_graph=True)
print(x.grad)
gx = x.grad
x.cleargrad()
gx.backward()
print(x.grad)
# +
# Newton's method on f: x <- x - f'(x)/f''(x), 10 iterations from x = 2.
x = Variable(np.array(2.0))
import copy
xs, ys = [], []
for i in range(10):
    print(i, x)
    y = f(x)
    x.cleargrad()
    y.backward(create_graph=True)
    # record the current first derivative and function value (deep-copied so
    # later in-place updates don't alias the stored arrays)
    xs.append(copy.deepcopy(x.grad.data))
    ys.append(copy.deepcopy(y.data))
    # second backward through the gradient graph gives the second derivative
    gx = x.grad
    x.cleargrad()
    gx.backward()
    gx2 = x.grad
    # Newton update applied in-place to the underlying ndarray
    x.data -= gx.data / gx2.data
# +
import matplotlib.pyplot as plt
# Plot the recorded (gradient, value) pairs across Newton iterations.
print(xs)
print(ys)
plt.plot(xs, ys, marker='.')
plt.show()
# -
| CHAPTER03/step33.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.2 64-bit (''aceleradev-ds'': conda)'
# language: python
# name: python38264bitaceleradevdsconda640d172524af4417b6a9de4e82d86069
# ---
# # Conhecendo os Dados
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from unidecode import unidecode

# Load the anonymised survey results and inspect column dtypes/null counts.
pesquisa = pd.read_csv('resultados-pesquisa-anonimizado.csv')
pesquisa.info()
# -
# Only the first two describe() rows (count and unique) are of interest here.
pesquisa.describe()[:2]
# Inicialmente, podemos ver que todos os campos do resultado da pesquisa consistem de objetos, provavelmente representando informações textuais. Isso também é resultado da forma como os dados foram armazenados na planilha no Google Sheets.
#
# Analisando as colunas, podemos considerar que a informação de data/hora da resposta da pesquisa pode ser descartada, já que não traz um dado que possa ser analisado em conjunto com os demais, representando apenas o momento em que aquela resposta foi preenchida.
#
# A maioria das demais colunas possuem quase que todos os registros com valores únicos. Supondo que isso seja resultado da inserção da informação por um campo de texto, podemos tentar analisar respostas que sejam semelhantes e agrupá-las, criando categorias que possam ser mais úteis para a análise.
#
# A coluna que menos apresenta valores únicos para os seus registros é a pergunta <i>"Com qual frequência você busca por informações fornecidas pela prefeitura de Feira de Santana?"</i>. Supondo que a pergunta tenha disponibilizado opções para resposta, isso faz sentido. Ainda assim, a pergunta supõe uma ordenação entre as respostas, que buscaremos evidenciar.
pesquisa.drop(['Carimbo de data/hora'], axis=1, inplace=True)
# # Limpeza e Exibição dos Dados
# ## Ocupação
pesquisa['Qual sua atual ocupação (profissão ou estudante)? '].unique()
# Vendo os resultados únicos e o texto da coluna para a pergunta sobre ocupação, podemos fazer uma breve limpeza nos dados e classificação, criando uma nova coluna.
# Normalise the free-text occupation: fill missing answers, strip whitespace,
# title-case. NOTE(review): the trailing space in the column name appears to
# match the CSV header exactly -- do not "fix" it.
pesquisa['OCUPACAO'] = pesquisa['Qual sua atual ocupação (profissão ou estudante)? '].fillna('Não informado').apply(str.strip).apply(str.title)
pesquisa['OCUPACAO'].sort_values().unique()
# Analisando novamente as profissões, podemos considerar algumas categorias.
#
# "Bombeiro Militar", "Policial Militar" e "Polícial Militar" podem ser agrupados em uma categoria "Militar".
#
# "Professor" e "Professora" podem ser agrupados em "Professor/Professora".
#
# "Promotor De Vendas" e "Representante" podem ser agrupados em "Vendas".
#
# "Publicitária" e "Publicitário E Locutor" podem ser agrupados em "Publicitário/Publicitária".
#
# As demais ocupações não parecem se agrupar naturalmente.
# +
def categorize_profession(profession):
    """Map a title-cased free-text occupation onto a coarse category.

    Checks are ordered; anything unmatched is returned unchanged.
    """
    lowered = profession.lower()
    if 'militar' in lowered:
        return 'Militar'
    if 'professor' in lowered:
        return 'Professor/Professora'
    if profession in ('Promotor De Vendas', 'Representante'):
        return 'Vendedor/Representante'
    if 'publicit' in lowered:
        return 'Publicitário/Publicitária'
    if 'community manager' in lowered:
        return 'Organizador de Comunidade'
    return profession
# Apply the coarse categorisation and chart the distribution of occupations.
pesquisa['OCUPACAO_CATEGORY'] = pesquisa['OCUPACAO'].apply(categorize_profession)
# -
pesquisa.groupby('OCUPACAO_CATEGORY').size().sort_values().plot(kind='barh', title='Ocupação Profissional')
# ## Frequência de Busca de Informações
# Rewrite the one verbose answer into a more concise label.
pesquisa.loc[pesquisa['Com qual frequência você busca por informações fornecidas pela prefeitura de Feira de Santana?'] == 'Depende da semana, mas geralmente mais de uma vez por semana. ', 'Com qual frequência você busca por informações fornecidas pela prefeitura de Feira de Santana?'] = 'Mais de 1 vez por semana'
pesquisa['Com qual frequência você busca por informações fornecidas pela prefeitura de Feira de Santana?'].value_counts()
# The answers are already well described; display them in their natural order
# (most to least frequent access) rather than by count.
order = ['Todos os dias', 'Mais de 1 vez por semana', '1 vez por semana', '1 vez por mês', 'Quase nunca', 'Nunca busquei']
pesquisa['Com qual frequência você busca por informações fornecidas pela prefeitura de Feira de Santana?'].value_counts().reindex(order).plot(kind='barh', title='Frequência de busca de informações')
# ## Expectativa das Informações
pesquisa['Que tipo de informação você ESPERA encontrar no Dados abertos de Feira? (Coisas que você acredita que é certeza ter)'].value_counts()
# Podemos notar que todas as respostas são distintas entre si, provavelmente resultado da coleta através de uma caixa de texto.
#
# Assim como fizemos em outras perguntas, podemos estabelecer categorias através de palavras-chave. Como a quantidade de respostas não é muito extensa, é possível fazer uma análise em cada uma das respostas.
#
# Em uma observação das respostas, podemos estabelecer as seguintes categorias:
#
# 1. Investimentos (infraestrutura, despesas gerais, salário);
# 2. Saúde;
# 3. Educação;
# 4. Segurança;
# 5. Orçamento (verba);
# 6. Transparência;
# 7. Outros;
#
# Uma resposta pode estar representada em mais de uma categoria. Assim, a soma das categorias pode ser maior que a quantidade de respostas obtidas.
# +
# Keyword-based binary categorisers: each returns 1 when the accent-stripped,
# lower-cased answer mentions any keyword of its theme, else 0. An answer may
# match several categories, so category totals can exceed the answer count.
def categorize_expectation_public_investments(answer):
    """1 if the answer mentions public spending/investments, else 0."""
    if any(x in unidecode(answer.lower()) for x in [
        'servidor', 'investimento', 'obra', 'urbanizacao',
        'salario', 'compra', 'dinheiro', 'bairro', 'recurso',
        'gasto', 'licitacao', 'licitacoes']
    ):
        return 1
    return 0
def categorize_expectation_public_health(answer):
    """1 if the answer mentions public health, else 0."""
    if any(x in unidecode(answer.lower()) for x in ['saude']):
        return 1
    return 0
def categorize_expectation_public_education(answer):
    """1 if the answer mentions education/schools, else 0."""
    if any(x in unidecode(answer.lower()) for x in ['educacao', 'escola', 'colegio']):
        return 1
    return 0
def categorize_expectation_public_safety(answer):
    """1 if the answer mentions public safety, else 0."""
    if any(x in unidecode(answer.lower()) for x in ['seguranca']):
        return 1
    return 0
def categorize_expectation_public_budget(answer):
    """1 if the answer mentions budget/revenue, else 0."""
    if any(x in unidecode(answer.lower()) for x in ['verba', 'dinheiro', 'receita']):
        return 1
    return 0
def categorize_expectation_public_transparency(answer):
    """1 if the answer mentions information/transparency, else 0."""
    if any(x in unidecode(answer.lower()) for x in [
        'informacoes', 'informacao', 'transparencia',
        'gasto', 'dados', 'verdade', 'clareza']
    ):
        return 1
    return 0
def categorize_expectation_others(answer):
    """1 for answers that fit none of the named themes, else 0."""
    if any(x in unidecode(answer.lower()) for x in ['geral', 'relato', 'proposta', 'grafico']):
        return 1
    return 0
def categorize_information(answer):
    """1 if the answer values information quality/content, else 0."""
    if any(x in unidecode(answer.lower()) for x in [
        'retorno', 'gastos', 'questionamento', 'atende',
        'conteudo', 'informacao', 'informacoes', 'dados',
        'confiabilidade', 'resumo', 'divulgacao']
    ):
        return 1
    return 0
def categorize_frequency(answer):
    """1 if the answer values update frequency/freshness, else 0."""
    if any(x in unidecode(answer.lower()) for x in [
        'frequente', 'atualizacao', 'tempo real', 'atualizados', 'atualizadas']
    ):
        return 1
    return 0
def categorize_usability(answer):
    """1 if the answer values ease of use / intuitiveness, else 0."""
    if any(x in unidecode(answer.lower()) for x in ['facilidade', 'intuitiva', 'intuitivo',]):
        return 1
    return 0
pesquisa['ANSWER_INVESTMENT'] = pesquisa['Que tipo de informação você ESPERA encontrar no Dados abertos de Feira? (Coisas que você acredita que é certeza ter)'].apply(categorize_expectation_public_investments)
pesquisa['ANSWER_HEALTH'] = pesquisa['Que tipo de informação você ESPERA encontrar no Dados abertos de Feira? (Coisas que você acredita que é certeza ter)'].apply(categorize_expectation_public_health)
pesquisa['ANSWER_EDUCATION'] = pesquisa['Que tipo de informação você ESPERA encontrar no Dados abertos de Feira? (Coisas que você acredita que é certeza ter)'].apply(categorize_expectation_public_education)
pesquisa['ANSWER_SAFETY'] = pesquisa['Que tipo de informação você ESPERA encontrar no Dados abertos de Feira? (Coisas que você acredita que é certeza ter)'].apply(categorize_expectation_public_safety)
pesquisa['ANSWER_BUDGET'] = pesquisa['Que tipo de informação você ESPERA encontrar no Dados abertos de Feira? (Coisas que você acredita que é certeza ter)'].apply(categorize_expectation_public_budget)
pesquisa['ANSWER_TRANSPARENCY'] = pesquisa['Que tipo de informação você ESPERA encontrar no Dados abertos de Feira? (Coisas que você acredita que é certeza ter)'].apply(categorize_expectation_public_transparency)
pesquisa['ANSWER_OTHERS'] = pesquisa['Que tipo de informação você ESPERA encontrar no Dados abertos de Feira? (Coisas que você acredita que é certeza ter)'].apply(categorize_expectation_others)
# -
labels = {'ANSWER_INVESTMENT': 'INVESTIMENTO', 'ANSWER_HEALTH': 'SAÚDE', 'ANSWER_EDUCATION': 'EDUCAÇÃO', 'ANSWER_SAFETY': 'SEGURANÇA', 'ANSWER_BUDGET': 'ORÇAMENTO', 'ANSWER_TRANSPARENCY': 'TRANSPARÊNCIA', 'ANSWER_OTHERS': 'OUTROS'}
pesquisa[['ANSWER_INVESTMENT', 'ANSWER_HEALTH', 'ANSWER_EDUCATION', 'ANSWER_SAFETY', 'ANSWER_BUDGET', 'ANSWER_TRANSPARENCY', 'ANSWER_OTHERS']].apply(np.sum).rename(index=labels).sort_values().plot(kind='barh', title='Expectativa do tipo de informação')
# ## Informações Adicionais
pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].value_counts()
# Em relação à pergunta de quais informações seria interessante existirem no portal (indo além do obrigatório), novamente temos respostas únicas para cada respondente. Isso é esperado, decorrente da forma discursiva da coleta dos dados. Faremos a categorização de forma semelhante à pergunta anterior.
pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'] = pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].fillna('')
# +
pesquisa['SECOND_ANSWER_INVESTMENT'] = pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].apply(categorize_expectation_public_investments)
pesquisa['SECOND_ANSWER_HEALTH'] = pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].apply(categorize_expectation_public_health)
pesquisa['SECOND_ANSWER_EDUCATION'] = pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].apply(categorize_expectation_public_education)
pesquisa['SECOND_ANSWER_SAFETY'] = pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].apply(categorize_expectation_public_safety)
pesquisa['SECOND_ANSWER_BUDGET'] = pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].apply(categorize_expectation_public_budget)
pesquisa['SECOND_ANSWER_TRANSPARENCY'] = pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].apply(categorize_expectation_public_transparency)
pesquisa['SECOND_ANSWER_OTHERS'] = pesquisa['Que tipo de informação você GOSTARIA que tivesse em um portal de transparência de dados da cidade? (Coisas que vão além do obrigado a ter)'].apply(categorize_expectation_others)
# -
labels = {'SECOND_ANSWER_INVESTMENT': 'INVESTIMENTO', 'SECOND_ANSWER_HEALTH': 'SAÚDE', 'SECOND_ANSWER_EDUCATION': 'EDUCAÇÃO', 'SECOND_ANSWER_SAFETY': 'SEGURANÇA', 'SECOND_ANSWER_BUDGET': 'ORÇAMENTO', 'SECOND_ANSWER_TRANSPARENCY': 'TRANSPARÊNCIA', 'SECOND_ANSWER_OTHERS': 'OUTROS'}
pesquisa[['SECOND_ANSWER_INVESTMENT', 'SECOND_ANSWER_HEALTH', 'SECOND_ANSWER_EDUCATION', 'SECOND_ANSWER_SAFETY', 'SECOND_ANSWER_BUDGET', 'SECOND_ANSWER_TRANSPARENCY', 'SECOND_ANSWER_OTHERS']].apply(np.sum).rename(index=labels).sort_values().plot(kind='barh', title='Informações além do obrigatório')
# ## Incentivo a Retorno
pesquisa['O que lhe faria voltar a consultar o portal Dados abertos de Feira com frequência?'].value_counts()
# Para a questão "O que lhe faria voltar a consultar o portal Dados abertos de Feira com frequência?", temos novamente respostas diferentes entre os respondentes. Faremos uma categorização assim como nas perguntas anteriores.
# +
pesquisa['THIRD_ANSWER_INFO'] = pesquisa['O que lhe faria voltar a consultar o portal Dados abertos de Feira com frequência?'].apply(categorize_information)
pesquisa['THIRD_ANSWER_FREQ'] = pesquisa['O que lhe faria voltar a consultar o portal Dados abertos de Feira com frequência?'].apply(categorize_frequency)
pesquisa['THIRD_ANSWER_USABILITY'] = pesquisa['O que lhe faria voltar a consultar o portal Dados abertos de Feira com frequência?'].apply(categorize_usability)
# -
labels = {'THIRD_ANSWER_INFO': 'QUALIDADE DA INFORMAÇÃO', 'THIRD_ANSWER_FREQ': 'FREQUÊNCIA DA INFORMAÇÃO', 'THIRD_ANSWER_USABILITY': 'SISTEMA INTUITIVO'}
pesquisa[['THIRD_ANSWER_INFO', 'THIRD_ANSWER_FREQ', 'THIRD_ANSWER_USABILITY']].apply(np.sum).rename(index=labels).sort_values().plot(kind='barh', title='O que me faria voltar a utilizar o portal?', xticks=np.arange(2, 22, 2))
# ## Outras Fontes de Informação
pesquisa['Quais sites você usa ou já usou para pesquisar esses dados?'] = pesquisa['Quais sites você usa ou já usou para pesquisar esses dados?'].fillna('Nenhum')
pesquisa['Quais sites você usa ou já usou para pesquisar esses dados?'].value_counts()
# Podemos ver que existem algumas respostas semelhantes, embora a maior parte dos respondentes não tenha informado outra fonte de informação dos dados municipais. De qualquer forma, precisamos categorizar algumas das informações. Apesar do Acorda Cidade ser um website, pela quantidade expressiva de respostas, vamos separá-lo em uma categoria própria.
# +
# Keyword categorisers for "which other sites do you use?" answers;
# same 1/0 convention as the expectation categorisers above.
def categorize_acorda_cidade(answer):
    """1 if the answer cites the Acorda Cidade news site, else 0."""
    if any(x in unidecode(answer.lower()).strip() for x in ['acorda cidade']):
        return 1
    return 0
def categorize_prefeitura(answer):
    """1 if the answer cites the city-hall website, else 0."""
    if any(x in unidecode(answer.lower()).strip() for x in ['prefeitura']):
        return 1
    return 0
def categorize_diario_oficial(answer):
    """1 if the answer cites the official gazette, else 0.

    NOTE(review): unlike its siblings this does NOT lower-case the answer,
    so it only matches an upper-case 'DO' or an already-lower-case 'oficial'.
    That may be deliberate (lower-casing would make 'do' match almost any
    Portuguese sentence) -- confirm before changing, as it affects counts.
    """
    if any(x in unidecode(answer) for x in ['DO', 'oficial']):
        return 1
    return 0
def categorize_sites(answer):
    """1 if the answer cites generic news sites/papers, else 0."""
    if any(x in unidecode(answer.lower()).strip() for x in ['sites', 'jornal', 'jornais']):
        return 1
    return 0
def categorize_none(answer):
    """1 if the respondent uses no other site, else 0."""
    if any(x in unidecode(answer.lower()).strip() for x in ['nenhum']):
        return 1
    return 0
# +
pesquisa['FOURTH_ANSWER_ACORDA_CIDADE'] = pesquisa['Quais sites você usa ou já usou para pesquisar esses dados?'].apply(categorize_acorda_cidade)
pesquisa['FOURTH_ANSWER_CITY_HALL'] = pesquisa['Quais sites você usa ou já usou para pesquisar esses dados?'].apply(categorize_prefeitura)
pesquisa['FOURTH_ANSWER_OFFICIAL'] = pesquisa['Quais sites você usa ou já usou para pesquisar esses dados?'].apply(categorize_diario_oficial)
pesquisa['FOURTH_ANSWER_WEBSITES'] = pesquisa['Quais sites você usa ou já usou para pesquisar esses dados?'].apply(categorize_sites)
pesquisa['FOURTH_ANSWER_NONE'] = pesquisa['Quais sites você usa ou já usou para pesquisar esses dados?'].apply(categorize_none)
# -
labels = {'FOURTH_ANSWER_ACORDA_CIDADE': 'ACORDA CIDADE', 'FOURTH_ANSWER_CITY_HALL': 'PREFEITURA', 'FOURTH_ANSWER_OFFICIAL': 'DIÁRIO OFICIAL', 'FOURTH_ANSWER_WEBSITES': 'OUTROS SITES', 'FOURTH_ANSWER_NONE': 'NENHUM'}
pesquisa[['FOURTH_ANSWER_ACORDA_CIDADE', 'FOURTH_ANSWER_CITY_HALL', 'FOURTH_ANSWER_OFFICIAL', 'FOURTH_ANSWER_WEBSITES', 'FOURTH_ANSWER_NONE']].apply(np.sum).rename(index=labels).sort_values().plot(kind='barh', title='Outros sites utilizados')
labels = {'FOURTH_ANSWER_ACORDA_CIDADE': 'ACORDA CIDADE', 'FOURTH_ANSWER_CITY_HALL': 'PREFEITURA', 'FOURTH_ANSWER_OFFICIAL': 'DIÁRIO OFICIAL', 'FOURTH_ANSWER_WEBSITES': 'OUTROS SITES'}
pesquisa[['FOURTH_ANSWER_ACORDA_CIDADE', 'FOURTH_ANSWER_CITY_HALL', 'FOURTH_ANSWER_OFFICIAL', 'FOURTH_ANSWER_WEBSITES']].apply(np.sum).rename(index=labels).sort_values().plot(kind='barh', title='Outros sites utilizados (para aqueles que usam)')
| analysis/2020-09-01-felipelfb-resultados-pesquisa-usuarios-do-portal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
import csv
import seaborn
# +
# Load the Zillow 2-bedroom home-value index and narrow it to California.
csv_path='raw_data/Zip_Zhvi_2bedroom.csv'
# BUGFIX: pd.DataFrame.from_csv was deprecated in pandas 0.21 and removed in
# 1.0; read_csv with index_col=0 is the documented replacement (the index is
# immediately reset below, so from_csv's date-parsing default is irrelevant).
df_2b=pd.read_csv(csv_path, index_col=0)
df_2b=df_2b.reset_index()
df_2b=df_2b.set_index('State')
df_2b.head()
df_2b=df_2b.rename(columns={'RegionName':'Zipcode'})
df_2b
len(df_2b)
# keep only California rows
df_2b=df_2b.loc['CA',:]
df_2b.head()
# -
# Tag every row with a bedroom count.
# NOTE(review): the source file is Zip_Zhvi_2bedroom.csv but this stores the
# *string* '1' -- the notebook filename ("...2bed-Copy1") suggests a leftover
# from the 1-bedroom notebook it was copied from; confirm the intended value.
df_2b['Bedrooms']='1'
df_2b
#take needed columns and order
subset=['RegionID','Bedrooms']
mask=df_2b.columns.isin(subset)
df_2b=pd.concat([df_2b.loc[:,mask],df_2b.loc[:,~mask]],axis=1)
df_2b=df_2b.loc[:,df_2b.columns!='RegionID']
df_2b
# Re-index by county and keep only Orange County rows.
df_2b=df_2b.reset_index()
df_2b=df_2b.set_index('CountyName')
df_2b=df_2b.loc['Orange',:]
df_2b
# +
# subset=['State','Bedrooms','Zipcode','City','SizeRank','Metro']
# mask=df_1b.columns.isin(subset)
# df_1b_ed=pd.concat([df_1b.loc[:,mask]],axis=1)
#everything but prices by date, saved to be merged after drop of every nth column
# df_1b_ed
# +
# df_1b_ed_p2=pd.concat([df_1b.loc[:,~mask]],axis=1)
# df_1b_ed_p2
# +
#get last 88 columns
# clean=df_1b_ed_p2.iloc[:,-100:]
# clean.head()
# -
# Median January price per (City, Zipcode) for each year 2009-2018.
# NOTE(review): selecting multiple groupby columns with a bare tuple
# ['2009-01', ...] was deprecated in later pandas (needs a list-of-labels:
# [['2009-01', ...]]) -- update if the pandas version is bumped.
two_b_df=pd.DataFrame(df_2b.groupby(['City','Zipcode'])['2009-01','2010-01','2011-01','2012-01','2013-01','2014-01','2015-01','2016-01','2017-01','2018-01'].median())
two_b_df.head()
# +
# Overall median across the ten yearly snapshots.
two_b_df['Median Price']=two_b_df.median(axis=1)
two_b_df.head()
# -
# NOTE(review): 1 bedroom recorded for a 2-bedroom dataset (and as an int,
# while the earlier cell used the string '1') -- confirm.
two_b_df['Number of Bedrooms']=1
two_b_df.head()
two_b_df=two_b_df.rename(columns={'2009-01':'2009','2010-01':'2010','2011-01':'2011','2012-01':'2012','2013-01':'2013','2014-01':'2014','2015-01':'2015','2016-01':'2016','2017-01':'2017','2018-01':'2018'})
two_b_df.head()
| Zillow_Group_Temp/.ipynb_checkpoints/price_data_allHomes-2bed-Copy1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
from __future__ import print_function
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from modules.stn import STN
from modules.gridgen import CylinderGridGen, AffineGridGen, AffineGridGenV2, DenseAffineGridGen
from PIL import Image
from matplotlib import mlab
import matplotlib.pyplot as plt
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
# + deletable=true editable=true
# Load the test image and scale pixel values to [0, 1].
img = Image.open('cat.jpg').convert('RGB')
img = np.array(img)/255.0
plt.imshow(img)
# + deletable=true editable=true
# Add a batch dimension and convert to a float32 tensor.
img_batch = np.expand_dims(img, 0)
inputImages = torch.from_numpy(img_batch.astype(np.float32))
inputImages.size()
# Build the sampler and a 328x582 affine grid generator, then warp the image
# with a fixed 2x3 affine (shear) matrix.
# NOTE(review): `input` shadows the builtin; kept because later cells use it.
s = STN()
g = AffineGridGenV2(328, 582)
input = Variable(torch.from_numpy(np.array([[[1, 0.5, 0], [0.5, 1, 0]]], dtype=np.float32)), requires_grad = True)
#print input
# NOTE(review): aux_zeros is never used below -- presumably a leftover.
aux_zeros = Variable(torch.zeros(1))
out = g(input)
grid_out = out # save for later use
input1 = Variable(inputImages)
res = s(input1, out)
res = res.data.numpy()
# + deletable=true editable=true
# Show the warped result, then treat it as the regression target.
plt.imshow(res[0])
# + deletable=true editable=true
target = Variable(torch.from_numpy(res))
# + deletable=true editable=true
print(input1.size(), target.size())
# + deletable=true editable=true
# L1 losses for the sweep below.
# NOTE(review): crt2 is never used, and calling crt.forward(...) directly is
# discouraged -- crt(input1, target) would route through __call__/hooks.
crt = nn.L1Loss()
crt2 = nn.L1Loss()
# + deletable=true editable=true
crt.forward(input1, target)
# + deletable=true editable=true
class Transformer(nn.Module):
    """Warp input1 with an affine sampling grid generated from input2.

    input2 is a (1, 2, 3) affine parameter tensor; the grid is fixed at
    328x582 to match the cat image used above.
    """
    def __init__(self):
        super(Transformer, self).__init__()
        self.s = STN()
        # NOTE(review): lr here is an AffineGridGenV2 constructor argument
        # (project module), not an optimizer setting -- confirm its semantics.
        self.g = AffineGridGenV2(328, 582, lr = 0.01)
    def forward(self,input1, input2):
        # build the sampling grid from the affine params, then sample input1
        out = self.g(input2)
        out2 = self.s(input1, out)
        return out2
# + deletable=true editable=true
t = Transformer()
# + deletable=true editable=true
# Sweep one affine parameter v over [0.1, 2) and record the L1 loss against
# the fixed target plus the gradient w.r.t. that parameter.
x = np.arange(0.1, 2, 0.01)
y = []
g_ = []
for v in x:
    input2 = Variable(torch.from_numpy(np.array([[[1, 0.5, 0], [0.48, v, 0]]], dtype=np.float32)) , requires_grad = True)
    out = t(input1, input2)
    err = crt(out, target)
    err.backward()
    y.append(err.data[0])
    #print input2.grad.size()
    # gradient of the loss w.r.t. the swept entry (row 1, col 1)
    g_.append(input2.grad.data[0,1,1])
# + deletable=true editable=true
# Loss curve and its gradient over the swept parameter.
plt.figure()
plt.plot(x,y)
plt.figure()
plt.plot(x,g_)
# + deletable=true editable=true
class ConvSpatialTransformer(nn.Module):
    """Spatial transformer whose dense sampling grid is predicted by a small CNN.

    A 4-layer conv stack maps the conditioning image input2 to per-pixel
    affine offsets; an identity affine map is added so the untrained network
    starts near the identity warp, then input1 is sampled on the dense grid.
    Returns (warped_image, grid).
    """
    def __init__(self, height, width):
        super(ConvSpatialTransformer, self).__init__()
        # BUGFIX: the original hard-coded 328x582 for the identity map inside
        # forward(), silently ignoring the constructor's height/width.
        self.height = height
        self.width = width
        self.s = STN()
        self.conv = nn.Sequential(
          torch.nn.Conv2d(3, 6, 3, stride=1, padding=1),
          torch.nn.Conv2d(6, 6, 3, stride=1, padding=1),
          torch.nn.Conv2d(6, 6, 3, stride=1, padding=1),
          torch.nn.Conv2d(6, 6, 3, stride=1, padding=1),
        )
        self.g = DenseAffineGridGen(height, width)
    def forward(self,input1, input2):
        # NHWC -> NCHW for the convs, then back to NHWC for the grid generator
        conv = self.conv(input2.transpose(2,3).transpose(1,2))
        conv = conv.transpose(1,2).transpose(2,3)
        # per-pixel identity affine parameters: [1, 0, 0, 1, 0, 0] per location
        iden = Variable(torch.cat([torch.ones(1, self.height, self.width, 1),
                                   torch.zeros(1, self.height, self.width, 3),
                                   torch.ones(1, self.height, self.width, 1),
                                   torch.zeros(1, self.height, self.width, 1)], 3))
        print(conv.size(), iden.size())
        out = self.g(conv + iden)
        out2 = self.s(input1, out)
        return out2, out
# + deletable=true editable=true
# Run the conv spatial transformer with the image conditioning on itself.
c = ConvSpatialTransformer(328, 582)
input2 = input1.clone()
# + deletable=true editable=true
res, grid = c(input1, input2)
# + deletable=true editable=true
plt.imshow(res.data.numpy()[0])
# + deletable=true editable=true
# Drop the batch dimension: grid becomes (H, W, 2) sampling coordinates.
grid = grid.data.numpy()[0]
# + deletable=true editable=true
# Build a normalised [-1, 1] mesh matching the 328x582 grid for contouring.
# NOTE(review): delta is never used below -- presumably a leftover.
delta = 0.025
x = np.arange(-1, 1, 2/582.0)
y = np.arange(-1, 1, 2/328.0)
X, Y = np.meshgrid(x, y)
Z1 = grid[:,:,0]
Z2 = grid[:,:,1]
# + deletable=true editable=true
# Overlay iso-contours of both grid coordinate channels.
fig = plt.figure(figsize=(4,2))
CS = plt.contour(X, Y, Z1, 15, colors='k')
#plt.clabel(CS, fontsize=9, inline=1)
CS = plt.contour(X, Y, Z2, 15, colors='k')
#plt.clabel(CS, fontsize=9, inline=1)
# + deletable=true editable=true
fig.canvas.draw()
# Render the figure into a numpy RGB array of shape (height, width, 3).
# BUGFIX: np.fromstring(..., sep='') on binary data is deprecated;
# np.frombuffer is the documented replacement (the resulting read-only view
# is fine here since it is only displayed).
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# + deletable=true editable=true
plt.imshow(data)
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
| script/test_conv_stnm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # In Short
import arche
from arche import *
# Validate a Scrapy Cloud job against a JSON schema, comparing it to a
# previous job of the same spider.
a = Arche("381798/1/4", schema="https://raw.githubusercontent.com/scrapinghub/arche/master/docs/source/nbs/data/books.json", target="381798/1/3")
a.source_items.df.head()
a.report_all()
# Some rules are not included in the above (more in [rules](https://arche.readthedocs.io/en/latest/nbs/Rules.html)), so a common case would be:
arche.rules.duplicates.find_by(a.source_items.df, ["title", "price"]).show()
| docs/source/nbs/In-Short.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rajeshbopate/-Microspectra/blob/main/day3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="KMPNamKJ57-o" outputId="adf3a40c-3a21-483c-c202-bd4cfece3efe" colab={"base_uri": "https://localhost:8080/", "height": 458}
#indexing in python
#operations on python
# Renamed from `str` to `text`: the original shadowed the builtin `str`
# type for the rest of the notebook.
text = "Microspectra"
# Positive indices 0 .. len-1 (replaces 12 hand-written print calls).
for i in range(len(text)):
    print(text[i])
# Negative indices -1 .. -len (replaces 12 hand-written print calls).
for i in range(-1, -len(text) - 1, -1):
    print(text[i])
# + id="ChxsASMe9giy" outputId="fccb4cd8-4266-44f4-d797-0720b81ed0d4" colab={"base_uri": "https://localhost:8080/", "height": 36}
# Slice: characters 0-5.
text[0:6]
# + id="9SRA6GRo9sJH" outputId="c57c49d9-3480-43c4-c8e0-c7969d51f6c0" colab={"base_uri": "https://localhost:8080/", "height": 36}
# Slice from index 0 to the end.
text[0:]
# + id="-h84dhV2-Hcs" outputId="42d3e15d-6b12-48bc-f7fd-e04979cb6e52" colab={"base_uri": "https://localhost:8080/", "height": 36}
# Slice with a step of 2.
text[0:10:2]
| day3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: fr
# language: python
# name: fr
# ---
# +
import bz2
import os
from urllib.request import urlopen
def download_landmarks(dst_file):
    """Download dlib's 68-point face-landmark model and decompress it to *dst_file*.

    The upstream file is bz2-compressed; it is streamed and decompressed in
    chunks so the archive never has to fit in memory at once.
    """
    url = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
    decompressor = bz2.BZ2Decompressor()
    chunk_size = 64 * 1024  # 1 KiB chunks were needlessly small for an HTTP stream
    with urlopen(url) as src, open(dst_file, 'wb') as dst:
        data = src.read(chunk_size)
        while len(data) > 0:
            dst.write(decompressor.decompress(data))
            data = src.read(chunk_size)

dst_dir = 'models'
dst_file = os.path.join(dst_dir, 'landmarks.dat')

if not os.path.exists(dst_file):
    # exist_ok=True: the original os.makedirs(dst_dir) raised
    # FileExistsError when the directory already existed but the model
    # file itself was missing.
    os.makedirs(dst_dir, exist_ok=True)
    download_landmarks(dst_file)
# +
# Build the nn4.small2 embedding CNN (project-local model definition).
from model import create_model
nn4_small2 = create_model()
# +
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Layer
# Input for anchor, positive and negative images
in_a = Input(shape=(96, 96, 3))
in_p = Input(shape=(96, 96, 3))
in_n = Input(shape=(96, 96, 3))
# Output for anchor, positive and negative embedding vectors
# The nn4_small model instance is shared (Siamese network)
emb_a = nn4_small2(in_a)
emb_p = nn4_small2(in_p)
emb_n = nn4_small2(in_n)
class TripletLossLayer(Layer):
    """Keras layer that registers the triplet-ranking loss for a Siamese net.

    Given [anchor, positive, negative] embedding tensors it adds
    sum(max(||a-p||^2 - ||a-n||^2 + alpha, 0)) as a model loss.
    """

    def __init__(self, alpha, **kwargs):
        # Margin by which the negative must be farther than the positive.
        self.alpha = alpha
        super(TripletLossLayer, self).__init__(**kwargs)

    def triplet_loss(self, inputs):
        anchor, positive, negative = inputs
        pos_dist = K.sum(K.square(anchor - positive), axis=-1)
        neg_dist = K.sum(K.square(anchor - negative), axis=-1)
        return K.sum(K.maximum(pos_dist - neg_dist + self.alpha, 0), axis=0)

    def call(self, inputs):
        loss = self.triplet_loss(inputs)
        self.add_loss(loss)
        return loss
# Layer that computes the triplet loss from anchor, positive and negative embedding vectors
triplet_loss_layer = TripletLossLayer(alpha=0.2, name='triplet_loss_layer')([emb_a, emb_p, emb_n])
# Model that can be trained with anchor, positive negative images
nn4_small2_train = Model([in_a, in_p, in_n], triplet_loss_layer)
# +
from data import triplet_generator
# triplet_generator() creates a generator that continuously returns
# ([a_batch, p_batch, n_batch], None) tuples where a_batch, p_batch
# and n_batch are batches of anchor, positive and negative RGB images
# each having a shape of (batch_size, 96, 96, 3).
generator = triplet_generator()
# loss=None: TripletLossLayer already registered the loss via add_loss().
nn4_small2_train.compile(loss=None, optimizer='adam')
nn4_small2_train.fit_generator(generator, epochs=10, steps_per_epoch=100)
# Please note that the current implementation of the generator only generates
# random image data. The main goal of this code snippet is to demonstrate
# the general setup for model training. In the following, we will anyway
# use a pre-trained model so we don't need a generator here that operates
# on real training data. I'll maybe provide a fully functional generator
# later.
# -
# Fresh model instance loaded with pre-trained OpenFace weights.
nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')
# +
import numpy as np
import os.path

class IdentityMetadata():
    """Location of one face image inside a <base>/<identity>/<file> layout."""

    def __init__(self, base, name, file):
        # dataset base directory
        self.base = base
        # identity name
        self.name = name
        # image file name
        self.file = file

    def __repr__(self):
        return self.image_path()

    def image_path(self):
        """Full relative path to the image file."""
        return os.path.join(self.base, self.name, self.file)

def load_metadata(path):
    """Scan *path* (one sub-directory per identity) and return a numpy
    object array of IdentityMetadata records for every JPEG found.

    The extension check is now case-insensitive, so files such as
    `X.JPG`/`X.JPEG` (common for camera exports) are no longer skipped.
    """
    metadata = []
    for identity in os.listdir(path):
        for f in os.listdir(os.path.join(path, identity)):
            # Allow only jpg/jpeg files, ignoring case.
            ext = os.path.splitext(f)[1].lower()
            if ext in ('.jpg', '.jpeg'):
                metadata.append(IdentityMetadata(path, identity, f))
    return np.array(metadata)
# Index every image under images/<identity>/ for the experiments below.
metadata = load_metadata('images')
# +
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from align import AlignDlib
# %matplotlib inline
def load_image(path):
    """Read the image at *path* with OpenCV and return it as an RGB array.

    cv2.imread returns None (rather than raising) for a missing or
    unreadable file; the original then failed with a cryptic TypeError on
    the slice below, so we raise FileNotFoundError explicitly.
    """
    img = cv2.imread(path, 1)
    if img is None:
        raise FileNotFoundError(f"cv2.imread could not read image: {path}")
    # OpenCV loads images with color channels
    # in BGR order. So we need to reverse them
    return img[..., ::-1]
# Initialize the OpenFace face alignment utility
alignment = AlignDlib('models/landmarks.dat')
# Load an image of <NAME>
jc_orig = load_image(metadata[0].image_path())
# Detect face and return bounding box
bb = alignment.getLargestFaceBoundingBox(jc_orig)
# NOTE(review): bb may be None when no face is detected, which would break
# align() and the Rectangle below — confirm the first image always has a
# detectable face.
# Transform image using specified face landmark indices and crop image to 96x96
jc_aligned = alignment.align(96, jc_orig, bb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
# Show original image
plt.subplot(131)
plt.imshow(jc_orig)
# Show original image with bounding box
plt.subplot(132)
plt.imshow(jc_orig)
plt.gca().add_patch(patches.Rectangle((bb.left(), bb.top()), bb.width(), bb.height(), fill=False, color='red'))
# Show aligned image
plt.subplot(133)
plt.imshow(jc_aligned);
# -
def align_image(img):
    """Detect the largest face in *img* and return it aligned and cropped to 96x96."""
    face_box = alignment.getLargestFaceBoundingBox(img)
    return alignment.align(96, img, face_box,
                           landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
# +
# 128-dimensional embedding per image; rows stay all-zero on failure.
embedded = np.zeros((metadata.shape[0], 128))

for i, m in enumerate(metadata):
    try:
        img = load_image(m.image_path())
        img = align_image(img)
        # scale RGB values to interval [0,1]
        img = (img / 255.).astype(np.float32)
        # obtain embedding vector for image
        embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
    except Exception:
        # Best-effort: images with no detectable face (align_image returns
        # None) or that fail to load keep a zero embedding. The original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit,
        # making the loop impossible to interrupt; Exception does not.
        #print(i)
        pass
# -
def person2IndexConverter(person, number):
    """Return the index in the global `metadata` array of the image
    images/<person>/<person>_<number zero-padded to 4 digits>, or -1 if
    it is not present.

    The path is built with os.path.join: the original hard-coded "\\"
    separators, which can never match the os.path.join-built paths stored
    in `metadata` on non-Windows systems, so the lookup always returned
    -1 there.
    """
    target = os.path.join("images", str(person),
                          str(person) + "_" + str(number).zfill(4))
    for i, val in enumerate(metadata):
        # Compare against the record's path with its extension stripped.
        if str(val).split('.')[0] == target:
            return i
    return -1
# +
# Accumulators filled by ReadPairsFile: tab-split matched / mismatched
# pair records for the currently selected fold.
paired_list = []
unpaired_list = []

def ReadPairsFile(pairFile, setNumber):
    """Append fold *setNumber* of an LFW-style pairs file to the global
    paired_list / unpaired_list.

    File layout: header "<n_folds> TAB <pairs_per_fold>", then per fold
    `pairs_per_fold` matched lines followed by the same number of
    mismatched lines (600 lines per fold in the standard pairs.txt).
    Fields remain raw tab-split strings, the last one keeping its
    trailing newline, exactly as the downstream evaluation expects.

    Fixes vs. the original: the path is built with os.path.join instead
    of a hard-coded "\\" separator (which broke on non-Windows systems),
    and two unused locals were removed.
    """
    global paired_list
    global unpaired_list
    path = os.path.join(os.getcwd(), pairFile)
    with open(path) as f:
        content = f.readlines()
    each_set = int(content[0].split('\t')[1])
    # +1 skips the header line; 600 lines per fold (300 matched + 300 not).
    base_index = (setNumber - 1) * 600 + 1
    paired_list_raw = content[base_index:base_index + each_set]
    unpaired_list_raw = content[base_index + each_set:base_index + each_set * 2]
    for matched, mismatched in zip(paired_list_raw, unpaired_list_raw):
        paired_list.append(matched.split('\t'))
        unpaired_list.append(mismatched.split('\t'))

#ReadPairsFile("pairs.txt",2)
# +
def distance(emb1, emb2):
    """Squared Euclidean distance between two embedding vectors."""
    diff = emb1 - emb2
    return np.sum(np.square(diff))
def show_pair(idx1, idx2):
    """Show the two dataset images at *idx1*/*idx2* side by side, titled
    with the squared distance between their embeddings."""
    pair_distance = distance(embedded[idx1], embedded[idx2])
    plt.figure(figsize=(8,3))
    plt.suptitle(f'Distance = {pair_distance:.2f}')
    plt.subplot(121)
    plt.imshow(load_image(metadata[idx1].image_path()))
    plt.subplot(122)
    plt.imshow(load_image(metadata[idx2].image_path()))

#show_pair(2, 698)
# -
def testPairedList():
    """Evaluate the currently loaded fold of 300 matched and 300 mismatched
    pairs against the 0.70 distance threshold and print hit counts and
    ratios for both lists (output format unchanged)."""
    correct_paired = 0
    incorrect_paired = 0
    correct_unpaired = 0
    incorrect_unpaired = 0
    threshold = 0.70
    for row in range(300):
        # Matched pair: one identity, two image numbers.
        matched = paired_list[row]
        m_name = matched[0]
        m_first = matched[1]
        m_second = matched[2].split('\n')[0]
        m_idx1 = person2IndexConverter(m_name, m_first)
        m_idx2 = person2IndexConverter(m_name, m_second)
        matched_dist = distance(embedded[m_idx1], embedded[m_idx2])
        # Mismatched pair: two identities, one image number each.
        mismatched = unpaired_list[row]
        u_name1 = mismatched[0]
        u_first = mismatched[1]
        u_name2 = mismatched[2]
        u_second = mismatched[3].split('\n')[0]
        u_idx1 = person2IndexConverter(u_name1, u_first)
        u_idx2 = person2IndexConverter(u_name2, u_second)
        mismatched_dist = distance(embedded[u_idx1], embedded[u_idx2])
        # Matched pairs should fall below the threshold ...
        if matched_dist < threshold:
            correct_paired += 1
        else:
            incorrect_paired += 1
        # ... and mismatched pairs should fall at or above it.
        if mismatched_dist >= threshold:
            correct_unpaired += 1
        else:
            incorrect_unpaired += 1
    print("Correct Number: [PairedList]",correct_paired)
    print("Incorrect Number: [PairedList]",incorrect_paired)
    print("Ratio: [PairedList]",correct_paired/(300))
    print("")
    print("Correct Number: [UnPairedList]",correct_unpaired)
    print("Incorrect Number: [UnPairedList]",incorrect_unpaired)
    print("Ratio: [UnPairedList]",correct_unpaired/(300))
def AllTestCaseResult(specificTestCase=-1):
    """Run the pair-distance evaluation for each of the 10 folds of
    pairs.txt, or only for fold *specificTestCase* when it is not -1.

    The original reset paired_list/unpaired_list twice per iteration; the
    duplicate reset was removed. The remaining reset still happens before
    the skip check, preserving the original's global-state behavior.
    """
    global paired_list
    global unpaired_list
    for i in range(10):
        # Start every iteration from empty pair lists.
        paired_list = []
        unpaired_list = []
        if specificTestCase != -1 and specificTestCase != (i+1):
            continue
        print("Test Case: ",str((i+1)))
        print("-------------------------------------------")
        ReadPairsFile("pairs.txt",(i+1))
        testPairedList()

AllTestCaseResult()
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/env python3
import paratext
import pandas as pd
import lz4.frame
import gzip
import io
import pyarrow.parquet as pq
import pyarrow as pa
import numpy as np
# NOTE(review): dead experiment kept as a bare string literal (a no-op
# expression statement) — earlier lz4/gzip CSV loading attempts.
'''
filepath = 'cboe/lz4_test/BTCUSD_order_book_20170627.csv.lz4'
#filepath = 'cboe/lz4_test/BTCUSD_order_book_20170627.csv.gz'
df = pandas.read_csv(io.TextIOWrapper(lz4.frame.open(filepath)))
#df = pandas.read_csv(filepath)
#df = paratext.load_csv_to_pandas(gzip.open(filepath).read())
print((df))
'''
from glob import glob
from plumbum.cmd import rm
# -
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras import regularizers
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
def plotline(data):
    """Plot *data* as a single line chart in a fresh figure and show it."""
    plt.figure()
    plt.plot(data)
    # NOTE(review): legend() warns and renders nothing because plot() was
    # given no label — either pass label= above or drop this call.
    plt.legend()
    plt.show()
# Verify which compute devices TensorFlow can see (GPU vs CPU).
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# +
# Load the pre-extracted BTCUSD fills from Parquet into a pandas DataFrame.
df = pq.read_table('cboe/parquet_fills_only_BTCUSD.parquet').to_pandas()
print(df.dtypes)
print(df.shape)
# -
num_samples = df.shape[0] - 2 # subtract 1 because we exclude the first prediction, and another 1 because our feature is price differences
# 90% / 5% / 5% chronological split sizes.
num_samples_training = round(num_samples * 0.9)
num_samples_dev = round(num_samples * 0.05)
num_samples_test = round(num_samples * 0.05)
# +
#train_set = df.iloc[0:num_samples_training]
#dev_set = df.iloc[num_samples_training:num_samples_training+num_samples_dev]
#test_set = df.iloc[num_samples_training+num_samples_dev:]
# +
#small_set = df.iloc[0:10]
#print(small_set)
# -
# Quick look at the column maxima (each is a 1-element row from the reshape).
print(max(df['Event Date'].values.reshape(-1, 1)))
print(max(df['Avg Price (USD)'].values.reshape(-1, 1)))
print(max(df['Limit Price (USD)'].values.reshape(-1, 1)))
print(max(df['Fill Price (USD)'].values.reshape(-1, 1)))
# +
# Min-max scaler fitted on the TRAINING fill prices only (avoids leakage
# from dev/test into the scaling).
price_scaler = MinMaxScaler().fit(df.iloc[0:num_samples_training]['Fill Price (USD)'].values.reshape(-1, 1))
def get_max_min_price(fulldata):
    """Return {'max': ..., 'min': ...} of the 'Fill Price (USD)' column.

    Bug fix: the body referenced an undefined name `data` instead of the
    `fulldata` parameter, so every call raised NameError.
    """
    max_price = fulldata[['Fill Price (USD)']].max().item()
    min_price = fulldata[['Fill Price (USD)']].min().item()
    return {
        'max': max_price,
        'min': min_price
    }
def extract_price_features(fulldata):
    """LSTM features per fill: min-max-scaled price plus buy/market
    indicator flags, shaped (n - 1, 1, 3) float32. The last row is
    dropped so features align with next-step targets. Depends on the
    globally fitted `price_scaler`."""
    frame = fulldata[['Fill Price (USD)', 'Side', 'Order Type']].copy()
    n = frame.shape[0]
    frame['isbuy'] = pd.get_dummies(frame['Side'])['buy'].values
    frame['ismarket'] = pd.get_dummies(frame['Order Type'])['market'].values
    scaled = price_scaler.transform(frame['Fill Price (USD)'].values.reshape(-1, 1))
    frame['price_scaled'] = scaled.flatten()
    selected = frame[['price_scaled', 'isbuy', 'ismarket']].iloc[:n - 1]
    return selected.values.astype('float32')[:, None, :]
def extract_price_features_percentdiff(fulldata):
    """LSTM features per fill: fill-price percent change plus buy/market
    indicator flags, shaped (n - 2, 1, 3) float32. The last row is
    dropped for target alignment and the first row (NaN pct_change) is
    removed."""
    frame = fulldata[['Fill Price (USD)', 'Side', 'Order Type']].copy()
    n = frame.shape[0]
    frame['isbuy'] = pd.get_dummies(frame['Side'])['buy'].values
    frame['ismarket'] = pd.get_dummies(frame['Order Type'])['market'].values
    frame['price_diff_percent'] = frame['Fill Price (USD)'].pct_change()
    trimmed = frame[['price_diff_percent', 'isbuy', 'ismarket']].iloc[:n - 1]
    return trimmed.values.astype('float32')[1:, None, :]

# Select which feature extractor the rest of the notebook uses.
extract_features = extract_price_features_percentdiff
#small_features = extract_features(small_set)
# -
# Sanity-check the extractor on a small slice, then build the full tensor.
print(df.columns)
print(df['Event Date'].tail(10))
print(extract_price_features_percentdiff(df.iloc[0:10, :]))
all_features = extract_features(df)
# +
#print(all_features.shape)
# -
#num_samples = all_features.shape[0]
#num_samples_training = round(num_samples * 0.9)
#num_samples_dev = round(num_samples * 0.05)
#num_samples_test = round(num_samples * 0.05)
# Chronological train/dev/test split (no shuffling for time series).
X_train = all_features[0:num_samples_training, :]
X_dev = all_features[num_samples_training:num_samples_training+num_samples_dev, :]
X_test = all_features[num_samples_training+num_samples_dev:, :]
# Range of the percent-change feature in each split.
print(X_train[:,0,0].max())
print(X_train[:,0,0].min())
print(X_dev[:,0,0].max())
print(X_dev[:,0,0].min())
print(X_test[:,0,0].max())
print(X_test[:,0,0].min())
# +
def extract_y_rawprice(fulldata):
    """Prediction target: next-step fill price, min-max scaled with the
    globally fitted `price_scaler`; the first row is dropped so targets
    align with the feature rows. Returns shape (n - 1, 1).

    A dead first assignment (its result was immediately overwritten) was
    removed.
    """
    prices = price_scaler.transform(fulldata['Fill Price (USD)'].values.reshape(-1, 1)).flatten()
    return np.delete(prices, 0, axis=0).reshape(-1, 1)
def extract_y_prevrawprice(fulldata):
    """Previous-step RAW fill prices aligned with the percent-diff targets:
    the first and last rows are dropped, giving shape (n - 2, 1).

    A dead first assignment was removed — its float32 cast was discarded
    by the immediately following reassignment, so values stay float64
    exactly as before.
    """
    prices = fulldata['Fill Price (USD)'].values.flatten()
    return np.delete(prices, [0, prices.shape[0] - 1], axis=0).reshape(-1, 1)
def extract_y_percentdiff(fulldata):
    """Prediction target: percent change of the fill price. The first two
    rows are removed (NaN from pct_change plus one-step alignment with
    the feature rows), yielding a float32 column vector of length n - 2."""
    pct = fulldata['Fill Price (USD)'].pct_change().values.astype('float32')
    trimmed = np.delete(pct, [0, 1], axis=0)
    return trimmed.reshape(-1, 1)

# Select which target extractor the rest of the notebook uses.
extract_y = extract_y_percentdiff
#print(len(extract_y(small_set)))
# -
# Sanity-check the target extractors on a small slice.
print(extract_y_prevrawprice(df.iloc[0:10, :]))
print(extract_y_percentdiff(df.iloc[0:10, :]))
all_y = extract_y(df)
all_y_prevrawprice = extract_y_prevrawprice(df)
print(all_features.shape)
print(all_y.shape)
print(all_features.dtype)
print(all_y.dtype)
# Chronological target splits, mirroring the X splits above.
Y_train = all_y[0:num_samples_training, :]
Y_dev = all_y[num_samples_training:num_samples_training+num_samples_dev, :]
Y_test = all_y[num_samples_training+num_samples_dev:, :]
Y_train_prevrawprice = all_y_prevrawprice[0:num_samples_training, :]
Y_dev_prevrawprice = all_y_prevrawprice[num_samples_training:num_samples_training+num_samples_dev, :]
Y_test_prevrawprice = all_y_prevrawprice[num_samples_training+num_samples_dev:, :]
print(Y_train[:,0].max())
print(Y_train[:,0].min())
print(Y_dev[:,0].max())
print(Y_dev[:,0].min())
print(Y_test[:,0].max())
print(Y_test[:,0].min())
# Visual inspection of features, raw prices and targets per split.
plotline(X_train[:,0,0])
plotline(X_dev[:,0,0])
plotline(X_test[:,0,0])
plotline(df['Fill Price (USD)'].values.flatten())
plotline(all_y[:,0])
plotline(Y_train[:,0])
plotline(Y_dev[:,0])
plotline(Y_test[:,0])
# making the training set smaller so we train faster
downscaling_factor = 1
#downscaling_factor = 100
fraction_used = 1 / downscaling_factor
fraction_skipped = 1 - fraction_used
samples_skipped = round(X_train.shape[0] * fraction_skipped)
print(X_train.shape)
#X_train = X_train[::downscaling_factor]
#Y_train = Y_train[::downscaling_factor]
# Keep only the most recent fraction of the training window.
X_train = X_train[samples_skipped::]
Y_train = Y_train[samples_skipped::]
print(X_train.shape)
X_sample = all_features[0:10]
Y_sample = all_y[0:10]
print(X_sample)
print(Y_sample)
# +
def event_count(time_series, data_name):
    """Print how often the fill price moved up, moved down, or stayed flat
    between consecutive fills of *time_series* (a DataFrame with a
    'Fill Price (USD)' column)."""
    prices = time_series[['Fill Price (USD)']].values
    upevents = 0
    downevents = 0
    sameprice = 0
    previous = prices[0]
    for current in prices[1:]:
        if current > previous:
            upevents += 1
        elif current < previous:
            downevents += 1
        elif current == previous:
            sameprice += 1
        previous = current
    print('=== Event counts on %s ===' % data_name)
    print('upevents')
    print(upevents)
    print('downevents')
    print(downevents)
    print('sameprice')
    print(sameprice)
    print()
def mse(time_series, data_name):
    """Print persistence-baseline errors (predict the previous price):
    total/mean squared error, total/mean absolute error, and RMSE."""
    prices = time_series[['Fill Price (USD)']].values
    total_squared_error = 0
    total_absolute_error = 0
    previous = prices[0]
    for current in prices[1:]:
        total_squared_error += (current - previous)**2
        total_absolute_error += abs(current - previous)
        previous = current
    num_predictions = len(prices) - 1
    # NOTE: this local deliberately shadows sklearn's mean_squared_error.
    mean_squared_error = total_squared_error / num_predictions
    mean_absolute_error = total_absolute_error / num_predictions
    root_mean_squared_error = np.sqrt(mean_squared_error)
    print('=== baseline on %s ===' % data_name)
    print('total squared error')
    print(total_squared_error)
    print('total absolute error')
    print(total_absolute_error)
    print('mean squared error')
    print(mean_squared_error)
    print('mean absolute error')
    print(mean_absolute_error)
    print('root mean squared error')
    print(root_mean_squared_error)
    print()
# +
def show_summary_statistics():
    """Print event counts and persistence-baseline errors for the
    train/dev/test splits of the global `df`."""
    splits = {
        'train': df.iloc[0:num_samples_training],
        'dev': df.iloc[num_samples_training:num_samples_training+num_samples_dev],
        'test': df.iloc[num_samples_training+num_samples_dev:],
    }
    for label, subset in splits.items():
        event_count(subset, label)
    for label, subset in splits.items():
        mse(subset, label)

#show_summary_statistics()
# -
def preprocess(data):
    """Coerce *data* to a freshly-copied float32 column vector (n, 1)."""
    column = np.array(data).reshape(-1, 1)
    # astype always copies, matching the original's semantics.
    return column.astype('float32')
def plot_losses(model_history, title):
    """Plot training vs validation loss curves from a Keras History object."""
    plt.figure()
    for series, label in ((model_history.history['loss'], 'Train'),
                          (model_history.history['val_loss'], 'Dev')):
        plt.plot(series, label=label)
    plt.xlabel('Epochs')
    plt.ylabel('Loss (mse)')
    plt.title(title)
    plt.legend()
    plt.show()
# +
def inverse_transform_pricescaler(data, Y_prevrawprice, fitted_scaler):
    """Undo min-max scaling: map scaled predictions back to raw prices.
    (`Y_prevrawprice` is unused; kept for a uniform signature.)"""
    column = preprocess(data)
    return fitted_scaler.inverse_transform(column)
def inverse_transform_percentdiff(data, Y_prevrawprice, fitted_scaler=None):
    """Turn predicted percent changes back into absolute prices using the
    previous raw price: prev + prev * pct. (`fitted_scaler` is unused;
    kept for a uniform signature.)"""
    previous = Y_prevrawprice
    delta = previous * data
    return previous + delta

#print(Y_test_prevrawprice)
#print(inverse_transform_percentdiff(Y_test, Y_test_prevrawprice))
# Select which inverse transform the evaluation helpers use.
inverse_transform = inverse_transform_percentdiff
# -
def plot_predictions(model, X_test, Y_test, Y_prevrawprice, title, inverse=False, scaler=None):
    """Plot model predictions against ground truth. With inverse=True both
    series are first mapped back to raw prices via `inverse_transform`."""
    predicted = model.predict(X_test)
    truth = Y_test
    if inverse:
        predicted = inverse_transform(predicted, Y_prevrawprice, scaler)
        truth = inverse_transform(truth, Y_prevrawprice, scaler)
    plt.plot(predicted, label='Predicted')
    plt.plot(truth, label='True')
    plt.xlabel('Time')
    plt.ylabel('Price' if inverse else 'RESCALED Price')
    plt.title(title)
    plt.legend()
    plt.show()
def calculate_MSE_RMSE(model, scaler, X_test, Y_test, Y_prevrawprice, model_name):
    """Print test MSE and RMSE of *model* in raw-price space.

    RMSE is now derived from the already-computed MSE instead of calling
    mean_squared_error a second time on the same arrays.
    """
    y_hat = model.predict(X_test)
    y_hat_inverse = inverse_transform(y_hat, Y_prevrawprice, scaler)
    Y_test_inverse = inverse_transform(Y_test, Y_prevrawprice, scaler)
    mse = mean_squared_error(Y_test_inverse, y_hat_inverse)
    rmse = np.sqrt(mse)
    print('%s:' % model_name)
    print('Test MSE: %.3f' % mse)
    print('Test RMSE: %.3f' % rmse)
    print()
def train_evaluate(model, model_name,
                   X_train, Y_train, Y_train_prevrawprice, X_dev, Y_dev, Y_dev_prevrawprice, X_test, Y_test, Y_test_prevrawprice,
                   lag=10, batch_size=100, epochs=10, verbose=1):
    """Fit *model* on the training split and return the Keras History.
    Only the training arrays are used here; the other arguments keep the
    signature uniform with the evaluation helpers."""
    # The last 5% of the (unshuffled) training data serves as validation.
    return model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,
                     validation_split=0.05, verbose=verbose, shuffle=False)
def train_evaluate_showresults(history, model, model_name,
                X_train, Y_train, Y_train_prevrawprice, X_dev, Y_dev, Y_dev_prevrawprice, X_test, Y_test, Y_test_prevrawprice,
                lag=10, batch_size=100, epochs=10, verbose=1):
    """Plot loss curves and dev-set predictions (model space and raw-price
    space) and print dev-set MSE/RMSE for an already-trained model.

    Only `history`, `model`, `model_name` and the dev arrays are used;
    the remaining parameters keep the signature uniform with
    train_evaluate.
    """
    # Plot losses, predictions, and calculate MSE and RMSE
    plot_losses(history, 'Loss\n(%s)' % model_name)
    plot_predictions(model, X_dev, Y_dev, Y_dev_prevrawprice, 'Test Predictions\n(%s)' % model_name)
    plot_predictions(model, X_dev, Y_dev, Y_dev_prevrawprice, 'Test Predictions\n(%s)' % model_name, inverse=True, scaler=price_scaler)
    calculate_MSE_RMSE(model, price_scaler, X_dev, Y_dev, Y_dev_prevrawprice, '%s' % model_name)
def evaluate_test(model, model_name,
                X_train, Y_train, Y_train_prevrawprice, X_dev, Y_dev, Y_dev_prevrawprice, X_test, Y_test, Y_test_prevrawprice,
                lag=10, batch_size=100, epochs=10, verbose=1):
    """Plot TEST-set predictions and print test MSE/RMSE for a model that
    has already been trained (the fit call below is deliberately left
    commented out)."""
    #history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,
    #                    validation_split=0.05, verbose=verbose, shuffle=False)
    #plot_losses(history, 'Loss\n(%s)' % model_name)
    # Plot predictions (scaled and raw-price space) and report errors.
    plot_predictions(model, X_test, Y_test, Y_test_prevrawprice, 'Test Predictions\n(%s)' % model_name)
    plot_predictions(model, X_test, Y_test, Y_test_prevrawprice, 'Test Predictions\n(%s)' % model_name, inverse=True, scaler=price_scaler)
    calculate_MSE_RMSE(model, price_scaler, X_test, Y_test, Y_test_prevrawprice, '%s' % model_name)
# +
# HYPERPARAMETERS
#####################
lag = 1
loss = 'mean_squared_error'
optimizer = 'adagrad' # sgd, adagrad, adam, rmsprop
batch_size = 100
epochs = 20
# Encode the hyperparameters into the model name for bookkeeping.
model_name = 'model_LAG-%s_LOSS-%s_OPT-%s_BATCHSIZE-%s_EPOCHS-%s' % (lag, loss, optimizer, batch_size, epochs)
#####################
# (timesteps, features) per sample, as expected by Keras LSTM layers.
LSTM_input_shape = [X_train.shape[1], X_train.shape[2]]
# +
# DEFINE MODEL
model = Sequential()
#####################
# Three stacked 200-unit LSTMs with light dropout, then a single linear
# output unit predicting the next percent change.
model.add(LSTM(200, input_shape=LSTM_input_shape, return_sequences=True))
model.add(Dropout(0.1))
model.add(LSTM(200, input_shape=LSTM_input_shape, return_sequences=True))
model.add(Dropout(0.1))
model.add(LSTM(200, return_sequences=False))
#model.add(Dropout(0.1))
model.add(Dense(1))
#model.add(Dense(1, kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l2(0.01)))
model.add(Activation('linear'))
#####################
model.compile(loss=loss, optimizer=optimizer)
# -
print(X_train.shape)
print(Y_train.shape)
print(X_train)
# Train/evaluate model
history = train_evaluate(model, model_name,
                X_train, Y_train, Y_train_prevrawprice, X_dev, Y_dev, Y_dev_prevrawprice, X_test, Y_test, Y_test_prevrawprice,
                lag=lag, batch_size=batch_size, epochs=epochs, verbose=1)
# Dev-set diagnostics for the freshly trained model.
train_evaluate_showresults(history, model, model_name,
                X_train, Y_train, Y_train_prevrawprice, X_dev, Y_dev, Y_dev_prevrawprice, X_test, Y_test, Y_test_prevrawprice,
                lag, batch_size, epochs)
# Final held-out test evaluation.
evaluate_test(model, model_name,
                X_train, Y_train, Y_train_prevrawprice, X_dev, Y_dev, Y_dev_prevrawprice, X_test, Y_test, Y_test_prevrawprice,
                lag=lag, batch_size=batch_size, epochs=epochs, verbose=1)
| cboe_baseline_priceonly_predictchangepercent-pascal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# enumerate() yields (index, element) tuples.
word = 'abcde'
for item in enumerate(word):
    print(item)
# +
# The tuples can be unpacked directly in the for statement.
word = 'abcde'
for index,letter in enumerate(word):
    print(index)
    print(letter)
    print("\n")
# -
# ###### zip function
#
# zip() pairs up elements of the two lists position by position.
mylist1 = [1,2,3,]
mylist2 = ['a','b','c']
for item in zip(mylist1, mylist2):
    print(item)
list(zip(mylist1, mylist2))
# The `in` operator tests membership in lists, strings and dict KEYS.
'x' in [1,2,3]
'x' in ['x','y','z']
'a' in 'a world'
'mykey' in {'mykey':345}
# +
d = {'mykey':345}
# NOTE(review): this checks whether 345 is a KEY (it is a value), so it is
# False — `345 in d.values()` was probably intended.
345 in d.keys()
# -
mylist = [10,20,30,100]
min(mylist)
max(mylist)
# random.shuffle reorders the list in place and returns None.
from random import shuffle
mylist = [1,2,3,4,5,6,7,8,9,10]
shuffle(mylist)
mylist
mylist
# randint(a, b) returns a random integer in the INCLUSIVE range [a, b].
from random import randint
randint(0,100)
randint(0,100)
mynum = randint(0, 100)
mynum
# input() always returns a string.
input('Enter a number here: ')
result = input('What is your name?')
result
| 00-Python Object/EnumarateZipFunction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Record this notebook's HTTP interactions under the name 'basic'.
from recording import Record
record = Record().start('basic')
# %load_ext restmagic
# -q suppresses the response body dump; the second call shows it.
# %rest -q httpbin.org/html
# %rest httpbin.org/html
# +
# %%rest -q
POST https://httpbin.org/post
Content-Type: application/x-www-form-urlencoded
username=Gena&email=<EMAIL>
# -
# `_` is IPython's last cell result — the Response of the POST above.
_.json()['form']
# # SSL server cert
# %rest -q --cacert ../../examples/badssl-server.pem https://self-signed.badssl.com/
# %rest -q -k https://self-signed.badssl.com/
# # SSL client cert
# %rest -q https://client.badssl.com/
# %rest -q --key ../../examples/badssl.key https://client.badssl.com/
# %rest -q --cert ../../examples/badssl.crt --key ../../examples/badssl.key https://client.badssl.com/
# %rest -q --cert ../../examples/badssl-combined.pem https://client.badssl.com/
# # End of tests
record.stop()
| tests/notebooks/basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building a Regression Model for a Financial Dataset
#
# In this notebook, you will build a simple linear regression model to predict the closing AAPL stock price. The lab objectives are:
# * Pull data from BigQuery into a Pandas dataframe
# * Use Matplotlib to visualize data
# * Use Scikit-Learn to build a regression model
# + language="bash"
#
# bq mk -d ai4f
# bq load --autodetect --source_format=CSV ai4f.AAPL10Y gs://cloud-training/ai4f/AAPL10Y.csv
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Default figure size for every plot in this notebook.
plt.rc('figure', figsize=(12, 8.0))
# -
# ## Pull Data from BigQuery
#
# In this section we'll use a magic function to query a BigQuery table and then store the output in a Pandas dataframe. A magic function is just an alias to perform a system command. To see documentation on the "bigquery" magic function execute the following cell:
# %%bigquery?
# The query below selects everything you'll need to build a regression model to predict the closing price of AAPL stock. The model will be very simple for the purposes of demonstrating BQML functionality. The only features you'll use as input into the model are the previous day's closing price and a three day trend value. The trend value can only take on two values, either -1 or +1. If the AAPL stock price has increased over any two of the previous three days then the trend will be +1. Otherwise, the trend value will be -1.
#
# Note, the features you'll need can be generated from the raw table `ai4f.AAPL10Y` using Pandas functions. However, it's better to take advantage of the serverless-ness of BigQuery to do the data pre-processing rather than applying the necessary transformations locally.
# %%bigquery df
WITH
raw AS (
SELECT
date,
close,
LAG(close, 1) OVER(ORDER BY date) AS min_1_close,
LAG(close, 2) OVER(ORDER BY date) AS min_2_close,
LAG(close, 3) OVER(ORDER BY date) AS min_3_close,
LAG(close, 4) OVER(ORDER BY date) AS min_4_close
FROM
`ai4f.AAPL10Y`
ORDER BY
date DESC ),
raw_plus_trend AS (
SELECT
date,
close,
min_1_close,
IF (min_1_close - min_2_close > 0, 1, -1) AS min_1_trend,
IF (min_2_close - min_3_close > 0, 1, -1) AS min_2_trend,
IF (min_3_close - min_4_close > 0, 1, -1) AS min_3_trend
FROM
raw ),
train_data AS (
SELECT
date,
close,
min_1_close AS day_prev_close,
IF (min_1_trend + min_2_trend + min_3_trend > 0, 1, -1) AS trend_3_day
FROM
raw_plus_trend
ORDER BY
date ASC )
SELECT
*
FROM
train_data
# View the first five rows of the query's output. Note that the object `df` containing the query output is a Pandas Dataframe.
# Confirm the %%bigquery magic returned a Pandas DataFrame, drop the rows
# whose lagged features are NaN (the first few trading days), and preview.
print(type(df))
df.dropna(inplace=True)
df.head()
# ## Visualize data
# The simplest plot you can make is to show the closing stock price as a time series. Pandas DataFrames have built in plotting functionality based on Matplotlib.
df.plot(x='date', y='close');
# You can also embed the `trend_3_day` variable into the time series above.
# +
# Plot the closing price over a two-month window, marking each day by the
# sign of its three-day trend.
start_date = '2018-06-01'
end_date = '2018-07-31'

# Boolean mask for rows inside the date window — computed once instead of
# re-parsing the dates for every layer of the plot.
in_window = pd.to_datetime(df.date).between(start_date, end_date)

plt.plot(
    'date', 'close', 'k--',
    data=df.loc[in_window]
)
plt.scatter(
    'date', 'close', color='b', label='pos trend',
    # BUG FIX: the original wrote `df.trend_3_day == 1 & mask`; `&` binds
    # tighter than `==`, so that compared the trend against `(1 & mask)`.
    # Parenthesize the comparison like the negative-trend case below.
    data=df.loc[(df.trend_3_day == 1) & in_window]
)
plt.scatter(
    'date', 'close', color='r', label='neg trend',
    data=df.loc[(df.trend_3_day == -1) & in_window]
)
plt.legend()
plt.xticks(rotation=90);
# -
df.shape
# ## Build a Regression Model in Scikit-Learn
# In this section you'll train a linear regression model to predict AAPL closing prices when given the previous day's closing price `day_prev_close` and the three day trend `trend_3_day`. A training set and test set are created by sequentially splitting the data after 2000 rows.
# +
# Features: previous day's close and the three-day trend; target: today's close.
features = ['day_prev_close', 'trend_3_day']
target = 'close'
# NOTE(review): `.loc` slicing is label-inclusive, so row label 2000 lands in
# BOTH the train and the test split — confirm this overlap is acceptable.
X_train, X_test = df.loc[:2000, features], df.loc[2000:, features]
y_train, y_test = df.loc[:2000, target], df.loc[2000:, target]
# -
# Create linear regression object. Don't include an intercept,
# per the lab instructions.
regr = linear_model.LinearRegression(fit_intercept = False)
# Train the model using the training set
regr.fit(X_train,y_train)
# Make predictions using the testing set
y_pred = regr.predict(X_test)
# Print the root mean squared error of your predictions
# The mean squared error
print('Root Mean Squared Error: {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred))))
# Print the variance score (1 is perfect prediction)
print('Variance Score: {0:.2f}'.format(r2_score(y_test, y_pred)))
# Plot the predicted values against their corresponding true values
plt.scatter(y_test, y_pred)
# Reference line y = x: points on it are perfectly predicted.
plt.plot([140, 240], [140, 240], 'r--', label='perfect fit')
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.legend();
# The model's predictions are more or less in line with the truth. However, the utility of the model depends on the business context (i.e. you won't be making any money with this model). It's fair to question whether the variable `trend_3_day` even adds to the performance of the model:
# Naive baseline: use yesterday's close as the prediction and compare RMSEs.
print('Root Mean Squared Error: {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, X_test.day_prev_close))))
# Indeed, the RMSE is actually lower if we simply use the previous day's closing value as a prediction! Does increasing the number of days included in the trend improve the model? Feel free to create new features and attempt to improve model performance!
| Course 1 - Introduction to Trading, Machine Learning & GCP/Qwiklab 1 - Building a Regression Model in AI Platform Notebooks/aapl_regression_scikit_learn_double_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 1 Convex sets (20 points)
# (a, 12 pts) Closed sets and convex sets.
# i. Show that a polyhedron $\{x \in \mathbb{R}^n : Ax \le b\}$, for some
# $A \in \mathbb{R}^{m \times n}$, $b \in \mathbb{R}^m$, is both convex and
# closed.
# ii. Show that if Si ⊆ R
# n, i ∈ I is a collection of convex sets, then their intersection ∩i∈ISi
# is also
# convex. Show that the same statement holds if we replace “convex” with “closed”.
# iii. Given an example of a closed set in R
# 2 whose convex hull is not closed.
# iv. Let A ∈ R
# m×n. Show that if S ⊆ R
# m is convex then so is A−1
# (S) = {x ∈ R
# n : Ax ∈ S}, which
# is called the preimage of S under the map A : R
# n → R
# m. Show that the same statement holds
# if we replace “convex” with “closed”.
# v. Let A ∈ R
# m×n. Show that if S ⊆ R
# n is convex then so is A(S) = {Ax : x ∈ S}, called the image
# of S under A.
# vi. Give an example of a matrix A ∈ R
# m×n and a set S ⊆ R
# n that is closed and convex but such
# that A(S) is not closed.
# (b, 4 pts) The following is an important property of polyhedra:
# P ⊆ R
# m+n
# is a polyhedron ⇒ {x ∈ R
# n
# : (x, y) ∈ P for some y ∈ R
# m} is a polyhedron. (1)
# (Bonus: prove this property.)
# i. Use the above property (1) about polyhedra to conclude that if A ∈ R
# m×n and P ⊆ R
# n is a
# polyhedron then A(P) is a polyhedron.
# ii. Give an example to show that (1) is no longer true if we replace “polyhedron” with “closed and
# convex set”.
# (c, 4 pts) The following is a “strict” variant of the Separating Hyperplane Theorem: if C, D ⊆ R
# n
# are disjoint, closed and convex, and (say) D is bounded, then there exists a ∈ R
# n, b ∈ R with a 6= 0
# such that a
# T x > b for all x ∈ C and a
# T x < b for all x ∈ D (i.e., the hyperplane {x ∈ R
# n : a
# T x = b}
# strictly separates C, D). Use this to prove Farkas’ Lemma: given A ∈ R
# m×n, b ∈ R
# m, exactly one of
# the following is true:
# 1
# • ∃ x ∈ R
# n such that Ax = b, x ≥ 0;
# • ∃ y ∈ R
# m such that AT y ≥ 0, yT
# b < 0.
# (Hint: it will help you to use part (b.i), to deduce that the set {Ax : x ≥ 0} is a polyhedron, and
# hence closed and convex by part (a.i).)
#
# 2 Convex functions (16 points)
# (a, 6 pts) Prove that $f(x, y) = |xy| + a(x^2 + y^2)$ is convex if and only
# if $a \ge 1/2$. Also prove that it is strongly convex if $a > 1/2$.
# (Bonus: produce 3d plots of f, one for each a ∈ {0, 1/4, 1/2, 3/4}.)
# (b, 6 pts) In each case below specify whether the function is strongly convex, strictly convex, convex,
# or nonconvex, and give a brief justification.
# i. The logarithmic barrier function, f : R
# n
# ++ → R defined as
# f(x) = −
# Xn
# i=1
# log(xi).
# ii. The entropy function, f : {x ∈ R
# n
# + :
# Pn
# i=1 xi = 1} → R defined as
# f(x) =
# −
# Pn
# i=1 xi
# log(xi) if x > 0
# 0 otherwise.
# (c, 4 points) Let f be twice differentiable, with dom(f) convex. Prove that f is convex if and only if
# (∇f(x) − ∇f(y))T
# (x − y) ≥ 0,
# for all x, y. This property is called monotonicity of the gradient ∇f.
#
# 3 Lipschitz gradients and strong convexity (16 points)
# Let f be convex and twice differentiable.
# (a, 8 pts) Show that the following statements are equivalent.
# i. ∇f is Lipschitz with constant L;
# ii. (∇f(x) − ∇f(y))T
# (x − y) ≤ Lkx − yk
# 2
# 2
# for all x, y;
# iii. ∇2f(x) LI for all x;
# iv. f(y) ≤ f(x) + ∇f(x)
# T
# (y − x) + L
# 2
# ky − xk
# 2
# 2
# for all x, y.
# (b, 8 pts) Show that the following statements are equivalent.
# i. f is strongly convex with constant m;
# ii. (∇f(x) − ∇f(y))T
# (x − y) ≥ mkx − yk
# 2
# 2
# for all x, y;
# iii. ∇2f(x) mI for all x;
# iv. f(y) ≥ f(x) + ∇f(x)
# T
# (y − x) + m
# 2
# ky − xk
# 2
# 2
# for all x, y.
# 4 Solving optimization problems with CVX (23 points)
# CVX is a fantastic framework for disciplined convex programming—it’s rarely the fastest tool for
# the job, but it’s widely applicable, and so it’s a great tool to be comfortable with. In this exercise
# we will set up the CVX environment and solve a convex optimization problem.
# In this class, your solution to coding problems should include plots and whatever explanation
# necessary to answer the questions asked. In addition, full code should be submitted as an appendix
# to the homework document.
# CVX variants are available for each of the major numerical programming languages. There are
# some minor syntactic and functional differences between the variants but all provide essentially the
# same functionality. The Matlab version (and by extension, the R version which calls Matlab under
# the covers) is the most mature but all should be sufficient for the purposes of this class.
# Download the CVX variant of your choosing:
# • Matlab - http://cvxr.com/cvx/
# • Python - http://www.cvxpy.org/en/latest/
# • Julia - https://github.com/JuliaOpt/Convex.jl
# • R - http://faculty.bscb.cornell.edu/~bien/cvxfromr.html
# and consult the documentation to understand the basic functionality. Make sure that you can solve
# the least squares problem minθ ky − Xθk
# 2
# 2
# for a vector y and matrix X. Check your answer by
# comparing with the analytic least squares solution.
# (a) Using CVX, we will solve the 2d fused lasso problem discussed in Lecture 1:
# min
# θ
# 1
# 2
# Xn
# i=1
# (yi − θi)
# 2 + λ
# X
# {i,j}∈E
# |θi − θj |.
# The set E is the set of all undirected edges connecting horizontally or vertically neighboring pixels
# in the image. More specifically, {i, j} ∈ E if and only if pixel i is the immediate neighbor of pixel j
# on the left, right, above or below.
# 1. (9 pts) Load the basic test data from circle.csv and solve the 2d fused lasso problem with
# λ = 1. Report the objective value obtained at the solution and plot the solution and original
# data as images. Why does the shape change its form?
# 2. (6 pts) Next, we consider how the solution changes as we vary λ. Load a grayscale 64 × 64
# pixel version of the standard Lenna test image from lenna 64.csv and solve the 2d fused lasso
# problem for this image for λ ∈ {10−k/4
# : k = 0, 1, . . . , 8}. For each λ, report the value of the
# optimal objective value, plot the optimal image and show a histogram of the pixel values (100
# bins between values 0 and 1). What change in the histograms can you observe with varying
# λ?
# (b, 8 pts) Disciplined convex programming or DCP is a system for composing functions while ensuring
# their convexity. It is the language that underlies CVX. Essentially, each node in the parse tree for a
# convex expression is tagged with attributes for curvature (convex, concave, affine, constant) and sign
# (positive, negative) allowing for reasoning about the convexity of entire expressions. The website
# http://dcp.stanford.edu/ provides visualization and analysis of simple expressions.
# Typically, writing problems in the DCP form is natural, but in some cases manipulation is
# required to construct expressions that satisfy the rules. For each set of mathematical expressions
# below (all define a convex set), give equivalent DCP expressions along with a brief explanation of
# why the DCP expressions are equivalent to the original. DCP expressions should be given in a form
# that passes analysis at http://dcp.stanford.edu/analyzer.
# Note: this question is really about developing a better understanding of the various composition
# rules for convex functions.
# 1. k(x, y)k
# 3
# 1 ≤ 5x + 7
# 2. 2
# x +
# 9
# z−y ≤ 3, x > 0, y < z
# 3. √
# x
# 2 + 4 + 2y ≤ −5x
# 4. (x + 3)z(y − 5) ≥ 8, x ≥ −3, z ≥ 0, y ≥ 5
# 5. (x+3z)
# 2
# log y + 2y
# 2 ≤ 10, y > 1
# 6. log
# e
# −
# √
# x + e
# 2z
#
# ≤ −e
# 5y
# , x ≥ 0
# 7. p
# k(2x − 3y, y + x)k1 = 0
# 8. y log
# y
# 2x
#
# ≤ y + x − 30, x > 0, y > 0
| ConvexSets & Convex Function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import som
import numpy as np
import math
import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local").appName("MapaAutoorganizado").getOrCreate()
sc = spark.sparkContext
# +
import numba
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32
import numpy as np
@cuda.jit
def rand_weights(rng_states, d_weights):
    """
    Kernel that randomly initialises the weight 'matrix' with values in
    [0, 1) drawn from a uniform distribution.

    :param rng_states: per-thread xoroshiro128p random states.
    :param d_weights: M * N * d device array that will hold the neuron weights.
    """
    idx = cuda.grid(1)
    # Map the flat thread index onto 3-D (row, col, component) coordinates.
    n_rows, n_cols, d = d_weights.shape
    row = idx // (n_cols * d)
    col_d = idx % (n_cols * d)
    col = col_d // d
    i = col_d % d
    # Guarded write: only threads that map to a real weight draw a number.
    if idx < d_weights.size:
        d_weights[row, col, i] = xoroshiro128p_uniform_float32(rng_states, idx)
# -
"""
Implementación de un único kernel para ser ejecutado en cada partición del RDD.
"""
@cuda.jit
def som_iter(d_samples, d_weights, d_nums, d_denums, sigma_squared):
    """
    One batch-SOM step: compute the Euclidean distances between a sample
    and every neuron's weight vector, find the sample's best matching
    unit (BMU), and accumulate the partial sums of the batch
    weight-update formula.

    Launch layout: one block per sample, one thread per neuron (block
    padded to a power of two); dynamic shared memory holds one sample.

    :param d_samples: set of all samples to evaluate.
    :param d_weights: rows * cols * d array with the neuron weights.
    :param d_nums: numerator accumulators of the update formula.
    :param d_denums: denominator accumulators of the update formula.
    :param sigma_squared: sigma squared, bounding the update neighbourhood.
    """
    # 0. Indices.
    nrows, ncols, d = d_weights.shape
    nneurons = nrows * ncols
    sample_idx = cuda.blockIdx.x
    neuron_idx = cuda.threadIdx.x
    neuron_row = neuron_idx // ncols
    neuron_col = neuron_idx % ncols
    blockSize = cuda.blockDim.x
    # 0. Declare and initialise shared memory.
    # Dynamic shared memory holding this block's sample (size set at launch).
    shared_sample = cuda.shared.array(shape=0, dtype=numba.float32)
    # Distances from the block's sample to each neuron (up to 1024 neurons).
    shared_distances = cuda.shared.array(shape=1024, dtype=numba.float32)
    # Neuron indices carried along the min-reduction (up to 1024 neurons).
    shared_idx = cuda.shared.array(shape=1024, dtype=numba.int32)
    # 1. Euclidean distance between the sample and the block's neurons.
    # 1.a Cooperatively load the block's sample into shared memory.
    for i in range(d // nneurons + 1):
        i_stride = i * nneurons
        my_pos = i_stride + cuda.threadIdx.x
        if my_pos < d:
            shared_sample[my_pos] = d_samples[sample_idx, my_pos]
    cuda.syncthreads()
    # 1.b Each thread computes its neuron's squared distance; padding
    # threads (no neuron assigned) get +inf so the reduction ignores them.
    # The trailing barrier also covers the shared_idx initialisation.
    if neuron_idx < nneurons:
        shared_distances[neuron_idx] = 0.0
        for i in range(d):
            i_distance = shared_sample[i] - d_weights[neuron_row, neuron_col, i]
            shared_distances[neuron_idx] += i_distance * i_distance
    else:
        shared_distances[neuron_idx] = np.inf
    # Initialise the index array for the reduction.
    shared_idx[neuron_idx] = neuron_idx
    cuda.syncthreads()
    # 2. Unrolled shared-memory min-reduction to find the smallest
    # distance, carrying the winning neuron index along.
    # Block-level unroll (assumes blockSize >= 64, as guaranteed by the
    # launcher's `tpb = max(64, ...)`).
    if blockSize >= 1024 and neuron_idx < 512:
        if shared_distances[neuron_idx + 512] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 512]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 512]
    cuda.syncthreads()
    if blockSize >= 512 and neuron_idx < 256:
        if shared_distances[neuron_idx + 256] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 256]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 256]
    cuda.syncthreads()
    if blockSize >= 256 and neuron_idx < 128:
        if shared_distances[neuron_idx + 128] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 128]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 128]
    cuda.syncthreads()
    if blockSize >= 128 and neuron_idx < 64:
        if shared_distances[neuron_idx + 64] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 64]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 64]
    cuda.syncthreads()
    # Warp-level unroll.
    # NOTE(review): this relies on implicit warp-synchronous execution
    # (no cuda.syncwarp between steps) — confirm on the target architecture.
    if neuron_idx < 32:
        if shared_distances[neuron_idx + 32] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 32]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 32]
        if shared_distances[neuron_idx + 16] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 16]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 16]
        if shared_distances[neuron_idx + 8] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 8]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 8]
        if shared_distances[neuron_idx + 4] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 4]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 4]
        if shared_distances[neuron_idx + 2] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 2]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 2]
        if shared_distances[neuron_idx + 1] < shared_distances[neuron_idx]:
            shared_distances[neuron_idx] = shared_distances[neuron_idx + 1]
            shared_idx[neuron_idx] = shared_idx[neuron_idx + 1]
    cuda.syncthreads()
    # After the reduction the winner's index sits at position 0.
    bmu = shared_idx[0]
    bmu_row = bmu // ncols
    bmu_col = bmu % ncols
    cuda.syncthreads()
    # 3. Accumulate this sample's contribution to the weight update.
    if neuron_idx < nneurons:
        # Squared grid distance from this neuron to the BMU.
        dist = (neuron_row - bmu_row) * (neuron_row - bmu_row) + \
               (neuron_col - bmu_col) * (neuron_col - bmu_col)
        # Only neurons inside the neighbourhood radius get updated.
        if dist <= sigma_squared:
            hck = math.exp(-dist/(2 * sigma_squared))
            # Accumulate the denominator sum.
            cuda.atomic.add(d_denums, neuron_row * ncols + neuron_col, hck)
            # Accumulate the numerator sums (one per weight component).
            for i in range(d):
                cuda.atomic.add(d_nums, neuron_row*ncols*d + neuron_col*d+i,
                                hck * shared_sample[i])
@cuda.jit
def finish_update(d_weights, partials, numParts):
    """
    Merge the per-partition partial sums and apply the batch weight update.
    Runs on a single Spark node; one thread per neuron.

    The ``partials`` buffer is the concatenation, per partition, of the
    numerator block (nrows*ncols*d floats) followed by the denominator
    block (nrows*ncols floats). Partition 0's blocks double as the
    accumulators.

    :param d_weights: neuron weights, (nrows, ncols, d); updated in place.
    :param partials: concatenated partial sums from every partition.
    :param numParts: number of partial results (partitions) to merge.
    """
    idx = cuda.grid(1)
    nrows, ncols, d = d_weights.shape
    if idx < nrows * ncols:
        row = idx // ncols
        col = idx % ncols
        # a) Fold every other partition's partials into partition 0's blocks.
        numsize = nrows * ncols * d
        densize = nrows * ncols
        fullsize = numsize + densize
        # BUG FIX: the original looped `for i in range(numParts - 1)` and read
        # from offset `fullsize * i`, which (at i == 0) added partition 0's
        # values onto themselves and never included the last partition.
        # Iterate the source partitions 1..numParts-1 instead.
        for i in range(1, numParts):
            # Numerator sums.
            for k in range(d):
                pos = fullsize * i + row * ncols * d + col * d + k
                partials[row * ncols * d + col * d + k] += partials[pos]
            # Denominator sum.
            pos = fullsize * i + numsize + row * ncols + col
            partials[numsize + row * ncols + col] += partials[pos]
        # b) If the denominator is non-zero (the neuron was in at least one
        # sample's neighbourhood) divide and overwrite the current weights.
        if partials[numsize + row * ncols + col] != 0:
            for k in range(d):
                d_weights[row, col, k] = partials[row*ncols*d + col*d +k] / \
                                         partials[numsize + row * ncols + col]
"""
Spark con GPU
"""
def gpu_work_iter(weights, sigma_squared):
    """
    Build the per-partition GPU worker for one batch-SOM iteration.

    Declared as a closure so extra parameters (the current weights and
    radius) can be passed through ``mapPartitions``, which only accepts a
    one-argument callable.
    """
    def _gpu_work(data):
        # 1. Materialise this partition's samples as a float32 matrix.
        inp = np.asarray(list(data), dtype=np.float32)
        rows, cols, d = weights.shape
        nneurons = rows * cols
        N = inp.shape[0]
        # 2. Move inputs and zero-initialised accumulators to the device.
        d_samples = cuda.to_device(inp)
        d_weights = cuda.to_device(weights)
        nums = np.zeros(rows * cols * d, np.float32)
        denums = np.zeros(rows * cols, np.float32)
        d_nums = cuda.to_device(nums)
        d_denums = cuda.to_device(denums)
        # 3. Threads per block: next power of two >= nneurons, at least 64.
        # The reduction in som_iter supports at most 1024 neurons.
        if nneurons > 1024:
            raise Exception('Número de neuronas superior al límite')
        tpb = max(64,2**(math.ceil(math.log2(nneurons))))
        # 4. Launch the kernel: one block per sample; dynamic shared memory
        # holds one sample (4 bytes per float32 component).
        sm_size = 4 * d  # shared memory to hold one sample per block
        som_iter[N, tpb, 0, sm_size](d_samples, d_weights, d_nums, d_denums, sigma_squared)
        return d_nums.copy_to_host(), d_denums.copy_to_host()
    return _gpu_work
# +
def spark_gpu_batch_som(rdd_data, d, max_iters, rows, cols, smooth_iters=None, sigma_0=10,
                        sigma_f=0.1, tau=400, seed=None, tpb=128):
    """
    Train a batch SOM on a Spark RDD, using the GPU on every worker.

    :param rdd_data: RDD of float32 sample vectors of length ``d``.
    :param d: dimensionality of the samples.
    :param max_iters: total number of training iterations.
    :param rows: number of rows of the neuron map.
    :param cols: number of columns of the neuron map.
    :param smooth_iters: if given, the last ``smooth_iters`` iterations run
        with the fixed final radius ``sigma_f`` instead of the decay schedule.
    :param sigma_0: initial neighbourhood radius.
    :param sigma_f: final (smoothing-phase) neighbourhood radius.
    :param tau: time constant of the exponential radius decay.
    :param seed: seed for the random weight initialisation.
    :param tpb: threads per block for the auxiliary kernels.
    :return: trained weights as a (rows, cols, d) float32 host array.
    """
    # 1. Random initial weights in [0, 1).
    d_weights = cuda.device_array((rows, cols ,d), np.float32)
    rng_states = create_xoroshiro128p_states(rows * cols * d, seed=seed)
    rand_weights[(d_weights.size) // tpb + 1, tpb](rng_states, d_weights)
    weights = d_weights.copy_to_host()
    # 2. Main training loop.
    for t in range(max_iters):
        # 2.a Neighbourhood radius schedule. BUG FIX: the original condition
        # was `t < max_iters`, which is always true inside this loop, so the
        # smoothing phase (fixed sigma_f) was unreachable. Use the fixed
        # radius for the last `smooth_iters` iterations instead.
        if smooth_iters is None or t < max_iters - smooth_iters:
            sigma = sigma_0 * math.exp((-t/tau))
        else:
            sigma = sigma_f
        sigma_squared = sigma * sigma
        # 2.b Each Spark partition finds BMUs for its samples on the GPU and
        # returns the partial sums of the weight-update formula.
        out = rdd_data.mapPartitions(gpu_work_iter(weights, sigma_squared))
        # 2.c On a single node, merge all partial sums and apply the update.
        out = out.collect()
        numParts = len(out) // 2  # each partition yields (nums, denums)
        partials = np.concatenate(out)
        finish_update[rows * cols // tpb + 1, tpb](weights, partials, numParts)
    return weights
# +
"""
Ejemplo de las caras
"""
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
# Load the Olivetti faces as a flat (n_samples, n_pixels) float array.
faces = fetch_olivetti_faces()['data']
# Map geometry and training hyper-parameters for the faces example.
rows = 5
cols = 6
d=faces.shape[1]
iters = 50
smooth_iters = 25
sigma_0 = 3
tau = 50
# Number of Spark partitions to spread the samples over.
multiprocessors = 10
rdd_faces = sc.parallelize(faces).repartition(multiprocessors).cache()
# Force the cached RDD to materialise before any timing starts.
data = rdd_faces.collect()
# -
# Time the GPU implementation on the faces dataset.
inicio = time.time()
weights=spark_gpu_batch_som(rdd_faces, d, iters, rows, cols, smooth_iters, sigma_0, 0.1, tau, seed=2)
fin = time.time()

def pintar(texto, pesos):
    """
    Render the trained map as a mosaic of neuron prototype images and
    print timing / quality metrics.

    NOTE(review): besides its parameters this function reads the globals
    `faces`, `rows`, `cols`, `inicio`, `fin` and — for the error metrics —
    the global `weights` rather than the `pesos` argument. It only works
    because `weights` is reassigned just before each call; confirm intent.
    """
    W = pesos
    pattern_length = faces.shape[1]
    pattern_width = pattern_height = int(np.sqrt(pattern_length))
    matrix_w = np.empty((rows * pattern_height, cols * pattern_width))
    # Tile every neuron's weight vector into the mosaic as a grayscale image.
    for i in range(rows):
        for j in range(cols):
            matrix_w[i * pattern_height:i * pattern_height + pattern_height,
                     j * pattern_height:j * pattern_height + pattern_width] = W[i, j].reshape((pattern_height, pattern_width)) * 255.0
    fig, ax = plt.subplots(figsize=(12,12))
    ax.matshow(matrix_w.tolist(), cmap='gray')
    ax.set_xticks([])
    ax.set_yticks([])
    print(texto)
    print('Tiempo ejecución (s)', fin - inicio)
    print('Error de cuantificación:', som.quantification_error(faces, weights) )
    print('Error topográfico:', som.topography_error(faces, weights))
    plt.show()

pintar('GPU', weights)

# Same experiment with the CPU reference implementation, for comparison.
inicio = time.time()
weights = som.spark_cpu_batch_som(rdd_faces, d, iters, rows, cols, smooth_iters, sigma_0, 0.1, tau, seed=2)
fin = time.time()
pintar('CPU', weights)
# +
"""
Experimento con SUSY.
"""
# SUSY experiment hyper-parameters.
d = 18
rows = 8
cols = 7
tau = 10
smooth_iters = 5
max_iters = 10
sigma_0 = 4
from pyspark.ml.linalg import DenseVector
from pyspark.ml.feature import StandardScaler
from pyspark.sql import Row
multiprocessors = 2
# Parse the CSV: drop the class label (column 0) and keep the 18 features.
rdd = sc.textFile('../datasets/SUSY.csv').repartition(multiprocessors)
rdd = rdd.map(lambda line: line.split(','))
rdd = rdd.map(lambda line: Row(features=DenseVector(line[1:])))
df = rdd.toDF()
# Scale each feature to unit standard deviation (mean left untouched).
scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
                        withStd=True, withMean=False)
scalerModel = scaler.fit(df)
normalized_df = scalerModel.transform(df)
# Back to an RDD of float32 vectors, as the SOM implementations expect.
rdd = normalized_df.rdd
rdd = rdd.map(lambda line: np.float32(line.scaledFeatures))
# -
def subsample_and_repeat(sub_percent, rdd, repeat=5):
    """
    Benchmark the GPU batch SOM on a random subsample of *rdd*.

    Draws a sample (with replacement) of roughly ``sub_percent`` of the
    data, forces it into the cache, then trains and times the SOM
    ``repeat`` times with a different seed each run.
    """
    sampled = rdd.sample(True, sub_percent, seed=7).cache()
    # Touch one element so the cached sample is materialised before timing.
    _ = sampled.take(1)
    for rep in range(repeat):
        print('REPETICIÓN', rep)
        """
        inicio = time.time()
        cpu_weights=som.spark_cpu_batch_som(subrdd, d, max_iters, rows, cols, smooth_iters, sigma_0, 0.1, tau, seed=my_iter)
        fin = time.time()
        print('TIEMPO CPU', fin - inicio)
        """
        started = time.time()
        weights = spark_gpu_batch_som(sampled, d, max_iters, rows, cols,
                                      smooth_iters, sigma_0, 0.1, tau, seed=rep)
        print('TIEMPO GPU', time.time() - started)
    sampled.unpersist()
# Sweep subsample fractions from 10% to 100% of the SUSY dataset.
subsample_and_repeat(0.1, rdd)
subsample_and_repeat(0.2, rdd)
subsample_and_repeat(0.3, rdd)
subsample_and_repeat(0.4, rdd)
subsample_and_repeat(0.5, rdd)
subsample_and_repeat(0.6, rdd)
subsample_and_repeat(0.7, rdd)
subsample_and_repeat(0.8, rdd)
subsample_and_repeat(0.9, rdd)
subsample_and_repeat(1.0, rdd)
| codigo/mapa_autoorganizado.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/emahiro/colab/blob/main/sample_notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="EbG509_QErgs"
# # Sample Note
#
# Google Colaboratory のテスト
#
# - markdown 出かけるっぽい
# - 共有することもできるっぽい
# - データセットあると楽しそう。
# - python のコードはセクションごとに管理するっぽい
# + colab={"base_uri": "https://localhost:8080/"} id="qo2ghY-HEdPO" outputId="7ab2c37b-5c70-49f8-beba-5bcb899f612b"
# Minimal smoke test that the Colab kernel executes Python.
print("hello")
| sample_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import panel as pn
pn.extension('echarts')
# The ``ECharts`` pane renders [Apache ECharts](https://echarts.apache.org/en/index.html) and [pyecharts](https://pyecharts.org/#/) plots inside Panel. Note that to use the ``ECharts`` pane in the notebook the Panel extension has to be loaded with 'echarts' as an argument to ensure that echarts.js is initialized.
#
# #### Parameters:
#
# For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).
#
# * **``object``** (dict): An ECharts plot specification expressed as a Python dictionary, which is then converted to JSON. Or a pyecharts chart like `pyecharts.charts.Bar`.
# * **``renderer``** (str): Whether to render with HTML 'canvas' (default) or 'svg'
# * **``theme``** (str): Theme to apply to plots (one of 'default', 'dark', 'light')
# ___
# Lets try the ``ECharts`` pane support for ECharts specs in its raw form (i.e. a dictionary), e.g. here we declare a bar plot:
# A minimal ECharts bar-chart specification expressed as a plain Python
# dict (Panel serialises it to JSON for echarts.js).
echart = {
    'title': {
        'text': 'ECharts entry example'
    },
    'tooltip': {},
    'legend': {
        'data':['Sales']
    },
    'xAxis': {
        'data': ["shirt","cardign","chiffon shirt","pants","heels","socks"]
    },
    'yAxis': {},
    'series': [{
        'name': 'Sales',
        'type': 'bar',
        'data': [5, 20, 36, 10, 10, 20]
    }],
};
# Wrap the spec in an ECharts pane with a fixed size and display it.
echart_pane = pn.pane.ECharts(echart, height=480, width=640)
echart_pane
# Like all other panes, the ``ECharts`` pane ``object`` can be updated, either in place and triggering an update:
# Mutate the existing spec — switch the bar series to a line chart — then
# tell the pane its ``object`` changed so it re-renders.
echart['series'] = [dict(echart['series'][0], type= 'line')]
echart_pane.param.trigger('object')
# ECharts specifications can also be responsively sized by declaring the width or height to match the container:
# +
responsive_spec = dict(echart, responsive=True)
pn.pane.ECharts(responsive_spec, height=400)
# -
# The ECharts pane also has support for pyecharts. For example, we can pass a `pyecharts.charts.Bar` chart directly to the `ECharts` pane.
# +
from pyecharts.charts import Bar

# Two sliders drive the bar heights; `plot` re-renders whenever either moves.
bar1 = pn.widgets.IntSlider(start=1, end=100, value=50)
bar2 = pn.widgets.IntSlider(start=1, end=100, value=50)

@pn.depends(bar1.param.value, bar2.param.value)
def plot(bar1, bar2):
    # Build a pyecharts Bar chart and hand it straight to the ECharts pane.
    my_plot= (Bar()
        .add_xaxis(['Helicoptors', 'Planes'])
        .add_yaxis('Total In Flight', [bar1, bar2])
    )
    return pn.pane.ECharts(my_plot, width=500, height=250)

pn.Row(pn.Column(bar1, bar2), plot).servable()
# -
# The ECharts library supports a wide range of chart types and since the plots are expressed using JSON datastructures we can easily update the data and then emit change events to update the charts:
# +
# ECharts gauge specification; the JS callback below mutates it client-side.
gauge = {
    'tooltip': {
        'formatter': '{a} <br/>{b} : {c}%'
    },
    'series': [
        {
            'name': 'Gauge',
            'type': 'gauge',
            'detail': {'formatter': '{value}%'},
            'data': [{'value': 50, 'name': 'Value'}]
        }
    ]
};
gauge_pane = pn.pane.ECharts(gauge, width=400, height=400)
slider = pn.widgets.IntSlider(start=0, end=100)
# Pure-JS link: update the gauge value in the browser, no server round trip.
slider.jscallback(args={'gauge': gauge_pane}, value="""
gauge.data.series[0].data[0].value = cb_obj.value
gauge.properties.data.change.emit()
""")
pn.Column(slider, gauge_pane)
# -
# ### Controls
#
# The `EChart` pane exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively:
# Show the pane's parameter controls next to the gauge, linked client-side.
pn.Row(gauge_pane.controls(jslink=True), gauge_pane)
| examples/reference/panes/ECharts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# name: python3
# ---
# # Balancer Simulations Math Challenge - Basic Exercises
# This is a series of exercises to gain intuition for the core algorithm in Balancer Pools: the Value Function, and invariant V.
# **Exercise:** We'll work on Step 1 questions in our working session on May 25 in teams of 2, facilitated by @Octopus and @akrtws.
# **Challenges:** Step 2 will be homework assignments, submitting at least one challenge solved is mandatory for successful participation!
# Here's the notebook with challenges: https://github.com/TokenEngineeringCommunity/BalancerPools_Model/blob/fcb67145e8b0f8a1843fe3c6921dbb5a7085938e/Math%20Challenges-Advanced.ipynb
# ## Exercise:
# Let's set up a pool.
#
# We have 100 Token A and 100 Token B, with equal weights.
# The price definition in our pool is constraint by the
# Invariant V in Balancer Pools.
#
# a_bal = balance of token A
# b_bal = balance of token B
# a_weight = weight of token A
# b_weight = weight of token B
#
# The weights in this pool are equal for both tokens.
# For now, we don't have a swap fee.
# # Q1.1:
# What's the initial spot price of token A in token B?
# The initial spot price is 1:1 due to the equal balances of the two tokens and their equal weights.
# # Q1.2:
# Now let's assume a series of 99 swaps. With every swap, **1.0 token A is bought from the pool, against token B**.
#
# **a) Create a table "buy_A"** with
#
# the token A balances (swap by swap)
# the token B balances (swap by swap) - that are constraint by the value function.
#
# **b) What do you notice in general?**
# Write down your findings (in words).
#
# **c) How much would Alice have to pay in token B when buying the first 1.0 token A?**
# Write down your findings (in words). Compare with the initial Spotprice.
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# +
#set up genesis state
# Pool from Q1: 100 A / 100 B, equal weights, no swap fee.
a_bal = 100 # initial balance
b_bal = 100 # initial balance
a_weight = 0.5 #define weight
b_weight = 0.5 #define weight
s_f = 0.0 #swap fee
# Balancer value function V = a^w_a * b^w_b stays constant across swaps
inv = (a_bal**a_weight)*(b_bal**b_weight) #calculate invariant
# token A balance after each of the 99 one-token swaps (100 down to 1)
a_vals = pd.Series(range(100,0,-1))
#create dataframe with based on a_vals
List = pd.DataFrame(a_vals, columns=['token_A'])
#create values for plot, add Y_balances according to current invariant
List['invariant'] = inv #value required to calculate token B value
List['token_B'] = (List.invariant/(List.token_A**a_weight))**(1/b_weight)# calculate corresponding token_B value according to invariant
# With equal weights the weight ratio cancels, so B/A is the spot price
List['Spot Price'] = List.token_B / List.token_A
# Effective amount of B paid to buy 1.0 token A at this state (includes slippage)
List['In-Given-Out'] = List.token_B * (( List.token_A / (List.token_A-1) )**(a_weight/b_weight) -1)
List
# -
# ## Comments
#
# b) As the value of token_A is defined with respect to the value of token_B, as the amount of token_A decreases, the value of token_A increases. For token_B, the behavior is exactly the opposite: the amount of tokens increases while their relative value against token_A decreases.
# IMPORTANT: the spot price is the instantaneous price in the pool; however, due to the limited depth of the pool and the effect of trades on it (slippage), no transaction is executed exactly at the spot price (only infinitesimal ones would be). The real price to be paid at any point is shown in the column 'In-Given-Out', where slippage is included in the calculations.
#
# c) See the column 'In-Given-Out'. The price to buy token_A with token_B increases throughout the swaps as token_A appreciates against token_B.
#
#
# # Q1.3:
#
# Now let's assume a series of 99 swaps in the opposite direction. We start again with the original state: We have 100 Token A and 100 Token B.
# With every swap, **1.0 token B is bought from the pool, against token A**.
#
# Create a table **'buy_B'** with
#
# the token A balances (swap by swap)
# the token B balances (swap by swap) - that are constraint by the value function.
# +
#set up genesis state
# Same pool as Q1, but now 1.0 token B is bought per swap (table 'buy_B').
a_bal = 100 # initial balance
b_bal = 100 # initial balance
a_weight = 0.5 #define weight
b_weight = 0.5 #define weight
s_f = 0.0 #swap fee
inv = (a_bal**a_weight)*(b_bal**b_weight) #calculate invariant
# token B balance after each of the 99 one-token swaps (100 down to 1)
b_vals = pd.Series(range(100,0,-1))
#create dataframe with based on a_vals
List_2 = pd.DataFrame(b_vals, columns=['token_B'])
#create values for plot, add Y_balances according to current invariant
List_2['invariant'] = inv #value required to calculate token B value
List_2['token_A'] = (List_2.invariant/(List_2.token_B**b_weight))**(1/a_weight)# calculate corresponding token_B value according to invariant
List_2['Spot Price'] = List_2.token_A / List_2.token_B
# Amount of A paid to buy 1.0 token B at this state.
# NOTE(review): exponent uses a_weight/b_weight; for buying B it should be
# b_weight/a_weight — equivalent here only because the weights are equal. Confirm.
List_2['In-Given-Out'] = List_2.token_A * (( List_2.token_B / (List_2.token_B-1) )**(a_weight/b_weight) -1)
List_2.tail(20)
# -
# We see exactly the same behavior in the opposite direction. This is possible because both initial balances and weights are the same.
# # Q1.4:
#
# a) What are the new balances of token A and token B after 90 swaps token A (in) for B (out)?
#
# b) How much would Alice have to pay in token A in case she wants to buy the 91st token B?
# a) After 90 swaps balances are:
# token_B = 10
# token_A = 1000
# SP = 100
# b) Cost of buying an extra token_B = 111,1111111
# # Q1.5:
# Now create a graph (use plotly or similar), and draw the full curve for this series of both kinds of swaps - the AMM curve.
# +
#plot curves with all
# Reverse buy_A so token_A increases monotonically, then append buy_B to
# trace the full constant-product curve as one continuous line.
List = List[['token_A', 'token_B']]
List_2 = List_2[['token_B', 'token_A']]
curve = pd.concat([List.iloc[::-1], List_2], axis=0, ignore_index=True)
fig = px.line(curve, x="token_A", y="token_B")
fig.update_xaxes(range=[0, 1000])
fig.update_yaxes(range=[0, 1000])
fig.update_layout(height=1000, width=1000, title_text='<b>AMM Curve</b>')
fig.show()
# -
# # Q1.6:
# Take this plot, and mark
# - the initial price in Q1.1 (starting price)
# - the new price in Q1.4 (after 90 swaps)
#
# Side note: Why is it actually not appropriate to call this a "price". What would be a better name?
# +
# Re-plot the AMM curve and annotate the two pool states from Q1.1 (start,
# 100/100) and Q1.4 (after 90 swaps, 1000 A / 10 B).
fig = px.line(curve, x="token_A", y="token_B")
fig.update_xaxes(range=[0, 1000])
fig.update_yaxes(range=[0, 1000])
fig.update_layout(height=1000, width=1000, title_text='<b> AMM Curve</b>')
fig.add_annotation(x=100, y=100,
                   text="SP = 100/100 = 1",
                   showarrow=True,
                   arrowhead=1,
                   )
fig.add_annotation(x=10, y=1000,
                   text="SP = 1000/10 = 100",
                   showarrow=True,
                   arrowhead=1,
                   )
fig.show()
# -
# The accurate name is 'Spot Price', but it only reflects a 'price' (understood as the amount of X the buyer needs to pay) when the trade has no effect on the pool (slippage ~ 0), which only happens when the amount traded is negligible compared to the liquidity in the pool.
# # Q1.7:
# Formulate a "rule of a thumb", how do swaps effect the price?
# If I buy token_A (what reduces the amount of token_A in the pool) by paying token_B (what increases the amount of token_B in the pool), the relative value of token_A vs token_B increases.
#
# Buying token_B with token_A produces the same effect in the other direction.
# ## Additional questions:
#
# **Q1.8:** Buying A) 99 tokens at once vs. B) buying 99x 1 token
# Is there a difference in terms of the number of tokens you have to swap in? Is it "more expensive" to swap 99 times?
# **Answer: no, there's no difference - you pay the same amount no matter if you buy 1 at a time or 99 at once. Intuition: you are moving on the SAME curve, no matter if you take a huge leap, or walk in small steps.**
#
# **Q1.9:** Swap fees for buying 99 tokens at once vs. buying 99x 1 token
# Again, does it matter in terms of the total amount of tokens (change in balance+ fees) you have to pay?
#
#
# **Q1.10:** Price changes and position at the curve
# For a relative change in price, does it matter where we are at the curve? ( @mark richardson | bancor explained it very nicely, you might want to watch the recording again, will share it by tomorrow)
#
# **Q1.11:** Price changes, position at the curve and unequal weights
# What's the effect of weights on this? If weights are not equal, and we want to move prices, does it matter where we are at the curve?
#
# ## Solution Q1.9 - by @AngelaKTE (Twitter @akrtws)
# Swap fees for buying 99 tokens at once vs. buying 99x 1 token Again, does it matter in terms of the total amount of tokens you have to pay/send to the pool?
#
# **Answer: Yes, it does matter. With fees, it's more expensive to buy 99 x 1 tokens.**
# 
# 
# # Now, let's consider weights!
# We continue with the value function V = a^w_a*b^w_b
# where
# a = balance of token asset A
# b = balancer of token asset B
# w_a = weight of token asset A
# w_b = weight of token asset B
# # Q2.1:
# Write down the value function for the pool in Q1.1!
# Value Function:
#
# $V = B_1^{W_1} * B_2^{W_2}$
#
# With other nomenclature:
#
# $V = a^{w_a} * b^{w_b}$
#
# # Q2.2:
# Let's go back to your initial balances in Step 1 in the pool:
# 100 tokens A
# 100 tokens B
#
# How do you need to change the weights in order to land at a **price of
# 4 tokens A : 1 token B**
# Understanding price as Spot Price:
#
# $$SP = \frac{\frac{b}{w_b}}{\frac{a}{w_a}}$$
#
# as $a = b = 100$
#
# $$SP = \frac{\frac{100}{w_b}}{\frac{100}{w_a}} = \frac{w_a}{w_b} = 4$$
#
# $$w_a = 4*w_b$$
#
# and
#
# $$w_a + w_b = 1$$
#
# $$5*w_b = 1$$
#
# $$w_a = 0.8$$
#
# $$w_b = 0.2$$
# # Q2.3:
# Create a graph showing the new AMM Curve in Q2.2
# Compare to the graph in Q1.4 - how does a change in weights change the graph?
# +
# Q2.3: build the weighted-pool curve (w_a=0.8, w_b=0.2) and the original
# 50/50 curve over the same token_A range so they can be overlaid.

#set up genesis state (weighted pool)
a_bal = 100 # initial balance
b_bal = 100 # initial balance
a_weight = 0.8 #define weight
b_weight = 0.2 #define weight
s_f = 0.0 #swap fee
inv = (a_bal**a_weight)*(b_bal**b_weight) #calculate invariant

a_vals = pd.Series(range(1000,0,-1))
#create dataframe with based on a_vals
List = pd.DataFrame(a_vals, columns=['token_A'])
List['invariant'] = inv #value required to calculate token B value
# token_B balance implied by the invariant at each token_A balance
List['token_B'] = (List.invariant/(List.token_A**a_weight))**(1/b_weight)
List['Spot Price'] = List.token_B / List.token_A
List['In-Given-Out'] = List.token_B * (( List.token_A / (List.token_A-1) )**(a_weight/b_weight) -1)

#set up genesis state (reference 50/50 pool from Q1, for comparison)
a_bal = 100 # initial balance
b_bal = 100 # initial balance
a_weight = 0.5 #define weight
b_weight = 0.5 #define weight
s_f = 0.0 #swap fee
inv = (a_bal**a_weight)*(b_bal**b_weight) #calculate invariant

a_vals = pd.Series(range(1000,0,-1))
List_old = pd.DataFrame(a_vals, columns=['token_A'])
List_old['invariant'] = inv #value required to calculate token B value
List_old['token_B_old'] = (List_old.invariant/(List_old.token_A**a_weight))**(1/b_weight)
List_old['Spot Price'] = List_old.token_B_old / List_old.token_A
List_old['In-Given-Out'] = List_old.token_B_old * (( List_old.token_A / (List_old.token_A-1) )**(a_weight/b_weight) -1)
# +
#plot curves with all
List = List[['token_A', 'token_B']]
List_old = List_old[['token_B_old']]
df = pd.concat([List, List_old], axis=1, ignore_index=False)

# Bug fix: plot the old 50/50 curve against the new weighted curve.
# Previously y=['token_B', 'token_B'] drew the same series twice, so the
# comparison the markdown below describes was never actually shown.
fig = px.line(df, x="token_A", y=['token_B', 'token_B_old'])
fig.update_xaxes(range=[0, 1000])
fig.update_yaxes(range=[0, 1000])
fig.update_layout(height=1000, width=1000, title_text='<b> AMM Curve</b>')
fig.show()
df
# -
# Both curves intersect each other at $x=100 y=100$. To the left, the effect of buying token_A is bigger (bigger change in the amount of token_B) on the blue curve (the new one with 0.2 and 0.8 as weights). From the equilibrium to the right, the effect of selling token_A is bigger on the blue curve as well producing a higher impact on the total amount of token_B.
# # Q2.4:
# Compare token prices in this pool.
# How much would Alice have to pay in case there are only 2 tokens left in the pool
# **a) buy 1.0 token A for token B**
# **b) buy 1.0 token B for token A**
# +
#set up genesis state
# Weighted pool (w_a=0.8, w_b=0.2); tabulate balances down to token_A = 1
# so the near-empty-pool prices for Q2.4 can be read off the tail.
a_bal = 100 # initial balance
b_bal = 100 # initial balance
a_weight = 0.8 #define weight
b_weight = 0.2 #define weight
s_f = 0.0 #swap fee
inv = (a_bal**a_weight)*(b_bal**b_weight) #calculate invariant
a_vals = pd.Series(range(1000,0,-1))
#create dataframe with based on a_vals
List = pd.DataFrame(a_vals, columns=['token_A'])
#create values for plot, add Y_balances according to current invariant
List['invariant'] = inv #value required to calculate token B value
List['token_B'] = (List.invariant/(List.token_A**a_weight))**(1/b_weight)# calculate corresponding token_B value according to invariant
# Spot price including weights: (b/w_b)/(a/w_a)
List['Spot Price'] = (List.token_B/b_weight) / (List.token_A/a_weight)
# Amount of B paid to buy 1.0 token A at this state (includes slippage)
List['In-Given-Out'] = List.token_B * (( List.token_A / (List.token_A-1))**(a_weight/b_weight) -1)
List.tail(5)
# -
# a) Buying 1 token_A for token_B (from the previous table): 9.37e+09 tokens are needed
#
#
# b) Token_A needed to buy 1 token_B:
# Token_A needed to buy 1.0 token_B at the second-to-last pool state
# (iloc[998] is the row where token_A = 2); the exponent flips to
# b_weight/a_weight because token B is being bought out of the pool.
question_b = List.token_A.iloc[998] * ((List.token_B.iloc[998] / (List.token_B.iloc[998]-1))**(b_weight/a_weight) -1)
question_b
| Math Challenge-Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 5 - Multi-class Sentiment Analysis
#
# In all of the previous notebooks we have performed sentiment analysis on a dataset with only two classes, positive or negative. When we have only two classes our output can be a single scalar, bound between 0 and 1, that indicates what class an example belongs to. When we have more than 2 classes, our output must be a $C$ dimensional vector, where $C$ is the number of classes.
#
# In this notebook, we'll be performing classification on a dataset with 6 classes. Note that this dataset isn't actually a sentiment analysis dataset, it's a dataset of questions and the task is to classify what category the question belongs to. However, everything covered in this notebook applies to any dataset with examples that contain an input sequence belonging to one of $C$ classes.
#
# Below, we setup the fields, and load the dataset.
#
# The first difference is that we do not need to set the `dtype` in the `LABEL` field. When doing a multi-class problem, PyTorch expects the labels to be numericalized `LongTensor`s.
#
# The second difference is that we use `TREC` instead of `IMDB` to load the `TREC` dataset. The `fine_grained` argument allows us to use the fine-grained labels (of which there are 50 classes) or not (in which case they'll be 6 classes). You can change this how you please.
# +
import torch
from torchtext.legacy import data
from torchtext.legacy import datasets
import random
# Reproducibility: fix the RNG seed and make cuDNN deterministic
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# spaCy tokenization for the question text; labels are plain categorical ints
TEXT = data.Field(tokenize = 'spacy',
                  tokenizer_language = 'en_core_web_sm')
LABEL = data.LabelField()
# TREC question-type dataset; fine_grained=False keeps the 6 coarse classes
train_data, test_data = datasets.TREC.splits(TEXT, LABEL, fine_grained=False)
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
# -
# Let's look at one of the examples in the training set.
vars(train_data[-1])
# Next, we'll build the vocabulary. As this dataset is small (only ~3800 training examples) it also has a very small vocabulary (~7500 unique tokens), this means we do not need to set a `max_size` on the vocabulary as before.
# +
# Cap is rarely reached here (TREC has only ~7500 unique tokens)
MAX_VOCAB_SIZE = 25_000
# Pre-trained GloVe vectors; out-of-vocabulary words get N(0, 1) embeddings
TEXT.build_vocab(train_data,
                 max_size = MAX_VOCAB_SIZE,
                 vectors = "glove.6B.100d",
                 unk_init = torch.Tensor.normal_)
LABEL.build_vocab(train_data)
# -
# Next, we can check the labels.
#
# The 6 labels (for the non-fine-grained case) correspond to the 6 types of questions in the dataset:
# - `HUM` for questions about humans
# - `ENTY` for questions about entities
# - `DESC` for questions asking you for a description
# - `NUM` for questions where the answer is numerical
# - `LOC` for questions where the answer is a location
# - `ABBR` for questions asking about abbreviations
print(LABEL.vocab.stoi)
# As always, we set up the iterators.
# +
BATCH_SIZE = 64
# Use the GPU when available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# BucketIterator groups similarly-sized examples to minimise padding per batch
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size = BATCH_SIZE,
    device = device)
# -
# We'll be using the CNN model from the previous notebook, however any of the models covered in these tutorials will work on this dataset. The only difference is now the `output_dim` will be $C$ instead of $1$.
# +
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
    """Convolutional text classifier producing C-class logits.

    Applies parallel 2-D convolutions of heights ``filter_sizes`` over the
    embedded token sequence, max-pools each feature map over time, and maps
    the concatenated features to ``output_dim`` logits.
    """

    def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim,
                 dropout, pad_idx):
        super().__init__()
        # Bug fix: pad_idx was accepted but never used. Passing padding_idx
        # zeroes the <pad> embedding and keeps it frozen, so padding tokens
        # contribute nothing to the convolutions.
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channels = 1,
                      out_channels = n_filters,
                      kernel_size = (fs, embedding_dim))
            for fs in filter_sizes
        ])
        self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        #text = [sent len, batch size]
        text = text.permute(1, 0)
        #text = [batch size, sent len]
        embedded = self.embedding(text)
        #embedded = [batch size, sent len, emb dim]
        embedded = embedded.unsqueeze(1)
        #embedded = [batch size, 1, sent len, emb dim]
        conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]
        #conv_n = [batch size, n_filters, sent len - filter_sizes[n]]
        pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]
        #pooled_n = [batch size, n_filters]
        cat = self.dropout(torch.cat(pooled, dim = 1))
        #cat = [batch size, n_filters * len(filter_sizes)]
        return self.fc(cat)
# -
# We define our model, making sure to set `OUTPUT_DIM` to $C$. We can get $C$ easily by using the size of the `LABEL` vocab, much like we used the length of the `TEXT` vocab to get the size of the vocabulary of the input.
#
# The examples in this dataset are generally a lot smaller than those in the IMDb dataset, so we'll use smaller filter sizes.
# +
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100  # must match the glove.6B.100d vectors loaded above
N_FILTERS = 100
FILTER_SIZES = [2,3,4]  # smaller than for IMDb since TREC questions are short
OUTPUT_DIM = len(LABEL.vocab)  # C = number of classes (6 coarse labels)
DROPOUT = 0.5
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = CNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_IDX)
# -
# Checking the number of parameters, we can see how the smaller filter sizes means we have about a third of the parameters than we did for the CNN model on the IMDb dataset.
# +
def count_parameters(model):
    """Return the number of trainable parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
print(f'The model has {count_parameters(model):,} trainable parameters')
# -
# Next, we'll load our pre-trained embeddings.
# +
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
# -
# Then zero the initial weights of the unknown and padding tokens.
# +
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
# -
# Another different to the previous notebooks is our loss function (aka criterion). Before we used `BCEWithLogitsLoss`, however now we use `CrossEntropyLoss`. Without going into too much detail, `CrossEntropyLoss` performs a *softmax* function over our model outputs and the loss is given by the *cross entropy* between that and the label.
#
# Generally:
# - `CrossEntropyLoss` is used when our examples exclusively belong to one of $C$ classes
# - `BCEWithLogitsLoss` is used when our examples exclusively belong to only 2 classes (0 and 1) and is also used in the case where our examples belong to between 0 and $C$ classes (aka multilabel classification).
# +
import torch.optim as optim
optimizer = optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss()
model = model.to(device)
criterion = criterion.to(device)
# -
# Before, we had a function that calculated accuracy in the binary label case, where we said if the value was over 0.5 then we would assume it is positive. In the case where we have more than 2 classes, our model outputs a $C$ dimensional vector, where the value of each element is the belief that the example belongs to that class.
#
# For example, in our labels we have: 'HUM' = 0, 'ENTY' = 1, 'DESC' = 2, 'NUM' = 3, 'LOC' = 4 and 'ABBR' = 5. If the output of our model was something like: **[5.1, 0.3, 0.1, 2.1, 0.2, 0.6]** this means that the model strongly believes the example belongs to class 0, a question about a human, and slightly believes the example belongs to class 3, a numerical question.
#
# We calculate the accuracy by performing an `argmax` to get the index of the maximum value in the prediction for each element in the batch, and then counting how many times this equals the actual label. We then average this across the batch.
def categorical_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """
    predicted_classes = preds.argmax(dim = 1)
    num_correct = (predicted_classes == y).sum()
    return num_correct.float() / y.shape[0]
# The training loop is similar to before, without the need to `squeeze` the model predictions as `CrossEntropyLoss` expects the input to be **[batch size, n classes]** and the label to be **[batch size]**.
#
# The label needs to be a `LongTensor`, which it is by default as we did not set the `dtype` to a `FloatTensor` as before.
def train(model, iterator, optimizer, criterion):
    """Run one training epoch; return (mean loss, mean accuracy) over batches."""
    total_loss = 0.0
    total_acc = 0.0
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        predictions = model(batch.text)
        loss = criterion(predictions, batch.label)
        acc = categorical_accuracy(predictions, batch.label)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        total_acc += acc.item()
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches
# The evaluation loop is, again, similar to before.
def evaluate(model, iterator, criterion):
    """Evaluate without gradient updates; return (mean loss, mean accuracy)."""
    total_loss = 0.0
    total_acc = 0.0
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            predictions = model(batch.text)
            total_loss += criterion(predictions, batch.label).item()
            total_acc += categorical_accuracy(predictions, batch.label).item()
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches
# +
import time
def epoch_time(start_time, end_time):
    """Split an elapsed interval into whole (minutes, seconds)."""
    elapsed = int(end_time - start_time)
    mins, secs = divmod(elapsed, 60)
    return mins, secs
# -
# Next, we train our model.
# +
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # Checkpoint whenever validation loss improves (best-model selection)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut5-model.pt')
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
# -
# Finally, let's run our model on the test set!
# +
# Restore the best validation checkpoint before measuring test performance
model.load_state_dict(torch.load('tut5-model.pt'))
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
# -
# Similar to how we made a function to predict sentiment for any given sentences, we can now make a function that will predict the class of question given.
#
# The only difference here is that instead of using a sigmoid function to squash the input between 0 and 1, we use the `argmax` to get the highest predicted class index. We then use this index with the label vocab to get the human readable label.
# +
import spacy
nlp = spacy.load('en_core_web_sm')
def predict_class(model, sentence, min_len = 4):
    """Tokenize *sentence* and return the model's most likely class index."""
    model.eval()
    tokens = [tok.text for tok in nlp.tokenizer(sentence)]
    # Pad short inputs so the widest convolution filter always fits.
    tokens.extend(['<pad>'] * max(0, min_len - len(tokens)))
    indices = [TEXT.vocab.stoi[t] for t in tokens]
    tensor = torch.LongTensor(indices).to(device).unsqueeze(1)
    preds = model(tensor)
    return preds.argmax(dim = 1).item()
# -
# Now, let's try it out on a few different questions...
# Sanity-check predictions: one example per expected question category
pred_class = predict_class(model, "Who is <NAME>?")
print(f'Predicted class is: {pred_class} = {LABEL.vocab.itos[pred_class]}')
pred_class = predict_class(model, "How many minutes are in six hundred and eighteen hours?")
print(f'Predicted class is: {pred_class} = {LABEL.vocab.itos[pred_class]}')
pred_class = predict_class(model, "What continent is Bulgaria in?")
print(f'Predicted class is: {pred_class} = {LABEL.vocab.itos[pred_class]}')
pred_class = predict_class(model, "What does WYSIWYG stand for?")
print(f'Predicted class is: {pred_class} = {LABEL.vocab.itos[pred_class]}')
| 5 - Multi-class Sentiment Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PostgreSQL and Pandas
# ### Histogram to visualize the most common salary ranges for employees.
# 
# ### Bar Chart of Average Salary By Title
# 
#Import Libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import psycopg2
import os
#File paths
file_path=os.path.join("","Charts/")
# +
#Environment Varibale: Postgres db host,ID,password and db name info is stored as environment variables.
#Environment variables are loaded as a part of Jupyter session
# -
#DB Connect function
def dbConnection(db_host, db_name, db_user, db_password):
    """Open a PostgreSQL connection; return it, or None on failure.

    Prints a status message either way so notebook users know whether the
    rest of the analysis can run.
    """
    try:
        # Bug fix: the password argument was a broken placeholder; pass the
        # db_password parameter through to psycopg2.
        conn = psycopg2.connect(host=db_host, database=db_name,
                                user=db_user, password=db_password)
    except Exception:
        print("DB Connection could not be established. Please check DB settings including URL, ID and Password.")
        print("Please do not excute subsequent code as it is dependent on database connectivity.")
        return None
    else:
        # Bug fix: returning from inside `try` meant this success message
        # could never run; print it before returning the connection.
        print("DB Connection was successfully established")
        return conn
#Establish DB connection - All parameters are available as environment variables
conn = dbConnection(db_host,db_name,db_user,db_password)
# ### Create a histogram to visualize the most common salary ranges for employees.
#Get the Salary
# Load the whole Salary table into a DataFrame, sorted by salary
try:
    salary_df = pd.read_sql_query('select * from "Salary" order by salary;', conn)
except:
    print("Failed to get database result for Salary table.")
#Display sample data
salary_df.head(5)
#Let's clear format of salary column
# regex=False is required: by default "$" is treated as a regex end-of-string
# anchor, so the dollar signs would never be removed and astype(float) fails.
salary_df["salary"] = salary_df["salary"].str.replace("$", "", regex=False).str.replace(",", "", regex=False).astype(float)
#let's determine bin size (@ salary increments of 10K)
maximumvalue = salary_df.salary.max()
minimumvalue = salary_df.salary.min()
numberofbins = ( (maximumvalue - minimumvalue) / 10000 )
numberofbins
# +
#Plot histogram
# 9 bins matches the ~10K-wide ranges computed above
ax = salary_df.hist(column='salary', bins=9, grid=True, figsize=(12,9), color='#86bf91', zorder=1, rwidth=0.95)
plt.xlabel("Salary", weight='bold', size=12)
plt.ylabel("Employee Count", weight='bold', size=12)
plt.title("Most Common Salary Ranges For Employees", weight='bold', size=15)
#Save plot file
plt.savefig(file_path +"commonsalaryRange.png")
# -
# ### Create a bar chart of average salary by title.
#Get Title salary
# Join Salary -> Employee -> Title to attach a job title to each salary row
try:
    title_salary = pd.read_sql_query('SELECT salary,title FROM "Salary" s,"Employee" e,"Title" t \
WHERE s.emp_no=e.emp_no AND \
e.emp_title_id =t.title_id ORDER BY s.salary ASC;', conn)
except:
    print("Failed to get database result for Salary by Title query.")
#Display sample data
title_salary.head()
#Remove Salary format for pandas group by
# regex=False is required: by default "$" is treated as a regex end-of-string
# anchor, so the dollar signs would never be removed and astype(float) fails.
title_salary["salary"] = title_salary["salary"].str.replace("$", "", regex=False).str.replace(",", "", regex=False).astype(float)
#Display sample data
title_salary.head()
# +
#Get average salary for job title
# groupby().mean() averages the salary column per title
title_salary= title_salary.groupby(["title"]).mean()
#Reset index so 'title' is a plain column again for plotting
title_salary =title_salary.reset_index()
# -
#Display sample records
title_salary.head()
# +
#Set fig size
fig, ax = plt.subplots(figsize=(7,5), dpi=100)
#Get x and y axis
x_axis = title_salary.title
y_axis = title_salary.salary
#Set the plot with x and y axis
plt.bar(x_axis, y_axis, color="#86bf91")
#Set graph labels
plt.xlabel("Job Title", weight='bold', size=12)
plt.ylabel("Average Salary", weight='bold', size=12)
plt.title("Average Salary By Job Title", weight='bold', size=15)
#Set x axis label alignment BEFORE saving — previously savefig ran first,
#so the saved PNG did not include the rotated tick labels.
ax.set_xticklabels(ax.get_xticklabels(), rotation=30, ha='right')
plt.draw()
#Save plot file
plt.savefig(file_path +"salaryTitle.png")
# -
#Close DB connection
# try/except guards against the connection already being closed or invalid
try:
    conn.close()
except:
    print("Database connection is aborted or not active")
else:
    print("Database Connection closed successfully!")
| EmployeeSQL/EmployeeSalaryAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # IMPORT
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import cv2
#keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
import sklearn.metrics as metrics
# +
project_path = '/Users/vandi/Desktop/readForMe/Deep Learning/'
download_path = project_path + 'emnist/'
# -
# # Load Data
def read_idx(filename):
    """Parse a gzip-compressed IDX file (MNIST/EMNIST format) into a numpy array.

    Header layout (big-endian): 2 zero bytes, a dtype byte, a byte holding the
    number of dimensions, then one 4-byte unsigned int per dimension, followed
    by the raw uint8 payload.
    """
    # Bug fix: gzip and struct are used here but never imported by this
    # notebook's import cell, so the function raised NameError at runtime.
    import gzip
    import struct
    print('Processando as informações de %s.' % filename)
    with gzip.open(filename, 'rb') as f:
        z, dtype, dim = struct.unpack('>HBB', f.read(4))
        print("Dimensoes: ", dim)
        shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dim))
        print("Shape: ", shape)
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(shape)
def load_emnist():
    """Load the EMNIST byclass train/test images and labels from download_path."""
    filenames = {
        'train_x': 'emnist-byclass-train-images-idx3-ubyte',
        'train_y': 'emnist-byclass-train-labels-idx1-ubyte',
        'test_x': 'emnist-byclass-test-images-idx3-ubyte',
        'test_y': 'emnist-byclass-test-labels-idx1-ubyte',
    }
    arrays = {key: read_idx(download_path + name) for key, name in filenames.items()}
    return (arrays['train_x'], arrays['train_y'], arrays['test_x'], arrays['test_y'])
raw_train_x,raw_train_y,raw_test_x,raw_test_y = load_emnist()
| Deep Learning/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Casting Classification as Regression, Regressing to Probabilities
# 1. We can turn classification labels into a one-hot vector.
# 2. We can regress to the vector.
# 3. To produce output classes, we can take the element with highest weight.
# 4. The regressed value can be interpreted as an (approximate) probability.
#
# Regressing to probabilities is a useful trick, especially when we start thinking about confidences and unsupervised data analysis.
#
# [Link to Fish Dataset Details](https://www.kaggle.com/aungpyaeap/fish-market)
# +
import numpy as np
import csv
from tqdm import tqdm_notebook
rows = []
# Read the Kaggle Fish.csv dataset into a list of rows
with open('Fish.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        rows.append(row)
print(len(rows))
print(rows[0]) # first row is a header
print(rows[1])
rows = rows[1:]
labels = {} # Create a dictionary of label strings to numeric values
# Assign each species name the next integer id, in first-seen order
for row in rows:
    if row[0] not in labels:
        labels[row[0]]=len(labels)
print(labels)
# Features: the six numeric measurement columns; target: species id
inputs = np.array([[float(c) for c in (row[1:])] for row in rows])
outputs = np.array([labels[row[0]] for row in rows])
print(outputs)
# +
def output_to_one_hot(categories, max_val):
    """Encode integer class labels as one-hot rows of width *max_val*."""
    label_column = np.asarray(categories)[:, None]
    # Broadcast equality against 0..max_val-1 yields the one-hot pattern
    return (np.arange(max_val) == label_column).astype(float)
# One-hot encode the species ids for regression targets
encodings = output_to_one_hot(outputs, len(labels))
print(encodings[:10])
print(encodings[-10:])
# -
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(inputs, encodings)
# # Assignment:
# 1. Define a network class that regresses to the 7 outputs.
# 2. Train a sufficiently large network to perform the categorization.
# 3. Measure the test accuracy of the model by counting the number of accurate labels
#
# # Stretch Goals:
# - Test out different network architectures (depth, breadth) and examine training performance
from torch.autograd import Variable
from tqdm import tqdm_notebook, trange
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as utils
import matplotlib.pyplot as plt
import numpy as np
import time
class Net(nn.Module):
    """Fully-connected regressor mapping 6 fish measurements to 7 one-hot class scores."""

    def __init__(self, breadth=500, depth=3):
        super(Net, self).__init__()
        hidden_layers = depth - 2
        self.fcs = [nn.Linear(6, breadth)]
        # Bug fix: the original used [nn.Linear(...)] * hidden_layers, which
        # repeats ONE layer object, so every hidden layer shared the same
        # weights. Create a fresh Linear per hidden layer instead.
        self.fcs.extend(nn.Linear(breadth, breadth) for _ in range(hidden_layers))
        # Register the layers so .parameters() sees them
        for i, fc in enumerate(self.fcs):
            self.add_module('fc{}'.format(i), fc)
        self.final_fc = nn.Linear(breadth, 7)

    def forward(self, x):
        for fc in self.fcs:
            x = F.relu(fc(x))
        return self.final_fc(x)

    def train(self, inputs, true_vals, num_epochs=1000):
        """Fit the network with Adam on MSE loss; return (final loss, seconds).

        NOTE(review): this shadows nn.Module.train(mode); kept for interface
        compatibility with existing callers.
        """
        inputs = torch.from_numpy(inputs).float()
        true_vals = torch.from_numpy(true_vals).float()
        t0 = time.time()
        self.float()  # force float type
        self.zero_grad()
        # Apply one step of random gradients to break symmetry (kept from the
        # original; NOTE(review): learning_rate=1 on random grads is unusual).
        outputs = self(Variable(torch.Tensor([0] * 6)))
        outputs.backward(torch.randn(7))
        learning_rate = 1
        for f in self.parameters():
            f.data.sub_(f.grad.data * learning_rate)
        # create your optimizer
        optimizer = optim.Adam(self.parameters())
        t = trange(num_epochs)
        for epoch in t:  # loop over the dataset multiple times
            outputs = self(inputs)
            error = true_vals - outputs
            loss = (error ** 2).mean()  # MSE against the one-hot targets
            loss.backward()
            optimizer.step()
            # zero the parameter gradients for the next epoch
            optimizer.zero_grad()
            t.set_description('ML: loss={}'.format(loss.item()))  # Updates Loss information
        t1 = time.time() - t0
        return loss, t1

    def categorize(self, inputs):
        """Return hard one-hot predictions for a numpy batch of inputs."""
        inputs = torch.from_numpy(inputs).float()
        # Bug fix: the original referenced the module-level global `net`
        # instead of self, so categorize() broke on any other instance.
        outputs = self(inputs).detach().numpy()
        # make hard decisions: 1 at the argmax of each row, 0 elsewhere
        hard_output = np.zeros_like(outputs)
        hard_output[np.arange(len(outputs)), outputs.argmax(1)] = 1
        return hard_output
net = Net()  # default architecture: breadth 500, depth 3
print(net)  # show the registered layer structure
net.train(X_train, y_train)  # X_train/y_train come from the earlier train_test_split
from tqdm import tqdm_notebook
# +
y_categorized_test = net.categorize(X_test)
# The number of accurate labels.
# BUG FIX: the original summed element-wise matches and divided by the
# number of classes, which over-counts: a wrong one-hot prediction still
# agrees with the true row on 5 of the 7 entries. A label is accurate
# only when the entire one-hot row matches.
num_accurate_labels = np.sum(np.all(y_categorized_test == y_test, axis=1))
frac_correct = num_accurate_labels / len(y_test)
print(frac_correct)
# -
num_accurate_labels
# +
# Test different parameters
breadths = [250, 500, 1000]
depths = [3, 4, 5]
frac_corrects = np.zeros([len(breadths), len(depths)])  # NOTE(review): allocated but never filled below
for b, breadth in enumerate(breadths):
    for d, depth in enumerate(depths):
        # Train a fresh network for each (breadth, depth) combination.
        net = Net(breadth=breadth, depth=depth)
        net.train(X_train, y_train)
        y_categorized_test = net.categorize(X_test)
        # The number of accurate labels
        num_accurate_labels = np.sum(y_categorized_test == y_test) / y_test.shape[1]
        frac_correct = num_accurate_labels / len(y_test)
        print("Breadth: {}, Depth: {}, {} correct".format(breadth, depth, frac_correct))
# -
| Section 2/Part 2/2-2 Supervised Learning - Eric Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
############################################################################################
# Data: National Data on the relative frequency of given names                             #
# in the population of U.S. births where the individual has a Social Security Number.      #
#                                                                                          #
# yob2010.csv is the record of 2010. It has the format "Name, Gender, Number",             #
# where name is 2 to 15 characters, gender is M (male) or F (female),                      #
# and "Number" is the number of occurrences of the name.                                   #
############################################################################################
# Run this cell to create a list of tuples called baby_list.
# The three elements of each tuple is in the format of (name, gender, number).
# Both name and gender are of type string and number is of type integer.
# !!! Don't modify this cell !!!
import pandas as pd
df = pd.read_csv('yob2010.csv', header=0)
name_list = list(df.Name)
gender_list = list(df.Gender)
list(df.Number)  # NOTE(review): no-op expression, result discarded (left in place per the do-not-modify instruction)
number_list = list(map(int,list(df.Number)))
baby_list = list(zip(name_list, gender_list, number_list ))
baby_list
# -
# -
# BUG FIX: baby_list is a list, not a callable -- baby_list(0,2) raised
# TypeError. The slice below returns the first two (name, gender, number)
# tuples, which appears to be the intent; adjust the range if a different
# selection was meant.
Fname_age = baby_list[0:2]
Fname_age
# +
# You should write your answer within the function provided.
# should return a tuple as (name, number)
def answer_zero():
    """Return (name, number) for the first record in baby_list."""
    first_name, _gender, first_count = baby_list[0]
    return (first_name, first_count)
answer_zero()
# -
# ## Question 1
# What is the most popular boy name in 2010?
# This function should return a tuple of type string and integer.
# You should write your answer within the function provided.
# should return a string
def answer_one():
    """Return (name, number) for the most popular boy name in baby_list."""
    best_count = 0
    best_name = baby_list[0][0]
    for entry_name, entry_gender, entry_count in baby_list:
        # Track the male entry with the highest count seen so far.
        if entry_gender == 'M' and entry_count > best_count:
            best_count = entry_count
            best_name = entry_name
    return best_name, best_count
# ## Question 2
# Girls over 1000
# +
# You should write your answer within the function provided.
# should return a list of strings
def answer_two():
    """Return names of girls with a count above 10000, in file order.

    NOTE(review): the prompt says "Girls over 1000" but the original
    solution used 10000; threshold preserved as-is -- confirm which
    was intended.
    """
    return [entry_name for entry_name, entry_gender, entry_count in baby_list
            if entry_count > 10000 and entry_gender == 'F']
answer_two()
# -
# ## Question 3
# Count the occurrences of all the boys' names whose number is greater than 8000.
# This function should return an integer.
# +
# You should write your answer within the function provided.
# should return an integer
def answer_three():
    """Count the boys' names whose number is greater than 8000."""
    return sum(1 for entry_name, entry_gender, entry_count in baby_list
               if entry_gender == 'M' and entry_count > 8000)
answer_three()
# -
# ## Question 4
# Given a name and gender, what is the number of it?
# This function should return an integer.
# You should write your answer within the function provided.
# should return an integer
def answer_four(name, gender):
    """Return the count for the given (name, gender), or None if absent."""
    for record_name, record_gender, record_number in baby_list:
        # First match wins; names are unique per gender in this dataset's format.
        if (record_name, record_gender) == (name, gender):
            return record_number
    return None
# Test code for Question 4
# !!! Don't modify this cell !!!
# Pairs not present in the data (e.g. "Abc") should print Number: None.
names = ["Emmy","Abc","Allen","Allen","Jacob"]
genders = ["F","F","M","F","M"]
for i in range (0, len(names)):
    print ("Name:",names[i],"Gender:",genders[i],"Number:",answer_four(names[i],genders[i]))
| Pandas/yobDS_pandas/.ipynb_checkpoints/StephenIvkov-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming Bootcamp 2019
# # Lesson 4 Exercises
# ---
# **Required Files for this exercise**
# * genes.txt
# * init_sites.txt
# * gene_expr.txt
# * sequences2.txt
#
# **Earning points (optional)**
#
# If you would like to get points/feedback for your work, please **submit your notebook to Piazza**. To do this, follow these steps:
#
# 1. Click "New Post"
# 2. For "Post Type" select `Question`
# 3. For "Post to" select `Individual Student(s)/Instructors(s)`
# 4. Where is says "Enter one or more names..." type `Instructors`
# 5. For "Select Folder(s)" select `lab4`
# 6. In "Summary" type in "[Your Full Name] Lab4 Submission"
# 7. In the "Details" click "Insert" > "Insert File" and then insert your python notebook.
# 8. You can then write whatever else you want in the Summary.
# 9. Then click "Post My Question to PROGRAMMING BOOTCAMP!":
#
# Please also **write your name below**. You do not need to complete all the problems to get points. Points will be assigned, but completion is all that actually matters. Those who consistently participate throughout bootcamp will get a ~prize~.
# **Name**:
# ---
# ## 1. Guess the output: list practice (1pt)
#
# For the following blocks of code, first try to guess what the output will be, and then run the code yourself. Points will be given for filling in the guesses; guessing wrong won't be penalized.
# +
ages = [65, 34, 96, 47]
print (len(ages))
# -
# Your guess:
#
# +
names = ["Wilfred", "Manfred", "Wadsworth", "Jeeves"]
ages = [65, 34, 96, 47]
print (len(ages) == len(names))
# -
# Your guess:
#
# +
ages = [65, 34, 96, 47]
for hippopotamus in ages:
print (hippopotamus)
# -
# Your guess:
#
# +
ages = [65, 34, 96, 47]
print (ages[1:3])
# -
# Your guess:
#
# +
names = ["Wilfred", "Manfred", "Wadsworth", "Jeeves"]
if "Willard" not in names:
names.append("Willard")
print (names)
# -
# Your guess:
#
# +
names = ["Wilfred", "Manfred", "Wadsworth", "Jeeves"]
ages = [65, 34, 96, 47]
for i in range(len(names)):
print (names[i],"is",ages[i])
# -
# Your guess:
#
# +
ages = [65, 34, 96, 47]
ages.sort()
print (ages)
# -
# Your guess:
#
# +
ages = [65, 34, 96, 47]
ages = None
print (ages)
# -
# Your guess:
#
# > Remember that `.sort()` is an in-place function. Its return value is "None". (This is a special value in Python that basically means "null". It's used as a placeholder sometimes when we don't want to give something a value.)
# +
ages = [65, 34, 96, 47]
print (max(ages))
# -
# Your guess:
#
# +
cat = "Mitsworth"
for i in range(len(cat)):
print (cat[i])
# -
# Your guess:
#
# +
cat = "Mitsworth"
print (cat[:4])
# -
# Your guess:
#
# +
str1 = "Good morning, Mr. Mitsworth."
parts = str1.split()
print (parts)
print (str1)
# -
# Your guess:
#
# +
str1 = "Good morning, Mr. Mitsworth."
parts = str1.split(",")
print (parts)
# -
# Your guess:
#
# +
names = ["Wilfred", "Manfred", "Wadsworth", "Jeeves"]
print (names[-1])
# -
# Your guess:
#
# +
oldList = [2, 2, 6, 1, 2, 6]
newList = []
for item in oldList:
if item not in newList:
newList.append(item)
print (newList)
# -
# Your guess:
# > This is an example of how to remove duplicates.
#
# ---
# ## 2. On your own: Lists (3pts)
#
# Write code to accomplish each of the following tasks **using list functions**. Do not copy and paste the list to make changes. ***You must pretend you don't know what's in the list.***
# run this first!
# 24 mouse gene symbols used by the list exercises below.
geneNames = ["Ppia", "Gria2", "Mecp2", "Omd", "Zfp410", "Hsp1", "Mtap1a", "Cfl1",
             "Slc25a40", "Dync1i1", "V1ra4", "Fmnl1", "Mtap2", "Atp5b", "Olfr259",
             "Atf3", "Vapb", "Dhx8", "Slc22a15", "Orai3", "Ifitm7", "Kcna2", "Timm23", "Shank1"]
# **(A)** (1pt) Replace the 12th element in `geneNames` with the string `"Camk2a"`.
# **(B)** (1pt) Add the string `"Shank3"` to the end of the `geneNames` list.
# **(C)** (1pt) Using a loop, print the elements in `geneNames` that start with "S" or "O".
# ---
# ## 3. On your own: Strings & Splitting (3pts)
#
# Write code to accomplish each of the following tasks **using string slicing and splitting**.
# **(A)** (1pt) Print the 3rd to 9th letters of the string stored in the variable `magicWords` using string slicing. (You should get "racadab")
magicWords = "abracadabra"
# **(B)** (1pt) Prompt the user to input a sentence using `input()`. Print each word of their sentence on a separate line.
# **(C)** (1pt) Using `input()`, prompt the user to enter some good ideas for cat names (have them enter the names on one line, separating each name with a comma). Separate the names so that each one is a separate element of a list. Then choose one randomly (using the random module) and print it out.
# > Note: if you want to strip off the extra whitespace, you can do this:
# ```
# print (catNamesList[randNum].strip(" "))
# ```
# ---
# ## 4. File reading and lists (3pts)
#
# For this problem, use the file `genes.txt` provided on Piazza. It contains some gene IDs, one on each line.
# **(A)** (1pt) Read the file and print to the screen only the gene IDs that contain "uc007".
#
# **[ Check your answer ]** You should end up printing 14 gene IDs.
# **(B)** (1 pt) Print to the screen only unique gene IDs (remove the duplicates). Do not assume repeat IDs appear consecutively in the file.
#
# *Hint: see problem 1 for an example of duplicate checking.*
#
# **[ Check your answer ]** You should end up printing 49 gene IDs.
# **(C)** (1 pt) Print to the screen only the gene IDs that are still unique after removing the ".X" suffix (where X is a number).
#
# **[ Check your answer ]** You should end up printing 46 gene IDs.
# ---
# ## 5. Practice with `.split()` (4pts)
#
# Use `init_sites.txt` to complete the following. This file contains a subset of translation initiation sites in mouse, identified by Ingolia et al. (Cell, 2011). Note that this file has a header, which you will want to skip over.
# **(A)** (2 pt) Write a script that reads this file and computes the average CDS length (i.e. average the values in the 7th column).
#
# **[ Check your answer ]** You should get an average of 236.36.
# **(B)** (2 pt) Write a script that reads this file and prints the "Init Context" from each line (i.e. the 6th column) **if and only if** the "Codon" column (column 12) is "aug" for that line.
#
# **[ Check your answer ]** You should print 38 init contexts.
# **(C)** (0 pt) For fun (?), copy and paste your output from (b) into http://weblogo.berkeley.edu/logo.cgi to create a motif logo of the sequence around these initiation sites. What positions/nt seem to be most common?
# Your answer:
# ---
# ## 6. Cross-referencing (4pts)
#
# Here you will extract and print the data from `init_sites.txt` that corresponds to genes with high expression. There isn't gene expression data in `init_sites.txt`, so we'll have to integrate information from another file.
#
# - First, use `gene_expr.txt` to create a list of genes with high expression. We'll say high expression is anything >= 50.
# - Then read through `init_sites.txt` and print the GeneName (2nd column) and PeakScore (11th column) from any line that matches an ID in your high-expression list.
# - Finally, separately compute the average PeakScore for high expression genes and non-high expression genes. Print both averages to the screen.
#
# **[ Check your answer ]** There should be 10 lines corresponding to high-expression genes that you print info about. Your average peak scores should be 4.371 and 4.39325 for high and non-high expression genes, respectively.
# ---
# ## 7. All-against-all comparisons (4pts)
#
# A common situation that arises in data analysis is that we have a list of data points and we would like to compare each data point to each other data point. Here, we will write a script that computes the "distance" between each pair of strings in a file and outputs a distance matrix. We will define "distance" between two strings as the number of mismatches between two strings when they are lined up, divided by their length.
#
# First we'll use a toy dataset. We'll create a list as follows:
# ```
# things = [1, 2, 5, 10, 25, 50]
# ```
# We'll start off by doing a very simple type of pairwise comparison: taking the numerical difference between two numbers. To systematically do this for all possible pairs of numbers in our list, we can make a nested for loop:
# +
things = [1, 2, 5, 10, 25, 50]
# All-against-all comparison: the nested loops visit every ordered pair
# (i, j), printing len(things)**2 differences, one per line.
for i in range(len(things)):
    for j in range(len(things)):
        # print absolute value of the difference
        print (abs(things[i] - things[j]))
# -
# Try running this code yourself and observe the output. Everything prints out on its own line, which isn't what we want -- we'd usually prefer a matrix-type format. Try this slightly modified code:
# +
things = [1, 2, 5, 10, 25, 50]
for i in range(len(things)):
    for j in range(len(things)):
        # end="" suppresses the newline so one row of the matrix stays on one line.
        print (abs(things[i] - things[j]), "\t", end="")
    print ("")  # newline after each completed row
# -
# This gives us the matrix format we want. Make sure you understand how this code works. To make sure that the `print` function doesn't print a new line at the end of the string, in python3 we reset its end character to an empty string. FYI, the "\t" is a tab character, and much like "\n", it is invisible once it's printed (it becomes a tab).
#
# So now we know how to do an all-against-all comparison. But how do we compute the number of mismatches between strings? As long as the strings are the same length, we can do something simple like the following:
# +
str1 = "Wilfred"
str2 = "Manfred"
diffs = 0
# Hamming-style mismatch count; assumes both strings have the same length.
for k in range(len(str1)):
    if str1[k] != str2[k]: #compare the two strings at the same index
        diffs = diffs + 1
# Distance = fraction of positions that differ, rounded to 2 decimals.
print ("dist =", round(float(diffs) / len(str1), 2))
# -
# So this outputs the distance between the two strings, where the distance is defined as the fraction of the sequence length that is mismatched.
#
# **Using these two pieces of code as starting points, complete the following:**
# **(A)** (2 pt) Create a list of a few short strings *of the same length*. For example:
# ```
# things = ["bear", "pear", "boar", "tops", "bops"]
# ```
# Write code that prints a distance matrix for this list. As in the last example, use the fraction of mismatches between a given pair of words as the measure of their "distance" from each other. Round the distances to 2 decimals.
# **(B)** (2 pt) Now, instead of using a hard coded list like you did in (A), create a list of DNA sequences by reading in the file `sequences2.txt`. Compute the distance matrix between these sequences and print the distance matrix. Looking at this matrix, do you see a pair of sequences that are much less "distant" from each other than all the rest?
# ---
# ## 8. Writing and Using Custom Functions with Lists (5 pt)
# **(A)** (2pts) Create a function called "rand_seq" that takes an integer length as a parameter, and then returns a random DNA sequence of that length.
#
# *Hint: make a list of the possible nucleotides*
# **(B)** (2pts) Create a function called "shuffle_nt" that takes a single sequence as a parameter and returns a string that is a shuffled version of the sequence (i.e. the same nucleotides, but in a random order).
#
# *Hint: Look for Python functions that will make this easier. For example, the `random` module has some functions for shuffling. There may also be some built-in string functions that are useful. However, you can also do this just using things we've learned.*
# **(C)** (1pt) Run the unit tests below to show that all of your functions work. Try to fix any that have problems.
# +
##### testing rand_seq
randSeq1 = rand_seq(23)
randSeq2 = rand_seq(23)
if type(randSeq1) != str:
    print (">> Problem with rand_seq: answer is not a string, it is a %s." % type(randSeq1))
elif len(randSeq1) != 23:
    print (">> Problem with rand_seq: answer length (%s) does not match expected (%s)." % (len(randSeq1), 23))
elif randSeq1 == randSeq2:
    print (">> Problem with rand_seq: generated the same sequence twice (%s) -- are you sure this is random?" % randSeq1)
else:
    print ("rand_seq: Passed.")
##### testing shuffle_nt
shuffSeq = shuffle_nt("AAAAAAGTTTCCC")
if type(shuffSeq) != str:
    print (">> Problem with shuffle_nt: answer is not a string, it is a %s." % type(shuffSeq))
elif len(shuffSeq) != 13:
    # BUG FIX: the message reported expected length 12 while the check
    # (correctly) compares against 13, the input's length.
    print (">> Problem with shuffle_nt: answer length (%s) does not match expected (%s)." % (len(shuffSeq), 13))
elif shuffSeq == "AAAAAAGTTTCCC":
    # NOTE: a fair shuffle can rarely reproduce the input order, so this
    # can flag a correct implementation (very unlikely for length 13).
    print (">> Problem with shuffle_nt: answer is exactly the same as the input. Are you sure this is shuffling?")
elif shuffSeq.count('A') != 6:
    print (">> Problem with shuffle_nt: answer doesn't contain the same # of each nt as the input.")
else:
    # BUG FIX: success message said "shuff_seq" but the function under test
    # is shuffle_nt.
    print ("shuffle_nt: Passed.")
# -
# **(D)** (1pt) Use the **functions you created above** to create 20 random nucleotide sequences of length 50 and print them to the screen.
# ---
#
# ---
# ## Extra questions (0pts)
#
# These questions are for people who would like extra practice. They will not be counted for points.
# **(A)** Following from problem 7 above: As you can see, the distance matrix you made in (B) is symmetrical around the diagonal. This means dist(i,j) is the same as dist(j,i), so we're doing some redundant calculations.
#
# Change the code so that we don't do any unnecessary calculations (including comparing a sequence to itself, which always is
# 0). For any calculations you skip, you can print "-" or some other place-holder to keep the printed matrix looking neat.
#
# *Hint: There's a really simple way to do this! Think about the range of the second loop...*
# **(B)** Below is a loop that creates a list. Do the same thing but with a list comprehension instead.
# +
# Loop version:
# (Exercise: re-express this loop as a list comprehension.)
import random
randomNums = []
for i in range(100):
    randomNums.append(random.randint(0,10))
print (randomNums)
# -
# Your list comprehension version here:
# **(C)** Below is a loop that creates a list. Do the same thing but with a list comprehension instead.
# Loop version:
# (Exercise: re-express as a comprehension; only even i produce a value,
# and each value is converted to a string before being stored.)
import random
randomNums = []
for i in range(100):
    if (i % 2) == 0:
        randNum = random.randint(0,10)
        randNumStr = str(randNum)
        randomNums.append(randNumStr)
print (randomNums)
# Your list comprehension version here:
| class_materials/Data_Struture_List/2019/lab_exercise/lab4_exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Finviz Finance
# ## Quote
# +
import pandas as pd
from finvizfinance.quote import finvizfinance
# Fetch the finviz quote page for Tesla.
stock = finvizfinance('tsla')
# -
# Downloads the ticker chart; displayed below from 'tsla.jpg' -- presumably
# the default output filename (confirm against the finvizfinance docs).
stock.ticker_charts()
from IPython.display import Image
Image(filename='tsla.jpg')
# Pull the various per-ticker tables/strings from the quote page.
stock_fundament = stock.ticker_fundament()
stock_description = stock.ticker_description()
outer_ratings_df = stock.ticker_outer_ratings()
news_df = stock.ticker_news()
inside_trader_df = stock.ticker_inside_trader()
print(stock_fundament)
print(stock_description)
outer_ratings_df.head()
news_df.head()
inside_trader_df.head()
stock.ticker_signal()
# ## Statements
from finvizfinance.quote import Statements
statement = Statements()
# statement="B", timeframe="Q": argument names suggest balance sheet,
# quarterly -- confirm the codes against the finvizfinance API docs.
df = statement.get_statements('AAPL', statement="B", timeframe="Q")
df.head()
# ## News
from finvizfinance.news import News
fnews = News()
all_news = fnews.get_news()
# get_news() returns a dict keyed by 'news' and 'blogs' (used below).
all_news['news'].head()
all_news['blogs'].head()
# ## Insider
from finvizfinance.insider import Insider
finsider = Insider(option='top owner trade')
finsider.get_insider().head()
# Use case: getting specific insider information
from finvizfinance.insider import Insider
finsider = Insider(option='1771340')
finsider.get_insider().head()
# ## Screener
# Use case: setting filters
from finvizfinance.screener.overview import Overview
foverview = Overview()
filters_dict = {'Exchange':'AMEX','Sector':'Basic Materials'}
foverview.set_filter(filters_dict=filters_dict)
df = foverview.screener_view(order='Company')
df.head()
# Use case: getting companies which are similar to TSLA.
# +
ticker = 'tsla'
compare_list = ['Sector','Industry','Country']
from finvizfinance.screener.financial import Financial
ffinancial = Financial()
df = ffinancial.compare(ticker,compare_list,verbose=0)
df
# -
# Use case: getting companies that have the 'Triangle Ascending' signal.
signal = 'Triangle Ascending'
foverview.set_filter(signal=signal)
df = foverview.screener_view()
df.head()
# Use case: Getting individual company.
ticker='TSLA'
# Reset signal and filters so only the ticker constraint applies.
foverview.set_filter(signal='', filters_dict={}, ticker=ticker)
df = foverview.screener_view()
df.head()
# # Forex
from finvizfinance.forex import Forex
fforex = Forex()
df = fforex.performance()
df
fforex.chart('audusd')
# # Crypto
from finvizfinance.crypto import Crypto
fcrypto = Crypto()
df = fcrypto.performance()
df
# # Group
# NOTE(review): this Overview shadows the screener Overview imported in the
# Screener section above -- fine here, but don't reuse foverview after this.
from finvizfinance.group.overview import Overview
fgoverview = Overview()
df = fgoverview.screener_view(group='Industry')
df.tail()
from finvizfinance.group.valuation import Valuation
fgvaluation = Valuation()
df = fgvaluation.screener_view(group='Industry')
df.tail()
from finvizfinance.group.performance import Performance
fgperformance = Performance()
df = fgperformance.screener_view(group='Industry')
df.tail()
# # Earnings
# +
from finvizfinance.earnings import Earnings
fEarnings = Earnings()
df_days = fEarnings.partition_days(mode='financial')
# -
df_days['Dec 06/a']
| example/.ipynb_checkpoints/example-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_mxnet_latest_p37
# language: python
# name: conda_mxnet_latest_p37
# ---
# NOTE(review): `aws ls` is not a valid AWS CLI invocation (a service is
# required, e.g. `aws s3 ls`) -- confirm intent before running this cell.
# !aws ls
# Sync the training-run artifacts from S3 into a local directory.
# !aws s3 sync s3://sagemaker-us-west-2-355444812467/tomofun/1626497155.52871/ ./s3-new-data3 # --delete
# !aws s3 cp s3://somefile-for-final/ ./s3-new-data --recursive
# +
# # !aws s3 sync s3://sagemaker-us-west-2-355444812467/tomofun/ ./s3-new-data
# +
# # !aws s3 cp augmented.manifest {train_path}/meta_train.csv
# ['s3://sagemaker-us-west-2-355444812467/a2i-demo/e729ab47-50c4-4e41-b7b8-c37291a2d9a7.wav']
| 01-byoc/youjun-test-s3-data-CURD.ipynb |