code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
from examples.single_cond_example import create_conditions
from tensorflow.keras import Input, Model
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM
from cond_rnn import ConditionalRNN
# -
import os, sys
def get_longest_file(train_dir='./data/train/'):
    """Return a sorted slice (indices 30:40) of per-file line counts under *train_dir*.

    NOTE(review): despite the name, this returns line-count statistics rather
    than a filename; the TIME_STEPS cell below relies on that return value.
    Fixes vs. the previous version: removed the unused `longest_len` variable
    and streamed the line count instead of materialising every line.
    """
    lens = []
    for fn in os.listdir(train_dir):
        with open(os.path.join(train_dir, fn)) as f:
            # sum() over the file iterator counts lines without buffering them all.
            lens.append(sum(1 for _ in f))
    return sorted(lens)[30:40]
# Too long! cut around 1000 words
def get_longest_filename(target_len=133068, train_dir='./data/train/'):
    """Print the name of every file under *train_dir* with exactly *target_len* lines.

    The default (133068) is the length of the longest file in this dataset.
    Fixes vs. the previous version: the magic constant is now a parameter
    (backward compatible) and lines are counted by streaming the file rather
    than loading it fully with readlines().
    """
    for fn in os.listdir(train_dir):
        with open(os.path.join(train_dir, fn)) as f:
            if sum(1 for _ in f) == target_len:
                print(fn)
get_longest_filename()
# +
# Model hyper-parameters.
NUM_SAMPLES = 10_000
# NOTE(review): get_longest_file() returns a *list* of line counts (a sorted
# slice), not a single int — confirm TIME_STEPS is really meant to be a list.
TIME_STEPS = get_longest_file()
VOCAB_SIZE = 632 # INPUT_DIM?
NUM_CELLS = 1024
# -
print(get_longest_file())
# Import input, output data
| cseq2seq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# List of four sample high schools used for the loop demo below.
high_schools = ["Hernandez High School", "Figueroa High School", "Wilson High School", "Wright High School"]
for school in high_schools:
    print(school)
# A dictionary of high schools and the type of school.
high_school_types = [{"High School": "Griffin", "Type":"District"},
                     {"High School": "Figueroa", "Type": "District"},
                     {"High School": "Wilson", "Type": "Charter"},
                     {"High School": "Wright", "Type": "Charter"}]
for high_school in high_school_types:
    print(high_school)
# Full list of high schools (rebinds the name used above).
high_schools = ["Huang High School", "Figueroa High School", "Shelton High School", "Hernandez High School","Griffin High School","Wilson High School", "Cabrera High School", "Bailey High School", "Holden High School", "Pena High School", "Wright High School","Rodriguez High School", "Johnson High School", "Ford High School", "Thomas High School"]
#Add the Pandas dependency
import pandas as pd
#Create a Pandas Series from a list.
school_series = pd.Series(high_schools)
# Bare expression: displays the Series in the notebook.
school_series
# A dictionary of high schools
high_school_dicts = [{"School ID": 0, "school_name": "Huang High School", "type": "District"},
                     {"School ID": 1, "school_name": "Figueroa High School", "type": "District"},
                     {"School ID": 2, "school_name":"Shelton High School", "type": "Charter"},
                     {"School ID": 3, "school_name":"Hernandez High School", "type": "District"},
                     {"School ID": 4, "school_name":"Griffin High School", "type": "Charter"}]
school_df = pd.DataFrame(high_school_dicts)
school_df
# +
# Three separate lists of information on high schools
school_id = [0, 1, 2, 3, 4]
school_name = ["Huang High School", "Figueroa High School",
               "Shelton High School", "Hernandez High School","Griffin High School"]
type_of_school = ["District", "District", "Charter", "District","Charter"]
# +
# Initialize a new DataFrame.
schools_df = pd.DataFrame()
# Add the list to a new DataFrame.
schools_df["School ID"] = school_id
# -
# NOTE(review): "School_name" is capitalised inconsistently with the
# "school_name" key used in high_school_dicts above — confirm intended.
schools_df["School_name"] = school_name
schools_df["type"] = type_of_school
# Print the DataFrame.
schools_df
# +
# Create a dictionary of information on high schools.
#high_schools_dict = {'School ID': school_id, 'school_name':school_name, 'type':type_of_school}
#hs_df = pd.DataFrame(high_schools_dict)
#hs_df
# -
schools_df.columns
# NOTE(review): this inspects school_df (the dict-built frame) while the two
# neighbouring lines inspect schools_df — possibly a typo.
school_df.index
schools_df.values
| pandas_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import json
from pprint import pprint
from mmdet.datasets import VOCTXTDataset
import sys
# Dataset locations for the VOC pseudo-labelling run.
data_root = 'data/VOCdevkit/'
# Bug fix: a trailing comma here previously made ann_file a 1-tuple
# instead of a path string.
ann_file = data_root + 'VOC2012/ImageSets/Main/train_unlabeled.txt'
img_prefix = 'data/VOCdevkit/VOC2012/'
# +
# # !./tools/dist_test.sh configs/pascal_voc/ssd300_voc.py work_dirs/ssd300_voc/epoch_24.pth 8 --out results.pkl --eval bbox
# -
def extract_boxes(fname):
    """Read *fname* and return one list of space-separated tokens per line.

    Each line is stripped and split on single spaces, matching the
    annotation format written by the pseudo-label cell below.
    """
    # The old version called f.close() inside the with-block, which is
    # redundant: the context manager already closes the file.
    with open(fname) as f:
        return [line.strip().split(' ') for line in f]
# +
# for each image 20xnumber of boxes
# Load the raw detection results produced by the dist_test.sh run above.
with open('results.pkl', 'rb') as f:
    data = pickle.load(f)
# -
# Quick sanity check: number of per-class box arrays for image 111.
len(data[111])
# +
VOCTXTDataset.CLASSES
# Map detection category ids <-> class names.
cat2label = {cat: i for i, cat in enumerate(VOCTXTDataset.CLASSES)}
label2cat = {i: cat for i, cat in enumerate(VOCTXTDataset.CLASSES)}
print('Loading JSON ... ... ... \n')
with open('results.pkl.bbox.json') as f:
    data = json.load(f)
# Group predictions by image id.
merg = {}
for pred in data:
    merg.setdefault(pred['image_id'], list()).append(pred)
files = merg.keys()
for index, file in enumerate(files):
    # The image size is per-file, not per-box: read the annotation once
    # instead of once per box (assumes every predicted image has an
    # annotation file — previously only images with a known category did).
    info = extract_boxes(f'{img_prefix}/Annotations/{file}.txt')
    width = int(info[0][1])
    height = int(info[0][2])
    line = ''
    for box in merg[file]:
        if box['category_id'] in label2cat:
            # The old inner ternary's 'background' branch was dead code: the
            # surrounding if already guarantees the key is present.
            category_label = label2cat[box['category_id']]
            bbox = box['bbox']
            # bbox is [x, y, w, h]; emit corner coordinates.
            line = f"{line}{category_label} {width} {height} 0 {int(bbox[0])} {int(bbox[1])} {int(bbox[0]+bbox[2])} {int(bbox[3]+bbox[1])}\n"
        else:
            print(f"\n {box['category_id']} \n")
    sys.stdout.write(f"\r [ {file} ] {index} / {len(files)}")
    sys.stdout.flush()
    # img_id = f"{pred['image_id']}_psudo_labels"
    # Use a context manager so the file is closed even on error.
    with open(f'{img_prefix}/Annotations/{file}_psudo_labels.txt', 'w') as out:
        out.write(line)
# -
| sudo_labels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Interoperating with Verilog
#
# It is easy to create `magma` `Circuits`
# from existing Verilog modules.
import magma as m
# ## Defining a Circuit with a Verilog body
# Declare the port list, then attach the implementation as inline verilog;
# magma emits the body verbatim into the generated module.
HalfAdder = m.DefineCircuit('HalfAdder',
                            'A', m.In(m.Bit),
                            'B', m.In(m.Bit),
                            'S', m.Out(m.Bit),
                            'C', m.Out(m.Bit))
HalfAdder.verilog = '''\
assign S = A ^ B;
assign C = A & B;\
'''
m.EndCircuit()
print(HalfAdder)
# ## Defining a Circuit from Verilog.
#
# If we have verilog source, we can declare a magma circuit from that source.
# `Magma` uses the verilog parser `pyverilog` to parse the source
# and return a list of `Circuits`, one for each module in the file.
# +
# NOTE(review): this example module assigns to its *input* port `a`
# (`assign a = b & c;`) while `b` is declared as an output — presumably
# deliberate for parser-demo purposes, but worth confirming.
verilog_source = '''
module HalfAdder(a, b, c);
input a;
output b;
inout c;
assign a = b & c;
endmodule'''
HalfAdder = m.DefineFromVerilog(verilog_source)[0]
print(HalfAdder)
# -
# -
# The functions:
# ```
# DeclareFromVerilogFile(filename)
# DefineFromVerilogFile(filename)
# ```
# can be used to declare and define `Magma` circuits from verilog files.
# The declare versions declare a `Magma` circuit, but do not include the verilog source code.
# The define versions define a `Magma` circuit which includes the verilog source code.
# ## Defining a Circuit from Templated Verilog
#
# It is also possible to run a text templating engine on the verilog source.
#
# Here is an example using `mako`,
# a simple python templating engine.
# The expressions contained with `${...}` are python.
# The ${...} expressions are evaluated as Python by the templating engine.
mako_source = '''module CSA${N} ( input [${N-1}:0] a,b,c, output [${N-1}:0] s, co );
assign s = a ^ b ^c;
assign co = a&b | b&c | a&c;
endmodule'''
# +
from mako.template import Template
# Render the template with a concrete bit width (N=4).
template = Template(mako_source)
verilog = template.render(N=4)
print(verilog)
# -
# -
# The functions:
# ```
# DeclareFromTemplatedVerilogFile(filename, **args)
# DefineFromTemplatedVerilogFile(filename, **args)
# ```
# can be used to declare and define `Magma` circuits from templated verilog files.
# The keyword args are passed to the templating engine
# and can be used to control the generation of verilog.
# ## Genesis3
#
# See the repo https://github.com/phanrahan/genesis3
# for a simple verilog generator similar to
# [Genesis2](http://genesis2.stanford.edu/mediawiki/index.php/Main_Page).
| notebooks/advanced/verilog.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.2
# language: julia
# name: julia-0.6
# ---
# # Math features
#
# Tested on Julia 0.6.2.
#
# ## Basic arithmetic
#
# The multiplication operator can be omitted between a numeric literal and a variable.
a = 100
println(2a)
println(2.0a)
# Multi-byte identifiers work, so formulas can use Greek letters; the circle constant is bound to both `π` and `pi`.
println(2π)
println(2pi)
# Kanji and even emoji are valid identifiers too.
四 = 4
🐶 = "dog"
println(四)
println(🐶)
# Integer division that does not divide evenly yields a float (like Python 3). For Python 3's `//`, use `div()`.
println(3/2)
println(div(3, 2))
# Remainder is `%` or `rem()`.
println(3%2)
println(rem(3, 2))
# Exponentiation is `^`.
2^10
# "Greater than or equal" can be written either of the two ways below.
# +
x = 3
if x ≥ 2
    println("a")
end
if x >= 3
    println("b")
end
# -
# ## Linear algebra
#
# Writing matrices (multi-dimensional arrays); not deeply compared with NumPy/MATLAB here.
# A 2x2 matrix looks like this.
[1 2; 3 4]
# Size queries etc. are free functions rather than methods; `reshape()` is a bit awkward compared with NumPy's.
A = [1 2;3 4;]
println(size(A))
println(ndims(A))
reshape(A, 4, 1)
# `*` on two matrices is a true matrix product (NumPy's `*` is elementwise); use `.*` for the elementwise (Hadamard) product.
# +
A = reshape(1:4, 2, 2)
B = reshape(5:8, 2, 2)
println("$A * $B = $(A*B)")
println("$A .* $B = $(A.*B)")
| Julia/math.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import gym
import itertools
import matplotlib
import numpy as np
import pandas as pd
import sys
if "../" not in sys.path:
sys.path.append("../")
from collections import defaultdict
from lib.envs.windy_gridworld import WindyGridworldEnv
from lib import plotting
matplotlib.style.use('ggplot')
# -
env = WindyGridworldEnv()
def make_epsilon_greedy_policy(Q, epsilon, nA):
    """Build an epsilon-greedy policy from an action-value function.

    Args:
        Q: Mapping from state to a length-nA numpy array of action values.
        epsilon: Probability of choosing a uniformly random action (0..1).
        nA: Number of actions in the environment.

    Returns:
        A function mapping an observation to a length-nA numpy array of
        per-action selection probabilities.
    """
    def policy_fn(observation):
        # Start from the uniform exploration mass, then pile the
        # remaining (1 - epsilon) onto the greedy action.
        probs = np.full(nA, epsilon / nA, dtype=float)
        greedy = np.argmax(Q[observation])
        probs[greedy] += 1.0 - epsilon
        return probs
    return policy_fn
def sarsa(env, num_episodes, discount_factor=1.0, alpha=0.5, epsilon=0.1):
    """
    SARSA algorithm: On-policy TD control. Finds the optimal epsilon-greedy policy.
    Args:
        env: OpenAI environment.
        num_episodes: Number of episodes to run for.
        discount_factor: Gamma discount factor.
        alpha: TD learning rate.
        epsilon: Chance to sample a random action. Float between 0 and 1.
    Returns:
        A tuple (Q, stats).
        Q is the optimal action-value function, a dictionary mapping state -> action values.
        stats is an EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.
    """
    # The final action-value function.
    # A nested dictionary that maps state -> (action -> action-value).
    Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # Keeps track of useful statistics
    stats = plotting.EpisodeStats(
        episode_lengths=np.zeros(num_episodes),
        episode_rewards=np.zeros(num_episodes)
    )
    # The policy we're following.
    # Note: We are just declaring a function here - no need to declare it
    # again just because Q changed (the closure sees the same dict), but it
    # would need re-declaring for changes in epsilon.
    policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
    for i_episode in range(num_episodes):
        # Print out which episode we're on, useful for debugging.
        if (i_episode + 1) % 1000 == 0:
            print("\rEpisode {}/{}.".format(i_episode+1, num_episodes), end="")
            sys.stdout.flush()
        # The very first state (== observation) of the episode.
        observation = env.reset()
        # Select the first action by sampling from the epsilon-greedy
        # action probabilities for the first state.
        action = np.random.choice(env.action_space.n, p=policy(observation))
        # We are naively assuming finite lengths of episodes here.
        # Using while loop like this might lead to infinite loop
        # depending on environments. Adding measures to enforce
        # finite number of iterations might be useful.
        episode_finished = False
        while not episode_finished:
            # Update the total number of steps in this episode (for stats purposes)
            stats.episode_lengths[i_episode] += 1
            # Need to backup the current state before moving on
            # to do the lookahead
            prev_observation = observation
            # Record the result of the action
            observation, reward, done, _ = env.step(action)
            # Update the total reward obtained from this episode (for stats purposes)
            stats.episode_rewards[i_episode] += reward
            if done:
                # If we receive 'done' signal, let's finish up the episode
                episode_finished = True
                # When we reached the terminal state, there are no future
                # state-actions, so the TD target is just the reward.
                Q[prev_observation][action] += alpha * (reward - Q[prev_observation][action])
            else:
                # Select next action in advance to do the SARSA lookahead
                next_action = np.random.choice(env.action_space.n, p=policy(observation))
                # Update Q with TD Target and TD Error
                Q[prev_observation][action] += alpha * (
                    reward + discount_factor * Q[observation][next_action] - Q[prev_observation][action]
                )
                # We will take next_action at the next iteration
                action = next_action
    return Q, stats
# Train for 200 episodes and plot the learning curves.
Q, stats = sarsa(env, 200)
plotting.plot_episode_stats(stats)
| TD/SARSA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
from utils import *
from model import *
import re
import os
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from keras.models import load_model
from sklearn.metrics import classification_report, confusion_matrix
# -
# config
height, width, depth = 96, 96, 3
save_path = 'model/first_model.model'
# load dataset (project helper from utils; 300 appears to cap images per class — confirm)
image_list, label_list, num_classes = load_dataset('simpsons_images', height, width, 300)
# +
# label encoder: map character-name strings to integer ids
le = LabelEncoder()
label_list = le.fit_transform(label_list)
# save label encoder as npy so inference can recover class names
np.save('classes.npy', le.classes_)
# -
# split into train, dev sets (stratified to preserve class balance)
X_train, X_dev, y_train, y_dev = train_test_split(np.array(image_list), label_list,
                                                  test_size=0.2, stratify=label_list)
# convert to one-hot vectors
y_train = np_utils.to_categorical(y_train)
y_dev = np_utils.to_categorical(y_dev)
# model 2 (project helper from model.py)
model = build_model_2(height, width, depth, num_classes)
model.summary()
# Train for up to 200 epochs; best weights are checkpointed to save_path.
model, history = train_model(model, 200, save_path, X_train, y_train,
                             X_dev, y_dev, data_augmentation=True)
# +
# Learning curves: accuracy (left) and loss (right) for train vs dev.
f, ax = plt.subplots(ncols=2, figsize=(15, 5))
ax[0].plot(history.history['acc'], label='train')
ax[0].plot(history.history['val_acc'], label='dev')
ax[0].legend()
ax[0].set_xlabel('epochs')
ax[0].set_ylabel('Accuracy')
ax[1].plot(history.history['loss'], label='train')
ax[1].plot(history.history['val_loss'], label='dev')
ax[1].legend()
ax[1].set_xlabel('epochs')
ax[1].set_ylabel('Loss')
# -
# ## Evaluation
# +
# load best model (checkpointed during training)
model = load_model(save_path)
# load label encoder
le = LabelEncoder()
le.classes_ = np.load('classes.npy')
# +
# load test set; filenames look like "<character>_<index>.<ext>"
test_path = 'simpsons_testset'
X_test = []
y_test = []
for image_name in os.listdir(test_path):
    image_path = os.path.join(test_path, image_name)
    # Strip the trailing "_<number>" and the extension to recover the label.
    # NOTE(review): [:-4] assumes a 3-character extension — confirm.
    image_name = re.sub('_[0-9]+', '', image_name[:-4])
    if image_name in le.classes_:
        # cv2 is presumably exported by `from utils import *` — verify.
        image = cv2.imread(image_path)
        image = preprocess_image(image, height, width)
        X_test.append(image)
        y_test.append(image_name)
X_test = np.array(X_test)
y_test = le.transform(y_test)
# -
# classification report (per-class precision/recall/F1)
pred_test = model.predict(X_test)
print(classification_report(y_test, np.argmax(pred_test, axis=1), target_names=le.classes_))
# Confusion matrix heat map with class-name tick labels.
plt.figure(figsize=(15, 15))
cnf_matrix = confusion_matrix(y_test, np.argmax(pred_test, axis=1))
class_names = le.classes_
plt.imshow(cnf_matrix, interpolation='nearest')
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=90)
plt.yticks(tick_marks, class_names)
# url = "https://upload.wikimedia.org/wikipedia/en/thumb/a/ac/The_Simpsons%E2%80%93S23.jpg/220px-The_Simpsons%E2%80%93S23.jpg"
url = "https://ksassets.timeincuk.net/wp/uploads/sites/55/2017/03/simpsons-the-great-phatsby-1-920x584.jpg"
# Fetch an arbitrary frame from the web and visualise the prediction.
plot_and_predict(url, model, le.classes_, height, width)
# ## Visualization
# #### Attention Map
from vis.utils import utils
from vis.visualization import visualize_saliency
# keras-vis expects the output layer's softmax to be swapped for a linear
# activation before computing saliency maps.
from keras import activations
# +
plt.figure()
f, ax = plt.subplots(1, 3, figsize=(15, 5))
# Bug fix: `activations` was previously never imported and the attribute was
# misspelled (`.activations`), so the softmax was never actually replaced.
model.layers[-1].activation = activations.linear
temp_model = utils.apply_modifications(model)
for i, img in enumerate(X_test[:3]):
    pred_class = np.argmax(model.predict(np.expand_dims(img, axis=0)))
    # Saliency of the predicted class w.r.t. the input pixels.
    grads = visualize_saliency(temp_model, -1, pred_class,
                               img, backprop_modifier=None)
    # OpenCV loads BGR; reverse channels for matplotlib display.
    ax[i].imshow(img[:, :, (2, 1, 0)])
    ax[i].imshow(grads, alpha=0.6)
del temp_model
# -
# ## Video Demo
# Load one icon per character and resize for overlaying on video frames.
path = 'icon'
icon_list = []
for char_name in os.listdir(path):
    icon_path = os.path.join(path, char_name)
    icon = cv2.imread(icon_path)
    icon = cv2.resize(icon, (48, 48))
    icon_list.append(icon)
video_path = 'video/video1.mp4'
output_path = 'video/output1.mp4'
# Annotate the input video with per-frame predictions and save the result.
get_predict_video(video_path, output_path, height, width, model, icon_list, le.classes_)
from IPython.display import YouTubeVideo
# Embed the uploaded demo result.
YouTubeVideo('myXXcyaSwYk')
| simpsons_cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <br>
# <font color='black', size="6">
# <center>
# <b> List Comprehension </b>
# </center>
# </font>
# </br>
# # Why comprehensions are useful
# +
# Without a list comprehension
numbers = list()
for i in range(20):
    numbers.append(i)
print(numbers)
# With a list comprehension
numbers = [i for i in range(20)]
print(numbers)
# -
# # Comprehension + arithmetic
# +
multiple_of_two = [2*num for num in range(20)]
print(multiple_of_two)
added_by_two = [2+num for num in range(20)]
print(added_by_two)
square_num = [num**2 for num in range(20)]
print(square_num)
# -
# # Accessing an existing list with a comprehension
# +
scores = [20, 60, 20, 10, 70]
double_scores = [2*score for score in scores]
print(double_scores)
# -
# # Accessing several lists by index inside a comprehension, plus zip
# +
e_scores = [10*num for num in range(10)]
m_scores = [20*num for num in range(10)]
sum_scores = list()
for score_idx in range(len(e_scores)):
    sum_scores.append(e_scores[score_idx] + m_scores[score_idx])
print("only using for")
print(sum_scores,'\n')
sum_scores = [e_scores[idx] + m_scores[idx] for idx in range(len(e_scores))]
print("using with comprehension")
print(sum_scores,'\n')
sum_scores = list()
for e_score, m_score in zip(e_scores, m_scores):
    sum_scores.append(e_score + m_score)
print("only using zip")
print(sum_scores,'\n')
sum_scores = [e_score + m_score for e_score, m_score in zip(e_scores, m_scores)]
print("using with zip and list comprehension")
print(sum_scores)
| Python Practice/10_List Comprehension.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Implementation of LEAP algorithm
# +
# !pip install plinkio
import os
import re
import numpy as np
import pandas as pd
from plinkio import plinkfile
import time
#from scipy.linalg.blas import dsyrk
#--can't find a way to get this working. Perhaps blas routines are missing.
# All dataset files live under dataset1/.
data_path = 'dataset1'
os.chdir(data_path)
# +
## Load data from the PLINK .bed fileset:
bed = plinkfile.open("dataset1")
loci = bed.get_loci()
print("Length of locuses", len(loci))
chromosomes = np.unique([x.chromosome for x in loci])
print("# of chromosomes in data:",chromosomes)
samples = bed.get_samples()
print("Number of individuals in data:", len(samples))
# +
## Place data into a dataframe:
mat = np.zeros((len(loci),len(samples)), dtype='int16') #1/4 of the space taken up by using int16
## don't know a faster method of extracting the data from the bed file.
i=0
for row in bed:
    mat[i,:] = np.array([snp for snp in row])
    i+=1
# this matrix is equivalent to transposed bed.val
print("Data type:", mat.dtype)
print("Size of bed matrix: %4.0fmb\n" %(mat.nbytes/(1024**2)))
# create a multi-indexed column space: (chromosome, snp-name) per locus
tuples = [(x.chromosome,x.name) for x in loci]
ml_index = pd.MultiIndex.from_tuples(tuples, names = ['chromosome', 'snp'])
df = pd.DataFrame(mat.transpose(), columns=ml_index, index = [x.iid for x in bed.get_samples()])
df.info()
df.iloc[:5,:5]
# -
# $1$. Find and exclude related individuals (kinship coeff > 0.05)
# +
# Compute the covariance matrix between individuals; overly related pairs are
# removed in the next cell.
# The reference LEAP code uses BLAS dsyrk, which halves the computation; that
# routine is not reachable from here.
# Convert once and mean-centre (the old code ran .astype('float32') twice).
df32 = df.astype('float32')
df = df32 - df32.mean()
print(df.iloc[:5,:5])
df.info() #roughly doubled memory usage though still not the 80mb it was earlier
cov = np.dot(df, df.transpose())/df.shape[1] #having difficulties with scipy's linalg module
#note that the above takes more than half the time of np.cov
print("\nCovariance shape:" , cov.shape)
print("Covariance memory usage in mb:", cov.nbytes/(1024**2))
cov[:5,:5]
# +
cutoff = .05
bool_arr = np.tril(cov, k=-1)>cutoff
y_idx,_ = np.where(bool_arr)
print("shape of y:", y_idx.shape)
print("\nremoving %d individuals" %y_idx.shape[0])
#note, they marked 54 so we marked more peeps, we effectively remove 47. Something doesn't line up.
indxToKeep = set(range(cov.shape[0]))
[indxToKeep.remove(i) for i in np.unique(y_idx)]
keepArr = np.array(list(indxToKeep))
keepArr.shape
# -
# Keep nonrelated individuals.
# Fix: .ix was removed from pandas; keepArr holds positional row indexes,
# so the positional indexer .iloc is the correct replacement.
df = df.iloc[keepArr]
df.shape
# $2$. Compute an eigendecomposition of kinship matrix
import scipy.linalg as la
def eigendecomp(cov):
    """Eigendecompose a symmetric (kinship/covariance) matrix.

    Returns (s, U): eigenvalues clipped at zero and their eigenvectors,
    in ascending order, with numerically-zero eigenvalues (<= 1e-12) dropped.
    """
    s, U = la.eigh(cov)
    s[s < 0] = 0
    ind = np.argsort(s)
    # Bug fix: the keep-mask must be evaluated in sorted order. The old code
    # applied `s > 1e-12` (original order) to the *sorted* index array, which
    # was only accidentally correct because eigh returns ascending values.
    ind = ind[s[ind] > 1e-12]
    U = U[:, ind]
    s = s[ind]
    return s, U
eigendecomp(cov)
# $3$. Compute heritability (h2) using the method of Golan et al.
# +
from sklearn.linear_model import LogisticRegression
from scipy import stats
# read in phenofile:
# Space-separated, no header: family id, individual id, binary phenotype.
phenos = pd.read_csv("dataset1.phe", sep=' ', header=None, engine='c')
phenos.columns = ['fam', 'person', 'pheno']
phenos.set_index(keys = 'person', inplace=True)
phenos.iloc[:5,:5]
# -
def calcH2Binary(XXT_o, phe_o, probs_o, thresholds_o, keepArr_o, prev, h2coeff):
    """Estimate heritability (h2) for a binary trait via Golan et al.'s regression.

    INPUT:
        1. XXT_o - covariance (kinship) matrix.
        2. phe_o - np.array of binary phenotypes.
        3. probs_o - np.array of per-individual case probabilities.
        4. thresholds_o - np.array of estimated liability thresholds.
        5. keepArr_o - np.array of indexes excluding highly related individuals.
        6. prev - trait prevalence.
        7. h2coeff - scaling coefficient (the reference code uses 1.0).

    Returns the regression slope, i.e. the h2 estimate.

    NOTES:
        Simplified from the original (thresholds assumed non-None):
        https://github.com/omerwe/LEAP/blob/master/leap/calc_h2.py
    """
    K = prev
    P = np.sum(phe_o > 0) / float(phe_o.shape[0])
    # Bug fix: subset with the *parameter* keepArr_o — the previous version
    # read a module-level `keepArr`, silently ignoring this argument.
    XXT = XXT_o[np.ix_(keepArr_o, keepArr_o)]
    phe = phe_o[keepArr_o]
    probs = probs_o[keepArr_o]
    thresholds = thresholds_o[keepArr_o]
    # Per-individual prevalence adjusted for case/control ascertainment.
    Ki = K*(1-P) / (P*(1-K)) * probs / (1 + K*(1-P) / (P*(1-K))*probs - probs)
    phit = stats.norm(0,1).pdf(thresholds)
    probsInvOuter = np.outer(probs*(1-probs), probs*(1-probs))
    y = np.outer(phe-probs, phe-probs) / np.sqrt(probsInvOuter)
    sumProbs = np.tile(np.column_stack(probs).T, (1,probs.shape[0])) + np.tile(probs, (probs.shape[0], 1))
    Atag0 = np.outer(phit, phit) * (1 - (sumProbs)*(P-K)/(P*(1-K)) + np.outer(probs, probs)*(((P-K)/(P*(1-K)))**2)) / np.sqrt(probsInvOuter)
    B0 = np.outer(Ki + (1-Ki)*(K*(1-P))/(P*(1-K)), Ki + (1-Ki)*(K*(1-P))/(P*(1-K)))
    x = (Atag0 / B0 * h2coeff) * XXT
    # Regress over the strict upper triangle only: each pair counted once.
    y = y[np.triu_indices(y.shape[0], 1)]
    x = x[np.triu_indices(x.shape[0], 1)]
    slope, intercept, rValue, pValue, stdErr = stats.linregress(x, y)
    return slope
def calcLiabThresholds_3xx(U, s, keepArr, phe, numRemovePCs=10, prevalence=.001, covar=None):
    """Estimate per-individual liability thresholds for a binary phenotype.

    INPUTS:
        1. U - eigenvectors of the covariance (kinship) matrix.
        2. s - eigenvalues of the covariance (kinship) matrix.
        3. keepArr - np.array of indexes excluding highly related individuals.
        4. phe - binary phenotypes (a pandas Series: .iloc is used below).
        5. numRemovePCs - number of top principal components used as features.
        6. prevalence - population prevalence of the trait.
        7. covar - unused; kept for signature compatibility with the original.

    OUTPUT:
        [probs, thresholds]:
        1. probs - probability estimates from a regularized logistic regression.
        2. thresholds - per-individual liability thresholds.

    NOTES:
        Original: https://github.com/omerwe/LEAP/blob/master/leap/calc_h2.py
        Fixes vs. the previous version: numRemovePCs is no longer silently
        overwritten with 10 (the parameter now takes effect), and a dead
        computation (t_cov, which also read the module-level `cov`) plus the
        unused isCaseControl flag were removed.
    """
    prev = prevalence
    # Binarize the phenotype if it is not already strictly {0, 1}.
    pheUnique = np.unique(phe)
    if ~np.all(pheUnique == np.array([0,1])):
        pheMean = phe.mean()
        phe[phe <= pheMean] = 0
        phe[phe > pheMean] = 1
    # Equivalent to an SVD: project onto the top principal components.
    G = U[:, -numRemovePCs:] * np.sqrt(s[-numRemovePCs:])
    # Regularized logistic regression; parameter settings taken from LEAP.
    Logreg = LogisticRegression(penalty='l2', C=500000, fit_intercept=True)
    Logreg.fit(G[keepArr, :], phe.iloc[keepArr])
    # Per-individual case probabilities for *all* individuals.
    probs = Logreg.predict_proba(G)[:,1]
    # Observed case fraction vs. population prevalence -> thresholds.
    P = np.sum(phe==1) / float(phe.shape[0])
    Ki = prev*(1-prev) / (P*(1-prev)) * probs / (1 + prev*(1-prev) / (P*(1-prev))*probs - probs)
    thresholds = stats.norm(0,1).isf(Ki)
    # Clamp degenerate probabilities to +/- sentinel thresholds.
    thresholds[Ki>=1.] = -999999999
    thresholds[Ki<=0.] = 999999999
    return([probs, thresholds])
# $4$. Estimate liabilities
# +
import numpy as np
import sklearn.linear_model
import scipy.optimize as opt
# From LEAP documentation
'''
def evalProbitReg(beta, X, cases, controls, thresholds, invRegParam, normPDF, h2):
"""
NOTES: not much to do here as everything is in numpy.
"""
XBeta = np.ravel(X.dot(beta)) - thresholds
phiXBeta = normPDF.pdf(XBeta)
PhiXBeta = normPDF.cdf(XBeta)
logLik = np.sum(np.log(PhiXBeta[cases])) + np.sum(np.log(1-PhiXBeta[controls]))
w = np.zeros(X.shape[0])
w[cases] = -phiXBeta[cases] / PhiXBeta[cases]
w[controls] = phiXBeta[controls] / (1-PhiXBeta[controls])
grad = X.T.dot(w)
#regularize
logLik -= 0.5*invRegParam * beta.dot(beta) #regularization
grad += invRegParam * beta
return [-logLik, grad]
def probitRegHessian(beta, X, cases, controls, thresholds, invRegParam, normPDF, h2):
"""
NOTES: not much to do here as everything is in numpy. Though, I precalculated
PhiXBeta and then subset that because it was originally done for each subset. It is, trivially,
faster to precompute the element-wise squaring and then subset.
"""
XBeta = np.ravel(X.dot(beta)) - thresholds
phiXBeta = normPDF.pdf(XBeta)
PhiXBeta = normPDF.cdf(XBeta)
XbetaScaled = XBeta #/(1-h2)
PhiXBeta2 = np.square(PhiXBeta)
R = np.zeros(X.shape[0])
R[cases] = (XbetaScaled[cases]*PhiXBeta[cases] + phiXBeta[cases]) / PhiXBeta2[cases]
R[controls] = (-XbetaScaled[controls]*(1-PhiXBeta[controls]) + phiXBeta[controls]) / (1 - PhiXBeta2[controls])
R *= phiXBeta
H = (X.T * R).dot(X)
H += invRegParam
return H
def probitRegression(X, y, thresholds, numSNPs, numFixedFeatures, h2, useHess, maxFixedIters, epsilon, nofail):
"""
If I had more time, I would probably use PyMC3 for this ... eventually. For now, just removed superfluous
parts. Can also cythonize the loop in "Fit fixed effects" -- for later.
"""
regParam = h2 / float(numSNPs)
Linreg = sklearn.linear_model.Ridge(alpha=1.0/(2*regParam), fit_intercept=False, normalize=False, solver='lsqr')
Linreg.fit(X, y)
initBeta = Linreg.coef_
np.random.seed(1234)
normPDF = stats.norm(0, np.sqrt(1-h2))
invRegParam = 1.0/regParam
controls = (y==0)
cases = (y==1)
funcToSolve = evalProbitReg
hess =(probitRegHessian if useHess else None)
jac= True
method = 'Newton-CG'
args = (X, cases, controls, thresholds, invRegParam, normPDF, h2)
print 'Beginning Probit regression...'
t0 = time.time()
optObj = opt.minimize(funcToSolve, x0=initBeta, args=args, jac=jac, method=method, hess=hess)
print 'Done in', '%0.2f'%(time.time()-t0), 'seconds'
if (not optObj.success):
print 'Optimization status:', optObj.status
print optObj.message
if (nofail == 0): raise Exception('Probit regression failed with message: ' + optObj.message)
beta = optObj.x
#Fit fixed effects
if (numFixedFeatures > 0):
thresholdsEM = np.zeros(X.shape[0]) + thresholds
for i in xrange(maxFixedIters):
print 'Beginning fixed effects iteration', i+1
t0 = time.time()
prevBeta = beta.copy()
#Learn fixed effects
thresholdsTemp = thresholdsEM - X[:, numFixedFeatures:].dot(beta[numFixedFeatures:])
args = (X[:, :numFixedFeatures], cases, controls, thresholdsTemp, 0, normPDF, h2)
optObj = opt.minimize(funcToSolve, x0=beta[:numFixedFeatures], args=args, jac=True, method=method, hess=hess)
if (not optObj.success): print optObj.message; #raise Exception('Learning failed with message: ' + optObj.message)
beta[:numFixedFeatures] = optObj.x
#Learn random effects
thresholdsTemp = thresholdsEM - X[:, :numFixedFeatures].dot(beta[:numFixedFeatures])
args = (X[:, numFixedFeatures:], cases, controls, thresholdsTemp, invRegParam, normPDF, h2)
optObj = opt.minimize(funcToSolve, x0=beta[numFixedFeatures:], args=args, jac=True, method=method, hess=hess)
if (not optObj.success): print optObj.message; #raise Exception('Learning failed with message: ' + optObj.message)
beta[numFixedFeatures:] = optObj.x
diff = np.sqrt(np.mean(beta[:numFixedFeatures]**2 - prevBeta[:numFixedFeatures]**2))
print 'Done in', '%0.2f'%(time.time()-t0), 'seconds'
print 'Diff:', '%0.4e'%diff
if (diff < epsilon): break
return beta
def probit(bed, pheno, h2, prev, eigen, outFile, keepArr, thresholds,covar=None, nofail=0,
numSkipTopPCs=10, mineig1e-3, hess=1, recenter=1, maxFixedIters=100, epsilon=1e-3, treatFixedAsRandom=False):
"""
No longer read in the bed file.
Default parameters set from the argparse section in the original code. Original code can be found
in:
https://github.com/omerwe/LEAP/blob/master/leap/probit.py
"""
#Extract phenotype
if isinstance(pheno, dict): phe = pheno['vals']
else: phe = pheno
if (len(phe.shape)==2):
if (phe.shape[1]==1): phe=phe[:,0]
else: raise Exception('More than one phenotype found')
if (keepArr is None): keepArr = np.ones(phe.shape[0], dtype=np.bool)
S = eigen['arr_1'] * bed.sid.shape[0]
U = eigen['arr_0']
S = np.sqrt(S)
goodS = (S>mineig)
if (numSkipTopPCs > 0): goodS[-numSkipTopPCs:] = False
if (np.sum(~goodS) > 0): print 'Removing', np.sum(~goodS), 'PCs with low variance'
G = U[:, goodS]*S[goodS]
#Set binary vector
pheUnique = np.unique(phe)
if (pheUnique.shape[0] != 2): raise Exception('phenotype file has more than two values')
pheMean = phe.mean()
cases = (phe>pheMean)
phe[~cases] = 0
phe[cases] = 1
#run probit regression
t = stats.norm(0,1).isf(prev)
if (thresholds is not None): t = thresholds
#Recenter G to only consider the unrelated individuals
if recenter: G -= np.mean(G[keepArr, :], axis=0)
else: G -= np.mean(G, axis=0)
numFixedFeatures = 0
if (covar is not None):
covar -= covar.mean()
covar /= covar.std()
covar *= np.mean(np.std(G, axis=0))
G = np.concatenate((covar, G), axis=1)
if (not treatFixedAsRandom): numFixedFeatures += covar.shape[1]
#Run Probit regression
probitThresh = (t if thresholds is None else t[keepArr])
beta = probitRegression(G[keepArr, :], phe[keepArr], probitThresh, bed.sid.shape[0], numFixedFeatures, h2, hess, maxFixedIters, epsilon, nofail)
#Predict liabilities for all individuals
meanLiab = G.dot(beta)
liab = meanLiab.copy()
indsToFlip = ((liab <= t) & (phe>0.5)) | ((liab > t) & (phe<0.5))
liab[indsToFlip] = stats.norm(0,1).isf(prev)
if (outFile is not None):
#save liabilities
f = open(outFile+'.liabs', 'w')
for ind_i,[fid,iid] in enumerate(bed.iid): f.write(' '.join([fid, iid, '%0.3f'%liab[ind_i]]) + '\n')
f.close()
#save liabilities after regressing out the fixed effects
if (numFixedFeatures > 0):
liab_nofixed = liab - G[:, :numFixedFeatures].dot(beta[:numFixedFeatures])
f = open(outFile+'.liab_nofixed', 'w')
for ind_i,[fid,iid] in enumerate(bed.iid): f.write(' '.join([fid, iid, '%0.3f'%liab_nofixed[ind_i]]) + '\n')
f.close()
liab_nofixed2 = meanLiab - G[:, :numFixedFeatures].dot(beta[:numFixedFeatures])
indsToFlip = ((liab_nofixed2 <= t) & (phe>0.5)) | ((liab_nofixed2 > t) & (phe<0.5))
liab_nofixed2[indsToFlip] = stats.norm(0,1).isf(prev)
f = open(outFile+'.liab_nofixed2', 'w')
for ind_i,[fid,iid] in enumerate(bed.iid): f.write(' '.join([fid, iid, '%0.3f'%liab_nofixed2[ind_i]]) + '\n')
f.close()
#Return phenotype struct with liabilities
liabsStruct = {
'header':[None],
'vals':liab,
'iid':bed.iid
}
return liabsStruct
'''
# -
# $5$. Test for associations
# +
# Paper uses fastlmm.association.single_snp function
# Dependent on Python 2.7, will attempt statsmodel lmm
# -
# Read in estimated liabilities
# Read in the liabilities estimated by the probit step above
liabs = pd.read_csv("dataset1.phe.liab", sep=' ', header=None, engine='c')
liabs.columns = ['fam', 'person', 'liab']
liabs.set_index(keys = 'person', inplace=True)
# Keep only the unrelated individuals.
# FIX: DataFrame.ix was deprecated and removed in pandas 1.0; use .loc for
# label/boolean selection and .iloc for positional selection instead.
liabs = liabs.loc[keepArr]
liabs.iloc[:5,:5]
# Merge liabilities with snps
# NOTE(review): concat aligns on the index — assumes `df` is also indexed by
# person; confirm upstream.
snps_estliabs = pd.concat([liabs, df], axis = 1)
snps_estliabs.iloc[:5,:5]
Y = snps_estliabs.iloc[:, 1]      # estimated liability column
snps = snps_estliabs.iloc[:, 2:]  # genotype columns
# ! pip install git+https://github.com/nickFurlotte/pylmm
from pylmm import lmm
TS,PS = lmm.GWAS(Y, snps, cov, REML = True, refit = True)
# Run through LEAP pipeline for each chromosome (parts 2-5)
| GeneBurinskiyAllenRoss_Weissbrod_FinalProject/GeneBurinskiyAllenRoss_Weissbrod_FinalProject.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ArviZ Quickstart
# +
# %matplotlib inline
import arviz as az
import numpy as np
# ArviZ ships with style sheets!
# apply the dark-grid style to all subsequent plots
az.style.use('arviz-darkgrid')
# -
# ## Get started with plotting
#
# ArviZ is designed to be used with libraries like [PyStan](https://pystan.readthedocs.io) and [PyMC3](https://docs.pymc.io), but works fine with raw numpy arrays.
# Posterior plot straight from a raw numpy array of draws
az.plot_posterior(np.random.randn(100_000));
# Plotting a dictionary of arrays, ArviZ will interpret each key as the name of a different random variable. Each row of an array is treated as an independent series of draws from the variable, called a _chain_. Below, we have 10 chains of 50 draws each for four different distributions.
size = (10, 50)  # (chains, draws per chain)
az.plot_forest({
    'normal': np.random.randn(*size),
    'gumbel': np.random.gumbel(size=size),
    'student t': np.random.standard_t(df=6, size=size),
    'exponential': np.random.exponential(size=size)
});
# ## Plotting with PyMC3 objects
#
# ArviZ is designed to work well with high dimensional, labelled data. Consider the [eight schools model](http://andrewgelman.com/2014/01/21/everything-need-know-bayesian-statistics-learned-eight-schools/), which roughly tries to measure the effectiveness of SAT classes at eight different schools. To show off ArviZ's labelling, I give the schools the names of [a different eight schools](https://en.wikipedia.org/wiki/Eight_Schools_Association).
#
# This model is small enough to write down, is hierarchical, uses labelling, and a centered parameterization causes [divergences](http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html) (which are interesting for illustration):
# +
import pymc3 as pm

J = 8  # number of schools
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])        # observed treatment effects
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])  # known standard errors
schools = np.array(['Choate', 'Deerfield', '<NAME>', '<NAME>',
                    'Hotchkiss', 'Lawrenceville', "<NAME>", '<NAME>'])
# Centered parameterization of the hierarchical eight-schools model;
# chosen deliberately because it produces divergences (see text above).
# NOTE(review): `sd=` is the PyMC3 (<4.0) keyword; PyMC >= 4 renamed it to `sigma=`.
with pm.Model() as centered_eight:
    mu = pm.Normal('mu', mu=0, sd=5)                        # population mean effect
    tau = pm.HalfCauchy('tau', beta=5)                      # between-school spread
    theta = pm.Normal('theta', mu=mu, sd=tau, shape=J)      # per-school effects
    obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)  # likelihood
    # This pattern is useful in PyMC3
    prior = pm.sample_prior_predictive()
    centered_eight_trace = pm.sample()
    posterior_predictive = pm.sample_posterior_predictive(centered_eight_trace)
# -
# Most ArviZ functions work fine with `trace` objects from PyMC3:
az.plot_autocorr(centered_eight_trace, var_names=['mu', 'tau']);
# ### Convert to InferenceData
#
# For much more powerful querying, analysis and plotting, we can use built-in ArviZ utilities to convert PyMC3 objects to xarray datasets. Note we are also giving some information about labelling.
#
# ArviZ is built to work with `InferenceData`, and the more *groups* it has access to, the more powerful analyses it can perform. Here is a plot of the trace, which is common in PyMC3 workflows. Note the intelligent labels.
# Bundle the PyMC3 trace plus the prior/posterior predictive samples into an
# arviz.InferenceData; coords/dims attach the school labels to theta and obs.
data = az.from_pymc3(trace=centered_eight_trace,
                     prior=prior,
                     posterior_predictive=posterior_predictive,
                     coords={'school': schools},
                     dims={'theta': ['school'], 'obs': ['school']})
data
az.plot_trace(data);
# ## Plotting with PyStan objects
#
# ArviZ is built with first class support for PyStan objects, and can plot raw `fit` objects in a reasonable manner. Here is the same centered eight schools model:
# +
import pystan

# Stan source for the same centered eight-schools model. The generated
# quantities block computes pointwise log-likelihood (for loo/waic) and
# posterior predictive draws y_hat for later use with ArviZ.
schools_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real theta[J];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
theta ~ normal(mu, tau);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
"""
schools_dat = {'J': 8,
               'y': [28, 8, -3, 7, -1, 1, 18, 12],
               'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}
# compiles the model to C++ — slow on first run
sm = pystan.StanModel(model_code=schools_code, verbose=False)
fit = sm.sampling(data=schools_dat, iter=1000, chains=4)
# -
# quick look at mu and tau straight from the PyStan fit object
az.plot_density(fit, var_names=['mu', 'tau']);
# Again, converting to `InferenceData` (a netcdf datastore that loads data into `xarray` datasets), we can get much richer labelling and mixing of data. Here is a plot showing where the Hamiltonian sampler had divergences
# Convert the PyStan fit to InferenceData, wiring up the predictive and
# log-likelihood quantities defined in the Stan generated quantities block.
# NOTE(review): 'theta_tilde' is not a parameter of this (centered) model —
# presumably left over from a non-centered variant; confirm it is ignored.
data = az.from_pystan(posterior=fit,
                      posterior_predictive='y_hat',
                      observed_data=['y'],
                      log_likelihood='log_lik',
                      coords={'school': schools},
                      dims={'theta': ['school'], 'y': ['school'], 'log_lik': ['school'], 'y_hat': ['school'], 'theta_tilde': ['school']})
data
az.plot_pair(data, coords={'school': ['Choate', 'Deerfield', '<NAME>']}, divergences=True);
| doc/notebooks/Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <figure>
# <IMG SRC="https://raw.githubusercontent.com/mbakker7/exploratory_computing_with_python/master/tudelft_logo.png" WIDTH=250 ALIGN="right">
# </figure>
#
# # Exploratory Computing with Python
# *Developed by <NAME>*
# ## Notebook 10: Regression analysis I
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# In this Notebook, we learn how to fit a model to a set of data. In the first half of this Notebook, we fit several different models to the same data set, also called regression analysis. In the second half of this Notebook, we look under the hood of these regression analyses, we discuss how the best parameters are computed, how the goodness of fit can be quantified, and what these other parameters are that some of the regression functions return.
# ### Root mean square error
# One way to quantify the fit between data and a model is to compute the root mean square error. The error is defined as the difference between the observed value and the modeled value. Another term for the error is the residual. If the error of data point $i$ is written as $\varepsilon_i$, and the total number of observations is $N$, then the sum of squared errors $S$ is
#
# $$S = \sum{\varepsilon_i^2}$$
#
# When the total number of observations is $N$, the root mean square error $E$ is computed as
#
# $$E=\sqrt{\frac{1}{N}S}=\sqrt{\frac{1}{N}\sum{\varepsilon_i^2}}$$
#
# The root mean square error is an estimate of the goodness of fit and can be computed for any model and any dataset.
# ### Exercise 1. Fit a straight line
# Load the $x,y$ values of 20 data points from the file `xydatafit.dat`. Fit a straight line through the data using the `linregress` function of `scipy.stats`. Note that the `linregress` function returns 3 other values beyond the slope and intercept (use `linregress?` to find out); more on these 3 additional values later on in this Notebook. Plot the data and the fitted straight line. Add a legend. Add the root mean square error as a title to the graph. Print the optimal values for the slope and intercept of the straight line to the screen.
# +
from scipy.stats import linregress
# Load the 20 (x, y) data points: row 0 holds the x-values, row 1 the y-values
xy = np.genfromtxt('xydatafit.dat', delimiter=' ' )
x = np.linspace(0, 20, 100)
# Least-squares straight line through the data
lsq = linregress(xy[0], xy[1])
y = lsq.slope * x + lsq.intercept
# Root mean square error of the fitted line at the observed points
npts = len(xy[0])
sq_err_sum = sum((xy[1, i] - (lsq.slope * xy[0, i] + lsq.intercept)) ** 2
                 for i in range(npts))
E = np.sqrt((1 / npts) * sq_err_sum)
plt.plot(xy[0], xy[1], 'rd', x, y, 'b')
plt.title(('The mean square error is:', E))
plt.legend(('data', 'fitted line'), loc='best');
print('The optimal slope is:', lsq.slope)
print('The optimal intercept is:', lsq.intercept)
# -
# ### Exercise 2. Fit a polynomial
# Use the $x,y$ values of 20 data points from the file `xydatafit.dat`. Fit a second degree polynomial (a parabola) through the data using the `np.polyfit` function. Plot the data and the fitted parabola. Add a legend. Report the root mean squared error in the title. Did the root mean squared error improve?
# +
# Least-squares parabola through the data (polyfit returns highest power first)
z2 = np.polyfit(xy[0], xy[1], 2)
y2 = z2[0] * x ** 2 + z2[1] * x + z2[2]
# Root mean square error of the parabola at the observed points
npts = len(xy[0])
sq_err_sum = sum((xy[1, i] - (z2[0] * xy[0, i] ** 2 + z2[1] * xy[0, i] + z2[2])) ** 2
                 for i in range(npts))
E2 = np.sqrt((1 / npts) * sq_err_sum)
plt.plot(xy[0], xy[1], 'rd', x, y2, 'b')
plt.title(('The mean square error is:', E2))
plt.legend(('data', 'fitted parabola'), loc='best');
# compare against the straight-line fit of Exercise 1
if E2 < E:
    print('The mean square error did improve')
else:
    print('The mean square error did not improve')
# -
# ### Fitting an arbitrary function
# Python functions to fit a straight line or polynomial are readily available. There are many other functions that you may want to use to fit to your data. The function `curve_fit` can be used to fit an arbitrary function that you define; `curve_fit` is part of the `scipy.optimize` package. The `curve_fit` function requires you to write a function that takes as its first argument the independent variable (in our case above that are the $x$-values) followed by the parameter(s) that you want to fit and returns the value of the function at all the $x$ values for the supplied parameters. For example, to fit a straight line, you need to write a function
def func(x, a, b):
    """Straight-line model: return a * x + b evaluated at x."""
    line = a * x
    return line + b
# The `curve_fit` function needs to be called with three arguments: the function that you want to fit, the values of the independent variable (in our case $x$), and the values of the depenedent variable (in our case $y$). The `curve_fit` funtion than returns an array with the optimal parameters (in a least squares sense) and a second array containing the covariance of the optimal parameters (more on that later). For example, for the case of Exercise 1:
from scipy.optimize import curve_fit
x, y = np.loadtxt('xydatafit.dat') # in case these were modified in one of the exercises
# curve_fit returns (optimal parameters, covariance matrix of those parameters)
popt, pcov = curve_fit(func, x, y)
print('optimal parameters:', popt)
# Note that these optimal parameters are identical to the values you computed in Exercise 1.
# ### Exercise 3. Fit an exponential function with `curve_fit`
# Use the $x,y$ values of 20 data points from the file `xydatafit.dat`. Fit the function $f(x) = A\exp(ax) + b$ through the data using the `curve_fit` function of `scipy.optimize`. Plot the data and the fitted function. Report the root mean squared error in the title. Did the root mean squared error improve?
# +
def func2(x, A, B, C):
    """Exponential model: return C + A * exp(B * x)."""
    return C + A * np.exp(B * x)

# Fit the exponential model to the data loaded above
popt, pcov = curve_fit(func2, x, y)
x2 = np.linspace(0, 20, 100)
y2 = func2(x2, *popt)
# Root mean square error of the exponential fit at the observed points
npts = len(xy[0])
sq_err_sum = sum((y[i] - func2(x[i], popt[0], popt[1], popt[2])) ** 2
                 for i in range(npts))
E3 = np.sqrt((1 / npts) * sq_err_sum)
plt.plot(x, y, 'rd', x2, y2, 'b')
plt.title(('The mean square error is:', E3))
plt.legend(('data', 'fitted function'), loc='best');
# compare against the parabola of Exercise 2
if E3 < E2:
    print('The mean square error did improve')
else:
    print('The mean square error did not improve')
# -
# ### Least squares
# In the exercises above, the *optimal* or *best* parameters were obtained with either the `linregress`, `polyfit` or `curve_fit` methods. But how do these methods do that? Or maybe a more fundamental question: 'What is *optimal*?' or 'What is *best*?' In this Notebook, we define *best* as the parameter set that minimizes the sum of the squared errors (so it also minimizes the root mean square error). Such an optimization approach is also referred to as a *least squares* approach.
#
# For example, consider the following three data points:
# Three observations that do not lie exactly on one straight line
xdata = np.array([5.0, 10.0, 15.0])
ydata = np.array([3.0, 6.0, 7.0])
plt.plot(xdata, ydata, 'bo', label='observed')
plt.legend();
# We can try to fit a straight line through these three points, but you can already see that the three points don't lie on a line, so there is no straight line that goes exactly through the three points. The straight line is written as $y=ax+b$, where $a$ is the slope of the line and $b$ is called the intercept (it is the value of $y$ for $x=0$). We write a function that takes as input arguments an array of observed $x$ values and an array of corresponding $y$ values, and values for the slope $a$ and intercept $b$. The function returns the sum of squared errors, where the error is defined as the difference betweeen the observed value of $y$ and the value of the straight line at that same $x$ value. The equation for the error at point $i$ is $\varepsilon_i$ and may be written as
#
# $\varepsilon_i = y_i - (ax_i + b)$
def sse(a, b, x=xdata, y=ydata):
    """Return the sum of squared errors of the line y = a*x + b for points (x, y)."""
    residuals = y - (a * x + b)
    return np.sum(residuals ** 2)
# As you can see, different values of $a$ and $b$ give different values for the sum of squared errors `sse`. The `sse` for $a=1$, $b=2$ is larger than for $a=1$, $b=1$.
print('sse of a=1, b=2:', sse(a=1, b=2))
print('sse of a=1, b=1:', sse(a=1, b=1))
# What we can do is compute the `sse` function for a larger number of $a$ and $b$ values. If we do that on a regular grid, we can create contours of the `sse` function. The `sse` function is constant along any contour. A contour map of the `sse` function is similar to an elevation map. The goal is now to find the combination of $a$ and $b$ that gives the smallest value of the sum of squared errors. In the graph below, you can see that the smallest value of `sse` is obtained at $a\approx 0.4$, $b\approx 1.3$ (you have to look closely for the darkest blue in the figure; the area beyond the yellow is $S>10$).
# Evaluate sse on a 50x50 grid of (a, b); np.vectorize lets the scalar-valued
# sse broadcast over the meshgrid arrays.
a, b = np.meshgrid(np.linspace(0.2, 0.6, 50), np.linspace(0, 2, 50))
ssevec = np.vectorize(sse)
z = ssevec(a, b)
plt.figure()
plt.contourf(a, b, z, np.linspace(0, 10, 100))
plt.colorbar()
plt.xlabel('a')
plt.ylabel('b');
# How do we minimize the sum of squared errors? As usual, we find the minimum of a function by taking the derivative and setting it to zero. This is a little involved, but not too difficult. The sum of squared errors is written as $E$
#
# $$
# E=\sum_{i=1}^N\varepsilon_i^2=
# \sum_{i=1}^N[y_i-(ax_i+b)]^2
# $$
#
# where $N$ is the number of observations. The slope $a$ and intercept $b$ are determined such that $E$ is minimized, which means that the following derivatives are zero
#
# $$\frac{\partial E}{\partial a}=0 \qquad \frac{\partial E}{\partial b}=0$$
#
# Differentiation gives (using the chain rule)
#
# $$
# \frac{\partial E}{\partial a}=\sum_{i=1}^N[2(y_i-ax_i-b)(-x_i)]=
# 2a\sum_{i=1}^Nx_i^2+2b\sum_{i=1}^Nx_i-2\sum_{i=1}^Nx_iy_i
# $$
#
# $$
# \frac{\partial E}{\partial b}=\sum_{i=1}^N[2(y_i-ax_i-b)(-1)]=
# 2a\sum_{i=1}^Nx_i+2bN-2\sum_{i=1}^Ny_i
# $$
#
# Setting the derivatives equal to zero and division by 2 gives
#
# $$
# a\sum_{i=1}^Nx_i^2+b\sum_{i=1}^Nx_i-\sum_{i=1}^Nx_iy_i=0
# $$
#
# $$
# a\sum_{i=1}^Nx_i+bN-\sum_{i=1}^Ny_i=0
# $$
#
# This system of two linear equations with two unknowns ($a$ and $b$) may be solved to give
#
# $$ a=\frac{N\sum_{i=1}^Nx_iy_i-\sum_{i=1}^Nx_i\sum_{i=1}^Ny_i}
# {N\sum_{i=1}^Nx_i^2-\sum_{i=1}^Nx_i\sum_{i=1}^Nx_i}
# $$
#
# $$
# b=\bar{y}-a\bar{x}
# $$
# where $\bar{x}$ and $\bar{y}$ are the mean values of $x$ and $y$, respectively.
# ### Exercise 4. Fitting a straight line revisited
# Compute the optimal values (in the least squares sense) of $a$ and $b$ using the two equations derived above and the corresponding sum of squared errors (using the `xdata` and `ydata` arrays for the three points given above). Next, use the `linregress` function of the `scipy.stats` package to compute the optimal values and verify that the `linregress` function gives the same answers. Create a graph that shows the three data points and the fitted straight line.
# +
x = np.linspace(5, 15, 100)
# Sums needed by the closed-form least-squares solution derived above
npts = len(xdata)
sum_xy = sum(xdata[i] * ydata[i] for i in range(npts))
sum_x = sum(xdata[i] for i in range(npts))
sum_y = sum(ydata[i] for i in range(npts))
sum_x2 = sum(xdata[i] ** 2 for i in range(npts))
x_ = sum_x / npts   # mean of x
y_ = sum_y / npts   # mean of y
# slope and intercept from the normal equations
a = (npts * sum_xy - sum_x * sum_y) / (npts * sum_x2 - sum_x * sum_x)
b = y_ - a * x_
# root mean square error of the fitted line
sq_err_sum = sum((ydata[i] - (a * xdata[i] + b)) ** 2 for i in range(npts))
E4 = np.sqrt((1 / npts) * sq_err_sum)
print('The values of a and b are:',a,b)
print('The mean square error is:', E4)
plt.plot(xdata,ydata,'rd', x, a * x + b, 'b')
plt.title(('The mean square error is:', E4))
plt.legend(('data', 'fitted line'), loc='best');
# +
from scipy.stats import linregress
# Cross-check the hand-derived least-squares solution against scipy
z = linregress(xdata,ydata)
y = z.slope * x + z.intercept
print('according to scipy the slope is:',z.slope)
print('according to scipy the intercept is:', z.intercept)
# -
# FIX: corrected the 'methodes' typo in the printed message
print('Both methods give the same result')
# ### The correlation coefficient, $p$-value and standard error.
# The `linregress` function returns 5 values. Besides the slope and intercept, these are somewhat cryptically defined as the correlation coefficient, the $p$-value, and the standard error. Each of these three values are a quantification of the goodness of fit. According to statisticians, these terms in the `scipy.stats` documentation are somewhat imprecisely defined (they will likely be updated in the future). This is what they mean:
#
# The square of the correlation coefficient $r$ is the *r-squared value* and is defined as
#
# $$r^2 = 1 - \sum{(y_i - \hat{y}_i)^2} \left/ \sum{(y_i - \bar{y})^2} \right. $$
#
# where $y_i$ is the $y$ value of data point $i$, while $\hat{y}_i$ is the fitted values at data point $i$. It can also be written as
#
# $$r^2 = \frac{\text{var}(y) - \text{var}(y-\hat{y})}{\text{var}(y)}$$
#
# So the $r^2$ value is the variance of $y$ minus the variance of the remaining residuals (the data values minus the fitted values), divided by the variance of $y$, and is also referred to as the 'percentage of variance explained'. If the model goes exactly through the data (a perfect fit), then the variance of the residuals is zero, and $r^2=1$. If the model doesn't do much better than simply the mean of $y$, then the $r^2$ is very close to zero. A value of $r^2$ close to 1 is generally a good thing, but it is not possible to say anything definitive about the goodness of fit by just reporting the $r^2$ value (although many people do).
# The standard error returned by the `linregress` model is the estimated standard deviation of the fitted slope. The equation is
# $$s = \sqrt{\frac{\sum(y_i-\hat{y}_i)^2}{N-2}} \left/ \sqrt{\sum{(x_i-\bar{x})^2}} \right.$$
# The standard deviation of the slope should be interpreted similar to the standard deviation of the mean. The computed slope is a statistical value so it has an estimated standard deviation.
# The $p$-value is related to the question whether the estimated slope is significantly different from zero when you perform a $t$-test. When the slope is significantly different from zero, you can state that there is a linear relationship between the two variables. When the $p$-value is less than 0.05, this means that when you perform a two-sided $t$-test you can reject the null hypothesis that the slope is zero in favor of the alternative hypothesis that the slope is not zero. In layman terms: it means that there is less than 5% chance that the slope is zero and more than 95% chance that the slope is not zero. Or even simpler: the slope is significantly different from zero.
# ### Exercise 5. Verification of goodness of fit parameters
# Implement the equations for $r^2$ and $s$ given above to verify that the values returned by the `linregress` function are correct.
#
# Perform a two-sided hypothesis test with significance level 5% where the null hypothesis is that the slope of the fitted line is zero and the alternative hypothesis is that the slope is not zero.
#
# Draw the probability density function of a $t$-distribution with mean 0 and standard deviation equal to the computed value of $s$. Use $N-2$ as the number of degrees of freedom (You subtract the number of parameters from $N$ as you used up these two degrees of freedom).
#
# - Draw red vertical lines indicating the 2.5% and 97.5% percentiles according to the $t$-distribution.
# - Draw a heavy black vertical line at the position of the computed value of the slope.
# - Decide whether you can reject the null hypothesis that the slope is zero in favor of the alternative hypothesis that the slope is not 0 and add that as a title to the figure.
# +
# Verify r^2 and the standard error of the slope against linregress
k5b = 0  # sum of squared residuals, sum (y_i - yhat_i)^2
k6b = 0  # total sum of squares,    sum (y_i - ybar)^2
k7b = 0  # sum (x_i - xbar)^2
N = 0
for i in range(len(xdata)):
    N += 1
    k5a = (ydata[i] - (a * xdata[i] + b)) ** 2
    k5b += k5a
    k6a = (ydata[i] - y_) ** 2
    k6b += k6a
    k7a = (xdata[i] - x_) ** 2
    k7b += k7a
# r^2 = 1 - SSE / SStot ("percentage of variance explained")
r = (1 - (k5b / k6b))
# FIX: corrected 'fore' -> 'for' typos in the four printed messages below
print('The computed value for r^2 is:',r)
print('The value for r^2 according to linregress is:',z.rvalue ** 2)
print()
# standard error of the slope: sqrt(SSE / (N - 2)) / sqrt(sum (x_i - xbar)^2)
s = np.sqrt((k5b/(N-2))/(k7b))
print('The computed value for s is:',s)
print('The value for s according to linregress is:',z.stderr)
# -
from scipy.stats import t
x = np.linspace(-2,2,100)
# Sampling distribution of the slope under H0 (slope = 0): a t-distribution
# with N-2 degrees of freedom, scaled by the standard error s computed above
y = t.pdf(x,N-2, loc = 0, scale = s)
# two-sided 5% critical values (2.5% and 97.5% percentiles)
x3 = t.ppf(0.025,N-2, loc = 0, scale = s)
x4 = t.ppf(0.975,N-2, loc = 0, scale = s)
plt.plot(x,y, 'b')
# heavy black line at the computed slope a
plt.axvline(a, color = 'black', linewidth=4)
plt.axvline(x3, color = 'red')
plt.axvline(x4, color = 'red')
# reject H0 only if the slope falls outside the (x3, x4) interval
if x4 > a > x3:
    plt.title('You can not reject the null hypothesis');
else:
    plt.title('You can reject the null hypothesis');
p = z.pvalue
print(p)
# ### Meaning of the $p$-value
# If you did the previous exercise correctly, you found out that the slope was not significantly different from zero (you could not reject the null hypothesis that the slope is zero with significance level 5%). The $p$ value returned by the `linregress` function means that if you would have performed the hypothesis with significance level $p$, then you would not have rejected the hypothesis. Let's try it:
from scipy.stats import t
# df is hard-coded to 1 here because N - 2 = 1 for the three data points
p1, p2 = t.ppf([p / 2, 1 - p / 2], 1, loc=0, scale=s)
print('upper and lower bound for significance level', p, 'is:', p1, p2)
# Just to be complete, we can compute the $p$ value from the $t$ distribution as
print('p-value from t-distribution:', 2 * (1 - t.cdf(a, 1, loc=0, scale=s)))
# Recall that the $p$-value only makes sense if the residuals are independent and Normally distributed. For the problem we are looking at with 3 data points that is, of course, impossible to say. But when you have more data, you really need to check or, alternatively, use a method that doesn't require the Normality assumption.
# One last thing about the significance level. We state that the slope is signficantly different from zero when $p<0.05$. But that means that there is still a 5% chance that the slope is different from zero by chance. Let's try that in the following exercise
# ### Exercise 6. Getting a value of $p<0.05$ by chance
# Perform the following experiment: Generate 100 $x$ values randomly from a uniform distribution between 0 and 10 using the `np.random.rand` function. Generate 100 $y$ values randomly from a uniform distribution between 0 and 10. Fit a straight line using `linregress`. Perform the experiment 1000 times and count the number of times that the $p$-value is smaller than 0.05. As you will see, you will get approximately 50 out of the 1000 experiments where a line is fitted with a $p$-value smaller than 0.05 just by chance (as there really is no correlation between the random $x$ and $y$ values).
# Monte Carlo experiment: fit straight lines to pure noise 1000 times and
# count how often the slope appears "significant" (p < 0.05) purely by chance.
# Expect roughly 50 out of 1000.
k = 0
for i in range(1000):
    xr = np.random.rand(100)*10
    yr = np.random.rand(100)*10
    z = linregress(xr,yr)
    if z.pvalue < 0.05:
        k+=1
# FIX: removed an unused per-iteration np.linspace and corrected the
# 'occurd' typo in the printed message
print('A P-value smaller than 0.05 occurred',k, 'times')
| Computerprogrameren TUDelft/Notebook 10 linear regressions 1/Halem_4597591_nb_notebook10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pro28_arcgis19_July
# language: python
# name: pro28_arcgis19_july
# ---
# # Detecting deforestation in the Amazon rainforest using unsupervised K-means clustering on satellite imagery
# ## Table of Contents <a class="anchor" id="0"></a>
# * [Introduction](#1)
# * [Imports](#2)
# * [Connecting to ArcGIS](#3)
# * [Accessing & Visualizing the datasets](#4)
# * [Data Preparation](#5)
# * [Model Building](#9)
# * [Data Preprocessing](#11)
# * [Model Initialization ](#12)
# * [Learning Rate Search ](#13)
# * [Model Training ](#14)
# * [Result Visualization](#16)
# * [Conclusion](#23)
# * [Summary of methods used](#24)
# * [Data resources](#25)
# ## Introduction <a class="anchor" id="1"></a>
# Deforestation around the world has reached a critical level, causing irreversible damage to environmental sustainability that is contributing to climate change around the world. Widespread forest fires, from the Amazon Basin in Brazil, to the west coast of the United States, are raging all year-round. This notebook will allow us to detect deforested areas in the Brazilian Amazon rainforest, using satellite imagery.
# ## Imports <a class="anchor" id="2"></a>
# +
# %matplotlib inline
import pandas as pd
from datetime import datetime
from IPython.display import Image
from IPython.display import HTML
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime as dt
import arcgis
from arcgis.gis import GIS
from arcgis.learn import MLModel, prepare_tabulardata
from arcgis.raster import Raster
from fastai.vision import *
# -
# ## Connecting to ArcGIS <a class="anchor" id="3"></a>
# Connect to ArcGIS Online ("home" profile) and to the Enterprise playground
# portal used below for raster publishing and processing.
gis = GIS("home")
# NOTE(review): credentials are hard-coded; acceptable for the public Esri
# playground sample, but never commit real credentials — use stored profiles
# or environment variables instead.
gis_enterp = GIS("https://pythonapi.playground.esri.com/portal", "arcgis_python", "amazing_arcgis_123")
# ## Accessing & Visualizing datasets <a class="anchor" id="4"></a>
#
# Here, we use Sentinel-2 imagery, which has a high resolution of 10m and 13 bands. This imagery is accessed from the ArcGIS Enterprise portal, where it is sourced from the AWS collection.
# get image
s2 = gis.content.get('fd61b9e0c69c4e14bebd50a9a968348c')
sentinel = s2.layers[0]  # the multispectral imagery layer of the item
s2
# ## Data Preparation <a class="anchor" id="5"></a>
# ## Define Area of Interest in the Amazon
# The area of interest is defined using the four latitude and longitude values from a certain region of the Amazon rainforest where a considerable area of forest has been deforested, as can be seen from the images above.
# extent in 3857 for amazon rainforest
# NOTE(review): ymin (-325145.08) is numerically larger than ymax (-327024.74)
# — the two values look swapped; confirm whether the API normalizes this.
amazon_extent = {
    "xmin": -6589488.51,
    "ymin": -325145.08,
    "xmax": -6586199.09,
    "ymax": -327024.74,
    "spatialReference": {"wkid": 3857}  # Web Mercator
}
# Here, we select all the scenes from the sentinel imagery containing the area of interest for our study.
# +
# The respective scene having the above area is selected
# (Category = 1 filters to primary scenes; cloud cover at most 5%)
selected = sentinel.filter_by(where="(Category = 1) AND (cloudcover <=0.05)",
                              geometry=arcgis.geometry.filters.intersects(amazon_extent))
df = selected.query(out_fields="AcquisitionDate, GroupName, CloudCover, DayOfYear",
                    order_by_fields="AcquisitionDate").sdf
# convert epoch milliseconds to pandas Timestamps for readability
df['AcquisitionDate'] = pd.to_datetime(df['acquisitiondate'], unit='ms')
df
# -
# The satellite imagery with the least cloud cover is selected and visualized for further processing.
# The scene is selected with the least cloud cover and extracted using the amazon extent
amazon_scene = sentinel.filter_by('OBJECTID=1584818')
amazon_scene.extent = amazon_extent
amazon_scene
# In the above scene, the brown patches are the deforested areas that are to be identified. This selected scene is then published to the portal.
# publish the scene to the portal
amazon_scene.save('amazon_scene'+ str(dt.now().microsecond), gis=gis_enterp)
# The published imagery of the Amazon rainforest is exported back to an image file on disk for further processing.
# NOTE(review): this URL is hard-coded to a previously published item
# ('amazon_scene_may26') rather than the randomly-suffixed item saved just
# above — confirm this is intentional (reusing an existing published scene).
raster_amazon_13bands = Raster("https://pythonapi.playground.esri.com/ra/rest/services/Hosted/amazon_scene_may26/ImageServer",
                               gis=gis_enterp,
                               engine="image_server")
# visualizing the image
raster_amazon_13bands.export_image(size=[3330,1880])
# ## Model Building <a class="anchor" id="9"></a>
# The first part of model building consists of defining the preprocessors, which will be used to scale the bands before feeding them into the model. The band names use the conventional naming method of the imagery name with an id number appended at the end as follows:
# Sentinel-2 imagery has 13 bands, of which 4 bands, namely the blue, green, red, and near infrared bands, we will use here for modelling. These bands work well for differentiating green forested areas from barren land. The band information, along with the band name and their respective ids, are obtained for selecting the required bands.
# get the band names and their ids, sentinel images have 13 bands
pd.DataFrame(amazon_scene.key_properties()['BandProperties'])
# get the imagery name to define the band names
raster_amazon_13bands.name
# Here, the imagery name is 'Hosted/amazon_scene_april9'. Subsequently, the names of the blue, green, red, and near infrared bands would be 'Hosted/amazon_scene_april9_1', 'Hosted/amazon_scene_april9_2', 'Hosted/amazon_scene_april9_3', 'Hosted/amazon_scene_april9_7' respectively. These bands will be used for defining the preprocessors.
# ### Data Pre-processing <a class="anchor" id="11"></a>
from sklearn.preprocessing import MinMaxScaler
# The four bands are listed in the preprocessors for scaling, with the last item as the designated scaler as follows.
# band ids per the BandProperties table above: 1=blue, 2=green, 3=red, 7=NIR
preprocessors = [('Hosted/amazon_scene_may26_1',
                  'Hosted/amazon_scene_may26_2',
                  'Hosted/amazon_scene_may26_3',
                  'Hosted/amazon_scene_may26_7', MinMaxScaler())]
# Here, in the explanatory raster, we pass the name of the explanatory raster and the selected bands by their id's — 1 for blue, 2 for green, 3 for red, and 7 for NIR, as follows:
# Data is prepared for the MLModel using the selected scene and the preprocessors
data = prepare_tabulardata(explanatory_rasters=[(raster_amazon_13bands,(1,2,3,7))], preprocessors=preprocessors)
# visualization of the data to be processed by the model
data.show_batch()
# ### Model Initialization <a class="anchor" id="12"></a>
#
# Once the data is prepared, an unsupervised model of k-means clustering from scikit-learn is used for clustering the pixels into deforested areas and forested areas. The clustering model is passed inside MLModel as follows, with the number of clusters set as three in the parameters.
from arcgis.learn import MLModel, prepare_tabulardata
# Wrap scikit-learn's KMeans in the MLModel framework; the trailing keyword
# arguments are forwarded to the sklearn estimator, and random_state makes
# the centroid initialisation reproducible.
model = MLModel(data, 'sklearn.cluster.KMeans', n_clusters=3, init='k-means++', random_state=43)
# ### Model Training <a class="anchor" id="14"></a>
# Now the model is ready to be trained, and will label the pixels as being one of three classes, either forested, slightly deforested, or highly deforested.
# here model is trained which would label the pixels into the designated classes
model.fit()
# the labelled pixels can be visualized as follows with the last column returning the predicted labels by the model
model.show_results()
# ### Deforestation Clusters Prediction<a class="anchor" id="21"></a>
#
# Next, the trained model is used to predict clusters within the entire selected scene of the Amazon rainforest. This is passed as the explanatory raster, with the prediction type as raster and a local path provided for output. The `output_layer_name` parameter can also be used for publishing.
# NOTE(review): `dt` is not imported in this chunk — presumably
# `from datetime import datetime as dt` ran earlier in the notebook; verify.
# The microsecond suffix keeps repeated runs from colliding on layer name.
pred_new = model.predict(explanatory_rasters=[raster_amazon_13bands],
                         prediction_type='raster',
                         output_layer_name=('deforest_predicted2'+str(dt.now().microsecond)),
                         output_raster_path=r"/tmp/result5.tif")
# ## Result Visualization<a class="anchor" id="16"></a>
#
# The resulting raster with the predicted classes of deforested areas and forested areas is now read back for visualization. The predicted local raster can be accessed here.
amazon_predict = gis.content.get('b81b89aac4cd4e08bcd7cd400fac558f')
amazon_predict
import os, zipfile
# Download the zipped prediction item, extract it next to the download,
# and point a Raster at the extracted result5.tif for display.
filepath_new = amazon_predict.download(file_name=amazon_predict.name)
with zipfile.ZipFile(filepath_new, 'r') as zip_ref:
    zip_ref.extractall(Path(filepath_new).parent)
output_path = Path(os.path.join(os.path.splitext(filepath_new)[0]))
output_path = os.path.join(output_path, "result5.tif")
raster_predict = Raster(output_path)
# Render at a fixed pixel size large enough to inspect cluster boundaries.
raster_predict.export_image(size=[3290,1880])
# The model has correctly labelled the deforested areas in white, distinguishing them from the rest of the forested areas in black.
#
# The boundaries of the detected deforested areas could be further extracted into polygons using the convert raster to feature function.
# ### Summary of methods used <a class="anchor" id="24"></a>
# | Method | Description | Examples |
# | -| - |-|
# | prepare_tabulardata| prepare data including imputation, normalization and train-test split |prepare data ready for fitting a MLModel
# | MLModel() | select the ML algorithm to be used for fitting | any supervised and unsupervised models from scikit learn can be used
# | model.fit() | train a model | training the unsupervised model with suitable input
# | model.score() | find the appropriate model metric of the trained model | returns suitable value after training the unsupervised MLModel
# | model.predict() | predict on a test set | predict values using the trained models on the trained data itself
# ### Data resources <a class="anchor" id="25"></a>
# | Dataset | Source | Link |
# | -| - |-|
# | sat imagery| sentinel2 |https://registry.opendata.aws/sentinel-2/|
| samples/04_gis_analysts_data_scientists/detecting-deforestation-using-kmeans-clustering-on-sentinel-imagery.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 64-bit
# name: python374jvsc74a57bd07945e9a82d7512fbf96246d9bbc29cd2f106c1a4a9cf54c9563dadf10f2237d4
# ---
# # Apartado 6 - Import
# - Import modulos
# - Abrir ficheros
# ------------------------------------------------------------------
# ## Import modulos
# [HELP](https://realpython.com/python-modules-packages/)
#importando un módulo/librería/fichero que se llama mi modulo con un alias"mm"
# Import a module under the alias "mm". Only the name "mm" is bound in this
# namespace — the name "mi_modulo" itself is NOT defined here.
import mi_modulo as mm
mm.resta_2(a=2,b=3) # call resta_2 through the alias
# BUG FIX: the original line was `resta = mi_modulo.resta_2(a=7, b=6)`,
# which raises NameError because `import mi_modulo as mm` binds only `mm`.
resta = mm.resta_2(a=7, b=6)
resta
# +
from mi_modulo import resta_2  # import a single function; preferable to importing everything when you know exactly what you need
resta2 = resta_2(a=5, b=3)  # the function is now callable directly, without a module prefix (no mm.resta_2 as above)
resta2
# -
from mi_modulo import resta_2, suma_2 # import two functions from the same module in one statement
x=5 # the 5 is not displayed: a notebook cell only shows its last expression
from mi_modulo import resta_2, suma_2, x # variables can be imported too — NOTE: this rebinds x, silently replacing the local value above
print(x)
import pandas as pd
import numpy as np
import tensorflow as tf  # third-party libraries are imported the same way
# +
import mi_modulo as mm
mm.resta_2(a=3,b=5)  # typing the alias followed by a dot lists everything the module exposes
mm.x  # shows x; remember only the last expression of the cell is displayed
# -
# -
# puedo importar mi modulo porque esta dentro de mi carpeta import porque esta en el path de sys -> mirar abajo.
# --------------------------------------------------------------------------
# +
# si modifico un fichero que se ha importado, necesito volver a ejecutarlo para que aplique los cambios de ese doc
# si me da algun tipo de error de que no existe una funcion importada que he modificado tengo que dar al cuadrado, luego a la flecha de retorno verde(cuadro superior) y volver a ejecutar la importacion y luego la funcion
# -
# si creas una carpeta dentro de 6_import y dentro un archivo .py. y ahora quiero acceder a un archivo que esta dentro de una carpeta. para acceder un fichero dentro de otra carpeta dentro de 6_import se haria asi:
import f.archivo_dentro_de_f as adf
adf.f1()
from f.archivo_dentro_de_f import * #esto importaria todo. no es una buena practica. mejor importar el fichero y acceder a el usando la funcion, variable que queremos
#x=3 si yo tengo esta variable coon este valor, cuando ejecute el import* lo que valga en ese archivo que importo va a aempezar a valer dedse que lo importe. por eso no es bueno importar todo. SEPAMOS QUE EXISTE PERO NO LO USAMOS
f1() # si importo con asterisco no tengo que poner el alias para acceder a las variables
# +
#si un archivo esta dentro de una carpeta seria import f.k.archivo_dentro_de_k as addk -> el punto indica carpetas y en ultima instancia un archivo
# -
def mi_funcion():
    """Print a marker message identifying this notebook's local definition."""
    mensaje = "estoy en paths"
    print(mensaje)
mi_funcion() # calls the local definition above
from f.archivo_dentro_de_f import mi_funcion # imports mi_funcion from archivo_dentro_de_f, shadowing the local one
mi_funcion() # now calls the imported version
import f.archivo_dentro_de_f as fichero_f
fichero_f.mi_funcion() # importing the module instead avoids overwriting the local name: the origin is explicit at the call site
# +
import re
re.__file__ # __file__ shows where an installed module lives inside the Python installation
# +
import mi_modulo as mm # locate where a local module is stored on disk
mm.__file__
# -
import sys # sys is part of Python's standard library; the current folder (here, the import folder) is always on the path by default
sys.path # sys.path is the list of directories Python searches when importing .py files
# only files located inside one of the listed folders can be imported
# sys.path is a plain Python list of directory paths
# +
# To import something from a folder ABOVE this one, that parent folder must first be added to sys.path.
# +
# import a # "a" is not inside this folder, so the folder containing "a" must be appended to sys.path first
# Windows paths written in normal string literals need doubled backslashes
ruta_absoluta1= "c:\\Users\\Administrator\\Desktop\\TheBridge\\Bootcamp-DataScience-2021\\week3_course_python_III\\day1_python_VII\\theory\\6_import"
sys.path.append(ruta_absoluta1) # append the absolute path so modules there become importable; sys.path.pop() removes the last entry again
# -
# EXTRA
print("\\") # an escaped backslash prints a single backslash
# BUG FIX: the original line was `print("\""")`, which is a SyntaxError
# (the third quote starts a new, unterminated string). Escaping the quote
# once prints a single double-quote character.
print("\"") # prints one double-quote
# A relative path uses a single separator and is resolved against the
# current location — it always depends on where the code runs from.
import f.archivo_dentro_de_f # relative import: it depends on the current package/location
# #copy path -> absolute
# BUG FIX: the original assigned these Windows paths in ordinary string
# literals, where "\U..." is a SyntaxError (invalid \U unicode escape) and
# "\t" silently becomes a TAB character. Raw strings keep every backslash.
ruta_absoluta = r"C:\Users\Administrator\Desktop\TheBridge\Bootcamp-DataScience-2021\week3_course_python_III\day1_python_VII\theory\6_import\6_import.ipynb"
# # copy relative path -> relative; relative paths are what make a project runnable on any machine, so use them when delivering work
ruta_relativa = r"week3_course_python_III\day1_python_VII\theory\6_import\6_import.ipynb"
import os # built-in Python library for operating-system paths
os.getcwd() # returns the folder the notebook is running in (a .py script would use __file__)
os.path.dirname(os.getcwd()) # one folder above the current one; path is a submodule, dirname a function inside it
os.path.dirname(os.path.dirname(os.getcwd())) # nesting climbs two levels — repeatable as many times as needed
ruta=os.getcwd()
print(ruta)
# Climb four levels up to reach the repository root folder.
for i in range (4):
    ruta=os.path.dirname(ruta) # executed 4 times: each pass goes one folder up
    print(ruta)
sys.path.append(ruta)
sys.path # with the root folder on sys.path, everything below it becomes importable
import week3_course_python_III.day2_python_VIII.theory.imports.mi_modulo as mi_modulo_bb
mi_modulo_bb.x
# +
# Jupyter reports the folder the notebook runs in via os.getcwd(); a plain
# .py script should use __file__ instead (no import needed for __file__).
# .py    -> "__file__"
# .ipynb -> os.getcwd()
# -
# To append a sub-path onto an existing path:
# os.sep is a variable holding the operating system's path separator.
# BUG FIX: the original cell ended with the incomplete expression `ruta+`,
# which is a SyntaxError; completed with os.sep joining a sub-folder name.
ruta + os.sep + "subcarpeta"
| week3_course_python_III/day1_python_VII/theory/6_import/6_import.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from tqdm import tqdm_notebook
import gc
# Local project folder holding the competition input files.
path = 'C:/Users/choco/Desktop/Github/Dacon/Energy Bigdata Utilization/'
# Hourly meter readings (train/test), the submission template, and the
# preprocessed per-meter test frame used throughout this notebook.
train = pd.read_csv(path + "input/train2.csv")
test = pd.read_csv(path + "input/test2.csv")
submission = pd.read_csv(path + "input/submission_1002.csv")
test_df = pd.read_csv(path + "input/test_df.csv")
# ## Make a submission
# +
# Hourly skeleton: for every meter, 24 rows covering each hour of
# 2018-07-01 (the hourly prediction target).
# NOTE: np.append copies the whole array on every iteration (quadratic);
# acceptable at this data size.
import datetime
sub_df = pd.DataFrame()
date_list_day = [datetime.datetime(2018,7,1,0) + datetime.timedelta(hours=x) for x in range(24)]
date_list = []
meter_id = []
for i in tqdm_notebook(test_df['meter_id'].unique()):
    date_list = np.append(date_list, date_list_day)
    meter_id = np.append(meter_id, np.array([i] * 24))
sub_df['time'] = date_list
sub_df['meter_id'] = meter_id
# +
# Daily skeleton: 24*10 hourly rows per meter spanning 2018-07-01..07-10.
import datetime
sub_df_day = pd.DataFrame()
date_list_day = [datetime.datetime(2018,7,1,0) + datetime.timedelta(hours=x) for x in range(24*10)]
date_list = []
meter_id = []
for i in tqdm_notebook(test_df['meter_id'].unique()):
    date_list = np.append(date_list, date_list_day)
    meter_id = np.append(meter_id, np.array([i] * 24 * 10))
sub_df_day['time'] = date_list
sub_df_day['meter_id'] = meter_id
# +
# Monthly skeleton: 24*153 hourly rows per meter covering the 153 days of
# July-November 2018 (31+31+30+31+30).
import datetime
sub_df_month = pd.DataFrame()
date_list_month = [datetime.datetime(2018,7,1,0) + datetime.timedelta(hours=x) for x in range(24 * 153)]
date_list = []
meter_id = []
for i in tqdm_notebook(test_df['meter_id'].unique()):
    date_list = np.append(date_list, date_list_month)
    meter_id = np.append(meter_id, np.array([i] * 24 * 153))
sub_df_month['time'] = date_list
sub_df_month['meter_id'] = meter_id
# -
# ## Feature Engineering
# Parse timestamps and expand calendar features on the hourly frames.
# NOTE(review): only test_df and sub_df are converted here; sub_df_day and
# sub_df_month keep the arrays built above — verify their 'time' dtype
# supports the .dt accessor where it is used later.
for df in [test_df, sub_df]:
    df['time'] = pd.to_datetime(df['time'])
    df['year'] = df['time'].dt.year
    df['month'] = df['time'].dt.month
    df['day'] = df['time'].dt.day
    df['hour'] = df['time'].dt.hour
    df['weekday'] = df['time'].dt.weekday
# +
# y_hr = target lagged by the length of each prediction window, per meter:
# 24 h for the hourly task ...
# NOTE(review): shift() assumes rows are time-ordered within each meter —
# confirm the input ordering.
all_df = pd.concat([test_df, sub_df], axis=0, sort=False)
all_df['y_hr'] = all_df.groupby(['meter_id'])['y'].shift(24)
test_df_hour = all_df[:test_df.shape[0]]
sub_df_hour = all_df[test_df.shape[0]:]
# +
# ... 240 h (10 days) for the daily task ...
all_df = pd.concat([test_df, sub_df_day], axis=0, sort=False)
all_df['y_hr'] = all_df.groupby(['meter_id'])['y'].shift(24 * 10)
test_df_day = all_df[:test_df.shape[0]]
sub_df_day = all_df[test_df.shape[0]:]
# +
# ... and 3672 h (153 days) for the monthly task.
all_df = pd.concat([test_df, sub_df_month], axis=0, sort=False)
all_df['y_hr'] = all_df.groupby(['meter_id'])['y'].shift(24 * 153)
test_df_month = all_df[:test_df.shape[0]]
sub_df_month = all_df[test_df.shape[0]:]
# -
# Free the temporary concatenated frame.
del all_df;
gc.collect()
# ## External data - weather
# + active=""
# weather = pd.read_csv("./input/weather.csv", encoding='cp949')[['일시', '기온(°C)', '풍속(m/s)']]
# weather['일시'] = pd.to_datetime(weather['일시'])
# weather.rename(columns={'일시':'time'},inplace=True)
#
#
# test_df_hour = test_df_hour.merge(weather, how='left', on='time')
# test_df_day = test_df_day.merge(weather, how='left', on='time')
# test_df_month = test_df_month.merge(weather, how='left', on='time')
#
#
# sub_df_hour = sub_df_hour.merge(weather, how='left', on='time')
# sub_df_day = sub_df_day.merge(weather, how='left', on='time')
# sub_df_month = sub_df_month.merge(weather, how='left', on='time')
# -
# ## Make a Validation Set
# Hold out the tail of each frame as validation: the final day for the
# hourly task, the final 10 days for the daily task, and the final half
# year for the monthly task.
# NOTE(review): the hourly split uses test_df rather than test_df_hour, so
# the y_hr lag feature is absent from the hourly model — confirm intent.
train_hour = test_df[test_df['time'] < '2018-06-30 00:00:00'].reset_index(drop=True)
valid_hour = test_df[test_df['time'] >= '2018-06-30 00:00:00'].reset_index(drop=True)
train_day = test_df_day[test_df_day['time'] < '2018-06-21 00:00:00'].reset_index(drop=True)
valid_day = test_df_day[test_df_day['time'] >= '2018-06-21 00:00:00'].reset_index(drop=True)
train_month = test_df_month[test_df_month['time'] < '2018-01-01 00:00:00'].reset_index(drop=True)
valid_month = test_df_month[test_df_month['time'] >= '2018-01-01 00:00:00'].reset_index(drop=True)
# ## LightGBM - Hour
from sklearn.model_selection import KFold, StratifiedKFold, TimeSeriesSplit
from sklearn.metrics import roc_auc_score
import lightgbm as lgb
# LightGBM hyper-parameters for the hour/day models; 'mape' is the
# validation metric (matching the competition score), max_depth=-1 means
# no depth limit.
params = {'num_leaves': 20,
          'objective': 'regression',
          'max_depth': -1,
          'learning_rate': 0.001,
          "boosting_type": "gbdt",
          "bagging_seed": 11,
          "metric": 'mape',
          "verbosity": -1,
          'reg_alpha': 0.3899927210061127,
          'reg_lambda': 0.6485237330340494,
          'random_state': 47
         }
# +
# Hour model: every column except the raw timestamp and the target.
features = [c for c in train_hour.columns if c not in ['time','y']]
y = train_hour['y']
y_valid = valid_hour['y']
X = train_hour[features].reset_index(drop=True)
V = valid_hour[features].reset_index(drop=True)
sub = sub_df_hour[features].reset_index(drop=True)
# +
from sklearn.preprocessing import LabelEncoder
# Integer-encode meter_id consistently across train and submission frames.
# NOTE(review): the encoder is fitted on X and sub only; V is assumed to
# contain no meter ids unseen there — otherwise transform raises.
for col in tqdm_notebook(['meter_id']):
    if X[col].dtype == 'object':
        le = LabelEncoder()
        le.fit(list(X[col].astype(str).values) + list(sub[col].astype(str).values))
        X[col] = le.transform(list(X[col].astype(str).values))
        sub[col] = le.transform(list(sub[col].astype(str).values))
        V[col] = le.transform(list(V[col].astype(str).values))
# +
from time import time
mape = list()
feature_importances = pd.DataFrame()
feature_importances['feature'] = X.columns
training_start_time = time()
start_time = time()
# Train up to 2500 rounds with early stopping on the hold-out set.
trn_data = lgb.Dataset(X, label=y, categorical_feature = ['meter_id'])
val_data = lgb.Dataset(V, label=y_valid, categorical_feature = ['meter_id'])
clf = lgb.train(params, trn_data, 2500, valid_sets = [trn_data, val_data],
                verbose_eval=100, early_stopping_rounds=50)
mape.append(clf.best_score['valid_1']['mape'])
print('-' * 30)
print('Training has finished.')
print('Total training time is {}'.format(str(datetime.timedelta(seconds=time() - training_start_time))))
print('-' * 30)
# -
# Refit on train+validation with the best iteration count found above,
# then predict the submission rows.
best_iter = clf.best_iteration
clf = lgb.LGBMRegressor(**params, num_boost_round=best_iter)
clf.fit(pd.concat([X,V],axis=0), pd.concat([y,y_valid],axis=0))
sub_df_hour['y'] = clf.predict(sub)
# +
# Pivot the long predictions into one row per meter with 24 hourly columns.
submission_hour = pd.DataFrame()
meter_id_list = []
output_list = []
for meter_id in tqdm_notebook(sub_df_hour['meter_id'].unique()):
    meter_id_list.append(meter_id)
    output_list.append(sub_df_hour[sub_df_hour['meter_id']==meter_id]['y'].values)
submission_hour['merter_id'] = meter_id_list # typo 'merter_id' is harmless: all columns are renamed two lines below
submission_hour = pd.concat([submission_hour, pd.DataFrame(output_list)], axis=1)
submission_hour.columns = np.append(['meter_id'], ['X2018_7_1_'+str(i+1)+'h' for i in range(24)])
# Sort rows by the numeric part of the meter id (e.g. 'X12' -> 12).
submission_hour['meter_id_2'] = submission_hour['meter_id'].apply(lambda x: x[1:]).astype(int)
submission_hour = submission_hour.sort_values(by='meter_id_2')
del submission_hour['meter_id_2']
submission_hour
# -
# ## LightGBM - day
# +
def create_new_columns(name, aggs):
    """Flatten an aggregation spec into '<name>_<column>_<agg>' labels,
    preserving the dict's insertion order."""
    labels = []
    for column, agg_funcs in aggs.items():
        for agg in agg_funcs:
            labels.append(name + '_' + column + '_' + agg)
    return labels
# Aggregation spec: total target per day plus summary stats of the lag
# feature. (Weather aggregations kept commented out, as in the source.)
aggs = {}
aggs['y'] = ['sum']
aggs['y_hr'] = ['sum','max','min','mean','var']
#aggs['기온(°C)'] = ['max','min','mean','var']
#aggs['풍속(m/s)'] = ['max','min','mean','var']
new_columns = create_new_columns('day',aggs)
# Collapse the hourly rows to one row per (date, meter).
train_day['date'] = train_day['time'].dt.date
train_day_group_df = train_day.groupby(['date','meter_id']).agg(aggs)
train_day_group_df.columns = new_columns
train_day_group_df.reset_index(drop=False,inplace=True)
valid_day['date'] = valid_day['time'].dt.date
valid_day_group_df = valid_day.groupby(['date','meter_id']).agg(aggs)
valid_day_group_df.columns = new_columns
valid_day_group_df.reset_index(drop=False,inplace=True)
# NOTE(review): sub_df_day['time'] was never passed through pd.to_datetime
# (only test_df and sub_df were) — confirm the .dt accessor works here.
sub_df_day['date'] = sub_df_day['time'].dt.date
sub_df_day_group_df = sub_df_day.groupby(['date','meter_id']).agg(aggs)
sub_df_day_group_df.columns = new_columns
sub_df_day_group_df.reset_index(drop=False,inplace=True)
# +
# Day model target is the daily total 'day_y_sum'.
features = [c for c in train_day_group_df.columns if c not in ['date','y','day_y_sum']]
y = train_day_group_df['day_y_sum']
y_valid = valid_day_group_df['day_y_sum']
X = train_day_group_df[features].reset_index(drop=True)
V = valid_day_group_df[features].reset_index(drop=True)
sub = sub_df_day_group_df[features].reset_index(drop=True)
# +
from sklearn.preprocessing import LabelEncoder
# Same encoding scheme as the hour model: fit on train + submission ids.
for col in tqdm_notebook(['meter_id']):
    if X[col].dtype == 'object':
        le = LabelEncoder()
        le.fit(list(X[col].astype(str).values) + list(sub[col].astype(str).values))
        X[col] = le.transform(list(X[col].astype(str).values))
        sub[col] = le.transform(list(sub[col].astype(str).values))
        V[col] = le.transform(list(V[col].astype(str).values))
# +
from time import time
mape = list()
feature_importances = pd.DataFrame()
feature_importances['feature'] = X.columns
training_start_time = time()
start_time = time()
trn_data = lgb.Dataset(X, label=y, categorical_feature = ['meter_id'])
val_data = lgb.Dataset(V, label=y_valid, categorical_feature = ['meter_id'])
clf = lgb.train(params, trn_data, 2500, valid_sets = [trn_data, val_data],
                verbose_eval=100, early_stopping_rounds=50)
mape.append(clf.best_score['valid_1']['mape'])
print('-' * 30)
print('Training has finished.')
print('Total training time is {}'.format(str(datetime.timedelta(seconds=time() - training_start_time))))
print('-' * 30)
# -
# Refit on train+validation at the best iteration, predict submission days.
best_iter = clf.best_iteration
clf = lgb.LGBMRegressor(**params, num_boost_round=best_iter)
clf.fit(pd.concat([X,V],axis=0), pd.concat([y,y_valid],axis=0))
sub_df_day_group_df['y'] = clf.predict(sub)
sub
# +
# One row per meter with ten daily totals (2018-07-01..07-10).
submission_day = pd.DataFrame()
meter_id_list = []
output_list = []
for meter_id in tqdm_notebook(sub_df_day_group_df['meter_id'].unique()):
    meter_id_list.append(meter_id)
    output_list.append(sub_df_day_group_df[sub_df_day_group_df['meter_id']==meter_id]['y'].values)
submission_day['merter_id'] = meter_id_list # typo 'merter_id' is harmless: all columns are renamed two lines below
submission_day = pd.concat([submission_day, pd.DataFrame(output_list)], axis=1)
submission_day.columns = np.append(['meter_id'], ['X2018_7_'+str(i+1)+'_d' for i in range(10)])
submission_day['meter_id_2'] = submission_day['meter_id'].apply(lambda x: x[1:]).astype(int)
submission_day = submission_day.sort_values(by='meter_id_2').reset_index(drop=True)
del submission_day['meter_id_2']
submission_day
# -
# ## LightGBM - month
# +
def create_new_columns(name, aggs):
    """Return flattened column labels '<name>_<key>_<agg>' for an agg spec."""
    return ['_'.join((name, key, agg))
            for key, agg_list in aggs.items()
            for agg in agg_list]
# Aggregations computed per (year_month, meter_id) group.
aggs = {}
aggs['y'] = ['sum']
aggs['y_hr'] = ['sum','max','min','mean','var']
#aggs['기온(°C)'] = ['max','min','mean','var']
#aggs['풍속(m/s)'] = ['max','min','mean','var']
new_columns = create_new_columns('month',aggs)
# BUG FIX: no cell ever creates a 'year_month' column, so the groupby
# calls below raised KeyError. Derive it from the 'time' column (e.g.
# 201807 for July 2018) on every frame grouped here.
for _df in (train_month, valid_month, sub_df_month):
    _df['year_month'] = _df['time'].dt.year * 100 + _df['time'].dt.month
train_month_group_df = train_month.groupby(['year_month','meter_id']).agg(aggs)
train_month_group_df.columns = new_columns
train_month_group_df.reset_index(drop=False,inplace=True)
valid_month_group_df = valid_month.groupby(['year_month','meter_id']).agg(aggs)
valid_month_group_df.columns = new_columns
valid_month_group_df.reset_index(drop=False,inplace=True)
sub_df_month_group_df = sub_df_month.groupby(['year_month','meter_id']).agg(aggs)
sub_df_month_group_df.columns = new_columns
sub_df_month_group_df.reset_index(drop=False,inplace=True)
# +
# Month model: drop identifiers and targets from the grouped frame; the
# target is the monthly total 'month_y_sum'.
features = [c for c in train_month_group_df.columns if c not in ['date','y','month_y_sum','year_month']]
y = train_month_group_df['month_y_sum']
y_valid = valid_month_group_df['month_y_sum']
X = train_month_group_df[features].reset_index(drop=True)
V = valid_month_group_df[features].reset_index(drop=True)
sub = sub_df_month_group_df[features].reset_index(drop=True)
# +
from sklearn.preprocessing import LabelEncoder
# Same meter_id encoding scheme as the hour/day models.
for col in tqdm_notebook(['meter_id']):
    if X[col].dtype == 'object':
        le = LabelEncoder()
        le.fit(list(X[col].astype(str).values) + list(sub[col].astype(str).values))
        X[col] = le.transform(list(X[col].astype(str).values))
        sub[col] = le.transform(list(sub[col].astype(str).values))
        V[col] = le.transform(list(V[col].astype(str).values))
# -
# The month model uses more conservative parameters than the hour/day
# models: shallow trees, tiny learning rate, DART boosting.
params = {'num_leaves': 20,
          'objective': 'regression',
          'max_depth': 3,
          'learning_rate': 0.0001,
          "boosting_type": "dart",
          "metric": 'mape',
          "verbosity": -1,
          'random_state': 47
         }
# +
from time import time
mape = list()
feature_importances = pd.DataFrame()
feature_importances['feature'] = X.columns
training_start_time = time()
start_time = time()
trn_data = lgb.Dataset(X, label=y, categorical_feature = ['meter_id'])
val_data = lgb.Dataset(V, label=y_valid, categorical_feature = ['meter_id'])
clf = lgb.train(params, trn_data, 2500, valid_sets = [trn_data, val_data],
                verbose_eval=100, early_stopping_rounds=50)
mape.append(clf.best_score['valid_1']['mape'])
print('-' * 30)
print('Training has finished.')
print('Total training time is {}'.format(str(datetime.timedelta(seconds=time() - training_start_time))))
print('-' * 30)
# -
# Refit on train+validation at the best iteration, predict monthly totals.
best_iter = clf.best_iteration
clf = lgb.LGBMRegressor(**params, num_boost_round=best_iter)
clf.fit(pd.concat([X,V],axis=0), pd.concat([y,y_valid],axis=0))
sub_df_month_group_df['y'] = clf.predict(sub)
# +
# One row per meter with five monthly totals (July..November 2018).
submission_month = pd.DataFrame()
meter_id_list = []
output_list = []
for meter_id in tqdm_notebook(sub_df_month_group_df['meter_id'].unique()):
    meter_id_list.append(meter_id)
    output_list.append(sub_df_month_group_df[sub_df_month_group_df['meter_id']==meter_id]['y'].values)
submission_month['merter_id'] = meter_id_list # typo 'merter_id' is harmless: all columns are renamed two lines below
submission_month = pd.concat([submission_month, pd.DataFrame(output_list)], axis=1)
submission_month.columns = np.append(['meter_id'], ['X2018_'+str(i+7)+'_m' for i in range(5)])
submission_month['meter_id_2'] = submission_month['meter_id'].apply(lambda x: x[1:]).astype(int)
submission_month = submission_month.sort_values(by='meter_id_2').reset_index(drop=True)
del submission_month['meter_id_2']
submission_month
# -
# ## submission
# Join the hourly, daily and monthly blocks into the final submission file.
submission_total = pd.merge(submission_hour, submission_day, how='left', on='meter_id')
submission_total = pd.merge(submission_total, submission_month, how='left', on='meter_id')
submission_total.to_csv("submission_total.csv",index=False)
| Energy Bigdata Utilization/Code/MODEL/3. Modeling - LightGBM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Using Yellowbrick for Machine Learning Visualizations on Facebook Data
#
# <NAME>
#
# The dataset below was provided to the UCI Machine Learning Repository from researchers who used Neural Networks and Decision Trees to predict how many comments a given Facebook post would generate.
#
# There are five variants of the dataset. This notebook only uses the first.
#
# The full paper can be found here:
#
# http://uksim.info/uksim2015/data/8713a015.pdf
#
#
# ### The primary purpose of this notebook is to test Yellowbrick.
#
# # Attribute Information:
#
#
# All features are integers or float values.
#
#
# 1
# Page Popularity/likes
# Decimal Encoding
# Page feature
# Defines the popularity or support for the source of the document.
#
#
# 2
# Page Checkins’s
# Decimal Encoding
# Page feature
# Describes how many individuals have visited this place so far. This feature is only associated with places, e.g. an institution, a venue, or a theater.
#
#
# 3
# Page talking about
# Decimal Encoding
# Page feature
# Defines the daily interest of individuals towards the source of the document/post — the people who actually come back to the page after liking it. This includes activities such as comments, likes on a post, shares, etc., by visitors to the page.
#
#
# 4
# Page Category
# Value Encoding
# Page feature
# Defines the category of the source of the document eg: place, institution, brand etc.
#
#
# 5 - 29
# Derived
# Decimal Encoding
# Derived feature
# These features are aggregated by page, by calculating min, max, average, median and standard deviation of essential features.
#
#
# 30
# CC1
# Decimal Encoding
# Essential feature
# The total number of comments before selected base date/time.
#
#
# 31
# CC2
# Decimal Encoding
# Essential feature
# The number of comments in last 24 hours, relative to base date/time.
#
#
# 32
# CC3
# Decimal Encoding
# Essential feature
# The number of comments in last 48 to last 24 hours relative to base date/time.
#
#
# 33
# CC4
# Decimal Encoding
# Essential feature
# The number of comments in the first 24 hours after the publication of post but before base date/time.
#
#
# 34
# CC5
# Decimal Encoding
# Essential feature
# The difference between CC2 and CC3.
#
#
# 35
# Base time
# Decimal(0-71) Encoding
# Other feature
# Selected time in order to simulate the scenario.
#
#
# 36
# Post length
# Decimal Encoding
# Other feature
# Character count in the post.
#
#
# 37
# Post Share Count
# Decimal Encoding
# Other feature
# This features counts the no of shares of the post, that how many peoples had shared this post on to their timeline.
#
#
# 38
# Post Promotion Status
# Binary Encoding
# Other feature
# To reach more people with posts in News Feed, individuals can promote their posts; this feature tells whether the post is promoted (1) or not (0).
#
#
# 39
# H Local
# Decimal(0-23) Encoding
# Other feature
# This describes the H hrs, for which we have the target variable/ comments received.
#
#
# 40-46
# Post published weekday
# Binary Encoding
# Weekdays feature
# This represents the day(Sunday...Saturday) on which the post was published.
#
#
# 47-53
# Base DateTime weekday
# Binary Encoding
# Weekdays feature
# This represents the day(Sunday...Saturday) on selected base Date/Time.
#
# 54
# Target Variable
# Decimal
# Target
# The no of comments in next H hrs(H is given in Feature no 39).
#
#
#
#
#
#
# ## Data Exploration
#
# +
# %matplotlib inline
import os
import json
import time
import pickle
import requests
import numpy as np
import pandas as pd
import yellowbrick as yb
import matplotlib.pyplot as plt
# +
# BUG FIX: Features_Variant_1.csv ships without a header row (load_data()
# further down reads the same file with header=None, and the columns are
# assigned manually below), so the default header=0 silently consumed the
# first data row as column names. Read it headerless.
df=pd.read_csv("/Users/pwitt/Documents/machine-learning/examples/pbwitt/Dataset/Training/Features_Variant_1.csv", header=None)
# Fetch the data if required
DATA = df
print('Data Shape ' + str(df.shape))
print(df.dtypes)
# +
# 54 human-readable column names matching the 54 CSV columns: 4 page
# features, 25 derived aggregates, 5 essential comment counts (CC1-CC5),
# other post features, 14 weekday one-hot flags, and the target.
FEATURES = [
    "Page Popularity/likes",
    "Page Checkins’s",
    "Page talking about",
    "Page Category",
    "Derived5",
    "Derived6",
    "Derived7",
    "Derived8",
    "Derived9",
    "Derived10",
    "Derived11",
    "Derived12",
    "Derived13",
    "Derived14",
    "Derived15",
    "Derived16",
    "Derived17",
    "Derived18",
    "Derived19",
    "Derived20",
    "Derived21",
    "Derived22",
    "Derived23",
    "Derived24",
    "Derived25",
    "Derived26",
    "Derived27",
    "Derived28",
    "Derived29",
    "CC1",
    "CC2",
    "CC3",
    'CC4',
    'CC5',
    "Base time",
    "Post length",
    "Post Share Count",
    "Post Promotion Status",
    "H Local",
    "Post published weekday-Sun",
    "Post published weekday-Mon",
    "Post published weekday-Tues",
    "Post published weekday-Weds",
    "Post published weekday-Thurs",
    "Post published weekday-Fri",
    "Post published weekday-Sat",
    "Base DateTime weekday-Sun",
    "Base DateTime weekday-Mon",
    "Base DateTime weekday-Tues",
    "Base DateTime weekday-Wed",
    "Base DateTime weekday-Thurs",
    "Base DateTime weekday-Fri",
    "Base DateTime weekday-Sat",
    "Target_Variable"
]
# Read the data into a DataFrame
# NOTE(review): this assignment assumes df has exactly 54 columns; if the
# CSV was read with pandas' default header, the first data row was consumed
# as the header — verify the file is read with header=None.
df.columns=FEATURES
df.head()
#Note: Dataset is sorted. There is variation in the distributions.
# -
# Determine the shape of the data
print("{} instances with {} columns\n".format(*df.shape))
# ## Test Yellowbrick Covariance Ranking
from yellowbrick.features.rankd import Rank2D
from yellowbrick.features.radviz import RadViz
from yellowbrick.features.pcoords import ParallelCoordinates
# +
# Specify the features of interest
# Used all for testing purposes
features = FEATURES
# Extract the numpy arrays from the data frame.
# FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
# pandas 1.0; .values is the version-stable equivalent.
X = df[features].values
y = df["Base time"].values
# +
# Pairwise feature ranking using covariance, which is scale-dependent.
# Instantiate the visualizer with the Covariance ranking algorithm
visualizer = Rank2D(features=features, algorithm='covariance')
visualizer.fit(X, y)                # Fit the data to the visualizer
visualizer.transform(X)             # Transform the data
visualizer.poof()                   # Draw/show/poof the data
# +
# Same ranking with Pearson correlation, which is scale-independent.
# Instantiate the visualizer with the Pearson ranking algorithm
visualizer = Rank2D(features=features, algorithm='pearson')
visualizer.fit(X, y)                # Fit the data to the visualizer
visualizer.transform(X)             # Transform the data
visualizer.poof()                   # Draw/show/poof the data
# -
# ## Data Extraction
#
# Create a bunch object to store data on disk.
#
# - **data**: array of shape `n_samples` * `n_features`
# - **target**: array of length `n_samples`
# - **feature_names**: names of the features
# - **filenames**: names of the files that were loaded
# - **DESCR**: contents of the readme
#
#
# +
from sklearn.datasets.base import Bunch
# NOTE(review): sklearn.datasets.base is gone in recent scikit-learn
# releases; Bunch is importable from sklearn.utils — confirm the installed
# version.
DATA_DIR = os.path.abspath(os.path.join(".", "..", "pbwitt","data"))
# Show the contents of the data directory
for name in os.listdir(DATA_DIR):
    if name.startswith("."): continue
    print ("- {}".format(name))
def load_data(root=DATA_DIR):
    """Load the Facebook comment-volume dataset from *root* as a Bunch.

    Feature names come from meta.json, the dataset description from
    README.md, and the observations from Features_Variant_1.csv, which has
    no header row: columns 0-52 are features, the last column is the
    target (number of comments in the next H hours).
    """
    filenames = {
        'meta': os.path.join(root, 'meta.json'),
        'rdme': os.path.join(root, 'README.md'),
        'data': os.path.join(root, 'Features_Variant_1.csv'),
    }

    # Feature names live in the 'feature_names' key of the meta json.
    with open(filenames['meta'], 'r') as handle:
        feature_names = json.load(handle)['feature_names']

    # The README doubles as the dataset description (DESCR).
    with open(filenames['rdme'], 'r') as handle:
        description = handle.read()

    # Headerless CSV: split into feature matrix and target vector.
    frame = pd.read_csv(filenames['data'], header=None)
    feature_matrix = np.array(frame.iloc[:, 0:53])
    target_vector = np.array(frame.iloc[:, -1])

    return Bunch(
        data=feature_matrix,
        target=target_vector,
        filenames=filenames,
        feature_names=feature_names,
        DESCR=description
    )
# Save the dataset as a variable we can use.
dataset = load_data()
# Sanity-check the split: data has one row per instance, target one value.
print(dataset.data.shape)
print(dataset.target.shape)
# -
from yellowbrick.regressor import PredictionError, ResidualsPlot
# +
from sklearn import metrics
from sklearn import cross_validation
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.linear_model import Ridge, Lasso
from sklearn.model_selection import KFold
# -
# # Build and Score Regression Models
#
# * Create function -- add parameters for Yellowbrick target visulizations
# * Score models using Mean Absolute Error, Mean Squared Error, Median Absolute Error, R2
def fit_and_evaluate(dataset, model, label, vis, **kwargs):
    """Cross-validate *model* on *dataset*, report scores, pickle a final fit.

    Parameters
    ----------
    dataset : Bunch with ``data`` and ``target`` numpy arrays.
    model : estimator class (e.g. ``Lasso``); instantiated with ``**kwargs``.
    label : str used in the printed report and the pickle filename.
    vis : pass ``'Ridge_vis'`` or ``'Lasso_vis'`` to short-circuit and return
        the first fold's ``[X_train, y_train, X_test, y_test]`` for the
        Yellowbrick visualizers; any other value runs the full evaluation.

    Returns
    -------
    ``None`` for a full run (scores printed, fitted estimator pickled), or
    the list ``[X_train, y_train, X_test, y_test]`` when *vis* requests it.
    """
    start = time.time()  # Start the clock!
    scores = {'Mean Absolute Error:': [], 'Mean Squared Error:': [],
              'Median Absolute Error': [], 'R2': []}
    # BUG FIX: the original used the removed sklearn.cross_validation call
    # style ``KFold(n, n_folds=12, shuffle=True)`` with the imported
    # sklearn.model_selection.KFold, which raises a TypeError.  Use the
    # model_selection API: build the splitter, then iterate over .split().
    kfold = KFold(n_splits=12, shuffle=True)
    for train, test in kfold.split(dataset.data):
        X_train, X_test = dataset.data[train], dataset.data[test]
        y_train, y_test = dataset.target[train], dataset.target[test]
        estimator = model(**kwargs)
        estimator.fit(X_train, y_train)
        expected = y_test
        predicted = estimator.predict(X_test)
        # Early exit: hand the first fold's split to the visualizer code.
        if vis in ('Ridge_vis', 'Lasso_vis'):
            return [X_train, y_train, X_test, y_test]
        scores['Mean Absolute Error:'].append(metrics.mean_absolute_error(expected, predicted))
        scores['Mean Squared Error:'].append(metrics.mean_squared_error(expected, predicted))
        scores['Median Absolute Error'].append(metrics.median_absolute_error(expected, predicted))
        scores['R2'].append(metrics.r2_score(expected, predicted))
    # Report the mean cross-validation scores.
    print("Build and Validation of {} took {:0.3f} seconds".format(label, time.time()-start))
    print("Validation scores are as follows:\n")
    print(pd.DataFrame(scores).mean())
    # Refit on the full dataset and write the official estimator to disk.
    estimator = model(**kwargs)
    estimator.fit(dataset.data, dataset.target)
    outpath = label.lower().replace(" ", "-") + ".pickle"
    with open(outpath, 'wb') as f:
        pickle.dump(estimator, f)
    print("\nFitted model written to:\n{}".format(os.path.abspath(outpath)))
print("Lasso Scores and Visualization Below: \n")
fit_and_evaluate(dataset, Lasso, "Facebook Lasso",'NA')
# Instantiate the linear model and visualizer
lasso = Lasso()
visualizer = PredictionError(lasso)
# BUG FIX: the original called fit_and_evaluate() once per array; with
# shuffle=True each call draws a *different* random split, so the X and y
# handed to the visualizer did not belong together.  Fetch one split and
# reuse it for both fit() and score().
X_tr, y_tr, X_te, y_te = fit_and_evaluate(dataset, Lasso, "Lasso split", 'Lasso_vis')
visualizer.fit(X_tr, y_tr)    # Fit the training data to the visualizer
visualizer.score(X_te, y_te)  # Evaluate the model on the test data
g = visualizer.poof()         # Draw/show/poof the data
# +
# Instantiate the linear model and visualizer
print("Ridge Scores and Target Visualization Below:\n")
fit_and_evaluate(dataset, Ridge, "Facebook Ridge", 'NA')
ridge = Ridge()
visualizer = ResidualsPlot(ridge)
# Same single-split fix as for the Lasso visualizer.
X_tr, y_tr, X_te, y_te = fit_and_evaluate(dataset, Ridge, "Ridge split", 'Ridge_vis')
visualizer.fit(X_tr, y_tr)    # Fit the training data to the visualizer
visualizer.score(X_te, y_te)  # Evaluate the model on the test data
g = visualizer.poof()         # Draw/show/poof the data
# -
fit_and_evaluate(dataset, ElasticNet, "Facebook ElasticNet", 'NA')
| examples/pbwitt/testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# # Softmax exercise
#
# *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
#
# This exercise is analogous to the SVM exercise. You will:
#
# - implement a fully-vectorized **loss function** for the Softmax classifier
# - implement the fully-vectorized expression for its **analytic gradient**
# - **check your implementation** with numerical gradient
# - use a validation set to **tune the learning rate and regularization** strength
# - **optimize** the loss function with **SGD**
# - **visualize** the final learned weights
#
# +
from __future__ import absolute_import, division, print_function
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
import seaborn
# %matplotlib inline
# Set default figure size / interpolation / colormap for all plots below.
plt.rcParams['figure.figsize'] = (10.0, 8.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# +
def get_CIFAR10_data(num_training=49000, num_validation=1000,
                     num_test=1000, num_dev=500):
    """
    Load CIFAR-10 from disk and preprocess it for a linear classifier.

    Produces train / validation / test / dev splits, each flattened to
    row vectors, zero-centred with the training mean image, and extended
    with a trailing bias column of ones.
    """
    # Load the raw CIFAR-10 data.
    cifar10_dir = '../data/cifar10'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # Carve the raw arrays into the four subsamples.
    val_idx = range(num_training, num_training + num_validation)
    X_val, y_val = X_train[val_idx], y_train[val_idx]
    train_idx = range(num_training)
    X_train, y_train = X_train[train_idx], y_train[train_idx]
    test_idx = range(num_test)
    X_test, y_test = X_test[test_idx], y_test[test_idx]
    # Dev set: a small random subsample of the training data.
    dev_idx = np.random.choice(num_training, num_dev, replace=False)
    X_dev, y_dev = X_train[dev_idx], y_train[dev_idx]
    # Flatten each image into a single row vector.
    X_train = X_train.reshape(X_train.shape[0], -1)
    X_val = X_val.reshape(X_val.shape[0], -1)
    X_test = X_test.reshape(X_test.shape[0], -1)
    X_dev = X_dev.reshape(X_dev.shape[0], -1)
    # Zero-centre every split with the training-set mean image.
    mean_image = X_train.mean(axis=0)
    X_train = X_train - mean_image
    X_val = X_val - mean_image
    X_test = X_test - mean_image
    X_dev = X_dev - mean_image
    # Append a bias column of ones so the classifier needs no separate b.
    X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
    X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
    X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
    X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
    return (X_train, y_train, X_val, y_val, X_test,
            y_test, X_dev, y_dev)
# Invoke the above function to get our data.
(X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev) = \
    get_CIFAR10_data()
# Sanity-check the split sizes before moving on.
print('Train data shape:', X_train.shape)
print('Train labels shape:', y_train.shape)
print('Validation data shape:', X_val.shape)
print('Validation labels shape:', y_val.shape)
print('Test data shape:', X_test.shape)
print('Test labels shape:', y_test.shape)
print('dev data shape:', X_dev.shape)
print('dev labels shape:', y_dev.shape)
# -
# ## Softmax Classifier
#
# Your code for this section will all be written inside **cs231n/classifiers/softmax.py**.
#
# +
# First implement the naive softmax loss function with nested loops.
# Open the file cs231n/classifiers/softmax.py and implement the
# softmax_loss_naive function.
from cs231n.classifiers.softmax import softmax_loss_naive
from cs231n.classifiers.softmax import softmax_loss_vectorized
import time
# Generate a small random softmax weight matrix (3073 inputs incl. bias,
# 10 classes) and use it to compute the loss.
W = np.random.randn(3073, 10) * 0.0001
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
# As a rough sanity check, our loss should be something close to
# -log(0.1): with near-zero weights each of the 10 classes gets roughly
# equal probability.
print('loss: {:f}'.format(loss))
print('sanity check: {:f}'.format(-np.log(0.1)))
# -
# ## Inline Question 1:
# Why do we expect our loss to be close to -log(0.1)? Explain briefly.
#
# **Your answer:** *Fill this in*
#
# +
# Complete the implementation of softmax_loss_naive and implement a
# (naive) version of the gradient that uses nested loops.
from cs231n.gradient_check import grad_check_sparse
# Alias so the checks below can easily be re-pointed at either version.
softmax_loss = softmax_loss_naive # djn
# As we did for the SVM, use numeric gradient checking as a
# debugging tool. The numeric gradient should be close to the
# analytic gradient.
print('Gradient check WITHOUT regularization')
print('=====================================')
loss, grad = softmax_loss(W, X_dev, y_dev, 0.0)
f = lambda w: softmax_loss(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
print('')
# similar to SVM case, do another gradient check with regularization
print('Gradient check WITH regularization')
print('==================================')
loss, grad = softmax_loss(W, X_dev, y_dev, 1e2)
f = lambda w: softmax_loss(w, X_dev, y_dev, 1e2)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# +
# Now that we have a naive implementation of the softmax loss
# function and its gradient, implement a vectorized version in
# softmax_loss_vectorized. The two versions should compute the
# same results, but the vectorized version should be much faster.
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print('naive loss: {:e} computed in {:f}s'.format(
    loss_naive, toc - tic))
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(
    W, X_dev, y_dev, 0.00001)
toc = time.time()
print('vectorized loss: {:e} computed in {:f}s'.format(
    loss_vectorized, toc - tic))
# As we did for the SVM, we use the Frobenius norm to compare
# the two versions of the gradient; both differences should be ~0.
grad_difference = np.linalg.norm(grad_naive - grad_vectorized,
                                 ord='fro')
print('Loss difference:', np.abs(loss_naive - loss_vectorized))
print('Gradient difference:', grad_difference)
# +
## [djn] A practice run with softmax: one timed SGD training run with
## hand-picked hyperparameters.
from cs231n.classifiers import Softmax
softmax = Softmax()
tic = time.time()
loss_hist = softmax.train(X_train, y_train,
                          learning_rate=1e-7, reg=5e4,
                          num_iters=1500, verbose=True)
toc = time.time()
print('That took {:f}s'.format(toc - tic))
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the Softmax.predict function and evaluate the performance
# on both the training and validation set
y_train_pred = softmax.predict(X_train)
print('training accuracy: {:f}'.format(
    np.mean(y_train == y_train_pred)))
y_val_pred = softmax.predict(X_val)
print('validation accuracy: {:f}'.format(
    np.mean(y_val == y_val_pred)))
# +
# Use the validation set to tune hyperparameters (regularization
# strength and learning rate). You should experiment with different
# ranges for the learning rates and regularization strengths;
# if you are careful you should be able to get a classification
# accuracy of over 0.35 on the validation set.
from cs231n.classifiers import Softmax
results = {}         # maps (lr, reg) -> (train_accuracy, val_accuracy)
best_val = -1        # best validation accuracy seen so far
best_softmax = None  # classifier achieving best_val
# FIX: the original set num_iters = 600 and then silently shadowed it
# with n_iters = 800 further down; keep one variable holding the value
# that was actually used for training.
num_iters = 800
#learning_rates = [1e-7, 5e-7]
#regularization_strengths = [5e4, 1e8]
#learning_rates = 10 ** (0.5 * np.random.rand(10) - 6.5)
#regularization_strengths = 10 ** (0.8 * np.random.rand(10) + 2.5)
learning_rates = 10 ** (np.random.rand(5) - 6.5)
regularization_strengths = 10 ** (2 * np.random.rand(5) + 3)
num_lr = len(learning_rates)
num_reg = len(regularization_strengths)
num_pts = num_lr * num_reg
####################################################################
# Grid-search every (learning rate, regularization) combination,   #
# record train/validation accuracy for each, and keep the          #
# classifier with the best validation accuracy in best_softmax.    #
####################################################################
import itertools
combinations = itertools.product(learning_rates,
                                 regularization_strengths)
count = 0
for lr, reg in combinations:
    np.random.seed(4023)  # Keep W initialization invariant across runs
    softmax = Softmax()
    softmax.train(X_train, y_train, learning_rate=lr, reg=reg,
                  num_iters=num_iters)
    y_train_pred = softmax.predict(X_train)
    y_val_pred = softmax.predict(X_val)
    train_accuracy = np.mean(y_train == y_train_pred)
    val_accuracy = np.mean(y_val == y_val_pred)
    results[(lr, reg)] = (train_accuracy, val_accuracy)
    if val_accuracy > best_val:
        best_val = val_accuracy
        best_softmax = softmax
    count += 1
    print('[{}/{}] (lr={}, reg={}) val_acc={}'.format(
        count, num_pts, lr, reg, val_accuracy))
print('best validation accuracy achieved during cross-validation:',
      best_val)
# Visualize the cross-validation results as two scatter plots in
# log-learning-rate / log-regularization space.
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
cm = plt.cm.viridis #[djn] colormap
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size,
            c=colors, cmap=cm)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results]
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size,
            c=colors, cmap=cm)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# -
# Evaluate the best softmax (selected on validation) on the held-out test set
y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('softmax on raw pixels final test set accuracy:',
      test_accuracy)
# +
# Visualize the learned weights for each class
w = best_softmax.W[:-1,:] # strip out the bias row appended by get_CIFAR10_data
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']
# FIX: use range() instead of the Python-2-only xrange() so this cell also
# runs on Python 3, consistent with the __future__ imports at the top of
# the notebook (behavior is identical for iterating 10 indices).
for i in range(10):
    plt.subplot(2, 5, i + 1)
    # Rescale the weights to be between 0 and 255
    wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) \
        / (w_max - w_min)
    plt.imshow(wimg.astype('uint8'))
    plt.axis('off')
    plt.title(classes[i])
# -
| assignment1/softmax.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "skip"}
from IPython.core.display import HTML
HTML("<style>.container { width:95% !important; }</style>")
# -
# # Lecture 7, direct methods for constrained optimization
# + [markdown] slideshow={"slide_type": "slide"}
# ## Structure of optimization methods
#
# Typically
#
# * Constraint handling **converts** the problem to (a series of) unconstrained problems
# * In unconstrained optimization a **search direction** is determined at each iteration
# * The best solution in the search direction is found with **line search**
#
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Classification of the constraint optimization methods
#
# * **Indirect methods:** the constrained problem is converted into a sequence of unconstrained problems whose solutions will approach to the solution of the constrained problem, the intermediate solutions need not to be feasible
#
# * **Direct methods:** the constraints are taking into account explicitly, intermediate solutions are feasible
# + [markdown] slideshow={"slide_type": "slide"}
# # Direct methods for constrained optimization
# + [markdown] slideshow={"slide_type": "-"}
# Direct methods for constrained optimization are also known as *methods of feasible directions*
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Idea
#
# * in a point $x_k$, generate a feasible search direction where objective function value can be improved
# * use line search to get $x_{k+1}$
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Methods differ in
#
# * how to choose a feasible direction and
# * what is assumed from the constraints (linear/nonlinear, equality/inequality)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Feasible descent directions
# + [markdown] slideshow={"slide_type": "-"}
# Let $S\subset \mathbb R^n$ ($S\neq \emptyset$) and $x^*\in S$.
#
# **Definition:** The set
# $$ D = \{d\in \mathbb R^n: d\neq0,x^*+\alpha d\in S \text{ for all } \alpha\in (0,\delta) \text{ for some } \delta>0\}$$
#
# is called the cone of feasible directions of $S$ in $x^*$.
# + [markdown] slideshow={"slide_type": "fragment"}
# **Definition:** The set
# $$ F = \{d\in \mathbb R^n: f(x^*+\alpha d)<f(x^*)\text{ for all } \alpha\in (0,\delta) \text{ for some } \delta>0\}$$
# is called the cone of descent directions.
# + [markdown] slideshow={"slide_type": "subslide"}
# **Definition:** The set $F\cap D$ is called the cone of feasible descent directions.
#
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# **(Obvious) Theorem:** Consider an optimization problem
# $$
# \begin{align}
# \min &\ f(x)\\
# \text{s.t. }&\ x\in S
# \end{align}
# $$
# and let $x^*\in S$. Now if $x^*$ is a local minimizer **then** the set of feasible descent directions $F\cap D$ is empty.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Idea for the methods of feasible descent directions
#
# 1. Find a feasible solution $x_0$ as the starting point ($k=0$).
# 2. Find a feasible descent direction $d_k\in D\cap F$.
# 3. Determine the step length ($\alpha_k$) to the direction $d_k$ (Use line search to find an optimal step length).
# 4. Update $x$ accordingly ($x_{k+1} = x_k + \alpha_k d_k$).
# 5. Check convergence. If not converged, set $k = k+1$ and go to 2.
# + [markdown] slideshow={"slide_type": "slide"}
# # Rosen's projected gradient method
# -
# Assume a problem with linear equality constraints
#
# $$
# \min f(x)\\
# \text{s.t. } H(x)=Ax-b=0,
# $$
#
# where $A$ is a $l\times n$ matrix ($l\leq n$) and $b$ is a vector.
# + [markdown] slideshow={"slide_type": "subslide"}
# Let $\mathbf{x}$ be a feasible solution to the above problem.
#
# It holds that:
#
# $$
# \mathbf{Ax}=b \\
# \rightarrow \mathbf{A}(\mathbf{x} + \alpha \mathbf{d}) = b \\
# \rightarrow \mathbf{Ax} + \alpha \mathbf{Ad} = b \\
# \rightarrow b + \alpha \mathbf{Ad} = b
# $$
#
# Then, $\mathbf{d}$ is a feasible direction *if and only if* $\mathbf{Ad}=0$
# + [markdown] slideshow={"slide_type": "fragment"}
# Thus, the gradient $-\nabla f(x)$ is a feasible descent direction, if
#
# $$ A\nabla f(x)=0.$$
#
# This may or may not be true (i.e. the gradient may or may not be a feasible descent direction).
# + [markdown] slideshow={"slide_type": "subslide"}
# However, we can project the gradient to the set of feasible descent directions
# $$ \{d\in \mathbb R^n: Ad=0\},$$
# which now is a linear subspace.
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### Projection
#
# Let $a\in \mathbb R^n$ be a vector and let $L$ be a linear subspace of $\mathbb R^n$. Now, the following are equivalent
# * $a^P$ is the projection of $a$ on $L$,
# * $\{a^P\} = \operatorname{argmin}_{l\in L}\|a-l\|$, and
#
# -
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Projected gradient
# -
# The projection of the gradient $\nabla f(x)$ on the set $\{d\in \mathbb R^n: Ad=0\}$ is denoted by $\nabla f(x)^P$ and called the *projected gradient*.
#
# Now, given some conditions, the projected gradient gives us a feasible descent direction.
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# ## How to compute the projected gradient?
# -
# There are different ways, but at this course we can use optimization. Basically, the optimization problem that we have to solve is
# $$
# \min \|\nabla f(x)-d\|\\
# \text{s.t. }Ad=0.
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# Since it is equivalent to minimize the square of the objective function $\sum_{i=1}^n\left(\nabla_i f(x)^2+d_i^2-2\nabla_i f(x)d_i\right)$, we can see that the problem is a quadratic problem with equality constraints,
# $$
# \min \frac12 d^TId-\nabla f(x)^Td\\
# \text{s.t. }Ad=0
# $$
# which means that we just need to solve the system of equations (see e.g., https://en.wikipedia.org/wiki/Quadratic_programming#Equality_constraints)
#
# $$
# \left[
# \begin{array}{cc}
# I&A^T\\
# A&0
# \end{array}
# \right]
# \left[\begin{align}d\\\lambda\end{align}\right]
# = \left[
# \begin{array}{c}
# \nabla f(x)\\
# 0
# \end{array}
# \right],
# $$
#
# where $I$ is the identity matrix, and $\lambda$ are the KKT multipliers.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Code in Python
# -
# #### A function for projecting a vector to a linear space defined by $Ax=0$.
import numpy as np
#help(np.linalg.solve)
import numpy as np
def project_vector(A, vector):
    """Project *vector* onto the null space {d : A d = 0} of A.

    Solves the KKT system of the equality-constrained QP
    min ||vector - d||  s.t.  A d = 0 (see the markdown above):

        [[I, A^T], [A, 0]] [d; lambda] = [vector; 0]

    Parameters
    ----------
    A : array-like, shape (m, n)
        Constraint matrix defining the subspace A d = 0.
    vector : array-like of length n
        The vector to be projected.

    Returns
    -------
    numpy.ndarray, shape (n, 1)
        The projection d as a column vector.
    """
    # FIX: np.matrix is deprecated in NumPy; use plain ndarrays instead.
    A = np.asarray(A, dtype=float)
    v = np.asarray(vector, dtype=float).reshape(-1, 1)
    n = v.shape[0]
    m = A.shape[0]
    # Assemble the KKT matrix [[I, A^T], [A, 0]] block by block.
    top = np.concatenate((np.identity(n), A.T), axis=1)
    bottom = np.concatenate((A, np.zeros((m, m))), axis=1)
    kkt = np.concatenate((top, bottom), axis=0)
    rhs = np.concatenate((v, np.zeros((m, 1))), axis=0)
    # The first n entries of the solution are d; the rest are the
    # KKT multipliers, which we discard.
    return np.linalg.solve(kkt, rhs)[:n]
# Example: Project gradient such that A*proj_gradient = 0
A = [[1,0,0],[0,1,0]]
gradient = [1,1,1]
# The constraints force the first two components to zero, so the
# projection should be the column vector (0, 0, 1).
project_vector(A,gradient)
# + [markdown] slideshow={"slide_type": "subslide"}
# # Example
# -
# Let us study optimization problem
# $$
# \begin{align}
# \min \qquad& x_1^2+x_2^2+x_3^2\\
# \text{s.t.}\qquad &x_1+x_2=3\\
# &x_1+x_3=4.
# \end{align}
# $$
#
# Let us project a negative gradient from a feasible point $x=(1,2,3)$
# Now, the matrix
# $$
# A = \left[
# \begin{array}{ccc}
# 1& 1 & 0\\
# 1& 0 & 1
# \end{array}
# \right]
# $$.
import ad
# Constraint matrix of the equality constraints x1+x2=3 and x1+x3=4.
A = [[1,1,0],[1,0,1]]
# ad.gh returns (gradient_fn, hessian_fn); evaluate the gradient at (1,2,3).
gradient = ad.gh(lambda x:x[0]**2+x[1]**2+x[2]**2)[0]([1,2,3])
print(gradient)
# Project the *negative* gradient onto the feasible directions {d: Ad = 0}.
d = project_vector(A,[-i for i in gradient])
print(d)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### d is a feasible direction
# -
# A*d should be (numerically) zero, confirming d is a feasible direction.
np.matrix(A)*d
# ### d is a descent direction
def f(x):
    # Objective of the example problem: squared Euclidean norm of x.
    return x[0]**2+x[1]**2+x[2]**2
alpha = 0.001
print("Value of f at [1,2,3] is "+str(f([1,2,3])))
# Take a small step along d; the objective value should decrease.
x_mod= np.array([1,2,3])+alpha*np.array(d).transpose()[0]
print(x_mod)
print("Value of f at [1,2,3] +alpha*d is "+str(f(x_mod)))
# A negative directional derivative confirms d is a descent direction.
print("Gradient dot product direction (i.e., directional derivative) is " \
+ str(np.matrix(ad.gh(f)[0]([1,2,3])).dot(np.array(d))))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Finally, the algorithm of the projected gradient
# + slideshow={"slide_type": "-"}
import numpy as np
import ad
def projected_gradient_method(f,A,start,step,precision):
    """Minimize f subject to linear equality constraints with Rosen's
    projected gradient method, using a fixed step length.

    Parameters:
        f: objective function taking a point (sequence of floats).
        A: constraint matrix; directions are projected onto {d: A d = 0}.
        start: feasible starting point (must satisfy the constraints).
        step: fixed step length applied every iteration.
            NOTE(review): the slides call for an optimal line search here;
            a fixed step is used instead, so convergence is not guaranteed
            for too-large steps.
        precision: stop when successive objective values differ by less
            than this amount.

    Returns:
        (x, f_new, steps, iters): final point, its objective value, the
        list of visited points, and the iteration count.
    """
    f_old = float('Inf')
    x = np.array(start)
    steps = []
    f_new = f(x)
    iters = 0
    while abs(f_old-f_new)>precision:
        # store the current function value
        f_old = f_new
        # compute gradient (ad.gh returns [gradient_fn, hessian_fn])
        gradient = ad.gh(f)[0](x)
        # project the negative gradient onto the feasible directions Ad=0
        d = project_vector(A,[-i for i in gradient])
        # flatten the (n, 1) column into a row so broadcasting works below
        d = d.reshape(1,-1)
        # take a step of fixed length along the projected direction
        x = np.array(x + step*d)[0]
        # compute f at the new point
        f_new = f(x)
        # record new step
        steps.append(x)
        # update iterations counter
        iters = iters + 1
    return x,f_new,steps,iters
# + slideshow={"slide_type": "subslide"}
f = lambda x:x[0]**2+x[1]**2+x[2]**2
A = [[1,1,0],[1,0,1]]
start = [1,2,3]
# Fixed step 0.6, stop when the objective improves by less than 1e-6.
(x,f_val,steps,iters) = projected_gradient_method(f,A,start,0.6,0.000001)
# -
# Final point, its objective value, the starting objective value, the
# constraint residual A x (should still equal (3, 4)^T since every step
# stays feasible), and the iteration count.
print(x)
print(f(x))
print(f([1,2,3]))
print(np.matrix(A)*np.matrix(x).transpose())
print(iters)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Note
# If there are both linear equality and inequality constraints, the projection matrix does not remain the same
# * At each iteration, it includes only the equality and active inequality constraints
# + [markdown] slideshow={"slide_type": "slide"}
# # Active set method
# Consider a problem
# $$
# \min f(x)\\
# \text{s.t. }Ax\leq b,
# $$
# where $A$ is a $l\times n$ matrix ($l\leq n$) and $b$ is a vector.
#
# ## Idea
# * In $𝑥_k$, the set of constraints is divided into active ($𝑖 ∈ 𝐼$) and inactive constraints
# * Inactive constraints are not taken into account when the search direction $𝑑_k$ is determined
# * Inactive constraints affect only when computing the optimal step length $\alpha_k$
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Feasible directions
# * For $𝑖\in 𝐼$ , $(𝑎_i)^Tx_k = b_i$
# * If $𝑑_k$ is feasible in $𝑥_k$, then $𝑥_k + \alpha 𝑑_k \in 𝑆$ for some $\alpha > 0$
# * $(𝑎_i)^T(x_k+\alpha d_k) = (a_i)^Tx_k + \alpha(a_i)^Td_k\leq b_i$
# * $(𝑎_i)^Td_k\leq 0$ for feasible $𝑑_k$ and the constraint remains active if $(𝑎_i)^Td_k=0$
# + [markdown] slideshow={"slide_type": "subslide"}
# ## On active constraints
# * Optimization problem with inequality constraints is more difficult than problem with equality constraints since the active set in a local minimizer is not known
# * If it would be known, then it would be enough to solve a corresponding equality constrained problem
# * In that case, if the other constraints would be satisfied in the solution and all the Lagrange multipliers were non-negative, then the solution would also be a solution to the original problem
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Using active set
# * At each iteration, a working set is considered which consists of the active constraints in $𝑥_k$
# * The direction $𝑑_k$ is determined so that it is a descent direction in the working set
# * E.g. Rosen’s projected gradient method can be used
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Active set algorithm
# 1. Choose a starting point $x_1$ and determine an initial active set $I_1$ and set $k=1$
# 2. Compute a feasible descent direction $d_k$ in the subspace defined by the active constraints (e.g., by using projected gradient)
# 3. If $||d_k||=0$, go to step 6, otherwise, find optimal step length $\alpha$ by staying in the feasible set and set $x_{k+1} = x_k + \alpha d_k$
# 4. If no new constraint becomes active go to step 7 (active set does not change)
# 5. Addition to active set: a new constraint $j$ becomes active, update $I_{k+1} = I_k \cup \{j\}$ and go to step 7
# 6. Removal from active set: approximate Lagrangian multipliers $\mu_i$, $i\in I_k$. If $\mu_i\geq 0$ for all $i$, stop (active set is correct). Otherwise, remove a constraint $j$ with negative multiplier from the active set: $I_{k+1}=I_k\setminus \{j\}$
# 7. Set $k=k+1$ and go to step 2
#
# <i>Implementation of the active set method is left as a voluntary exercise </i>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Note:
#
# * Projected gradient method can also extended for non-linear constraints.
# * But, this needs some extra steps
#
# 
| Lecture 7, Direct methods for constrained optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
from mmdet3d.apis import init_detector, inference_detector, show_result_meshlab
# + pycharm={"is_executing": false}
# PointPillars 3-class KITTI config from the mmdetection3d config tree.
config_file = '../configs/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class.py'
# download the checkpoint from model zoo and put it in `checkpoints/`
checkpoint_file = '../../../checkpoints/3d/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class_20200620_230421-aa0f3adb.pth'
# + pycharm={"is_executing": false}
# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')
# + pycharm={"is_executing": false}
# test a single sample (KITTI point cloud in .bin format)
pcd = 'kitti_000008.bin'
result, data = inference_detector(model, pcd)
# + pycharm={"is_executing": false}
# show the results -- writes meshlab-compatible output into out_dir
out_dir = './'
show_result_meshlab(data, result, out_dir)
# -
# Inspect the raw detection results and the preprocessed input data.
result
data
| demo/inference_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# language: python
# name: python3
# ---
# # Predicting Heart Disease
# <p>This project aims to build a model that will serve in predicting heart disease. It will go through all the steps of a Data Science project, complete with notes in which I will discuss each decision taken.</p>
# **Note: This project was built with python 3.8.10**
# <p> <b>Notebooks content</b> </p>
#
# 1. <b>Initial Data Exploration</b>
# 2. Data Pre-processing
# 3. Model Design
# 4. Final Discussion
#
# ---
# ## 1. Initial Data Exploration
# ### Features
# * **Age**: age of the patient [years]
# * **Sex**: sex of the patient [M: Male, F: Female]
# * **ChestPainType**: chest pain type [TA: Typical Angina, ATA: Atypical Angina, NAP: Non-Anginal Pain, ASY: Asymptomatic]
# * **RestingBP**: resting blood pressure [mm Hg]
# * **Cholesterol**: serum cholesterol [mg/dl]
# * **FastingBS**: fasting blood sugar [1: if FastingBS > 120 mg/dl, 0: otherwise]
# * **RestingECG**: resting electrocardiogram results [Normal: Normal, ST: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV), LVH: showing probable or definite left ventricular hypertrophy by Estes' criteria]
# * **MaxHR**: maximum heart rate achieved [Numeric value between 60 and 202]
# * **ExerciseAngina**: exercise-induced angina [Y: Yes, N: No]
# * **Oldpeak**: oldpeak = ST [Numeric value measured in depression]
# * **ST_Slope**: the slope of the peak exercise ST segment [Up: upsloping, Flat: flat, Down: downsloping]
# * **HeartDisease**: output class [1: heart disease, 0: Normal]
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load the heart-disease dataset described in the feature list above.
df = pd.read_csv('../data/heart.csv')
df.head()
df.info()
# #### Independent Variables
# +
# Plot distributions of the categorical variables, split by the target class
# (HeartDisease) to eyeball class separation per category.
figure, axes = plt.subplots(2,3,figsize=(30,15), sharey=True)
sns.set_theme(style='dark')
sns.set(font_scale=1.5)
axs = []
axs.append(sns.countplot(data=df, x="Sex", hue="HeartDisease",ax=axes[0][0], palette='Blues_r'))
axs.append(sns.countplot(data=df, x="ChestPainType", hue="HeartDisease", ax=axes[0][1], palette='Blues_r'))
axs.append(sns.countplot(data=df, x="FastingBS", hue="HeartDisease", ax=axes[0][2], palette='Blues_r'))
axs.append(sns.countplot(data=df, x="RestingECG",hue="HeartDisease", ax=axes[1][0], palette='Blues_r'))
axs.append(sns.countplot(data=df, x="ExerciseAngina", hue="HeartDisease", ax=axes[1][1], palette='Blues_r'))
axs.append(sns.countplot(data=df, x="ST_Slope", hue="HeartDisease", ax=axes[1][2], palette='Blues_r'))
# annotate every bar with its raw count, slightly above the bar top
for ax in axs:
    for p in ax.patches:
        ax.annotate('{:.0f}'.format(p.get_height()), (p.get_x(), p.get_height()+1.5))
# +
# Plot distributions of the numerical variables, overlaid per target class.
figure, axes = plt.subplots(2, 2, sharex=False, figsize=(30,15))
sns.histplot(data=df, x = "Age", kde=True, hue="HeartDisease", multiple="layer", ax=axes[0,0], palette='winter')
sns.histplot(data=df, x = "RestingBP", kde=True, hue="HeartDisease", multiple="layer", ax=axes[0,1], palette='winter')
sns.histplot(data=df, x = "Cholesterol", kde=True, hue="HeartDisease", multiple="layer", ax=axes[1,0], palette='winter')
sns.histplot(data=df, x = "MaxHR", kde=True, hue="HeartDisease", multiple="layer", ax=axes[1,1], palette='winter')
# Sanity checks for physiologically implausible values (discussed in the Notes section).
print(f"Number of data entries with zero Cholesterol : {df.Cholesterol[df.Cholesterol == 0].count()}")
print(f"Number of data entries with RestingBP lower than 75 : {df.RestingBP[df.RestingBP < 75].count()}")
# -
sns.pairplot(data=df, hue="HeartDisease", palette="winter")
# #### Dependent Variable
# Class balance of the target.
# BUG FIX: the labels were swapped — index 0 of `data` counts rows with
# HeartDisease == 0 (healthy) and index 1 counts HeartDisease == 1, but the
# original labeled them ["Heart Disease", "Healthy"].
plt.figure(figsize=(10,10))
colors = sns.color_palette('Paired')
data = [df[df["HeartDisease"] == 0].count()[0], df[df["HeartDisease"] == 1].count()[0]]
plt.pie(data, colors=colors, labels = ["Healthy", "Heart Disease"], autopct = '%0.0f%%',
        textprops = {'color': 'Black','fontsize':50},)
# ### Notes
# * There are no empty (Null) values, so none of the entries have to be removed for this cause.
# * The distributions of the sexes seem to be uneven, with the vast majority of subjects represented in the dataset being men: fewer than 200 females and over 700 males. This can lead to several problems:
# * The model may be more likely to predict Heart Disease if the patient is male. This would be due to the sample in the dataset not representing the population accurately, with vastly more males than females.
#     * The dataset seems to suggest that there are far fewer females who suffer from heart disease than those who do not. In reality this may not be true. The current sample size (fewer than 200 females) may just not be large enough to represent the population. For all intents and purposes, we simply do not know. This also could apply to the male sample size; however, with over 700 entries, the risk of misrepresentation is far smaller.
# <p></p>
#
# * The Cholesterol histogram reveals a potential problem in the dataset: There are 172 data entries in which Cholesterol is equal to 0. According to a [MedicalNewsToday article](https://www.medicalnewstoday.com/articles/321519#what-is-it), the total serum cholesterol is the sum of HDL cholesterol, LDL cholesterol and 20% triglycerides present in the blood sample. Considering the fact that the liver already produces the necessary amount of cholesterol for the human body to function correctly (source: same website), a total serum cholesterol of 0 is highly unlikely. As such, these values will be considered invalid.
# * The Resting Blood pressure histogram appears to have one outlier, specifically one entry for which the resting blood pressure is 0. Of course, a person with resting blood pressure of 0 is either dead or a fantastic outlier. More likely, however, this value is invalid, therefore it will be considered as such.
# * Apart from the mentioned issues, the categorical distributions seem to be appropriate, and the numerical distributions seem to follow a normal distribution.
| notebooks/1-initial-data-exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R [conda env:miniconda3-so-co2-R]
# language: R
# name: conda-env-miniconda3-so-co2-R-r
# ---
# # Filter airborne Medusa data
# - R Program to filter aircraft data for strong local continental influences, subtract off NOAA in situ SPO, and write out flat text files
# + tags=[]
# netCDF access (for the SPO in situ record) and YAML (project configuration)
library('ncdf4')
library('yaml')
# + tags=[]
# Resolve the observation scratch directory from the project config, replacing
# the "{{env['USER']}}" placeholder with the current user name.
project_tmpdir_obs = read_yaml('../_config_calc.yml')$project_tmpdir_obs
username = Sys.info()['user']
project_tmpdir_obs = gsub('\\{\\{env\\[\'USER\'\\]\\}\\}', username, project_tmpdir_obs)
# -
# read in preprocessed aircraft files from read_aircraft_med.r
# (each .RData file provides a merged data frame: hippomerge / orcasmerge / atommerge)
load('HIPPO_MED.RData')
load('ORCAS_MED.RData')
load('ATom_MED.RData')
# calculate datetime variables (UTC) from the per-row date components
hippodt=ISOdatetime(hippomerge$year,hippomerge$mon,hippomerge$day,hippomerge$hour,hippomerge$min,hippomerge$sec,tz='UTC')
orcasdt=ISOdatetime(orcasmerge$year,orcasmerge$mon,orcasmerge$day,orcasmerge$hour,orcasmerge$min,orcasmerge$sec,tz='UTC')
atomdt=ISOdatetime(atommerge$year,atommerge$mon,atommerge$day,atommerge$hour,atommerge$min,atommerge$sec,tz='UTC')
# read in NOAA in situ record from SPO; CO2 scaled by 1E6 (mole fraction -> ppm)
sponc=nc_open(paste(project_tmpdir_obs,'/obspack_co2_1_GLOBALVIEWplus_v6.0_2020-09-11/data/nc/co2_spo_surface-insitu_1_allvalid.nc',sep=''))
spoco2=data.frame(cbind(ncvar_get(sponc,'time_decimal'),t(ncvar_get(sponc,'time_components')),ncvar_get(sponc,'value')*1E6)) ; colnames(spoco2)=c('date','year','mon','day','hour','min','sec','co2')
# mask samples whose first or second QC-flag character is not '.' (flagged data -> NA)
qcflag=ncvar_get(sponc,'qcflag'); spoco2$co2[substr(qcflag,1,1)!='.']=NA; spoco2$co2[substr(qcflag,2,2)!='.']=NA
spodt=ISOdatetime(spoco2$year,spoco2$mon,spoco2$day,spoco2$hour,spoco2$min,spoco2$sec,tz='UTC')
# # HIPPO
# filter: drop any observation falling inside a [start, stop] interval listed in
# hippo_xsect_filt_datetime.txt (flagged periods of local continental influence)
ints=read.table(paste(project_tmpdir_obs,'/hippo_xsect_filt_datetime.txt',sep=''),header=T)
startdt=ISOdatetime(ints$startyear,ints$startmon,ints$startday,ints$starthour,ints$startmin,ints$startsec,tz='UTC')
stopdt=ISOdatetime(ints$stopyear,ints$stopmon,ints$stopday,ints$stophour,ints$stopmin,ints$stopsec,tz='UTC')
blfilt=rep(T,nrow(hippomerge))
for(i in c(1:nrow(ints))){
blfilt[difftime(hippodt,startdt[i])>=0&difftime(hippodt,stopdt[i])<=0]=F
}
hippodt=hippodt[blfilt]
hippomerge=hippomerge[blfilt,]
print(paste('Filtered ',sum(!blfilt),' of ',length(blfilt),' HIPPO obs (',round(sum(!blfilt)/length(blfilt)*100,1),'%)',sep=''))
# calculate differences: linearly interpolate the SPO record to each aircraft
# timestamp and subtract; also difference against the other onboard sensors.
# NOTE(review): co2mspo is rounded to 3 decimals here but 2 in the ORCAS/ATom
# sections below -- confirm whether this asymmetry is intentional.
hippomerge$co2mspo=round(hippomerge$co2-approx(as.POSIXct(spodt),spoco2$co2,as.POSIXct(hippodt))$y,3) ## co2 = 'CO2_MED'
hippomerge$co2mqcls=round(hippomerge$co2-hippomerge$co2qcls,3)
hippomerge$co2moms=round(hippomerge$co2-hippomerge$co2oms,3)
hippomerge$co2mao2=round(hippomerge$co2-hippomerge$co2ao2,3)
# +
# write out: header line first, then the data matrix row by row (t() because
# write() fills column-wise)
write(names(hippomerge),'../data/aircraft-obs/HIPPO_SO_mSPO_medusa.txt',ncol=ncol(hippomerge))
write(t(hippomerge),'../data/aircraft-obs/HIPPO_SO_mSPO_medusa.txt',ncol=ncol(hippomerge),append=T)
print(apply(!is.na(hippomerge),2,sum))
# -
# # ORCAS
# filter: same interval-based exclusion as the HIPPO section, using the
# ORCAS-specific interval file
ints=read.table(paste(project_tmpdir_obs,'/orcas_xsect_filt_datetime.txt',sep=''),header=T)
startdt=ISOdatetime(ints$startyear,ints$startmon,ints$startday,ints$starthour,ints$startmin,ints$startsec,tz='UTC')
stopdt=ISOdatetime(ints$stopyear,ints$stopmon,ints$stopday,ints$stophour,ints$stopmin,ints$stopsec,tz='UTC')
blfilt=rep(T,nrow(orcasmerge))
for(i in c(1:nrow(ints))){
blfilt[difftime(orcasdt,startdt[i])>=0&difftime(orcasdt,stopdt[i])<=0]=F
}
orcasdt=orcasdt[blfilt]
orcasmerge=orcasmerge[blfilt,]
print(paste('Filtered ',sum(!blfilt),' of ',length(blfilt),' ORCAS obs (',round(sum(!blfilt)/length(blfilt)*100,1),'%)',sep=''))
# calculate differences vs interpolated SPO and vs the other onboard sensors
orcasmerge$co2mspo=round(orcasmerge$co2-approx(as.POSIXct(spodt),spoco2$co2,as.POSIXct(orcasdt))$y,2) ## co2 = 'CO2_MED'
orcasmerge$co2mqcls=round(orcasmerge$co2-orcasmerge$co2qcls,3)
orcasmerge$co2mx=round(orcasmerge$co2-orcasmerge$co2x,3)
orcasmerge$co2mnoaa=round(orcasmerge$co2-orcasmerge$co2noaa,3)
orcasmerge$co2mao2=round(orcasmerge$co2-orcasmerge$co2ao2,3)
# +
# write out: header first, then the transposed data matrix
write(names(orcasmerge),'../data/aircraft-obs/ORCAS_SO_mSPO_medusa.txt',ncol=ncol(orcasmerge))
write(t(orcasmerge),'../data/aircraft-obs/ORCAS_SO_mSPO_medusa.txt',ncol=ncol(orcasmerge),append=T)
print(apply(!is.na(orcasmerge),2,sum))
# -
# # ATom
# filter: same interval-based exclusion as above, using the ATom interval file
ints=read.table(paste(project_tmpdir_obs,'/atom_xsect_filt_datetime.txt',sep=''),header=T)
startdt=ISOdatetime(ints$startyear,ints$startmon,ints$startday,ints$starthour,ints$startmin,ints$startsec,tz='UTC')
stopdt=ISOdatetime(ints$stopyear,ints$stopmon,ints$stopday,ints$stophour,ints$stopmin,ints$stopsec,tz='UTC')
blfilt=rep(T,nrow(atommerge))
for(i in c(1:nrow(ints))){
blfilt[difftime(atomdt,startdt[i])>=0&difftime(atomdt,stopdt[i])<=0]=F
}
atomdt=atomdt[blfilt]
atommerge=atommerge[blfilt,]
print(paste('Filtered ',sum(!blfilt),' of ',length(blfilt),' ATom obs (',round(sum(!blfilt)/length(blfilt)*100,1),'%)',sep=''))
# calculate differences vs interpolated SPO and vs the other onboard sensors
atommerge$co2mspo=round(atommerge$co2-approx(as.POSIXct(spodt),spoco2$co2,as.POSIXct(atomdt))$y,2) ## co2 = 'CO2_MED'
atommerge$co2mqcls=round(atommerge$co2-atommerge$co2qcls,3)
atommerge$co2mao2=round(atommerge$co2-atommerge$co2ao2,3)
atommerge$co2mnoaa=round(atommerge$co2-atommerge$co2noaa,3)
# +
# write out: header first, then the transposed data matrix
write(names(atommerge),'../data/aircraft-obs/ATOM_SO_mSPO_medusa.txt',ncol=ncol(atommerge))
write(t(atommerge),'../data/aircraft-obs/ATOM_SO_mSPO_medusa.txt',ncol=ncol(atommerge),append=T)
print(apply(!is.na(atommerge),2,sum))
# -
| so-co2-airborne-obs/_prestage-obs/old-split-ipynb/aircraft_filter_mspo_med.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4
# language: python
# name: python-374
# ---
# # iPython Profiler
#
# IPython bietet Zugriff auf eine breite Palette von Funktionen um die Zeiten zu messen und Profile zu erstellen. Hier werden die folgenden magischen IPython-Befehle erläutert:
#
# | Befehl | Beschreibung |
# | -------------- | --------------------------------------------------------------------------------- |
# | `%time` | Zeit für die Ausführung einer einzelnen Anweisung |
# | `%timeit` | Durchschnittliche Zeit für die wiederholte Ausführung einer einzelnen Anweisung |
# | `%prun` | Code mit dem Profiler ausführen |
# | `%lprun` | Code mit dem zeilenweisen Profiler ausführen |
# | `%memit` | Messen der Speichernutzung einer einzelnen Anweisung |
# | `%mprun` | Führt den Code mit dem zeilenweisen Memory-Profiler aus |
#
# Die letzten vier Befehle sind nicht in IPython selbst, sondern in den Modulen [line_profiler](https://github.com/pyutils/line_profiler) und [memory_profiler](https://github.com/pythonprofilers/memory_profiler) enthalten.
# ## `%timeit` und `%time`
#
# Wir haben die `%timeit`Zeilen- und `%%timeit`-Zellmagie bereits in der Einführung der magischen Funktionen in IPython Magic Commands gesehen. Sie können für Zeitmessungen bei der wiederholten Ausführung von Code-Schnipseln verwendet werden:
# %timeit sum(range(100))
# Beachtet, dass `%timeit` die Ausführung mehrfach in einer Schleife (`loops`) ausführt. Wenn mit `-n` nicht die Anzahl der Schleifen festgelegt wird, passt `%timeit` die Anzahl automatisch so an, dass ein ausreichende Messgenauigkeit erreicht wird:
# %%timeit
# alternating-sign double loop; %%timeit times the whole cell body
total = 0
for i in range(1000):
    for j in range(1000):
        total += i * (-1) ** j
# Manchmal ist das Wiederholen einer Operation nicht die beste Option, z.B. wenn wir eine Liste haben, die wir sortieren möchten. Hier werden wir möglicherweise durch eine wiederholte Operation in die Irre geführt. Das Sortieren einer vorsortierten Liste ist viel schneller als das Sortieren einer unsortierten Liste, sodass die Wiederholung das Ergebnis verzerrt:
import random
# 100k uniform floats; NOTE: after the first run %timeit re-sorts the already
# sorted list, which is exactly the bias this demo illustrates
L = [random.random() for i in range(100000)]
# %timeit L.sort()
# Dann ist die `%time`-Funktion möglicherweise die bessere Wahl. Auch bei länger laufenden Befehlen, wenn kurze systembedingte Verzögerungen das Ergebnis wahrscheinlich kaum beeinflussen, dürfte `%time` die bessere Wahl sein:
import random
# fresh unsorted list: %time measures a single sort, so no pre-sorting bias
L = [random.random() for i in range(100000)]
# %time L.sort()
# Sortieren einer bereits sortierten Liste:
# %time L.sort ()
# Beachtet, wie viel schneller die vorsortierte Liste zu sortieren ist, aber beachtet auch, wie viel länger das Timing mit `%time` gegenüber `%timeit` dauert, sogar für die vorsortierte Liste. Dies ist auf die Tatsache zurückzuführen, dass `%timeit` einige clevere Dinge unternimmt, um zu verhindern, dass Systemaufrufe die Zeitmessung stören. So wird beispielsweise die *Garbage Collection* nicht mehr verwendeter Python-Objekte verhindert, die sich andernfalls auf die Zeitmessung auswirken könnten. Aus diesem Grund sind die `%timeit`-Ergebnisse normalerweise merklich schneller als die `%time`-Ergebnisse.
# ## Profilerstellung für Skripte: `%prun`
#
# Ein Programm besteht aus vielen einzelnen Anweisungen, und manchmal ist es wichtiger, diese Anweisungen im Kontext zu messen, als sie selbst zu messen. Python enthält einen integrierten [Code-Profiler](https://docs.python.org/3/library/profile.html). IPython bietet jedoch eine wesentlich bequemere Möglichkeit, diesen Profiler in Form der Magic-Funktion zu verwenden: `%prun`.
#
# Als Beispiel definieren wir eine einfache Funktion, die einige Berechnungen durchführt:
def sum_of_lists(N):  # CPU-bound demo target for %prun/%lprun below (line count kept stable on purpose)
    total = 0
    for i in range(5):
        L = [j ^ (j >> i) for j in range(N)]  # the hot spot in the profiles shown below
        total += sum(L)
    return total
# %prun sum_of_lists(1000000)
# Im Notebook sieht die Ausgabe ungefähr so aus:
#
# ```
# 14 function calls in 9.597 seconds
#
# Ordered by: internal time
#
# ncalls tottime percall cumtime percall filename:lineno(function)
# 5 8.121 1.624 8.121 1.624 <ipython-input-15-f105717832a2>:4(<listcomp>)
# 5 0.747 0.149 0.747 0.149 {built-in method builtins.sum}
# 1 0.665 0.665 9.533 9.533 <ipython-input-15-f105717832a2>:1(sum_of_lists)
# 1 0.065 0.065 9.597 9.597 <string>:1(<module>)
# 1 0.000 0.000 9.597 9.597 {built-in method builtins.exec}
# 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
# ```
# Das Ergebnis ist eine Tabelle, die sortiert nach Gesamtzeit für jeden Funktionsaufruf die Ausführungsdauer angibt. In diesem Fall wird die meiste Zeit mit *List Comprehension* innerhalb von `sum_of_lists` verbraucht. Dies gibt uns Anhaltspunkte, an welcher Stelle wir die Effizienz des Algorithmus verbessern könnten.
# ## Zeilenweise Profilerstellung: `%lprun`
#
# Die Profilerstellung von `%prun` ist nützlich, aber manchmal ist ein zeilenweiser Profilreport aufschlussreicher. Dies ist nicht in Python oder IPython integriert, aber mit [line_profiler](https://github.com/rkern/line_profiler) steht ein Paket zur Verfügung, das dies ermöglicht. Diese kann in eurem Kernel bereitgestellt werden mit
#
# ```
# $ spack env activate python-374
# $ spack install py-line-profiler ^python@3.7.4%gcc@9.1.0
# ```
#
# Alternativ könnt ihr `line-profiler` auch mit anderen Paketmanagern installieren, z.B.
#
# ```
# $ pipenv install line_profiler
# ```
#
# Falls ihr Python 3.7.x verwendet und die Fehlermeldung bekommt `error: command 'clang' failed with exit status 1`, bleibt aktuell nur, `Cython` zusammen mit den Ressourcen aus dem Git-Repository zu installieren:
#
# ```
# $ pipenv install Cython git+https://github.com/rkern/line_profiler.git#egg=line_profiler
# ```
#
# Nun könnt ihr IPython mit der `line_profiler`-Erweiterung laden:
# %load_ext line_profiler
# Der `%lprun`-Befehl führt eine zeilenweise Profilerstellung für jede Funktion durch. In diesem Fall muss explizit angegeben werden, welche Funktionen für die Profilerstellung interessant sind:
# %lprun -f sum_of_lists sum_of_lists(5000)
# Das Ergebnis sieht ungefähr so aus:
#
# ```
# Timer unit: 1e-06 s
#
# Total time: 0.015145 s
# File: <ipython-input-6-f105717832a2>
# Function: sum_of_lists at line 1
#
# Line # Hits Time Per Hit % Time Line Contents
# ==============================================================
# 1 def sum_of_lists(N):
# 2 1 1.0 1.0 0.0 total = 0
# 3 6 11.0 1.8 0.1 for i in range(5):
# 4 5 14804.0 2960.8 97.7 L = [j ^ (j >> i) for j in range(N)]
# 5 5 329.0 65.8 2.2 total += sum(L)
# 6 1 0.0 0.0 0.0 return total
# ```
# Die Zeit wird in Mikrosekunden angegeben und wir können sehen, in welcher Zeile die Funktion die meiste Zeit verbringt. Eventuell können wir das Skript dann so ändern, dass die Effizienz der Funktion gesteigert werden kann.
#
# Weitere Informationen zu `%lprun` sowie die verfügbaren Optionen findet ihr in der IPython-Hilfefunktion `%lprun?`.
# ## Speicherprofil erstellen: `%memit` und `%mprun`
#
# Ein weiterer Aspekt der Profilerstellung ist die Speichermenge, die eine Operation verwendet. Dies kann mit einer anderen IPython-Erweiterung ausgewertet werden, dem `memory_profiler`. Diese kann in eurem Kernel bereitgestellt werden mit
#
# ```
# $ spack env activate python-374
# $ spack install py-memory-profiler ^python@3.7.4%gcc@9.1.0
# ```
#
# Alternativ könnt ihr `memory-profiler` auch mit anderen Paketmanagern installieren, z.B.
#
# ```
# $ pipenv install memory_profiler
# ```
# %load_ext memory_profiler
# %memit sum_of_lists(1000000)
# Wir sehen, dass diese Funktion ungefähr 100 MB Speicher belegt.
#
# Für eine zeilenweise Beschreibung der Speichernutzung können wir die `%mprun`-Magie verwenden. Leider funktioniert diese Magie nur für Funktionen, die in separaten Modulen definiert sind, und nicht für das Notebook selbst. Daher erstellen wir zunächst mit der `%%file`-Magie ein einfaches Modul mit dem Namen `mprun_demo.py`, das unsere `sum_of_lists`-Funktion enthält.
# +
# %%file mprun_demo.py
from memory_profiler import profile
@profile
def my_func():
    a = [1] * (10 ** 6)       # ~8 MB list, kept alive by the return value
    b = [2] * (2 * 10 ** 7)   # ~160 MB transient allocation
    del b                     # effect on reported usage shows up in the %mprun report
    return a
# -
from mprun_demo import my_func
# %mprun -f my_func my_func()
# Hier zeigt die `Increment`-Spalte, wie stark sich jede Zeile auf den gesamten Speicherverbrauch auswirkt: Beachtet, dass wir beim Berechnen von `b` etwa 160 MB Speicher zusätzlich benötigen; dieser wird aber durch das Löschen von `b` nicht wieder freigegeben.
#
# Weitere Informationen zu `%memit` und `%mprun` sowie deren Optionen findet ihr in der IPython-Hilfe mit `%memit?`.
# ## pyheatmagic
#
# [pyheatmagic](https://github.com/csurfer/pyheatmagic) ist eine Erweiterung, die den IPython-Magic-Befehl `%%heat` zum Anzeigen von Python-Code als Heatmap mit [Py-Heat](https://github.com/csurfer/pyheat) erlaubt.
#
# Sie lässt sich einfach im Kernel installieren mit
#
# ```
# $ pipenv install py-heat-magic
# Installing py-heat-magic…
# …```
# ### Laden der Extension in IPython
# %load_ext heat
# ### Anzeigen der Heatmap
# +
# %%heat
def powfun(a, b):
    """Method to raise a to power b using pow() function."""
    return pow(a, b)
def powop(a, b):
    """Method to raise a to power b using ** operator."""
    return a ** b
def powmodexp(a, b):
    """Method to raise a to power b using modular exponentiation."""
    base = a
    res = 1
    # square-and-multiply: consume the exponent one bit at a time
    while b > 0:
        if b & 1:
            res *= base
        base *= base
        b >>= 1
    return res
def main():
    """Test function."""
    # results are intentionally discarded — only the per-line timings
    # matter for the %%heat heatmap
    a, b = 2377757, 773
    pow_function = powfun(a, b)
    pow_operator = powop(a, b)
    pow_modular_exponentiation = powmodexp(a, b)
if __name__ == "__main__":
    main()
# -
# Alternativ kann die Heatmap auch als Datei gespeichert werden, z.B. mit
# ```
# # %%heat -o pow-heatmap.png
# ```
# ## Siehe auch:
#
# * [Penn Machine Learning Benchmarks](https://github.com/EpistasisLab/pmlb)
| docs/refactoring/performance/ipython-profiler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''algonauts'': conda)'
# language: python
# name: python3
# ---
# +
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
import os
import timm
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import torch.nn as nn
# define variables specific to this model: subject / ROI / preprocessing select
# which .npy files are loaded and name the saved weights file
subject = 'sub01'
roi = 'LOC'
preprocessing = 'RAFT'
model_name = 'model_weights_{}_{}_{}'.format(subject, roi, preprocessing)
# per-sample array shape on disk; the last axis (222) is fed to the network as
# input channels — presumably 32x32 spatial with stacked flow frames. TODO confirm.
input_data_dims = (32,32,222)
input_channels = input_data_dims[2]
# define global variables / hyperparameters
batch_size = 32
num_epochs = 30
learning_rate = 0.001
# detect if GPU/CPU device
use_cuda = torch.cuda.is_available()
print('CUDA available:', use_cuda)
# set RNG seed for reproducibility (torch and numpy)
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
# setup gpu things
dtype = 'float32' if use_cuda else 'float64' # GPU does better with float32 numbers
torchtype = {'float32': torch.float32, 'float64': torch.float64}[dtype]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# flush out any cached GPU allocations from prior runs
torch.cuda.empty_cache()
# +
# Load training data for this subject/ROI/preprocessing into main memory.
# NOTE: despite the original comment, everything is loaded at once, not in chunks.
num_subjects = 10      # NOTE(review): unused below — confirm before removing
soi = subject
# read in every npy file in the directory Gunners_training_data/{roi}/RAFT and store them in a list
training_data = []
training_labels = []
# load in every Nth file in the directory
culling_scale = 1      # NOTE(review): declared but never applied in the loop below
# for each file in the directory
for i, file in enumerate(os.listdir('../Gunners_training_data/{}/{}'.format(roi, preprocessing))):
    # keep only this subject's files
    if not soi in file:
        continue
    # if the file is a .npy file
    if file.endswith('.npy'):
        # read in the file
        data = np.load('../Gunners_training_data/{}/{}/'.format(roi,preprocessing) + file, allow_pickle=True)
        # skip samples whose dimensions differ from input_data_dims (32x32x222)
        if data[0][0].shape != input_data_dims:
            continue
        # for each sample, add the data to the training_data list
        training_data.append(data[0][0])
        # for each sample, add the label (voxel activation vector) to the training_labels list
        training_labels.append(data[0][1])
print('Number of training samples: ', len(training_data))
num_classes = training_labels[0].shape[0]
print('Number of voxel activations (classes): ', num_classes)
# min-max normalize all labels to [0, 1] (matches the Sigmoid output head below)
training_labels = np.array(training_labels)
training_labels = (training_labels - np.min(training_labels)) / (np.max(training_labels) - np.min(training_labels))
# print the value range of the labels
print('Value range of labels: ', np.min(training_labels), np.max(training_labels))
# verify the data is loaded correctly
training_data = np.array(training_data)
training_labels = np.array(training_labels)  # NOTE: already an ndarray; harmless re-wrap
# convert the arrays to tensors of the configured dtype
training_data = torch.tensor(training_data).type(torchtype)
training_labels = torch.tensor(training_labels).type(torchtype)
# permute from NHWC to NCHW so the channel axis is where conv layers expect it
training_data = training_data.permute(0,3,1,2)
# print the dims of training_data tensor
print('training_data tensor dims:', training_data.shape)
# create a dataset from the tensors
my_dataset = TensorDataset(training_data,training_labels) # create your dataset
# 80/20 split into training and validation sets
train_size = int(0.8 * len(training_data))
valid_size = len(training_data) - train_size
# create training and validation sets
train_data, valid_data = torch.utils.data.random_split(my_dataset, [train_size, valid_size])
# create training and validation dataloaders (only training is shuffled)
train_loader = DataLoader(train_data, batch_size = batch_size, shuffle=True)
valid_loader = DataLoader(valid_data, batch_size = batch_size, shuffle=False)
# -
# loss of 1 minus the Pearson correlation, cubed
def corrcoef_loss_function(output, target):
    """Return (1 - r)**3 where r is the Pearson correlation between
    `output` and `target`.

    Evaluates to 0 for perfectly correlated tensors and up to 8 for
    perfectly anti-correlated ones; cubing sharpens the penalty as r
    moves away from 1.
    """
    pred_centered = output - torch.mean(output)
    targ_centered = target - torch.mean(target)
    covariance = torch.sum(pred_centered * targ_centered)
    scale = torch.sqrt(torch.sum(pred_centered ** 2)) * torch.sqrt(torch.sum(targ_centered ** 2))
    pearson = covariance / scale
    return (1 - pearson) ** 3
# +
# define the model: a CSPResNeXt-50 CNN (via timm) with `input_channels` (222)
# input channels and one output per voxel (num_classes)
model = timm.create_model('cspresnext50', num_classes=num_classes, in_chans=input_channels, pretrained=True).to(device)
# add sigmoid activation so outputs lie in [0, 1], matching the scaled labels
model = nn.Sequential(model, nn.Sigmoid())
# make the model use floats
model.float()
# define optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# halve-on-plateau style LR schedule keyed on validation loss (patience 2 epochs)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2, verbose=True)
# define loss function for multi-variable regression (1 - Pearson r, cubed)
loss_fn = corrcoef_loss_function
# +
# Train the model, tracking per-epoch average losses and snapshotting the
# weights of the best-validation epoch.
train_losses = []
valid_losses = []
# progress bar over epochs
pbar = tqdm(range(num_epochs))
for epoch in pbar:
    # running sums of batch losses for this epoch
    train_loss = 0.0
    valid_loss = 0.0
    # set the model to training mode
    model.train()
    # train the model for one epoch
    for i, (data, labels) in enumerate(train_loader):
        # move tensors to GPU if CUDA is available
        if use_cuda:
            data = data.to(device)
            labels = labels.to(device)
        # zero out the gradients
        optimizer.zero_grad()
        # forward pass
        output = model(data)
        # calculate loss
        loss = loss_fn(output, labels)
        # backpropagate
        loss.backward()
        # update the weights
        optimizer.step()
        # accumulate the training loss
        train_loss += loss.item()
    # set the model to evaluation mode
    model.eval()
    # evaluate on the validation set; no_grad skips building the autograd
    # graph (identical results, less memory)
    with torch.no_grad():
        for i, (data, labels) in enumerate(valid_loader):
            # move tensors to GPU if CUDA is available
            if use_cuda:
                data = data.to(device)
                labels = labels.to(device)
            # validation forward pass
            output = model(data)
            # calculate the validation loss
            valid_loss += loss_fn(output, labels).item()
    # average the accumulated batch losses
    train_loss = train_loss/len(train_loader)
    valid_loss = valid_loss/len(valid_loader)
    # ping the learning rate scheduler
    scheduler.step(valid_loss)
    # record per-epoch averages for plotting
    train_losses.append(train_loss)
    valid_losses.append(valid_loss)
    # if the current validation loss is the best so far, snapshot the weights.
    # BUG FIX: state_dict() returns references to the live parameter tensors,
    # so the original snapshot silently tracked the FINAL weights; clone each
    # tensor to capture a true copy of the best epoch.
    best_valid_loss = min(valid_losses)
    if valid_loss == best_valid_loss:
        my_best_weights = {k: v.detach().clone() for k, v in model.state_dict().items()}
    # display the epoch training loss
    pbar.set_postfix({
        'Epoch':'{}/{}'.format(epoch+1, num_epochs),
        'Training Loss': '{:.4f}'.format(train_loss) ,
        'Validation loss' : '{:.4f}'.format(valid_loss)})
# assign the best weights to the model
model.load_state_dict(my_best_weights)
# print the epoch of the best validation loss
print('Best validation loss: ', min(valid_losses))
print('Epoch of best validation loss: ', valid_losses.index(min(valid_losses))+1)
# print the model summary
# print(model)
# +
# plot the training and validation loss curves, with a vertical line at the
# epoch that achieved the best validation loss
best_epoch = valid_losses.index(min(valid_losses))+1
plt.figure(figsize=(12,8))
plt.plot(range(1,num_epochs+1), train_losses, label='Training Loss')
plt.plot(range(1,num_epochs+1), valid_losses, label='Validation Loss')
plt.axvline(best_epoch, color='r', linestyle='--', label='Best Validation Loss Epoch')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
# +
# load the model weights from file
# model.load_state_dict(torch.load('{}.pt'.format(model_name)))
# +
# switch to inference mode before generating the example predictions below
model.eval()
# render the original label, the model prediction, and their squared error in one row
def display_side_by_side(original, prediction):
    """Plot `original` and `prediction` as images side by side, plus a third
    panel showing the element-wise squared error (its mean in the title)."""
    squared_error = (original - prediction)**2
    figure = plt.figure(figsize=(15,5))
    panels = [
        (original, 'Original'),
        (prediction, 'Prediction'),
        (squared_error, 'MSE: {:.4f}'.format(squared_error.mean())),
    ]
    for idx, (image, title) in enumerate(panels, start=1):
        panel = figure.add_subplot(1, 3, idx)
        panel.imshow(image)
        panel.set_title(title)
    plt.show()
# show the element-wise squared-error map and print its mean
def display_mse(original, prediction):
    """Print the mean squared error between the two arrays and show the
    squared-error image."""
    squared_error = (original - prediction)**2
    print('Mean Squared Error: ', np.mean(squared_error))
    plt.imshow(squared_error)
    plt.show()
print(training_labels[0].unsqueeze(0).numpy().shape)
# reshape the flat voxel vector into a 2-D grid for imshow;
# NOTE(review): assumes num_classes == 19*97 == 1843 for this ROI — confirm
resized_original = training_labels[0].unsqueeze(0).numpy().reshape(19,97)
resized_prediction = model(training_data[0].unsqueeze(0).to(device)).detach().cpu().numpy().reshape(19,97)
# scatter-plot prediction against original and report their Pearson correlation
def draw_correlation_coefficient(original, prediction):
    """Print the Pearson correlation of the two vectors and scatter-plot
    prediction (y) against original (x) with the coefficient in the title."""
    pearson = np.corrcoef(original, prediction)[0, 1]
    print('Correlation Coefficient: ', pearson)
    plt.plot(original, prediction, 'o')
    plt.xlabel('Original')
    plt.ylabel('Prediction')
    plt.title('Correlation Coefficient: {:.2f}'.format(pearson))
    plt.show()
# print out value ranges of the prediction (should stay inside the sigmoid's [0, 1])
print('Prediction Range: ', np.min(resized_prediction), np.max(resized_prediction))
# qualitative checks on sample 0 — note this is a TRAINING sample, so the
# fit shown here is optimistic
display_side_by_side(resized_original,resized_prediction)
display_mse(resized_original,resized_prediction)
draw_correlation_coefficient(training_labels[0].unsqueeze(0).numpy(),model(training_data[0].unsqueeze(0).to(device)).detach().cpu().numpy())
# find the correlation coefficient between original and predicted labels for the entire dataset
def find_correlation_coeff(valid_dataset):
    """Compute the per-sample Pearson correlation between model predictions
    and labels over `valid_dataset`, plot their histogram, and return the mean.

    Relies on the module-level `model`, `use_cuda`, `device` and `batch_size`.
    BUG FIX: the original iterated the global `valid_loader` and silently
    ignored its `valid_dataset` argument; we now build a loader over the
    argument (identical behavior at the existing call site, which passes
    `valid_data`).
    """
    corr_coeff_list = []
    # deterministic order, same batch size as the global validation loader
    loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
    # set the model to evaluation mode
    model.eval()
    # no_grad: inference only, so skip building the autograd graph
    with torch.no_grad():
        for i, (data, labels) in enumerate(loader):
            # move tensors to GPU if CUDA is available
            if use_cuda:
                data = data.to(device)
                labels = labels.to(device)
            # forward pass
            output = model(data).to(device)
            # one Pearson coefficient per sample in the batch
            for label, prediction in zip(labels, output):
                corr_coeff_list.append(np.corrcoef(label.cpu().numpy(), prediction.detach().cpu().numpy())[0,1])
    # plot a histogram of the correlation coefficients
    plt.hist(corr_coeff_list, bins=20)
    plt.xlabel('Correlation Coefficient')
    plt.ylabel('Count')
    plt.title('Histogram of Correlation Coefficients')
    plt.show()
    # summary statistics
    mean_corr_coeff = np.mean(corr_coeff_list)
    print('Highest Correlation Coefficient: ', max(corr_coeff_list))
    print('Lowest Correlation Coefficient: ', min(corr_coeff_list))
    print('Mean Correlation Coefficient: ', mean_corr_coeff)
    return mean_corr_coeff
print ( 'Mean Correlation Coefficient: ', find_correlation_coeff(valid_data))
# +
# find the loss of the (best-epoch) model on the validation set
valid_loss = 0.0
# turn the model to evaluation mode
model.eval()
# no_grad: inference only, so skip building the autograd graph
# (identical result to the original, but less memory/compute)
with torch.no_grad():
    for i, (data, labels) in enumerate(valid_loader):
        # move tensors to GPU if CUDA is available
        if use_cuda:
            data = data.to(device)
            labels = labels.to(device)
        # validation forward pass
        output = model(data)
        # calculate the validation loss
        valid_loss += loss_fn(output, labels).item()
# average over batches
valid_loss = valid_loss/len(valid_loader)
# print the validation loss
print('Validation loss: ', valid_loss)
# -
# save the trained model weights (state_dict only, not the full module)
torch.save(model.state_dict(), '{}.pt'.format(model_name))
| LOC_Model_Training/model_creation_sub01_LOC_RAFT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:down-go]
# language: python
# name: conda-env-down-go-py
# ---
import glob
import pandas as pd
# ## 1) Download GAF files
# http://current.geneontology.org/products/pages/downloads.html
# + active=""
# !mkdir -pv goann/
# !wget -O goann/goa_human.gaf.gz goann http://geneontology.org/gene-associations/goa_human.gaf.gz
# !wget -O goann/goa_human_complex.gaf.gz goann http://geneontology.org/gene-associations/goa_human_complex.gaf.gz
# !wget -O goann/goa_human_isoform.gaf.gz goann http://geneontology.org/gene-associations/goa_human_isoform.gaf.gz
# !wget -O goann/goa_human_rna.gaf.gz http://geneontology.org/gene-associations/goa_human_rna.gaf.gz
# + active=""
# !gunzip goann/*
# -
# ## 2) Download associations
# NCBI's gene2go file contains annotations of GO terms to Entrez GeneIDs for over 35 different species. We are interested in human, which has the taxid 9606.
# + active=""
# # Get ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz
# from goatools.base import download_ncbi_associations
# gene2go = download_ncbi_associations()
# !mv -v gene2go goann/gene2go
# -
# Follow the instructions in the [**background_genes_ncbi notebook**](https://github.com/tanghaibao/goatools/blob/main/notebooks/backround_genes_ncbi.ipynb) to download a set of background population genes from NCBI.
#
#
#
# > 1. Query [NCBI Gene](https://www.ncbi.nlm.nih.gov/gene)
# "9606"[Taxonomy ID] AND alive[property]
# > 2. Click "Send to:"
# > 3. Select "File"
# > 4. Select "Create File" button The default name of the tsv file is gene_result.txt
# > 5. Convert NCBI Gene tsv file to a Python module
# A goatools Python script will convert a NCBI Gene tsv file to a Python module:
# > `scripts/ncbi_gene_results_to_python.py gene_result.txt -o genes_ncbi_human.py`
# > 6. Import NCBI data from Python module
# + active=""
# !python scripts/ncbi_gene_results_to_python.py goann/gene_result.txt -o scripts/genes_ncbi_human.py
# -
# ___
#
# ## Load GAF files into the GafReader
# +
# %%time
from goatools.anno.gaf_reader import GafReader
# Load every downloaded GAF file into a GafReader, keyed by file stem
# (e.g. 'goann/goa_human.gaf' -> 'goa_human').
godata = {}
for gaf in glob.glob('goann/*.gaf'):
    name = gaf.split('/')[1].replace('.gaf','')
    godata[name] = {}
    godata[name]['Gaf'] = GafReader(gaf)
# +
# %%time
# Extract (GO id, DB name, namespace) triples from each reader's associations.
for name in godata:
    godata[name]['GO Terms'] = [(x.GO_ID,x.DB_Name,x.NS) for x in godata[name]['Gaf'].get_associations()]
    print(name, len(godata[name]['GO Terms']))
# Flatten all files into one frame, keeping the first occurrence of each GO id.
GO_Terms_df = pd.DataFrame([(x,''.join(y),z) for name in godata for x,y,z in godata[name]['GO Terms']],columns=['GO_ID','DB_Name','Name_Space']).drop_duplicates('GO_ID').set_index('GO_ID')
len(GO_Terms_df)
# -
# ## Read associations and map GO ids to gene names
#
# +
# Read NCBI gene annotations
from scripts.genes_ncbi_human import GENEID2NT # Already downloaded and converted to a python module ...
gnid2name = [(x.GeneID, x.Symbol) for x in GENEID2NT.values()]
gnid2name_df = pd.DataFrame(gnid2name,columns=['GeneID','Symbol']).set_index('GeneID')
# -
len(gnid2name_df)
# +
from goatools.anno.genetogo_reader import Gene2GoReader
gene2go = Gene2GoReader('goann/gene2go', taxids=[9606]) # Already downloaded ...
# +
# go2gnids = gene2go.get_id2gos(namespace='BP',go2geneids=True)
# go2gnids.update(gene2go.get_id2gos(namespace='CC',go2geneids=True))
# go2gnids.update(gene2go.get_id2gos(namespace='MF',go2geneids=True))
# go2gnnames = {}
# for key, value in go2gnids.items():
# go2gnnames[key] = gnid2name_df.Symbol[value].to_list()
# len(go2gnnames)
# +
# Map Entrez GeneID -> set of GO ids, merged across the three namespaces.
# NOTE(review): dict.update replaces the whole entry for a gene that appears
# in more than one namespace (its BP/CC sets get overwritten by MF) — confirm
# this is intended rather than a set-union merge.
gnid2gos = gene2go.get_id2gos(namespace='BP')
gnid2gos.update(gene2go.get_id2gos(namespace='CC'))
gnid2gos.update(gene2go.get_id2gos(namespace='MF'))
# Re-key by gene symbol using the NCBI GeneID -> Symbol table.
gnname2gos = {}
for key, value in gnid2gos.items():
    gnname2gos[gnid2name_df.Symbol[key]] = value
len(gnname2gos)
# -
# Subset to uniq GO ids
# +
# Collect every GO id that is actually annotated to some gene symbol.
GOs = {go for val in gnname2gos.values() for go in val}
goann = GO_Terms_df.loc[GOs,:]
len(goann)
# -
# ## Write iPAGE annotations
from ipage_down import *
# mkdir -pv annotations/human_go_gs
write_page_index(gnname2gos,'annotations/human_go_gs/human_go_gs_index.txt.gz')
# !zcat annotations/human_go_gs/human_go_gs_index.txt.gz | head -n 2
write_page_names(goann,'annotations/human_go_gs/human_go_gs_names.txt.gz')
# !zcat annotations/human_go_gs/human_go_gs_names.txt.gz | head
# ## Methylation related pathways
set(GO_Terms_df.DB_Name[['methyl' in x for x in GO_Terms_df.DB_Name]].to_list())
from iPAGE2 import ipage2
# https://github.com/artemy-bakulin/iPAGE-2
# !conda env export
| go2page.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# `jupyter notebook`
# # Matplotlib, tylko bardziej
# +
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
# %matplotlib inline
def gauss(x, A, std, mean):
    """Evaluate a Gaussian bell curve at x.

    A: peak amplitude
    std: standard deviation (curve width)
    mean: center position along x
    """
    offset = x - mean
    return A * np.exp(-offset ** 2 / (2 * std ** 2))
# +
# Draw 10k samples from N(mean=42, std=3) and fit a Gaussian to the histogram.
losowe = np.random.normal(loc = 42, scale=3, size=(10000,))
# # plt.hist?
# Two stacked axes sharing the x axis: raw histogram on top, line+fit below.
fig, (oś1, oś2) = plt.subplots(2, figsize=(10, 8), sharex=True)
histogram = oś1.hist(losowe, bins=50)
# hist() returns (counts, bin_edges, patches); bin centers are edge midpoints.
y = histogram[0]
krawedzie = histogram[1]
srodki = (krawedzie[0:-1] + krawedzie[1:])/2
oś2.set_xlabel("x")
oś1.set_ylabel("n")
oś1.set_title("histogram")
oś1.grid()
oś2.plot(srodki, y, label="histogram jako linia")
# Least-squares fit of gauss() to the binned counts; p0 is the initial guess
# (amplitude ~500, std ~5, mean ~42).
parametry, kowariancje = opt.curve_fit(gauss, srodki, y, p0=[500, 5, 42])
plt.plot(srodki, gauss(srodki, *parametry), lw=6, label="fit")
print(parametry)
oś2.legend()
oś2.grid()
# # opt.curve_fit?
# -
A = np.arange(10)
for funkcja in [np.min, np.max, np.mean, np.std]:
print(funkcja, funkcja(A))
# +
#multiline string
testowy_string = """litwo
ojczyzno moja
ty cośtam cośtam"""
print(testowy_string)
def funkcja():
    """Demo function whose docstring is shown by help() / `funkcja?`."""
    return 42
# funkcja?
# +
# Draw a smiley face: unit circle for the head, scaled circles for the eyes.
theta = np.linspace(0, 2*np.pi, 1000)
glowa_x = np.cos(theta)
glowa_y = np.sin(theta)
oko_x = glowa_x / 7
oko_y = glowa_y / 7 + 0.5
t = 1
fig, ax = plt.subplots() # CHANGE: keep the figure handle for FuncAnimation
usmiech_x = np.linspace(-0.5, 0.5, 100)
usmiech_y = -0.5 + t*(usmiech_x)**2 # t ranges from -1 to +1: t = np.cos(theta)
plt.plot(glowa_x, glowa_y, "k-")
plt.plot(oko_x - 0.4, oko_y, "k-")
plt.plot(oko_x + 0.4, oko_y, "k-")
# Keep the mouth's Line2D handle so animate() can update it in place.
usmiech = plt.plot(usmiech_x, usmiech_y, "k-")[0]
plt.xlim(-2, 2)
plt.ylim(-2, 2)
def animate(t):
    """Animation callback: bend the mouth parabola by frame value t."""
    usmiech_y = -0.5 + t * (usmiech_x)**2
    usmiech.set_data(usmiech_x, usmiech_y)
    return [usmiech]
from matplotlib import animation
# %matplotlib qt
# Frame values sweep cos() over one period, so the smile oscillates smoothly.
czas = np.cos(np.linspace(0, 2*np.pi, 120))
ani = animation.FuncAnimation(fig, animate, frames = czas, interval=1)
plt.show()
| 2016_07_14_drugie.ipynb |
# ---
# jupyter:
# anaconda-cloud: {}
# jupytext:
# notebook_metadata_filter: all,-language_info
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pandas merging and SQL
# + tags=["hide-cell"]
# Don't change this cell; just run it.
import numpy as np
import pandas as pd
# Safe settings for Pandas.
pd.set_option('mode.chained_assignment', 'raise')
# %matplotlib inline
import matplotlib.pyplot as plt
# Make the plots look more fancy.
plt.style.use('fivethirtyeight')
# -
# [SQL](https://en.wikipedia.org/wiki/SQL) (pronounced "sequel") is a standard language for working with data in tables. Among its many features, SQL can merge tables using *queries*. SQL calls these merges - *JOINS*.
#
# Pandas `merge` has the same behavior as SQL's `INNER JOIN`.
cones = pd.DataFrame()
cones['Flavor'] = ['strawberry', 'vanilla', 'chocolate', 'strawberry',
'chocolate']
cones['Price'] = [3.55, 4.75, 6.55, 5.25, 5.75]
cones
ratings = pd.DataFrame()
ratings['Flavor'] = ['strawberry', 'chocolate', 'vanilla']
ratings['Stars'] = [2.5, 3.5, 4]
ratings
# Pandas `merge` is symmetrical - you get the same result from `cones.merge(ratings, ...)` and `ratings.merge(cones, ...)`.
# Do an Pandas merge into "cones" from "ratings"
cones.merge(ratings, on='Flavor')
# Do an Pandas merge into "ratings" from "cones"
ratings.merge(cones, on='Flavor')
# SQL `INNER JOIN` has the same behavior.
#
# See:
#
# * [Pandas to_sql documenation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html)
# * [SQL INNER JOIN tutorial](https://www.sqlitetutorial.net/sqlite-inner-join)
# Use a standard SQL library for Python
from sqlalchemy import create_engine
# Make an in-memory database using the basic SQLite database software
engine = create_engine('sqlite://', echo=False)
# Write the data frames as tables in the SQL database.
cones.to_sql('cones', con=engine)
ratings.to_sql('ratings', con=engine)
# NOTE(review): Engine.execute is the legacy pre-2.0 SQLAlchemy API; newer
# versions require `with engine.connect() as conn: conn.execute(text(...))`.
# Show that we can recover the cones data from the database.
engine.execute("SELECT * FROM cones").fetchall()
# Show that we can recover the ratings data from the database.
engine.execute("SELECT * FROM ratings").fetchall()
# Do an inner join into "cones" from "ratings"
cones_ratings_query = """
SELECT *
FROM cones
INNER JOIN ratings ON ratings.Flavor = cones.Flavor
"""
engine.execute(cones_ratings_query).fetchall()
# Do an inner join into "ratings" from "cones"
ratings_cones_query = """
SELECT *
FROM ratings
INNER JOIN cones ON cones.Flavor = ratings.Flavor
"""
engine.execute(ratings_cones_query).fetchall()
| useful-pandas/merge_and_sql.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pystan
import bebi103
import numpy as np
import bokeh.io
import bokeh.plotting
bokeh.io.output_notebook()
import matplotlib.pyplot as plt
# %matplotlib notebook
# +
behavior_inference_model_code = """
data {
int N;
int y[N];
}
parameters {
real<lower = 0> k;
vector[N] v;
vector<lower=0, upper=1>[N] r;
}
transformed parameters {
real K = exp(k);
vector[N] s;
vector[N] n;
s = r .* exp(-v);
n = (1 - r) .* exp(-v);
}
model {
// Priors
k ~ uniform(-3, 3);
v[1] ~ uniform(-3, 3);
r[1] ~ uniform(0, 1);
// Likelihood
for (i in 2:N) {
v[i] ~ normal(v[i-1], K);
r[i] ~ beta(s[i], n[i-1]);
}
// Likelihood
y ~ bernoulli_logit(r);
}
"""
beh_inf = pystan.StanModel(model_code=behavior_inference_model_code)
# -
# Generate fake data from the model
# Build the true reward rate: probability 0.75 for 20 steps, then 0.2 for 20.
true_prob = [0.75] * 20 + [0.2] * 20
plt.figure()
plt.plot(true_prob)
# Generate data from bernoulli: outcome i is 1 with probability true_prob[i].
randvars = np.random.random(len(true_prob))
outcomes = randvars < true_prob
plt.figure()
plt.plot(outcomes, '.')
N = len(outcomes)
# +
data_inf = dict(N=N,
                y=outcomes.astype('int'))
#init_prob = [dict(prob=np.ones(N) * 0.1)] * 4
samples_beh_inf = beh_inf.sampling(data=data_inf, warmup=200, iter=1000, control={'max_treedepth':18})
# -
bebi103.stan.check_all_diagnostics(samples_beh_inf)
# Get samples of w1 and plot
plt.figure(figsize=(20,5))
# NOTE(review): the Stan model above declares parameters k, v, r (no 'prob'),
# so indexing the fit by 'prob' looks stale — probably should be 'r'; confirm.
n1samp = samples_beh_inf['prob']
means = np.mean(n1samp, axis=0)
std = np.std(n1samp, axis=0)
Nplot = 60
plt.plot(means, label='Sampled probability', alpha=0.8)
plt.plot(true_prob, 'r--', label='True probability', alpha=0.8)
plt.xlabel('Time step')
plt.ylabel('P(option 1)')
plt.legend(loc='lower right')
#plt.savefig('MCMC.pdf')
# NOTE(review): 'prob[2]' has the same stale-parameter issue as above.
bokeh.io.show(bebi103.viz.trace_plot(samples_beh_inf,
                                     pars=['prob[2]'],
                                     inc_warmup=True))
| behrens_MCMC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ziatdinovmax/pyroVED/blob/main/examples/pyroVED_examples.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KPdvJXU4sLfq"
# ## pyroVED: Interactive Examples
#
# The easiest way to start using pyroVED is via [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb), which is a free research tool from Google for machine learning education and research built on top of Jupyter Notebook. The following notebooks can be executed in Google Colab by simply clicking on "Open in Colab" icon:
# <br><br>
#
# * Shift-VAE: Application to 1D spectra with arbitrary offset in peak position [](https://colab.research.google.com/github/ziatdinovmax/pyroVED/blob/master/examples/shiftVAE.ipynb)
#
# * r-VAE: Application to arbitrary rotated 2D images [](https://colab.research.google.com/github/ziatdinovmax/pyroVED/blob/master/examples/rVAE.ipynb)
#
# * j(r)-VAE: Learning (jointly) discrete and continuous representations of the arbitrary rotated image data [](https://colab.research.google.com/github/ziatdinovmax/pyroVED/blob/main/examples/jrVAE.ipynb)
#
# * ss(r)-VAE: Semi-supervised learning for data with orientational disorder [](https://colab.research.google.com/github/ziatdinovmax/pyroVED/blob/main/examples/ssrVAE.ipynb)
| examples/pyroVED_examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="3tFGgFKexKF1" colab_type="code" colab={}
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import warnings
from matplotlib.colors import ListedColormap
from mlxtend.plotting import plot_decision_regions
from pylab import rcParams
from sklearn.datasets import make_circles, make_moons
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
# + id="7Le4qZUnxW3P" colab_type="code" colab={}
np.set_printoptions(suppress=True)
zero_one_colourmap = ListedColormap(('white', 'red'))
rcParams['figure.figsize'] = 14, 7
rcParams['axes.facecolor'] = '#383838'
# + id="Hy_Dg1iMyN96" colab_type="code" colab={}
X, y = make_circles(n_samples = 1000,
factor=0.85,
random_state=2021,
noise=0.1)
# + id="XVkM-Ed7xmPs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 381} outputId="2f4e3aa0-dbcf-4b22-9cfa-232de9722b81"
plt.scatter(X[:,0],X[:,1],
c=y, s=100,
cmap = zero_one_colourmap)
plt.show()
# + id="lWOSx8a3zDWo" colab_type="code" colab={}
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.33,
random_state=42)
# + id="VBLshcLzxoeo" colab_type="code" colab={}
def get_model(inp_activation):
  """Build and compile a deliberately deep dense binary classifier.

  The network stacks many small hidden layers so the effect of the
  activation choice on gradient flow (vanishing gradients) is easy to
  observe when comparing e.g. "sigmoid" vs "relu".

  Parameters
  ----------
  inp_activation : activation (name or callable) used by every hidden layer.

  Returns
  -------
  A compiled Keras Sequential model (sigmoid output, binary cross-entropy).
  """
  model = Sequential()
  # First hidden layer also fixes the 2-feature input shape.
  model.add(Dense(10, input_dim=2, activation=inp_activation))
  # Ten further identical hidden layers (previously written out by hand).
  for _ in range(10):
    model.add(Dense(10, activation=inp_activation))
  # Single sigmoid unit for binary classification.
  model.add(Dense(1, activation="sigmoid"))
  model.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
  return model
# + [markdown] id="mEU-g-mVJUwm" colab_type="text"
# \begin{equation}w_{n e w}=w_{o l d}-\alpha * \frac{\delta Loss}{\delta w}\end{equation}
# + [markdown] id="kegOPcorJj-A" colab_type="text"
# \begin{equation} \frac{\delta Loss}{\delta w} = \frac{w_{old} - w_{new}}{\alpha } \end{equation}
# + id="9SHY9qTSIr9V" colab_type="code" colab={}
def change_in_weight_gradient(old_weight, new_weight, learning_rate):
  """Recover the loss gradient implied by one SGD weight update.

  Since w_new = w_old - lr * g, the gradient is g = (w_old - w_new) / lr.
  Also returns the absolute percentage change of the weight.
  """
  delta = old_weight - new_weight
  implied_gradient = delta / learning_rate
  pct_change_weight = abs(100 * delta / old_weight)
  return implied_gradient, pct_change_weight
# + [markdown] id="_Fe3e4HQNDQc" colab_type="text"
# # Sigmoid Model
# + id="S6jLHXGM6BPf" colab_type="code" colab={}
sigmoid_model = get_model("sigmoid")
# + id="W2hH-hL75XkW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 571} outputId="3562fed6-444f-49c7-c551-698687e5863e"
sigmoid_model.summary()
# + id="CF1q-Xllze_b" colab_type="code" colab={}
sigmoid_model_first_layer_weights = sigmoid_model.get_weights()[0][0]
# + id="xTAaQqISzjpO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="b9d878d4-f186-4383-fa3b-c1b492b3e961"
sigmoid_model_first_layer_weights
# + id="ob0m36yb6jeh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="5776aa56-9578-496d-8993-9b013c7f2742"
sigmoid_history = sigmoid_model.fit(X_train, y_train, epochs = 1,
validation_data = (X_test, y_test))
# + id="4SIA6c9M61Er" colab_type="code" colab={}
sigmoid_model_first_layer_weights_updated = sigmoid_model.get_weights()[0][0]
# + id="_dEX2oBX61Ba" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="e62a481b-19ab-473d-faee-82946b30f895"
sigmoid_model_first_layer_weights_updated
# + id="MYO3AOS_K9qa" colab_type="code" colab={}
s_gradient, s_weight_change = change_in_weight_gradient(sigmoid_model_first_layer_weights,
sigmoid_model_first_layer_weights_updated,
sigmoid_model.optimizer.get_config()["learning_rate"])
# + id="7F7lIo9RK9y9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="20c256da-1731-4944-c1bf-c1be2a95c2e8"
s_gradient
# + id="g-Zk_NGX0ekt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="ff45e690-efc8-4a0d-d25f-8776669c4c17"
s_weight_change
# + [markdown] id="66n9UXHpNGvd" colab_type="text"
# # Relu Model
# + id="66IHHyyN7enB" colab_type="code" colab={}
activation_model = get_model("relu")
# + id="LgrAAZK_6fWc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="17400a17-3b91-43ca-e54b-3e6e6fa2a05b"
activation_model_old_first_layer_weight = activation_model.get_weights()[0][0]
activation_model_old_first_layer_weight
# + id="1Ei62qD38F3-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="383c4b66-53b5-48b8-d2d3-fd6e10ef7786"
activation_history = activation_model.fit(X_train, y_train, epochs = 1,
validation_data = (X_test, y_test))
# + id="PqMRHMOG7kA2" colab_type="code" colab={}
activation_model_updated_first_layer_weight = activation_model.get_weights()[0][0]
# + id="dMf2PBIx8NCC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="259690d3-37c5-4697-a2bb-a7b9ddb1a685"
activation_model_updated_first_layer_weight
# + id="yuvEJ5Cj6fdx" colab_type="code" colab={}
relu_gradient, relu_weight_change = change_in_weight_gradient(activation_model_old_first_layer_weight,
activation_model_updated_first_layer_weight,
activation_model.optimizer.get_config()["learning_rate"])
# + id="URsWh0_B8QY9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="b7af5e2b-0910-48e4-86f1-01beb3ae0c96"
relu_gradient
# + id="5dxX6HI77tdw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="0c067dfa-4a88-4691-84bb-659363785762"
relu_weight_change
# + id="MkFQVOOz-XGy" colab_type="code" colab={}
| vanishing-gradient-notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # ml lab1
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pd.set_option('display.max_rows', 10)
# -
# ### 1. read ex1 data
data1 = np.genfromtxt('data/ex1data1.txt', delimiter=',')
rest = pd.DataFrame(data1, columns=['Population', 'Income'])
rest
# ### 2. plot data
# +
def get_plot():
    """Create the scatter plot of the restaurant training data.

    Returns the pyplot module so callers can keep adding artists
    (hypothesis line, legend, ...) before showing the figure.
    """
    plt.figure(figsize=(10, 6))
    plt.grid(True)
    # Axis labels: income in $10,000s vs. city population in 10,000s.
    plt.xlabel('Population of City, 10.000')
    plt.ylabel('Income, $10.000')
    plt.plot(rest['Population'], rest['Income'], 'rx', markersize=10, label='Training Data')
    return plt
plot = get_plot()
plot.show()
# -
# ### 3. cost function J(θ)
# +
# Linear hypothesis function
def h(X, theta):
    """Linear hypothesis h_theta(X) = X · theta (matrix product)."""
    prediction = np.dot(X, theta)
    return prediction
# J = compute_cost(X, y, theta)
# computes the cost of using theta as the parameter for linear regression to fit the data points in X and y
def compute_cost(X, y, theta):
    """Half mean-squared-error cost: J = sum((X·theta - y)^2) / (2m)."""
    m = y.size
    # Inlined linear hypothesis h(X, theta) = X · theta.
    residuals = np.dot(X, theta) - y
    return np.sum(residuals ** 2) / (2. * m)
# +
(_, n) = rest.shape
theta = np.zeros((1, n)).T
X1 = rest[['Population']]
X1.insert(0, 'theta_0', 1)
y1 = rest[['Income']]
J = compute_cost(X1, y1, theta)
print(f'theta:\t{theta.ravel()}\nJ:\t{float(J)}')
# -
# ### 4. gradient descent
# +
# Performs gradient descent to learn theta
def gradient_descent(X, y, theta, alpha=0.01, iterations=1500):
    """Fit linear-regression parameters with batch gradient descent.

    Parameters
    ----------
    X : design matrix (m x n), first column expected to be ones.
    y : target column vector (m x 1).
    theta : initial parameter vector (n x 1).
    alpha : learning rate.
    iterations : number of gradient steps.

    Returns
    -------
    (theta, J_history): fitted parameters and the cost after each step.

    Note: the caller's initial `theta` array is no longer mutated in
    place; each step produces a new array (was: `theta -= ...`).
    """
    m = y.size
    J_history = []
    XT = X.T  # loop-invariant transpose, hoisted out of the loop
    for _ in range(iterations):
        loss = h(X, theta) - y
        gradient = np.dot(XT, loss) / m
        # Functional update avoids the side effect on the caller's array.
        theta = theta - alpha * gradient
        J_history.append(compute_cost(X, y, theta))
    return theta, J_history
theta, j_history = gradient_descent(X1, y1, theta)
print(f'computed theta: {theta.ravel()}')
# -
sample_population = [3, 11, 15, 16, 18.5]
predicted_income = [np.dot([1, x], theta).sum() for x in sample_population]
pd.DataFrame(zip(sample_population, predicted_income), columns=['Sample Population', 'Predicted Income'])
# +
h_values = [np.dot(x, theta).sum() for x in X1.to_numpy()]
plot = get_plot()
plot.plot(rest['Population'], h_values, 'b-', label='Hypothesis')
plot.legend()
plot.show()
# -
# ### 5. visualizing J(θ)
# +
# grid coordinates for plotting
xvals = np.linspace(-10, 10, 50)
yvals = np.linspace(-1, 4, 50)
xx, yy = np.meshgrid(xvals, yvals, indexing='xy')
Z = np.zeros((xvals.size, yvals.size))
# calculate Z-values (Cost) based on grid of coefficients
for (i, j), v in np.ndenumerate(Z):
Z[i, j] = compute_cost(X1, y1, theta=[[xx[i, j]], [yy[i, j]]])
# +
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure(figsize=(15, 6))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2, projection='3d')
# left plot
CS = ax1.contour(xx, yy, Z, np.logspace(-2, 3, 20), cmap=plt.cm.jet)
ax1.scatter(*theta, c='r')
# right plot
ax2.plot_surface(xx, yy, Z, rstride=1, cstride=1, alpha=0.6, cmap=plt.cm.jet)
ax2.set_zlabel('Cost')
ax2.set_zlim(Z.min(), Z.max())
ax2.view_init(elev=15, azim=230)
# settings common to both plots
for ax in fig.axes:
ax.set_xlabel(r'$\theta_0$', fontsize=17)
ax.set_ylabel(r'$\theta_1$', fontsize=17)
# -
# ### 6. read ex2 data
data2 = np.genfromtxt('data/ex1data2.txt', delimiter=',')
houses = pd.DataFrame(data2, columns=['Area', 'Bedrooms', 'Price'])
houses
# ### 7. features normalization
# Normalizes the features in X:
# returns a normalized version of X where
# the mean value of each feature is 0 and the standard deviation is 1
def feature_normalization(X):
    """Normalize features column-wise to zero mean and unit std.

    Returns a tuple (X_norm, mu, sigma): the scaled data plus the
    per-column mean and standard deviation used for the scaling, so the
    same transform can be applied to new samples later.
    """
    col_mean = X.mean(axis=0)
    col_std = X.std(axis=0)
    return (X - col_mean) / col_std, col_mean, col_std
X2 = houses[['Area', 'Bedrooms']]
X2_norm, mu, sigma = feature_normalization(X2)
X2_norm.describe()
# +
y2 = houses[['Price']]
_, n2 = houses.shape
X2.insert(0, 'theta_0', 1)
X2_norm.insert(0, 'theta_0', 1)
# -
t1 = np.zeros((1, n2)).T
t2 = np.zeros((1, n2)).T
(theta1, j_history) = gradient_descent(X2, y2, t1, 0.00000001, 50)
(theta2, j_norm_history) = gradient_descent(X2_norm, y2, t2, 0.1, 50)
print(f'theta1:\t{theta1.ravel()}\ntheta2:\t{theta2.ravel()}')
p1 = plt.plot(range(len(j_history)), j_history, color='black')
p2 = plt.plot(range(len(j_norm_history)), j_norm_history, color='red')
plt.legend((p1[0], p2[0]), ('raw', 'normalized'))
plt.show()
# ### 8. multi gradient descent with vectorizing
# +
alpha = 0.01
iterations = 400
(theta_mul, _) = gradient_descent(X2_norm, y2, np.zeros((1, n2)).T, alpha, iterations)
print(f'theta_mul:\t{theta_mul.ravel()}')
# -
# ### 9. execution time
# +
from timeit import default_timer
iterations = 1000
alpha = 0.02
start = default_timer()
(theta_timer, _) = gradient_descent(X2_norm.to_numpy(), y2.to_numpy(), np.zeros((1, n2)).T, alpha, iterations)
end = default_timer()
print(f'theta_timer:\t{theta_timer.ravel()}\ttime:{end - start}')
# -
# ### 10. ɑ varying plot
def draw_alphas(iterations):
    """Plot cost-vs-iteration curves for a sweep of learning rates.

    Runs gradient descent from a zero initialization for each alpha in
    a descending sweep and overlays the convergence curves on one figure.
    """
    alphas = np.linspace(0.1, 0.001, num=7)
    handles = []
    for rate in alphas:
        _, history = gradient_descent(X2_norm.to_numpy(), y2.to_numpy(),
                                      np.zeros((1, n2)).T, rate, iterations)
        handles.append(plt.plot(range(len(history)), history)[0])
    plt.title(f'iterations: {iterations}')
    plt.legend(handles, [f'Alpha: {a:.3f}' for a in alphas])
    plt.show()
draw_alphas(30)
draw_alphas(60)
draw_alphas(100)
# ### 11. least squares
# computes the closed-form solution to linear regression using the normal equations
def normal_eqn(X, y):
    """Closed-form least squares via the normal equations.

    Solves (XᵀX) theta = Xᵀy with np.linalg.solve instead of the
    deprecated np.asmatrix / .I explicit inverse, which is both removed
    in newer NumPy and numerically less stable.  Returns a flat 1-D
    float array, matching the original implementation's output.
    """
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    theta = np.linalg.solve(X.T @ X, X.T @ y)
    return theta.ravel()
theta_sq = normal_eqn(X2.to_numpy(), y2.to_numpy())
print(f'theta_sq:\t{theta_sq.ravel()}\ntheta_gd:\t{theta_mul.ravel()}')
# +
AREA = 1890
ROOMS = 4
price_sq = np.array([1, AREA, ROOMS]) @ theta_sq.T
price_gd = (np.array([1, (AREA - mu[0]) / sigma[0], (ROOMS - mu[1]) / sigma[1]]) @ theta_mul)[0]
print(f'price_sq:\t{price_sq}\nprice_gd:\t{price_gd}')
# -
# ### 12. conclusion
# В лабараторной работе были рассмотрены случаи линейной и многомерной регресии с помощью методов **градиентного спуска** [#4] а также аналититеского метода **наименьших квадратов** [#11].
#
# В работе использовался язык программирования **Python**, интерактиваня среда разработки **Jupyter** а также библиотеки `numpy`, `pandas` и `matplotlib`
#
# - Как видно из графика [#7] нормализация увеличивает скорость сходимости градиентного спуска.
# - В пункте #10 показана зависимость скорости сходимости от параметра ɑ и количества итераций.
# - В пункте #11 метод градиентного спуска сравнивается с методом наименьших квадратов.
| lab1/.ipynb_checkpoints/lab1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.13 64-bit (''momoKoBERT'': conda)'
# name: python3613jvsc74a57bd0fdec29f9b47567deea31828944d53754aeb926d3c1b9335ea3443d6e8a8e588c
# ---
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import gluonnlp as nlp
import numpy as np
from tqdm import tqdm, tqdm_notebook
from kobert.utils import get_tokenizer
from kobert.pytorch_kobert import get_pytorch_kobert_model
from transformers import AdamW
from transformers.optimization import get_cosine_schedule_with_warmup
##GPU 사용 시
device = torch.device("cuda:0")
bertmodel, vocab = get_pytorch_kobert_model()
dataset_train = nlp.data.TSVDataset("data/ratings_train.txt", field_indices=[1,2], num_discard_samples=1)
dataset_test = nlp.data.TSVDataset("data/ratings_test.txt", field_indices=[1,2], num_discard_samples=1)
tokenizer = get_tokenizer()
tok = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower=False)
class BERTDataset(Dataset):
    """Torch Dataset wrapping raw (sentence, label) rows.

    Every row is tokenized once up front with gluonnlp's
    BERTSentenceTransform; __getitem__ then returns the transformed
    sentence tuple with the int32 label appended.
    """

    def __init__(self, dataset, sent_idx, label_idx, bert_tokenizer, max_len,
                 pad, pair):
        transform = nlp.data.BERTSentenceTransform(
            bert_tokenizer, max_seq_length=max_len, pad=pad, pair=pair)
        # Pre-compute every example so __getitem__ is a cheap lookup.
        self.sentences = [transform([row[sent_idx]]) for row in dataset]
        self.labels = [np.int32(row[label_idx]) for row in dataset]

    def __getitem__(self, i):
        # Transformed sentence tuple plus the label as the last element.
        return self.sentences[i] + (self.labels[i],)

    def __len__(self):
        return len(self.labels)
## Setting parameters
max_len = 64
batch_size = 64
warmup_ratio = 0.1
num_epochs = 5
max_grad_norm = 1
log_interval = 200
learning_rate = 5e-5
data_train = BERTDataset(dataset_train, 0, 1, tok, max_len, True, False)
data_test = BERTDataset(dataset_test, 0, 1, tok, max_len, True, False)
train_dataloader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, num_workers=5)
test_dataloader = torch.utils.data.DataLoader(data_test, batch_size=batch_size, num_workers=5)
class BERTClassifier(nn.Module):
    """Sentence classifier head on top of a (Ko)BERT encoder.

    Parameters
    ----------
    bert : pretrained BERT backbone returning (sequence_output, pooled_output).
    hidden_size : dimensionality of the pooled BERT output (768 for base).
    num_classes : number of target classes (2 for this sentiment task).
    dr_rate : optional dropout probability applied to the pooled output.
    params : unused; kept for interface compatibility.
    """

    def __init__(self,
                 bert,
                 hidden_size = 768,
                 num_classes=2,
                 dr_rate=None,
                 params=None):
        super(BERTClassifier, self).__init__()
        self.bert = bert
        self.dr_rate = dr_rate
        self.classifier = nn.Linear(hidden_size , num_classes)
        if dr_rate:
            self.dropout = nn.Dropout(p=dr_rate)

    def gen_attention_mask(self, token_ids, valid_length):
        """Build a 0/1 float mask: 1 for the first valid_length[i] tokens of row i."""
        attention_mask = torch.zeros_like(token_ids)
        for i, v in enumerate(valid_length):
            attention_mask[i][:v] = 1
        return attention_mask.float()

    def forward(self, token_ids, valid_length, segment_ids):
        """Run BERT and classify from the pooled [CLS] representation."""
        attention_mask = self.gen_attention_mask(token_ids, valid_length)
        # return_dict=False keeps the (sequence_output, pooled_output) tuple API
        # (comment translated from Korean: "added return_dict=False").
        _, pooler = self.bert(input_ids = token_ids, token_type_ids = segment_ids.long(), attention_mask = attention_mask.float().to(token_ids.device), return_dict=False)
        # Bug fix: previously `out` was undefined when dr_rate was falsy,
        # raising NameError; fall back to the raw pooled output instead.
        out = self.dropout(pooler) if self.dr_rate else pooler
        return self.classifier(out)
model = BERTClassifier(bertmodel, dr_rate=0.5).to(device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
loss_fn = nn.CrossEntropyLoss()
t_total = len(train_dataloader) * num_epochs
warmup_step = int(t_total * warmup_ratio)
scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=t_total)
def calc_accuracy(X,Y):
    """Fraction of rows in logits X whose argmax matches the label in Y."""
    _, predicted = torch.max(X, 1)
    correct = (predicted == Y).sum().data.cpu().numpy()
    return correct / predicted.size()[0]
# Train/evaluate loop: one training pass and one test pass per epoch.
for e in range(num_epochs):
    train_acc = 0.0
    test_acc = 0.0
    model.train()
    for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm_notebook(train_dataloader)):
        optimizer.zero_grad()
        token_ids = token_ids.long().to(device)
        segment_ids = segment_ids.long().to(device)
        # valid_length stays on CPU: it is only iterated over in
        # gen_attention_mask (the assignment below is a no-op).
        valid_length= valid_length
        label = label.long().to(device)
        out = model(token_ids, valid_length, segment_ids)
        loss = loss_fn(out, label)
        loss.backward()
        # Clip gradients to stabilize training before the optimizer step.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()  # Update learning rate schedule
        train_acc += calc_accuracy(out, label)
        if batch_id % log_interval == 0:
            print("epoch {} batch id {} loss {} train acc {}".format(e+1, batch_id+1, loss.data.cpu().numpy(), train_acc / (batch_id+1)))
    print("epoch {} train acc {}".format(e+1, train_acc / (batch_id+1)))
    # Evaluation pass over the held-out set.
    # NOTE(review): runs without torch.no_grad(); consider wrapping to
    # avoid tracking gradients during evaluation.
    model.eval()
    for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm_notebook(test_dataloader)):
        token_ids = token_ids.long().to(device)
        segment_ids = segment_ids.long().to(device)
        valid_length= valid_length
        label = label.long().to(device)
        out = model(token_ids, valid_length, segment_ids)
        test_acc += calc_accuracy(out, label)
    print("epoch {} test acc {}".format(e+1, test_acc / (batch_id+1)))
# +
import torch
from torchtext import data
import torchtext
from konlpy.tag import Mecab
import pandas as pd
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# +
tokenizer = Mecab()
def my_tokenizer(text):
  """Tokenize Korean text with the module-level Mecab tagger.

  tokenizer.pos(text) yields (word, pos_tag) pairs; only the words are
  kept.  An empty result is replaced by ["."] so downstream torchtext
  processing never sees a zero-length example.
  """
  words = [pair[0] for pair in tokenizer.pos(text)]
  return words if words else ["."]
# +
tokenizer = Mecab()
ID = torchtext.data.Field(sequential = False,
use_vocab = False) # 실제 사용은 하지 않을 예정
TEXT = torchtext.data.Field(sequential=True,
use_vocab=True,
tokenize=my_tokenizer, # 토크나이저로는 Mecab 사용.
lower=True,
include_lengths = True
)
LABEL = data.LabelField(dtype = torch.float)
# -
train_data, test_data = data.TabularDataset.splits(
path='./data', train='ratings_train.txt', test='ratings_test.txt', format='tsv',
fields=[('id', ID), ('text', TEXT), ('label', LABEL)], skip_header=True)
# +
import random
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
# -
print(len(train_data))
print(len(valid_data))
print(vars(train_data[0]))
print(vars(train_data[5]))
# +
MAX_VOCAB_SIZE = 25_000
TEXT.build_vocab(train_data, max_size = MAX_VOCAB_SIZE)
LABEL.build_vocab(train_data)
# +
print('단어 집합의 크기 : {}'.format(len(TEXT.vocab)))
# -
print('단어 집합의 크기 : {}'.format(len(LABEL.vocab)))
print(LABEL.vocab.stoi)
# + tags=["outputPrepend"]
print(TEXT.vocab.stoi)
# +
BATCH_SIZE = 64
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE,
sort_within_batch = True,
sort_key = lambda x: len(x.text),
device = device)
# +
import torch.nn as nn
class RNN(nn.Module):
    """Multi-layer (optionally bidirectional) LSTM sentiment classifier.

    Embeds token ids, runs a packed LSTM over the sequence, and maps the
    concatenated final forward/backward hidden states to ``output_dim``
    logits (one per example).
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
                 bidirectional, dropout, pad_idx):
        super().__init__()
        # Layer creation order is kept stable for reproducible random init.
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        self.rnn = nn.LSTM(embedding_dim,
                           hidden_dim,
                           num_layers=n_layers,
                           bidirectional=bidirectional,
                           dropout=dropout)
        # Forward and backward final states are concatenated -> hidden_dim * 2.
        self.fc = nn.Linear(hidden_dim * 2, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text, text_lengths):
        # text: [sent len, batch size] of token ids.
        embedded = self.dropout(self.embedding(text))  # [sent len, batch, emb dim]
        # Pack so the LSTM skips padding positions; lengths must be on CPU.
        packed = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths.to('cpu'))
        _packed_out, (hidden, cell) = self.rnn(packed)
        # hidden: [num layers * num directions, batch, hid dim]; the last two
        # slices are the final forward (-2) and backward (-1) layer states.
        final = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        return self.fc(self.dropout(final))
# -
# Like before, we'll create an instance of our RNN class, with the new parameters and arguments for the number of layers, bidirectionality and dropout probability.
#
# To ensure the pre-trained vectors can be loaded into the model, the `EMBEDDING_DIM` must be equal to that of the pre-trained GloVe vectors loaded earlier.
#
# We get our pad token index from the vocabulary, getting the actual string representing the pad token from the field's `pad_token` attribute, which is `<pad>` by default.
# +
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
HIDDEN_DIM = 256
OUTPUT_DIM = 1
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = RNN(INPUT_DIM,
EMBEDDING_DIM,
HIDDEN_DIM,
OUTPUT_DIM,
N_LAYERS,
BIDIRECTIONAL,
DROPOUT,
PAD_IDX)
# +
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
print(f'The model has {count_parameters(model):,} trainable parameters')
# -
# ## Train the Model
# Now to training the model.
#
# The only change we'll make here is changing the optimizer from `SGD` to `Adam`. SGD updates all parameters with the same learning rate and choosing this learning rate can be tricky. `Adam` adapts the learning rate for each parameter, giving parameters that are updated more frequently lower learning rates and parameters that are updated infrequently higher learning rates. More information about `Adam` (and other optimizers) can be found [here](http://ruder.io/optimizing-gradient-descent/index.html).
#
# To change `SGD` to `Adam`, we simply change `optim.SGD` to `optim.Adam`; also note how we do not have to provide an initial learning rate for Adam as PyTorch specifies a sensible default initial learning rate.
# +
import torch.optim as optim
optimizer = optim.Adam(model.parameters())
# -
# The rest of the steps for training the model are unchanged.
#
# We define the criterion and place the model and criterion on the GPU (if available)...
# +
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
# -
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """
    # Sigmoid squashes the logits into [0, 1]; rounding gives hard 0/1 labels.
    hard_preds = torch.round(torch.sigmoid(preds))
    hits = (hard_preds == y).float()  # bool -> float so the division works
    return hits.sum() / len(hits)
# We define a function for training our model.
#
# As we have set `include_lengths = True`, our `batch.text` is now a tuple with the first element being the numericalized tensor and the second element being the actual lengths of each sequence. We separate these into their own variables, `text` and `text_lengths`, before passing them to the model.
#
# **Note**: as we are now using dropout, we must remember to use `model.train()` to ensure the dropout is "turned on" while training.
def train(model, iterator, optimizer, criterion):
    """Run one training epoch and return (mean loss, mean accuracy)."""
    total_loss = 0.0
    total_acc = 0.0
    model.train()  # enable dropout during training
    for batch in iterator:
        optimizer.zero_grad()
        # include_lengths=True makes batch.text a (tokens, lengths) tuple.
        text, text_lengths = batch.text
        predictions = model(text, text_lengths).squeeze(1)
        loss = criterion(predictions, batch.label)
        acc = binary_accuracy(predictions, batch.label)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        total_acc += acc.item()
    return total_loss / len(iterator), total_acc / len(iterator)
# Then we define a function for testing our model, again remembering to separate `batch.text`.
#
# **Note**: as we are now using dropout, we must remember to use `model.eval()` to ensure the dropout is "turned off" while evaluating.
def evaluate(model, iterator, criterion):
    """Compute mean loss and accuracy over *iterator* without weight updates."""
    total_loss = 0.0
    total_acc = 0.0
    model.eval()  # disable dropout for evaluation
    with torch.no_grad():  # skip autograd bookkeeping
        for batch in iterator:
            # include_lengths=True makes batch.text a (tokens, lengths) tuple.
            text, text_lengths = batch.text
            predictions = model(text, text_lengths).squeeze(1)
            total_loss += criterion(predictions, batch.label).item()
            total_acc += binary_accuracy(predictions, batch.label).item()
    return total_loss / len(iterator), total_acc / len(iterator)
# And also create a nice function to tell us how long our epochs are taking.
# +
import time
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole minutes and seconds."""
    elapsed = end_time - start_time
    mins = int(elapsed / 60)
    secs = int(elapsed - mins * 60)
    return mins, secs
# -
# Finally, we train our model...
# + tags=[]
# Train for a fixed number of epochs, checkpointing the best model
# (by validation loss) to 'tut2-model.pt'.
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # Save weights only when validation loss improves (early-stopping style).
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut2-model.pt')
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
# -
# ...and get our new and vastly improved test accuracy!
# +
model.load_state_dict(torch.load('tut2-model.pt'))
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
# -
# ## User Input
#
# We can now use our model to predict the sentiment of any sentence we give it. As it has been trained on movie reviews, the sentences provided should also be movie reviews.
#
# When using a model for inference it should always be in evaluation mode. If this tutorial is followed step-by-step then it should already be in evaluation mode (from doing `evaluate` on the test set), however we explicitly set it to avoid any risk.
#
# Our `predict_sentiment` function does a few things:
# - sets the model to evaluation mode
# - tokenizes the sentence, i.e. splits it from a raw string into a list of tokens
# - indexes the tokens by converting them into their integer representation from our vocabulary
# - gets the length of our sequence
# - converts the indexes, which are a Python list into a PyTorch tensor
# - add a batch dimension by `unsqueeze`ing
# - converts the length into a tensor
# - squashes the output prediction from a real number between 0 and 1 with the `sigmoid` function
# - converts the tensor holding a single value into an integer with the `item()` method
#
# We are expecting reviews with a negative sentiment to return a value close to 0 and positive reviews to return a value close to 1.
# +
from konlpy.tag import Mecab
tokenizer = Mecab()
def predict_sentiment(model, sentence):
model.eval()
tokenized = [tok for tok in tokenizer.morphs(sentence)]
print(tokenized)
indexed = [TEXT.vocab.stoi[t] for t in tokenized]
print(indexed)
length = [len(indexed)]
print(length)
tensor = torch.LongTensor(indexed).to(device)
print(tensor.shape)
tensor = tensor.unsqueeze(1)
print(tensor.shape)
length_tensor = torch.LongTensor(length)
print(length_tensor.shape)
prediction = torch.sigmoid(model(tensor, length_tensor))
print(prediction)
return prediction.item()
# -
# An example negative review...
predict_sentiment(model, "이 영화 너무 재밌다.")
# An example positive review...
predict_sentiment(model, "This film is great")
# ## Next Steps
#
# We've now built a decent sentiment analysis model for movie reviews! In the next notebook we'll implement a model that gets comparable accuracy with far fewer parameters and trains much, much faster.
| NSMC Classifier_BERT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating a Sampled Dataset
#
# **Learning Objectives**
# - Sample the natality dataset to create train/eval/test sets
# - Preprocess the data in Pandas dataframe
# ## Introduction
#
# In this notebook we'll read data from BigQuery into our notebook to preprocess the data within a Pandas dataframe.
PROJECT = 'cloud-training-demos' # Replace with your PROJECT
BUCKET = 'cloud-training-bucket' # Replace with your BUCKET
REGION = 'us-central1' # Choose an available region for Cloud MLE
TFVERSION = '1.12' # TF version for CMLE to use
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = TFVERSION
# + language="bash"
# if ! gsutil ls | grep -q gs://${BUCKET}/; then
# gsutil mb -l ${REGION} gs://${BUCKET}
# fi
# -
# ## Create ML datasets by sampling using BigQuery
#
# We'll begin by sampling the BigQuery data to create smaller datasets.
# Create SQL query using natality data after the year 2000
query_string = """
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth
FROM
publicdata.samples.natality
WHERE year > 2000
"""
# There are only a limited number of years and months in the dataset. Let's see what the hashmonths are.
#
# We'll call BigQuery but group by the hashmonth and see the number of records for each group. This will enable us to get the correct train/eval/test percentages
# +
from google.cloud import bigquery
bq = bigquery.Client(project=PROJECT)
df = bq.query("SELECT hashmonth, COUNT(weight_pounds) AS num_babies FROM ("
+ query_string +
") GROUP BY hashmonth").to_dataframe()
print("There are {} unique hashmonths.".format(len(df)))
df.head()
# -
# Here's a way to get a well-distributed portion of the data in such a way that the train/eval/test sets do not overlap.
# +
# Added the RAND() so that we can now subsample from each of the hashmonths to get approximately the record counts we want
train_query = "SELECT * FROM (" + query_string + ") WHERE MOD(hashmonth, 100) < 80 AND RAND() < 0.0005"
eval_query = "SELECT * FROM (" + query_string + ") WHERE MOD(hashmonth, 100) >= 80 AND MOD(hashmonth, 100) < 90 AND RAND() < 0.0005"
test_query = "SELECT * FROM (" + query_string + ") WHERE MOD(hashmonth, 100) >= 90 AND RAND() < 0.0005"
train_df = bq.query(train_query).to_dataframe()
eval_df = bq.query(eval_query).to_dataframe()
test_df = bq.query(test_query).to_dataframe()
print("There are {} examples in the train dataset.".format(len(train_df)))
print("There are {} examples in the validation dataset.".format(len(eval_df)))
print("There are {} examples in the test dataset.".format(len(test_df)))
# -
# ## Preprocess data using Pandas
#
# We'll perform a few preprocessing steps to the data in our dataset. Let's add extra rows to simulate the lack of ultrasound. That is, we'll duplicate some rows and make the `is_male` field be `Unknown`. Also, if there is more than one child we'll change the `plurality` to `Multiple(2+)`. While we're at it, we'll also change the plurality column to be a string. We'll perform these operations below.
#
# Let's start by examining the training dataset as is.
train_df.head()
# Also, notice that there are some very important numeric fields that are missing in some rows (the count in Pandas doesn't count missing data)
train_df.describe()
# It is always crucial to clean raw data before using in machine learning, so we have a preprocessing step. We'll define a `preprocess` function below. Note that the mother's age is an input to our model so users will have to provide the mother's age; otherwise, our service won't work. The features we use for our model were chosen because they are such good predictors and because they are easy enough to collect.
# +
import pandas as pd
def preprocess(df):
    """Clean the natality dataframe and simulate missing-ultrasound rows.

    Drops rows with non-positive weight/age/gestation/plurality, converts
    the numeric ``plurality`` column to descriptive strings, then appends a
    duplicate of every row with ``is_male`` set to ``'Unknown'`` and any
    multiple birth collapsed to ``'Multiple(2+)'`` (what we would know
    without an ultrasound). Returns a new dataframe; the input is untouched.
    """
    # Single combined mask instead of four chained filters; the explicit
    # .copy() avoids SettingWithCopyWarning when we mutate columns below.
    df = df[(df.weight_pounds > 0)
            & (df.mother_age > 0)
            & (df.gestation_weeks > 0)
            & (df.plurality > 0)].copy()
    # Modify plurality field to be a descriptive string.
    twins_etc = {1: 'Single(1)', 2: 'Twins(2)', 3: 'Triplets(3)',
                 4: 'Quadruplets(4)', 5: 'Quintuplets(5)'}
    # Plain assignment rather than inplace mutation of a derived frame.
    df['plurality'] = df['plurality'].replace(twins_etc)
    # Now create extra rows to simulate lack of ultrasound.
    no_ultrasound = df.copy(deep=True)
    no_ultrasound.loc[no_ultrasound['plurality'] != 'Single(1)', 'plurality'] = 'Multiple(2+)'
    no_ultrasound['is_male'] = 'Unknown'
    return pd.concat([df, no_ultrasound])
# -
# Let's process the train/eval/test set and see a small sample of the training data after our preprocessing:
train_df = preprocess(train_df)
eval_df = preprocess(eval_df)
test_df = preprocess(test_df)
train_df.head()
train_df.tail()
# Let's look again at a summary of the dataset. Note that we only see numeric columns, so `plurality` does not show up.
train_df.describe()
# ## Write to .csv files
#
# In the final versions, we want to read from files, not Pandas dataframes. So, we write the Pandas dataframes out as csv files. Using csv files gives us the advantage of shuffling during read. This is important for distributed training because some workers might be slower than others, and shuffling the data helps prevent the same data from being assigned to the slow workers.
train_df.to_csv('train.csv', index=False, header=False)
eval_df.to_csv('eval.csv', index=False, header=False)
test_df.to_csv('test.csv', index=False, header=False)
# + language="bash"
# wc -l *.csv
# + language="bash"
# head *.csv
# + language="bash"
# tail *.csv
# -
# Copyright 2017-2018 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| courses/machine_learning/deepdive/05_review/2_sample_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pachterlab/BLCSBGLKP_2020/blob/master/notebooks/viral_load.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="vwbw-hCYBpZX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3fb3bc34-955f-437a-e0cc-ffff49f812c5"
# !date
# + id="yH5EONAOBpZi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="3f376d37-02f0-40bd-8bb9-e48d539cacc3"
# !git clone https://github.com/pachterlab/BLCSBGLKP_2020.git
# + id="nJvJWRWlBpZp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 326} outputId="eab2b7e3-ce21-44bc-ee65-7de49525421c"
# !pip install anndata
# + [markdown] id="Z_qBGgygBpZv" colab_type="text"
# # Predicting viral load
# + id="vq1Ps0byBpZv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="6ffd518c-dbd3-4962-c287-dad7e1ab0085"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import string
import anndata
from collections import defaultdict
from collections import OrderedDict
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib as mpl
import matplotlib.patches as mpatches
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale
from sklearn.preprocessing import normalize
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
from scipy.special import expit as sigmoid
def nd(arr):
    """Flatten *arr* into a 1-D numpy array."""
    return np.asarray(arr).ravel()
def yex(ax):
    """Draw a y = x reference line on *ax* and force square, shared limits."""
    # Common limits spanning whatever both axes currently cover.
    lo = np.min([ax.get_xlim(), ax.get_ylim()])
    hi = np.max([ax.get_xlim(), ax.get_ylim()])
    # Plot the diagonal underneath existing artists (zorder=0).
    ax.plot([lo, hi], [lo, hi], 'k-', alpha=0.75, zorder=0)
    ax.set_aspect('equal')
    ax.set_xlim([lo, hi])
    ax.set_ylim([lo, hi])
    return ax
def main(X, y1, y2):
    """Fit a logistic-regression classifier of y1 from X, report metrics.

    *y1* is the binary target; *y2* is carried along (stacked as a second
    target column) so downstream plots can color points by it. Returns the
    train/test split, test predictions, and the fitted coefficients/intercept.
    """
    targets = np.asarray([y1, y2]).T
    X_train, X_test, y_train, y_test = train_test_split(
        X, targets, test_size=0.5, random_state=43)
    clf = LogisticRegression(random_state=43, dual=False, max_iter=1000, tol=1e-6)
    clf.fit(X_train, y_train[:, 0])
    y_pred = clf.predict(X_test)
    # T = True, F = False, P = Positive, N = Negative
    # Model Precision: TP/(TP+FP); Model Recall: TP/(TP+FN)
    truth = y_test[:, 0].astype(int)
    print("Score: {:,.4f}".format(clf.score(X_test, truth)))
    print("Precision: {:,.4f}".format(metrics.precision_score(truth, y_pred.astype(int))))
    print("Recall: {:,.4f}".format(metrics.recall_score(truth, y_pred.astype(int))))
    coef = clf.coef_[0]
    icept = clf.intercept_[0]
    return (X_train, X_test, y_train, y_test, y_pred, coef, icept)
def plot(X, y, xidx, yidx, xlabel, ylabel, w, b):
    """Scatter test points, the logistic-regression boundary, and a
    probability heatmap on the module-global `ax`.

    NOTE(review): relies on globals `ax`, `plt`, `mpl`, `mpatches`, `nd`,
    and `sigmoid` — `ax` must exist before this is called.
    w, b are the fitted coefficient vector and intercept.
    """
    N = 1000   # heatmap grid resolution per axis
    r = 0.2    # fractional margin added around the data limits
    # Get the test data
    c = nd(np.log1p(y[:,1]))  # color = log viral RNA molecules
    x = nd(X[:,xidx])
    y = nd(X[:,yidx])  # shadows the parameter `y` from here on
    # Find the limits
    xlims = (np.min(x)*(1-r), np.max(x)*(1+r))
    ylims = (np.min(y)*(1-r), np.max(y)*(1+r))
    # compute boundary line: w[xidx]*x + w[yidx]*y + b = 0 solved for y
    xx = np.linspace(*xlims, len(x))
    yy = (-xx*w[xidx] - b)/w[yidx]
    X, Y = np.meshgrid(np.linspace(*xlims, N), np.linspace(*ylims, N))
    YY = (-X*w[xidx] - b)/w[yidx]
    ###############################################################
    ax.set_xlim(*xlims)
    ax.set_ylim(*ylims)
    ### Scatter plot of points
    sc = ax.scatter(x, y, c = c,s=100, edgecolors="black", cmap="Greys")
    ### Plot boundary line
    # note that here we solve the above equation for y using the
    # coefficients and the intercept
    ax.plot(xx, yy, linestyle="--", color="black", linewidth=2, label="Log. reg. boundary")
    ### Plot logistic function
    # Perpendicular from the line is the probability that a sample
    # has viral RNA. This function is the logistic function and has
    # the form f(x) = 1/(1+exp(-(x-x0))) but we only care about variation
    # perpendicular to the line so we use Y and YY
    Z = sigmoid(Y-YY)
    # Since we want probability of 1 to be above the line, we do 1-Z
    cs = ax.imshow(Z, vmin = 0., vmax = 1., cmap=plt.cm.coolwarm, origin='lower',
            extent=[*xlims, *ylims])
    #### Colorbar for RNA amount
    plt.colorbar(sc, label="log(Viral RNA molecules + 1)")
    # Colorbar for Probability
    plt.colorbar(cs, label="Probability of + Virus")
    ###############################################################
    ## Prettying up the plot, adding
    pos = mpatches.Patch(color="#D43F3A", label='$+$ Viral RNA')
    neg = mpatches.Patch(color="#3182bd", label='$-$ Viral RNA')
    handles, labels = ax.get_legend_handles_labels()
    handles.append(neg); handles.append(pos)
    ax.legend(handles=handles[::-1])
    # NOTE(review): the next two calls are immediately overridden by the
    # hard-coded "Spikein"/"Viral" labels below — presumably intentional.
    ax.set_xlabel("log({}+1) amplicon counts".format(xlabel))
    ax.set_ylabel("log({}+1) amplicon counts".format(ylabel))
    ax.set_xlabel("log({}+1) amplicon counts".format("Spikein"))
    ax.set_ylabel("log({}+1) amplicon counts".format("Viral"))
    ax.xaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))
    ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter('{x:,.0f}'))
    plt.tight_layout(h_pad=1)
    ax.set_title("Logistic regression classifier on test data")
    return ax
def plot_LOD_adjusted(X_test, y_test, xlabel, ylabel, xidx, yidx, w, b, y_pred):
    """Plot the regression-weighted (adjusted) viral/spikein ratio against
    true viral RNA molecules, colored by predicted class, on the global `ax`.

    NOTE(review): relies on globals `ax`, `cm` (class->color map), `pd`,
    `np`, `mpatches`. X_test holds log1p counts, hence np.exp to undo.
    """
    x = np.exp(X_test[:,xidx])
    y = np.exp(X_test[:,yidx])
    c = pd.Series(y_pred).map(cm)  # map 0/1 predictions to colors
    xx = y_test[:,1]  # true viral RNA molecule count
    # xx[xx==0] = 0.1
    # yy = y*w[yidx] + x*(w[xidx])
    # Ratio weighted by the logistic-regression coefficients.
    yy = (y**w[yidx])/(x**(-w[xidx]))
    ax.scatter(xx, yy, c=c)
    ### Make the plot pretty
    # symlog handles the zero-count samples that log scale cannot.
    ax.set_xscale("symlog")
    ax.set_yscale("symlog")
    # bc = ax.axhline(y=np.exp(-b), linestyle="--", label="Log. reg. boundary", color="k")
    ax.set_xlabel(r"Viral RNA molecules")
    # NOTE(review): first set_ylabel is overridden by the hard-coded one below.
    ax.set_ylabel(r"({}+1)^({:,.2f}) / ({}+1)^({:,.2f})".format(ylabel,w[yidx], xlabel,w[xidx]))
    ax.set_ylabel(r"({}+1)^({:,.2f}) / ({}+1)^({:,.2f})".format("Viral",w[yidx], "Spikein",w[xidx]))
    # legend
    pos = mpatches.Patch(color="#D43F3A", label='$+$ Viral RNA predicted')
    neg = mpatches.Patch(color="#3182bd", label='$-$ Viral RNA predicted')
    ax.legend(handles=[pos, neg])
    ax.set_title("Adjusted normalization based on logistic regression")
    return ax
def plot_LOD_normal(X_test, y_test, xlabel, ylabel, xidx, yidx, w, b, y_pred):
    """Plot the plain viral/spikein count ratio against true viral RNA
    molecules, colored by predicted class, on the global `ax`.

    Companion to plot_LOD_adjusted but with standard (unweighted)
    normalization. NOTE(review): relies on globals `ax`, `cm`, `pd`,
    `np`, `mpatches`; `w` and `b` are accepted but unused here.
    """
    x = np.exp(X_test[:,xidx])
    y = np.exp(X_test[:,yidx])
    c = pd.Series(y_pred).map(cm)  # map 0/1 predictions to colors
    xx = y_test[:,1]  # true viral RNA molecule count
    # xx[xx==0] = 0.1
    yy = y/x  # plain ratio, no regression weighting
    ax.scatter(xx, yy, c=c)
    ### Make the plot pretty
    # symlog handles the zero-count samples that log scale cannot.
    ax.set_xscale("symlog")
    ax.set_yscale("symlog")
    ax.set_xlabel(r"Viral RNA molecules")
    # NOTE(review): first set_ylabel is overridden by the hard-coded one below.
    ax.set_ylabel(r"({}+1) / ({}+1))".format(ylabel, xlabel))
    ax.set_ylabel(r"({}+1) / ({}+1))".format("Viral", "Spikein"))
    # legend
    pos = mpatches.Patch(color="#D43F3A", label='$+$ Viral RNA predicted')
    neg = mpatches.Patch(color="#3182bd", label='$-$ Viral RNA predicted')
    ax.legend(handles=[pos, neg])
    ax.set_title("Standard normalization")
    return ax
cm = {1:"#D43F3A", 0:"#3182bd"}
fsize=20
plt.rcParams.update({'font.size': fsize})
# %config InlineBackend.figure_format = 'retina'
# + id="pe1uaQpCBpZ1" colab_type="code" colab={}
adata = anndata.read_h5ad("BLCSBGLKP_2020/data/kb/adata.h5ad")
# + [markdown] id="q6haH7dKBpZ7" colab_type="text"
# # Predicting Viral load
# + id="awZdiUJoBpZ8" colab_type="code" colab={}
mtx = adata.layers["raw"]
gene = adata.var.gene.values
obs = adata.obs
# + id="V0vgxGAeBpaA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 735} outputId="97713f1c-2566-4210-a5d0-e686c6665f89"
# Fit and plot viral load for one (plate, lysate, amplicon, control) combo.
# NOTE(review): this cell is repeated almost verbatim below for the other
# three plate/amplicon/control combinations.
p, l, g, c = ("Plate1", "HEK293", "N1", "Twist")
# Boolean masks selecting the samples for this combination.
pmask = obs.plate.values==p
cmask = obs[c+"_bool"].values
lmask = obs.lysate.values==l
gzero = obs[c].values>0
m = np.logical_and.reduce([pmask, cmask, lmask])#, gzero])
# Column mask: the amplicon and its spike-in control.
vm = np.logical_or.reduce([gene==g, gene==g+"_spikein"])
s_idx = np.where(gene==g+"_spikein")
v_idx = np.where(gene==g)
X = np.log1p(mtx[:,vm][m])
#X = np.log1p(mtx[m][:,vm])
load = np.log1p(obs[c].values[m])
# 50/50 train/test split, fixed seed for reproducibility.
f = 0.5
X_train, X_test, y_train, y_test = train_test_split(X, load, test_size=f, random_state=43)
test_size = np.ceil(len(load)*f).astype(int)
train_size = X.shape[0]-test_size
print(test_size==y_test.shape[0])
print(train_size==y_train.shape[0])
# Down-weight zero-load samples so they don't dominate the fit.
sample_weight_train = np.ones(train_size)
sample_weight_train[y_train==0] = 1/np.unique(load).shape[0]
sample_weight_test = np.ones(test_size)
sample_weight_test[y_test==0] = 1/np.unique(load).shape[0]
# Weighted linear regression of log viral load on log counts.
lr = LinearRegression(normalize=False)
lr.fit(X_train, y_train, sample_weight=sample_weight_train)
y_pred = lr.predict(X_test)
print("r2 = {:,.4f}".format(lr.score(X_test, y_test, sample_weight=sample_weight_test)))
score = metrics.r2_score(y_test, y_pred, sample_weight=sample_weight_test)
mse = metrics.mean_squared_error(y_test, y_pred, sample_weight=sample_weight_test)
print("r2 coefficient : {:,.4f}".format(score))
print('Mean squared error: {:,.4f}'.format(mse))
# Project test points onto the fitted linear combination for plotting.
XX = np.dot(X_test, lr.coef_.T) + lr.intercept_
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(XX, y_test, label="Truth, size=sample_weight", s=sample_weight_test*75, c="k")
ax.plot(XX, y_pred, label="Ideal, r2={:,.4f}".format(score), color="k", linewidth=1)
ax.set_xlabel("{:,.2f}*log(viral+1) + {:,.2f}*log(spikein+1) + {:,.2f}".format(lr.coef_[0],lr.coef_[1], lr.intercept_))
ax.set_ylabel("log(Viral load + 1)")
ax.set_xlim(-0.1)
ax.set_ylim(-0.1)
yex(ax)
ax.set_title("Viral load: {} {} {}".format(p, g.split("_")[-1][0], c))
ax.legend()
# plt.savefig("./figs/viral_load_{}_{}_{}.png".format(p, g.split("_")[-1], c),bbox_inches='tight', dpi=300, fontsize=20)
plt.show()
# + id="3mI4nMQQBpaH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 735} outputId="36ab4ff4-7c8c-43a2-9e0d-2d033c28a396"
p, l, g, c = ("Plate1", "HEK293", "N1", "ATCC_RNA")
pmask = obs.plate.values==p
cmask = obs[c+"_bool"].values
lmask = obs.lysate.values==l
gzero = obs[c].values>0
m = np.logical_and.reduce([pmask, cmask, lmask])#, gzero])
vm = np.logical_or.reduce([gene==g, gene==g+"_spikein"])
s_idx = np.where(gene==g+"_spikein")
v_idx = np.where(gene==g)
X = np.log1p(mtx[:,vm][m])
#X = np.log1p(mtx[m][:,vm])
load = np.log1p(obs[c].values[m])
f = 0.5
X_train, X_test, y_train, y_test = train_test_split(X, load, test_size=f, random_state=43)
test_size = np.ceil(len(load)*f).astype(int)
train_size = X.shape[0]-test_size
print(test_size==y_test.shape[0])
print(train_size==y_train.shape[0])
sample_weight_train = np.ones(train_size)
sample_weight_train[y_train==0] = 1/np.unique(load).shape[0]
sample_weight_test = np.ones(test_size)
sample_weight_test[y_test==0] = 1/np.unique(load).shape[0]
lr = LinearRegression(normalize=False)
lr.fit(X_train, y_train, sample_weight=sample_weight_train)
y_pred = lr.predict(X_test)
print("r2 = {:,.4f}".format(lr.score(X_test, y_test, sample_weight=sample_weight_test)))
score = metrics.r2_score(y_test, y_pred, sample_weight=sample_weight_test)
mse = metrics.mean_squared_error(y_test, y_pred, sample_weight=sample_weight_test)
print("r2 coefficient : {:,.4f}".format(score))
print('Mean squared error: {:,.4f}'.format(mse))
XX = np.dot(X_test, lr.coef_.T) + lr.intercept_
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(XX, y_test, label="Truth, size=sample_weight", s=sample_weight_test*75, c="k")
ax.plot(XX, y_pred, label="Ideal, r2={:,.4f}".format(score), color="k", linewidth=1)
ax.set_xlabel("{:,.2f}*log(viral+1) + {:,.2f}*log(spikein+1) + {:,.2f}".format(lr.coef_[0],lr.coef_[1], lr.intercept_))
ax.set_ylabel("log(Viral load + 1)")
ax.set_xlim(-0.1)
ax.set_ylim(-0.1)
yex(ax)
ax.set_title("Viral load: {} {} {}".format(p, g.split("_")[-1][0], c))
ax.legend()
#plt.savefig("./figs/viral_load_{}_{}_{}.png".format(p, g.split("_")[-1], c),bbox_inches='tight', dpi=300, fontsize=20)
plt.show()
# + id="wvf9Tvs5BpaN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 735} outputId="d542962d-2f76-4938-daf7-824e49346727"
p, l, g, c = ("Plate2", "HEK293", "S2", "Twist")
pmask = obs.plate.values==p
cmask = obs[c+"_bool"].values
lmask = obs.lysate.values==l
gzero = obs[c].values>0
m = np.logical_and.reduce([pmask, cmask, lmask])#, gzero])
vm = np.logical_or.reduce([gene==g, gene==g+"_spikein"])
s_idx = np.where(gene==g+"_spikein")
v_idx = np.where(gene==g)
X = np.log1p(mtx[:,vm][m])
#X = np.log1p(mtx[m][:,vm])
load = np.log1p(obs[c].values[m])
f = 0.5
X_train, X_test, y_train, y_test = train_test_split(X, load, test_size=f, random_state=43)
test_size = np.ceil(len(load)*f).astype(int)
train_size = X.shape[0]-test_size
print(test_size==y_test.shape[0])
print(train_size==y_train.shape[0])
sample_weight_train = np.ones(train_size)
sample_weight_train[y_train==0] = 1/np.unique(load).shape[0]
sample_weight_test = np.ones(test_size)
sample_weight_test[y_test==0] = 1/np.unique(load).shape[0]
lr = LinearRegression(normalize=False)
lr.fit(X_train, y_train, sample_weight=sample_weight_train)
y_pred = lr.predict(X_test)
print("r2 = {:,.4f}".format(lr.score(X_test, y_test, sample_weight=sample_weight_test)))
score = metrics.r2_score(y_test, y_pred, sample_weight=sample_weight_test)
mse = metrics.mean_squared_error(y_test, y_pred, sample_weight=sample_weight_test)
print("r2 coefficient : {:,.4f}".format(score))
print('Mean squared error: {:,.4f}'.format(mse))
XX = np.dot(X_test, lr.coef_.T) + lr.intercept_
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(XX, y_test, label="Truth, size=sample_weight", s=sample_weight_test*75, c="k")
ax.plot(XX, y_pred, label="Ideal, r2={:,.4f}".format(score), color="k", linewidth=1)
ax.set_xlabel("{:,.2f}*log(viral+1) + {:,.2f}*log(spikein+1) + {:,.2f}".format(lr.coef_[0],lr.coef_[1], lr.intercept_))
ax.set_ylabel("log(Viral load + 1)")
ax.set_xlim(-0.1)
ax.set_ylim(-0.1)
yex(ax)
ax.set_title("Viral load: {} {} {}".format(p, g.split("_")[-1][0], c))
ax.legend()
#plt.savefig("./figs/viral_load_{}_{}_{}.png".format(p, g.split("_")[-1], c),bbox_inches='tight', dpi=300, fontsize=20)
plt.show()
# + id="0vYQGCz3BpaR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 735} outputId="e2644cee-7b19-47f9-a05e-a92f3f49cd2c"
# Fit a linear model predicting log viral load from log-transformed viral and
# spike-in counts for one (plate, lysate, gene, condition) combination.
# Assumes globals `obs` (cell metadata), `mtx` (count matrix) and `gene`
# (per-column gene names) are defined earlier in the notebook -- TODO confirm.
p, l, g, c = ("Plate2", "HEK293", "S2", "ATCC_RNA")
# Boolean row masks: plate, condition flag, lysate, and nonzero load.
pmask = obs.plate.values==p
cmask = obs[c+"_bool"].values
lmask = obs.lysate.values==l
gzero = obs[c].values>0
m = np.logical_and.reduce([pmask, cmask, lmask])#, gzero])
# Column mask selecting the viral gene and its spike-in control.
vm = np.logical_or.reduce([gene==g, gene==g+"_spikein"])
s_idx = np.where(gene==g+"_spikein")
v_idx = np.where(gene==g)
X = np.log1p(mtx[:,vm][m])
#X = np.log1p(mtx[m][:,vm])
# Target: log1p of the known viral load for the selected cells.
load = np.log1p(obs[c].values[m])
f = 0.5
X_train, X_test, y_train, y_test = train_test_split(X, load, test_size=f, random_state=43)
test_size = np.ceil(len(load)*f).astype(int)
train_size = X.shape[0]-test_size
# Sanity check: manual size bookkeeping matches the split's output.
print(test_size==y_test.shape[0])
print(train_size==y_train.shape[0])
# Down-weight zero-load samples so they do not dominate the fit.
sample_weight_train = np.ones(train_size)
sample_weight_train[y_train==0] = 1/np.unique(load).shape[0]
sample_weight_test = np.ones(test_size)
sample_weight_test[y_test==0] = 1/np.unique(load).shape[0]
# NOTE(review): the `normalize` keyword was removed from sklearn's
# LinearRegression in scikit-learn 1.2 -- confirm the pinned version.
lr = LinearRegression(normalize=False)
lr.fit(X_train, y_train, sample_weight=sample_weight_train)
y_pred = lr.predict(X_test)
print("r2 = {:,.4f}".format(lr.score(X_test, y_test, sample_weight=sample_weight_test)))
score = metrics.r2_score(y_test, y_pred, sample_weight=sample_weight_test)
mse = metrics.mean_squared_error(y_test, y_pred, sample_weight=sample_weight_test)
print("r2 coefficient : {:,.4f}".format(score))
print('Mean squared error: {:,.4f}'.format(mse))
# Project test points onto the fitted linear combination for a 1-D plot axis.
XX = np.dot(X_test, lr.coef_.T) + lr.intercept_
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(XX, y_test, label="Truth, size=sample_weight", s=sample_weight_test*75, c="k")
ax.plot(XX, y_pred, label="Ideal, r2={:,.4f}".format(score), color="k", linewidth=1)
ax.set_xlabel("{:,.2f}*log(viral+1) + {:,.2f}*log(spikein+1) + {:,.2f}".format(lr.coef_[0],lr.coef_[1], lr.intercept_))
ax.set_ylabel("log(Viral load + 1)")
ax.set_xlim(-0.1)
ax.set_ylim(-0.1)
# yex() presumably draws a y=x reference line -- helper defined elsewhere.
yex(ax)
ax.set_title("Viral load: {} {} {}".format(p, g.split("_")[-1][0], c))
ax.legend()
#plt.savefig("./figs/viral_load_{}_{}_{}.png".format(p, g.split("_")[-1], c),bbox_inches='tight', dpi=300, fontsize=20)
plt.show()
# + id="-v2QkBBGBpaV" colab_type="code" colab={}
| notebooks/viral_load.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Group Submission 2
# ---
# ## Team Members:
# - <NAME>
# - <NAME>
# - <NAME>
# - <NAME>
#
# ---
# Widen the notebook cell container to 80% of the browser window.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:80% !important; }</style>"))
# ## Question
# - Select at least four explanatory variables and perform the necessary transformations
# so that they are useful in the model phase. You are encouraged to use more than four
# variables. Investigate feature engineering techniques such as PCA and encoding
# target variables using one-hot encoding
# - Write a short paragraph about each technique investigated and show an
# implementation of it in a Jupyter Notebook. Make sure to include references that
# indicate where the ideas were sourced
# ### Generating Variables
#
# - we will use the data from the **`data_day`** file generated from Submission 1 for this exercise
#
#
# #### Importing Into DataFrame
# +
import pandas as pd
import numpy as np
# Load the daily price data produced in Submission 1, indexed by timestamp.
data = pd.read_csv('data_day.csv', parse_dates=True, index_col='Date_Time')
# -
# #### Generating `close` variable
#
# - period close price
# Keep only the closing price; all remaining features are derived from it.
data.drop(columns=['open', 'low', 'high', 'volume'], inplace=True)
data.head()
# #### Generating `MACD` & `MACDSignal` variables
#
# - Moving average convergence / divergence value
# - MACD signal
# +
# MACD = EMA12 - EMA26 of the close; the signal line is a 9-period EMA of MACD.
# NOTE(review): the halflife values (48/104/36) are 4x the conventional
# periods -- confirm this smoothing choice is intentional.
data['EMA12'] = 0
data['EMA26'] = 0
data['EMA12'] = data['close'].ewm(min_periods=12,halflife=48,adjust=False).mean()
data['EMA26'] = data['close'].ewm(min_periods=26,halflife=104,adjust=False).mean()
data['MACD'] = (data['EMA12'] - data['EMA26'])
data['MACDsignal'] = data['MACD'].ewm(min_periods=9,halflife=36,adjust=False).mean()
# Drop the intermediate EMA columns once MACD is computed.
data.drop(columns=['EMA12', 'EMA26'], inplace=True)
data.tail()
# -
# #### Generating `pct_change` & `RSI` variables
#
# - Percent change increase - `True / False`
# - Relative strength index
#
# +
# pct_change: binary flag, 1 when the close did not fall vs the prior period.
data['pct_change'] = data['close'].pct_change()
data['pct_change'] = np.where(data['pct_change']>=0, 1, 0)
# Up/down indicator columns feeding the RSI exponential averages.
data['up'] = data['pct_change']
# BUG FIX: Series.fillna() is not in-place -- the returned Series must be
# assigned back, otherwise the call is a silent no-op. (Harmless here only
# because np.where() above already mapped NaNs to 0.)
data['up'] = data['up'].fillna(0)
data['up'] = np.where(data['up']>0, 1, 0)
data['down'] = data['pct_change']
data['down'] = data['down'].fillna(0)
data['down'] = np.where(data['down']<=0, 1, 0)
# RSI = 100 - 100 / (1 + RS), with RS the ratio of smoothed up/down moves.
data['RSIup'] = data['up'].ewm(min_periods=14,halflife=56,adjust=False).mean()
data['RSIdown'] = data['down'].ewm(min_periods=14,halflife=56,adjust=False).mean()
data['RS'] = data['RSIup'] / data['RSIdown']
data['RSI']= 100.0 - (100.0 / (1.0 + data['RS']))
# Drop the intermediate columns; keep only pct_change and RSI.
data.drop(columns=['up', 'down', 'RSIup', 'RSIdown', 'RS'], inplace=True)
data.tail()
# -
# #### Generating `close_autocorrel`, `pearsonr_close_MACD` and `pearsonr_close_RSI` variables
#
# - Autocorrelation on period close price
# - Correlation period close price vs MACD
# - Correlation period close price vs RSI
# +
from scipy.stats.stats import pearsonr
# Rolling 28-period statistics: lag-1 autocorrelation of close, and Pearson
# correlations of close vs MACD and close vs RSI.  Also assigns each row a
# 1-4 "quartile" label for which quarter of the dataset it falls in.
autocorrel = []
data['close_autocorrel'] = ''
data['pearsonr_close_MACD'] = ''
data['pearsonr_close_RSI'] = ''
data['quartile'] = 0
quarter = int(len(data) / 4)
# The first 29 rows lack a full lookback window; only the quartile is set.
for i in range (0,29):
    autocorrel.append('')
    data.iloc[i, data.columns.get_loc('quartile')] = int((i - 1) / quarter) + 1
for i in range(29,len(data)):
    seq1 = []
    seq2 = []
    seq3 = []
    quartile_val = int((i - 1) / quarter) + 1
    # Clamp the rounding overflow on the final rows into quartile 4.
    if (quartile_val == 5):
        quartile_val = 4
    data.iloc[i, data.columns.get_loc('quartile')] = quartile_val
    #print(i, quarter, quartile_val)
    # Collect the trailing 28 observations ending at row i-1.
    for j in range (i-28,i):
        seq1.append(data['close'][j])
        seq2.append(data['MACD'][j])
        seq3.append(data['RSI'][j])
    autocorrel_series = pd.Series(seq1)
    autocorrel_val = autocorrel_series.autocorr(lag=1)
    data.iloc[i, data.columns.get_loc('close_autocorrel')] = autocorrel_val
    autocorrel.append(autocorrel_val)
    # pearsonr returns (correlation, p-value); only the correlation is kept.
    cross_correlation = pearsonr(seq1, seq2)
    data.iloc[i, data.columns.get_loc('pearsonr_close_MACD')] = cross_correlation[0]
    cross_correlation = pearsonr(seq1, seq3)
    data.iloc[i, data.columns.get_loc('pearsonr_close_RSI')] = cross_correlation[0]
data.tail()
# -
data.tail()
# #### Submission 3
#
# 1.Decide on an algorithm or group of algorithms (for example, ensemble techniques).
#
# 2 Fit the model.
#
# 3 Show that it works out of sample, and use appropriate cross-validation techniques.
#
# 4 Provide the following performance metrics:
#
# (a) ROC curves,
#
# (b) Confusion Matrix,
#
# (c) Precision, Recall, F1-Score, Accuracy, and AUC.
#
# 5 Analysis of metrics and report.
# Require at least 9 non-NaN values per row (drops the warm-up rows).
data.dropna(thresh=9,inplace=True)
data.columns
# Feature matrix: the engineered indicators; target: the binary up/down flag.
X = data[['close','MACD','MACDsignal','RSI','close_autocorrel','pearsonr_close_MACD','pearsonr_close_RSI','quartile']] #This includes everything
#data['Returns']= data['Price'].shift(-1)-data['Price'].shift(-2)
#Y = data['Returns']
Y = data['pct_change']
X
#create training and testing data sets
# Chronological split (no shuffling): the last 20% of rows become the test set.
test_size = 0.2
X_train = X[:-int(test_size*len(X))]
X_test = X[-int(test_size*len(X)):]
y_train = Y[:-int(test_size*len(X))]
y_test = Y[-int(test_size*len(X)):]
from sklearn.preprocessing import StandardScaler
#scale data
scaler = StandardScaler()
scaler.fit(X_train) #only fit to training set
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Try neural network - MLPRegressor
from sklearn.neural_network import MLPClassifier, MLPRegressor
# NOTE(review): the target is binary yet regressors are used below, so
# .score() reports R^2 rather than accuracy -- confirm this is intended.
mlp = MLPRegressor(hidden_layer_sizes=(13,13,13), max_iter=500, learning_rate='adaptive',verbose=10,activation='relu',solver='adam',alpha=0.0001,random_state=0)
mlp.fit(X_train, y_train)
print ("Training Score: ", mlp.score(X_train, y_train))
print ("Test Score: ", mlp.score(X_test, y_test))
# +
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
rf_clf = RandomForestRegressor(n_estimators = 100)
rf_clf.fit(X_train,y_train)
print ("Training Score: ", rf_clf.score(X_train, y_train))
print ("Test Score: ", rf_clf.score(X_test, y_test))
# -
from matplotlib import pyplot as plt
#Feature Extraction based on the RF fitted model.
# Horizontal bar chart of the random forest's feature importances.
feat_importances = pd.Series(rf_clf.feature_importances_, index=X.columns)
feat_importances.nlargest(20).plot(kind='barh')
plt.show()
# +
from sklearn.metrics import roc_curve, classification_report
# ROC curve for the random forest's continuous predictions against the
# binary test labels; the dashed diagonal marks a random classifier.
y_pred = rf_clf.predict(X_test)
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
# +
from keras.models import Sequential
from keras.layers import Dense
from matplotlib import pyplot
def get_model(trainX, trainy):
    """Build and fit a single-hidden-layer binary classifier.

    trainX: 2-D feature array with 8 columns; trainy: binary labels.
    Returns the fitted keras Sequential model.
    """
    # define model
    model = Sequential()
    model.add(Dense(100, input_dim=8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit model
    model.fit(trainX, trainy, epochs=300, verbose=0)
    return model
# fit model
model = get_model(X_train, y_train)
# +
# NOTE(review): this cell is an exact duplicate of the previous one and
# re-defines and re-fits the same model -- likely left over from editing.
from keras.models import Sequential
from keras.layers import Dense
from matplotlib import pyplot
def get_model(trainX, trainy):
    """Build and fit a single-hidden-layer binary classifier (duplicate)."""
    # define model
    model = Sequential()
    model.add(Dense(100, input_dim=8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # compile model
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit model
    model.fit(trainX, trainy, epochs=300, verbose=0)
    return model
# fit model
model = get_model(X_train, y_train)
# +
#************************Precision, Recall, F1-Score, Accuracy, and AUC.
# predict probabilities for test set
yhat_probs = model.predict(X_test, verbose=0)
# predict crisp classes for test set
# FIX: Sequential.predict_classes() was removed in TensorFlow 2.6; for a
# sigmoid output, threshold the predicted probabilities at 0.5 instead.
yhat_classes = (yhat_probs > 0.5).astype("int32")
# +
from sklearn.datasets import make_circles
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
# Standard binary-classification metrics for the fitted keras model:
# crisp classes for threshold metrics, raw probabilities for AUC.
# accuracy: (tp + tn) / (p + n)
accuracy = accuracy_score(y_test, yhat_classes)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp)
precision = precision_score(y_test, yhat_classes)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_test, yhat_classes)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_test, yhat_classes)
print('F1 score: %f' % f1)
# ROC AUC
auc = roc_auc_score(y_test, yhat_probs)
print('ROC AUC: %f' % auc)
# confusion matrix
matrix = confusion_matrix(y_test, yhat_classes)
print(matrix)
# -
# NOTE(review): "returns" here is the RF prediction error, not a portfolio
# return series -- pyfolio's tear sheet expects periodic returns, so verify
# this input before trusting the report.
returns = y_pred - y_test
print (returns)
import pyfolio as pf
from pyfolio import timeseries
yhat_probs
#perf_func = timeseries.perf_stats
#perf_stats_all = perf_func(returns=your_portfolio_returns_as_a_pdSeries, factor_returns=your_benchmark_returns_as_a_pdSeries, positions=None, transactions=None, turnover_denom="AGB")
pf.create_full_tear_sheet(returns)
| Jupyter_notebook/Jupyter_Notebook_Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #Quest!
# Now that we have amassed all the tools we need, we are going to make a video game! Now, it won't be a next-gen console game with lifelike graphics, we will be making a text based adventure game. First, we need one more tool. We want to take input from our player if we want them to stay interested in the game. We do this with the **input()** command. If we put something inside the parentheses, that will be our prompt.
# Prompt the player for a line of text and echo it back to the screen.
x = input("Input something please ")
print(x)
# Let's get started on our game! The first thing any good hero needs is an inventory. Let's give them one.
# Good! It's empty for now, but I am sure our hero will come across some items soon. Let's give our hero some base information like name, age, health, damage, gold, etc. Choose whatever you think will be important in your game. Remember, we can ask the user for input on some of these.
# Okay, so our hero exists and is ready for action. We ought to keep track of where he is, let's define a location for them to start at, perhaps a town. We can update this as the hero moves from place to place.
# Looks like we are ready to start the adventure. Let's give some options for our hero to choose from and ask for some input. Then let's check **if** our hero chose one of the acceptable optons. We can check where our hero is and update the options accordingly. Let's loop over and over **while** we are still in town.
# Maybe now that we have explored the town, we can move on to our quest! As our hero moves from place to place, we can inspire some conflict and use while loops to fight some monster until either of us dies.
# So now we can loop over and over until we decide the hero has "won". Have fun with this and explore different ways to use what we have learned to keep track of our hero.
| Python Workshop/Quest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/heavy1368/ON2022/blob/main/Lab6_on_live.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="0j7bOFgFVgQy"
# + [markdown] id="MUfE_05AXxy2"
# ## Algebra liniowa w Pythonie
# * ```np.linalg.inv ``` - odwrotność macierzy
# * ```np.linalg.det``` - wyznacznik
# * ```np.linalg.eig``` - wartości i wektory własne
# * ```np.dot``` - iloczyn macierzowy
# * ```np.inner``` - iloczyn skalarny
# * ```np.cross``` - iloczyn wektorowy
#
# * ```np.linalg.norm``` - norma macierzy lub wektora
# + id="mc1FIY9uX1bm"
import numpy as np
# + id="A5uob6PLaHdP"
# C = B A B^-1 is a similarity transform, so C shares A's eigenvalues.
A=np.array([[2,3],[0,2]])
B=np.array([[2,5],[3,8]])
C=np.dot(B,np.dot(A,np.linalg.inv(B)))
# np.linalg.eig returns (eigenvalues, eigenvectors).
Eigenvalues=np.linalg.eig(A)
# + colab={"base_uri": "https://localhost:8080/"} id="J_8eaHYwaJRO" outputId="a2561409-644e-437e-ebfe-ee3326fd0136"
# Largest eigenvalue of A.
np.max(Eigenvalues[0])
# + id="7JKrvg3uaRRZ"
# + [markdown] id="8GJgEg2vbmLt"
# #### Exercise 1
# Matrices with eigenvalues 1, 2
#
# + id="Cd7ZNK_zbqaw"
B=np.array([[1,2],[3,4]])
# + colab={"base_uri": "https://localhost:8080/"} id="-oh9rZdUbvJO" outputId="d334e8a5-8cb5-4c6b-8d50-65341a2c3d3c"
np.linalg.det(B)
# + id="2Lu13y4qbyj4"
# Conjugating diag(1, 2) by B produces a matrix with eigenvalues 1 and 2.
C=np.dot(B,np.dot(np.diag([1,2]),np.linalg.inv(B)))
# + colab={"base_uri": "https://localhost:8080/"} id="6DYSsqY3cBlF" outputId="b4ce215b-afc2-43b9-d65a-13d0205c2478"
np.linalg.eig(C)
# + colab={"base_uri": "https://localhost:8080/"} id="VQbWGTUlcHjl" outputId="ae2f84e4-c8be-4000-9757-b76a009c3750"
# The @ operator and np.dot are equivalent for 2-D matrix products.
B @ B
# + colab={"base_uri": "https://localhost:8080/"} id="X7bivPmCcQkA" outputId="a9d5662c-d1a6-4c18-ff4a-e4ebb6ff4f34"
np.dot(B,B)
# + id="OSidr7gtcTrS"
# + [markdown] id="FRpWUxOjciRA"
# #### Rozkłady macierzy
# * ```np.linalg.eig``` - to właściwie rozkład Jordana (macierz kwadratowa)
# * ```np.linalg.svd``` - rozkład SVD (ortogonalna, diagonalna, ortogonalna)
# * ```np.linalg.qr``` - rozkład QR (ortogonalna, górnotrójkątna)
# * ```scipy.linalg.lu``` - rozkład LU (dolnotrójkątna, górnotrójkątna)
# + id="nmavOOP5cihd"
# + [markdown] id="rbk-hOS2dbzC"
# #### Zadanie 2: SVD
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="dMOtSlIbdgT6" outputId="67676a90-22ea-4d15-9def-55bae2fd5b21"
A=np.random.random((5,5))
A
# + id="83BjQGW7dj6N"
SVD=np.linalg.svd(A)
# + colab={"base_uri": "https://localhost:8080/"} id="1zlPAWTrdq7j" outputId="583df966-ad47-4e56-b49e-089db9367521"
SVD[0] @ np.diag(SVD[1]) @ SVD[2]-A
# + colab={"base_uri": "https://localhost:8080/"} id="vgIM0-75d-Kl" outputId="737fc3d1-5914-4f1c-8e2b-9ecbd42c0d86"
(SVD[0] @ SVD[0].T).round(0)
# + id="JwrXBABieXLv"
# + [markdown] id="plFned5hfC1s"
# #### Zadanie
# Znajdź rozkład własny i rozkład SVD macierzy $\begin{bmatrix} 1&2&3\\1&2&1\\0&1&0\end{bmatrix}$.
# + colab={"base_uri": "https://localhost:8080/"} id="rtQAZA-afDH5" outputId="6acbcb11-99e7-4ff2-c97a-918604bcf478"
np.concatenate([A,A])
# + id="TdLtO-XLgl7f"
B=np.array([[1,2,3],[1,2,1]])
# + id="lsI1CbSmhDd5"
B_SVD=np.linalg.svd(B)
# + colab={"base_uri": "https://localhost:8080/"} id="iGigg8rNhHoK" outputId="e3374eaa-0ab4-4098-ba3f-5a8fd8c76c71"
sigma=np.diag(B_SVD[1])
sigma
# + id="xzza7Tf0hO4O"
sigma2=np.concatenate([sigma,np.zeros((2,1))],axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="7j-YAOsihaqX" outputId="4e4c11b7-0f07-4f56-9e76-8a498958d906"
sigma2
# + colab={"base_uri": "https://localhost:8080/"} id="svA4ivcahfcE" outputId="25e9f4ce-b41e-47d4-90b8-60e999119a2b"
B_SVD[0] @ sigma2 @ B_SVD[2]
# + id="B9WWxB8tiFDP"
# + [markdown] id="wpwS5H6Ai8Fm"
# #### Zadanie
# Oblicz $\begin{bmatrix} 1&2&3\\1&2&1\\0&1&0\end{bmatrix}^{20}$.
# + id="ZQAkPQYJi9WD"
A=np.array([[1,2,3],[1,2,1],[0,1,0]])
# + colab={"base_uri": "https://localhost:8080/"} id="XrDLC9V-jb99" outputId="54f40f08-4f4b-4c51-f268-045af678fc31"
# %time np.linalg.matrix_power(A,20)
# + colab={"base_uri": "https://localhost:8080/"} id="tx9CumCvjiLs" outputId="5d796021-70d7-45a3-ba39-bb7b8ae25bdb"
# %%time
A_rozklad=np.linalg.eig(A)
JA=np.diag(A_rozklad[0])
P=A_rozklad[1]
P @ JA**20 @ np.linalg.inv(P)
# + colab={"base_uri": "https://localhost:8080/"} id="NrAchrOEkjD0" outputId="695477fd-a9e9-41d0-d674-b0ad6c18ec96"
# + colab={"base_uri": "https://localhost:8080/"} id="xrvcQK5NkvUV" outputId="231ece82-0967-4a4d-a04f-788456862bac"
P
# + [markdown] id="CHqbFAH_mVu4"
# ## Rozwiązywanie układów równań
#
# * ```np.linalg.cond``` - wskaźnik uwarunkowania
# * ```np.linalg.solve``` - rozwiązywanie
# + colab={"base_uri": "https://localhost:8080/"} id="cb0Ck2gck1p6" outputId="c24c1b69-5e4d-44c5-c1ae-9b14a5cea82d"
np.linalg.cond(A)
# + colab={"base_uri": "https://localhost:8080/", "height": 130} id="ScZF94KPmkFu" outputId="48fb1651-248c-4289-a6e2-9269b8450fc2"
# + [markdown] id="4dW9wyvAnLJb"
# #### Zadanie
# Oblicz współczynnik uwarunkowania i rozwiąż układ równań $Ax=b$.
#
# $$
# A=\begin{bmatrix} 1&0&2\\3&2&1\\1&1&1 \end{bmatrix},\quad b_1=\begin{bmatrix}20.001\\-9.9999\\ 100.00001\end{bmatrix},\quad b_2=\begin{bmatrix}20\\-10\\ 100\end{bmatrix}
# $$
# + id="ppoAE2lKnIXd"
A=np.array([[1,0,2],[3,2,1],[1,1,1]])
# + colab={"base_uri": "https://localhost:8080/"} id="V69pZj3HnkKw" outputId="57a16cf1-5279-4a5d-b8eb-e1d085870d6d"
np.linalg.cond(A)
# + id="zsHJayyynmPt"
b1=np.array([20.001,-9.9999,100.00001])
b2=np.array([20,-10,100])
# + colab={"base_uri": "https://localhost:8080/"} id="phuNCuwHn0Rm" outputId="a52ea9cf-5a87-4b1d-a12f-4ba680554ace"
np.linalg.solve(A,b1)
# + colab={"base_uri": "https://localhost:8080/"} id="Z5uq5TY-n3sP" outputId="dc678fab-7a1a-4b40-ab74-95dabb5d6bd1"
np.linalg.solve(A,b2)
# + id="Ejk8CcD0n_Lj"
# + [markdown] id="yf8fvK_PoVxQ"
# ### Zadanie domowe
# Stowrzyć macierz 3x3 o współczynniku uwarunkowania co najmniej $10000$ i sprawdzić błąd rozwiązania dla b1 i b2
# + id="_bzPjUlvodzl"
import numpy as np
# Homework: an ill-conditioned 3x3 matrix (condition number >> 10000) to
# show how a tiny perturbation of b changes the computed solution.
A=np.array([[1,131313,2],[3,2,1],[1,1,1]])
# + id="ZmqyLOncpz0C" outputId="147be928-bb67-445e-d4a3-3dc055e33cd9" colab={"base_uri": "https://localhost:8080/"}
np.linalg.cond(A)
# + id="wTGHZ-pxp1Tg"
b1=np.array([20.001,-9.9999,100.00001])
b2=np.array([20,-10,100])
# + id="Wxdv0dZ6p1M8"
j=np.linalg.solve(A,b1)
# + id="vUSA3BvBp1Db"
k=np.linalg.solve(A,b2)
# + id="Xw5gQ0kyqSz8" outputId="42722162-5af5-4a3e-c80a-676df9427cb0" colab={"base_uri": "https://localhost:8080/"}
j
# + id="UXbqLr_hqSkk" outputId="4e7af3c8-e557-4430-9c88-9a0162221179" colab={"base_uri": "https://localhost:8080/"}
k
# + id="ZMc3eGjHp05e" outputId="19661b0a-c3e8-41c7-a803-283987841241" colab={"base_uri": "https://localhost:8080/"}
# Componentwise difference between the two solutions.
np.abs(j-k)
# + [markdown] id="hiX5IPxeqFrT"
# #### Zadanie dla chętnych (łańcuch Markowa)
# $P=\begin{bmatrix} 1&0&0&0&0&0\\0&1&0&0&0&0\\p&q&r&0&0&0\\p&0&q&r&0&0\\p&0&0&q&r&0\\p&0&0&0&q&r \end{bmatrix}$
#
#
#
# <NAME> opisuje prawdopodobieństwa przejść między pięcioma stanami: wyrzucony ze studiów, ukończył studia, po 4 roku, po 3 roku, po 2 roku, po 1 roku. (np. ze stanu nr 6 do stanu nr 5 prawdop. = q). Przyjmijmy p=0.2, q=0.7, r=0.1.
#
# * Oblicz prawdopodobieństwo, że jeśli ukończyłeś pierwszy rok, to ukończysz całe studia.
#
# * Zauważ w macierzy $P$ blokową strukturę: $P=\begin{bmatrix} Id & 0 \\ R & Q \end{bmatrix}$. Porównaj poprzedni wynik z wynikiem mnożenia $(Id-Q)^{-1} R$.
# + id="zmwDJuw1qHe2"
| Lab6_on_live.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] raw_mimetype="text/latex"
# (c) <NAME> 2019. Thanks to Universidad EAFIT for support. This material is part of the course Introduction to Finite Element Analysis
# -
# # Global assembly of the elemental FEM equilibrium equations
# ## Introduction
# In this Notebook we describe computational details related to the final assembly of the global equilibrium equations in the finite element formulation of the theory of elasticity boundary value problem. This assembly process corresponds to the addition of the stiffness matrices corresponding to each element of the mesh considering the appropriate position of each coeeficient. **After completing this notebook you should be able to:**
#
# * Understand the physical basis, in terms of the action-reaction principle, behind the process of assembly of global equilibrium equations.
#
# * Recognize the main numerical operators involved in the process of assembly of global equilibrium equations.
#
# * Implement algorithms to assemble global equilibrium equations for general elements.
# ## Finite element assembly
#
# Consider the simple finite element model shown in the figure below. As discussed previously, and as a result of discretizing the PVW, the nodal forces associated to the $Q$-th degree of freedom satisfy the following equilibrium relationship:
#
# $$
# K^{QP}u^P=f_V^Q+f_t^Q.
# $$
#
#
# The term $K^{QP}u^P$ in this equilibrium equation corresponds to the nodal force $f_\sigma^Q$ resulting from internal forces associated with the element stresses. The total magnitude of these internal forces is due to the contribution from all the elements connecting to the node. This is exactly the same case when solving a simple spring-mass system, [see for instance Bathe(2006) Example 3.1].
#
# <center><img src="img/assembled.png" alt="files" style="width:500px"></center>
#
#
#
# The process of considering internal force terms like $f_\sigma^Q$, accounting for all the elements, and leading to the global equilibrium equations of the system is called the assembly process. The resulting internal forces for the complete system $\left\{F_\sigma\right\}$ can be written in organized form like:
#
#
# $$
# \left\{F_\sigma\right\}=\left[K^G\right]\left\{U^G\right\}
# $$
#
# and the equilibriun equations for the finite element model as:
#
# $$
# \left\{F_\sigma\right\}-\left\{F_V\right\}-\left\{F_t\right\}=0
# $$
#
# where $\left\{F_V\right\}$ and $\left\{F_t\right\}$ are global force vectors due to body forces and surface tractions. The assembly of the global stiffness matrix $\left[K^G\right]$ leading to the internal forces vector can be written like:
#
# $$
# \left[K^G\right]=\bigwedge_{i=1}^{Numel} k^i
# $$
#
# where $\bigwedge$ is called the **assembly operator** which loops through the $NUMEL$ elements in the mesh and adds each local coefficient matrix $k^i$. The assembly operator works like the standard summation operator $\Sigma$ but with the intrinsic inteligence of adding the terms at the right positions.
# ## Physical assembly
#
# The process of assembly can be easily understood considering Newton's third law of action and reaction. This is ilustrated in the figure below where we have labeled $U_b$ those degrees of freedom along the common surface $S_b$ and $U_a$ and $U_c$ those pertaining to other regions of element $1$ and $2$ respectively.
#
# <center><img src="img/coupled1.png" alt="files" style="width:500px"></center>
#
#
# Now, the nodal forces representing the internal stresses take the following forms in each element:
#
#
# $$
# \begin{Bmatrix}F_a\\F_b\end{Bmatrix} = \begin{bmatrix}K_{aa}^1&K_{ab}^1\\K_{ba}^1&K_{bb}^1\end{bmatrix}\begin{Bmatrix}U_a\\U_b\end{Bmatrix}
# $$
#
# and
#
# $$
# \begin{Bmatrix}-F_b\\F_c\end{Bmatrix}=\begin{bmatrix}K_{bb}^2&K_{bc}^2\\K_{cb}^2&K_{cc}^2\end{bmatrix}\begin{Bmatrix}U_b\\U_c\end{Bmatrix}.
# $$
#
#
# Using the equilibrium and compatibility conditions in terms of nodal forces and displacements given by:
#
# \begin{align*}
# & F_b^1+F_b^2=0
# & U_b^1=U_b^2
# \end{align*}
#
# yields the equilibrium equations for the two element assemblage:
#
# $$
# \begin{bmatrix}K_{aa}^1&K_{ab}^1&0\\K_{ba}^1&K_{bb}^1+K_{bb}^2&K_{bc}^2\\0&K_{cb}^2&K_{cc}^2\end{bmatrix}\begin{Bmatrix}U_a\\U_b\\U_c\end{Bmatrix}=\begin{Bmatrix}F_a\\0\\F_c\end{Bmatrix}.
# $$
#
#
# The addition of more elements via mechanical interaction through the exposed surfaces implies the same process of canceling force terms and enforcing displacement compatibility. At the end of the process the only forces left are those introduced by surface tractions and body forces.
# **Questions:**
#
# ***For the mesh shown in the figure, with internal surfaces between elements 1-3 and 3-2 labeled $S_b$ and $S_c$ respectively, write the form of the global stiffness matrix resulting from the physical assembly. Explicitly formulate the force and displacement compatibility equations along both boundaries.**
#
#
#
# <center><img src="img/long.png" alt="files" style="width:300px"></center>
#
# ## Computational assembly
#
# Computationally, the assembly process implies (i) identifying active and restrained degrees of freedom (dof) in the mesh (ii) assigning equation identifiers to the active degrees of freedom and (iii) identifying the contribution from each element to the different degrees of freedom.
#
# ### Boundary conditions array IBC()
#
# To identify active and restrained dofs the nodal data specifies a boundary condition index for each node (see figure) with values $0$ and $-1$ specifying a free and restrained dof respectively. So the nodal data in the input file gives for each node its nodal id, the nodal coordinates in the global reference system and the boundary condition flag.
#
# <center><img src="img/nodesF.png" alt="files" style="width:200px"></center>
#
#
# The boundary conditions data is then stored into an integer array **IBC()** which in a first instance contains only $0$s and $-1$s
#
# $$
# \begin{array}{c}0\\1\\2\\3\\4\\5\\6\\7\\8\end{array}\begin{bmatrix}0&-1\\-1&-1\\0&-1\\0&0\\0&0\\0&0\\0&0\\0&0\\0&0\end{bmatrix}
# $$
#
# and in a second instance is transformed into equation numbers:
#
#
# $$
# \begin{array}{c}0\\1\\2\\3\\4\\5\\6\\7\\8\end{array}\begin{bmatrix}0&-1\\-1&-1\\1&-1\\2&3\\4&5\\6&7\\8&9\\10&11\\12&13\end{bmatrix}
# $$
#
#
#
# The following two subroutines read the input (text) files (nodes, mats , elements and loads) and form the boundary conditions array **IBC()** in its two instances completing steps (i) and (ii) for the computational assembly process. This last step is performed by the subroutine **eqcounter()**.
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import sympy as sym
# **(Add comments to clarify the relevant steps in the code below)**.
def readin():
    """Read the FEM model text files (nodes, materials, elements, loads).

    Returns the four arrays in that order; the element connectivity array
    is integer-typed, the others are floats.
    """
    nodes = np.loadtxt('files/' + 'snodes.txt', ndmin=2)
    mats = np.loadtxt('files/' + 'smater.txt', ndmin=2)
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int yields the same platform-default integer dtype.
    elements = np.loadtxt('files/' + 'seles.txt', ndmin=2, dtype=int)
    loads = np.loadtxt('files/' + 'sloads.txt', ndmin=2)
    return nodes, mats, elements, loads
nodes, mats, elements, loads = readin()
def eqcounter(nodes):
    """Count active equations and build the boundary-conditions array.

    Columns 3 and 4 of ``nodes`` hold the x/y restraint flags
    (0 = free, -1 = restrained).  Free dofs are renumbered in row-major
    order with consecutive equation identifiers.

    Returns (neq, IBC): the number of active equations and the
    nnodes x 2 integer array of equation numbers (-1 marks a
    restrained dof).
    """
    # Copy the two flag columns into an integer array in one step.
    IBC = nodes[:, 3:5].astype(int)
    neq = 0
    # Walk the dofs row by row, assigning ids to the free (0) entries.
    for row in range(IBC.shape[0]):
        for col in range(2):
            if IBC[row, col] == 0:
                IBC[row, col] = neq
                neq += 1
    return neq, IBC
neq, IBC = eqcounter(nodes)
# ### Element connectivites array IELCON()
#
# Step (iii) in the process is completed after relating nodes in each element to the equation numbers specified in **IBC()**. The nodal points defining each element are input in a data file (see figure below). Note that each nodal identifier indicates the row in the **IBC()** array storing the equation numbers assigned to this node.
#
#
# <center><img src="img/elesF.png" alt="files" style="width:400px"></center>
#
#
# The nodal data for each element is stored in a connectivities array **IELCON()** where the row and element number coincide.
#
#
# $$
# \begin{array} {c}0\\1\\2\\3\end{array}\begin{bmatrix}0&1&4&3\\3&4&7&6\\4&5&8&7\\1&2&5&4\end{bmatrix}
# $$
#
# **Question:**
#
# **Modify the node ordering in the definition of each element and explain what would be the implications of this change in the local stiffness matrix.**
#
# ### The assembly operator DME() array
#
# The final step in the construction of the assembly operator, termed here the **DME()** operator is just the translation of the **IELCON()** array storing nodal numbers into equation numbers stored in **IBC()**:
#
# $$
# \begin{array}{c}0\\1\\2\\3\end{array}\begin{bmatrix}0&-1&-1&-1&4&5&2&3\\2&3&4&5&10&11&8&9\\4&5&6&7&12&13&10&11\\-1&-1&1&-1&6&7&4&5\end{bmatrix}
# $$
#
#
# **Question:**
#
# **(i) Use the IELCON() array together with the boundary conditions array IBC() to find the assembly operator.**
#
# **(ii) Use a different numbering scheme for the sample mesh shown above and repeat the computation of the assembly operator.**
#
# The **DME()** operator can now be used in a straight forward process relating local to global equations identifiers. For instance the first row of the stiffness matrix for element 2 is assembled as indicated next:
#
#
# $$
# \begin{align*}
# K_{22}^G & \leftarrow K_{22}^G+k_{00}^2\\
# K_{23}^G & \leftarrow K_{23}^G+k_{01}^2\\
# K_{24}^G & \leftarrow K_{24}^G+k_{02}^2\\
# K_{25}^G & \leftarrow K_{25}^G+k_{03}^2\\
# K_{2,10}^G & \leftarrow K_{2,10}^G+k_{04}^2\\
# K_{2,11}^G & \leftarrow K_{2,11}^G+k_{05}^2\\
# K_{28}^G & \leftarrow K_{28}^G+k_{06}^2\\
# K_{29}^G & \leftarrow K_{29}^G+k_{07}^2
# \end{align*}
# $$
# The **DME()** operator is obtained by the following subroutine which takes as input arguments the nodes and elements arrays and returns the assembly operator.
#
# **(Add comments to clarify the relevant steps in the code below)**.
def DME(nodes, elements):
    """Build the assembly (DME) operator for 4-node quad elements.

    For each element, the four node ids stored in the connectivity
    columns of ``elements`` (columns 3-6) are translated into global
    equation numbers via the boundary-conditions array from eqcounter().

    Returns (DME, IBC, neq): the nels x 8 assembly operator, the nodal
    equation-number array, and the number of active equations.
    """
    nels = elements.shape[0]
    IELCON = np.zeros([nels, 4], dtype=np.integer)
    DME = np.zeros([nels, 8], dtype=np.integer)
    neq, IBC = eqcounter(nodes)
    nnodes = 4  # nodes per element
    # Unused locals ndof/ngpts removed -- DME's width (8) is fixed by the
    # 2 dofs per node times 4 nodes below.
    for i in range(nels):
        for j in range(nnodes):
            # Connectivity columns start at index 3 of the elements array.
            IELCON[i, j] = elements[i, j+3]
            kk = IELCON[i, j]
            # Each node contributes two dofs (x then y).
            for l in range(2):
                DME[i, 2*j+l] = IBC[kk, l]
    return DME , IBC , neq
# NOTE(review): this call rebinds the name DME from the function to its
# result, shadowing the function for the rest of the notebook.
DME , IBC , neq = DME(nodes, elements)
print(DME)
# It was shown that the assembly involves a typical step like:
#
# $$
# K_{22}^G \leftarrow K_{22}^G+k_{00}^2
# $$
#
# which involves computation of local elemental matrices with terms $K_{ij}^q$. The following subroutine uses as input the **DME()** operator and loops through the elements of the mesh to compute the local matrix [see **UEL()**] and add its contribution into the global matrix.
#
# **(Add comments to clarify the relevant steps in the code below)**.
def assembly(elements, mats, nodes, neq, DME, uel=None):
    """Assemble the global stiffness matrix KG from element contributions.

    Loops over the elements, computes each local 8x8 stiffness matrix with
    uel4nquad(), and scatter-adds it into KG at the positions given by the
    DME assembly operator; entries of -1 mark restrained dofs and are
    skipped.

    The ``uel`` argument is accepted for interface compatibility, but the
    element routine is currently hard-wired to uel4nquad().
    """
    IELCON = np.zeros([4], dtype=np.integer)
    KG = np.zeros((neq, neq))
    nels = elements.shape[0]
    nnodes = 4
    ndof = 8
    for el in range(nels):
        elcoor = np.zeros([nnodes, 2])
        # FIX: np.int was removed in NumPy 1.24; the builtin int performs
        # the intended scalar conversion here.
        im = int(elements[el , 2])
        par0, par1 = mats[im , :]
        for j in range(nnodes):
            IELCON[j] = elements[el , j+3]
            elcoor[j, 0] = nodes[IELCON[j], 1]
            elcoor[j, 1] = nodes[IELCON[j], 2]
        kloc = uel4nquad(elcoor, par1, par0)
        dme = DME[el, :ndof]
        # Scatter-add the local matrix into the active global dofs only.
        for row in range(ndof):
            glob_row = dme[row]
            if glob_row != -1:
                for col in range(ndof):
                    glob_col = dme[col]
                    if glob_col != -1:
                        KG[glob_row, glob_col] = KG[glob_row, glob_col] +\
                            kloc[row, col]
    return KG
# In this case we have assumed that the elemental subroutine produces a stiffness matrix filled with $1$s.
#
# **(Complete this suboroutine with the implementation performed in NB 8)**.
def uel4nquad(coord, enu, Emod):
    """Placeholder element routine: return an 8x8 stiffness matrix of ones.

    The arguments (nodal coordinates, Poisson ratio, Young modulus) are
    accepted but ignored by this stub implementation.
    """
    return np.ones((8, 8))
# Assemble and display the (placeholder) global stiffness matrix.
KG = assembly(elements, mats, nodes, neq, DME)
print(KG)
# **Question:**
#
# **For the mesh shown in the figure propose different node numbering schemes and identify the resulting changes in the size of the half-band in the stiffness matrix. Assume that each element subroutine is full of $1$s.**
#
# <center><img src="img/halfband.png" alt="files" style="width:300px"></center>
#
# ### Glossary of terms.
#
# **Boundary conditions array IBC():** Integer type array storing equation numbers assigned to each nodal point in the mesh.
#
# **Connectivity array IELCON():** Integer type array storing identifiers for the nodal points defining each element in the mesh.
#
# **Assembly:** Computational procedure by which the elemental stiffness matrix are properly added together to form the global stiffness matrix.
#
# **Assembly operator DME():** Integer type array storing the nodal connectivities from each element but translated into equation numbers through the boudnary conditions array **IBC()**.
# ## Class activity.
#
# * (i) Use the subroutines developed previously to compute the stiffness matrix of bi-linear and cuadratic finite elements to compute the global stiffness matrix for the sample problem discussed in this notebook and with the input files **Snodes.txt and Selements.txt** provided.
#
# * (ii) Assume nodal values for the active displacemnts and use the global matrix found in step (i) to find the internal forces vector $\left\{F_\sigma\right\}$ consistent with the element stresses.
#
# * (iii) Repeat step (ii) but instead of assuming known nodal displacements find them after applying point forces along degrees of freedom $9$, $11$ and $13$ and solving the system of equations:
#
#
# $$
# \left[K^G\right]\left\{U^G\right\} = \left\{F\right\}.
# $$
#
#
# * (iv) Verify that the nodal displacemnts $U^G$ found in step (iii) produce internal forces $\left\{F_\sigma\right\}$ in equilibrium with the external forces $\left\{F\right\}.$
#
# ### References
#
# * <NAME>. (2006) Finite element procedures. Klaus-Jurgen Bathe. Prentice Hall International.
#
# * <NAME>, <NAME> (2018). SolidsPy: 2D-Finite Element Analysis with Python, <https://github.com/AppliedMechanics-EAFIT/SolidsPy>.
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom stylesheet and return it as a displayable HTML object.

    Returns:
        IPython HTML object wrapping the contents of ./nb_style.css, so that
        evaluating this cell re-styles the notebook. Assumes nb_style.css
        lives next to the notebook.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on the garbage collector to close it).
    with open('./nb_style.css', 'r') as f:
        styles = f.read()
    return HTML(styles)
css_styling()
| notebooks/09_assembly.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import required libraries
import pandas as pd
import numpy as np
import os
from matplotlib import pyplot as plt
import seaborn
import collections
import math
import statistics
#Set parameters
# %matplotlib inline
pd.options.display.max_rows = 30
# -
# Load the raw Stack Overflow survey results into a DataFrame.
file_path = os.path.join(os.pardir, 'data', 'raw', 'survey_results_public.csv')
df = pd.read_csv(file_path)
# ## 1. Work challenges
# Count of null values in the MainBranch and WorkChallenge columns
# (the two columns actually inspected below).
print(f'Number of null values in Main Branch column: {np.sum(df.MainBranch.isnull() == True)}')
# NOTE(review): the printed label says 'WorkPlan' but the column inspected is WorkChallenge.
print(f'Number of null values in WorkPlan column: {np.sum(df.WorkChallenge.isnull() == True)}')
# Count how often each work challenge ("blocker") occurs in the dataset.
# WorkChallenge holds semicolon-separated multi-select answers.  Null rows
# (respondents who skipped the question) are dropped so that the string
# 'nan' is not counted as a blocker category, which the previous version did.
blockers = collections.Counter()
for row in df.WorkChallenge.dropna():
    blockers.update(str(row).split(';'))
# Convert raw counts to a percentage of ALL respondents (including those who
# skipped the question), consistent with the null-count report above.
blockers = {blocker: (count / df.shape[0]) * 100
            for blocker, count in blockers.items()}
blocker_percent = pd.DataFrame(index=blockers.keys(), columns=['Blocker'], dtype='float')
for blocker, percent in blockers.items():
    blocker_percent.at[blocker, 'Blocker'] = percent
# Quick overview bar chart of every blocker.
fig = plt.bar(range(len(blockers)), list(blockers.values()))
plt.xticks(range(len(blockers)), list(blockers.keys()), rotation='vertical');
# Horizontal bar chart of the ten most common blockers.
fig = blocker_percent.nlargest(10, 'Blocker').plot(kind='barh', figsize=(7,7), color='steelblue');
fig.invert_yaxis()
plt.title('Blockers for Developers Productivity');
plt.ylabel('Blocker');
plt.xlabel('Percentage (%)');
| notebooks/3. Top_Blockers_for_development.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ````
# AESM1450 - Geophysical Prospecting -- Controlled-Source ElectroMagnetic (CSEM) Modelling
# ````
# # A. Interactive GUI
#
# An example how you can build an interactive GUI to play around.
import empymod
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import FloatSlider, interact
# %matplotlib notebook
plt.style.use('ggplot')
# Slider widgets for the target layer; continuous_update=False defers the
# (comparatively expensive) model recomputation until a slider is released.
w_res = FloatSlider(min=0.01, max=100.0, step=0.1, value=50, continuous_update=False) # Target resistivity
w_thi = FloatSlider(min=10, max=500, step=10, value=100, continuous_update=False) # Target thickness
w_dep = FloatSlider(min=500, max=3000, step=50, value=2000, continuous_update=False) # Target depth
w_frq = FloatSlider(min=-2, max=1, step=0.1, value=-1, continuous_update=False) # log10 frequency
# +
# Acquisition parameters
off = np.arange(5, 101)*100  # Receiver offsets: 500 m to 10 km, every 100 m
inp = {
    'src': [0, 0, -950, 0, 0],  # Source dipole [x, y, z, azimuth, dip] at 950 m depth
    'rec': [off, np.zeros(off.shape), -1000, 0, 0], # Receiver dipoles [x=off, y=0, z=-1000; azimuth=0, dip=0]
    'htarg': {'pts_per_dec': -1},
    'verb': 1
}
# Define background model
# Interfaces at 0 m and -1000 m; resistivities presumably air (2e14),
# seawater (0.3) and subsurface background (1) ohm-m -- TODO confirm.
bg_dep = [0, -1000]
bg_res = [2e14, 0.3, 1]
# +
# Build the figure once; the slider callback below mutates the existing
# line objects instead of redrawing, which keeps the GUI responsive.
fig = plt.figure(figsize=(9.8, 6), num='Interactive modeling with empymod')
plt.subplots_adjust(left=0.07, right=.93, bottom=0.1, top=.92, wspace=.05)
# Background (no-target) response at the sliders' initial frequency.
bg_em = empymod.bipole(depth=bg_dep, res=bg_res, freqtime=10**w_frq.value, **inp)
# 1. Plot resistivity model; initialize target with background
ax1 = plt.subplot2grid((2, 3), (0, 0), rowspan=2)
ax1.set_title('Resistivity model')
# Repeat boundary values so the layered model plots as a staircase; depths in km.
bgres = np.r_[bg_res[1], bg_res[1:], bg_res[2]]
bgdep = np.r_[bg_dep, bg_dep[1], -4500]/1000
ax1.semilogx(bgres, bgdep, 'k', label='Background')
# The red "Target" line starts as a copy of the background; calc_em() moves it.
ax1.semilogx(bgres, bgdep, 'r', label='Target')
ax1.set_xlim([.05, 700])
ax1.set_ylim([-4.5, 0])
ax1.legend()
ax1.set_ylabel('Depth (km)')
ax1.set_xlabel(r'Resistivity ($\Omega\,$m)')
# 2. Plot amplitude; initialize target with background.
# Lines 0/1 (black) remain the background; lines 2/3 (red) become the target.
ax2 = plt.subplot2grid((2, 3), (0, 1), colspan=2)
ax2.set_title(r'$|E|$ (V/m)')
ax2.semilogy(off/1000, np.where(bg_em.amp() >= 0, bg_em.amp(), np.nan), 'k', label='pos. values')
ax2.semilogy(off/1000, np.where(bg_em.amp() < 0, -bg_em.amp(), np.nan), 'k--', label='neg. values')
ax2.semilogy(off/1000, np.where(bg_em.amp() >= 0, bg_em.amp(), np.nan), 'r') # Positive values with solid line
ax2.semilogy(off/1000, np.where(bg_em.amp() < 0, -bg_em.amp(), np.nan), 'r--') # Negative values with dashed line
ax2.set_xticklabels([])
ax2.yaxis.tick_right()
ax2.legend()
ax2.set_ylim([min(bg_em.amp())/10, max(bg_em.amp())*5])
# 3. Plot phase; initialize target with background
ax3 = plt.subplot2grid((2, 3), (1, 1), colspan=2)
ax3.set_title(r'$\phi(E)$ (rad)')
ax3.plot(off/1000, np.angle(bg_em), 'k')
ax3.plot(off/1000, np.angle(bg_em), 'r')
ax3.set_xlabel('Offset (km)')
ax3.yaxis.tick_right()
ax3.set_ylim([-np.pi, np.pi])
# 4. Define function in which we update the target response
def calc_em(resistivity, depth, thickness, log10freq):
    """Recompute background and target responses and refresh the plots in place.

    Parameters
    ----------
    resistivity : float
        Target-layer resistivity (ohm-m).
    depth : float
        Depth of the target top below the second interface (m).
    thickness : float
        Target-layer thickness (m).
    log10freq : float
        log10 of the source frequency (Hz).

    Invoked by ipywidgets.interact whenever a slider changes; updates the
    data of the pre-created matplotlib lines rather than redrawing the figure.
    """
    # Update target depth and resistivity with input
    tg_dep = np.r_[bg_dep, bg_dep[-1]-depth, bg_dep[-1]-(depth+thickness)]
    tg_res = np.r_[bg_res, resistivity, bg_res[2]]
    # Re-calculate the response (background too: it depends on frequency).
    tg_em = empymod.bipole(depth=tg_dep, res=tg_res, freqtime=10**log10freq, **inp)
    bg_em = empymod.bipole(depth=bg_dep, res=bg_res, freqtime=10**log10freq, **inp)
    # Update model and response in plot (lines indexed as created above).
    ax1.lines[1].set_xdata([bg_res[2], resistivity, resistivity, bg_res[2]])
    ax1.lines[1].set_ydata(bg_dep[-1]/1000 -
                           np.r_[depth, depth, depth + thickness, depth + thickness]/1000)
    ax2.lines[0].set_ydata(np.where(bg_em.amp() >= 0, bg_em.amp(), np.nan)) # Positive values with solid line
    ax2.lines[1].set_ydata(np.where(bg_em.amp() < 0, -bg_em.amp(), np.nan)) # Negative values with dashed line
    ax2.lines[2].set_ydata(np.where(tg_em.amp() >= 0, tg_em.amp(), np.nan)) # Positive values with solid line
    ax2.lines[3].set_ydata(np.where(tg_em.amp() < 0, -tg_em.amp(), np.nan)) # Negative values with dashed line
    ax2.set_ylim([min(bg_em.amp())/10, max(bg_em.amp())*5]) # Doesn't seem to update...
    ax3.lines[0].set_ydata(np.angle(bg_em))
    ax3.lines[1].set_ydata(np.angle(tg_em))
    plt.draw()
# -
# Wire the sliders to calc_em; moving any slider triggers a recomputation.
fwgt = interact(calc_em, resistivity=w_res, depth=w_dep, thickness=w_thi, log10freq=w_frq)
empymod.Report()  # environment/version report for reproducibility
| A-Interactive-GUI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''venv'': venv)'
# language: python
# name: python37664bitvenvvenv11a0a02e04af425b992f821ca4558704
# ---
import glob
import json
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
from functools import reduce
from pylab import rcParams
rcParams['figure.figsize'] = 10,10
# +
# benchmarking throughput
target_ids = [
'30I8399019000',
'30I8399019001',
'30I8399019002',
'30I8399019003',
'30I8399019004',
'30I8399019005',
'30I8399019006',
'30I8399019007',
'30I8399019008',
'30I8399019009',
'30I8399019010',
'30I8399019011',
'30I8399019012',
'30I8399019013',
]
base_dir = '../'
download_dir = 'tmp/collect-all'
benchmark_dir = 'tmp/benchmark_throughput'
def parse_elapsed(lines):
    """Parse ``time``-style output lines into a label -> seconds mapping.

    Each line looks like ``"real\t1m30.500s"``: a label, a tab, then a
    duration written as minutes and seconds.  Returns a dict mapping each
    label to the elapsed time in seconds as a float.
    """
    timings = {}
    for entry in lines:
        label, stamp = entry.split('\t')
        minutes, seconds = stamp.replace('s', '').split('m')
        timings[label] = 60 * float(minutes) + float(seconds)
    return timings
def file_stat(files):
    """Return basic statistics for a list of file paths.

    Parameters:
        files: iterable of file-system paths.

    Returns:
        dict with 'file_num' (number of files) and 'file_size'
        (total size in bytes).  An empty list yields zeros for both;
        the previous reduce()-based version raised TypeError on [].
    """
    return {
        'file_num': len(files),
        # sum() handles the empty case and replaces the functools.reduce
        # boilerplate for a plain accumulation.
        'file_size': sum(os.path.getsize(f) for f in files),
    }
# Shell-command templates: remove any stale stats file, then time the
# download/processing task for one product id.
cmd_rm_stat = 'cd {base_dir} && mkdir -p {benchmark_dir} && rm -f {benchmark_dir}/stat-{id}.json'
cmd_calc_stat = 'cd {base_dir} && time python3 lastomesh.py --local-scheduler --workers 4 DownloadShizuokaPCD --product-id {id} --output-dir {benchmark_dir} --work-dir {download_dir}/{id}'
# Benchmark throughput per product id.  NOTE: `output = !{...}` is IPython
# shell-capture syntax; this cell only runs inside IPython/Jupyter.
throughputs = []
for id in target_ids:  # NOTE(review): `id` shadows the builtin
    # Input file count and total size
    las_files = list(glob.glob(os.path.join(base_dir, download_dir, id, '*.las')))
    fstat = file_stat(las_files)
    # !{cmd_rm_stat.format(base_dir=base_dir, benchmark_dir=benchmark_dir, id=id)}
    # Time the load & feature computation; last three lines of the captured
    # output are the real/user/sys lines from `time`.
    output = !{cmd_calc_stat.format(base_dir=base_dir, download_dir=download_dir, benchmark_dir=benchmark_dir, id=id)}
    elapsed = parse_elapsed(output[-3:])
    # Number of points, read from the stats JSON the task wrote
    filepath = os.path.join(base_dir, benchmark_dir, 'stat-{}.json'.format(id))
    with open(filepath, 'r') as f:
        pcd_stat = json.load(f)
    # Collect the results for this product
    record = dict(fstat, **elapsed, point_num=pcd_stat['shape']['value'][0], id=id)
    print(record)
    throughputs.append(record)
df_throughputs = pd.DataFrame(throughputs)
df_throughputs
# -
# Aggregate: points per second, total input size in GiB, seconds per GiB.
throughput_point = df_throughputs['point_num'].sum() / df_throughputs['real'].sum()
file_size_sum = df_throughputs['file_size'].sum() / (1024 ** 3)
elapsed_per_1gb = df_throughputs['real'].sum() / file_size_sum
print(throughput_point, elapsed_per_1gb, file_size_sum, df_throughputs['point_num'].sum())
# + tags=["outputPrepend"]
# benchmarking downsampling
target_projects = {
'mms':[
'30I8399019000',
'30I8399019001',
'30I8399019002',
'30I8399019003',
'30I8399019004',
'30I8399019005',
'30I8399019006',
'30I8399019007',
'30I8399019008',
'30I8399019009',
],
'construction': [
'28XXX00040001',
'29D6152011105',
'29K2033011103',
'29K2461011102',
'29K3481011101',
'29W9350011101',
'30D0230011102',
'30D0721011102',
'30D3703011102',
'30D7318011101',
],
'building': [
'01R0107011318',
'01R0107021318',
'01R0107031318',
'01R0107041318',
'01R0107051318',
'01R0107061318',
'01R0107071318',
'29XXX00010002',
'30XXX03010001',
'31XXX07010001',
],
'terrain': [
'28XXX00030001',
'28XXX00030002',
'28XXX00030003',
'28XXX00030004',
'30XXX00010001',
'30XXX00010002',
'30XXX00010003',
'30XXX00010004',
'30XXX00010005',
'30XXX00010062',
],
}
base_dir = '../'
download_dir = 'tmp/collect-all'
benchmark_dir = 'tmp/benchmark_throughput'
# Shell-command templates: remove stale output, then time the downsampling
# run (CreateMeshFromLasData with meshing skipped).
cmd_rm_ply = 'cd {base_dir} && mkdir -p {benchmark_dir} && rm -f {benchmark_dir}/pcd-{id}.ply'
cmd_calc_ply = 'cd {base_dir} && time python3 lastomesh.py --local-scheduler --workers 4 CreateMeshFromLasData --product-id {id} --output-dir {benchmark_dir} --work-dir {download_dir}/{id} --output-filename pcd-{id}.ply --skip-meshing true'
# Benchmark downsampling for every project group / product id.
# NOTE: `output = !{...}` is IPython shell-capture syntax (Jupyter only).
downsampling = []
for group, target_ids in target_projects.items():
    for id in target_ids:  # NOTE(review): `id` shadows the builtin
        print(id)
        # Time the load & feature computation
        # !{cmd_rm_ply.format(base_dir=base_dir, benchmark_dir=benchmark_dir, id=id)}
        output = !{cmd_calc_ply.format(base_dir=base_dir, download_dir=download_dir, benchmark_dir=benchmark_dir, id=id)}
        elapsed = parse_elapsed(output[-3:])
        # Input/output file counts and sizes
        las_files = list(glob.glob(os.path.join(base_dir, download_dir, id, '*.las')))
        fstat = file_stat(las_files)
        ply_files = list(glob.glob(os.path.join(base_dir, benchmark_dir, 'pcd-{}.ply'.format(id))))
        fstat_ply = file_stat(ply_files)
        # Collect the results for this product
        record = dict(ply_size=fstat_ply['file_size'], **fstat, **elapsed, id=id, group=group)
        # print(record)
        downsampling.append(record)
df_downsampling = pd.DataFrame(downsampling)
df_downsampling
# +
# Overall aggregates: size-reduction ratio and seconds per GiB of input.
downsampling_mean = df_downsampling.mean()
downsampling_sum = df_downsampling.sum()
reduce_ratio = downsampling_sum['ply_size'] / downsampling_sum['file_size']
elapsed_per_1gb = downsampling_sum['real'] / (downsampling_sum['file_size'] / 1024**3 )
print(elapsed_per_1gb, reduce_ratio, downsampling_sum['ply_size'] / 1024**2 / 40)
# +
# Same aggregates broken down per project group.
df_downsampling['file_size_mb'] = df_downsampling['file_size'] / 1024**2
df_downsampling['ply_size_mb'] = df_downsampling['ply_size'] / 1024**2
for key, downsampling_sum in df_downsampling.groupby('group').sum().iterrows():
    reduce_ratio = downsampling_sum['ply_size'] / downsampling_sum['file_size']
    elapsed_per_1gb = downsampling_sum['real'] / (downsampling_sum['file_size'] / 1024**3 )
    print(key, elapsed_per_1gb, reduce_ratio)
df_downsampling.groupby('group').mean()
# +
base_dir = '../'
download_dir = 'tmp/collect-all'
benchmark_dir = 'tmp/benchmark_throughput'
# Shell-command templates: remove stale output, then time the full meshing
# run with vertex-clustering simplification.
cmd_rm_ply = 'cd {base_dir} && mkdir -p {benchmark_dir} && rm -f {benchmark_dir}/mesh-{id}.ply'
cmd_calc_ply = 'cd {base_dir} && time python3 lastomesh.py --local-scheduler --workers 4 CreateMeshFromLasData --product-id {id} --output-dir {benchmark_dir} --work-dir {download_dir}/{id} --output-filename mesh-{id}.ply --simplify-type vertex-clustering'
# Benchmark meshing for every project group / product id.
# NOTE: `output = !{...}` is IPython shell-capture syntax (Jupyter only).
meshing = []
for group, target_ids in target_projects.items():
    for id in target_ids:  # NOTE(review): `id` shadows the builtin
        print(id)
        # Time the load & feature computation
        # !{cmd_rm_ply.format(base_dir=base_dir, benchmark_dir=benchmark_dir, id=id)}
        output = !{cmd_calc_ply.format(base_dir=base_dir, download_dir=download_dir, benchmark_dir=benchmark_dir, id=id)}
        elapsed = parse_elapsed(output[-3:])
        # Input/output file counts and sizes
        las_files = list(glob.glob(os.path.join(base_dir, download_dir, id, '*.las')))
        fstat = file_stat(las_files)
        ply_files = list(glob.glob(os.path.join(base_dir, benchmark_dir, 'mesh-{}.ply'.format(id))))
        fstat_ply = file_stat(ply_files)
        # Collect the results for this product
        record = dict(ply_size=fstat_ply['file_size'], **fstat, **elapsed, id=id, group=group)
        # print(record)
        meshing.append(record)
df_meshing = pd.DataFrame(meshing)
df_meshing
# +
# Overall aggregates: size-reduction ratio and seconds per GiB of input.
meshing_mean = df_meshing.mean()
meshing_sum = df_meshing.sum()
reduce_ratio = meshing_sum['ply_size'] / meshing_sum['file_size']
elapsed_per_1gb = meshing_sum['real'] / (meshing_sum['file_size'] / 1024**3 )
print(elapsed_per_1gb, reduce_ratio, meshing_sum['file_size'] / 1024**2 / 40, meshing_sum['ply_size'] / 1024**2 / 40)
meshing_sum
# +
# Same aggregates broken down per project group.
df_meshing['file_size_mb'] = df_meshing['file_size'] / 1024**2
df_meshing['ply_size_mb'] = df_meshing['ply_size'] / 1024**2
for key, meshing_sum in df_meshing.groupby('group').sum().iterrows():
    reduce_ratio = meshing_sum['ply_size'] / meshing_sum['file_size']
    elapsed_per_1gb = meshing_sum['real'] / (meshing_sum['file_size'] / 1024**3 )
    print(key, elapsed_per_1gb, reduce_ratio)
df_meshing.groupby('group').mean()
| notebooks/benchmarks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar

# Live barcode/QR scanner: read frames from the default webcam, decode any
# visible codes with pyzbar, and overlay the decoded payload on the preview.
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_PLAIN
while True:
    ret, frame = cap.read()
    if not ret:  # camera unavailable or stream ended
        break
    frame = cv2.flip(frame, 1)  # mirror for a natural selfie-style preview
    decodedObjects = pyzbar.decode(frame)
    for obj in decodedObjects:
        print("Data", obj.data)
        cv2.putText(frame, str(obj.data), (50, 50), font, 3,
                    (255, 0, 0), 3 )
    cv2.imshow("Frame", frame)
    # Bug fix: the original `if key ==27 & 0xFF == ord('q')` parsed as the
    # chained comparison `key == 27 == ord('q')`, which is always False, so
    # neither Esc nor 'q' ever exited the loop.  Mask waitKey's return value
    # to a byte, then quit on Esc (27) or 'q'.
    key = cv2.waitKey(1) & 0xFF
    if key in (27, ord('q')):
        break
# Release the camera and close the preview window (previously leaked).
cap.release()
cv2.destroyAllWindows()
# -
| _posts/OpenCV/codes/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
#Data import using Numpy
import numpy as np
import csv
import random
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import norm
# -
# __Load Data File__
#
# We are using csv reader to load data file to our numpy array
# +
# Path to the EMG training data recorded from the Myo armband.
data_dir = "data/"
file_name = data_dir + "BasicDefaultTypeEMG.csv"
# Python 2 idiom: csv.reader over a binary-mode handle.
# NOTE(review): the file handle is never closed; prefer a `with` block.
reader = csv.reader(open(file_name, "rb"), delimiter=",")
x = list(reader)
result = np.array(x[1:])  # drop the header row
# -
# __Extract Important Features__
#
# From the loaded data, we extract the main features — the EMG pod values collected from the Myo armband device — which will be fed into the neural network. We also extract the target values from the file and store them in `grips_data`.
#
# +
# Select the rows to be processed and report the sample count per class.
# NOTE(review): the 33142 offset presumably skips an irrelevant leading part
# of the recording -- confirm against the capture session.
print result.shape
# Columns 1-8 hold the eight EMG pod readings; the last column is the grip label.
emg_pods = result[33142:,1:9].astype('float64')
grips_data = result[33142:,-1]
grip_types = np.unique(grips_data)
for grip in grip_types:
    print("%s Rows are %d" % (grip, len(grips_data[grips_data ==grip])))
# -
# __Feature Scaling__
#
# We normalize the data with StandardScaler; other scalers could be tried
# (MinMaxScaler is left commented out below) to compare results.
scaler = StandardScaler()
#scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(emg_pods)
emg_pods = scaled_data
# __Analyzing the data__
#
# We take the standard deviation and mean of the EMG pods and use these values to plot normal distribution graphs.
#
# We also use scatter plots to visualize the correlation between pod values and grip types.
#
#
# +
# For each grip type: scatter 30 random samples per pod (bottom figure) and a
# normal-distribution curve per pod from the class mean (top figure).
grip_count = 4
figs, axs = plt.subplots(nrows=grip_count, ncols=1, figsize=(18,40))
s_fig, s_axs = plt.subplots(nrows=4, ncols=1, figsize=(18,40))
for type_id in range(grip_count):
    # NOTE(review): assumes each class has at least 15000 samples -- confirm.
    random_select = random.sample(range(15000), 30)
    t = emg_pods[grips_data == grip_types[type_id]]  # samples for this grip type
    # y gives each class its own horizontal band with random jitter.
    y = (np.random.rand(len(random_select), 1) + type_id)
    #sd = np.std(t, axis=0)
    mean = np.mean(t, axis=0)
    #color=next(colors)
    colors = iter(cm.rainbow(np.linspace(0, 1, 8)))  # one color per pod
    # Plot each of the 8 pods; normal pdf drawn with class mean and SD = 2.
    for i in range(8):
        s_axs[type_id].scatter(t[random_select,i], y, color=next(colors))
        s_axs[type_id].plot(mean[i].repeat(30), y)
        x_axis = np.arange(np.min(t[:,i]), np.max(t[:,i]), 1)
        axs[type_id].plot(x_axis, norm.pdf(x_axis,mean[i], 2), label = "pod" + str(i))
    s_axs[type_id].set_title("Grip Type %s" % grip_types[type_id])
    axs[type_id].legend('12345678',shadow=True, fancybox=True, loc="upper right");
    axs[type_id].set_title(grip_types[type_id] )
# __Label Classifiers__
#
# We are using one hot encoding, for the grip types coloumn
#
# Working =>
#
# grips_data["conti.", "western", etc]
#
# then resultant vector would
#
# [[1,0,0,0], [0,0,0,1],..etc]
# +
# Data preprocessing: one-hot encode the grip labels.
# grips_data ["conti.", "western", ...] -> integer codes -> one-hot vectors.
from sklearn.preprocessing import OneHotEncoder,LabelEncoder
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(grips_data)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
Y_labels = onehot_encoder.fit_transform(integer_encoded)
#prepare training-set, test-set, valid-set.
# -
# __Preparing Data Set__
#
# We use train_test_split to create training and test sets, and keep half of
# the test data as a validation set to monitor the model during training.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(emg_pods, Y_labels, test_size=0.3, random_state=1)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5, random_state=1)
# NOTE(review): fit_transform is re-fit on val/test here, which leaks their
# statistics; the usual practice is fit on train, then transform() the rest.
X_train = scaler.fit_transform(X_train)
X_val = scaler.fit_transform(X_val)
X_test = scaler.fit_transform(X_test)
print X_train.shape
print X_test.shape
print X_val.shape
# Add a trailing channel axis: (n, 8) -> (n, 8, 1), as Conv1D expects.
X_train = np.expand_dims(X_train, axis=2)
X_val = np.expand_dims(X_val, axis=2)
X_test = np.expand_dims(X_test, axis=2)
# +
# Create a small multilayer-perceptron baseline.
# NOTE(review): this model is immediately overwritten by the Conv1D model in
# the next cell, so only its summary is ever used.
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense, BatchNormalization, LeakyReLU
from keras.models import Sequential
model = Sequential()
model.add(Dense(16, input_shape=(8,), activation = 'relu', kernel_initializer='truncated_normal'))
model.add(Dense(len(grip_types), activation='softmax', kernel_initializer='truncated_normal'))
model.summary()
# +
# 1-D convolutional network over the 8 EMG pod channels (input shape (8, 1)).
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense, BatchNormalization, LeakyReLU
from keras.models import Sequential
model = Sequential()
model.add(Conv1D(32, 1, activation='relu', input_shape=(8,1)))
model.add(Conv1D(64, 1, activation='relu'))
model.add(Dropout(.4))
model.add(Conv1D(128, 2, activation='relu', ))
model.add(Conv1D(128, 2, activation='relu', ))
model.add(Dropout(.4))
model.add(Conv1D(256, 3, activation='relu', ))
model.add(Conv1D(256, 3, activation='relu', ))
model.add(Dropout(.4))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(.4))
model.add(Dense(len(grip_types), activation='softmax', kernel_initializer='truncated_normal'))
model.summary()
# +
# Compile and train the network.
model.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print X_train.shape
# +
model.fit(X_train, y_train, epochs=20, batch_size=32, validation_data=(X_val, y_val),verbose=1)
# +
# Evaluate: predict each test vector individually and compare the argmax
# against the one-hot ground truth.
# NOTE(review): a single model.predict(X_test) batch call would be much faster.
model_prediction = [np.argmax(model.predict(np.expand_dims(test_vector, axis=0))) for test_vector in X_test]
# report test accuracy
test_accuracy = 100*np.sum(np.array(model_prediction)==np.argmax(y_test, axis=1))/len(model_prediction)
print('Test accuracy: %.4f%%' % test_accuracy)
# -
print model_prediction[1:100]
print("shafo")
# +
import itertools
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn import svm, datasets
from sklearn.metrics import confusion_matrix
## we need a better way analyse the confusion matrix for 133 labels.
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Parameters:
        cm: square confusion-matrix array of raw counts.
        classes: sequence of class labels used for the axis ticks.
        normalize: if True, normalize each row to sum to 1 before plotting.
        title: plot title.
        cmap: matplotlib colormap for the heatmap.
    """
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)
    # Annotate each cell; white text on dark cells for readability.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Compute confusion matrix.
# NOTE(review): only samples 1..99 are used (sample 0 excluded), and the
# predictions are passed as the first argument although sklearn expects
# y_true first -- confirm the intended axis orientation.
test_targets_result = np.argmax(y_test, axis=1)
cnf_matrix = confusion_matrix(model_prediction[1:100], test_targets_result[1:100])
np.set_printoptions(precision=2)
print cnf_matrix
cm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
print cm
# Plot non-normalized confusion matrix
#plt.figure()
#plot_confusion_matrix(cnf_matrix, classes=grip_types,
#                      title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure(figsize=(10,10))
plot_confusion_matrix(cnf_matrix, classes=grip_types, normalize=True,
                      title='Normalized confusion matrix')
#plt.show()
# +
##testing live data
from collections import Counter
def read_emg_from_file(file_name):
    """Read a recorded EMG session CSV and return the eight pod columns.

    Parameters:
        file_name: path to a CSV whose first row is a header and whose
            columns 1-8 hold the EMG pod readings.

    Returns:
        float64 numpy array of shape (n_samples, 8).
    """
    # Python 2 csv idiom: binary-mode file handle.
    reader = csv.reader(open(file_name, "rb"), delimiter=",")
    data = list(reader)
    # Bug fix: the original built the array from the *global* `x` (the
    # training data loaded earlier) instead of the rows just read, so every
    # call silently returned the training set regardless of the file given.
    data_set = np.array(data[1:])[:,1:9].astype('float64')
    return data_set
def detect_grip(filename):
    """Predict the grip type for a recorded EMG session file.

    Parameters:
        filename: path to a CSV of EMG samples (see read_emg_from_file).

    Returns:
        The most frequently predicted grip label from `grip_types`
        (a simple majority vote over all samples in the file).
    """
    # Bug fix: the original ignored its `filename` argument (it read the
    # global `file_name`) and then iterated the undefined name
    # `emg_pods_test` instead of the data it had just loaded.
    predict_data_set = read_emg_from_file(filename)
    predict_data_set = np.expand_dims(predict_data_set, axis=2)
    model_prediction = [np.argmax(model.predict(np.expand_dims(test_vector, axis=0)))
                        for test_vector in predict_data_set]
    counter = Counter(model_prediction).most_common(4)
    return grip_types[counter[0][0]]
# -
file_name = data_dir + "test_continental.csv"
print detect_grip(file_name)
##testing live data
file_name = data_dir + "test_semiwesternfore.csv"
print detect_grip(file_name)
# +
##testing live data
file_name = data_dir + "test_eastfore.csv"
print detect_grip(file_name)
# +
##testing live data
file_name = data_dir + "test_westernfore.csv"
print detect_grip(file_name)
| apps/data_analysis/grip_classification/.ipynb_checkpoints/GripClassifier-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1> Deploying and predicting with model </h1>
#
# This notebook illustrates:
# <ol>
# <li> Deploying model
# <li> Predicting with model
# </ol>
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'   # GCS bucket holding the trained model
PROJECT = 'cloud-training-demos'     # GCP project id
REGION = 'us-central1'               # region for deployment and batch jobs
import os
# Export as environment variables so the %%bash cells below can read them.
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.13'     # AI Platform runtime version
# + language="bash"
# if ! gsutil ls | grep -q gs://${BUCKET}/; then
# gsutil mb -l ${REGION} gs://${BUCKET}
# fi
# + language="bash"
# # copy solution to Lab #5 (skip this step if you still have results from Lab 5 in your bucket)
# gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight/trained_model
# -
# ## Task 1
#
# What files are present in the model trained directory (gs://${BUCKET}/babyweight/trained_model)?
#
# Hint (highlight to see): <p style='color:white'>
# Run gsutil ls in a bash cell.
# Answer: model checkpoints are in the trained model directory and several exported models (model architecture + weights) are in the export/exporter subdirectory
# </p>
# <h2> Task 2: Deploy trained, exported model </h2>
#
# Uncomment and run the the appropriate gcloud lines ONE-BY-ONE to
# deploy the trained model to act as a REST web service.
#
# Hint (highlight to see): <p style='color:white'>
# The very first time, you need only the last two gcloud calls to create the model and the version.
# To experiment later, you might need to delete any deployed version, but should not have to recreate the model
# </p>
# + language="bash"
# gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
# + language="bash"
# MODEL_NAME="babyweight"
# MODEL_VERSION="ml_on_gcp"
# MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
# echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
# #gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
# #gcloud ai-platform models delete ${MODEL_NAME}
# #gcloud ai-platform models create ${MODEL_NAME} --regions $REGION
# #gcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
# -
# ## Task 3: Write Python code to invoke the deployed model (online prediction)
#
# <p>
# Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses are the order of the instances.
#
# The deployed model requires the input instances to be formatted as follows:
# <pre>
# {
# 'key': 'b1',
# 'is_male': 'True',
# 'mother_age': 26.0,
# 'plurality': 'Single(1)',
# 'gestation_weeks': 39
# },
# </pre>
# The key is an arbitrary string. Allowed values for is_male are True, False and Unknown.
# Allowed values for plurality are Single(1), Twins(2), Triplets(3), Multiple(2+)
# +
# Call the deployed model's REST :predict endpoint with one instance.
# NOTE(review): oauth2client is deprecated; google-auth is its replacement.
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
# Obtain an OAuth2 access token from the application-default credentials.
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
         .format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
# Request body: each instance must match the deployed model's input schema
# (key, is_male, mother_age, plurality, gestation_weeks -- see cell above).
data = {
  'instances': [
    # TODO: complete
    {
      'key': 'b1',
      'is_male': 'True',
      'mother_age': 26.0,
      'plurality': 'Single(1)',
      'gestation_weeks': 39
    },
  ]
}
response = requests.post(api, json=data, headers=headers)
# Responses come back in the same order as the submitted instances.
print(response.content)
# -
# <h2> Task 4: Try out batch prediction </h2>
# <p>
# Batch prediction is commonly used when you need thousands to millions of predictions.
# Create a file with one instance per line and submit it using gcloud.
# %%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
# + language="bash"
# INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
# OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
# gsutil cp inputs.json $INPUT
# gsutil -m rm -rf $OUTPUT
# gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
# --data-format=TEXT --region ${REGION} \
# --input-paths=$INPUT \
# --output-path=$OUTPUT \
# --model=babyweight --version=ml_on_gcp
# -
# Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| courses/machine_learning/deepdive/06_structured/labs/6_deploy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
# Launch the Responsible-AI error-analysis dashboard on a model-results
# dataset registered in an Azure ML workspace.
from raiwidgets import ErrorAnalysisDashboard
from azureml.core import Workspace, Dataset
# Azure workspace coordinates -- fill in before running.
subscription_id = '##############'
resource_group = '##############'
workspace_name = '##############'
results_name = 'model-##############-results'
# Column names holding the ground truth and the model's prediction.
label_indicator = 'Label'
prediction_indicator = 'Prediction'
workspace = Workspace(subscription_id, resource_group, workspace_name)
dataset = Dataset.get_by_name(workspace, name=results_name)
dataset = dataset.to_pandas_dataframe()
# Features = everything except the label and prediction columns.
X_test = dataset.drop([label_indicator, prediction_indicator], axis=1)
ErrorAnalysisDashboard(dataset = X_test, true_y=dataset[label_indicator], pred_y=dataset[prediction_indicator], features=X_test.columns, max_depth=3)
| packages/How_to_implement_Azure_machine_learning/aml_modeling/notebooks/error_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="qN8P0AnTnAhh"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="p8SrVqkmnDQv"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="AftvNA5VMemJ"
# # Federated Learning for Image Classification
# + [markdown] colab_type="text" id="coAumH42q9nz"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/master/docs/tutorials/federated_learning_for_image_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/federated/blob/master/docs/tutorials/federated_learning_for_image_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="Zs2LgZBOMt4M"
# **NOTE**: This colab has been verified to work with the `v0.1.0` version of the `tensorflow_federated` pip package, but the Tensorflow Federated project is still in pre-release development and may not work on `master`.
#
# In this tutorial, we use the classic MNIST training example to introduce the
# Federated Learning (FL) API layer of TFF, `tff.learning` - a set of
# higher-level interfaces that can be used to perform common types of federated
# learning tasks, such as federated training, against user-supplied models
# implemented in TensorFlow.
#
# This tutorial, and the Federated Learning API, are intended primarly for users
# who want to plug their own TensorFlow models into TFF, treating the latter
# mostly as a black box. For a more in-depth understanding of TFF and how to
# implement your own federated learning algorithms, see the tutorials on the FC Core API - [Custom Federated Algorithms Part 1](custom_federated_algorithms_1.ipynb) and [Part 2](custom_federated_algorithms_2.ipynb).
#
# For more on `tff.learning`, continue with the
# [Federated Learning for Text Generation](federated_learning_for_text_generation.ipynb),
# tutorial which in addition to covering recurrent models, also demonstrates loading a
# pre-trained serialized Keras model for refinement with federated learning
# combined with evaluation using Keras.
# + [markdown] colab_type="text" id="MnUwFbCAKB2r"
# ## Before we start
#
# Before we start, please run the following to make sure that your environment is
# correctly setup. If you don't see a greeting, please refer to the
# [Installation](../install.md) guide for instructions.
# + colab={} colab_type="code" id="ZrGitA_KnRO0"
#@test {"skip": true}
# NOTE: If you are running a Jupyter notebook, and installing a locally built
# pip package, you may need to edit the following to point to the '.whl' file
# on your local filesystem.
# !pip install tensorflow_federated
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 4731, "status": "ok", "timestamp": 1551726784731, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="8BKyHkMxKHfV" outputId="79ee5066-8d6a-44e3-a9d6-351e55dfda79"
from __future__ import absolute_import, division, print_function
import collections
from six.moves import range
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow_federated import python as tff
nest = tf.contrib.framework.nest
np.random.seed(0)
tf.compat.v1.enable_v2_behavior()
tff.federated_computation(lambda: 'Hello, World!')()
# + [markdown] colab_type="text" id="5Cyy2AWbLMKj"
# ## Preparing the input data
#
# Let's start with the data. Federated learning requires a federated data set,
# i.e., a collection of data from multiple users. Federated data is typically
# non-[i.i.d.](https://en.wikipedia.org/wiki/Independent_and_identically_distributed_random_variables),
# which poses a unique set of challenges.
#
# In order to facilitate experimentation, we seeded the TFF repository with a few
# datasets, including a federated version of MNIST that contains a version of the [original NIST dataset](https://www.nist.gov/srd/nist-special-database-19) that has been re-processed using [Leaf](https://github.com/TalwalkarLab/leaf) so that the data is keyed by the original writer of the digits. Since each writer has a unique style, this dataset exhibits the kind of non-i.i.d. behavior expected of federated datasets.
#
# Here's how we can load it.
# + colab={} colab_type="code" id="NayDhCX6SjwE"
#@test {"output": "ignore"}
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
# + [markdown] colab_type="text" id="yeX8BKgPfeFw"
# The data sets returned by `load_data()` are instances of
# `tff.simulation.ClientData`, an interface that allows you to enumerate the set
# of users, to construct a `tf.data.Dataset` that represents the data of a
# particular user, and to query the structure of individual elements. Here's how
# you can use this interface to explore the content of the data set. Keep in mind
# that while this interface allows you to iterate over clients ids, this is only a
# feature of the simulation data. As you will see shortly, client identities are
# not used by the federated learning framework - their only purpose is to allow
# you to select subsets of the data for simulations.
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1551726791212, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="kN4-U5nJgKig" outputId="3328f06b-b655-4232-99b6-3398ce2beafc"
len(emnist_train.client_ids)
# + colab={"height": 52} colab_type="code" executionInfo={"elapsed": 13, "status": "ok", "timestamp": 1551726791277, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="ZyCzIrSegT62" outputId="6c85f91f-9d1d-4d45-82a9-e65694c32903"
emnist_train.output_types, emnist_train.output_shapes
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 53, "status": "ok", "timestamp": 1551726791388, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="EsvSXGEMgd9G" outputId="a1aa714a-2ca1-4f69-9db0-ffc3b5b141b3"
example_dataset = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0])
example_element = iter(example_dataset).next()
example_element['label'].numpy()
# + colab={"height": 275} colab_type="code" executionInfo={"elapsed": 115, "status": "ok", "timestamp": 1551726791547, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="OmLV0nfMg98V" outputId="1461cf50-9975-4795-9e17-9cbb98dbbe53"
#@test {"output": "ignore"}
from matplotlib import pyplot as plt
plt.imshow(example_element['pixels'].numpy(), cmap='gray', aspect='equal')
plt.grid('off')
_ = plt.show()
# + [markdown] colab_type="text" id="lMd01egqy9we"
# Since the data is already a `tf.data.Dataset`, preprocessing can be accomplished using Dataset transformations. Here, we flatten the `28x28` images
# into `784`-element arrays, shuffle the individual examples, organize them into batches, and rename the features
# from `pixels` and `label` to `x` and `y` for use with Keras. We also throw in a
# `repeat` over the data set to run several epochs.
# + colab={} colab_type="code" id="cyG_BMraSuu_"
# Preprocessing hyperparameters for the simulation.
NUM_EPOCHS = 10
BATCH_SIZE = 20
SHUFFLE_BUFFER = 500
def preprocess(dataset):
    """Flatten, rename, shuffle, and batch a single client's dataset.

    Maps each element's 28x28 `pixels` grid to a flat 784-vector under the
    key 'x' and its scalar `label` to a length-1 vector under 'y', then
    repeats for NUM_EPOCHS epochs, shuffles, and batches for Keras.
    """
    def rename_and_flatten(element):
        flat_pixels = tf.reshape(element['pixels'], [-1])
        label_vec = tf.reshape(element['label'], [1])
        return collections.OrderedDict([('x', flat_pixels), ('y', label_vec)])
    repeated = dataset.repeat(NUM_EPOCHS).map(rename_and_flatten)
    return repeated.shuffle(SHUFFLE_BUFFER).batch(BATCH_SIZE)
# + [markdown] colab_type="text" id="m9LXykN_jlJw"
# Let's verify this worked.
# + colab={"height": 474} colab_type="code" executionInfo={"elapsed": 36, "status": "ok", "timestamp": 1551726791664, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="VChB7LMQjkYz" outputId="94342024-7640-4164-f833-b6b491a269b1"
#@test {"output": "ignore"}
# Materialize one preprocessed batch as plain numpy arrays; `sample_batch`
# is reused later to tell TFF the expected input structure of the model.
preprocessed_example_dataset = preprocess(example_dataset)
sample_batch = nest.map_structure(
    lambda x: x.numpy(), iter(preprocessed_example_dataset).next())
sample_batch
# + [markdown] colab_type="text" id="JGsMvRQt9Agl"
# We have almost all the building blocks in place to construct federated data
# sets.
#
# One of the ways to feed federated data to TFF in a simulation is simply as a
# Python list, with each element of the list holding the data of an individual
# user, whether as a list or as a `tf.data.Dataset`. Since we already have
# an interface that provides the latter, let's use it.
#
# Here's a simple helper function that will construct a list of datasets from the
# given set of users as an input to a round of training or evaluation.
# + colab={} colab_type="code" id="_PHMvHAI9xVc"
def make_federated_data(client_data, client_ids):
    """Build a list of preprocessed datasets, one per requested client id.

    The resulting Python list is the federated-data format TFF accepts in
    simulation: each element holds one user's `tf.data.Dataset`.
    """
    datasets = []
    for client_id in client_ids:
        raw_dataset = client_data.create_tf_dataset_for_client(client_id)
        datasets.append(preprocess(raw_dataset))
    return datasets
# + [markdown] colab_type="text" id="0M9PfjOtAVqw"
# Now, how do we choose clients?
#
# In a typical federated training scenario, we are dealing with potentially a very
# large population of user devices, only a fraction of which may be available for
# training at a given point in time. This is the case, for example, when the
# client devices are mobile phones that participate in training only when plugged
# into a power source, off a metered network, and otherwise idle.
#
# Of course, we are in a simulation environment, and all the data is locally
# available. Typically then, when running simulations, we would simply sample a
# random subset of the clients to be involved in each round of training, generally
# different in each round.
#
# That said, as you can find out by studying the paper on the
# [Federated Averaging](https://arxiv.org/abs/1602.05629) algorithm, achieving convergence in a system with randomly sampled
# subsets of clients in each round can take a while, and it would be impractical
# to have to run hundreds of rounds in this interactive tutorial.
#
# What we'll do instead is sample the set of clients once, and
# reuse the same set across rounds to speed up convergence (intentionally
# over-fitting to these few users' data). We leave it as an exercise for the
# reader to modify this tutorial to simulate random sampling - it is fairly easy to
# do (once you do, keep in mind that getting the model to converge may take a
# while).
# + colab={"height": 52} colab_type="code" executionInfo={"elapsed": 86, "status": "ok", "timestamp": 1551726791867, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="GZ6NYHxB8xer" outputId="5ed00ea0-1719-4b48-8fde-10e154c77dc9"
#@test {"output": "ignore"}
NUM_CLIENTS = 3
# Sample a fixed set of clients once and reuse it every round so the
# simulation converges quickly (intentionally over-fitting to these users).
sample_clients = emnist_train.client_ids[0:NUM_CLIENTS]
federated_train_data = make_federated_data(emnist_train, sample_clients)
len(federated_train_data), federated_train_data[0]
# + [markdown] colab_type="text" id="HOxq4tbi9m8-"
# ## Creating a model with Keras
#
# If you are using Keras, you likely already have code that constructs a Keras
# model. Here's an example of a simple model that will suffice for our needs.
# + colab={} colab_type="code" id="LYCsJGJFWbqt"
def create_compiled_keras_model():
    """Build and compile the single-layer softmax classifier for MNIST.

    The model is one Dense layer (784 -> 10) with a zero-initialized
    kernel, compiled with plain SGD, mean sparse categorical cross-entropy
    loss, and a sparse categorical accuracy metric.
    """
    output_layer = tf.keras.layers.Dense(
        10,
        activation=tf.nn.softmax,
        kernel_initializer='zeros',
        input_shape=(784,))
    model = tf.keras.models.Sequential([output_layer])
    def loss_fn(y_true, y_pred):
        per_example = tf.keras.losses.sparse_categorical_crossentropy(
            y_true, y_pred)
        return tf.reduce_mean(per_example)
    model.compile(
        loss=loss_fn,
        optimizer=gradient_descent.SGD(learning_rate=0.02),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    return model
# + [markdown] colab_type="text" id="NHdraKFH4OU2"
# One critical note on `compile`. When used in the Federated Averaging algorithm,
# as below, the `optimizer` is only half of the total optimization algorithm,
# as it is only used to compute local model updates on each client. The rest of
# the algorithm involves how these updates are averaged over clients, and how they
# are then applied to the global model at the server. In particular, this means
# that the choice of optimizer and learning rate used here may need to be
# different than the ones you have used to train the model on a standard i.i.d.
# dataset. We recommend starting with regular SGD, possibly with a smaller
# learning rate than usual. The learning rate we use here has not been carefully
# tuned, feel free to experiment.
#
# In order to use any model with TFF, it needs to be wrapped in an instance of the
# `tff.learning.Model` interface, which exposes methods to stamp the model's
# forward pass, metadata properties, etc., similarly to Keras, but also introduces
# additional elements, such as ways to control the process of computing federated
# metrics. Let's not worry about this for now; if you have a compiled Keras model
# like the one we've just defined above, you can have TFF wrap it for you by
# invoking `tff.learning.from_compiled_keras_model`, passing the model and a
# sample data batch as arguments, as shown below.
# + colab={} colab_type="code" id="Q3ynrxd53HzY"
def model_fn():
  """Construct a fresh compiled Keras model wrapped as a `tff.learning.Model`.

  TFF requires a no-arg constructor (not a model instance) so model creation
  happens in a TFF-controlled context; `sample_batch` (a module-level batch
  captured earlier) tells TFF the model's input structure.
  """
  keras_model = create_compiled_keras_model()
  return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
# + [markdown] colab_type="text" id="XJ5E3O18_JZ6"
# ## Training the model on federated data
#
# Now that we have a model wrapped as `tff.learning.Model` for use with TFF, we
# can let TFF construct a Federated Averaging algorithm by invoking the helper
# function `tff.learning.build_federated_averaging_process`, as follows.
#
# Keep in mind that the argument needs to be a constructor (such as `model_fn`
# above), not an already-constructed instance, so that the construction of your
# model can happen in a context controlled by TFF (if you're curious about the
# reasons for this, we encourage you to read the follow-up tutorial on
# [custom algorithms](custom_federated_algorithms_1.ipynb)).
# + colab={} colab_type="code" id="sk6mjOfycX5N"
#@test {"output": "ignore"}
iterative_process = tff.learning.build_federated_averaging_process(model_fn)
# + [markdown] colab_type="text" id="f8FpvN2n67sm"
# What just happened? TFF has constructed a pair of *federated computations* and
# packaged them into a `tff.utils.IterativeProcess` in which these computations
# are available as a pair of properties `initialize` and `next`.
#
# In a nutshell, *federated computations* are programs in TFF's internal language
# that can express various federated algorithms (you can find more about this in
# the [custom algorithms](custom_federated_algorithms_1.ipynb) tutorial). In this
# case, the two computations generated and packed into `iterative_process`
# implement [Federated Averaging](https://arxiv.org/abs/1602.05629).
#
# It is a goal of TFF to define computations in a way that they could be executed
# in real federated learning settings, but currently only local execution
# simulation runtime is implemented. To execute a computation in a simulator, you
# simply invoke it like a Python function. This default interpreted environment is
# not designed for high performance, but it will suffice for this tutorial; we
# expect to provide higher-performance simulation runtimes to facilitate
# larger-scale research in future releases.
#
# Let's start with the `initialize` computation. As is the case for all federated
# computations, you can think of it as a function. The computation takes no
# arguments, and returns one result - the representation of the state of the
# Federated Averaging process on the server. While we don't want to dive into the
# details of TFF, it may be instructive to see what this state looks like. You can
# visualize it as follows.
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1551726793243, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="Z4pcfWsUBp_5" outputId="59e1a15f-ac5e-4926-dceb-7341d98b66bb"
#@test {"output": "ignore"}
str(iterative_process.initialize.type_signature)
# + [markdown] colab_type="text" id="v1gbHQ_7BiyT"
# While the above type signature may at first seem a bit cryptic, you can
# recognize that the server state consists of a `model` (the initial model
# parameters for MNIST that will be distributed to all devices), and
# `optimizer_state` (additional information maintained by the server, such as the
# number of rounds to use for hyperparameter schedules, etc.).
#
# Let's invoke the `initialize` computation to construct the server state.
# + colab={} colab_type="code" id="6cagCWlZmcch"
state = iterative_process.initialize()
# + [markdown] colab_type="text" id="TjjxTx9e_rMd"
# The second of the pair of federated computations, `next`, represents a single
# round of Federated Averaging, which consists of pushing the server state
# (including the model parameters) to the clients, on-device training on their
# local data, collecting and averaging model updates, and producing a new updated
# model at the server.
#
# Conceptually, you can think of `next` as having a functional type signature that
# looks as follows.
#
# ```
# SERVER_STATE, FEDERATED_DATA -> SERVER_STATE, TRAINING_METRICS
# ```
#
# In particular, one should think about `next()` not as being a function that runs on a server, but rather being a declarative functional representation of the entire decentralized computation - some of the inputs are provided by the server (`SERVER_STATE`), but each participating device contributes its own local dataset.
#
# Let's run a single round of training and visualize the results. We can use the
# federated data we've already generated above for a sample of users.
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 2976, "status": "ok", "timestamp": 1551726796344, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="F3M_W9dDE6Tm" outputId="9fcdd743-af11-4096-b9fe-f634f4e36ff3"
#@test {"timeout": 600, "output": "ignore"}
state, metrics = iterative_process.next(state, federated_train_data)
print('round 1, metrics={}'.format(metrics))
# + [markdown] colab_type="text" id="UmhReXt9G4A5"
# Let's run a few more rounds. As noted earlier, typically at this point you would
# pick a subset of your simulation data from a new randomly selected sample of
# users for each round in order to simulate a realistic deployment in which users
# continuously come and go, but in this interactive notebook, for the sake of
# demonstration we'll just reuse the same users, so that the system converges
# quickly.
# + colab={"height": 175} colab_type="code" executionInfo={"elapsed": 26368, "status": "ok", "timestamp": 1551726822761, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="qrJkQuCRJP9C" outputId="6785bdd5-ec25-4b73-be0a-29b01ea26b66"
#@test {"skip": true}
for round_num in range(2, 11):
state, metrics = iterative_process.next(state, federated_train_data)
print('round {:2d}, metrics={}'.format(round_num, metrics))
# + [markdown] colab_type="text" id="joHYzn9jcs0Y"
# Training loss is decreasing after each round of federated training, indicating
# the model is converging. There are some important caveats with these training
# metrics, however, see the section on *Evaluation* later in this tutorial.
# + [markdown] colab_type="text" id="T4hneAcb-F2l"
# ## Customizing the model implementation
#
# Keras is the [recommended high-level model API for TensorFlow](https://medium.com/tensorflow/standardizing-on-keras-guidance-on-high-level-apis-in-tensorflow-2-0-bad2b04c819a), and we encourage using Keras models (via
# `tff.learning.from_keras_model` or
# `tff.learning.from_compiled_keras_model`) in TFF whenever possible.
#
# However, `tff.learning` provides a lower-level model interface, `tff.learning.Model`, that exposes the minimal functionality necessary for using a model for federated learning. Directly implementing this interface (possibly still using building blocks like `tf.keras.layers`) allows for maximum customization without modifying the internals of the federated learning algorithms.
#
# So let's do it all over again from scratch.
#
# ### Defining model variables, forward pass, and metrics
#
# The first step is to identify the TensorFlow variables we're going to work with.
# In order to make the following code more legible, let's define a data structure
# to represent the entire set. This will include variables such as `weights` and
# `bias` that we will train, as well as variables that will hold various
# cumulative statistics and counters we will update during training, such as
# `loss_sum`, `accuracy_sum`, and `num_examples`.
# + colab={} colab_type="code" id="uqRD72WQC4u1"
# Bundle of all TensorFlow variables the model uses: trainable parameters
# (`weights`, `bias`) plus non-trainable cumulative metric counters
# (`num_examples`, `loss_sum`, `accuracy_sum`), all kept as tf.float32.
MnistVariables = collections.namedtuple(
    'MnistVariables', 'weights bias num_examples loss_sum accuracy_sum')
# + [markdown] colab_type="text" id="nkJfDcY5oXii"
# Here's a method that creates the variables. For the sake of simplicity, we
# represent all statistics as `tf.float32`, as that will eliminate the need for
# type conversions at a later stage. Wrapping variable initializers as lambdas is
# a requirement imposed by
# [resource variables](https://www.tensorflow.org/api_docs/python/tf/enable_resource_variables).
# + colab={} colab_type="code" id="H3GQHLNqCfMU"
def create_mnist_variables():
    """Create the model's trainable parameters and metric accumulators.

    All statistics are tf.float32 to avoid later type conversions; the
    initializers are wrapped in lambdas as required by resource variables.
    """
    zero_weights = lambda: tf.zeros(dtype=tf.float32, shape=(784, 10))
    zero_bias = lambda: tf.zeros(dtype=tf.float32, shape=(10))
    return MnistVariables(
        weights=tf.Variable(zero_weights, name='weights', trainable=True),
        bias=tf.Variable(zero_bias, name='bias', trainable=True),
        num_examples=tf.Variable(0.0, name='num_examples', trainable=False),
        loss_sum=tf.Variable(0.0, name='loss_sum', trainable=False),
        accuracy_sum=tf.Variable(0.0, name='accuracy_sum', trainable=False))
# + [markdown] colab_type="text" id="SrdnR0fAre-Q"
# With the variables for model parameters and cumulative statistics in place, we
# can now define the forward pass method that computes loss, emits predictions,
# and updates the cumulative statistics for a single batch of input data, as
# follows.
# + colab={} colab_type="code" id="ZYSRAl-KCvC7"
def mnist_forward_pass(variables, batch):
    """Run one forward pass over `batch`, updating cumulative statistics.

    Computes softmax probabilities, cross-entropy loss, and accuracy for the
    batch, accumulates them (weighted by batch size) into the non-trainable
    counters in `variables`, and returns `(loss, predictions)`.
    """
    logits = tf.matmul(batch['x'], variables.weights) + variables.bias
    probs = tf.nn.softmax(logits)
    predictions = tf.cast(tf.argmax(probs, 1), tf.int32)
    labels = tf.reshape(batch['y'], [-1])
    # Cross-entropy via a one-hot mask; the TF 1.x spellings (tf.log,
    # reduction_indices, tf.to_float, tf.assign_add) are kept deliberately
    # for compatibility with the TF version this tutorial targets.
    per_example_ce = tf.reduce_sum(
        tf.one_hot(labels, 10) * tf.log(probs), reduction_indices=[1])
    loss = -tf.reduce_mean(per_example_ce)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(predictions, labels), tf.float32))
    batch_size = tf.to_float(tf.size(batch['y']))
    tf.assign_add(variables.num_examples, batch_size)
    tf.assign_add(variables.loss_sum, loss * batch_size)
    tf.assign_add(variables.accuracy_sum, accuracy * batch_size)
    return loss, predictions
# + [markdown] colab_type="text" id="-gm-yx2Mr_bl"
# Next, we define a function that returns a set of local metrics, again using TensorFlow. These are the values (in addition to model updates, which are handled automatically) that are eligible to be aggregated to the server in a federated learning or evaluation process.
#
# Here, we simply return the average `loss` and `accuracy`, as well as the
# `num_examples`, which we'll need to correctly weight the contributions from
# different users when computing federated aggregates.
# + colab={} colab_type="code" id="RkAZXhjGEekp"
def get_local_mnist_metrics(variables):
    """Return this client's local metrics as an ordered dict.

    Emits the raw example count plus average loss and accuracy; the count
    is what lets the server weight each client's contribution correctly
    when computing federated aggregates.
    """
    metrics = collections.OrderedDict()
    metrics['num_examples'] = variables.num_examples
    metrics['loss'] = variables.loss_sum / variables.num_examples
    metrics['accuracy'] = variables.accuracy_sum / variables.num_examples
    return metrics
# + [markdown] colab_type="text" id="9ywGs1G-s1o3"
# Finally, we need to determine how to aggregate the local metrics emitted by each
# device via `get_local_mnist_metrics`. This is the only part of the code that isn't written in TensorFlow - it's a *federated computation* expressed in TFF. If you'd like to
# dig deeper, skim over the [custom algorithms](custom_federated_algorithms_1.ipynb)
# tutorial, but in most applications, you won't really need to; variants of the
# pattern shown below should suffice. Here's what it looks like:
#
# + colab={} colab_type="code" id="BMr2PwkfExFI"
@tff.federated_computation
def aggregate_mnist_metrics_across_clients(metrics):
  """Federated aggregation of the per-client local metrics.

  `metrics` mirrors the OrderedDict from `get_local_mnist_metrics`, but its
  values are "boxed" federated `tff.Value`s, so only TFF federated operators
  apply here: example counts are summed, while loss and accuracy are
  averaged weighted by each client's example count.
  """
  return {
      'num_examples': tff.federated_sum(metrics.num_examples),
      'loss': tff.federated_average(metrics.loss, metrics.num_examples),
      'accuracy': tff.federated_average(metrics.accuracy, metrics.num_examples)
  }
# + [markdown] colab_type="text" id="2rXZ3Hg44aeN"
# The input `metrics` argument corresponds to the `OrderedDict` returned by `get_local_mnist_metrics` above, but critically the values are no longer `tf.Tensors` - they are "boxed" as `tff.Value`s, to make it clear you can no longer manipulate them using TensorFlow, but only using TFF's federated operators like `tff.federated_average` and `tff.federated_sum`. The returned
# dictionary of global aggregates defines the set of metrics which will be available on the server.
#
#
#
# + [markdown] colab_type="text" id="7MXGAuQRvmcp"
# ### Constructing an instance of `tff.learning.Model`
#
# With all of the above in place, we are ready to construct a model representation
# for use with TFF similar to one that's generated for you when you let TFF ingest
# a Keras model.
# + colab={} colab_type="code" id="blQGiTQFS9_r"
class MnistModel(tff.learning.Model):
  """Hand-written `tff.learning.Model` for the MNIST softmax classifier.

  Wires the variable bundle, forward pass, local metrics, and federated
  metric aggregation defined above into the interface TFF expects.
  """
  def __init__(self):
    self._variables = create_mnist_variables()
  @property
  def trainable_variables(self):
    # Parameters updated by local training and averaged across clients.
    return [self._variables.weights, self._variables.bias]
  @property
  def non_trainable_variables(self):
    return []
  @property
  def local_variables(self):
    # Per-client metric accumulators; aggregated only via
    # `federated_output_computation`, never averaged into the model.
    return [
        self._variables.num_examples, self._variables.loss_sum,
        self._variables.accuracy_sum
    ]
  @property
  def input_spec(self):
    # Batches are OrderedDicts of flattened 784-pixel images ('x') and
    # length-1 integer label vectors ('y').
    return collections.OrderedDict([('x', tf.TensorSpec([None, 784],
                                                        tf.float32)),
                                    ('y', tf.TensorSpec([None, 1], tf.int32))])
  # TODO(b/124777499): Remove `autograph=False` when possible.
  @tf.contrib.eager.function(autograph=False)
  def forward_pass(self, batch, training=True):
    # `training` is unused: this model behaves identically in both modes.
    del training
    loss, predictions = mnist_forward_pass(self._variables, batch)
    return tff.learning.BatchOutput(loss=loss, predictions=predictions)
  @tf.contrib.eager.function(autograph=False)
  def report_local_outputs(self):
    return get_local_mnist_metrics(self._variables)
  @property
  def federated_output_computation(self):
    return aggregate_mnist_metrics_across_clients
# + [markdown] colab_type="text" id="sMN1AszMwLHL"
# As you can see, the abstract methods and properties defined by
# `tff.learning.Model` correspond to the code snippets in the preceding section
# that introduced the variables and defined the loss and statistics.
#
# Here are a few points worth highlighting:
#
# * All state that your model will use must be captured as TensorFlow variables,
# as TFF does not use Python at runtime (remember your code should be written
# such that it can be deployed to mobile devices; see the
# [custom algorithms](custom_federated_algorithms_1.ipynb) tutorial for a more
# in-depth commentary on the reasons).
# * Your model should describe what form of data it accepts (`input_spec`), as
# in general, TFF is a strongly-typed environment and wants to determine type
# signatures for all components. Declaring the format of your model's input is
# an essential part of it.
# * Although technically not required, we recommend wrapping all TensorFlow
# logic (forward pass, metric calculations, etc.) as `tf.contrib.eager.function`s,
# as this helps ensure the TensorFlow can be serialized, and removes the need
# for explicit control dependencies.
#
# + [markdown] colab_type="text" id="9DVhXk2Bu-GU"
# The above is sufficient for evaluation and algorithms like Federated SGD.
# However, for Federated Averaging, we need to specify how the model should train
# locally on each batch.
# + colab={} colab_type="code" id="q1w7US3PFN2p"
class MnistTrainableModel(MnistModel, tff.learning.TrainableModel):
  """Adds local (on-client) training to `MnistModel` for Federated Averaging."""
  # TODO(b/124777499): Remove `autograph=False` when possible.
  @tf.contrib.eager.defun(autograph=False)
  def train_on_batch(self, batch):
    # One forward pass plus one SGD step on a single batch. Note this
    # optimizer is only the client-side half of Federated Averaging;
    # server-side averaging of the resulting updates is handled by TFF.
    output = self.forward_pass(batch)
    optimizer = tf.train.GradientDescentOptimizer(0.02)
    optimizer.minimize(output.loss, var_list=self.trainable_variables)
    return output
# + [markdown] colab_type="text" id="hVBugKP3yw03"
# ### Simulating federated training with the new model
#
# With all the above in place, the remainder of the process looks like what we've
# seen already - just replace the model constructor with the constructor of our
# new model class, and use the two federated computations in the iterative process
# you created to cycle through training rounds.
# + colab={} colab_type="code" id="FK3c8_leS9_t"
iterative_process = tff.learning.build_federated_averaging_process(
MnistTrainableModel)
# + colab={} colab_type="code" id="Jv_LiggwS9_u"
state = iterative_process.initialize()
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 3360, "status": "ok", "timestamp": 1551727143518, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="PtOLElmzDPxs" outputId="8f46711a-9143-46bf-cafc-6cb16791a242"
#@test {"timeout": 600, "output": "ignore"}
# Run the first round of Federated Averaging with the custom model and
# report the aggregated training metrics. (The cell annotation above was
# previously mangled into `<EMAIL> {...}` by an address-scrubbing pass,
# which is a Python syntax error; restored to match the other cells.)
state, metrics = iterative_process.next(state, federated_train_data)
print('round 1, metrics={}'.format(metrics))
# + colab={"height": 175} colab_type="code" executionInfo={"elapsed": 28916, "status": "ok", "timestamp": 1551727172475, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="gFkv0yJEGhue" outputId="8e4410e6-eea1-4415-ab60-4a7c0f26cd6c"
#@test {"skip": true}
for round_num in range(2, 11):
state, metrics = iterative_process.next(state, federated_train_data)
print('round {:2d}, metrics={}'.format(round_num, metrics))
# + [markdown] colab_type="text" id="m7lz59lMJ0kj"
# ## Evaluation
#
# All of our experiments so far presented only federated training metrics - the
# average metrics over all batches of data trained across all clients in the
# round. This introduces the normal concerns about overfitting, especially since
# we used the same set of clients on each round for simplicity, but there is an
# additional notion of overfitting in training metrics specific to the Federated
# Averaging algorithm. This is easiest to see if we imagine each client had a
# single batch of data, and we train on that batch for many iterations (epochs).
# In this case, the local model will quickly exactly fit to that one batch, and so
# the local accuracy metric we average will approach 1.0. Thus, these training
# metrics can be taken as a sign that training is progressing, but not much more.
#
# To perform evaluation on federated data, you can construct another *federated
# computation* designed for just this purpose, using the
# `tff.learning.build_federated_evaluation` function, and passing in your model
# constructor as an argument. Note that unlike with Federated Averaging, where
# we've used `MnistTrainableModel`, it suffices to pass the `MnistModel`.
# Evaluation doesn't perform gradient descent, and there's no need to construct
# optimizers.
#
# For experimentation and research, when a centralized test dataset is available,
# [Federated Learning for Text Generation](federated_learning_for_text_generation.ipynb)
# demonstrates another evaluation option: taking the trained weights from
# federated learning, applying them to a standard Keras model, and then simply
# calling `tf.keras.models.Model.evaluate()` on a centralized dataset.
# + colab={} colab_type="code" id="nRiXyqnXM2VO"
evaluation = tff.learning.build_federated_evaluation(MnistModel)
# + [markdown] colab_type="text" id="uwfINGoNQEuV"
# You can inspect the abstract type signature of the evaluation function as follows.
# + colab={"height": 54} colab_type="code" executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1551727172654, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="3q5ueoO0NDNb" outputId="b148de2e-701c-47ec-a061-6541db384310"
str(evaluation.type_signature)
# + [markdown] colab_type="text" id="XA3v7f2SQs6q"
# No need to be concerned about the details at this point, just be aware that it
# takes the following general form, similar to `tff.utils.IterativeProcess.next`
# but with two important differences. First, we are not returning server state,
# since evaluation doesn't modify the model or any other aspect of state - you can
# think of it as stateless. Second, evaluation only needs the model, and doesn't
# require any other part of server state that might be associated with training,
# such as optimizer variables.
#
# ```
# SERVER_MODEL, FEDERATED_DATA -> TRAINING_METRICS
# ```
#
# Let's invoke evaluation on the latest state we arrived at during training. In
# order to extract the latest trained model from the server state, you simply
# access the `.model` member, as follows.
# + colab={} colab_type="code" id="OX4Sk_uyOaYa"
#@test {"output": "ignore"}
train_metrics = evaluation(state.model, federated_train_data)
# + [markdown] colab_type="text" id="UeEsdwJgRGMW"
# Here's what we get. Note the numbers look marginally better than what was
# reported by the last round of training above. By convention, the training
# metrics reported by the iterative training process generally reflect the
# performance of the model at the beginning of the training round, so the
# evaluation metrics will always be one step ahead.
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 12, "status": "ok", "timestamp": 1551727173222, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="zwCy1IPxOfiT" outputId="8415297b-7fa5-4f83-ab8f-a6bf71d9c145"
#@test {"output": "ignore"}
str(train_metrics)
# + [markdown] colab_type="text" id="SpfgdNDoRjPy"
# Now, let's compile a test sample of federated data and rerun evaluation on the
# test data. The data will come from the same sample of real users, but from a
# distinct held-out data set.
# + colab={"height": 52} colab_type="code" executionInfo={"elapsed": 291, "status": "ok", "timestamp": 1551727173581, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="in8vProVNc04" outputId="a10db41a-cb6d-45d0-ab15-59c04703e3eb"
federated_test_data = make_federated_data(emnist_test, sample_clients)
len(federated_test_data), federated_test_data[0]
# + colab={} colab_type="code" id="ty-ZwfE0NJfV"
#@test {"output": "ignore"}
test_metrics = evaluation(state.model, federated_test_data)
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1551727174005, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": 480} id="e5fGtIJYNqYH" outputId="bf274858-42fd-4e8f-97ca-3b4e2ddc28bd"
#@test {"output": "ignore"}
str(test_metrics)
# + [markdown] colab_type="text" id="67vYxrDWzRcj"
# This concludes the tutorial. We encourage you to play with the
# parameters (e.g., batch sizes, number of users, epochs, learning rates, etc.), to modify the code above to simulate training on random samples of users in
# each round, and to explore the other tutorials we've developed.
| docs/tutorials/federated_learning_for_image_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time series forecasting using ARIMA
# ### Import necessary libraries
# +
# %matplotlib notebook
import numpy
import pandas
import datetime
import sys
import time
import matplotlib.pyplot as ma
import statsmodels.tsa.seasonal as st
import statsmodels.tsa.arima_model as arima
import statsmodels.tsa.stattools as tools
# -
# ### Load necessary CSV file
# +
try:
ts = pandas.read_csv('../../datasets/srv-1-usr-5m.csv')
except:
print("I am unable to connect to read .csv file", sep=',', header=1)
ts.index = pandas.to_datetime(ts['ts'])
# delete unnecessary columns
del ts['id']
del ts['ts']
# print table info
ts.info()
# -
# ### Get values from specified range
ts = ts['2018-06-16':'2018-07-15']
# ### Remove possible zero and NA values (by interpolation)
# We are using the MAPE formula for the final score, so no zero values can occur in the time series. Replace them with NA values; NA values are later explicitly removed by linear interpolation.
# +
def print_values_stats():
    """Print counts of zero, missing, and filled-in samples of the global ``ts``."""
    zero_count = sum(1 for x in ts.values if x == 0)
    print("Zero Values:\n", zero_count,
          "\n\nMissing Values:\n", ts.isnull().sum(),
          "\n\nFilled in Values:\n", ts.notnull().sum(), "\n")
idx = pandas.date_range(ts.index.min(), ts.index.max(), freq="5min")
ts = ts.reindex(idx, fill_value=None)
print("Before interpolation:\n")
print_values_stats()
ts = ts.replace(0, numpy.nan)
ts = ts.interpolate(limit_direction="both")
print("After interpolation:\n")
print_values_stats()
# -
# ### Plot values
# Idea: Plot figure now and do not wait on ma.show() at the end of the notebook
ma.ion()
ma.show()
fig1 = ma.figure(1)
ma.plot(ts, color="blue")
ma.draw()
try:
ma.pause(0.001) # throws NotImplementedError, ignore it
except:
pass
# ### Ignore timestamps, make the time series single dimensional
# Since now the time series is represented by continuous single-dimensional Python list. ARIMA does not need timestamps or any irrelevant data.
dates = ts.index # save dates for further use
ts = [x[0] for x in ts.values]
# ### Split time series into train and test series
# We have decided to split train and test time series by two weeks.
train_data_length = 12*24*7
ts_train = ts[:train_data_length]
ts_test = ts[train_data_length+1:]
# ### Estimate integrated (I) parameter
# Check time series stationarity and estimate it's integrated parameter (maximum integration value is 2). The series itself is highly seasonal, so we can assume that the time series is not stationary.
# +
def check_stationarity(ts, critic_value=0.05):
    """Return True if the ADF test deems `ts` stationary.

    Heuristic: the ADF test statistic must be negative and the p-value must be
    below `critic_value`.  NOTE(review): a stricter test would compare the
    statistic against the critical values in result[4] -- confirm before
    relying on this heuristic.
    """
    try:
        result = tools.adfuller(ts)
        # result[0] = test statistic, result[1] = p-value
        return result[0] < 0.0 and result[1] < critic_value
    except Exception:
        # adfuller raises when the series still contains NA values (e.g. after
        # differencing); treat that as "not stationary".  Was a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.
        return False
integrate_param = 0
ts_copy = pandas.Series(ts_train, copy=True) # Create copy for stationarizing
while not check_stationarity(ts_copy) and integrate_param < 2:
integrate_param += 1
ts_copy = ts_copy - ts_copy.shift()
ts_copy.dropna(inplace=True) # Remove initial NA values
print("Estimated integrated (I) parameter: ", integrate_param, "\n")
# -
# ### Print ACF and PACF graphs for AR(p) and MA(q) order estimation
# AutoCorellation and Parcial AutoCorellation Functions are necessary for ARMA order estimation. Configure the *NLagsACF* and *NlagsPACF* variables for number of lagged values in ACF and PACF graphs.
# +
def plot_bar(ts, horizontal_line=None):
    """Draw `ts` as a bar chart on the current matplotlib figure.

    If `horizontal_line` is given, symmetric bounds are drawn at +/- that
    value (used for ACF/PACF significance limits).
    """
    ma.bar(range(0, len(ts)), ts, width=0.5)
    ma.axhline(0)
    if horizontal_line is not None:  # idiomatic None test (was `!= None`)
        ma.axhline(horizontal_line, linestyle="-")
        ma.axhline(-horizontal_line, linestyle="-")
    ma.draw()
    try:
        ma.pause(0.001)
    except NotImplementedError:
        # Non-interactive backends do not implement pause(); ignore.
        # (Was a bare `except:`, which also hid unrelated errors.)
        pass
NlagsACF = 500
NLagsPACF = 100
# ACF
ma.figure(2)
plot_bar(tools.acf(ts_train, nlags=NlagsACF), 1.96 / numpy.sqrt(len(ts)))
# PACF
ma.figure(3)
plot_bar(tools.pacf(ts_train, nlags=NLagsPACF), 1.96 / numpy.sqrt(len(ts)))
# -
# ### ARIMA order estimation and prediction configuration
# According to the Box-Jenkins model (https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc446.htm) we assumed that this time series is an AR(p) model. The ACF graph shows us that the series itself should be stationary, because the ACF is positive and negative altering and (very) slowly decaying to zero. In the PACF graph we can see that there are two significant spikes on indices 0 and 1, so we have decided to go with ARIMA(2,0,0) model.
#
# You can specify how many values you want to use for ARIMA model fitting (by setting *N_train_data* variable) and how many new values you want to predict in single step (by setting *N_values_to_forecast* variable).
ARIMA_order = (2,0,0)
M_train_data = sys.maxsize
N_values_to_forecast = 1
# ### Forecast new values
# Unexpectedly, we have a very large time series (over 8 thousand samples), so the forecasting takes much time.
# +
predictions = []
confidence = []
print("Forecasting started...")
start_time = time.time()
ts_len = len(ts)
for i in range(train_data_length+1, ts_len, N_values_to_forecast):
try:
start = i-M_train_data if i-M_train_data >= 0 else 0
arima_model = arima.ARIMA(ts[start:i], order=ARIMA_order).fit(disp=0)
forecast = arima_model.forecast(steps=N_values_to_forecast)
for j in range(0, N_values_to_forecast):
predictions.append(forecast[0][j])
confidence.append(forecast[2][j])
except:
print("Error during forecast: ", i, i+N_values_to_forecast)
# Push back last successful predictions
for j in range(0, N_values_to_forecast):
predictions.append(predictions[-1] if len(predictions) > 0 else 0)
confidence.append(confidence[-1] if len(confidence) > 0 else 0)
print("Forecasting finished")
print("Time elapsed: ", time.time() - start_time)
# -
# ### Count mean absolute percentage error
# We use MAPE (https://www.forecastpro.com/Trends/forecasting101August2011.html) instead of MSE because the result of MAPE does not depend on size of values.
# +
values_sum = 0
for value in zip(ts_test, predictions):
actual = value[0]
predicted = value[1]
values_sum += abs((actual - predicted) / actual)
values_sum *= 100/len(predictions)
print("MAPE: ", values_sum, "%\n")
# -
# ### Plot forecasted values
fig2 = ma.figure(4)
ma.plot(ts_test, color="blue", label="Test")
ma.plot(predictions, color="red", label="ARIMA")
ts_len = len(ts)
date_offset_indices = ts_len // 6
num_date_ticks = ts_len // date_offset_indices + 1
ma.xticks(range(0, ts_len, date_offset_indices), [x.date().strftime('%Y-%m-%d') for x in dates[::date_offset_indices]])
ma.xlabel("Timestamps")
ma.ylabel("User counts")
ma.legend(loc='best')
ma.draw()
| analyses/SERV-1/Usr-5min-ARIMA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import bs4
import requests
import csv
import pandas as pd
import re
# Scrape spa-service price tables from medifee.com and write one CSV per service.
base_url = "https://www.medifee.com"
resp = requests.get(base_url + "/spa-services-india.php")
resp.status_code
soup = bs4.BeautifulSoup(resp.content, "html5")
link = soup.find_all("a")

title = []
link_main = []
# Anchors [14, len-9) are the individual service pages; the rest are site
# navigation.  NOTE(review): these offsets are tied to the current page
# layout -- verify if the site changes.
for a in link[14:len(link) - 9]:
    title.append(a.text)
    link_main.append(base_url + a["href"])
link_main

for d, url in enumerate(link_main):
    resp_1 = requests.get(url)
    print(resp_1.status_code)
    soup_1 = bs4.BeautifulSoup(resp_1.content, "html5")
    table = soup_1.find_all("tr")
    city, Average_Price, Starting_Price, Price_Upto = [], [], [], []
    for row in table[1:]:  # skip the header row
        data = row.find_all("td")
        # Store plain cell text, not bs4 Tag objects, so the CSV contains
        # values like "Mumbai" instead of "<td>Mumbai</td>".
        city.append(data[0].text.strip())
        Average_Price.append(data[1].text.strip())
        Starting_Price.append(data[2].text.strip())
        Price_Upto.append(data[3].text.strip())
    print(city)
    print(Average_Price)
    print(Starting_Price)
    print(Price_Upto)
    # Fixed: the 'Price_Upto' key previously had a stray leading space,
    # producing a CSV column named " Price_Upto".
    dic = {'city': city, 'Average_Price': Average_Price,
           'Starting_Price': Starting_Price, 'Price_Upto': Price_Upto}
    df = pd.DataFrame(dic)
    # NOTE(review): title[d] may contain spaces or characters that are unsafe
    # in filenames -- consider sanitising before writing.
    df.to_csv(f'spa{title[d]}.csv')
| Medifee/spa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 3 Ungraded Sections - Part 2: T5 SQuAD Model
#
# Welcome to the part 2 of testing the models for this week's assignment. This time we will perform decoding using the T5 SQuAD model. In this notebook we'll perform Question Answering by providing a "Question", its "Context" and see how well we get the "Target" answer.
#
# ## Colab
#
# Since this ungraded lab takes a lot of time to run on coursera, as an alternative we have a colab prepared for you.
#
# [T5 SQuAD Model Colab](https://drive.google.com/file/d/1c-8KJkTySRGqCx_JjwjvXuRBTNTqEE0N/view?usp=sharing)
#
# - If you run into a page that looks similar to the one below, with the option `Open with`, this would mean you need to download the `Colaboratory` app. You can do so by `Open with -> Connect more apps -> in the search bar write "Colaboratory" -> install`
#
# <img src = "colab_help_1.png">
#
# - After installation it should look like this. Click on `Open with Google Colaboratory`
#
# <img src = "colab_help_2.png">
# ## Outline
#
# - [Overview](#0)
# - [Part 1: Resuming the assignment (T5 SQuAD Model)](#1)
# - [Part 2: Fine-tuning on SQuAD](#2)
# - [2.1 Loading in the data and preprocessing](#2.1)
# - [2.2 Decoding from a fine-tuned model](#2.2)
# <a name='0'></a>
# ### Overview
#
# In this notebook you will:
# * Implement the Bidirectional Encoder Representation from Transformer (BERT) loss.
# * Use a pretrained version of the model you created in the assignment for inference.
# <a name='1'></a>
# # Part 1: Getting ready
#
# Run the code cells below to import the necessary libraries and to define some functions which will be useful for decoding. The code and the functions are the same as the ones you previously ran on the graded assignment.
# +
import string
import t5
import numpy as np
import trax
from trax.supervised import decoding
import textwrap
wrapper = textwrap.TextWrapper(width=70)
# +
PAD, EOS, UNK = 0, 1, 2
def detokenize(np_array):
    """Convert an array of token ids back to text via the SentencePiece vocab."""
    vocab_kwargs = dict(vocab_type='sentencepiece',
                        vocab_file='sentencepiece.model',
                        vocab_dir='.')
    return trax.data.detokenize(np_array, **vocab_kwargs)
def tokenize(s):
    """Tokenize the string `s` into an array of ids with the SentencePiece vocab."""
    token_stream = trax.data.tokenize(iter([s]),
                                      vocab_type='sentencepiece',
                                      vocab_file='sentencepiece.model',
                                      vocab_dir='.')
    return next(token_stream)
vocab_size = trax.data.vocab_size(
vocab_type='sentencepiece',
vocab_file='sentencepiece.model',
vocab_dir='.')
def get_sentinels(vocab_size, display=False):
    """Map each decoded sentinel token to a pretty `<letter>` marker.

    Sentinels occupy the last ids of the vocabulary; they are paired with the
    letters z..a, Z..A in reverse order.
    """
    sentinels = {}
    for offset, letter in enumerate(reversed(string.ascii_letters), start=1):
        decoded_text = detokenize([vocab_size - offset])
        # Sentinels, ex: <Z> - <a>
        sentinels[decoded_text] = f'<{letter}>'
        if display:
            print(f'The sentinel is <{letter}> and the decoded token is:', decoded_text)
    return sentinels
sentinels = get_sentinels(vocab_size, display=False)
def pretty_decode(encoded_str_list, sentinels=sentinels):
    """Detokenize (if needed) and replace sentinel tokens with `<letter>` markers."""
    # Token-id input: detokenize first, then recurse on the resulting string.
    if not isinstance(encoded_str_list, (str, bytes)):
        return pretty_decode(detokenize(encoded_str_list))
    text = encoded_str_list
    for token, marker in sentinels.items():
        text = text.replace(token, marker)
    return text
# + [markdown] colab_type="text" id="HEoSSKNwgDVA"
# <a name='2'></a>
# # Part 2: Fine-tuning on SQuAD
#
# Now let's try to fine tune on SQuAD and see what becomes of the model. For this, we need to write a function that will create and process the SQuAD `tf.data.Dataset`. Below is how T5 pre-processes the SQuAD dataset as a text2text example. Before we jump in, we will have to first load in the data.
#
# <a name='2.1'></a>
# ### 2.1 Loading in the data and preprocessing
#
# You first start by loading in the dataset. The text2text example for a SQuAD example looks like:
#
# ```json
# {
# 'inputs': 'question: <question> context: <article>',
# 'targets': '<answer_0>',
# }
# ```
#
# The squad pre-processing function takes in the dataset and processes it using the sentencePiece vocabulary you have seen above. It generates the features from the vocab and encodes the string features. It takes on question, context, and answer, and returns "question: Q context: C" as input and "A" as target.
# + colab={} colab_type="code" id="RcdR5Dh9UVEw"
# Retrieve Question, C, A and return "question: Q context: C" as input and "A" as target.
def squad_preprocess_fn(dataset, mode='train'):
    """Apply T5's SQuAD text2text preprocessor to `dataset`.

    Produces examples of the form
    ``{'inputs': 'question: Q context: C', 'targets': 'A'}``.
    `mode` is accepted for API compatibility with other preprocess fns but is
    currently unused.
    """
    return t5.data.preprocessors.squad(dataset)
# +
# train generator, this takes about 1 minute
train_generator_fn, eval_generator_fn = trax.data.tf_inputs.data_streams(
'squad/plain_text:1.0.0',
data_dir='data/',
bare_preprocess_fn=squad_preprocess_fn,
input_name='inputs',
target_name='targets'
)
train_generator = train_generator_fn()
next(train_generator)
# + colab={} colab_type="code" id="QGQsExH8xv40"
#print example from train_generator
(inp, out) = next(train_generator)
print(inp.decode('utf8').split('context:')[0])
print()
print('context:', inp.decode('utf8').split('context:')[1])
print()
print('target:', out.decode('utf8'))
# + [markdown] colab_type="text" id="cC3JaiSMpWma"
# <a name='2.2'></a>
# ### 2.2 Decoding from a fine-tuned model
#
# You will now use an existing model that we trained for you. You will initialize, then load in your model, and then try with your own input.
# -
# Initialize the model
model = trax.models.Transformer(
d_ff = 4096,
d_model = 1024,
max_len = 2048,
n_heads = 16,
dropout = 0.1,
input_vocab_size = 32000,
n_encoder_layers = 24,
n_decoder_layers = 24,
mode='predict') # Change to 'eval' for slow decoding.
# load in the model
# this will take a minute
shape11 = trax.shapes.ShapeDtype((1, 1), dtype=np.int32)
model.init_from_file('model_squad.pkl.gz',
weights_only=True, input_signature=(shape11, shape11))
# + colab={} colab_type="code" id="FdGy_pHJGEF6"
# create inputs
# a simple example
# inputs = 'question: She asked him where is john? context: John was at the game'
# an extensive example
inputs = 'question: What are some of the colours of a rose? context: A rose is a woody perennial flowering plant of the genus Rosa, in the family Rosaceae, or the flower it bears.There are over three hundred species and tens of thousands of cultivars. They form a group of plants that can be erect shrubs, climbing, or trailing, with stems that are often armed with sharp prickles. Flowers vary in size and shape and are usually large and showy, in colours ranging from white through yellows and reds. Most species are native to Asia, with smaller numbers native to Europe, North America, and northwestern Africa. Species, cultivars and hybrids are all widely grown for their beauty and often are fragrant.'
# -
# tokenizing the input so we could feed it for decoding
print(tokenize(inputs))
test_inputs = tokenize(inputs)
# Run the cell below to decode.
#
# ### Note: This will take some time to run
# + colab={} colab_type="code" id="c_CwYjXHIQOJ"
# Temperature is a parameter for sampling.
# # * 0.0: same as argmax, always pick the most probable token
# # * 1.0: sampling from the distribution (can sometimes say random things)
# # * values inbetween can trade off diversity and quality, try it out!
output = decoding.autoregressive_sample(model, inputs=np.array(test_inputs)[None, :],
temperature=0.0, max_length=5) # originally max_length=10
print(wrapper.fill(pretty_decode(output[0])))
# -
# You should also be aware that the quality of the decoding is not very good because max_length was downsized from 10 to 5 so that this runs faster within this environment. The colab version uses the original max_length so check that one for the actual decoding.
| Natural Language Processing/Course 4 - Natural Language Processing with attention models/Labs/Week 3/Part 2 - T5 SQuAD Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FloPy
#
# ### ZoneBudget Example
#
# This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell by cell budget file using an array of zones.
#
# First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer. If you want to run this notebook, you have to set the path to your own flopy path.
# +
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# -
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
# ### Read File Containing Zones
# Using the `read_zbarray` utility, we can import zonebudget-style array files.
# +
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, :, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
# -
# ### Extract Budget Information from ZoneBudget Object
#
# At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
# ### Convert Units
# The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
# +
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
# -
cmd is cfd
# ### Alias Names
# A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
# ### Return the Budgets as a Pandas DataFrame
# Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times.
# The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
# Slice the multi-index dataframe to retrieve a subset of the budget.
# NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
# Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
# +
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
# -
# Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
# Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
df = zb.get_dataframes(index_key='kstpkper')
df.head()
# ### Write Budget Output to CSV
#
# We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
# +
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
with open(fname, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
# -
# ### Net Budget
# Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
# +
# Build a simple 3-zone array: one zone per model layer.
# NOTE: use the builtin `int` here -- `np.int` was deprecated in NumPy 1.20
# and removed in NumPy 1.24, so the original call fails on modern NumPy.
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2: 'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
# -
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
# ## Plot Budget Components
# The following is a function that can be used to better visualize the budget components using matplotlib.
# +
def tick_label_formatter_comma_sep(x, pos):
    """Matplotlib tick formatter: render `x` with thousands separators, no decimals."""
    return f'{x:,.0f}'
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
    """Plot paired inflow/outflow bars per zone with value labels.

    values_in / values_out: sequences of inflow (positive) and outflow
    (negative) rates, one entry per zone.
    labels: x-axis labels, one per zone.
    kwargs: optionally pass `ax` to draw on a specific axes (defaults to the
    current axes).
    Returns the two matplotlib bar containers (inflow, outflow).
    """
    if 'ax' in kwargs:
        ax = kwargs.pop('ax')
    else:
        ax = plt.gca()
    # Overlay inflow and outflow bars at the same x positions.
    x_pos = np.arange(len(values_in))
    rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
    x_pos = np.arange(len(values_out))
    rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
    plt.xticks(list(x_pos), labels)
    ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
    # Thousands-separated y tick labels.
    ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
    # Pad the y limits so rotated text labels fit above/below the bars.
    ymin, ymax = ax.get_ylim()
    if ymax != 0:
        if abs(ymin) / ymax < .33:
            ymin = -(ymax * .5)
        else:
            ymin *= 1.35
    else:
        ymin *= 1.35
    plt.ylim([ymin, ymax * 1.25])
    # Annotate each inflow bar with its value, just above the bar.
    for i, rect in enumerate(rects_in):
        label = '{:,.0f}'.format(values_in[i])
        height = values_in[i]
        x = rect.get_x() + rect.get_width() / 2
        y = height + (.02 * ymax)
        vertical_alignment = 'bottom'
        horizontal_alignment = 'center'
        ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
    # Annotate each outflow bar with its value, just below the bar.
    for i, rect in enumerate(rects_out):
        label = '{:,.0f}'.format(values_out[i])
        height = values_out[i]
        x = rect.get_x() + rect.get_width() / 2
        y = height + (.02 * ymin)
        vertical_alignment = 'top'
        horizontal_alignment = 'center'
        ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
    # horizontal line indicating zero
    ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
             rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
    return rects_in, rects_out
# +
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
# -
| examples/Notebooks/flopy3_ZoneBudget_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""Simulation of infections for different scenarios."""
import covid19sim.coronalib as cl
import pandas as pd
import numpy as np
import os
import datetime
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.subplots import make_subplots
import plotly.io as pio
# Covid-19 Sim provides two populations:<br>
# "current" : The population is based on the current population (2019) <br>
# "household" : The population is bases on a subsample in 2010 but with household numbers and additional persons per household
# +
# make a population
age, agegroup, gender, contacts, drate, hnr, persons = cl.makepop("current",
17900000)
# set day 0
day0date = datetime.date(2020, 3, 8)
r_change = {}
# Intial r0
r_change[-1] = 3.3 * contacts/np.mean(contacts)
# First change point (8.3.2020)
r_change[0] = 1.84 * contacts/np.mean(contacts)
# second change point (16.3.2020)
r_change[6] = 1.04 * contacts/np.mean(contacts)
# third change point (23.3.2020)
r_change[13] = 0.9 * contacts/np.mean(contacts)
# fourth change point (20.4.2020)
r_change[42] = 1.1 * contacts/np.mean(contacts)
state, statesum, infections, day0, rnow, args, gr = cl.sim(
age, drate, nday=150, prob_icu=0.01125,
day0cumrep=1000,
mean_days_to_icu=7,mean_duration_icu=10,
mean_serial=7.0, std_serial=3.0, immunt0=0.0, ifr=0.004,
long_term_death=False,hnr=None, com_attack_rate=0,
r_change=r_change, simname="Test", datadir="/mnt/wd1/nrw_corona/",
realized=None, rep_delay=8.7, alpha=0.125, day0date=day0date)
# +
fig = make_subplots(rows=4, cols=1)
fig.add_trace(go.Scatter(x=gr["Datum"], y=gr["Erwartete Neu-Meldefälle"],
mode="lines",
name="Erwartete Neu-Meldefälle"),
row=1, col=1)
fig.add_trace(go.Scatter(x=gr["Datum"], y=gr["Erwartete Gesamt-Meldefälle"],
name="Erwartete Gesamt-Meldefälle",
mode="lines"), row=2, col=1)
fig.add_trace(go.Scatter(x=gr["Datum"], y=gr["Erwartete Tote"],
name="Erwartete Tote",
mode="lines"), row=3, col=1)
fig.add_trace(go.Scatter(x=gr["Datum"], y=gr["ICU"],
name="Erwartete Intensiv",
mode="lines"), row=4, col=1)
fig.update_layout(legend_orientation="h", title=args["simname"])
plot(fig, filename=os.path.join(args["datadir"], args["simname"]+".html"))
# -
| scripts/Untitled2.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp parallel
# +
#export
from fastcore.imports import *
from fastcore.foundation import *
from fastcore.basics import *
from fastcore.xtras import *
from functools import wraps
# from contextlib import contextmanager,ExitStack
from multiprocessing import Process, Queue
import concurrent.futures,time
from multiprocessing import Manager
from threading import Thread
# -
from fastcore.test import *
from nbdev.showdoc import *
from fastcore.nb_imports import *
# # Parallel
#
# > Threading and multiprocessing functions
#export
def threaded(f):
    "Run `f` in a thread, and returns the thread"
    @wraps(f)
    def _wrapper(*args, **kwargs):
        thread = Thread(target=f, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return _wrapper
# +
@threaded
def _1():
time.sleep(0.05)
print("second")
@threaded
def _2():
time.sleep(0.01)
print("first")
_1()
_2()
time.sleep(0.1)
# -
#export
def startthread(f):
    "Like `threaded`, but start thread immediately"
    launcher = threaded(f)
    launcher()
# +
@startthread
def _():
time.sleep(0.05)
print("second")
@startthread
def _():
time.sleep(0.01)
print("first")
time.sleep(0.1)
# -
#export
def set_num_threads(nt):
    "Get numpy (and others) to use `nt` threads"
    # Best-effort: mkl/torch may not be installed and their setters can fail;
    # swallow ordinary errors but let KeyboardInterrupt/SystemExit propagate
    # (the original bare `except:` swallowed those too).
    try: import mkl; mkl.set_num_threads(nt)
    except Exception: pass
    try: import torch; torch.set_num_threads(nt)
    except Exception: pass
    os.environ['IPC_ENABLE'] = '1'
    # Environment variables picked up by OpenBLAS/numexpr/OpenMP/MKL.
    for o in ['OPENBLAS_NUM_THREADS','NUMEXPR_NUM_THREADS','OMP_NUM_THREADS','MKL_NUM_THREADS']:
        os.environ[o] = str(nt)
# This sets the number of threads consistently for many tools, by:
#
# 1. Set the following environment variables equal to `nt`: `OPENBLAS_NUM_THREADS`,`NUMEXPR_NUM_THREADS`,`OMP_NUM_THREADS`,`MKL_NUM_THREADS`
# 2. Sets `nt` threads for numpy and pytorch.
#export
def _call(lock, pause, n, g, item):
l = False
if pause:
try:
l = lock.acquire(timeout=pause*(n+2))
time.sleep(pause)
finally:
if l: lock.release()
return g(item)
#export
class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
    "Same as Python's ThreadPoolExecutor, except can pass `max_workers==0` for serial execution"
    def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):
        # max_workers: thread count; None -> defaults.cpus; 0 -> run serially.
        # on_exc: callback invoked with the exception if `map` fails.
        # pause: seconds to stagger each worker's start (see `_call`).
        if max_workers is None: max_workers=defaults.cpus
        # fastcore's store_attr() captures the __init__ arguments onto self
        # (self.max_workers, self.on_exc, self.pause) via frame inspection.
        store_attr()
        self.not_parallel = max_workers==0
        # The parent class rejects max_workers==0, so run with one worker and
        # short-circuit in `map` instead.
        if self.not_parallel: max_workers=1
        super().__init__(max_workers, **kwargs)
    def map(self, f, items, *args, timeout=None, chunksize=1, **kwargs):
        # Apply f(item, *args, **kwargs) to every item, staggering worker
        # starts by `pause` seconds via a shared lock.
        self.lock = Manager().Lock()
        g = partial(f, *args, **kwargs)
        if self.not_parallel: return map(g, items)
        _g = partial(_call, self.lock, self.pause, self.max_workers, g)
        # NOTE(review): on failure the exception is passed to on_exc and map
        # returns None -- callers must be prepared for that.
        try: return super().map(_g, items, timeout=timeout, chunksize=chunksize)
        except Exception as e: self.on_exc(e)
show_doc(ThreadPoolExecutor, title_level=4)
#export
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
    "Same as Python's ProcessPoolExecutor, except can pass `max_workers==0` for serial execution"
    def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):
        # max_workers: process count; None -> defaults.cpus; 0 -> run serially.
        # on_exc: callback invoked with the exception if `map` fails.
        # pause: seconds to stagger each worker's start (see `_call`).
        if max_workers is None: max_workers=defaults.cpus
        # fastcore's store_attr() captures the __init__ arguments onto self
        # (self.max_workers, self.on_exc, self.pause) via frame inspection.
        store_attr()
        self.not_parallel = max_workers==0
        # The parent class rejects max_workers==0, so run with one worker and
        # short-circuit in `map` instead.
        if self.not_parallel: max_workers=1
        super().__init__(max_workers, **kwargs)
    def map(self, f, items, *args, timeout=None, chunksize=1, **kwargs):
        # Apply f(item, *args, **kwargs) to every item, staggering worker
        # starts by `pause` seconds via a manager-backed shared lock.
        self.lock = Manager().Lock()
        g = partial(f, *args, **kwargs)
        if self.not_parallel: return map(g, items)
        _g = partial(_call, self.lock, self.pause, self.max_workers, g)
        # NOTE(review): on failure the exception is passed to on_exc and map
        # returns None -- callers must be prepared for that.
        try: return super().map(_g, items, timeout=timeout, chunksize=chunksize)
        except Exception as e: self.on_exc(e)
show_doc(ProcessPoolExecutor, title_level=4)
#export
try: from fastprogress import progress_bar
except: progress_bar = None
#export
def parallel(f, items, *args, n_workers=defaults.cpus, total=None, progress=None, pause=0,
             threadpool=False, timeout=None, chunksize=1, **kwargs):
    "Applies `func` in parallel to `items`, using `n_workers`"
    # Pick the executor flavour; both support n_workers==0 for serial runs.
    if threadpool: pool_cls = ThreadPoolExecutor
    else: pool_cls = ProcessPoolExecutor
    with pool_cls(n_workers, pause=pause) as ex:
        results = ex.map(f, items, *args, timeout=timeout, chunksize=chunksize, **kwargs)
        if progress and progress_bar:
            n_total = len(items) if total is None else total
            results = progress_bar(results, total=n_total, leave=False)
        return L(results)
# +
def add_one(x, a=1):
    "Sleep for a random sub-15ms interval, then return `x + a` (test helper)."
    time.sleep(random.random() / 80)
    return a + x
inp,exp = range(50),range(1,51)
test_eq(parallel(add_one, inp, n_workers=2, progress=False), exp)
test_eq(parallel(add_one, inp, threadpool=True, n_workers=2, progress=False), exp)
test_eq(parallel(add_one, inp, n_workers=0), exp)
test_eq(parallel(add_one, inp, n_workers=1, a=2), range(2,52))
test_eq(parallel(add_one, inp, n_workers=0, a=2), range(2,52))
# -
# Use the `pause` parameter to ensure a pause of `pause` seconds between processes starting. This is in case there are race conditions in starting some process, or to stagger the time each process starts, for example when making many requests to a webserver. Set `threadpool=True` to use `ThreadPoolExecutor` instead of `ProcessPoolExecutor`.
from datetime import datetime
# +
def print_time(i):
    "Sleep for under a millisecond, then print `i` together with the current wall-clock time."
    nap = random.random() / 1000
    time.sleep(nap)
    now = datetime.now()
    print(i, now)
# Stagger worker start-ups by 0.25 s to demonstrate the effect of `pause`.
parallel(print_time, range(5), n_workers=2, pause=0.25);
# -
# Note that `f` should accept a collection of items.
#export
def run_procs(f, f_done, args):
    "Call `f` for each item in `args` in parallel, yielding `f_done`"
    # NOTE(review): `arg0` and `Self.join()` look like fastcore placeholder/callable
    # idioms (each element of `args` becomes the `args` tuple of one `Process`,
    # and `Self.join()` builds a `lambda o: o.join()`) — confirm against fastcore docs.
    processes = L(args).map(Process, args=arg0, target=f)
    for o in processes: o.start()
    # Yield results as they arrive; `f_done` is expected to drain the shared queue.
    yield from f_done()
    # Wait for every worker process to terminate.
    processes.map(Self.join())
# +
#export
def _f_pg(obj, queue, batch, start_idx):
for i,b in enumerate(obj(batch)): queue.put((start_idx+i,b))
def _done_pg(queue, items): return (queue.get() for _ in items)
# -
#export
def parallel_gen(cls, items, n_workers=defaults.cpus, **kwargs):
    "Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
    # Serial fallback: one instance processes everything; indices are simply 0..len-1.
    if n_workers==0:
        yield from enumerate(list(cls(**kwargs)(items)))
        return
    # Split `items` into one batch per worker and record each batch's starting
    # offset so results carry their original (global) index.
    batches = L(chunked(items, n_chunks=n_workers))
    idx = L(itertools.accumulate(0 + batches.map(len)))  # NOTE(review): `0 +` presumably prepends 0 via fastcore `L.__radd__` — confirm
    queue = Queue()
    if progress_bar: items = progress_bar(items, leave=False)
    f=partial(_f_pg, cls(**kwargs), queue)
    done=partial(_done_pg, queue, items)
    # Each child runs `f(batch, start_idx)`; the parent drains the queue via `done`.
    yield from run_procs(f, done, L(batches,idx).zip())
# +
class _C:
def __call__(self, o): return ((i+1) for i in o)
items = range(5)
# Parallel run: results arrive unordered, so sort by the attached index first.
res = L(parallel_gen(_C, items, n_workers=3))
idxs,dat1 = zip(*res.sorted(itemgetter(0)))
test_eq(dat1, range(1,6))
# The serial fallback (n_workers=0) must produce the same data.
res = L(parallel_gen(_C, items, n_workers=0))
idxs,dat2 = zip(*res.sorted(itemgetter(0)))
test_eq(dat2, dat1)
# -
# `cls` is any class with `__call__`. It will be passed `args` and `kwargs` when initialized. Note that `n_workers` instances of `cls` are created, one in each process. `items` are then split in `n_workers` batches and one is sent to each `cls`. The function then returns a generator of tuples of item indices and results.
# +
class TestSleepyBatchFunc:
    "For testing parallel processes that run at different speeds"
    def __init__(self):
        # Constant offset added to every batch element.
        self.a = 1
    def __call__(self, batch):
        offset = self.a
        for item in batch:
            # Random nap (up to 0.25 s) so workers finish out of order.
            time.sleep(random.random() / 4)
            yield item + offset
x = np.linspace(0,0.99,20)
# Two workers with randomly-varying per-item latency still yield every result.
res = L(parallel_gen(TestSleepyBatchFunc, x, n_workers=2))
test_eq(res.sorted().itemgot(1), x+1)
# -
# # Export -
#hide
# Export this notebook's `#export` cells into the library source (nbdev).
from nbdev.export import notebook2script
notebook2script()
| nbs/03a_parallel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ginttone/multi_python/blob/master/5_test_KFold.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="j1Nll2ipynMf"
# KFold<br>
# 내가 넣은 숫자만큼 쪼개줌
#
# class sklearn.model_selection.KFold(n_splits=5, *, shuffle=False, random_state=None)
#
# X.shape#feature , y.shape #label
# + colab={"base_uri": "https://localhost:8080/"} id="JvQFNSKPxciN" outputId="d6f2deba-de56-4d0e-d649-d5d03d87f6ee"
import numpy as np
# Toy dataset: 7 samples with 2 features each, plus one label per sample.
X = np.array([[1, 2], [3, 4], [1, 2], [3, 4],[5,6],[7,8],[9,10]])
y = np.array([1, 2, 3, 4, 2, 3, 1])
# Shapes: X is (7, 2), y is (7,).
X.shape , y.shape
# + colab={"base_uri": "https://localhost:8080/"} id="EAgHLZyn0BtK" outputId="3689e718-ab5f-40d1-ee09-226d0a5d75ef"
from sklearn.model_selection import KFold
# 2-fold cross-validation with shuffled sample indices.
KF= KFold(n_splits=2,shuffle=True)
# Number of folds that `split` will generate.
KF.get_n_splits(X)
# + [markdown] id="kWUQk-rD19OY"
# 실제 데이터 넣어서 스플릿 해보기
#
# 결과: np.array 한 행의 위치값이 나온 것
#
# + colab={"base_uri": "https://localhost:8080/"} id="d2M4zwFI0wgc" outputId="2b029ccf-6b13-4395-9f60-66a208630488"
# Each iteration yields (train_indices, test_indices) as integer index arrays.
for first, second in KF.split(X):
    print(first,':',second)
# + [markdown] id="Abj_J6NZ3Gd9"
# 데이터에 의한 쏠림을 줄여주려고 만듬
# (overfit 줄이고자)
#
# 중복 안되게 서로 섞어진 것을 확인 가능
# + colab={"base_uri": "https://localhost:8080/"} id="m-d-RJ1P1Dct" outputId="05253e6e-9020-4566-d2d0-819e8c510cd3"
# Re-split with 3 folds: the test folds are disjoint and together cover all samples.
KF= KFold(n_splits=3,shuffle=True)
KF.get_n_splits(X)
for first, second in KF.split(X):
    print(first,':',second)
# + [markdown] id="vhg6Zg823loN"
# mask씌우면 해당 값 찾을수 있어
#
# first,second의 type은 np.ndarray로 되있음
#
# shape은 (5,)(2,)
# + colab={"base_uri": "https://localhost:8080/"} id="_vhpVHmG2-E4" outputId="e7d4c033-7816-4843-e5bb-6bd9a283ca5d"
# Use the index arrays as fancy indices ("masks") to materialise the actual rows.
for first, second in KF.split(X):
    print(first,':',second)
    print('---------')
    # Training rows for this fold.
    x_train=X[first]
    print(x_train)
    print('---------')
    # Held-out rows for this fold.
    x_test = X[second]
    print(x_test)
# + id="2YBTkUiw3x72"
| 5_test_KFold.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this notebook, you will implement the forward longitudinal vehicle model. The model accepts throttle inputs and steps through the longitudinal dynamic equations. Once implemented, you will be given a set of inputs that drives over a small road slope to test your model.
#
# The input to the model is a throttle percentage $x_\theta \in [0,1]$ which provides torque to the engine and subsequently accelerates the vehicle for forward motion.
#
# The dynamic equations consist of many stages to convert throttle inputs to wheel speed (engine -> torque converter -> transmission -> wheel). These stages are bundled together in a single inertia term $J_e$ which is used in the following combined engine dynamic equations.
#
# \begin{align}
# J_e \dot{\omega}_e &= T_e - (GR)(r_{eff} F_{load}) \\ m\ddot{x} &= F_x - F_{load}
# \end{align}
#
# Where $T_e$ is the engine torque, $GR$ is the gear ratio, $r_{eff}$ is the effective radius, $m$ is the vehicle mass, $x$ is the vehicle position, $F_x$ is the tire force, and $F_{load}$ is the total load force.
#
# The engine torque is computed from the throttle input and the engine angular velocity $\omega_e$ using a simplified quadratic model.
#
# \begin{align}
# T_e = x_{\theta}(a_0 + a_1 \omega_e + a_2 \omega_e^2)
# \end{align}
#
# The load forces consist of aerodynamic drag $F_{aero}$, rolling friction $R_x$, and gravitational force $F_g$ from an incline at angle $\alpha$. The aerodynamic drag is a quadratic model and the friction is a linear model.
#
# \begin{align}
# F_{load} &= F_{aero} + R_x + F_g \\
# F_{aero} &= \frac{1}{2} C_a \rho A \dot{x}^2 = c_a \dot{x}^2\\
# R_x &= N(\hat{c}_{r,0} + \hat{c}_{r,1}|\dot{x}| + \hat{c}_{r,2}\dot{x}^2) \approx c_{r,1} \dot{x}\\
# F_g &= mg\sin{\alpha}
# \end{align}
#
# Note that the absolute value is ignored for friction since the model is used for only forward motion ($\dot{x} \ge 0$).
#
# The tire force is computed using the engine speed and wheel slip equations.
#
# \begin{align}
# \omega_w &= (GR)\omega_e \\
# s &= \frac{\omega_w r_e - \dot{x}}{\dot{x}}\\
# F_x &= \left\{\begin{array}{lr}
# cs, & |s| < 1\\
# F_{max}, & \text{otherwise}
# \end{array}\right\}
# \end{align}
#
# Where $\omega_w$ is the wheel angular velocity and $s$ is the slip ratio.
#
# We setup the longitudinal model inside a Python class below. The vehicle begins with an initial velocity of 5 m/s and engine speed of 100 rad/s. All the relevant parameters are defined and like the bicycle model, a sampling time of 10ms is used for numerical integration.
# +
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class Vehicle():
    """Longitudinal vehicle model: fixed physical parameters plus mutable state.

    The state (position, velocity, acceleration, engine speed and its rate)
    starts at x=0, v=5 m/s, a=0, w_e=100 rad/s, w_e_dot=0 and can be restored
    at any time with `reset()`.
    """
    def __init__(self):
        # ==================================
        # Parameters
        # ==================================
        # Throttle-to-engine-torque quadratic coefficients.
        self.a_0 = 400
        self.a_1 = 0.1
        self.a_2 = -0.0002
        # Gear ratio, effective wheel radius, engine inertia, mass, gravity.
        self.GR = 0.35
        self.r_e = 0.3
        self.J_e = 10
        self.m = 2000
        self.g = 9.81
        # Aerodynamic drag and rolling-friction coefficients.
        self.c_a = 1.36
        self.c_r1 = 0.01
        # Tire longitudinal stiffness and maximum tire force.
        self.c = 10000
        self.F_max = 10000
        # Numerical-integration step (seconds).
        self.sample_time = 0.01
        # Initialise the mutable state via the same path `reset` uses.
        self.reset()
    def reset(self):
        # Restore the initial kinematic/engine state.
        self.x = 0
        self.v = 5
        self.a = 0
        self.w_e = 100
        self.w_e_dot = 0
# -
# Implement the combined engine dynamic equations along with the force equations in the cell below. The function $\textit{step}$ takes the throttle $x_\theta$ and incline angle $\alpha$ as inputs and performs numerical integration over one timestep to update the state variables. Hint: Integrate to find the current position, velocity, and engine speed first, then propagate those values into the set of equations.
import math
class Vehicle(Vehicle):
    def step(self, throttle, alpha):
        """Advance the model by one `sample_time` step.

        `throttle` is the throttle percentage in [0, 1]; `alpha` is the road
        incline angle in radians. Integrates the previous derivatives first
        (forward Euler), then recomputes forces and accelerations.
        """
        # Integrate the previous step's derivatives.
        self.w_e += self.w_e_dot * self.sample_time
        self.v += self.a * self.sample_time
        self.x += self.v * self.sample_time
        # Load forces: aerodynamic drag, gravity along the slope, rolling friction.
        F_a = self.c_a * math.pow(self.v,2)
        # FIX: the documented model defines F_g = m*g*sin(alpha); the previous
        # code used the small-angle approximation m*g*alpha. Results are nearly
        # identical for the small slopes used here, but sin matches the spec.
        F_g = self.m * self.g * math.sin(alpha)
        R_x = self.c_r1 * self.v
        F_load = F_a + F_g + R_x
        # Engine torque from the quadratic throttle model, then engine acceleration.
        T_e = throttle * ( self.a_0 + self.a_1 * self.w_e + self.a_2 * math.pow(self.w_e,2))
        self.w_e_dot = (T_e - self.GR * self.r_e * F_load) / self.J_e
        # Wheel speed, slip ratio, and the (saturated) tire force.
        w_w = self.GR * self.w_e
        s = (w_w * self.r_e - self.v) / self.v
        if (abs(s)<1):
            F_x = self.c*s
        else:
            F_x = self.F_max
        self.a = (F_x - F_load) / self.m
# Using the model, you can send constant throttle inputs to the vehicle in the cell below. You will observe that the velocity converges to a fixed value based on the throttle input due to the aerodynamic drag and tire force limit. A similar velocity profile can be seen by setting a negative incline angle $\alpha$. In this case, gravity accelerates the vehicle to a terminal velocity where it is balanced by the drag force.
# +
# Simulate 100 s of constant-throttle driving on flat ground and plot speed;
# velocity converges to a terminal value set by drag and the tire-force limit.
sample_time = 0.01
time_end = 100
model = Vehicle()
t_data = np.arange(0,time_end,sample_time)
v_data = np.zeros_like(t_data)
# throttle percentage between 0 and 1
throttle = 0.2
# incline angle (in radians)
alpha = 0
# Record speed before stepping so v_data[0] is the initial 5 m/s.
for i in range(t_data.shape[0]):
    v_data[i] = model.v
    model.step(throttle, alpha)
plt.plot(t_data, v_data)
plt.show()
# -
# We will now drive the vehicle over a slope as shown in the diagram below.
#
# 
#
# To climb the slope, a trapezoidal throttle input is provided for the next 20 seconds as shown in the figure below.
#
# 
#
# The vehicle begins at 20% throttle and gradually increases to 50% throttle. This is maintained for 10 seconds as the vehicle climbs the steeper slope. Afterwards, the vehicle reduces the throttle to 0.
#
# In the cell below, implement the ramp angle profile $\alpha (x)$ and throttle profile $x_\theta (t)$ and step them through the vehicle dynamics. The vehicle position $x(t)$ is saved in the array $\textit{x_data}$. This will be used to grade your solution.
#
# +
# Set up the 20 s slope-climbing scenario: pre-allocate time/state arrays
# and seed the initial throttle (20%) and slope angle (5% grade).
time_end = 20
t_data = np.arange(0,time_end,sample_time)
x_data = np.zeros_like(t_data)
# reset the states
model.reset()
# ==================================
# Learner solution begins here
# ==================================
# NOTE(review): a fresh Vehicle is created below, making the reset() above redundant — confirm intended.
model = Vehicle()
t_data = np.arange(0,time_end,sample_time)
v_data = np.zeros_like(t_data)
x_data = np.zeros_like(t_data)
throttle_data = np.zeros_like(t_data)
al_data = np.zeros_like(t_data)
# throttle percentage between 0 and 1
throttle = 0.2
# x_d
# incline angle (in radians)
alpha = 0
throttle_data[0]= 0.2
x_data[0] = 0
al_data[0]=math.atan(0.05)
# Trapezoidal throttle profile over 2000 steps (20 s at 10 ms per step):
# ramp 0.2 -> 0.5 for the first 5 s, hold 0.5 for 10 s, then ramp down to 0.
# The slope switches from a 5% to a 10% grade partway up (step 676 — presumably
# the step where x crosses the 60 m ramp transition; confirm against the figure).
for i in range(2000):
    if (i>0 and i<=500):
        # Phase 1: linear throttle ramp 0.2 -> 0.5 on the 5% grade.
        throttle_data[i] = (i*0.06*0.01) + 0.2
        alpha = math.atan(0.05)
        al_data[i] = alpha
        v_data[i] = model.v
        x_data[i] = (model.v * 0.01) + x_data[i-1]
        model.step(throttle_data[i], al_data[i])
    elif (i>500 and i<=1500):
        # Phase 2: hold 50% throttle; steeper grade after step 676.
        throttle = throttle_data[i] = 0.5
        alpha = math.atan(0.05)
        if (i >= 676):
            alpha = math.atan(0.1)
        al_data[i] = alpha
        v_data[i] = model.v
        x_data[i] = (model.v * 0.01) + x_data[i-1]
        model.step(throttle, alpha)
    elif (i>=1500 and i<=2000):
        # Phase 3: linear throttle ramp down to 0; flat road after step 1512.
        # NOTE(review): alpha is set to the raw value 0.1 here (not atan(0.1)) — confirm intended.
        alpha = 0.1
        if (i>=1512):
            alpha = 0
        al_data[i] = alpha
        v_data[i] = model.v
        x_data[i] = (model.v * 0.01) + x_data[i-1]
        throttle_data[i] = (-0.1 * i *0.01) + 2
        model.step(throttle_data[i], al_data[i])
# Velocity profile over the 20 s run.
plt.plot(t_data, v_data)
plt.show()
# plt.axis('equal')
# Applied throttle profile (the trapezoid described above).
plt.plot(t_data, throttle_data,label='Learner Model')
plt.legend()
plt.show()
# ==================================
# Learner solution ends here
# ==================================
# Plot x vs t for visualization
plt.plot(t_data, x_data)
plt.show()
# -
# If you have implemented the vehicle model and inputs correctly, you should see that the vehicle crosses the ramp at ~15s where the throttle input begins to decrease.
#
# The cell below will save the time and vehicle inputs as text file named $\textit{xdata.txt}$. To locate the file, change the end of your web directory to $\textit{/notebooks/Course_1_Module_4/xdata.txt}$
#
# Once you are there, you can download the file and submit to the Coursera grader to complete this assessment.
# +
# Save (t, x) pairs as two comma-separated columns for the Coursera grader.
data = np.vstack([t_data, x_data]).T
np.savetxt('xdata.txt', data, delimiter=', ')
# -
# Congratulations! You have now completed the assessment! Feel free to test the vehicle model with different inputs in the cell below, and see what trajectories they form. In the next module, you will see the longitudinal model being used for speed control. See you there!
# +
# Free-play cell: simulate 30 s of coasting (zero throttle, flat road) and
# plot the resulting position trajectory.
sample_time = 0.01
time_end = 30
model.reset()
t_data = np.arange(0,time_end,sample_time)
x_data = np.zeros_like(t_data)
# ==================================
# Test various inputs here
# ==================================
for i in range(t_data.shape[0]):
    # Record position before stepping so x_data[0] is the starting position.
    x_data[i] = model.x
    model.step(0,0)
# FIX: the original plotted `x_data` against an undefined `y_data` (NameError)
# and never filled `x_data`; plot position versus time instead.
plt.plot(t_data, x_data)
plt.show()
| Longitudinal_Vehicle_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 19: Interfaces
#
# *Author: <NAME>*
#
# ## Objective
# Today, we're learning about Interfaces. Check out the Tutorial tab for learning materials and an instructional video!
#
# ## Task
# The AdvancedArithmetic interface and the method declaration for the abstract divisorSum(n) method are provided for you in the editor below.
#
# Complete the implementation of Calculator class, which implements the AdvancedArithmetic interface. The implementation for the divisorSum(n) method must return the sum of all divisors of $n$.
#
# ## Example
#
# $n=25$
#
# The divisors of $25$ are $1,5,25$. Their sum is $31$.
#
#
# $n=20$
#
# The divisors of $20$ are $1,2,4,5,10,20$ and their sum is $42$.
#
# ## Sample Input
# ```
# 6
# ```
#
# ## Sample Output
# ```
# I implemented: AdvancedArithmetic
# 12
# ```
#
# + pycharm={"name": "#%%\n"}
class AdvancedArithmetic(object):
    """Interface: concrete subclasses must implement `divisorSum`."""
    def divisorSum(n):
        raise NotImplementedError

class Calculator(AdvancedArithmetic):
    """Concrete `AdvancedArithmetic` that sums the divisors of an integer."""
    def divisorSum(self, n):
        """Return the sum of all positive divisors of `n`.

        Walks divisor pairs (i, n // i) up to sqrt(n), so this runs in
        O(sqrt(n)) instead of the naive O(n) scan over 1..n.
        """
        total = 0
        i = 1
        while i * i <= n:
            if n % i == 0:
                total += i
                partner = n // i
                if partner != i:  # avoid double-counting a perfect-square root
                    total += partner
            i += 1
        return total
# Read `n` from stdin, compute its divisor sum, and report the interface used.
n = int(input())
my_calculator = Calculator()
s = my_calculator.divisorSum(n)
# `__bases__[0].__name__` recovers the name of the implemented interface.
print("I implemented: " + type(my_calculator).__bases__[0].__name__)
print(s)
| 30 Days of Challenge/Day 19 - Interfaces.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # RBM training $L= 100$ - Dataset with $ T = \infty$
#
# In order to investigate the RBM flow in an extreme situation, we train the machine on a dataset composed only of random states.
#
# It is crucial to note that, since the visible layer is always fed with a one-dimensional vector (an array is flattened into a vector before being fed into the visible layer), the machine does not have any geometric information about the lattice in this case.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
torch.manual_seed(12)
import sys
sys.path.append('../modules')
from rbm import RBM
from mc_ising2d import IsingMC
L = 100
# -
# #### Random configurations:
# +
# Build `nstates` random half-up/half-down spin configurations (each row has
# exactly L*L/2 ones and L*L/2 zeros).
nstates = 1000
half_size = int(L * L / 2)
up = nn.init.constant_(torch.empty(half_size), val=1.0)
down = nn.init.constant_(torch.empty(half_size), val=0.0)
up_down = torch.cat((up, down), 0)
nspins = up_down.shape[0]
# First row is the ordered configuration; the rest are random permutations of it.
rows = [up_down.reshape(1, nspins)]
for _ in range(nstates - 1):
    shuffled = up_down[torch.randperm(nspins)]
    rows.append(shuffled.reshape(1, nspins))
data_set_high = torch.cat(rows, 0)
# -
# #### Constructing training and test sets
#
# Using `train_test_split` from [`sklearn`](http://scikit-learn.org/) it is easy to split the data into training and test sets.
# Hold out 20% of the configurations for monitoring; fixed seed for reproducibility.
train_data, test_data = train_test_split(np.array(data_set_high),
                                         test_size= 0.2,
                                         random_state= 12)
# +
training_set = torch.Tensor(train_data)
test_set = torch.Tensor(test_data)
# Shuffle the training rows once before learning.
training_set = training_set[torch.randperm(training_set.size()[0])]
# -
# #### Training the model
#
# Our code implementing a Restricted Boltzmann Machine is written in a Python class called `RBM`, which is imported from `rbm.py`.
#
# For simplification, the units have no bias and the RBM stochasticity parameter, represented below by $T$ is set to unity, as usual in most practical applications. Note that we set `use_cuda=True`, which makes use of [CUDA tensor types](https://pytorch.org/docs/stable/cuda.html), implementing GPU computation. If a GPU is not available, one should just set `use_cuda=False`.
# +
# Hyper-parameters: the hidden layer matches the visible layer in size (L*L units each).
Nv = training_set.shape[1]
Nh = training_set.shape[1]
lr = 0.001
# k_learning / k_sampling: presumably the number of CD-k Gibbs steps used for
# learning and sampling respectively — confirm against rbm.py.
k_learning = 1
batch_size = 100
nb_epoch = 1000
k_sampling = 1
# Units have no bias and the stochasticity parameter T is set to 1 (see text above).
rbm = RBM(num_visible= Nv,
          num_hidden= Nh,
          bias= False,
          T= 1.0,
          use_cuda= False)
rbm.learn(training_set= training_set,
          test_set = test_set,
          lr= lr,
          nb_epoch= nb_epoch,
          batch_size= batch_size,
          k_learning= k_learning,
          k_sampling= k_sampling,
          verbose= 1)
# -
# #### Saving the trained model
# +
# Encode the hyper-parameters in the file name so saved runs are self-describing.
nb_epoch = rbm.num_train_epochs()
name = 'RBM_model_T_inf_only_nv%d_nh%d_lr%.1E_k%d_bsize%d_nepochs%d' % (Nv,
                                                                        Nh,
                                                                        lr,
                                                                        k_learning,
                                                                        batch_size,
                                                                        nb_epoch)
PATH = '../RBM_trained_models/'+ name + '.pt'
# NOTE(review): this saves the whole module object, not a state_dict —
# loading will require the RBM class definition to be importable.
torch.save(rbm, PATH)
# -
# ### Weights distribution
# +
# Pull the learned parameters; only the weight matrix W is analysed below.
W, v, h = rbm.parameters()
del v
del h
# Free any GPU memory held by the discarded tensors.
torch.cuda.empty_cache()
# +
# Flatten the weight matrix into a 1-D sample for the histogram and the fit.
W_ = W.cpu().numpy().reshape((W.shape[0]*W.shape[1]))
# Plot normalized histogram
plt.hist(W_, bins= 100, density= True)
# Maximum and minimum of xticks to compute the theoretical distribution
x_min, x_max = min(plt.xticks()[0]), max(plt.xticks()[0])
domain = np.linspace(x_min, x_max, len(W_))
# Fitting a normal distribution
muW_, sigmaW_ = stats.norm.fit(W_)
plot_pdf = stats.norm.pdf(domain, muW_, sigmaW_) # Fitting the PDF in the interval
plt.plot(domain, plot_pdf, linewidth= 2.5,
         label= '$\mu= %f$ \n$\sigma^2$ = %f' % (muW_, sigmaW_**2 ))
plt.title('Fitting a Normal Distribution for the weights ${\cal W}$')
plt.legend()
plt.show()
# -
del W
torch.cuda.empty_cache()
| training_RBM/.ipynb_checkpoints/RBM_L100_train_dataset__only_Tinf-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np # numpy kütüphanesi programa np adıyla dahil edilir.
# Bundan sonra numpy. yerine np. olarak çalıştırılacaktır.
# # NUMPY-1 START (BAŞLANGIÇ)
# 15 consecutive integers (0..14) reshaped into a 3x5 matrix.
a = np.arange(15).reshape(3, 5)
a
# •Bu satırda np.arange() fonksiyonu ile 15'e kadar bir sınır belirtiyoruz ve .reshape ile
# 3 satırlık ve 5 sütünluk bir liste oluşturmasını sağlıyoruz
#
# •np.arange() içine girilen değer .reshape() içine girilen değerlerin çarpımına eşit olması gerekmektedir.
#
# •Mesela np.arange(20).reshape(3, 2) yapmak bize hata döndürecektir.
# >>> ValueError: cannot reshape array of size 20 into shape (3,2)
#
#
# #### ÖZET np.arange(x).reshape(a, b) ==> a*b = x
a.shape
# • .shape fonksiyonu ise bu dizinin kaç satır ve sütundan olduğunu gösteriyor.
a.ndim
# • .ndim bize dizinin boyutunu (1D, 2D, 3D vs.)
print((np.array([0,1,2])).ndim)
# • Yukardaki ise tek boyutlu (1D) bir dizidir.
a.dtype.name
# • .dtype.name bize dizinin içerisindeki değerlerin tipini gösterir.
a.itemsize
# • .itemsize bir dizinin içerisindeki her bir elemanın bayt cinsinden boyutudur.
#
# Örneğin, float64 tipi bir dizi elemanın 8 (= 64/8) öğesi boyutundayken,
# complex32 türünden biri öğe 4'e (= 32/8) sahiptir.
a.size
# • .size ise bir dizinin içerisinde kaç tane değer olduğunu bize gösterir.
type(a)
# • numpy'daki dizilerin tipi ndarray tipidir.
#
# <type 'numpy.ndarray'> şeklinde.
# Convert a plain Python list into an ndarray.
b = np.array([1, 2, 3])
b
# • Numpy'da herhangi bir listeyi, tuple'ı .ndarray nesnesine dönüştürmek istersek np.array(x) fonksiyonunu kullanırız.
type(b)
# _______________________________________________________________________________________________
#
# # Yukardaki kodları çalıştırmak için öncelikle numpy kütüphanesini indirmeniz gerekmektedir.
# # python3 -m pip install numpy
# # Bu kodlar python3 ün yorumlayıcısında veya jupyter.notebook'da çalıştırılmalıdır.
| 01Numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
# %matplotlib inline
# ## Starting with spreadsheets and text files with a list of trees
# Load the PSME stem table for the Wind River 12-ha plot, dropping any column
# that contains NaN values (dropna over axis=1).
wind_river_df = pd.read_csv('../data/raw/wind_river/plot_data/wind_river_12ha_PSME.csv').dropna(axis=1)
wind_river_df.head()
# ## Georeferencing location of trees relative to southwest corner of each plot
f, ax = plt.subplots(1, figsize=(14, 20))
# HJ Andrews plot polygons used to georeference per-plot stem maps.
plots = gpd.read_file('../data/interim/hj_andrews/hj_andrews_good_plots.shp')
plots.plot(ax=ax);
# ## Producing a geo-referenced shapefile
# Load the geo-referenced live-tree shapefile and preview it.
wind_river_gdf = gpd.read_file('../data/interim/wind_river/wind_river_live_trees.shp')
wind_river_gdf.head()
f, ax = plt.subplots(1, figsize=(14, 20))
wind_river_gdf.plot(ax=ax, alpha=0.5)
ax.set_title('Geo-referenced Stem Map, Wind River Large Plot', fontsize=18, pad=15);
# Crown ratio = L / HEIGHT (L is presumably crown length — confirm with the data dictionary).
wind_river_gdf['cr_ratio'] = wind_river_gdf.L / wind_river_gdf.HEIGHT
wind_river_gdf.columns
# Standardise column names for downstream use, then drop raw columns no longer needed.
wind_river_gdf = wind_river_gdf.rename({'TAG': 'tree_id',
                                        'SPECIES':'species',
                                        'DBH':'dbh',
                                        'HEIGHT':'top_height',
                                        'MAXR':'cr_radius',
                                        'UTM_X':'stem_x',
                                        'UTM_Y':'stem_y'
                                       }, axis='columns').drop(labels=['L', 'X', 'Y', 'Z'],
                                                               axis=1)
wind_river_gdf.to_file('../data/processed/wind_river/wind_river_tree_list.shp')
| notebooks/sandbox/Illustrating process from treelist to shapefile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Number of most-variable genes to keep.
ngene=200
# hESC
fname='/home/linaiqi/Lab/data/BEELINE-data/inputs/scRNA-Seq/hESC/GeneOrdering.csv'
df = pd.read_csv(fname)
# Rank genes by expression variance (descending) and keep the top `ngene` names.
df=df.sort_values(by=['Variance'], ascending=False)
df.head()
genes=df.iloc[:ngene, 0].tolist()
# +
# df=df.rename(mapper={'Unnamed: 0': "gene"}, axis='columns')
# df=df.set_index('gene')
# fout='/home/linaiqi/Lab/data/hESC200-celldata.csv'
# df.iloc[:ngene].to_csv(fout)
# -
fname='/home/linaiqi/Lab/data/BEELINE-data/inputs/scRNA-Seq/hESC/ExpressionData.csv'
df = pd.read_csv(fname)
df.head()
# Column names: the first is the gene-id column, the rest are cell ids.
lst=df.columns
lst
cnt=0
# Write a pseudo-time table: each cell gets its column position as "PseudoTime1".
fout='/home/linaiqi/Lab/data/hESC200-celldata.csv'
with open(fout,'w') as f:
    f.write(',PseudoTime1\n')
    for g in lst[1:]:
        f.write('%s,%d\n'%(g,cnt))
        cnt+=1
# Index the expression matrix by gene name.
df=df.rename(mapper={'Unnamed: 0': "gene"}, axis='columns')
df=df.set_index('gene')
# Expression matrix restricted to the selected genes (genes x cells), as CSV.
fout='/home/linaiqi/Lab/data/bl-hESC200.csv'
df.loc[genes].to_csv(fout)
# Transpose to cells x genes for the tab-separated export below.
data=df.loc[genes].to_numpy().T
data.shape
fout='/home/linaiqi/Lab/data/hESC200.txt'
with open(fout, 'w') as f:
    f.write("\t".join(genes)+'\n')
    for i in range(data.shape[0]):
        f.write('%.8f'%data[i, 0])
        for j in range(1, data.shape[1]):
            f.write('\t%.8f'%data[i, j])
        f.write('\n')
# +
# Restrict the hESC ChIP-seq network to edges whose endpoints are both in the
# selected gene list, copying the header line through unchanged.
fname='/home/linaiqi/Lab/data/Networks/human/hESC-ChIP-seq-network.csv'
fout='/home/linaiqi/Lab/data/bl-hESC200-ChIP-seq-network.csv'
cnt=0
# O(1) membership tests instead of scanning the `genes` list for every edge.
gene_set = set(genes)
with open(fname) as fin, open(fout, 'w') as f:
    l = fin.readline()
    f.write(l)
    for l in fin:
        a, b = l.strip().split(',')
        if a in gene_set and b in gene_set:
            cnt+=1
            f.write('%s,%s\n'%(a,b))
# Number of edges kept.
print(cnt)
# -
# +
# Same filtering as above, for the non-cell-type-specific ChIP-seq network.
fname='/home/linaiqi/Lab/data/Networks/human/Non-specific-ChIP-seq-network.csv'
fout='/home/linaiqi/Lab/data/bl-hESC200-nonspec-ChIP-seq-network.csv'
cnt=0
# O(1) membership tests instead of scanning the `genes` list for every edge.
gene_set = set(genes)
with open(fname) as fin, open(fout, 'w') as f:
    l = fin.readline()
    f.write(l)
    for l in fin:
        a, b = l.strip().split(',')
        if a in gene_set and b in gene_set:
            cnt+=1
            f.write('%s,%s\n'%(a,b))
# Number of edges kept.
print(cnt)
# -
# +
# Same filtering as above, for the STRING network (no edge count reported here).
fname='/home/linaiqi/Lab/data/Networks/human/STRING-network.csv'
fout='/home/linaiqi/Lab/data/bl-hESC200-STRING-network.csv'
# O(1) membership tests instead of scanning the `genes` list for every edge.
gene_set = set(genes)
with open(fname) as fin, open(fout, 'w') as f:
    l = fin.readline()
    f.write(l)
    for l in fin:
        a, b = l.strip().split(',')
        if a in gene_set and b in gene_set:
            f.write('%s,%s\n'%(a,b))
# -
| eval/beeline-hESC-preprocess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Solar Power Generation Data: KMM, Naive and Decision Tree.
# ## Solar power generation and sensor data for two power plants.
#
# Description
# This data has been gathered at two solar power plants in India over a 34 day period. It has two pairs of files - each pair has one power generation dataset and one sensor readings dataset. The power generation datasets are gathered at the inverter level - each inverter has multiple lines of solar panels attached to it. The sensor data is gathered at a plant level - single array of sensors optimally placed at the plant.
#
# There are a few areas of concern at the solar power plant -
#
# - Can we predict the power generation for next couple of days? - this allows for better grid management
# - Can we identify generation profiles?
# - Can we identify the need for panel cleaning/maintenance?
# - Can we identify faulty or suboptimally performing equipment?
#
# [Link to source](https://www.kaggle.com/anikannal/solar-power-generation-data)
# ## Aluno: <NAME>
# +
import sys
sys.path.append('../')
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
from read import (
sourcesGen01Cleaned,
groupsWeather01Cleaned,
)
# +
X = groupsWeather01Cleaned["IRRADIATION"].T
y = sourcesGen01Cleaned["AC_POWER"][list(sourcesGen01Cleaned["AC_POWER"].keys())[0]].T
X0 = X.iloc[0].values
y0 = y.iloc[0].values
X1 = X.iloc[1].values
y1 = y.iloc[1].values
plt.plot(X1, y1, 'o', color='red', label='Original data')
plt.xlabel("IRRADIATION")
plt.ylabel("AC_POWER")
neigh = KNeighborsClassifier(n_neighbors=3)
neigh.fit(X0.reshape(-1, 1), y0.astype(int))
pred = neigh.predict(X1.reshape(-1, 1))
knnScore = neigh.score(X0.reshape(-1, 1), y0.astype(int))
plt.plot(X1, pred, 'o', color='blue', label='Predicted data')
plt.legend()
plt.show()
# +
X = groupsWeather01Cleaned["IRRADIATION"].T
y = sourcesGen01Cleaned["AC_POWER"][list(sourcesGen01Cleaned["AC_POWER"].keys())[0]].T
X0 = X.iloc[0].values
y0 = y.iloc[0].values
X1 = X.iloc[1].values
y1 = y.iloc[1].values
plt.plot(X1, y1, 'o', color='red', label='Original data')
plt.xlabel("IRRADIATION")
plt.ylabel("AC_POWER")
clf = GaussianNB()
clf.fit(X0.reshape(-1, 1), y0.astype(int))
pred = clf.predict(X1.reshape(-1, 1))
naiveBayesScore = clf.score(X0.reshape(-1, 1), y0.astype(int))
plt.plot(X1, pred, 'x', color='cyan', label='Predicted data')
plt.legend()
plt.show()
# +
X = groupsWeather01Cleaned["IRRADIATION"].T
y = sourcesGen01Cleaned["AC_POWER"][list(sourcesGen01Cleaned["AC_POWER"].keys())[0]].T
X0 = X.iloc[0].values
y0 = y.iloc[0].values
X1 = X.iloc[1].values
y1 = y.iloc[1].values
plt.plot(X1, y1, 'o', color='red', label='Original data')
plt.xlabel("IRRADIATION")
plt.ylabel("AC_POWER")
clf = DecisionTreeClassifier(random_state=0, criterion="entropy", max_depth=4)
clf.fit(X0.reshape(-1, 1), y0.astype(int))
pred = clf.predict(X1.reshape(-1, 1))
treeScore = clf.score(X0.reshape(-1, 1), y0.astype(int))
plt.plot(X1, pred, 'x', color='blue', label='Predicted data')
plt.legend()
plt.show()
# +
# Compare the three classifiers' (training-set) accuracies side by side.
height = [knnScore, naiveBayesScore, treeScore]
bars = ('KNeighbors', 'Naive Bayes', 'Tree Decision')
x_pos = np.arange(len(bars))
plt.bar(x_pos, height)
plt.xticks(x_pos, bars)
plt.show()
| src/notebooks/ClassifierTestBase.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="TBFXQGKYUc4X"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="1z4xy2gTUc4a"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="FE7KNzPPVrVV"
# # Dogs vs Cats Image Classification Without Image Augmentation
# + [markdown] colab_type="text" id="KwQtSOz0VrVX"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c01_dogs_vs_cats_without_augmentation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c01_dogs_vs_cats_without_augmentation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="gN7G9GFmVrVY"
#
#
# In this tutorial, we will discuss how to classify images into pictures of cats or pictures of dogs. We'll build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`.
#
# ## Specific concepts that will be covered:
# In the process, we will build practical experience and develop intuition around the following concepts
#
# * Building _data input pipelines_ using the `tf.keras.preprocessing.image.ImageDataGenerator`class — How can we efficiently work with data on disk to interface with our model?
# * _Overfitting_ - what is it, how to identify it? .
#
# <hr>
#
#
# **Before you begin**
#
# Before running the code in this notebook, reset the runtime by going to **Runtime -> Reset all runtimes** in the menu above. If you have been working through several notebooks, this will help you avoid reaching Colab's memory limits.
#
# + [markdown] colab_type="text" id="zF9uvbXNVrVY"
# # Importing packages
# + [markdown] colab_type="text" id="VddxeYBEVrVZ"
# Let's start by importing required packages:
#
# * os — to read files and directory structure
# * numpy — for some matrix math outside of TensorFlow
# * matplotlib.pyplot — to plot the graph and display images in our training and validation data
#
#
#
#
#
# + colab={} colab_type="code" id="qyHrknvL0pOu"
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import matplotlib.pyplot as plt
import numpy as np
# + [markdown] colab_type="text" id="AhtpmVue0p3G"
# For the TensorFlow imports, we directly specify Keras symbols (Sequential, Dense, etc.). This enables us to refer to these names directly in our code without having to qualify their full names (for example, `Dense` instead of `tf.keras.layer.Dense`).
#
# + colab={} colab_type="code" id="rtPGh2MAVrVa"
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# + colab={} colab_type="code" id="GHHqtPisG3R1"
import logging

# Silence TensorFlow's INFO/WARNING log spam in notebook output;
# only errors will be printed.
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# + [markdown] colab_type="text" id="UZZI6lNkVrVm"
# # Data Loading
# + [markdown] colab_type="text" id="DPHx8-t-VrVo"
# To build our image classifier, we begin by downloading the dataset. The dataset we are using is a filtered version of <a href="https://www.kaggle.com/c/dogs-vs-cats/data" target="_blank">Dogs vs. Cats</a> dataset from Kaggle (ultimately, this dataset is provided by Microsoft Research).
#
# In previous Colabs, we've used <a href="https://www.tensorflow.org/datasets" target="_blank">TensorFlow Datasets</a>, which is a very easy and convenient way to use datasets. In this Colab however, we will make use of the class `tf.keras.preprocessing.image.ImageDataGenerator` which will read data from disk. We therefore need to directly download *Dogs vs. Cats* from a URL and unzip it to the Colab filesystem.
# + colab={} colab_type="code" id="rpUSoFjuVrVp"
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
# Download and extract the archive into the Keras cache (~/.keras/datasets).
# `zip_dir` is the local path of the downloaded zip file. (The 'filterted'
# typo only affects the cached filename and is harmless.)
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
# + [markdown] colab_type="text" id="Giv0wMQzVrVw"
# The dataset we have downloaded has the following directory structure.
#
# <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" >
# <b>cats_and_dogs_filtered</b>
# |__ <b>train</b>
# |______ <b>cats</b>: [cat.0.jpg, cat.1.jpg, cat.2.jpg ...]
# |______ <b>dogs</b>: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...]
# |__ <b>validation</b>
# |______ <b>cats</b>: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ...]
# |______ <b>dogs</b>: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...]
# </pre>
#
# We can list the directories with the following terminal command:
# + colab={} colab_type="code" id="ssD23VbTZeVA"
zip_dir_base = os.path.dirname(zip_dir)
# !find $zip_dir_base -type d -print
# + [markdown] colab_type="text" id="VpmywIlsVrVx"
# We'll now assign variables with the proper file path for the training and validation sets.
# + colab={} colab_type="code" id="sRucI3QqVrVy"
# The zip extracts next to the archive; build paths to the train/validation
# cat and dog sub-directories shown in the tree above.
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')  # directory with our training cat pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')  # directory with our training dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')  # directory with our validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # directory with our validation dog pictures
# + [markdown] colab_type="text" id="ZdrHHTy2VrV3"
# ### Understanding our data
# + [markdown] colab_type="text" id="LblUYjl-VrV3"
# Let's look at how many cats and dogs images we have in our training and validation directory
# + colab={} colab_type="code" id="vc4u8e9hVrV4"
# Count the images per class by listing each directory.
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))

total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val
# + colab={} colab_type="code" id="g4GGzGt0VrV7"
print('total training cat images:', num_cats_tr)
print('total training dog images:', num_dogs_tr)

print('total validation cat images:', num_cats_val)
print('total validation dog images:', num_dogs_val)
print("--")
print("Total training images:", total_train)
print("Total validation images:", total_val)
# + [markdown] colab_type="text" id="tdsI_L-NVrV_"
# # Setting Model Parameters
# + [markdown] colab_type="text" id="8Lp-0ejxOtP1"
# For convenience, we'll set up variables that will be used later while pre-processing our dataset and training our network.
# + colab={} colab_type="code" id="3NqNselLVrWA"
BATCH_SIZE = 100  # Number of training examples to process before updating our models variables
IMG_SHAPE  = 150  # Our training data consists of images with width of 150 pixels and height of 150 pixels
# + [markdown] colab_type="text" id="INn-cOn1VrWC"
# # Data Preparation
# + [markdown] colab_type="text" id="5Jfk6aSAVrWD"
# Images must be formatted into appropriately pre-processed floating point tensors before being fed into the network. The steps involved in preparing these images are:
#
# 1. Read images from the disk
# 2. Decode contents of these images and convert it into proper grid format as per their RGB content
# 3. Convert them into floating point tensors
# 4. Rescale the tensors from values between 0 and 255 to values between 0 and 1, as neural networks prefer to deal with small input values.
#
# Fortunately, all these tasks can be done using the class **tf.keras.preprocessing.image.ImageDataGenerator**.
#
# We can set this up in a couple of lines of code.
# + colab={} colab_type="code" id="syDdF_LWVrWE"
# Both generators only rescale pixel values from [0, 255] to [0, 1];
# no augmentation is applied in this notebook.
train_image_generator      = ImageDataGenerator(rescale=1./255)  # Generator for our training data
validation_image_generator = ImageDataGenerator(rescale=1./255)  # Generator for our validation data
# + [markdown] colab_type="text" id="RLciCR_FVrWH"
# After defining our generators for training and validation images, **flow_from_directory** method will load images from the disk, apply rescaling, and resize them using single line of code.
# + colab={} colab_type="code" id="Pw94ajOOVrWI"
train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE,
                                                           directory=train_dir,
                                                           shuffle=True,
                                                           target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150)
                                                           class_mode='binary')
# + colab={} colab_type="code" id="2oUoKUzRVrWM"
# Validation data is deliberately NOT shuffled so results are reproducible
# across evaluation runs.
val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE,
                                                              directory=validation_dir,
                                                              shuffle=False,
                                                              target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150)
                                                              class_mode='binary')
# + [markdown] colab_type="text" id="hyexPJ8CVrWP"
# ### Visualizing Training images
# + [markdown] colab_type="text" id="60CnhEL4VrWQ"
# We can visualize our training images by getting a batch of images from the training generator, and then plotting a few of them using `matplotlib`.
# + colab={} colab_type="code" id="3f0Z7NZgVrWQ"
# One batch is (images, labels); the labels are discarded here.
sample_training_images, _ = next(train_data_gen)
# + [markdown] colab_type="text" id="49weMt5YVrWT"
# The `next` function returns a batch from the dataset. One batch is a tuple of (*many images*, *many labels*). For right now, we're discarding the labels because we just want to look at the images.
# + colab={} colab_type="code" id="JMt2RES_VrWU"
# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
def plotImages(images_arr):
    """Display the first five images of `images_arr` side by side in one row."""
    fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    flat_axes = axes.flatten()
    for axis, image in zip(flat_axes, images_arr):
        axis.imshow(image)
    plt.tight_layout()
    plt.show()
# + colab={} colab_type="code" id="d_VVg_gEVrWW"
plotImages(sample_training_images[:5]) # Plot images 0-4
# + [markdown] colab_type="text" id="b5Ej-HLGVrWZ"
# # Model Creation
# + [markdown] colab_type="text" id="wEgW4i18VrWZ"
# ## Define the model
#
# The model consists of four convolution blocks with a max pool layer in each of them. Then we have a fully connected layer with 512 units, with a `relu` activation function. The model will output class probabilities for two classes — dogs and cats — using `softmax`.
# + colab={} colab_type="code" id="F15-uwLPVrWa"
# Four conv/max-pool blocks, then a 512-unit dense layer and a 2-way
# softmax head (cat vs. dog).
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(2, activation='softmax'))
# + [markdown] colab_type="text" id="PI5cdkMQVrWc"
# ### Compile the model
#
# As usual, we will use the `adam` optimizer. Since we are output a softmax categorization, we'll use `sparse_categorical_crossentropy` as the loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so we are passing in the metrics argument.
# + colab={} colab_type="code" id="6Mg7_TXOVrWd"
# sparse_categorical_crossentropy matches the integer labels produced by
# class_mode='binary' together with the 2-unit softmax output above.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# + [markdown] colab_type="text" id="2YmQZ3TAVrWg"
# ### Model Summary
#
# Let's look at all the layers of our network using **summary** method.
# + colab={} colab_type="code" id="Vtny8hmBVrWh"
model.summary()
# + [markdown] colab_type="text" id="N06iqE8VVrWj"
# ### Train the model
# + [markdown] colab_type="text" id="oub9RtoFVrWk"
# It's time we train our network.
#
# Since our batches are coming from a generator (`ImageDataGenerator`), we'll use `fit_generator` instead of `fit`.
# + colab={} colab_type="code" id="KSF2HqhDVrWk"
EPOCHS = 100

# `Model.fit_generator` was deprecated in TF 2.1 and later removed;
# `Model.fit` accepts generator inputs (including ImageDataGenerator
# iterators) directly with the same arguments, so use it instead.
history = model.fit(
    train_data_gen,
    steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))),
    epochs=EPOCHS,
    validation_data=val_data_gen,
    validation_steps=int(np.ceil(total_val / float(BATCH_SIZE)))
)
# + [markdown] colab_type="text" id="ojJNteAGVrWo"
# ### Visualizing results of the training
# + [markdown] colab_type="text" id="LZPYT-EmVrWo"
# We'll now visualize the results we get after training our network.
# + colab={} colab_type="code" id="K6oA77ADVrWp"
# tf.keras records this metric as 'accuracy'/'val_accuracy' in TF 2.x but
# as 'acc'/'val_acc' in TF 1.x; fall back so this cell works with either
# (the original hard-coded 'acc' and raises KeyError under TF 2.x).
acc = history.history.get('accuracy', history.history.get('acc'))
val_acc = history.history.get('val_accuracy', history.history.get('val_acc'))

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(EPOCHS)

# Accuracy curves (left) and loss curves (right), training vs. validation.
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.savefig('./foo.png')
plt.show()
# + [markdown] colab_type="text" id="kDnr50l2VrWu"
# As we can see from the plots, training accuracy and validation accuracy are off by large margin and our model has achieved only around **70%** accuracy on the validation set (depending on the number of epochs you trained for).
#
# This is a clear indication of overfitting. Once the training and validation curves start to diverge, our model has started to memorize the training data an is unable to perform well on the validation data.
| courses/udacity_intro_to_tensorflow_for_deep_learning/l05c01_dogs_vs_cats_without_augmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import datetime as dt
import os
import matplotlib.pyplot as plt
from numpy import exp, abs, log
import pickle
from metrics import *
import universal as up
from universal import tools, algos
from universal.algos import *
# +
data = tools.dataset('msci')

# plot first three of them as example
data.iloc[:,:3].plot()
# -

data

# +
# Convert log prices to log returns in place. Iterating backwards ensures
# each row is differenced against the *original* previous row, not an
# already-differenced one.
# NOTE(review): row 0 is left holding the raw log price (not a return) —
# confirm that downstream consumers skip or tolerate it.
df_close = log(data)
ll = df_close.shape[0]
for i in list(range(1, ll))[::-1]:
    df_close.iloc[i] -= df_close.iloc[i-1]
# -

df_close
# +
cp = df_close
# Slide a (ntrain-row train, ntest-row test) window across the series and
# save one .npz per phase; stop when the window would run past the data.
for phs in range(0, 100000):
    print('Phase: %d'%phs)
    ntrain = 300   # training window length (rows)
    ntest = 100    # test window length (rows)
    win = 5        # lookback used to build each feature vector
    nstock = df_close.shape[1]
    choice = np.arange(nstock)
    train_st = phs*ntest
    # Note: the original chained a `test_st =` assignment here, but it was
    # immediately overwritten on the next line; the dead write is removed.
    train_ed = phs*ntest+ntrain
    test_st = phs*ntest+ntrain-win   # back up `win` rows so the first test sample has history
    test_ed = phs*ntest+ntrain+ntest
    if test_ed > ll:
        break
    cp_train = cp.iloc[train_st:train_ed, :]
    cp_test = cp.iloc[test_st:test_ed, :]
    # Hoist the DataFrame->ndarray conversions out of the nested loops;
    # the original re-ran .to_numpy() on every inner iteration.
    train_arr = cp_train.to_numpy()
    test_arr = cp_test.to_numpy()
    cp_trainx = np.zeros((ntrain - win, win * nstock))
    cp_trainy = np.zeros((ntrain - win, nstock))
    cov_train = np.cov(np.exp(train_arr.T))
    # Each training sample: target = returns at row i; features = the
    # previous `win` returns of each stock, laid out stock-by-stock.
    for i in range(win, ntrain):
        cp_trainy[i - win] = train_arr[i]
        for s in range(nstock):
            cp_trainx[i - win, s * win:(s + 1) * win] = train_arr[i - win:i, s]
    cp_testx = np.zeros((ntest, win * nstock))
    cp_testy = np.zeros((ntest, nstock))
    for i in range(win, ntest + win):
        cp_testy[i - win] = test_arr[i]
        for s in range(nstock):
            cp_testx[i - win, s * win:(s + 1) * win] = test_arr[i - win:i, s]
    np.savez('./stock_data/MSCI_stock_phase%02d_lb%d' % (phs, win), rt_trainx=cp_trainx, rt_trainy=cp_trainy, \
            rt_valx=None, rt_valy=None, \
            rt_testx=cp_testx, rt_testy=cp_testy, choice=choice, cov_train=cov_train)
    # print(np.min(cov_train))
# -
| preprocess_MSCI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + tags=[]
# Load the Labeled Faces in the Wild dataset, keeping only people with at
# least 60 images. The original cell repeated this entire download/print
# block verbatim; the redundant second copy is removed.
from sklearn.datasets import fetch_lfw_people
import matplotlib.pyplot as plt

faces = fetch_lfw_people(min_faces_per_person=60)
print(faces.target_names)
print(faces.images.shape)
# -
# Show the first 15 faces in a 3x5 grid, labelling each with the person's
# name and hiding the tick marks.
fig, ax = plt.subplots(3, 5)
for i, axi in enumerate(ax.flat):
    axi.imshow(faces.images[i], cmap='bone')
    axi.set(xticks=[], yticks=[],
            xlabel=faces.target_names[faces.target[i]])
# +
from sklearn.svm import SVC
# PCA is aliased as RandomizedPCA for continuity with older sklearn code.
# NOTE(review): plain PCA does not force the randomized SVD solver — pass
# svd_solver='randomized' if that behavior is actually wanted; confirm.
from sklearn.decomposition import PCA as RandomizedPCA
from sklearn.pipeline import make_pipeline

# Project each face onto 150 whitened principal components, then classify
# with an RBF-kernel SVM; class_weight='balanced' compensates for the
# unequal number of images per person.
pca = RandomizedPCA(n_components=150, whiten=True, random_state=42)
svc = SVC(kernel='rbf', class_weight='balanced')
model = make_pipeline(pca, svc)
# -
| python/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## NWB-Datajoint tutorial 1
#
# **Note: make a copy of this notebook and run the copy to avoid git conflicts in the future**
#
# This is the first in a multi-part tutorial on the NWB-Datajoint pipeline used in <NAME>'s lab, UCSF. It demonstrates how to run spike sorting within the pipeline.
#
# If you have not done [tutorial 0](0_intro.ipynb) yet, make sure to do so before proceeding.
#
# Let's start by importing the `nwb_datajoint` package, along with a few others.
# +
from pathlib import Path
import os
import numpy as np
import nwb_datajoint as nd
import warnings
warnings.simplefilter('ignore', category=DeprecationWarning) # ignore datajoint+jupyter async warning

# Comment these if you have already set these environment variables
# These tell nwb_datajoint/kachery where raw NWB files, the kachery cache,
# and spike-sorting output live on disk; set them before any table access.
data_dir = Path('/stelmo/nwb') # CHANGE ME TO THE BASE DIRECTORY FOR DATA STORAGE ON YOUR SYSTEM
os.environ['DJ_SUPPORT_FILEPATH_MANAGEMENT'] = 'TRUE'
os.environ['NWB_DATAJOINT_BASE_DIR'] = str(data_dir)
os.environ['KACHERY_STORAGE_DIR'] = str(data_dir / 'kachery-storage')
os.environ['SPIKE_SORTING_STORAGE_DIR'] = str(data_dir / 'spikesorting')
# -
# We also import a bunch of tables so that we can call them easily
from nwb_datajoint.common import (RawPosition, HeadDir, Speed, LinPos, StateScriptFile, VideoFile,
DataAcquisitionDevice, CameraDevice, Probe,
DIOEvents,
ElectrodeGroup, Electrode, Raw, SampleCount,
LFPSelection, LFP, LFPBandSelection, LFPBand,
SortGroup, SpikeSorting, SpikeSorter, SpikeSorterParameters, SpikeSortingWaveformParameters, SpikeSortingParameters, SpikeSortingMetrics, CuratedSpikeSorting,
FirFilter,
IntervalList, SortInterval,
Lab, LabMember, Institution,
BrainRegion,
SensorData,
Session, ExperimenterList,
Subject,
Task, TaskEpoch,
Nwbfile, AnalysisNwbfile, NwbfileKachery, AnalysisNwbfileKachery)
# In this tutorial, we will continue to work with the copy of `beans20190718.nwb` that you created in tutorial 0. If you deleted it from `Session`, make sure to re-insert before proceeding.
# Define the name of the file that you copied and renamed; make sure it's something unique.
# Name of the working copy of the session file created in tutorial 0;
# keep it unique to avoid clobbering someone else's copy.
nwb_file_name = 'despereaux20191125_.nwb'
# + tags=[]
# Display the Nwbfile table to confirm the file is registered.
Nwbfile()
# -
# ### Spike sorting
#
# In general, running spike sorting means making decisions about the following:
# 1. which eletrodes to sort together (e.g. electrodes that form a tetrode should be sorted together, but tetrodes that are far apart need not be);
# 2. which time interval to sort (e.g. there may a long period in the recording where nothing happens, and we might want to exclude that);
# 3. which spike sorter to use (e.g. Mountainsort? Kilosort? IronClust?);
# 4. given choice of the spike sorter in 3, which parameter set to use.
#
# In our Datajoint framework, everything that we do is an interaction with a table. This is true for spike sorting as well - i.e. we think of spike sorting as a process where we enter parameters of spike sorting (i.e. our decisions about the four questions above) into tables, and use that information to populate another table that will hold the result of spike sorting. Under the hood, we use a number of packages, notably `spikeinterface`. But the user need not know this - they just have to interact with the table. This makes spike sorting straightforward. In addition, the entries in these tables serve as a record of exactly which decisions you made.
# #### Define sort group
# We start with the first question: which electrodes do we want to sort together? We first inspect the `Electrode` table.
# This recording was done with polymer probes. Here `electrode_group_name` refers to a probe. We can see that there were two probes, `0` and `1`.
# get unique probe id
np.unique((Electrode & {'nwb_file_name': nwb_file_name}).fetch('electrode_group_name'))

# The sort group (set of electrodes sorted together) used in this tutorial.
sort_group_id = 10
# Note that `insert` is a method, just like `fetch`. You can insert an entry in the form of a dictionary or a list in the order of the attributes. We can look at the new entries we just made.
Electrode() & {'nwb_file_name' : nwb_file_name}
SortGroup & {'nwb_file_name' : nwb_file_name, 'sort_group_id' : sort_group_id}
SortGroup.SortGroupElectrode & {'nwb_file_name': nwb_file_name, 'sort_group_id': sort_group_id}
# #### Define sort interval
# Next, we make a decision about the time interval for our spike sorting. Let's re-examine `IntervalList`.
# Time interval and sorter choice for this sort.
interval_list_name = '02_r1'
sort_interval_name = '02_r1'
sorter_name='mountainsort4'
# #### Define sorter parameters
# Once we have decided on a spike sorter, we have to set parameters. Some of these parameters are common to all sorters (e.g. frequency band to filter the raw data before sorting begins) but most are specific to the sorter that we chose. Again, we populate `SpikeSorterParameters` table with some default parameters for each sorter, and then we add our version as a new entry.
# Let's look at the default params
# Fetch the default MountainSort4 parameter set as a starting point.
ms4_default_params = (SpikeSorterParameters & {'sorter_name' : sorter_name,
                                               'parameter_set_name' : 'default'}).fetch1()
print(ms4_default_params)
# Change the default params
param_dict = ms4_default_params['parameter_dict']
param_dict['adjacency_radius'] = 100
param_dict['curation'] = False
# Turn filter and whiten off since we will filter it prior to starting sort
param_dict['filter'] = False
param_dict['whiten'] = False
# set num_workers to be the same number as the number of electrodes
param_dict['num_workers'] = len((SortGroup.SortGroupElectrode & {'nwb_file_name': nwb_file_name,'sort_group_id':sort_group_id}).fetch('electrode_id'))
param_dict['verbose'] = True
# set clip size as number of samples for 2 milliseconds
# Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int performs the same truncation.
# NOTE(review): 1e-3 * sampling_rate is ONE millisecond of samples, not the
# two milliseconds the comment above claims — confirm which is intended.
param_dict['clip_size'] = int(1e-3 * (Raw & {'nwb_file_name' : nwb_file_name}).fetch1('sampling_rate'))
param_dict['noise_overlap_threshold'] = 0
param_dict
# Give a unique name here
parameter_set_name = 'franklab_hippocampus_test2'
SpikeSorterParameters() & {'sorter_name': 'mountainsort4'}
# Insert
(SpikeSorterParameters() & {'sorter_name' : sorter_name,
'parameter_set_name' : parameter_set_name}).delete()
SpikeSorterParameters.insert1({'sorter_name' : sorter_name,
'parameter_set_name' : parameter_set_name,
'parameter_dict' : param_dict,
'frequency_min':600,
'filter_chunk_size' : 2000000})
# Check that insert was successful
SpikeSorterParameters & {'sorter_name' : sorter_name, 'parameter_set_name' : parameter_set_name}
# #### Define qualtiy metric parameters
#
# We're almost done. There are more parameters related to how to compute the quality metrics for curation. We just use the default options here.
# we'll use `test`
SpikeSortingMetrics()
SpikeSortingParameters().drop()
# #### Bringing everything together
#
# We now collect all the decisions we made up to here and put it into `SpikeSortingParameters` table (note: this is different from spike sor*ter* parameters defined above).
# collect the params
key = dict()
key['nwb_file_name'] = nwb_file_name
key['sort_group_id'] = sort_group_id
key['sort_interval_name'] = sort_interval_name
key['interval_list_name'] = interval_list_name
key['sorter_name'] = sorter_name
key['parameter_set_name'] = parameter_set_name
key['cluster_metrics_list_name'] = cluster_metrics_list_name
# insert
SpikeSortingParameters.insert1(key, skip_duplicates = True)
# inspect
(SpikeSortingParameters & {'nwb_file_name' : nwb_file_name, 'parameter_set_name': 'franklab_hippocampus_test2'}).fetch1()
recording = SpikeSorting().get_filtered_recording_extractor(key)
data = recording.get_traces()
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy.stats as stats
data.shape
IntervalList()
# Artifact-detection thresholds applied to the filtered traces.
zscore_thresh = 10
amplitude_thresh = 3000
# NOTE(review): `zscore` is never computed in this notebook (only
# scipy.stats is imported) — this line raises NameError as written;
# presumably something like stats.zscore(data, axis=1) was intended. Confirm.
above_z = zscore > zscore_thresh
above_a = data > amplitude_thresh
# Sample indices where more than 3 channels exceed BOTH thresholds at once.
above_both = np.ravel(np.argwhere(np.sum(np.logical_and(above_z, above_a), axis=0) > 3))
# +
def crossings_pos2neg(data):
    """Return indices i where data[i] > 0 and data[i+1] <= 0 (pos-to-neg crossings)."""
    is_positive = data > 0
    falls = is_positive[:-1] & ~is_positive[1:]
    return np.flatnonzero(falls)
def crossings_neg2pos(data):
    """Return indices i where data[i] <= 0 and data[i+1] > 0 (neg-to-pos crossings).

    Bug fix: the original sliced ``pos[:,-1]`` (a 2-D column index), which
    raises IndexError for 1-D traces; the intended slice, mirroring
    crossings_pos2neg, is ``pos[:-1]``.
    """
    pos = data > 0
    return (~pos[:-1] & pos[1:]).nonzero()[0]
# -
# NOTE(review): missing parentheses — this evaluates the bound method object
# without calling get_num_frames(); confirm whether a call was intended.
recording.get_num_frames
# Zero out +/- `zero_size` samples around every detected artifact index on
# all channels.
zero_size = 30
for a in above_both:
    data[:, a-zero_size:a+zero_size] = 0
recording._timestamps
# +
# Plot a 2*w-sample window around each detected artifact, one subplot per
# electrode.
w = 50
for a in above_both:
    plt.figure()
    for e in range(4):
        plt.subplot(4,1,e+1)
        # Bug fix: the original plotted channel 3 in all four subplots even
        # though the loop iterates electrodes; plot electrode `e` instead.
        plt.plot(data[e, a-w:a+w])
# -
# Per-region lists of .mda tetrode files. This cell was pasted from MATLAB:
# the trailing `%...` comments were Python syntax errors and are converted
# to `#` comments; the set-literal data is unchanged.
vcounter=1;
ofc_filelist={'nt31.mda', 'nt32.mda', 'nt33.mda', 'nt34.mda', 'nt35.mda', 'nt36.mda', 'nt37.mda', 'nt39.mda', 'nt40.mda', 'nt41.mda', 'nt42.mda', 'nt43.mda', 'nt46.mda'};
# %ofc_filelist={'nt31.mda', 'nt32.mda', 'nt33.mda', 'nt34.mda', 'nt36.mda', 'nt39.mda', 'nt40.mda', 'nt41.mda', 'nt42.mda', 'nt43.mda'};
hpc_filelist={'nt5.mda','nt6.mda'};  # HPC
# %hpc_filelist={'nt5.mda','nt6.mda'}; %HPC
pfc_filelist={'nt15.mda','nt16.mda','nt17.mda','nt18.mda','nt23.mda','nt24.mda','nt25.mda','nt26.mda'};  # PFC
# %pfc_filelist={'nt16.mda','nt17.mda','nt18.mda','nt23.mda','nt24.mda','nt25.mda','nt26.mda'}; %PFC
nacc_filelist={'nt47.mda','nt48.mda','nt49.mda','nt50.mda','nt51.mda','nt52.mda','nt53.mda','nt54.mda','nt55.mda','nt56.mda','nt57.mda','nt58.mda','nt60.mda'};
# %nacc_filelist={'nt47.mda','nt48.mda','nt49.mda','nt50.mda','nt53.mda','nt55.mda','nt57.mda','nt58.mda','nt60.mda'};
# z  # NOTE(review): stray expression (NameError at runtime) — commented out
# #### Running spike sorting
# Now we can run spike sorting. As we said it's nothing more than populating another table (`SpikeSorting`) from the entries of `SpikeSortingParameters`.
# NOTE(review): this populates with parameter_set_name
# 'franklab_hippocampus_test', but the parameter set inserted above was
# named 'franklab_hippocampus_test2' — with a mismatched name the
# restriction matches nothing and no sorting runs. Confirm which is intended.
SpikeSorting.populate((SpikeSortingParameters & {'nwb_file_name' : nwb_file_name, 'parameter_set_name' : 'franklab_hippocampus_test'}).proj())
# +
import spiketoolkit as st

# st.preprocessing.bandpass_filter?
# -
| notebooks/artifact_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# <script>
# function code_toggle() {
# if (code_shown){
# $('div.input').hide('500');
# $('#toggleButton').val('Show Code')
# } else {
# $('div.input').show('500');
# $('#toggleButton').val('Hide Code')
# }
# code_shown = !code_shown
# }
#
# $( document ).ready(function(){
# code_shown=false;
# $('div.input').hide()
# });
# </script>
# <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
# +
import numpy as np
import pandas as pd
tweaked_results = '../data/TweakedResults2017-12-8.xls'
data = pd.read_excel(tweaked_results,
                    usecols = "A, N, O, P, Q")
# The image name encodes the animal: first 2 characters = litter,
# first 4 characters = animal ID.
data['Litter'] = [x[:2] for x in data['Image Name']]
data['AnimalID'] = [x[:4] for x in data['Image Name']]
#data.head()
# -

key = pd.read_excel('../data/genotypingkey.xlsx')
#key.head()

# +
# Attach genotype information by joining measurements to the genotyping key
# on AnimalID (inner join drops animals without a genotype).
df = pd.merge(left = data, right = key,
             how = 'inner',
             left_on = 'AnimalID',
             right_on = 'AnimalID'
             )

## Drop unneeded columns
df.drop('Litter_y', axis = 1, inplace = True)
df.drop('ID', axis = 1, inplace = True)
df.drop('Image Name', axis = 1, inplace = True)

df.rename(columns = {'Litter_x':'Litter'},
         inplace = True)

df['Branch Per Vessel Length'] = df['Total Number of Junctions']/df['Total Vessels Length']
#df.head()
# -
# Average every measurement within each (Litter, Genotype) group.
# Compat fix: DataFrame.mean(level=...) was deprecated in pandas 1.3 and
# removed in 2.0; groupby(level=...).mean() is the supported equivalent.
# numeric_only=True reproduces the old behavior of silently skipping the
# non-numeric AnimalID column.
data_mean = (df
             .set_index(['Litter', 'Genotype'])  # pivot
             .sort_index(level = ['Litter', 'Genotype'])
             .groupby(level = ['Litter', 'Genotype'])
             .mean(numeric_only = True)
            )
data_mean
wild = data_mean.xs('+/+', level = 'Genotype')
mutant = data_mean.xs('DEE/DEE', level = 'Genotype')
diff = wild['Branch Per Vessel Length'] - mutant['Branch Per Vessel Length']
print('wild - mutant')
print(diff)
print('mean difference across litters: {:2f}'.format(diff.values.mean()))
# +
from scipy import stats
t_stat, p_val = stats.ttest_rel(wild['Branch Per Vessel Length'],
mutant['Branch Per Vessel Length'])
print("p-value for branch per vessel length: {:f}".format(p_val))
# -
# ## Before averaging within litter/genotype
# +
import matplotlib
# %matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
#sns.set(style='ticks', color_codes=True)
# Pairwise regression plots of vessel metrics, colored by genotype,
# before averaging within litter/genotype.
sns.pairplot(df,
             hue = 'Genotype',
             hue_order = ['+/+', 'DEE/DEE'],
             #vars=['Total Vessels Length',
             #     'Junctions density',
             #     'Branch Per Vessel Length',
             #     'Total Number of Junctions'],
             x_vars=['Total Vessels Length', 'Junctions density', 'Vessels percentage area'],
             y_vars=['Total Number of Junctions', 'Branch Per Vessel Length'],
             markers=['s', 'v'],
             kind='reg',
             # Compat fix: seaborn renamed `size` to `height` in 0.9 and
             # later removed `size`; height=5 is the equivalent.
             height=5,
            )
sns.set(style="white", color_codes=True)
#sns.set_context("talk")
# -
# Flatten the (Litter, Genotype) MultiIndex back into columns so pairplot
# can use Genotype as the hue variable.
data_mean.reset_index(inplace=True)
#data_mean

# ## After averaging over litter/genotype

sns.pairplot(data_mean,
             hue = 'Genotype',
             hue_order = ['+/+', 'DEE/DEE'],
             #vars=['Total Vessels Length',
             #     'Junctions density',
             #     'Branch Per Vessel Length',
             #     'Total Number of Junctions'],
             x_vars=['Total Vessels Length', 'Junctions density', 'Vessels percentage area'],
             y_vars=['Total Number of Junctions', 'Branch Per Vessel Length'],
             markers=['s', 'v'],
             kind='reg',
             # Compat fix: seaborn renamed `size` to `height` in 0.9 and
             # later removed `size`; height=5 is the equivalent.
             height=5,
            )
# + active=""
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#999; background:#fff;">
# Created with Jupyter, delivered by Fastly, rendered by Rackspace.
# </footer>
| notebooks/BranchesPerVesselLength.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Import Package
# +
import numpy as np
import pandas as pd
import random
from pandas.api.types import CategoricalDtype
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder, MinMaxScaler, FunctionTransformer
from sklearn.model_selection import KFold, StratifiedKFold, GroupShuffleSplit, GroupKFold, train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn.feature_selection import f_regression, SelectKBest, SelectPercentile, mutual_info_regression
plt.rcParams['figure.figsize'] = (12, 9)
plt.rcParams['font.size'] = 16
# -
# ## Import Data and Data Manipulations
df = pd.read_csv("../data/cook_county_housing.csv", index_col='Unnamed: 0')
# Remove spaces from column names so they can be referenced consistently.
df.columns = df.columns.str.replace(" ", "")
#df.columns
# Work with log-transformed price and building size throughout.
df["LogSalePrice"] = np.log(df["SalePrice"])
df["LogBuildingSquareFeet"] = np.log(df["BuildingSquareFeet"])
# Bucket log sale price into three coarse bands (used later only as the
# stratification target for train/test splitting).
# Bug fix: the original used chained indexing (df["col"][mask] = ...),
# which assigns through a possibly-temporary intermediate Series and is
# unreliable — and SettingWithCopyWarning was suppressed by the warnings
# filter above. .loc assigns directly on df.
# NOTE(review): assigning strings into a float column upcasts it to object
# dtype; initialize differently if a clean dtype matters.
df["Grp_LogSalePrice"] = np.zeros(df.shape[0])
df.loc[df["LogSalePrice"] < 2.5, "Grp_LogSalePrice"] = "lessthan2.5"
df.loc[(df["LogSalePrice"] >= 2.5) & (df["LogSalePrice"] < 7.5), "Grp_LogSalePrice"] = "between2.5and7.5"
df.loc[df["LogSalePrice"] >= 7.5, "Grp_LogSalePrice"] = "over7.5"

# Drop rows whose LogSalePrice lies outside 1.5*IQR of the quartiles.
Q1 = df["LogSalePrice"].quantile(0.25)
Q3 = df["LogSalePrice"].quantile(0.75)
IQR = Q3 - Q1
IQR
outliers = ~((df["LogSalePrice"] < (Q1 - 1.5*IQR)) | (df["LogSalePrice"] > (Q3 + 1.5*IQR)))
df1 = df.copy()
df = df1.loc[outliers,]
df.shape
# ## Data Preprocessing
# +
# Feature groups by the preprocessing each receives:
#   maxmin_ftr  -> MinMaxScaler, std_ftr -> StandardScaler,
#   ord_ftr     -> OrdinalEncoder (categories in ord_cat),
#   onehot_ftr  -> OneHotEncoder, no_need -> dropped entirely.
maxmin_ftr = ['Longitude','Latitude',]
std_ftr = ['Fireplaces','LandSquareFeet', 'Garage1Size', 'Garage2Size',
                 "NumberofCommercialUnits",
                 'Estimate(Land)','Estimate(Building)', 'Age', 'SaleYear',
                 'SaleQuarter', 'SaleHalf-Year', 'AgeDecade', 'LotSize','LogBuildingSquareFeet']
no_need = ['PIN', 'DeedNo.', 'Description', 'SalePrice', 'LogSalePrice','CensusTract',
          'SiteDesirability', 'OtherImprovements', 'ModelingGroup', "BuildingSquareFeet"]
ord_ftr = ["BasementFinish", "AtticType", "ConstructionQuality", "RepairCondition", "Floodplain"]
onehot_ftr = ["PropertyClass","NeighborhoodCode", "TownCode", "Apartments" ,"WallMaterial", "RoofMaterial","Basement",
              "CentralHeating", "OtherHeating", "CentralAir", "AtticFinish", "DesignPlan", "CathedralCeiling",
              "Garage1Material", "Garage1Attachment", "Garage1Area",
              "Garage2Material", "Garage2Attachment", "Garage2Area",
              "Porch", "MultiCode", "MultiPropertyIndicator", "Use", "O'HareNoise", "RoadProximity",
              "SaleQuarterofYear", 'SaleMonthofYear', 'SaleHalfofYear', "MostRecentSale", "PureMarketFilter",
              "GarageIndicator", "NeigborhoodCode(mapping)", "TownandNeighborhood"]
ord_cat = [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [0.0, 1.0]]
# -

X = df.loc[:, ((df.columns != "Grp_LogSalePrice"))]
X = X.drop(columns = no_need)
y = df["LogSalePrice"]
# ## Feature Selection
# +
# NOTE: X and y are rebuilt here identically to the cell above (redundant,
# but harmless — it lets this cell run standalone).
X = df.loc[:, ((df.columns != "Grp_LogSalePrice"))]
X = X.drop(columns = no_need)
y = df["LogSalePrice"]

preprocessor = ColumnTransformer(
   transformers=[
       ('onehot', OneHotEncoder(sparse=False,handle_unknown='ignore'), onehot_ftr),
       ('minmax', MinMaxScaler(), maxmin_ftr),
       ('std', StandardScaler(), std_ftr),
       ('ord', OrdinalEncoder(categories = ord_cat), ord_ftr)])
clf = Pipeline(steps=[('preprocessor', preprocessor)])
original_prep = clf.fit_transform(X)
# NOTE(review): OneHotEncoder.get_feature_names was deprecated in sklearn
# 1.0 in favor of get_feature_names_out, and `sparse=` was renamed to
# `sparse_output=` in 1.2 — update these if running on a recent sklearn.
feature_names = list(preprocessor.named_transformers_['onehot'].get_feature_names(onehot_ftr)) + preprocessor.transformers_[1][-1] + \
                preprocessor.transformers_[2][-1] + preprocessor.transformers_[3][-1]
df_original_prep = pd.DataFrame(data = original_prep, columns = feature_names)
# Keep the top 66% of features ranked by univariate F-test against y.
f_selector = SelectPercentile(f_regression, percentile = 66)
f_selector.fit_transform(df_original_prep, y)
f_sel_66per = list(np.array(feature_names)[(f_selector.get_support())])
# -
# Column indices of the selected features in the preprocessed matrix.
indices = [i for i in range(len(feature_names)) if ((f_selector.get_support())[i] == True)]
# ## Model Training
# +
preprocessor = ColumnTransformer(
transformers=[
('onehot', OneHotEncoder(sparse=False,handle_unknown='ignore'), onehot_ftr),
('minmax', MinMaxScaler(), maxmin_ftr),
('std', StandardScaler(), std_ftr),
('ord', OrdinalEncoder(categories = ord_cat), ord_ftr)])
def MLpipe_strKFold_RMSE(X, y, preprocessor, ML_algo, parameter_grid, strati_tar, num_folds):
    """Evaluate one estimator over five random stratified train/test splits.

    For each of five random seeds: split X/y 80/20 (stratified on strati_tar),
    grid-search `ML_algo` inside a preprocessing pipeline with `num_folds`-fold
    CV using an RMSE scorer, then record the best params and the test RMSE.

    Returns the GridSearchCV object, split arrays, and predictions from the
    LAST split, plus the per-split best-parameter dicts and test RMSEs.
    """
    collected_params = []
    collected_rmses = []
    seeds = [random.randint(0, 1000) for _ in range(5)]
    for seed in seeds:
        model_pipe = Pipeline(steps = [('preprocessor', preprocessor),
                                       ('regressor', ML_algo)])
        X_other, X_test, y_other, y_test = train_test_split(X, y, test_size= 0.20,
                                                            random_state = seed, stratify = strati_tar)
        folds = KFold(n_splits = num_folds, shuffle = True, random_state=seed)
        # greater_is_better=False + squared=False => negated-RMSE scorer
        grid = GridSearchCV(model_pipe, param_grid = parameter_grid,
                            scoring = make_scorer(mean_squared_error, greater_is_better = False, squared = False),
                            cv = folds, return_train_score = True, n_jobs = -1, verbose = True)
        grid.fit(X_other, y_other)
        predictions = grid.predict(X_test)
        collected_params.append(grid.best_params_)
        collected_rmses.append(mean_squared_error(y_test, predictions, squared = False))
    return grid, X_other, y_other, X_test, y_test, collected_params, collected_rmses, predictions
# -
# NOTE(review): notebook cells run out of textual order -- grid_ridge is
# defined in the "Ridge" cell further down; run that first.
grid_ridge.score
# #### Lasso
# +
# Don't run this
from sklearn.linear_model import Lasso
regressor = Lasso(max_iter=100000)
# Regularization strengths searched for both Lasso and Ridge.
alpha = [1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3]
param_grid = {"regressor__alpha": alpha}
grid_lasso, X_train_lasso, y_train_lasso, X_test_lasso, y_test_lasso, models_lasso, scores_lasso, predictions_lasso = MLpipe_strKFold_RMSE(X, y,
preprocessor, regressor, param_grid, df["Grp_LogSalePrice"], 4)
# -
# Spread of the five test RMSEs across random splits.
np.std(scores_lasso)
# #### RF
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor()
grid = {'regressor__n_estimators': [10, 100, 300, 500, 1000],
        'regressor__max_depth': [10, 30, 50, 70]}
grid_rf, X_train_rf, y_train_rf, X_test_rf, y_test_rf, models_rf, scores_rf, predictions_rf = MLpipe_strKFold_RMSE(X, y,
preprocessor, regressor, grid, df["Grp_LogSalePrice"], 4)
scores_rf
# #### Ridge
from sklearn.linear_model import Ridge
regressor = Ridge(max_iter = 1000000)
grid = {"regressor__alpha": alpha}
grid_ridge, X_train_ridge, y_train_ridge, X_test_ridge, y_test_ridge, models_ridge, scores_ridge, predictions_ridge = MLpipe_strKFold_RMSE(X, y, preprocessor,
regressor, grid, df["Grp_LogSalePrice"], 4)
# NOTE(review): scores_svr comes from the SVR cell below; the `+` here
# concatenates the three Python score lists before taking the std.
models_ridge, np.std(scores_ridge + scores_lasso + scores_svr)
# #### SVR
from sklearn.svm import SVR
regressor = SVR()
grid = {'regressor__C': [1e-2, 1e-1, 1e1, 1e2],
        'regressor__gamma': [1e-2, 1e-1, 1e1, 1e2]}
grid_svr, X_train_svr, y_train_svr, X_test_svr, y_test_svr, models_svr, scores_svr, predictions_svr = MLpipe_strKFold_RMSE(X, y, preprocessor, regressor, grid, df["Grp_LogSalePrice"], 4)
models_svr, scores_svr
# ## results
# Baseline: always predict the mean log sale price.
baseline_regression = np.mean(y)
#mean_squared_error(y, baseline_regression)
baseline_regression
# NOTE(review): 168757 is presumably len(y) -- hard-coded; confirm it matches
# the row count of df.
baseline_pred = [baseline_regression for i in range(168757)]
baseline_rmse = mean_squared_error(y, baseline_pred, squared = False)
np.std(np.sqrt(((y - baseline_pred)**2)))
# Effect size of Ridge vs. baseline, in units of per-sample error std.
# NOTE(review): hist_ridge is defined a few lines below; run those first.
(baseline_rmse - hist_ridge)/(np.std(np.sqrt(((y - baseline_pred)**2))))
# Mean test RMSE per model (used by the bar chart in the next cell).
hist_lasso = np.mean(scores_lasso)
hist_rf = np.mean(scores_rf)
hist_ridge = np.mean(scores_ridge)
hist_svr = np.mean(scores_svr)
# Bar chart of mean test RMSE per model against the baseline.
# NOTE(review): this rebinds `y` (previously the LogSalePrice target) to the
# list of model RMSEs -- any later cell that reuses `y` as the target must
# be run before this one.
y = [hist_lasso, hist_rf, hist_ridge, hist_svr]
plt.barh(range(4), y)
plt.yticks(range(4), ["Lasso", "Random Forest", "Ridge", "SVR"])  # fixed "Randome" typo
# The scores are RMSEs (squared=False above); barh puts values on the x-axis,
# so the axis labels below are swapped relative to the original cell.
plt.axvline(baseline_rmse, label = "Baseline RMSE Score", color = "red")
plt.xlabel("RMSE Score")
plt.ylabel("Models")
plt.title("RMSE Scores for different Models")
plt.legend()
# Annotate each bar with its numeric value.
for index, value in enumerate(y):
    plt.text(value, index, str(value))
plt.show();
#plt.savefig('../data/histogram_mse.png')
# #### Global Feature Importances
# ##### Coefficients
# The fitted Ridge model is the last step of the best pipeline.
coefs = grid_ridge.best_estimator_[-1].coef_
sorted_coefs = np.argsort(np.abs(coefs))
# Plot the 20 largest-|coefficient| features.
# NOTE(review): the title says "Top 10" but 20 coefficients are plotted.
plt.barh(np.arange(20), coefs[sorted_coefs[-20:]])
plt.yticks(np.arange(20), [feature_names[i] for i in sorted_coefs[-20:]])
plt.xlabel("Coefficients")
plt.ylabel("Features")
plt.title("The Bar Plot of the Top 10 Highest Coefficients and Values");
# ##### Permutation Feature Importance
ftr_names = X.columns
ftr_names
X_test_ridge_trans
# +
# Permutation importance: shuffle one raw column at a time and record the
# pipeline's score on the perturbed test set (nr_runs shuffles per feature).
np.random.seed(42)
nr_runs = 10
scores = np.zeros([len(ftr_names), nr_runs])
for i in range(len(ftr_names)):
    acc_scores = []
    for j in range(nr_runs):
        X_test_shuffled = X_test_ridge.copy()
        X_test_shuffled[ftr_names[i]] = np.random.permutation(X_test_ridge[ftr_names[i]].values)
        # grid.score uses the negated-RMSE scorer, hence the abs()
        acc_scores.append(np.abs(grid_ridge.score(X_test_shuffled, y_test_ridge)))
    scores[i] = acc_scores
# -
# Unperturbed test score for reference (negated-RMSE scorer, hence abs()).
test_score_ridge = np.abs(grid_ridge.score(X_test_ridge, y_test_ridge))
# Rank features by mean permutation score and keep the top 10.
sorted_indices = np.argsort(np.mean(scores, axis = 1))[::-1][:10]
# boxplot wants one column per feature, hence the transpose. The old trailing
# [:10] sliced the transposed *runs* axis (a no-op with nr_runs == 10, and a
# silent bug for any other nr_runs), so it is removed.
plt.boxplot(scores[sorted_indices].T, labels = ftr_names[sorted_indices], vert = False)
plt.title("Permutation Importances (test set)")
plt.xlabel("score with perturbed feature")  # fixed "pertubed" typo
plt.show()
# ##### Global -- Shap Values
X_test_ridge
# Apply only the preprocessing step (index 0 of the pipeline) so SHAP sees
# the same numeric matrix the regressor was fit on.
X_test_ridge_trans = grid_ridge.best_estimator_[0].transform(X_test_ridge)
X_train_ridge_trans = grid_ridge.best_estimator_[0].transform(X_train_ridge)
np.shape(X_test_ridge_trans)
# NOTE(review): shapval is computed in a later cell; run it before this line.
np.shape(shapval)
import shap
shap.initjs()
# Explain the fitted Ridge model taken from the end of the best pipeline
# (grid_ridge.best_estimator_ is [preprocessor, regressor], the same access
# pattern used for coef_ above).
# BUG FIX: the previous cell referenced `ridge_regressor`, which only existed
# in commented-out code and raised NameError; a stray discarded transform()
# call is also removed.
ridge_regressor = grid_ridge.best_estimator_[-1]
explainer = shap.LinearExplainer(ridge_regressor, X_test_ridge_trans)
shapval = explainer.shap_values(X_test_ridge_trans)
shap.summary_plot(shapval, X_test_ridge_trans, feature_names=feature_names)
shapval
# #### Local Shap
# Dependence plots for two specific one-hot town/neighborhood columns.
ftr = "TownandNeighborhood_39280"
shap.dependence_plot(ftr, shapval, X_test_ridge_trans, feature_names=feature_names)
ftr = "TownandNeighborhood_70150"
shap.dependence_plot(ftr, shapval, X_test_ridge_trans, feature_names=feature_names)
len(feature_names)
| src/submission.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="DAnCpLjTo1kW"
# ! mkdir ~/.kaggle
# ! cp kaggle.json ~/.kaggle/
# ! chmod 600 ~/.kaggle/kaggle.json
# + colab={"base_uri": "https://localhost:8080/"} id="s4o_Uhddo1dJ" outputId="5d89340b-7537-431b-9dee-ac249a195f84"
# ! kaggle datasets download -d ashenafifasilkebede/dataset
# + colab={"base_uri": "https://localhost:8080/"} id="cnk8a3KMo1Vr" outputId="9d349388-2380-43c5-9af1-011e86ee28d5"
# ! unzip dataset.zip
# + id="RxFhfW3Eo1L4"
import os
import matplotlib.pyplot as plt
import pandas as pd
import cv2
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="bHQbFgJrpVFI" outputId="c2b905c4-b0ca-41b7-a02e-dd8d2cf28bcd"
# Sanity check: load, convert, and display a single training image
# (both loops break after the first iteration).
img_path='/content/train'
directories=['Normal','OSCC']
for dir in directories:  # NOTE(review): `dir` shadows the builtin
    path=os.path.join(img_path,dir)
    class_label=directories.index(dir)
    for img in os.listdir(path):
        img_array=cv2.imread(os.path.join(path,img))
        print(img_array.shape)
        # OpenCV loads BGR; convert to RGB for matplotlib display
        colored=cv2.cvtColor(img_array,cv2.COLOR_BGR2RGB)
        new_array=cv2.resize(colored,(200,200))
        plt.imshow(new_array)
        break
    break
# + id="ymx989RHou4C"
# Accumulators filled by create_training(): [image, class_index] pairs and a
# parallel filename/label record for building a DataFrame later.
training_data=[]
img_names={'image':[],
           'label':[]}
img_path='/content/train'
directories=['Normal','OSCC']
def create_training():
    """Load every image under img_path/<class>, convert BGR->RGB, resize to
    200x200, and append [image, class_index] to the module-level lists.

    Class index is the position in `directories` (0 = Normal, 1 = OSCC).
    """
    for dir_name in directories:  # renamed from `dir` (shadowed the builtin)
        path = os.path.join(img_path, dir_name)
        class_label = directories.index(dir_name)
        for img in os.listdir(path):
            img_array = cv2.imread(os.path.join(path, img))
            if img_array is None:
                # cv2.imread returns None for unreadable/corrupt files;
                # skip instead of crashing in cvtColor
                continue
            colored = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
            new_array = cv2.resize(colored, (200, 200))
            training_data.append([new_array, class_label])
            img_names['image'].append(img)
            img_names['label'].append(class_label)
create_training()
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="8xXY2l70ze4K" outputId="d0f05a3d-ec71-4482-a564-ccdd665219ed"
# Show the first six loaded training images in a 2x3 grid.
for i in range(6):
    plt.subplot(2,3,i+1)
    plt.imshow(training_data[i][0])
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="oO-cOI-DuO63" outputId="89708dfb-dd83-435c-c264-803cb464f375"
# Class balance of the loaded data (0 = Normal, 1 = OSCC).
df1=pd.DataFrame(img_names)
df1['label'].value_counts()
# + id="tLNtAnvDo0ns"
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torch.optim as optim
import torchvision.transforms as transforms
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="H_UiYuOCr9ON" outputId="d4705174-4b42-44b1-9ec0-1b40a948e1b6"
df1.head(
)
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="nExhQB6yrNjo" outputId="8a7645ba-33f1-42b2-c00b-883e5ebfdf82"
df1.tail()
# + id="zso6pApAz_5m"
| Day42/Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'> Exploratory Data Analysis with Python</font>
# ### <NAME>, Data and Statistics Librarian
# ### <NAME>, Government Information and Statistics Librarian
# ### <EMAIL>
# + [markdown] slideshow={"slide_type": "slide"}
# # <font color='blue'> Outline </font>
#
#
# -
# ## <font color='blue'> 1 Overview</font>
# - What is Python and why use it?
# - Versions of Python
# - How Python works - programming languages, objects and methods, libraries, indentation/white space (might integrate with demo)
# - Indexing (starts at 0), rows (records) and columns (variables/attributes) (might integrate with demo)
# - Using Python - Jupyter Notebooks, spyder, Colab
# -- Jupyter notebooks - cells of code and markdown; last line determines output of cell; running cells (change to *); do you expect to see output?
#
# "Anaconda Navigator is a desktop graphical user interface included in Anaconda that allows you to launch applications and easily manage conda packages, environments and channels without the need to use command line commands."
# "Anaconda® is a package manager, an environment manager, a Python/R data science distribution, and a collection of over 1,500+ open source packages. "
#
#
#
#
# ## <font color='blue'> 2 Import libraries and import your data </font>
#
# *some setup stuff like import pandas as pd pd.set_option('display.max_rows', 500)*
#
# ## <font color='blue'> 3 Viewing your Data </font>
#
# *not showing: titanic.shape, titanic.columns*
# *displaying vs saving/creating new data frame*
#
# ## <font color='blue'> 4 Selecting and filtering your data </font>
#
# ## <font color='blue'> 5 Create crosstabs and grouping data </font>
#
# ## <font color='blue'> 6 Editing data / creating new fields </font>
#
# ## <font color='blue'> 7 Getting help </font>
# +
# not to show unless someone asks
from platform import python_version
print(python_version())
# -
# ## <font color='blue'> 2. Import packages/libraries and import your data</font>
#
# ### a) Import packages/libraries
#
# Things to consider:
# - functionality that you need
# - depending on setup of Python on your computer, you may need to install the libraries first using [Anaconda Navigator](https://docs.anaconda.com/anaconda/navigator/tutorials/manage-packages/), [conda](https://docs.anaconda.com/anaconda/user-guide/tasks/install-packages/), or the [command line](https://packaging.python.org/tutorials/installing-packages/)
# - using a nickname/short name for libraries that you will be referring to later (there are some common/standard ones)
# - syntax for importing packages/libraries: __import packagename as nickname__
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# ### b) Import existing data
#
# Things to consider:
# - where the data is stored
# - same folder as your Jupyter notebook or Python file? don't need to specify the path
# - different folder? need to specify path
# - file type of data (csv, excel, text, other) and whether you might need a package to help you read the data
# - how the data is separated (comma, space, semicolon, other)
# - is there a header row with variable names?
# - pandas makes some guesses about your data format and type
# - int64, float64, object, bool
# - in pandas, your data is stored in a data frame
#
# [Importing Data cheat sheet](https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Cheat+Sheets/Importing_Data_Python_Cheat_Sheet.pdf)
titanic = pd.read_csv('titanic.csv', sep=';')
# ## <font color='blue'>3. Viewing your Data</font>
#
# ### a) View the first few rows of data
#
# Things to note:
# - first row is Row 0
# - you can indicate how many rows you want to see by including a number in parentheses (default is 5)
titanic.head()
# ### b) Variable/column names and types
#
# Things to consider:
# - did pandas guess correctly about the type of data? What can you do if it didn't?
# - use __.astype()__
# - code: __titanic['ColumnName'] = titanic['ColumnName'].astype('NewDataType')__
# - you can also see just a list of column names with the code: __titanic.columns__
titanic.info()
# ### c) Missing/null data
#
# Things to consider:
# - why is the data "missing"?
# - how will missing data affect your analyses? What can you do to address this?
# - need to know when pandas includes/excludes null values
titanic.isnull().sum()
# ### d) View a summary of your variables
#
# Things to consider:
# - what kinds of summary measures are meaningful for different variable types?
# - how is the mean value of age calculated?
titanic.describe()
titanic.describe(include='all')
# ### e) View more meaningful summary data for categorical data
titanic['survived'].value_counts()
# ### f) Create a bar plot for categorical data
titanic['survived'].value_counts().plot(kind='bar')
# ### g) Create a histogram for continuous, numerical data
titanic['age'].plot(kind='hist')
# ## Exercise
#
# 1. What was the most common age of passengers on the Titanic? Hint: use value_counts()
# 2. Create a bar plot of passenger class
titanic['age'].value_counts()
titanic['pclass'].value_counts().plot(kind='bar')
# ## <font color='blue'>4. Selecting and filtering your data</font>
#
# Things to note:
# - syntax differences when selecting [one] vs [[multiple]] columns
titanic.columns
# ### a) Select/view one column
titanic['name']
# ### b) Select/view multiple columns
titanic[ ['name', 'fare'] ]
# ### c) Select/view specific rows and columns
#
# Note: this will return values at the specific rows and columns that you specify (so rows 0:10 will show 11 rows, from Row 0 to Row 10)
titanic.loc[0:10, ['name', 'fare']]
# ### d) Select/view rows and columns by range of indices
#
# Note: this will return values at the ranges of rows and columns that you specify. In Python, this means from the lower index to one less than the higher index (so rows 0:10 will show 10 rows, from Row 0 to Row 9, and columns 0:3 will show 3 columns, from Column 0 to Column 2)
titanic.iloc[0:10, 0:3]
# ## Exercise
#
# 1. Show the final 10 rows of the data set, and the name and home destination columns.
# ### e) Select/view data that meets certain conditions (filters)
titanic[titanic['fare'] > 50]
titanic[titanic['survived'] == 1]
titanic[titanic['name'].str.contains("Robert")]
titanic[titanic['name'] == "Chisholm, Mr. <NAME>"]
# ### f) Number of rows that meet your conditions
#
len(titanic[titanic['fare'] > 50])
# ### g) Combine multiple filters
#
# Note: Combine with & (this means AND) or | (this means OR)
titanic[(titanic['fare'] > 50) & titanic['name'].str.contains("Robert")]
titanic[((titanic['pclass']==1) | (titanic['pclass']==2))]
# ## Exercise
#
# 1. Create a filter that lists passengers who did not survive
# 2. Combine the filters we created earlier to create a list of passengers with the name Robert who survived
# 3. Create a filter that lists passengers in class 1 who were more than 30 years old
# 4. How many passengers fit the criteria from question 3?
#
# Exercise solutions.
titanic[titanic['survived'] == 0]
# Parenthesize the comparison: in Python `&` binds tighter than `==`, so the
# unparenthesized form computed `(contains & survived) == 1`. The result was
# coincidentally the same here (survived is 0/1), but the explicit grouping
# matches the lesson's stated pattern and avoids the precedence trap.
titanic[titanic['name'].str.contains("Robert") & (titanic['survived'] == 1)]
titanic[(titanic['pclass'] == 1) & (titanic['age'] > 30)]
len(titanic[(titanic['pclass'] == 1) & (titanic['age'] > 30)])
# ## <font color='blue'>5 Creating crosstabs and grouping data</font>
# ### a) Create crosstabs
#
# Things to think about:
# - data types of variables you're interested in
pd.crosstab(titanic.pclass, titanic.survived)
# Use the normalize argument to display crosstab values as percentages
pd.crosstab(titanic.pclass, titanic.survived, normalize='index')
# Cross tabs aren't just limited to comparing two variables at a time. Let's say we want to compare passenger class, sex and survival rates. We can use square brackets [ ] to incorporate more variables into the crosstab, similar to earlier examples.
pd.crosstab([titanic.pclass, titanic.sex], titanic.survived, normalize='index')
# ### b) Grouping Data
#
# - when does it make sense to use sum, mean, value_counts?
titanic.groupby('pclass').mean()
titanic.groupby('pclass').sum()
titanic.groupby('pclass')['survived'].sum()
titanic.groupby('pclass')['survived'].value_counts()
# ### Exercise
#
# 1. Create a crosstab to show the numbers of men and women who survived.
# 2. Create a table to show the same data using groupby.
#
# Which output is easier to read?
pd.crosstab(titanic.sex, titanic.survived)
titanic.groupby('sex')['survived'].value_counts()
# ## <font color='blue'>6 Editing data and creating new fields</font>
#
# Often variables in datasets use codes that aren't very descriptive. It's helpful to first view all codes in a variable before editing.
titanic['embarked'].value_counts()
# Next, read the codebook to understand what the codes mean. There are 3 codes for embarkation points: S = Southampton, C = Cherbourg and Q = Queenstown. Start the next line with the name of the variable you would like to edit, e.g. titanic['embarked'].
#
# Use the = sign next to make sure you write the change to the entire variable and save it. This is similar to value assignment in algebra, e.g. x = y + z.
#
# We can use the .replace( ) method to change our codes to names. We can use .value_counts( ) to check our work.
titanic['embarked'] = titanic['embarked'].replace(['S', "C", "Q"], ["Southampton", "Cherbourg", "Queenstown"])
titanic['embarked'].value_counts()
# ### Creating new variables
#
# The syntax for creating new variables in a dataframe starts by calling the dataframe by name and placing the variable name is square brackets in quotes and assigning value with an equal sign. e.g. dataframe['new variable'] = value.
#
# Let's say we want to calculate the fare variable in Canadian dollars. In 1912, the value of the Canadian dollar was pegged at 4.8666CAD to one British Pound Sterling.
titanic['fare_CAD'] = titanic['fare']*4.8666
# Check if the new variable has been added by using the .head( ) method.
titanic.head()
# ## Exercise
#
# Create a new variable called 'is_child'. Filter the data for all passengers under the age of 18 and assign the results to the new variable. Check your new variable using .value_counts(). Next, do a crosstab to check survival rates for children vs. adults. **Bonus:** Add pclass to the crosstab to see how many children and adults in first, second, and third class survived or perished.
# +
## solution titanic['is_child'] = titanic['age'] < 18
# +
## solution titanic['is_child'].value_counts()
# +
## solution pd.crosstab(titanic.is_child, titanic.survived)
# -
| uoft-exploratortdataanalysis/.ipynb_checkpoints/ExploratoryDataAnalysisWithPython-Instructors-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def gaussian_elimination_with_pivot(m):
    """Solve a linear system via Gaussian elimination with partial pivoting.

    Parameters
    ----------
    m : list[list[float]]
        Augmented matrix of shape n x (n+1); modified in place.

    Returns
    -------
    list[float]
        Solution vector x of length n.

    Raises
    ------
    ValueError
        If the system has no unique solution (a zero pivot is found).
    """
    n = len(m)
    # forward elimination
    for i in range(n):
        # Partial pivoting, inlined: bring the row with the largest |entry|
        # in column i up to row i (max() returns the first maximal row, same
        # choice as the pivot() helper).
        best = max(range(i, n), key=lambda r: abs(m[r][i]))
        m[i], m[best] = m[best], m[i]
        if m[i][i] == 0:
            # Zero pivot even after the row exchange => singular system.
            # BUG FIX: the old code only checked the LAST pivot, so a zero
            # pivot mid-elimination raised ZeroDivisionError instead of the
            # documented ValueError.
            raise ValueError('No unique solution')
        for j in range(i+1, n):
            factor = m[j][i] / m[i][i]  # hoisted loop invariant
            m[j] = [m[j][k] - m[i][k]*factor for k in range(n+1)]
    # backward substitution (sum starts at i+1: x[i] is not yet known)
    x = [0] * n
    for i in range(n-1, -1, -1):
        s = sum(m[i][j] * x[j] for j in range(i+1, n))
        x[i] = (m[i][n] - s) / m[i][i]
    return x
def pivot(m, n, i):
    """Partial pivoting step, in place.

    Swap row i of the n-row augmented matrix m with the row in i..n-1 that
    has the largest absolute value in column i (the first such row wins,
    matching the original scan order). Avoids the original's shadowing of
    the builtin `max` by a local variable.
    """
    best_row = max(range(i, n), key=lambda r: abs(m[r][i]))
    m[i], m[best_row] = m[best_row], m[i]
# +
m = [[1,-1,3,2], [6,-6,2,-2], [1,1,0,3]]
print(gaussian_elimination_with_pivot(m))
# -
import numpy as np

# Cross-check the hand-rolled solver with NumPy.
# BUG FIX: the previous cell passed a 2x4 augmented slice as A and the third
# equation's row as b, which np.linalg.solve rejects (A must be square).
# Split the full 3x4 augmented matrix into coefficients and right-hand side.
m_aug = np.array([[1, -1, 3, 2], [6, -6, 2, -2], [1, 1, 0, 3]], dtype=float)
A = m_aug[:, :-1]  # 3x3 coefficient matrix
b = m_aug[:, -1]   # right-hand-side vector
x = np.linalg.solve(A, b)
x
| assignment_1/ge_with_pivoting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fish sleep and bacteria growth - A review of Statistical Thinking I and II
# > To begin, you'll use two data sets from Caltech researchers to rehash the key points of Statistical Thinking I and II to prepare you for the following case studies! This is the Summary of lecture "Case Studies in Statistical Thinking", via datacamp.
#
# - toc: true
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [Python, Datacamp, Statistics]
# - image: images/bs_rep_semilogy.png
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['figure.figsize'] = (10, 5)
# -
# ## Case Studies in Statistical Thinking
# - Active bouts: a metric for wakefulness
# - Active bout: A period of time where a fish is consistently active
# - Active bout length: Number of consecutive minutes with activity
# - The exponential distribution
# - Poisson process: The timing of the next event is completely independent of when the previous event happened
# - Story of Exponential distribution: The waiting time between arrivals of a Poisson process is Exponentially distributed
# ### EDA: Plot ECDFs of active bout length
# An active bout is a stretch of time where a fish is constantly moving. Plot an ECDF of active bout length for the mutant and wild type fish for the seventh night of their lives. The data sets are in the numpy arrays `bout_lengths_wt` and `bout_lengths_mut`. The bout lengths are in units of minutes.
# Load Gandhi et al. zebrafish activity data; the first 4 rows are metadata.
bout = pd.read_csv('./dataset/gandhi_et_al_bouts.csv', skiprows=4)
bout.head()
# Active bout lengths (minutes) split by genotype.
bout_lengths_mut = bout[bout['genotype'] == 'mut']['bout_length'].to_numpy()
bout_lengths_wt = bout[bout['genotype'] == 'wt']['bout_length'].to_numpy()
bout_lengths_het = bout[bout['genotype'] == 'het']['bout_length'].to_numpy()
# +
import dc_stat_think as dcst
# Generate x and y values for plotting ECDFs
x_wt, y_wt = dcst.ecdf(bout_lengths_wt)
x_mut, y_mut = dcst.ecdf(bout_lengths_mut)
# Plot the ECDFs as point clouds (no connecting lines)
_ = plt.plot(x_wt, y_wt, marker='.', linestyle='none')
_ = plt.plot(x_mut, y_mut, marker='.', linestyle='none')
# Make a legend, label axes
_ = plt.legend(('wt', 'mut'))
_ = plt.xlabel('active bout length (min)')
_ = plt.ylabel('ECDF')
# -
# ### Interpreting ECDFs and the story
# Q: While a more detailed analysis of distributions is often warranted for careful analyses, you can already get a feel for the distributions and the story behind the data by eyeballing the ECDFs. Which of the following would be the most reasonable statement to make about how the active bout lengths are distributed and what kind of process might be behind exiting the active bout to rest?
#
# A: The bout lengths appear Exponentially distributed, which implies that exiting an active bout to rest is a Poisson process; the fish have no apparent memory about when they became active.
# ## Bootstrap confidence intervals
# - EDA is the first step
# > "Exploratory data analysis can never be the whole story, but nothing else can serve as a foundation stone - as the first step". - <NAME>
# - Optimal parameter value
# - Optimal parameter value: The value of the parameter of a probability distribution that best describe the data
# - Optimal parameter for the Exponential disribution: Computed from the mean of the data
# - Bootstrap replicates
# - A statistic computed from a bootstrap sample
# - Bootstrap confidence interval
# - If we repeated measurements over and over again, p% of the observed values would lie within the p% confidence interval
# ### Parameter estimation: active bout length
# Compute the mean active bout length for wild type and mutant, with 95% bootstrap confidence interval.
# +
# Compute mean active bout length per genotype
mean_wt = np.mean(bout_lengths_wt)
mean_mut = np.mean(bout_lengths_mut)
# Draw 10,000 bootstrap replicates of the mean for each genotype
bs_reps_wt = dcst.draw_bs_reps(bout_lengths_wt, np.mean, size=10000)
bs_reps_mut = dcst.draw_bs_reps(bout_lengths_mut, np.mean, size=10000)
# 95% confidence interval from the 2.5th/97.5th percentiles of the replicates
conf_int_wt = np.percentile(bs_reps_wt, [2.5, 97.5])
conf_int_mut = np.percentile(bs_reps_mut, [2.5, 97.5])
# Print the results
print("""
wt: mean = {0:.3f} min., conf. int. = [{1:.1f}, {2:.1f}] min.
mut: mean = {3:.3f} min., conf. int. = [{4:.1f}, {5:.1f}] min.
""".format(mean_wt, *conf_int_wt, mean_mut, *conf_int_mut))
# -
# -
# ## Permutation and bootstrap hypothesis tests
# - Genotype definitions
# - Wile type: No mutations
# - Heterozygote: Mutation on one of two chromosomes
# - Mutant: Mutation on both chromosomes
# - Hypothesis test
# - Assessment of how reasonable the observed data are assuming a hypothesis is true
# - p-value
# - The probability of obtaining a value of your **test statistic** that is **at least as extreme as** what was observed, under the assumption the **null hypothesis** is true
# - Serves as a basis of comparison
# - Requires clear specification of:
# - **Null hypothesis** that can be simulated
# - **Test statistic** that can be calculated from observed and simulated data
# - Definition of **at least as extreme as**
# - Pipeline for hypothesis testing
# - Clearly state the null hypothesis
# - Define your test statistic
# - Generate many sets of simulated data assuming the null hypothesis is true
# - Compute the test statistic for each simulated data set
# - The p-value is the fraction of your simulated data sets for which the test statistic is at least as extreme as for the real data
# - Specifying the test
# - **Null hypothesis**: the active bout lengths of wild type and heterozygotic fish are identically distributed
# - **test statistic**: Difference in mean active bout length between heterozygotes and wile type
# - **At least as extreme as**: Test statistic is greater than or equal to what was observed
# - Permutation test
# - For eash replicate
# - Scramble labels of data points
# - Compute test statistic
# - p-value is fraction of replicates at least as extreme as what was observed
# ### Permutation test: wild type versus heterozygote
# Test the hypothesis that the heterozygote and wild type bout lengths are identically distributed using a permutation test.
#
#
# +
# Observed test statistic: difference of mean bout lengths (het - wt)
diff_means_exp = np.mean(bout_lengths_het) - np.mean(bout_lengths_wt)
# Permutation replicates under the null that the two samples are identical
perm_reps = dcst.draw_perm_reps(bout_lengths_het, bout_lengths_wt,
                                dcst.diff_of_means, size=10000)
# p-value: fraction of replicates at least as extreme as the observed value
p_val = np.sum(perm_reps >= diff_means_exp) / len(perm_reps)
# Print the result
print('p = ', p_val)
# -
# -
# A p-value of 0.001 suggests that the observed difference in means is unlikely to occur if heterozygotic and wild type fish have active bout lengths that are identically distributed.
# ### Bootstrap hypothesis test
# The permutation test has a pretty restrictive hypothesis, that the heterozygotic and wild type bout lengths are identically distributed. Now, use a bootstrap hypothesis test to test the hypothesis that the means are equal, making no assumptions about the distributions.
#
#
# +
# Concatenate arrays: bout_lengths_concat
bout_lengths_concat = np.concatenate([bout_lengths_wt, bout_lengths_het])
# Grand mean, i.e. the common mean under the null hypothesis
mean_bout_length = np.mean(bout_lengths_concat)
# Shift each sample so both share the grand mean (null true by construction)
wt_shifted = bout_lengths_wt - np.mean(bout_lengths_wt) + mean_bout_length
het_shifted = bout_lengths_het - np.mean(bout_lengths_het) + mean_bout_length
# Compute 10,000 bootstrap replicates of the mean from each shifted array
bs_reps_wt = dcst.draw_bs_reps(wt_shifted, np.mean, size=10000)
bs_reps_het = dcst.draw_bs_reps(het_shifted, np.mean, size=10000)
# Replicates of the difference of means
bs_reps = bs_reps_het - bs_reps_wt
# p-value: fraction of replicates at least as extreme as the observed difference
p = np.sum(bs_reps >= diff_means_exp) / len(bs_reps)
print('p-value =', p)
# -
# -
# We get a result of similar magnitude as the permutation test, though slightly smaller, probably because the heterozygote bout length distribution has a heavier tail to the right.
# ## Linear regressions and pairs bootstrap
# - Pairs bootstrap
# - resample data in pairs
# - Compute slope and intercept from resampled data
# - Each slope and intercept is a bootstrap replicate
# - Compute confidence intervals from percentiles of bootstrap replicates
# ### Assessing the growth rate
# To compute the growth rate, you can do a linear regression of the logarithm of the total bacterial area versus time. Compute the growth rate and get a 95% confidence interval using pairs bootstrap. The time points, in units of hours, are stored in the numpy array `t` and the bacterial area, in units of square micrometers, is stored in `bac_area`.
# Load Park et al. bacterial growth data; the first 2 rows are metadata.
bac = pd.read_csv('./dataset/park_bacterial_growth.csv', skiprows=2)
bac.head()
bac_area = bac['bacterial area (sq. microns)'].to_numpy()
t = bac['time (hr)'].to_numpy()
# +
# Exponential growth is linear in log space: log A = log A0 + r * t
log_bac_area = np.log(bac_area)
# Compute the slope and intercept: growth_rate, log_a0
growth_rate, log_a0 = np.polyfit(t, log_bac_area, deg=1)
# Draw 10,000 pairs bootstrap replicates: growth_rate_bs_reps, log_a0_bs_reps
growth_rate_bs_reps, log_a0_bs_reps = dcst.draw_bs_pairs_linreg(t, log_bac_area, size=10000)
# 95% confidence interval on the slope: growth_rate_conf_int
growth_rate_conf_int = np.percentile(growth_rate_bs_reps, [2.5, 97.5])
# Print the result to the screen
print("""
Growth rate: {0:.4f} sq. µm/hour
95% conf int: [{1:.4f}, {2:.4f}] sq. µm/hour
""".format(growth_rate, *growth_rate_conf_int))
# -
# -
# Under these conditions, the bacteria add about 0.23 square micrometers worth of mass each hour, and the error bar on that estimate is very tight.
# ### Plotting the growth curve
# You saw in the previous exercise that the confidence interval on the growth curve is very tight. You will explore this graphically here by plotting several bootstrap lines along with the growth curve. You will use the `plt.semilogy()` function to make the plot with the y-axis on a log scale. This means that you will need to transform your theoretical linear regression curve for plotting by exponentiating it.
#
#
# +
# Plot data points in a semilog-y plot with axis labels
_ = plt.semilogy(t, bac_area, marker='.', linestyle='none')
# x-values for the bootstrap lines (two endpoints suffice for straight lines)
t_bs = np.array([0, 14])
# Plot the first 100 bootstrap lines; exponentiate because the fit was done
# in log space but the plot shows raw area on a log-scaled axis
for i in range(100):
    y = np.exp(growth_rate_bs_reps[i] * t_bs + log_a0_bs_reps[i])
    _ = plt.semilogy(t_bs, y, linewidth=0.5, alpha=0.05, color='red')
# Label axes
_ = plt.xlabel('time (hr)')
_ = plt.ylabel('area (sq. µm)')
plt.savefig('../images/bs_rep_semilogy.png')
# -
# You can see that the bootstrap replicates do not stray much. This is due to the exquisitly exponential nature of the bacterial growth under these experimental conditions.
| _notebooks/2020-06-23-01-Fish-sleep-and-bacteria-growth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import seaborn as sns
# %config InlineBackend.figure_format = 'retina'
from IPython.core.display import display, HTML
display(HTML("<style>.container {width:100% !important;}</style>"))
from scipy.special import gamma, factorial,digamma
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.gridspec as gridspec
import sys
sys.path.append(r'/Users/ys18223/Documents/GitHub/FLAIR_BiocomputeLab')
# -
# # Fig H
# Input MAPE performance across simulations when estimating the mean fluorescence
# Hard-coded simulation results: MAPE (%) of the estimated mean fluorescence
# for each distribution (Gamma / Lognormal) and inference method (ML / MOM),
# 9 simulation settings per combination.  The first two nested lists are
# fractions and are converted to percent; the remaining lists are already in %.
d_mean = {'MAPE': [100*i for i in ([0.1674891 , 0.14371818, 0.12273398,
0.16679492, 0.13970324, 0.1015513 ,
0.16319497, 0.12743953, 0.06931147]+[0.51141972, 0.51385324, 0.51403695,
0.52769436, 0.51004928, 0.51341036,
0.53446 , 0.52250617, 0.5075517 ])]+[15.29211367, 14.14405139, 14.05101411]+[12.61702118, 10.50428435, 9.82247402]+[10.31754068, 7.2084087 , 4.77361639]+[16.35151345, 16.9359747 , 17.78217523]+[14.38362791, 14.93895699, 15.7100954 ]+[13.14528142, 13.4672431 , 14.25780018], 'distribution': ['Gamma']*18+['Lognormal']*18,'inference':['ML']*9+['MOM']*9+['ML']*9+['MOM']*9}
df_mean = pd.DataFrame(data=d_mean)
df_mean.head()
# +
# Create the figure
fig = plt.figure(figsize=(11.7,8.3))
gs = gridspec.GridSpec(1, 1)
ax = plt.subplot(gs[0])
my_pal = {"ML": "#2463A3", "MOM": "#B5520E"}
ax=sns.violinplot(x="distribution", y="MAPE", hue="inference",
data=df_mean, palette=my_pal)
ax.set_ylabel('MAPE (mean) %')
ax.set_xlabel('')
# my_pal = ['#2463A3', '#B5520E','#2463A3', '#B5520E']
# INF=['ML','MOM','ML','MOM']
# color_dict = dict(zip(INF, my_pal ))
# for i in range(0,4):
# mybox = ax.artists[i]
# mybox.set_facecolor(color_dict[INF[i]])
#plt.legend(frameon=False,fontsize=12)
ax.get_legend().remove()
sns.despine()
width=3.54
height=3.54
fig.set_size_inches(width, height)
plt.subplots_adjust(hspace=.0 , wspace=.00, left=.15, right=.95, top=.95, bottom=.13)
plt.show()
# -
# # Fig I
# Input MAPE performance across simulations when estimating the standard deviation (cf. the y-axis label below)
# Hard-coded simulation results: MAPE (%) of the estimated standard deviation,
# same 9-settings-per-combination layout as d_mean above (all values in %).
d_var = {'MAPE': [56.51961891, 50.47877742, 46.13735704,
56.41471139, 48.30979619, 39.03006257,
56.08137685, 44.53477141, 27.01354216]+[287.74453306, 298.1863082 , 298.21313797,299.7961364 , 300.44014621, 311.36703739,
324.08161946, 323.83104867, 327.57942772]+[67.89211699, 64.24130949, 63.92732816]+[60.43748406, 50.92945822, 46.84127056]+[54.94239969, 39.2380389 , 24.5262507 ]+[195.21194215, 232.21351093, 238.5230456 ]+[219.98637949, 221.72468045, 217.98143615]+[226.76576441, 196.59937264, 221.02871965], 'distribution': ['Gamma']*18+['Lognormal']*18,'inference':['ML']*9+['MOM']*9+['ML']*9+['MOM']*9}
df_var = pd.DataFrame(data=d_var)
df_var.head()
# +
# Create the figure
fig = plt.figure(figsize=(11.7,8.3))
gs = gridspec.GridSpec(1, 1)
ax = plt.subplot(gs[0])
my_pal = {"ML": "#2463A3", "MOM": "#B5520E"}
ax=sns.violinplot(x="distribution", y="MAPE", hue="inference",
data=df_var, palette=my_pal)
ax.set_ylabel('MAPE (standard deviation) %')
ax.set_xlabel('')
ax.get_legend().remove()
sns.despine()
width=3.54
height=3.54
fig.set_size_inches(width, height)
plt.subplots_adjust(hspace=.0 , wspace=.00, left=.15, right=.95, top=.95, bottom=.13)
plt.show()
# -
| examples/Analysis/FigHI-Comparing-Modeling_Choices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pycountry_convert as pc
import plotly.graph_objs as go
from plotly.offline import iplot
from sklearn.linear_model import LogisticRegression
# -
# - Run in the command prompt:
# - pip install pycountry-convert
# - pip install plotly==4.9.0
# # Happiness report
# <img src="https://media.giphy.com/media/11sBLVxNs7v6WA/giphy.gif" width="700" align="center">
# ## Abstract
# The purpose of choosing this work is to find out which factors are more important to live a happier life. As a result, people and countries can focus on the more significant factors to achieve a higher happiness level. We will also compare the happiness dataset from 2019 with older data.
# ## Introduction
# The World Happiness Report is a landmark survey of the state of global happiness. The first report was published in 2012, the second in 2013, the third in 2015, and the fourth in the 2016 Update. The World Happiness 2019, which ranks 156 countries by their happiness levels, was released at the United Nations at an event celebrating International Day of Happiness on March 20th. The report continues to gain global recognition as governments, organizations and civil society increasingly use happiness indicators to inform their policy-making decisions. Leading experts across fields – economics, psychology, survey analysis, national statistics, health, public policy and more – describe how measurements of well-being can be used effectively to assess the progress of nations. The reports review the state of happiness in the world today and show how the new science of happiness explains personal and national variations in happiness.
# ### Meaning of terms used in the dataset
# - **GDP per capita**:GDP per capita is a measure of a country's economic output that accounts for its number of people.
# - **Social support**:Social support means having friends and other people, including family, to turn to in times of need or crisis to give you a broader focus and positive self-image. Social support enhances quality of life and provides a buffer against adverse life events.
# - **Healthy life expectancy**:Healthy Life Expectancy is the average number of years that a newborn can expect to live in "full health"—in other words, not hampered by disabling illnesses or injuries.
# - **Freedom to make life choices**:Freedom of choice describes an individual's opportunity and autonomy to perform an action selected from at least two available options, unconstrained by external parties.
# - **Generosity**:the quality of being kind and generous.
# - **Perceptions of corruption**:The Corruption Perceptions Index (CPI) is an index published annually by Transparency International since 1995 which ranks countries "by their perceived levels of public sector corruption, as determined by expert assessments and opinion surveys.
# ## Initial Data Exploration
# Load the World Happiness Report data, one CSV per year.  Note the column
# naming differs between years (see the KDE-plot cells further below).
happiness_data_2015 = pd.read_csv("data/2015.csv")
happiness_data_2016 = pd.read_csv("data/2016.csv")
happiness_data_2017 = pd.read_csv("data/2017.csv")
happiness_data_2018 = pd.read_csv("data/2018.csv")
happiness_data_2019 = pd.read_csv("data/2019.csv")
happiness_data_2019.head()
# - Our dataset is pretty clean, but we will implement a few adjustments to make it looks better.
happiness_data_2019.shape
# - Let's check column data types
happiness_data_2019.dtypes
# - The column names are not in python format, so we are going to change them.
happiness_data_2019.columns = ["rank", "country", "score", "gdp", "social_support", "healthy_life", "freedom", "generosity", "trust"]
happiness_data_2019.sample(n=10)
# - The next step is adding another column to the dataset which is continent. I want to work on different continents to discover whether there are different trends for them regarding which factors play a significant role in gaining a higher happiness score. We will not include Antarctica in our research, so the remaining continents are Asia, Africa, North America, South America, Europe and Australia.
# - First we create a dictionary with key: continent code | value: continent full name
# Maps pycountry_convert's two-letter continent codes to display names.
continents = {
    'EU': 'Europe',
    'NA': 'North America',
    'SA': 'South America',
    'AS': 'Asia',
    'OC': 'Australia',
    'AF': 'Africa'
}
# - Following function takes country name as argument and returns the continent full name
def get_continent_by_country(country):
    """Return the continent's full name for a given country name.

    Kosovo is special-cased because pycountry_convert cannot resolve it.
    """
    if country == "Kosovo":
        return "Europe"
    alpha2 = pc.country_name_to_country_alpha2(country, cn_name_format="default")
    return continents[pc.country_alpha2_to_continent_code(alpha2)]
# - Test the get_continent_by_country function
# Sanity-check the mapping on a handful of countries.
get_continent_by_country("China")
get_continent_by_country("Bulgaria")
get_continent_by_country("Argentina")
get_continent_by_country("Australia")
get_continent_by_country("Egypt")
# - Some of the country names contain special symbols or are not correct, so the next row fixes the invalid names:
happiness_data_2019.country = happiness_data_2019.country.replace(
    ["Trinidad & Tobago", "Congo (Brazzaville)", "Congo (Kinshasa)", "Palestinian Territories"],
    ["Trinidad and Tobago", "Congo", "Democratic Republic of the Congo", "Palestine"])
# - Then we add a new column in the happiness data frame named "continent". We place it right after the country column.
happiness_data_2019.insert(2, "continent", happiness_data_2019.country.apply(get_continent_by_country))
happiness_data_2019.sample(n = 5)
# - Let's check the data for NaN values
happiness_data_2019.isnull().values.any()
happiness_data_2019.info()
# - We don't see any NaN entries
# ## Visualization
# In this section, we will play with different variables to find out how they correlate with each other.
# ### Correlation plot
# Let’s see the correlation between numerical variables in our dataset.
def draw_correlation_heatmap_matrix(dataframe):
    """Show an annotated heatmap of the pairwise correlations in *dataframe*."""
    correlations = dataframe.corr()
    plt.figure(figsize=(16, 10))
    sns.heatmap(correlations, annot=True, linewidths=1, cmap="ocean_r", fmt=".2f")
    plt.suptitle("Correlation Matrix", fontsize=18)
    plt.show()
draw_correlation_heatmap_matrix(happiness_data_2019)
# Obviously, there is an inverse correlation between “Happiness Rank” and all the other numerical variables. In other words, the lower the happiness rank, the higher the happiness score, and the higher the other six factors that contribute to happiness. So let’s remove the happiness rank, and see the correlation again.
# Drop the rank column so the heatmap only shows the score and its factors.
happiness_data_2019_without_rank = happiness_data_2019.drop(["rank"], axis=1)
draw_correlation_heatmap_matrix(happiness_data_2019_without_rank)
# - According to the above correlation plot: gdp, healthy life expectancy, and social support play the most significant role in contributing to happiness. Trust and generosity have the lowest impact on the happiness score.
# ### Comparing different continents regarding their happiness variables
#
group_by_continent = happiness_data_2019.groupby("continent")
# - First let's check the number of countries that took part in the research by continent
group_by_continent_size = group_by_continent.size().sort_values(ascending=False)
plt.figure(figsize=(12,8))
sns.barplot(x=group_by_continent_size.index, y=group_by_continent_size.values)
plt.xticks(rotation=90)
plt.xlabel('Continents')
# Fixed typo in the user-facing axis label ('Coutnries' -> 'Countries').
plt.ylabel('Countries count')
plt.title('Number of countries that took part in the research by continent')
plt.show()
def plot_continents_barchart(data, ylabel, title):
    """Draw a bar chart of a per-continent series (index = continent names)."""
    plt.figure(figsize=(12, 8))
    plt.bar(data.index, data.values)
    plt.xlabel("Continents")
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()
# Plot each happiness factor's per-continent mean.  One loop replaces seven
# copy-pasted assign-and-plot pairs; the produced figures are identical.
for column, ylabel, title in [
    ("score", "Happiness Score", "Happiness score per continent"),
    ("gdp", "Gdp", "Gdp per capita per continent"),
    ("social_support", "Social support", "Social support per continent"),
    ("healthy_life", "Healthy life expectancy", "Healthy life expectancy per continent"),
    ("freedom", "Freedom index", "Freedom index per continent"),
    ("generosity", "Generosity index", "Generosity index per continent"),
    ("trust", "Perceptions of corruption", "Perceptions of corruption per continent"),
]:
    # Keep the original variable name so later cells (if any) still see it.
    score_by_continent = group_by_continent[column].mean()
    plot_continents_barchart(score_by_continent, ylabel, title)
# - We can see that Australia has approximately the highest average in all fields, after that Europe, North America, and South America are roughly the same. Australia and Europe have a good lead in GDP per capita field. Finally, Asia and Africa have the lowest scores in all fields.
# ### Geographic Visualization of Happiness Score
# +
# Build a plotly choropleth of the 2019 happiness score per country.
data = dict(type = 'choropleth',
    locations = happiness_data_2019.country,
    locationmode = 'country names',
    colorscale='RdYlGn',
    z = happiness_data_2019.score,
    text = happiness_data_2019.country,
    colorbar = {'title':'Happiness Score'})
layout = dict(title = 'Geographical Visualization of Happiness Score',
    geo = dict(showframe = True, projection = {'type': 'azimuthal equal area'}))
choromap3 = go.Figure(data = [data], layout=layout)
iplot(choromap3)
# -
# -
# ### Compare how GDP, Social Support and health changed over the years
# Lets consider Gdp(monetary),Social Support(family) and Health as primary concerns for an individual residing in a country i.e overall wellbeing.Lets see how these distributions are altering over the years or are staying stagnant.
# #### Healthy life expectancy over the Years
# Compare the life-expectancy distributions across the five yearly datasets.
# The column name changes from year to year, hence the different keys.
plt.figure(figsize=(10,5))
sns.kdeplot(happiness_data_2015['Health (Life Expectancy)'], color='red')
sns.kdeplot(happiness_data_2016['Health (Life Expectancy)'],color='blue')
sns.kdeplot(happiness_data_2017['Health..Life.Expectancy.'],color='limegreen')
sns.kdeplot(happiness_data_2018['Healthy life expectancy'],color='orange')
sns.kdeplot(happiness_data_2019['healthy_life'],color='pink')
plt.title('Health over the Years',size=20)
plt.xlabel("Health")
plt.legend(["Health 2015", "Health 2016", "Health 2017", "Health 2018", "Health 2019"], loc='upper left')
plt.show()
# It looks like healthy life expectancy was at a pretty good level in 2015. However, after this year it experienced a drastic drop. Then a recovery can be observed during 2018 and 2019. For 2019 we have even higher values than in 2015.
# #### GDP per capita (Economy) over the Years
# Compare the GDP-per-capita distributions across the five yearly datasets
# (column names differ between years).
plt.figure(figsize=(10,5))
sns.kdeplot(happiness_data_2015['Economy (GDP per Capita)'],color='red')
sns.kdeplot(happiness_data_2016['Economy (GDP per Capita)'],color='blue')
sns.kdeplot(happiness_data_2017['Economy..GDP.per.Capita.'],color='limegreen')
sns.kdeplot(happiness_data_2018['GDP per capita'],color='orange')
sns.kdeplot(happiness_data_2019['gdp'],color='pink')
plt.title('Economy over the Years',size=20)
plt.xlabel("Gdp")
plt.legend(["Gdp 2015", "Gdp 2016", "Gdp 2017", "Gdp 2018", "Gdp 2019"], loc='upper left')
plt.show()
# We observe weak result during 2018 and 2019.
# #### Social support over the Years
# Compare the social-support distributions across the five yearly datasets
# (the factor was called "Family" before 2018).
plt.figure(figsize=(10,5))
sns.kdeplot(happiness_data_2015['Family'],color='red')
sns.kdeplot(happiness_data_2016['Family'],color='blue')
sns.kdeplot(happiness_data_2017['Family'],color='limegreen')
sns.kdeplot(happiness_data_2018['Social support'],color='orange')
sns.kdeplot(happiness_data_2019['social_support'],color='pink')
plt.title('Social support over the Years',size=20)
plt.xlabel("Social support")
plt.legend(["Social support 2015", "Social support 2016", "Social support 2017", "Social support 2018", "Social support 2019"],
    loc='upper left')
plt.show()
# The social support is increasing over the years, except for 2016. We observe the lowest values there.
# ### Conclusion
# Gdp, healthy life expectancy, and social support play the most significant role in contributing to happiness.
#
# On continent level Australia has the best mean happiness score, whereas Africa has the worst.
# ### References
# [1] https://www.kaggle.com/unsdsn/world-happiness
#
# [2] https://www.kaggle.com/javadzabihi/happiness-2017-visualization-prediction
#
# [3] https://www.kaggle.com/avnika22/world-happiness-report-eda-clustering
| FinalProject/happiness_report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 参考 https://www.jianshu.com/p/6ef54e943ad0
import pandas as pd
# Load the worksheet to analyse (the filename translates to "data processing").
df = pd.read_excel("../refs/数据处理.xlsx")
# 参考 https://stackoverflow.com/a/41709869/8625228
# +
# df.groupby(['user_id','节点'])['时间'].max().reset_index().join(df, on=['user_id', '节点', '时间'])
# df.join(df.groupby(['user_id','节点'])['时间'].max().reset_index(), on=['user_id', '节点', '时间'])
# -
# 需求没有很清晰,先 close
| xiaobinghui/analysis/.ipynb_checkpoints/pandas-lag-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import warnings
warnings.filterwarnings(action = 'ignore')
# %matplotlib inline
plt.rcParams['font.sans-serif']=['SimHei'] #解决中文显示乱码问题
plt.rcParams['axes.unicode_minus']=False
import sklearn.linear_model as LM
from sklearn.model_selection import cross_val_score,train_test_split
from sklearn.datasets import make_regression
from sklearn import tree
# +
data=pd.read_excel('北京市空气质量数据.xlsx')
data=data.replace(0,np.NaN)
data=data.dropna()
X_train=data.iloc[:,3:-1]
y_train=data['质量等级']
print(y_train.value_counts())
y_train=y_train.map({'优':'1','良':'2','轻度污染':'3','中度污染':'4','重度污染':'5','严重污染':'6'})
modelDTC = tree.DecisionTreeClassifier(max_depth=2,random_state=123)
modelDTC.fit(X_train, y_train)
print(tree.export_text(modelDTC))
#print(tree.plot_tree(modelDTC))
print("训练精度:%f"%(modelDTC.score(X_train,y_train)))
with open("D:\jueceshu.dot", 'w') as f:
f = tree.export_graphviz(modelDTC, out_file = f,filled=True,class_names=True,proportion=True,rounded=True)
# -
# 说明:(1)第1至7行:读入空气质量监测数据。进行数据预处理。
# (2)第8,9行:建立树深度等于2的分类树模型,并拟合数据。
# (3)第10行:输出分类树的文本化表达结果。
# 文本化表达是以字符形式展示分类树的构成,树根在左,树叶在右。规则集包含4推理规则。
# (4)第12行:计算分类树的训练精度。因树深度较浅,预测效果不理想。
# (5)第14,15行:将分类树的图形结果保存到指定文件中。
#
| chapter5-4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import math
import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
plt.style.use('fivethirtyeight')
# Download AMZN daily quotes from Yahoo Finance for 2012-01-01 .. 2021-03-01.
df = web.DataReader('AMZN', data_source='yahoo', start='2012-01-01', end='2021-03-01')
# Visualize closing price history
plt.figure(figsize=(16,8))
plt.title('Closing price history of AMZN')
plt.plot(df['Close'])
plt.ylabel('Close Price')
plt.xlabel('Date')
plt.show()
# Keep only the Close column and convert it to a numpy array
dataset = df.filter(['Close']).values
# Chronological 80/20 split (no shuffling for time-series data)
train_set = dataset[:round(dataset.shape[0]*0.8)]
test_set = dataset[round(dataset.shape[0]*0.8):]
#print(train_set.shape)
# Scale the data.  Fit the scaler on the training set only and reuse it on the
# test set: re-fitting on the test data (the previous fit_transform) leaks
# test-period information and scales the two sets inconsistently.
scaler = MinMaxScaler(feature_range=(0,1))
train_sc = scaler.fit_transform(train_set)
test_sc = scaler.transform(test_set)
# Build supervised windows for the train and test sets.
# Each feature row holds the 15 previous scaled closing prices; the label is 1
# when the next price exceeds the mean of that window by more than 15%
# (i.e. > mean * 1.15, not merely above the average), else 0.
def _window_and_label(scaled, window=15, jump=1.15):
    """Return (features, labels) built from a scaled single-column array."""
    features = []  # independent variables
    labels = []    # dependent (binary) variable
    for i in range(window, len(scaled)):
        # The previous `window` prices form one sample.
        features.append(scaled[i-window:i, 0])
        # Label: does the next price jump above mean * `jump`?
        labels.append(1 if scaled[i, 0] > np.mean(scaled[i-window:i, 0]) * jump else 0)
    return features, labels

# One helper replaces the two previously duplicated loops.
X_train, y_train = _window_and_label(train_sc)
X_test, y_test = _window_and_label(test_sc)
#--------------------- Building SVM model --------------------#
# Fit a support-vector classifier on the windowed training data and report
# accuracy on the held-out test windows.
clf1 = SVC(C=1)
clf1.fit(X_train,y_train)
y_pred=clf1.predict(X_test)
accuracy_score(y_test,y_pred)
| ML Model/Basic ML models for Stocks Predictions(SVM).ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
# %%
TS_SPLIT = 10 # number of TimeSeriesSplit folds
WINDOW_SIZE = 5 # how many minutes to aggregate as a single datapoint
PREDICT_AHEAD1 = 6 # forecast horizon (steps) for the 1st model
PREDICT_AHEAD2 = 10 # forecast horizon (steps) for the 2nd model
EARLY_STOP = 10 # stop xgboost training early if eval RMSE is not improving
RESAMPLE_MIN = 1 # resampling interval in minutes (raw data is in 1-sec intervals)
# %%
data_folder = Path('../data/zad2-dane')
# %%
# Measurement categories (Polish directory-name prefixes) and the short
# aliases used as column prefixes throughout the notebook.
types = ['manipulowane', 'zaklocajace', 'zaklocane', 'straty']
types2 = ['man', 'zak', 'zcne', 'stra']
files_by_type = {}
for t,t2 in zip(types,types2):
    files_by_type[t2]= sorted(list(data_folder.glob('2021-05-*/' + t + '*'))) # TODO: 2021-04-19 seems to not work
# %%
def all_types_to_df(files_dict, end=100):
    """Read up to `end` CSV files per measurement type.

    Column names are lower-cased and, except for the first (timestamp)
    column, prefixed with the type's short alias.  Returns a dict mapping
    each short type name to the list of loaded DataFrames.
    """
    loaded = {}
    for short in types2:
        frames = []
        for path in files_dict[short][:end]:
            frame = pd.read_csv(path)
            lowered = [col.lower() for col in frame.columns]
            frame.columns = lowered[:1] + [short + '_' + col for col in lowered[1:]]
            frames.append(frame)
        loaded[short] = frames
    return loaded
# %%
type_df = all_types_to_df(files_by_type)
# %%
# Verify that every file of a given type parsed to the same column layout.
for t in types2:
    d = type_df[t][0].columns.values
    print(type_df[t][0].columns)
    for i in type_df[t]:
        assert np.all(i.columns.values == d), (i,d)
# %%
# Concatenate the per-file frames into one frame per type.
type_per_df = { t:pd.concat(type_df[t], ignore_index=True) for t in types2}
# %%
from functools import reduce
# Join all per-type frames on the shared timestamp column ('czas' = "time").
df_final = reduce(lambda left,right: pd.merge(left,right,on='czas'), type_per_df.values())
# %%
# Drop artifact columns (presumably produced by trailing separators in the
# source CSVs — TODO confirm).
del df_final['man_unnamed: 5']
del df_final['zcne_unnamed: 5']
# %%
# Map the raw sensor tags to human-readable column names.
translate_naming = {
    "man_001fcx00285_sppv.pv":'man_air_flow',
    "man_001xxxcalc01.num.pv[3]":"man_co2",
    "man_001scx00274_sppv.pv":"man_blow",
    "man_001fcx00241_sppv.pv":"man_dust",
    "zak_001fyx00206_spsum.pv":"zak_mixer",
    "zak_001fcx00231_sppv.pv":"zak_fry",
    "zak_001fcx00251_sppv.pv":"zak_slag",
    "zak_001fcx00281.pv":"zak_oxy1",
    "zak_001fcx00262.pv":"zak_oxy2",
    "zak_001fcx00261.pv":"zak_air",
    "zak_001xxxcalc01.num.pv[2]":"zak_sturoxy",
    "zak_prob_corg":"zak_carbon",
    "zak_prob_s":"zak_sulfur",
    "zak_sita_nadziarno":"zak_over_seed",
    "zak_sita_podziarno":"zak_under_seed",
    "zak_poziom_zuzel":"zak_slag_level",
    "zcne_001ucx00274.pv":"zcne_angle",
    "zcne_001nir0ods0.daca.pv":"zcne_loss",
    "zcne_temp_zuz":"zcne_temp",
    "zcne_007sxr00555.daca1.pv":"zcne_shake",
    "stra_001nir0szr0.daca.pv": "stra_sum",
    "stra_001nir0szrg.daca.pv": "stra_1",
    "stra_001nir0s600.daca.pv":"stra_2",
    "stra_001nir0s500.daca.pv":"stra_3",
    "stra_001nir0s300.daca.pv":"stra_4",
    "stra_001nir0s100.daca.pv":"stra_5",
}
# Tags without a translation keep their original name.
new_cols = [translate_naming.get(cname) or cname for cname in df_final.columns]
df_final.columns = new_cols
# Keep only the aggregate loss column; drop the per-component loss columns.
df_final = df_final.drop(['stra_1','stra_2','stra_3','stra_4','stra_5'], axis=1, errors='ignore')
# %% [markdown]
# # Resampling
# %%
# Resample to RESAMPLE_MIN-minute means.  The loss target is additionally
# smoothed with a rolling mean and shifted toward the future, so each row's
# target comes from upcoming measurements; the tail rows whose shifted target
# would be NaN are dropped.
df_final['czas'] = pd.to_datetime(df_final['czas'])
df_final = df_final.resample(f'{RESAMPLE_MIN}min', on='czas').mean()
df_final['stra_sum'] = df_final['stra_sum'].rolling(RESAMPLE_MIN).mean().shift(-RESAMPLE_MIN)
df_final = df_final[:-RESAMPLE_MIN]
df_final
# %%
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split, TimeSeriesSplit
import xgboost as xgb
# %% [markdown]
# # Predict using data window of X1 minutes for X2 minutes ahead
# %%
# Build sliding-window features: append WINDOW_SIZE-1 future-shifted copies of
# every feature column so each row carries WINDOW_SIZE consecutive timesteps.
X = df_final.iloc[:,:-1]
X_org = X.copy()
y = df_final.iloc[:,-1]
y_org = y.copy()
for i in range(1, WINDOW_SIZE):
    # Columns of the i-steps-ahead copy get an "_i" suffix.
    X_shifted1 = X_org.shift(-i).rename(index=None, columns=lambda c: c + f'_{i}')
    X = X.merge(X_shifted1, left_index=True, right_index=True)
    # Drop the tail rows whose shifted values ran past the end of the data;
    # trim y from the front correspondingly (TODO confirm intended alignment).
    X = X.iloc[:-i,:]
    y = y.iloc[i:]
# Align the target PREDICT_AHEAD1 steps into the future.
X = X.iloc[:-PREDICT_AHEAD1,:]
y = y.shift(-PREDICT_AHEAD1).iloc[:-PREDICT_AHEAD1]
# NOTE(review): this shuffled split is immediately overwritten inside the
# TimeSeriesSplit loop in the next cell; it only pre-populates the names.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42)
# %%
# Incrementally train an XGBoost regressor over TimeSeriesSplit folds;
# xgb_model=m continues from the previous fold's booster.  Per-iteration RMSE
# for the train (validation_0) and eval (validation_1) parts is accumulated.
tscv = TimeSeriesSplit(TS_SPLIT)
model3 = xgb.XGBRegressor()
m = None
train_rmse = []
test_rmse = []
for train_index, test_index in tscv.split(X):
    X_train, X_test = X.iloc[train_index], X.iloc[test_index]
    y_train, y_test = y.iloc[train_index], y.iloc[test_index]
    m = model3.fit(X_train, y_train, eval_metric='rmse', eval_set=[(X_train, y_train), (X_test, y_test)], verbose=False, xgb_model = m, early_stopping_rounds=EARLY_STOP)
    train_rmse += model3.evals_result()['validation_0']['rmse']
    test_rmse += model3.evals_result()['validation_1']['rmse']
# Learning curves across all folds.
plt.figure(figsize=(25,10))
plt.plot(train_rmse, label='train')
plt.plot(test_rmse, label='test')
# %%
# Score model3 on the last fold's hold-out set and plot prediction vs. original
# ('predykcja' / 'oryginał' are the Polish legend labels).
predictions3 = model3.predict(X_test)
print(mean_squared_error(predictions3, y_test))
# %%
predictions3 = model3.predict(X_test)
d = pd.DataFrame(predictions3, index=y_test.index, columns=['predykcja'])
d['oryginał'] = y_test
d.plot(figsize=(25,15))
# %% [markdown]
# # Predict using data window of X1 minutes for X2' minutes ahead
# %%
# Same sliding-window construction as above, but the target is aligned
# PREDICT_AHEAD2 steps ahead for the second model.
X = df_final.iloc[:,:-1]
X_org = X.copy()
y = df_final.iloc[:,-1]
y_org = y.copy()
for i in range(1, WINDOW_SIZE):
    X_shifted1 = X_org.shift(-i).rename(index=None, columns=lambda c: c + f'_{i}')
    X = X.merge(X_shifted1, left_index=True, right_index=True)
    X = X.iloc[:-i,:]
    y = y.iloc[i:]
X = X.iloc[:-PREDICT_AHEAD2,:]
y = y.shift(-PREDICT_AHEAD2).iloc[:-PREDICT_AHEAD2]
# NOTE(review): overwritten by the TimeSeriesSplit loop in the next cell.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42)
# %%
# Train the longer-horizon model with the same incremental fold scheme.
tscv = TimeSeriesSplit(TS_SPLIT)
model4 = xgb.XGBRegressor()
m = None
train_rmse = []
test_rmse = []
for train_index, test_index in tscv.split(X):
    X_train, X_test = X.iloc[train_index], X.iloc[test_index]
    y_train, y_test = y.iloc[train_index], y.iloc[test_index]
    m = model4.fit(X_train, y_train, eval_metric='rmse', eval_set=[(X_train, y_train), (X_test, y_test)], verbose=False, xgb_model = m, early_stopping_rounds=EARLY_STOP)
    train_rmse += model4.evals_result()['validation_0']['rmse']
    test_rmse += model4.evals_result()['validation_1']['rmse']
# Learning curves across all folds.
plt.figure(figsize=(25,10))
plt.plot(train_rmse, label='train')
plt.plot(test_rmse, label='test')
# %%
# Score model4 on the last fold's hold-out set and plot prediction vs. original.
predictions4 = model4.predict(X_test)
print(mean_squared_error(predictions4, y_test))
# %%
predictions4 = model4.predict(X_test)
d = pd.DataFrame(predictions4, index=y_test.index, columns=['predykcja'])
d['oryginał'] = y_test
d.plot(figsize=(25,15))
# %% [markdown]
# # Using models for predictions
# %%
def cartesian_product(*arrays):
    """Return every combination of the given 1-D arrays, one combination per row.

    The output has shape (prod(len(a) for a in arrays), len(arrays)) and the
    common promoted dtype of the inputs; the first array varies slowest.
    """
    grids = np.meshgrid(*arrays, indexing="ij")
    stacked = np.stack(grids, axis=-1)
    return stacked.reshape(-1, len(arrays))
# %%
# Prediction with windowed models
SETTINGS_CHANGE_UNIT = [80.0, 0.8, 2.0, 0.5] # how hard it is to adjust param
ADJUSTMENT_TARGET_WEIGHT = 0.0 # how important is adjustment cost against target difference
def find_winning_params2(state, current_settings, target):
    """Grid-search the four controllable settings and return the combination
    whose averaged model forecast is closest to `target`, penalised by how far
    the settings would move away from `current_settings`."""
    # Candidate grid over the four controllable settings.
    params = cartesian_product(
        np.linspace(1900, 3500, 20),
        np.linspace(65, 81, 81-65),
        np.linspace(40, 70, 70-40),
        np.linspace(13, 27, 27-13)
    ).round()
    # Repeat the current sensor state for every candidate, then overwrite the
    # first four columns with the candidate settings.
    candidates = np.ones((params.shape[0], state.shape[1])) * state.T
    candidates[:, 0:4] = params
    # Average the short- and long-horizon forecasts.
    pred_short = model3.predict(candidates)  # predict 6 min ahead
    pred_long = model4.predict(candidates)   # predict 10 min ahead
    predictions = (pred_short + pred_long) / 2
    # Combined score: distance to target plus weighted adjustment effort.
    target_diff = np.abs(predictions - target)
    adjustment_cost = np.sum(np.abs(candidates[:, 0:4] - current_settings.reshape((1, 4))) / SETTINGS_CHANGE_UNIT, axis=1)
    # Settings which we believe are the best to obtain target from current.
    return params[np.argmin(target_diff + adjustment_cost * ADJUSTMENT_TARGET_WEIGHT), :]
# %%
# Re-create the windowed feature matrix (same construction as for training,
# without the target alignment).
X = df_final.iloc[:,:-1]
X_org = X.copy()
for i in range(1, WINDOW_SIZE):
    X_shifted1 = X_org.shift(-i).rename(index=None, columns=lambda c: c + f'_{i}')
    X = X.merge(X_shifted1, left_index=True, right_index=True)
    X = X.iloc[:-i,:]
X = X.iloc[:-PREDICT_AHEAD2, :]
# Demo: for a few sample rows (standing in for real-time sensor input), show
# the recommended settings for a range of target values.  One loop replaces
# three copy-pasted stanzas; the printed output is identical.
for row in (0, 4242, 8888):
    state = X.iloc[row].values.reshape((-1, 1))  # real-time input from sensors
    current_settings = X.iloc[row, 0:4].values
    print(current_settings)
    for t in range(15, 25):
        print(t, find_winning_params2(state, current_settings, t))
| xgb_ftw-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Session 5: Generative Networks
# ## Assignment: Generative Adversarial Networks and Recurrent Neural Networks
#
# <p class="lead">
# <a href="https://www.kadenze.com/courses/creative-applications-of-deep-learning-with-tensorflow/info">Creative Applications of Deep Learning with Google's Tensorflow</a><br />
# <a href="http://pkmital.com"><NAME></a><br />
# <a href="https://www.kadenze.com">Kadenze, Inc.</a>
# </p>
#
# # Table of Contents
#
# <!-- MarkdownTOC autolink="true" autoanchor="true" bracket="round" -->
#
# - [Overview](#overview)
# - [Learning Goals](#learning-goals)
# - [Part 1 - Generative Adversarial Networks \(GAN\) / Deep Convolutional GAN \(DCGAN\)](#part-1---generative-adversarial-networks-gan--deep-convolutional-gan-dcgan)
# - [Introduction](#introduction)
# - [Building the Encoder](#building-the-encoder)
# - [Building the Discriminator for the Training Samples](#building-the-discriminator-for-the-training-samples)
# - [Building the Decoder](#building-the-decoder)
# - [Building the Generator](#building-the-generator)
# - [Building the Discriminator for the Generated Samples](#building-the-discriminator-for-the-generated-samples)
# - [GAN Loss Functions](#gan-loss-functions)
# - [Building the Optimizers w/ Regularization](#building-the-optimizers-w-regularization)
# - [Loading a Dataset](#loading-a-dataset)
# - [Training](#training)
# - [Equilibrium](#equilibrium)
# - [Part 2 - Variational Auto-Encoding Generative Adversarial Network \(VAEGAN\)](#part-2---variational-auto-encoding-generative-adversarial-network-vaegan)
# - [Batch Normalization](#batch-normalization)
# - [Building the Encoder](#building-the-encoder-1)
# - [Building the Variational Layer](#building-the-variational-layer)
# - [Building the Decoder](#building-the-decoder-1)
# - [Building VAE/GAN Loss Functions](#building-vaegan-loss-functions)
# - [Creating the Optimizers](#creating-the-optimizers)
# - [Loading the Dataset](#loading-the-dataset)
# - [Training](#training-1)
# - [Part 3 - Latent-Space Arithmetic](#part-3---latent-space-arithmetic)
# - [Loading the Pre-Trained Model](#loading-the-pre-trained-model)
# - [Exploring the Celeb Net Attributes](#exploring-the-celeb-net-attributes)
# - [Find the Latent Encoding for an Attribute](#find-the-latent-encoding-for-an-attribute)
# - [Latent Feature Arithmetic](#latent-feature-arithmetic)
# - [Extensions](#extensions)
# - [Part 4 - Character-Level Language Model](session-5-part-2.ipynb#part-4---character-level-language-model)
# - [Part 5 - Pretrained Char-RNN of Donald Trump](session-5-part-2.ipynb#part-5---pretrained-char-rnn-of-donald-trump)
# - [Getting the Trump Data](session-5-part-2.ipynb#getting-the-trump-data)
# - [Basic Text Analysis](session-5-part-2.ipynb#basic-text-analysis)
# - [Loading the Pre-trained Trump Model](session-5-part-2.ipynb#loading-the-pre-trained-trump-model)
# - [Inference: Keeping Track of the State](session-5-part-2.ipynb#inference-keeping-track-of-the-state)
# - [Probabilistic Sampling](session-5-part-2.ipynb#probabilistic-sampling)
# - [Inference: Temperature](session-5-part-2.ipynb#inference-temperature)
# - [Inference: Priming](session-5-part-2.ipynb#inference-priming)
# - [Assignment Submission](session-5-part-2.ipynb#assignment-submission)
# <!-- /MarkdownTOC -->
#
#
# <a name="overview"></a>
# # Overview
#
# This is certainly the hardest session and will require a lot of time and patience to complete. Also, many elements of this session may require further investigation, including reading of the original papers and additional resources in order to fully grasp their understanding. The models we cover are state of the art and I've aimed to give you something between a practical and mathematical understanding of the material, though it is a tricky balance. I hope for those interested, that you delve deeper into the papers for more understanding. And for those of you seeking just a practical understanding, that these notebooks will suffice.
#
# This session covered two of the most advanced generative networks: generative adversarial networks and recurrent neural networks. During the homework, we'll see how these work in more details and try building our own. I am not asking you train anything in this session as both GANs and RNNs take many days to train. However, I have provided pre-trained networks which we'll be exploring. We'll also see how a Variational Autoencoder can be combined with a Generative Adversarial Network to allow you to also encode input data, and I've provided a pre-trained model of this type of model trained on the Celeb Faces dataset. We'll see what this means in more details below.
#
# After this session, you are also required to submit your final project which can combine any of the materials you have learned so far to produce a short 1 minute clip demonstrating any aspect of the course you want to investigate further or combine with anything else you feel like doing. This is completely open to you and meant to encourage your peers to share something that demonstrates creative thinking. Be sure to keep the final project in mind while browsing through this notebook!
#
# <a name="learning-goals"></a>
# # Learning Goals
#
# * Learn to build the components of a Generative Adversarial Network and how it is trained
# * Learn to combine the Variational Autoencoder with a Generative Adversarial Network
# * Learn to use latent space arithmetic with a pre-trained VAE/GAN network
# * Learn to build the components of a Character Recurrent Neural Network and how it is trained
# * Learn to sample from a pre-trained CharRNN model
# +
# First check the Python version
# Sanity-check the interpreter before importing the course libraries.
import sys
if sys.version_info < (3,4):
    print('You are running an older version of Python!\n\n',
          'You should consider updating to Python 3.4.0 or',
          'higher as the libraries built for this course',
          'have only been tested in Python 3.4 and higher.\n')
    # BUG FIX: the two adjacent literals are implicitly concatenated, so the
    # original printed "...anacondaand then restart..."; the trailing space
    # restores the intended message.
    print('Try installing the Python 3.5 version of anaconda '
          'and then restart `jupyter notebook`:\n',
          'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
# Import everything the notebook needs; fail with a helpful message if the
# course-provided `libs` package is not importable from the working directory.
try:
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    from skimage.transform import resize
    from skimage import data
    # NOTE(review): `scipy.misc.imresize` and `scipy.ndimage.filters` were
    # deprecated/removed in newer SciPy releases -- this notebook assumes the
    # older SciPy the course shipped with; confirm before upgrading.
    from scipy.misc import imresize
    from scipy.ndimage.filters import gaussian_filter
    import IPython.display as ipyd
    import tensorflow as tf
    # Course-provided helper modules, shipped alongside this notebook.
    from libs import utils, gif, datasets, dataset_utils, nb_utils
except ImportError as e:
    print("Make sure you have started notebook in the same directory",
          "as the provided zip file which includes the 'libs' folder",
          "and the file 'utils.py' inside of it. You will NOT be able",
          "to complete this assignment unless you restart jupyter",
          "notebook inside the directory created by extracting",
          "the zip file or cloning the github repo.")
    print(e)
# We'll tell matplotlib to inline any drawn figures like so:
# %matplotlib inline
plt.style.use('ggplot')
# -
# Bit of formatting because I don't like the default inline code style:
from IPython.core.display import HTML
HTML("""<style> .rendered_html code {
padding: 2px 4px;
color: #c7254e;
background-color: #f9f2f4;
border-radius: 4px;
} </style>""")
# <a name="part-1---generative-adversarial-networks-gan--deep-convolutional-gan-dcgan"></a>
# # Part 1 - Generative Adversarial Networks (GAN) / Deep Convolutional GAN (DCGAN)
#
# <a name="introduction"></a>
# ## Introduction
#
# Recall from the lecture that a Generative Adversarial Network is two networks, a generator and a discriminator. The "generator" takes a feature vector and decodes this feature vector to become an image, exactly like the decoder we built in Session 3's Autoencoder. The discriminator is exactly like the encoder of the Autoencoder, except it can only have 1 value in the final layer. We use a sigmoid to squash this value between 0 and 1, and then interpret the meaning of it as: 1, the image you gave me was real, or 0, the image you gave me was generated by the generator, it's a FAKE! So the discriminator is like an encoder which takes an image and then performs lie detection. Are you feeding me lies? Or is the image real?
#
# Consider the AE and VAE we trained in Session 3. The loss function operated partly on the input space. It said, per pixel, what is the difference between my reconstruction and the input image? The l2-loss per pixel. Recall at that time we suggested that this wasn't the best idea because per-pixel differences aren't representative of our own perception of the image. One way to consider this is if we had the same image, and translated it by a few pixels. We would not be able to tell the difference, but the per-pixel difference between the two images could be enormously high.
#
# The GAN does not use per-pixel difference. Instead, it trains a distance function: the discriminator. The discriminator takes in two images, the real image and the generated one, and learns what a similar image should look like! That is really the amazing part of this network and has opened up some very exciting potential future directions for unsupervised learning. Another network that also learns a distance function is known as the siamese network. We didn't get into this network in this course, but it is commonly used in facial verification, or asserting whether two faces are the same or not.
#
# The GAN network is notoriously a huge pain to train! For that reason, we won't actually be training it. Instead, we'll discuss an extension to this basic network called the VAEGAN which uses the VAE we created in Session 3 along with the GAN. We'll then train that network in Part 2. For now, let's stick with creating the GAN.
#
# Let's first create the two networks: the discriminator and the generator. We'll first begin by building a general purpose encoder which we'll use for our discriminator. Recall that we've already done this in Session 3. What we want is for the input placeholder to be encoded using a list of dimensions for each of our encoder's layers. In the case of a convolutional network, our list of dimensions should correspond to the number of output filters. We also need to specify the kernel heights and widths for each layer's convolutional network.
#
# We'll first need a placeholder. This will be the "real" image input to the discriminator and the discriminator will encode this image into a single value, 0 or 1, saying, yes this is real, or no, this is not real.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
# We'll keep a variable for the size of our image.
n_pixels = 32
n_channels = 3
input_shape = [None, n_pixels, n_pixels, n_channels]
# And then create the input image placeholder: float32 images shaped
# [batch, height, width, channels], with the batch size left unspecified.
X = tf.placeholder(name='X', shape=input_shape, dtype=tf.float32)
# -
# <a name="building-the-encoder"></a>
# ## Building the Encoder
#
# Let's build our encoder just like in Session 3. We'll create a function which accepts the input placeholder, a list of dimensions describing the number of convolutional filters in each layer, and a list of filter sizes to use for the kernel sizes in each convolutional layer. We'll also pass in a parameter for which activation function to apply.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
def encoder(x, channels, filter_sizes, activation=tf.nn.tanh, reuse=None):
    """Strided convolutional encoder.

    Each layer convolves with stride 2 (so spatial size halves per layer)
    and applies `activation`.

    Parameters
    ----------
    x : tf.Tensor
        4-D input, [batch, height, width, channels].
    channels : list of int
        Number of output filters for each convolutional layer.
    filter_sizes : list of int
        Kernel height/width for each layer.
    activation : callable
        Nonlinearity applied after each convolution.
    reuse : bool or None
        Whether to reuse existing variables in each layer's scope.

    Returns
    -------
    h, hs : tf.Tensor, list of tf.Tensor
        Final encoding and every intermediate hidden layer.
    """
    # Set the input to a common variable name, h, for hidden layer
    h = x
    # Now we'll loop over the list of dimensions defining the number
    # of output filters in each layer, and collect each hidden layer
    hs = []
    for layer_i in range(len(channels)):
        with tf.variable_scope('layer{}'.format(layer_i+1), reuse=reuse):
            # Convolve using the utility convolution function.
            # This requires the number of output filters,
            # and the size of the kernel in `k_h` and `k_w`.
            # By default, this will use a stride of 2, meaning
            # each new layer will be downsampled by 2.
            h, W = utils.conv2d(h, channels[layer_i],
                                k_h=filter_sizes[layer_i],
                                k_w=filter_sizes[layer_i],
                                d_h=2,
                                d_w=2,
                                reuse=reuse)
            # Now apply the activation function
            h = activation(h)
            # Store each hidden layer
            hs.append(h)
    # Finally, return the encoding.
    return h, hs
# <a name="building-the-discriminator-for-the-training-samples"></a>
# ## Building the Discriminator for the Training Samples
#
# Finally, let's take the output of our encoder, and make sure it has just 1 value by using a fully connected layer. We can use the `libs/utils` module's, `linear` layer to do this, which will also reshape our 4-dimensional tensor to a 2-dimensional one prior to using the fully connected layer.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
def discriminator(X,
                  channels=[50, 50, 50, 50],
                  filter_sizes=[4, 4, 4, 4],
                  activation=utils.lrelu,
                  reuse=None):
    """Binary real/fake classifier built from the convolutional `encoder`.

    Encodes `X` and maps the flattened encoding to a single sigmoid unit:
    1 means "real training image", 0 means "generated (fake)".

    Returns the [batch, 1] sigmoid output tensor.
    """
    # We'll scope these variables to "discriminator"
    with tf.variable_scope('discriminator', reuse=reuse):
        # Encode X:
        H, Hs = encoder(X, channels, filter_sizes, activation, reuse)
        # Now make one last layer with just 1 output. We'll
        # have to reshape to 2-d so that we can create a fully
        # connected layer:
        shape = H.get_shape().as_list()
        H = tf.reshape(H, [-1, shape[1] * shape[2] * shape[3]])
        # Now we can connect our 2D layer to a single neuron output w/
        # a sigmoid activation:
        D, W = utils.linear(x=H, n_output=1,
                            activation=tf.nn.sigmoid,
                            name='fc', reuse=reuse)
    return D
# Now let's create the discriminator for the real training data coming from `X`:
# D_real is the discriminator's sigmoid output, i.e. P("real"), for each image in X.
D_real = discriminator(X)
# And we can see what the network looks like now:
graph = tf.get_default_graph()
nb_utils.show_graph(graph.as_graph_def())
# <a name="building-the-decoder"></a>
# ## Building the Decoder
#
# Now we're ready to build the Generator, or decoding network. This network takes as input a vector of features and will try to produce an image that looks like our training data. We'll send this synthesized image to our discriminator which we've just built above.
#
# Let's start by building the input to this network. We'll need a placeholder for the input features to this network. We have to be mindful of how many features we have. The feature vector for the Generator will eventually need to form an image. What we can do is create a 1-dimensional vector of values for each element in our batch, giving us `[None, n_features]`. We can then reshape this to a 4-dimensional Tensor so that we can build a decoder network just like in Session 3.
#
# But how do we assign the values from our 1-d feature vector (or 2-d tensor with Batch number of them) to the 3-d shape of an image (or 4-d tensor with Batch number of them)? We have to go from the number of features in our 1-d feature vector, let's say `n_latent` to `height x width x channels` through a series of convolutional transpose layers. One way to approach this is think of the reverse process. Starting from the final decoding of `height x width x channels`, I will use convolution with a stride of 2, so downsample by 2 with each new layer. So the second to last decoder layer would be, `height // 2 x width // 2 x ?`. If I look at it like this, I can use the variable `n_pixels` denoting the `height` and `width` to build my decoder, and set the channels to whatever I want.
#
# Let's start with just our 2-d placeholder which will have `None x n_features`, then convert it to a 4-d tensor ready for the decoder part of the network (a.k.a. the generator).
# +
# We'll need some variables first. This will be how many
# channels our generator's feature vector has. Experiment w/
# this if you are training your own network.
n_code = 16
# And in total how many features it has, including the spatial dimensions.
n_latent = (n_pixels // 16) * (n_pixels // 16) * n_code
# Let's build the 2-D placeholder, which is the 1-d feature vector for every
# element in our batch. We'll then reshape this to 4-D for the decoder.
Z = tf.placeholder(name='Z', shape=[None, n_latent], dtype=tf.float32)
# Now we can reshape it to input to the decoder. Here we have to
# be mindful of the height and width as described before. We need
# to make the height and width a factor of the final height and width
# that we want. Since we are using strided convolutions of 2, then
# we can say with 4 layers, that first decoder's layer should be:
# n_pixels / 2 / 2 / 2 / 2, or n_pixels / 16:
Z_tensor = tf.reshape(Z, [-1, n_pixels // 16, n_pixels // 16, n_code])
# -
# Now we'll build the decoder in much the same way as we built our encoder. And exactly as we've done in Session 3! This requires one additional parameter "channels" which is how many output filters we want for each net layer. We'll interpret the `dimensions` as the height and width of the tensor in each new layer, the `channels` is how many output filters we want for each net layer, and the `filter_sizes` is the size of the filters used for convolution. We'll default to using a stride of two which will downsample each layer. We're also going to collect each hidden layer `h` in a list. We'll end up needing this for Part 2 when we combine the variational autoencoder w/ the generative adversarial network.
def decoder(z, dimensions, channels, filter_sizes,
            activation=tf.nn.relu, reuse=None):
    """Upsampling decoder: the mirror image of `encoder`.

    Layer i resizes to dimensions[i] x dimensions[i] with channels[i]
    output filters via a transposed convolution, then applies
    `activation`.  Returns the final tensor and every hidden layer
    (the latter is needed in Part 2 for the VAEGAN).
    """
    current = z
    hiddens = []
    for idx in range(len(dimensions)):
        # One variable scope per layer so weights can be shared on reuse.
        with tf.variable_scope('layer{}'.format(idx + 1), reuse=reuse):
            current, W = utils.deconv2d(x=current,
                                        n_output_h=dimensions[idx],
                                        n_output_w=dimensions[idx],
                                        n_output_ch=channels[idx],
                                        k_h=filter_sizes[idx],
                                        k_w=filter_sizes[idx],
                                        reuse=reuse)
            current = activation(current)
            hiddens.append(current)
    return current, hiddens
# <a name="building-the-generator"></a>
# ## Building the Generator
#
# Now we're ready to use our decoder to take in a vector of features and generate something that looks like our training images. We have to ensure that the last layer produces the same output shape as the discriminator's input. E.g. we used a `[None, 64, 64, 3]` input to the discriminator, so our generator needs to also output `[None, 64, 64, 3]` tensors. In other words, we have to ensure the last element in our `dimensions` list is 64, and the last element in our `channels` list is 3.
# Explore these parameters.
def generator(Z,
              dimensions=[n_pixels//8, n_pixels//4, n_pixels//2, n_pixels],
              channels=[50, 50, 50, n_channels],
              filter_sizes=[4, 4, 4, 4],
              activation=utils.lrelu):
    """Decode a 2-D latent batch `Z` into images.

    BUG FIX: the original body decoded the module-level `Z_tensor`,
    silently ignoring the `Z` parameter (any argument passed in was
    discarded).  We now reshape the given `Z` to 4-D ourselves, so the
    function actually uses its input.  Behavior is unchanged for the
    existing call site `generator(Z)`, since `Z_tensor` was exactly
    this reshape of the module-level `Z`.
    """
    with tf.variable_scope('generator'):
        # [None, n_latent] -> [None, n_pixels//16, n_pixels//16, n_code]
        z_tensor = tf.reshape(Z, [-1, n_pixels // 16, n_pixels // 16, n_code])
        G, Hs = decoder(z_tensor, dimensions, channels, filter_sizes, activation)
    return G
# Now let's call the `generator` function with our input placeholder `Z`. This will take our feature vector and generate something in the shape of an image.
# G: synthesized images, shape [None, n_pixels, n_pixels, n_channels].
G = generator(Z)
graph = tf.get_default_graph()
nb_utils.show_graph(graph.as_graph_def())
# <a name="building-the-discriminator-for-the-generated-samples"></a>
# ## Building the Discriminator for the Generated Samples
#
# Lastly, we need *another* discriminator which takes as input our generated images. Recall the discriminator that we have made only takes as input our placeholder `X` which is for our actual training samples. We'll use the same function for creating our discriminator and **reuse** the variables we already have. This is the crucial part! We aren't making *new* trainable variables, but reusing the ones we have. We're just create a new set of operations that takes as input our generated image. So we'll have a whole new set of operations exactly like the ones we have created for our first discriminator. But we are going to use the exact same variables as our first discriminator, so that we optimize the same values.
# reuse=True shares the first discriminator's variables instead of creating
# new ones, so both real and fake paths train the same weights.
D_fake = discriminator(G, reuse=True)
# Now we can look at the graph and see the new discriminator inside the node for the discriminator. You should see the original discriminator and a new graph of a discriminator within it, but all the weights are shared with the original discriminator.
nb_utils.show_graph(graph.as_graph_def())
# <a name="gan-loss-functions"></a>
# ## GAN Loss Functions
#
# We now have all the components to our network. We just have to train it. This is the notoriously tricky bit. We will have 3 different loss measures instead of our typical network with just a single loss. We'll later connect each of these loss measures to two optimizers, one for the generator and another for the discriminator, and then pin them against each other and see which one wins! Exciting times!
#
# Recall from Session 3's Supervised Network, we created a binary classification task: music or speech. We again have a binary classification task: real or fake. So our loss metric will again use the binary cross entropy to measure the loss of our three different modules: the generator, the discriminator for our real images, and the discriminator for our generated images.
#
# To find out the loss function for our generator network, answer the question, what makes the generator successful? Successfully fooling the discriminator. When does that happen? When the discriminator for the fake samples produces all ones. So our binary cross entropy measure will measure the cross entropy with our predicted distribution and the true distribution which has all ones.
with tf.variable_scope('loss/generator'):
    # The generator succeeds when the fake-path discriminator outputs all
    # ones ("real"), so measure cross entropy against a target of ones.
    loss_G = tf.reduce_mean(utils.binary_cross_entropy(D_fake, tf.ones_like(D_fake)))
# What we've just written is a loss function for our generator. The generator is optimized when the discriminator for the generated samples produces all ones. In contrast to the generator, the discriminator will have 2 measures to optimize. One which is the opposite of what we have just written above, as well as 1 more measure for the real samples. Try writing these two losses and we'll combine them using their average. We want to optimize the Discriminator for the real samples producing all 1s, and the Discriminator for the fake samples producing all 0s:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
with tf.variable_scope('loss/discriminator/real'):
    # Real images should be classified as 1s.
    loss_D_real = utils.binary_cross_entropy(
        D_real, tf.ones_like(D_real))
with tf.variable_scope('loss/discriminator/fake'):
    # Generated images should be classified as 0s.
    loss_D_fake = utils.binary_cross_entropy(
        D_fake, tf.zeros_like(D_fake))
with tf.variable_scope('loss/discriminator'):
    # Average of both objectives: the discriminator must do well on each.
    loss_D = tf.reduce_mean((loss_D_real + loss_D_fake) / 2)
nb_utils.show_graph(graph.as_graph_def())
# With our loss functions, we can create an optimizer for the discriminator and generator:
#
# <a name="building-the-optimizers-w-regularization"></a>
# ## Building the Optimizers w/ Regularization
#
# We're almost ready to create our optimizers. We just need to do one extra thing. Recall that our loss for our generator has a flow from the generator through the discriminator. If we are training both the generator and the discriminator, we have two measures which both try to optimize the discriminator, but in opposite ways: the generator's loss would try to optimize the discriminator to be bad at its job, and the discriminator's loss would try to optimize it to be good at its job. This would be counter-productive, trying to optimize opposing losses. What we want is for the generator to get better, and the discriminator to get better. Not for the discriminator to get better, then get worse, then get better, etc... The way we do this is when we optimize our generator, we let the gradient flow through the discriminator, but we do not update the variables in the discriminator. Let's try and grab just the discriminator variables and just the generator variables below:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
# Grab just the variables corresponding to the discriminator
# and just the generator.  The scope names used above ('discriminator',
# 'generator') prefix every variable name, so filtering by prefix
# partitions the trainable variables cleanly.
vars_d = [v for v in tf.trainable_variables()
          if v.name.startswith('discriminator')]
print('Training discriminator variables:')
[print(v.name) for v in tf.trainable_variables()
 if v.name.startswith('discriminator')]
vars_g = [v for v in tf.trainable_variables()
          if v.name.startswith('generator')]
print('Training generator variables:')
[print(v.name) for v in tf.trainable_variables()
 if v.name.startswith('generator')]
# -
# We can also apply regularization to our network. This will penalize weights in the network for growing too large (L2 weight decay, weight 1e-6).
d_reg = tf.contrib.layers.apply_regularization(
    tf.contrib.layers.l2_regularizer(1e-6), vars_d)
g_reg = tf.contrib.layers.apply_regularization(
    tf.contrib.layers.l2_regularizer(1e-6), vars_g)
# The last thing you may want to try is creating a separate learning rate for each of your generator and discriminator optimizers like so:
# +
learning_rate = 0.0001
# Placeholders so the two learning rates can be varied independently per step.
lr_g = tf.placeholder(tf.float32, shape=[], name='learning_rate_g')
lr_d = tf.placeholder(tf.float32, shape=[], name='learning_rate_d')
# -
# Now you can feed the placeholders to your optimizers. If you run into errors creating these, then you likely have a problem with your graph's definition! Be sure to go back and reset the default graph and check the sizes of your different operations/placeholders.
#
# With your optimizers, you can now train the network by "running" the optimizer variables with your session. You'll need to set the `var_list` parameter of the `minimize` function to only train the variables for the discriminator and same for the generator's optimizer:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# Each optimizer minimizes its own loss plus L2 regularization, restricted
# (via var_list) to its own variables: gradients for the generator flow
# through the discriminator without updating it, and vice versa.
opt_g = tf.train.AdamOptimizer(learning_rate=lr_g).minimize(loss_G + g_reg, var_list=vars_g)
opt_d = tf.train.AdamOptimizer(learning_rate=lr_d).minimize(loss_D + d_reg, var_list=vars_d)
# <a name="loading-a-dataset"></a>
# ## Loading a Dataset
#
# Let's use the Celeb Dataset just for demonstration purposes. In Part 2, you can explore using your own dataset. This code is exactly the same as we did in Session 3's homework with the VAE.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
# You'll want to change this to your own data if you end up training your own GAN.
batch_size = 64
n_epochs = 1
# Images are cropped to the network's input size (n_pixels x n_pixels x 3).
crop_shape = [n_pixels, n_pixels, 3]
crop_factor = 0.8
# 218 x 178 is the native size of the CelebA images.
input_shape = [218, 178, 3]
files = datasets.CELEB()
# Queue-based input pipeline; `batch` yields [batch_size] cropped images
# when run inside a session with queue runners started.
batch = dataset_utils.create_input_pipeline(
    files=files,
    batch_size=batch_size,
    n_epochs=n_epochs,
    crop_shape=crop_shape,
    crop_factor=crop_factor,
    shape=input_shape)
# -
# <a name="training"></a>
# ## Training
#
# We'll now go through the setup of training the network. We won't actually spend the time to train the network but just see how it would be done. This is because in Part 2, we'll see an extension to this network which makes it much easier to train.
# +
ckpt_name = 'gan.ckpt'
sess = tf.Session()
saver = tf.train.Saver()
# NOTE(review): initialize_all_variables is the deprecated TF-1.x alias of
# global_variables_initializer; kept for compatibility with the course libs.
sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
# Freeze the graph so no ops are accidentally added during training.
tf.get_default_graph().finalize()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# NOTE(review): TF checkpoints can be sharded across several files, in which
# case this exact-path existence check may miss them -- verify if restore
# silently never triggers.
if os.path.exists(ckpt_name):
    saver.restore(sess, ckpt_name)
    print("VAE model restored.")
# +
n_examples = 10
# Four random latent "corner" codes; make_latent_manifold presumably
# interpolates between them to form an n_examples x n_examples grid used
# for the periodic manifold visualizations below -- TODO confirm in libs/utils.
zs = np.random.uniform(0.0, 1.0, [4, n_latent]).astype(np.float32)
zs = utils.make_latent_manifold(zs, n_examples)
# -
# <a name="equilibrium"></a>
# ## Equilibrium
#
# Equilibrium is at 0.693. Why? Consider what the cost is measuring, the binary cross entropy. If we have random guesses, then we have as many 0s as we have 1s. And on average, we'll be 50% correct. The binary cross entropy is:
#
# \begin{align}
# \sum_i \text{X}_i * \text{log}(\tilde{\text{X}}_i) + (1 - \text{X}_i) * \text{log}(1 - \tilde{\text{X}}_i)
# \end{align}
#
# Which is written out in tensorflow as:
# ```python
# (-(x * tf.log(z) + (1. - x) * tf.log(1. - z)))
# ```
# Where `x` is the discriminator's prediction of the true distribution, in the case of GANs, the input images, and `z` is the discriminator's prediction of the generated images corresponding to the mathematical notation of $\tilde{\text{X}}$. We sum over all features, but in the case of the discriminator, we have just 1 feature, the guess of whether it is a true image or not. If our discriminator guesses at chance, i.e. 0.5, then we'd have something like:
#
# \begin{align}
# 0.5 * \text{log}(0.5) + (1 - 0.5) * \text{log}(1 - 0.5) = -0.693
# \end{align}
#
# So this is what we'd expect at the start of learning and from a game theoretic point of view, where we want things to remain. So unlike our previous networks, where our loss continues to drop closer and closer to 0, we want our loss to waver around this value as much as possible, and hope for the best.
# -log(0.5) ~= 0.693: the binary cross entropy of a discriminator guessing
# at chance, i.e. the ideal adversarial equilibrium derived above.
equilibrium = 0.693
# How far the real/fake cost may stray from equilibrium before we stop
# updating one of the two networks (see the training loop below).
margin = 0.2
# When we go to train the network, we switch back and forth between each optimizer, feeding in the appropriate values for each optimizer. The `opt_g` optimizer only requires the `Z` and `lr_g` placeholders, while the `opt_d` optimizer requires the `X`, `Z`, and `lr_d` placeholders.
#
# Don't train this network for very long because GANs are a huge pain to train and require a lot of fiddling. They very easily get stuck in their adversarial process, or get overtaken by one or the other, resulting in a useless model. What you need to develop is a steady equilibrium that optimizes both. That will likely take two weeks just trying to get the GAN to train and not have enough time for the rest of the assignment. They require a lot of memory/cpu and can take many days to train once you have settled on an architecture/training process/dataset. Just let it run for a short time and then interrupt the kernel (don't restart!), then continue to the next cell.
#
# From there, we'll go over an extension to the GAN which uses a VAE like we used in Session 3. By using this extra network, we can actually train a better model in a fraction of the time and with much more ease! But the network's definition is a bit more complicated. Let's see how the GAN is trained first and then we'll train the VAE/GAN network instead. While training, the "real" and "fake" cost will be printed out. See how this cost wavers around the equilibrium and how we enforce it to try and stay around there by including a margin and some simple logic for updates. This is highly experimental and the research does not have a good answer for the best practice on how to train a GAN. I.e., some people will set the learning rate to some ratio of the performance between fake/real networks, others will have a fixed update schedule but train the generator twice and the discriminator only once.
# Training loop: alternate generator/discriminator updates, gated by how far
# each cost has drifted from the adversarial equilibrium.
t_i = 0        # index for numbering saved montage images
batch_i = 0    # batch counter within the current epoch
epoch_i = 0
n_files = len(files)
while epoch_i < n_epochs:
    batch_i += 1
    # Next minibatch of images, scaled from [0, 255] to [0, 1].
    batch_xs = sess.run(batch) / 255.0
    # Fresh random latent codes for the generator.
    batch_zs = np.random.uniform(
        0.0, 1.0, [batch_size, n_latent]).astype(np.float32)
    # Evaluate (but don't yet optimize) both discriminator costs.
    real_cost, fake_cost = sess.run([
        loss_D_real, loss_D_fake],
        feed_dict={
            X: batch_xs,
            Z: batch_zs})
    real_cost = np.mean(real_cost)
    fake_cost = np.mean(fake_cost)
    if (batch_i % 20) == 0:
        print(batch_i, 'real:', real_cost, '/ fake:', fake_cost)
    # Heuristic gating: if the discriminator is doing badly (cost above
    # equilibrium + margin), pause the generator; if it is doing too well
    # (cost below equilibrium - margin), pause the discriminator.  If both
    # would be paused, update both so training never stalls.
    gen_update = True
    dis_update = True
    if real_cost > (equilibrium + margin) or \
       fake_cost > (equilibrium + margin):
        gen_update = False
    if real_cost < (equilibrium - margin) or \
       fake_cost < (equilibrium - margin):
        dis_update = False
    if not (gen_update or dis_update):
        gen_update = True
        dis_update = True
    if gen_update:
        sess.run(opt_g,
                 feed_dict={
                     Z: batch_zs,
                     lr_g: learning_rate})
    if dis_update:
        sess.run(opt_d,
                 feed_dict={
                     X: batch_xs,
                     Z: batch_zs,
                     lr_d: learning_rate})
    # End of epoch: visualize samples and checkpoint the model.
    if batch_i % (n_files // batch_size) == 0:
        batch_i = 0
        epoch_i += 1
        print('---------- EPOCH:', epoch_i)
        # Plot example generations from the fixed latent manifold grid...
        recon = sess.run(G, feed_dict={Z: zs})
        recon = np.clip(recon, 0, 1)
        m1 = utils.montage(recon.reshape([-1] + crop_shape),
                'imgs/manifold_%08d.png' % t_i)
        # ...and from this batch's random latent codes.
        recon = sess.run(G, feed_dict={Z: batch_zs})
        recon = np.clip(recon, 0, 1)
        m2 = utils.montage(recon.reshape([-1] + crop_shape),
                'imgs/reconstructions_%08d.png' % t_i)
        fig, axs = plt.subplots(1, 2, figsize=(15, 10))
        axs[0].imshow(m1)
        axs[1].imshow(m2)
        plt.show()
        t_i += 1
        # Save the variables to disk.
        save_path = saver.save(sess, "./" + ckpt_name,
                               global_step=batch_i,
                               write_meta_graph=False)
        print("Model saved in file: %s" % save_path)
# +
# Tell all the queue-runner threads started earlier to shut down.
coord.request_stop()
# Wait until all threads have finished.
coord.join(threads)
# Clean up the session.
sess.close()
# -
# <a name="part-2---variational-auto-encoding-generative-adversarial-network-vaegan"></a>
# # Part 2 - Variational Auto-Encoding Generative Adversarial Network (VAEGAN)
#
# In our definition of the generator, we started with a feature vector, `Z`. This feature vector was not connected to anything before it. Instead, we had to randomly create its values using a random number generator of its `n_latent` values from -1 to 1, and this range was chosen arbitrarily. It could have been 0 to 1, or -3 to 3, or 0 to 100. In any case, the network would have had to learn to transform those values into something that looked like an image. There was no way for us to take an image, and find the feature vector that created it. In other words, it was not possible for us to *encode* an image.
#
# The closest thing to an encoding we had was taking an image and feeding it to the discriminator, which would output a 0 or 1. But what if we had another network that allowed us to encode an image, and then we used this network for both the discriminator and generative parts of the network? That's the basic idea behind the VAEGAN: https://arxiv.org/abs/1512.09300. It is just like the regular GAN, except we also use an encoder to create our feature vector `Z`.
#
# We then get the best of both worlds: a GAN that looks more or less the same, but uses the encoding from an encoder instead of an arbitrary feature vector; and an autoencoder that can model an input distribution using a trained distance function, the discriminator, leading to nicer encodings/decodings.
#
# Let's try to build it! Refer to the paper for the intricacies and a great read. Luckily, by building the `encoder` and `decoder` functions, we're almost there. We just need a few more components and will change these slightly.
#
# Let's reset our graph and recompose our network as a VAEGAN:
# Start from a clean slate: discard the Part-1 GAN graph entirely.
tf.reset_default_graph()
# <a name="batch-normalization"></a>
# ## Batch Normalization
#
# You may have noticed from the `VAE` code that I've used something called "batch normalization". This is a pretty effective technique for regularizing the training of networks by "reducing internal covariate shift". The basic idea is that given a minibatch, we optimize the gradient for this small sample of the greater population. But this small sample may have different characteristics than the entire population's gradient. Consider the most extreme case, a minibatch of 1. In this case, we overfit our gradient to optimize the gradient of the single observation. If our minibatch is too large, say the size of the entire population, we aren't able to maneuver the loss manifold at all and the entire loss is averaged in a way that doesn't let us optimize anything. What we want to do is find a happy medium between a too-smooth loss surface (i.e. every observation), and a very peaky loss surface (i.e. a single observation). Up until now we only used mini-batches to help with this. But we can also approach it by "smoothing" our updates between each mini-batch. That would effectively smooth the manifold of the loss space. Those of you familiar with signal processing will see this as a sort of low-pass filter on the gradient updates.
#
# In order for us to use batch normalization, we need another placeholder which is a simple boolean: True or False, denoting when we are training. We'll use this placeholder to conditionally update batch normalization's statistics required for normalizing our minibatches. Let's create the placeholder and then I'll get into how to use this.
# placeholder for batch normalization: feed True while training (so batch
# statistics are updated) and False at inference time.
is_training = tf.placeholder(tf.bool, name='istraining')
# The original paper that introduced the idea suggests to use batch normalization "pre-activation", meaning after the weight multiplication or convolution, and before the nonlinearity. We can use the `libs/batch_norm` module to apply batch normalization to any input tensor given the tensor and the placeholder defining whether or not we are training. Let's use this module and you can inspect the code inside the module in your own time if it interests you.
# Course-provided batch-norm helper; `help` prints its signature/docstring.
from libs.batch_norm import batch_norm
help(batch_norm)
# Note that Tensorflow also includes numerous batch normalization implementations now that it did not include at the time of filming (Tensorflow is evolving very quickly)! These exist in `tf.contrib.layers.batch_norm`, `tf.contrib.learn.ops.batch_norm`, and `tf.contrib.slim.batch_norm`. They work slightly differently to the `libs/batch_norm.py` implementation in that they take a boolean for whether or not you are training, rather than a `tf.Placeholder`. This requires you to reconstruct the network when you are training/inferring, or create two networks, which is preferable for "deploying" a model. For instance, if you have trained a model and you want to hand it out, you don't necessarily want the batch norm operations for training the network in there. For the libraries in this course, we'll be using the `libs/batch_norm` implementation which means you will have to use `feed_dict` to denote when you are training or not.
#
# <a name="building-the-encoder-1"></a>
# ## Building the Encoder
#
# We can now change our encoder to accept the `is_training` placeholder and apply `batch_norm` just before the activation function is applied:
def encoder(x, is_training, channels, filter_sizes, activation=tf.nn.tanh, reuse=None):
    """Build a strided convolutional encoder with batch normalization.

    Each layer convolves with stride 2 (halving spatial resolution), applies
    batch norm "pre-activation" (conditioned on the `is_training`
    placeholder), then the nonlinearity.

    Returns the final hidden layer and the list of every hidden layer
    (the per-layer activations are reused later for the feature losses).
    """
    h = x
    print('encoder/input:', h.get_shape().as_list())
    hs = []
    for layer_i, (n_filters, ksize) in enumerate(zip(channels, filter_sizes)):
        with tf.variable_scope('layer{}'.format(layer_i + 1), reuse=reuse):
            # Strided convolution: this requires the number of output
            # filters and the kernel size; d_h=d_w=2 downsamples by 2.
            h, W = utils.conv2d(h, n_filters,
                                k_h=ksize,
                                k_w=ksize,
                                d_h=2,
                                d_w=2,
                                reuse=reuse)
            # Batch norm goes after the convolution, before the activation.
            h = batch_norm(h, is_training)
            h = activation(h)
            print('layer:', layer_i, ', shape:', h.get_shape().as_list())
            hs.append(h)
    return h, hs
# Let's now create the input to the network using a placeholder. We can try a slightly larger image this time. But be careful experimenting with much larger images as this is a big network.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
n_pixels = 64
n_channels = 3
input_shape = [None, n_pixels, n_pixels, n_channels]
# placeholder for the input to the network: a batch (unknown size) of
# n_pixels x n_pixels RGB images scaled to [0, 1].
X = tf.placeholder(name='X', shape=input_shape, dtype=tf.float32)
# -
# And now we'll connect the input to an encoder network. We'll also use the `tf.nn.elu` activation instead. Explore other activations but I've found this to make the training much faster (e.g. 10x faster at least!). See the paper for more details: [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
# ](http://arxiv.org/abs/1511.07289)
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
channels = [64, 64, 64]
filter_sizes = [5, 5, 5]
activation = tf.nn.elu
n_hidden = 128
with tf.variable_scope('encoder'):
    # Encode X down to a final feature map H, keeping every layer in Hs.
    H, Hs = encoder(X, is_training, channels, filter_sizes, activation)
    # Dense projection of the encoding to n_hidden units.
    Z = utils.linear(H, n_hidden)[0]
# -
# <a name="building-the-variational-layer"></a>
# ## Building the Variational Layer
#
# In Session 3, we introduced the idea of Variational Bayes when we used the Variational Auto Encoder. The variational bayesian approach requires a richer understanding of probabilistic graphical models and bayesian methods which we were not able to go over in this course (it requires a few courses all by itself!). For that reason, please treat this as a "black box" in this course.
#
# For those of you that are more familiar with graphical models, Variational Bayesian methods attempt to model an approximate joint distribution of $Q(Z)$ using some distance function to the true distribution $P(X)$. Kingma and Welling show how this approach can be used in a graphical model resembling an autoencoder and can be trained using KL-Divergence, or $KL(Q(Z) || P(X))$. The distribution Q(Z) is the variational distribution, and attempts to model the lower-bound of the true distribution $P(X)$ through the minimization of the KL-divergence. Another way to look at this is the encoder of the network is trying to model the parameters of a known distribution, the Gaussian Distribution, through a minimization of this lower bound. We assume that this distribution resembles the true distribution, but it is merely a simplification of the true distribution. To learn more about this, I highly recommend picking up the book by Christopher Bishop called "Pattern Recognition and Machine Learning" and reading the original Kingma and Welling paper on Variational Bayes.
#
# Now back to coding, we'll create a general variational layer that does exactly the same thing as our VAE in session 3. Treat this as a black box if you are unfamiliar with the math. It takes an input encoding, `h`, and an integer, `n_code` defining how many latent Gaussians to use to model the latent distribution. In return, we get the latent encoding from sampling the Gaussian layer, `z`, the mean and log standard deviation, as well as the prior loss, `loss_z`.
def variational_bayes(h, n_code):
    """Variational layer: model q(z|x) as a diagonal Gaussian.

    Parameters
    ----------
    h : Tensor
        Input encoding, shape (batch, features).
    n_code : int
        Number of latent Gaussians to model the latent distribution.

    Returns
    -------
    z : Tensor
        Latent code sampled via the reparameterization trick.
    z_mu, z_log_sigma : Tensor
        Mean and log standard deviation of q(z|x).
    loss_z : Tensor
        Per-example KL divergence from the N(0, 1) prior.
    """
    # Model mu and log(\sigma)
    z_mu = tf.nn.tanh(utils.linear(h, n_code, name='mu')[0])
    z_log_sigma = 0.5 * tf.nn.tanh(utils.linear(h, n_code, name='log_sigma')[0])
    # Sample from noise distribution p(eps) ~ N(0, 1).
    # NOTE: tf.pack / tf.mul were renamed tf.stack / tf.multiply and the old
    # names removed in TF 1.0; everything else in this file runs on 1.x, so
    # use the surviving names.
    epsilon = tf.random_normal(tf.stack([tf.shape(h)[0], n_code]))
    # Sample from posterior (reparameterization trick).
    z = z_mu + tf.multiply(epsilon, tf.exp(z_log_sigma))
    # Measure loss: KL(q(z|x) || N(0, 1)).
    loss_z = -0.5 * tf.reduce_sum(
        1.0 + 2.0 * z_log_sigma - tf.square(z_mu) - tf.exp(2.0 * z_log_sigma),
        1)
    return z, z_mu, z_log_sigma, loss_z
# Let's connect this layer to our encoding, and keep all the variables it returns. Treat this as a black box if you are unfamiliar with variational bayes!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
# Experiment w/ values between 2 - 100
# depending on how difficult the dataset is
n_code = 32
# Replace the deterministic encoding Z with a sampled variational code,
# keeping the KL prior loss (loss_Z) for the encoder's objective.
with tf.variable_scope('encoder/variational'):
    Z, Z_mu, Z_log_sigma, loss_Z = variational_bayes(h=Z, n_code=n_code)
# -
# -
# <a name="building-the-decoder-1"></a>
# ## Building the Decoder
#
# In the GAN network, we built a decoder and called it the generator network. Same idea here. We can use these terms interchangeably. Before we connect our latent encoding, `Z` to the decoder, we'll implement batch norm in our decoder just like we did with the encoder. This is a simple fix: add a second argument for `is_training` and then apply batch normalization just after the `deconv2d` operation and just before the nonlinear `activation`.
def decoder(z, is_training, dimensions, channels, filter_sizes,
            activation=tf.nn.elu, reuse=None):
    """Build a transposed-convolutional decoder (generator) with batch norm.

    Layer i upsamples to dimensions[i] x dimensions[i] spatial resolution
    with channels[i] output channels, applying batch norm before the
    activation — mirroring the encoder's pre-activation scheme.
    """
    h = z
    layer_params = zip(dimensions, channels, filter_sizes)
    for layer_i, (size, n_ch, ksize) in enumerate(layer_params):
        with tf.variable_scope('layer{}'.format(layer_i + 1), reuse=reuse):
            h, W = utils.deconv2d(x=h,
                                  n_output_h=size,
                                  n_output_w=size,
                                  n_output_ch=n_ch,
                                  k_h=ksize,
                                  k_w=ksize,
                                  reuse=reuse)
            # Batch norm just after the deconvolution, before the activation.
            h = batch_norm(h, is_training)
            h = activation(h)
    return h
# Now we'll build a decoder just like in Session 3, and just like our Generator network in Part 1. In Part 1, we created `Z` as a placeholder which we would have had to feed in as random values. However, now we have an explicit coding of an input image in `X` stored in `Z` by having created the encoder network.
# +
dimensions = [n_pixels // 8, n_pixels // 4, n_pixels // 2, n_pixels]
channels = [30, 30, 30, n_channels]
filter_sizes = [4, 4, 4, 4]
activation = tf.nn.elu
# Project the latent code up to a small spatial tensor the decoder can
# upsample: n_code channels at (n_pixels/16) x (n_pixels/16).
n_latent = n_code * (n_pixels // 16)**2
with tf.variable_scope('generator'):
    Z_decode = utils.linear(
        Z, n_output=n_latent, name='fc', activation=activation)[0]
    Z_decode_tensor = tf.reshape(
        Z_decode, [-1, n_pixels//16, n_pixels//16, n_code], name='reshape')
    # Decode the reshaped code back to a full-resolution image G.
    G = decoder(
        Z_decode_tensor, is_training, dimensions,
        channels, filter_sizes, activation)
# -
# -
# Now we need to build our discriminators. We'll need to add a parameter for the `is_training` placeholder. We're also going to keep track of every hidden layer in the discriminator. Our encoder already returns the `Hs` of each layer. Alternatively, we could poll the graph for each layer in the discriminator and ask for the corresponding layer names. We're going to need these layers when building our costs.
def discriminator(X,
                  is_training,
                  channels=[50, 50, 50, 50],
                  filter_sizes=[4, 4, 4, 4],
                  activation=tf.nn.elu,
                  reuse=None):
    """Convolutional discriminator: probability that X is a real image.

    Reuses the `encoder` builder, then flattens the final feature map into
    a single sigmoid unit. Also returns every hidden layer (Hs), which the
    feature-matching losses need.
    """
    # All discriminator variables live under one scope so reuse=True shares
    # weights between the real- and fake-input copies.
    with tf.variable_scope('discriminator', reuse=reuse):
        H, Hs = encoder(
            X, is_training, channels, filter_sizes, activation, reuse)
        # Flatten the final feature map so it can feed the dense layer.
        shape = H.get_shape().as_list()
        flat = tf.reshape(H, [-1, shape[1] * shape[2] * shape[3]])
        D, W = utils.linear(
            x=flat, n_output=1, activation=tf.nn.sigmoid, name='fc', reuse=reuse)
    return D, Hs
# Recall the regular GAN and DCGAN required 2 discriminators: one for the generated samples in `Z`, and one for the input samples in `X`. We'll do the same thing here. One discriminator for the real input data, `X`, which the discriminator will try to predict as 1s, and another discriminator for the generated samples that go from `X` through the encoder to `Z`, and finally through the decoder to `G`. The discriminator will be trained to try and predict these as 0s, whereas the generator will be trained to try and predict these as 1s.
# One discriminator copy judges real inputs X; the second (sharing weights
# via reuse=True) judges the generator's reconstructions G.
D_real, Hs_real = discriminator(X, is_training)
D_fake, Hs_fake = discriminator(G, is_training, reuse=True)
# <a name="building-vaegan-loss-functions"></a>
# ## Building VAE/GAN Loss Functions
#
# Let's now see how we can compose our loss. We have 3 losses for our discriminator. Along with measuring the binary cross entropy between each of them, we're going to also measure each layer's loss from our two discriminators using an l2-loss, and this will form our loss for the log likelihood measure. The details of how these are constructed are explained in more details in the paper: https://arxiv.org/abs/1512.09300 - please refer to this paper for more details that are way beyond the scope of this course! One parameter within this to pay attention to is `gamma`, which the authors of the paper suggest control the weighting between content and style, just like in Session 4's Style Net implementation.
with tf.variable_scope('loss'):
    # Loss functions
    # Feature ("log likelihood") loss: l2 distance between each
    # discriminator layer's response to real vs. generated images.
    loss_D_llike = 0
    for h_real, h_fake in zip(Hs_real, Hs_fake):
        loss_D_llike += tf.reduce_sum(tf.squared_difference(
            utils.flatten(h_fake), utils.flatten(h_real)), 1)
    # Standard GAN loss; eps keeps the logs away from log(0).
    eps = 1e-12
    loss_real = tf.log(D_real + eps)
    loss_fake = tf.log(1 - D_fake + eps)
    loss_GAN = tf.reduce_sum(loss_real + loss_fake, 1)
    # gamma weights content (feature loss) vs. style (GAN loss) in the
    # decoder objective, per Larsen et al., arXiv:1512.09300.
    gamma = 0.75
    loss_enc = tf.reduce_mean(loss_Z + loss_D_llike)
    loss_dec = tf.reduce_mean(gamma * loss_D_llike - loss_GAN)
    loss_dis = -tf.reduce_mean(loss_GAN)
nb_utils.show_graph(tf.get_default_graph().as_graph_def())
# <a name="creating-the-optimizers"></a>
# ## Creating the Optimizers
#
# We now have losses for our encoder, decoder, and discriminator networks. We can connect each of these to their own optimizer and start training! Just like with Part 1's GAN, we'll ensure each network's optimizer only trains its part of the network: the encoder's optimizer will only update the encoder variables, the generator's optimizer will only update the generator variables, and the discriminator's optimizer will only update the discriminator variables.
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
learning_rate = 0.0001
# Each optimizer updates only its own sub-network, selected by the
# variable-scope prefix used when that network was built ('encoder',
# 'generator', 'discriminator').
opt_enc = tf.train.AdamOptimizer(
    learning_rate=learning_rate).minimize(
    loss_enc,
    var_list=[var_i for var_i in tf.trainable_variables()
              if var_i.name.startswith('encoder')])
opt_gen = tf.train.AdamOptimizer(
    learning_rate=learning_rate).minimize(
    loss_dec,
    var_list=[var_i for var_i in tf.trainable_variables()
              if var_i.name.startswith('generator')])
opt_dis = tf.train.AdamOptimizer(
    learning_rate=learning_rate).minimize(
    loss_dis,
    var_list=[var_i for var_i in tf.trainable_variables()
              if var_i.name.startswith('discriminator')])
# -
# -
# <a name="loading-the-dataset"></a>
# ## Loading the Dataset
#
# We'll now load our dataset just like in Part 1. Here is where you should explore with your own data!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
from libs import datasets, dataset_utils
batch_size = 64
n_epochs = 100
# Images are center-cropped to crop_factor of the original 218x178 Celeb
# frames, then resized to n_pixels x n_pixels x n_channels.
crop_shape = [n_pixels, n_pixels, n_channels]
crop_factor = 0.8
input_shape = [218, 178, 3]
# Try w/ CELEB first to make sure it works, then explore w/ your own dataset.
files = datasets.CELEB()
# Queue-based input pipeline yielding batches of cropped, resized images.
batch = dataset_utils.create_input_pipeline(
    files=files,
    batch_size=batch_size,
    n_epochs=n_epochs,
    crop_shape=crop_shape,
    crop_factor=crop_factor,
    shape=input_shape)
# -
# -
# We'll also create a latent manifold just like we've done in Session 3 and Part 1. This is a random sampling of 4 points in the latent space of `Z`. We then interpolate between them to create a "hyper-plane" and show the decoding of 10 x 10 points on that hyperplane.
n_samples = 10
# Four random corner points in latent space, expanded into an
# n_samples x n_samples interpolated grid for visualization.
zs = np.random.uniform(
    -1.0, 1.0, [4, n_code]).astype(np.float32)
zs = utils.make_latent_manifold(zs, n_samples)
# Now create a session and create a coordinator to manage our queues for fetching data from the input pipeline and start our queue runners:
# +
# We create a session to use the graph
sess = tf.Session()
# NOTE(review): tf.initialize_all_variables is the deprecated pre-1.0 name
# for tf.global_variables_initializer — confirm against the TF version used.
init_op = tf.initialize_all_variables()
saver = tf.train.Saver()
# Coordinator + queue runners drive the input pipeline's threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
sess.run(init_op)
# -
# -
# Load an existing checkpoint if it exists to continue training.
# Resume training from a saved checkpoint when one is present.
if os.path.exists("vaegan.ckpt"):
    saver.restore(sess, "vaegan.ckpt")
    print("GAN model restored.")
# We'll also try resynthesizing a test set of images. This will help us understand how well the encoder/decoder network is doing:
n_files = len(files)
# One fixed batch, scaled to [0, 1], reused throughout training to
# monitor reconstruction quality.
test_xs = sess.run(batch) / 255.0
if not os.path.exists('imgs'):
    os.mkdir('imgs')
m = utils.montage(test_xs, 'imgs/test_xs.png')
plt.imshow(m)
# <a name="training-1"></a>
# ## Training
#
# Almost ready for training. Let's get some variables which we'll need. These are the same as Part 1's training process. We'll keep track of `t_i` which we'll use to create images of the current manifold and reconstruction every so many iterations. And we'll keep track of the current batch number within the epoch and the current epoch number.
t_i = 0      # index for the periodically saved montage images
batch_i = 0  # batch counter within the current epoch
epoch_i = 0  # epoch counter
# Just like in Part 1, we'll train trying to maintain an equilibrium between our Generator and Discriminator networks. You should experiment with the margin depending on how the training proceeds.
# 0.693 ~ -log(0.5): the cross-entropy when the discriminator is at chance.
equilibrium = 0.693
margin = 0.4
# Now we'll train! Just like Part 1, we measure the `real_cost` and `fake_cost`. But this time, we'll always update the encoder. Based on the performance of the real/fake costs, then we'll update generator and discriminator networks. This will take a long time to produce something nice, but not nearly as long as the regular GAN network despite the additional parameters of the encoder and variational networks. Be sure to monitor the reconstructions to understand when your network has reached the capacity of its learning! For reference, on Celeb Net, I would use about 5 layers in each of the Encoder, Generator, and Discriminator networks using as input a 100 x 100 image, and a minimum of 200 channels per layer. This network would take about 1-2 days to train on an Nvidia TITAN X GPU.
while epoch_i < n_epochs:
    # Advance the epoch every n_files // batch_size batches.
    # BUGFIX: the original tested `epoch_i % ...`, which fired once
    # (epoch_i == 0), bumped epoch_i to 1, and then never fired again —
    # so epoch_i never reached n_epochs and the loop could not terminate.
    if batch_i % (n_files // batch_size) == 0:
        batch_i = 0
        epoch_i += 1
        print('---------- EPOCH:', epoch_i)
    batch_i += 1
    batch_xs = sess.run(batch) / 255.0
    # Always update the encoder; record the discriminator's real/fake
    # costs to decide whether the other two networks also get updates.
    real_cost, fake_cost, _ = sess.run([
        loss_real, loss_fake, opt_enc],
        feed_dict={
            X: batch_xs,
            is_training: True})
    real_cost = -np.mean(real_cost)
    fake_cost = -np.mean(fake_cost)
    gen_update = True
    dis_update = True
    # Hold the two networks near equilibrium: skip the generator when the
    # discriminator is too weak, skip the discriminator when too strong.
    if real_cost > (equilibrium + margin) or \
       fake_cost > (equilibrium + margin):
        gen_update = False
    if real_cost < (equilibrium - margin) or \
       fake_cost < (equilibrium - margin):
        dis_update = False
    # Never stall completely: if both would be skipped, update both.
    if not (gen_update or dis_update):
        gen_update = True
        dis_update = True
    if gen_update:
        sess.run(opt_gen, feed_dict={
            X: batch_xs,
            is_training: True})
    if dis_update:
        sess.run(opt_dis, feed_dict={
            X: batch_xs,
            is_training: True})
    if batch_i % 50 == 0:
        print('real:', real_cost, '/ fake:', fake_cost)
        # Plot example reconstructions from latent layer
        recon = sess.run(G, feed_dict={
            Z: zs,
            is_training: False})
        recon = np.clip(recon, 0, 1)
        m1 = utils.montage(recon.reshape([-1] + crop_shape),
                           'imgs/manifold_%08d.png' % t_i)
        # Plot example reconstructions
        recon = sess.run(G, feed_dict={
            X: test_xs,
            is_training: False})
        recon = np.clip(recon, 0, 1)
        m2 = utils.montage(recon.reshape([-1] + crop_shape),
                           'imgs/reconstruction_%08d.png' % t_i)
        fig, axs = plt.subplots(1, 2, figsize=(15, 10))
        axs[0].imshow(m1)
        axs[1].imshow(m2)
        plt.show()
        t_i += 1
    if batch_i % 200 == 0:
        # Save the variables to disk.
        # BUGFIX: the original referenced an undefined `ckpt_name`
        # (NameError); save under the name the restore code above loads.
        save_path = saver.save(sess, "./vaegan.ckpt",
                               global_step=batch_i,
                               write_meta_graph=False)
        print("Model saved in file: %s" % save_path)
# +
# One of the threads has issued an exception. So let's tell all the
# threads to shutdown.
coord.request_stop()
# Wait until all threads have finished.
coord.join(threads)
# Clean up the session.
sess.close()
# -
# -
# <a name="part-3---latent-space-arithmetic"></a>
# # Part 3 - Latent-Space Arithmetic
#
# <a name="loading-the-pre-trained-model"></a>
# ## Loading the Pre-Trained Model
#
# We're now going to work with a pre-trained VAEGAN model on the Celeb Net dataset. Let's load this model:
# Start from a clean graph before importing the pretrained model.
tf.reset_default_graph()
from libs import celeb_vaegan as CV
net = CV.get_celeb_vaegan_model()
# We'll load the graph_def contained inside this dictionary. It follows the same idea as the `inception`, `vgg16`, and `i2v` pretrained networks. It is a dictionary with the key `graph_def` defined, with the graph's pretrained network. It also includes `labels` and a `preprocess` key. We'll have to do one additional thing which is to turn off the random sampling from variational layer. This isn't really necessary but will ensure we get the same results each time we use the network. We'll use the `input_map` argument to do this. Don't worry if this doesn't make any sense, as we didn't cover the variational layer in any depth. Just know that this is removing a random process from the network so that it is completely deterministic. If we hadn't done this, we'd get slightly different results each time we used the network (which may even be desirable for your purposes).
sess = tf.Session()
g = tf.get_default_graph()
# Mapping the variational noise input to zeros makes the network
# deterministic: it always decodes the mean encoding.
tf.import_graph_def(net['graph_def'], name='net', input_map={
        'encoder/variational/random_normal:0': np.zeros(512, dtype=np.float32)})
names = [op.name for op in g.get_operations()]
print(names)
# Now let's get the relevant parts of the network: `X`, the input image to the network, `Z`, the input image's encoding, and `G`, the decoded image. In many ways, this is just like the Autoencoders we learned about in Session 3, except instead of `Y` being the output, we have `G` from our generator! And the way we train it is very different: we use an adversarial process between the generator and discriminator, and use the discriminator's own distance measure to help train the network, rather than pixel-to-pixel differences.
# Grab the imported graph's input image X, latent encoding Z, and
# decoded/generated image G by tensor name.
X = g.get_tensor_by_name('net/x:0')
Z = g.get_tensor_by_name('net/encoder/variational/z:0')
G = g.get_tensor_by_name('net/generator/x_tilde:0')
# Let's get some data to play with:
files = datasets.CELEB()
img_i = 50
img = plt.imread(files[img_i])
plt.imshow(img)
# Now preprocess the image, and see what the generated image looks like (i.e. the lossy version of the image through the network's encoding and decoding).
p = CV.preprocess(img)
# Round-trip the image through encoder + decoder (p[np.newaxis] adds the
# batch dimension).
synth = sess.run(G, feed_dict={X: p[np.newaxis]})
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(p)
axs[1].imshow(synth[0] / synth.max())
# So we lost a lot of details but it seems to be able to express quite a bit about the image. Our inner most layer, `Z`, is only 512 values yet our dataset was 200k images of 64 x 64 x 3 pixels (about 2.3 GB of information). That means we're able to express our nearly 2.3 GB of information with only 512 values! Having some loss of detail is certainly expected!
#
# <a name="exploring-the-celeb-net-attributes"></a>
# ## Exploring the Celeb Net Attributes
#
# Let's now try and explore the attributes of our dataset. We didn't train the network with any supervised labels, but the Celeb Net dataset has 40 attributes for each of its 200k images. These are already parsed and stored for you in the `net` dictionary:
net.keys()
len(net['labels'])
net['labels']
# Let's see what attributes exist for one of the celeb images:
plt.imshow(img)
# List the attribute names whose flag is set for this image.
[net['labels'][i] for i, attr_i in enumerate(net['attributes'][img_i]) if attr_i]
# <a name="find-the-latent-encoding-for-an-attribute"></a>
# ## Find the Latent Encoding for an Attribute
#
# The Celeb Dataset includes attributes for each of its 200k+ images. This allows us to feed into the encoder some images that we know have a *specific* attribute, e.g. "smiling". We store what their encoding is and retain this distribution of encoded values. We can then look at any other image and see how it is encoded, and slightly change the encoding by adding the encoding of our smiling images to it! The result should be our image but with more smiling. That is just insane and we're going to see how to do it. First lets inspect our latent space:
Z.get_shape()
# We have 512 features that we can encode any image with. Assuming our network is doing an okay job, let's try to find the `Z` of the first 100 images with the 'Bald' attribute:
bald_label = net['labels'].index('Bald')
bald_label
# Let's get all the bald image indexes:
bald_img_idxs = np.where(net['attributes'][:, bald_label])[0]
bald_img_idxs
# Now let's just load 100 of their images:
# ([..., :3] drops any alpha channel.)
bald_imgs = [plt.imread(files[bald_img_i])[..., :3]
             for bald_img_i in bald_img_idxs[:100]]
# Let's see if the mean image looks like a good bald person or not:
plt.imshow(np.mean(bald_imgs, 0).astype(np.uint8))
# Yes that is definitely a bald person. Now we're going to try to find the encoding of a bald person. One method is to try and find every other possible image and subtract the "bald" person's latent encoding. Then we could add this encoding back to any new image and hopefully it makes the image look more bald. Or we can find a bunch of bald people's encodings and then average their encodings together. This should reduce the noise from having many different attributes, but keep the signal pertaining to the baldness.
#
# Let's first preprocess the images:
# Convert each image to the network's expected input (crop + scale).
bald_p = np.array([CV.preprocess(bald_img_i) for bald_img_i in bald_imgs])
# Now we can find the latent encoding of the images by calculating `Z` and feeding `X` with our `bald_p` images:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# Encode the preprocessed bald images: feed them as X, fetch Z.
bald_zs = sess.run(Z, feed_dict={X: bald_p})
# Now let's calculate the mean encoding:
# keepdims preserves the batch axis, giving shape (1, n_code).
bald_feature = np.mean(bald_zs, 0, keepdims=True)
bald_feature.shape
# Let's try and synthesize from the mean bald feature now and see how it looks:
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# Decode the mean bald encoding back to an image by feeding it as Z.
bald_generated = sess.run(G, feed_dict={Z: bald_feature})
plt.imshow(bald_generated[0] / bald_generated.max())
# <a name="latent-feature-arithmetic"></a>
# ## Latent Feature Arithmetic
#
# Let's now try to write a general function for performing everything we've just done so that we can do this with many different features. We'll then try to combine them and synthesize people with the features we want them to have...
def get_features_for(label='Bald', has_label=True, n_imgs=50):
    """Mean latent encoding of images that do (or don't) carry an attribute.

    Samples up to `n_imgs` random Celeb Net images whose `label` attribute
    equals `has_label`, encodes them, and returns the mean of their
    latent codes.
    """
    attr_col = net['labels'].index(label)
    matching = np.where(net['attributes'][:, attr_col] == has_label)[0]
    # Random subset so repeated calls explore different images.
    chosen = np.random.permutation(matching)[:n_imgs]
    imgs = [plt.imread(files[idx])[..., :3]
            for idx in chosen]
    preprocessed = np.array([CV.preprocess(im) for im in imgs])
    encodings = sess.run(Z, feed_dict={X: preprocessed})
    return np.mean(encodings, 0)
# Let's try getting some attributes positive and negative features. Be sure to explore different attributes! Also try different values of `n_imgs`, e.g. 2, 3, 5, 10, 50, 100. What happens with different values?
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# Explore different attributes
z1 = get_features_for('Male', True, n_imgs=10)
z2 = get_features_for('Male', False, n_imgs=10)
z3 = get_features_for('Smiling', True, n_imgs=10)
z4 = get_features_for('Smiling', False, n_imgs=10)
b1 = sess.run(G, feed_dict={Z: z1[np.newaxis]})
b2 = sess.run(G, feed_dict={Z: z2[np.newaxis]})
b3 = sess.run(G, feed_dict={Z: z3[np.newaxis]})
b4 = sess.run(G, feed_dict={Z: z4[np.newaxis]})
fig, axs = plt.subplots(1, 4, figsize=(15, 6))
axs[0].imshow(b1[0] / b1.max()), axs[0].set_title('Male'), axs[0].grid('off'), axs[0].axis('off')
axs[1].imshow(b2[0] / b2.max()), axs[1].set_title('Not Male'), axs[1].grid('off'), axs[1].axis('off')
axs[2].imshow(b3[0] / b3.max()), axs[2].set_title('Smiling'), axs[2].grid('off'), axs[2].axis('off')
axs[3].imshow(b4[0] / b4.max()), axs[3].set_title('Not Smiling'), axs[3].grid('off'), axs[3].axis('off')
# Now let's interpolate between the "Male" and "Not Male" categories:
# Attribute direction in latent space: moving along z2 - z1 goes from
# "Male" toward "Not Male".
notmale_vector = z2 - z1
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z1 + notmale_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i], 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
# And the same for smiling:
# Same idea for the "Smiling" direction.
smiling_vector = z3 - z4
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z4 + smiling_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i] / g[i].max(), 0, 1))
    ax_i.grid('off')
# There's also no reason why we have to be within the boundaries of 0-1. We can extrapolate beyond, in, and around the space.
n_imgs = 5
# Extrapolate beyond [0, 1] along the smiling direction.
amt = np.linspace(-1.5, 2.5, n_imgs)
zs = np.array([z4 + smiling_vector*amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i], 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
# <a name="extensions"></a>
# ## Extensions
#
# [Tom White](https://twitter.com/dribnet), Lecturer at Victoria University School of Design, also recently demonstrated an alternative way of interpolating using a sinusoidal interpolation. He's created some of the most impressive generative images out there and luckily for us he has detailed his process in the arxiv preprint: https://arxiv.org/abs/1609.04468 - as well, be sure to check out his twitter bot, https://twitter.com/smilevector - which adds smiles to people :) - Note that the network we're using is only trained on aligned faces that are frontally facing, though this twitter bot is capable of adding smiles to any face. I suspect that he is running a face detection algorithm such as AAM, CLM, or ASM, cropping the face, aligning it, and then running a similar algorithm to what we've done above. Or else, perhaps he has trained a new model on faces that are not aligned. In any case, it is well worth checking out!
#
# Let's now try and use sinusoidal interpolation using his implementation in [plat](https://github.com/dribnet/plat/blob/master/plat/interpolate.py#L16-L24) which I've copied below:
def slerp(val, low, high):
    """Spherical interpolation between vectors. val has a range of 0 to 1.

    Returns `low` for val <= 0 and `high` for val >= 1; otherwise
    interpolates along the great circle between the two vectors.
    """
    if val <= 0:
        return low
    elif val >= 1:
        return high
    # Clamp the cosine to [-1, 1]: floating-point rounding can push the dot
    # product of two unit vectors slightly outside the range, which would
    # make arccos return NaN.
    cos_omega = np.clip(
        np.dot(low / np.linalg.norm(low), high / np.linalg.norm(high)),
        -1.0, 1.0)
    omega = np.arccos(cos_omega)
    so = np.sin(omega)
    if np.isclose(so, 0.0):
        # (Anti-)parallel vectors: slerp is 0/0 here, so fall back to
        # plain linear interpolation.
        return (1.0 - val) * low + val * high
    return np.sin((1.0 - val) * omega) / so * low + np.sin(val * omega) / so * high
# Spherical interpolation between the "Male" and "Not Male" encodings.
amt = np.linspace(0, 1, n_imgs)
zs = np.array([slerp(amt_i, z1, z2) for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i], 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
# It's certainly worth trying especially if you are looking to explore your own model's latent space in new and interesting ways.
#
# Let's try and load an image that we want to play with. We need an image as similar to the Celeb Dataset as possible. Unfortunately, we don't have access to the algorithm they used to "align" the faces, so we'll need to try and get as close as possible to an aligned face image. One way you can do this is to load up one of the celeb images and try and align an image to it using e.g. Photoshop or another photo editing software that lets you blend and move the images around. That's what I did for my own face...
# Load a custom (pre-aligned) face; drop any alpha channel.
img = plt.imread('parag.png')[..., :3]
img = CV.preprocess(img, crop_factor=1.0)[np.newaxis]
# Let's see how the network encodes it:
img_ = sess.run(G, feed_dict={X: img})
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(img[0]), axs[0].grid('off')
axs[1].imshow(np.clip(img_[0] / np.max(img_), 0, 1)), axs[1].grid('off')
# Notice how blurry the image is. Tom White's preprint suggests one way to sharpen the image is to find the "Blurry" attribute vector:
# Direction from "Blurry" toward "Not Blurry" in latent space.
z1 = get_features_for('Blurry', True, n_imgs=25)
z2 = get_features_for('Blurry', False, n_imgs=25)
unblur_vector = z2 - z1
z = sess.run(Z, feed_dict={X: img})
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z[0] + unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i] / g[i].max(), 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
# Notice that the image also gets brighter and perhaps other features than simply the bluriness of the image changes. Tom's preprint suggests that this is due to the correlation that blurred images have with other things such as the brightness of the image, possibly due to biases in labeling or how photographs are taken. He suggests that another way to unblur would be to synthetically blur a set of images and find the difference in the encoding between the real and blurred images. We can try it like so:
# +
from scipy.ndimage import gaussian_filter
# Build synthetically blurred copies of 100 randomly chosen dataset images,
# blurring each of the three color channels independently.
idxs = np.random.permutation(range(len(files)))
imgs = [plt.imread(files[idx_i]) for idx_i in idxs[:100]]
blurred = []
for img_i in imgs:
    img_copy = np.zeros_like(img_i)
    for ch_i in range(3):
        img_copy[..., ch_i] = gaussian_filter(img_i[..., ch_i], sigma=3.0)
    blurred.append(img_copy)
# +
# Now let's preprocess the original images and the blurred ones
imgs_p = np.array([CV.preprocess(img_i) for img_i in imgs])
blur_p = np.array([CV.preprocess(img_i) for img_i in blurred])
# And then compute each of their latent features
noblur = sess.run(Z, feed_dict={X: imgs_p})
blur = sess.run(Z, feed_dict={X: blur_p})
# -
# Mean latent difference sharp-minus-blurred approximates an "unblur" direction.
synthetic_unblur_vector = np.mean(noblur - blur, 0)
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
zs = np.array([z[0] + synthetic_unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i], 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
# For some reason, it also doesn't like my glasses very much. Let's try and add them back.
z1 = get_features_for('Eyeglasses', True)
z2 = get_features_for('Eyeglasses', False)
# Direction pointing from "no eyeglasses" toward "eyeglasses".
glass_vector = z1 - z2
z = sess.run(Z, feed_dict={X: img})
n_imgs = 5
amt = np.linspace(0, 1, n_imgs)
# Add glasses and unblur simultaneously, both scaled by the same amount.
zs = np.array([z[0] + glass_vector * amt_i + unblur_vector * amt_i for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i], 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
# Well, more like sunglasses then. Let's try adding everything in there now!
# NOTE(review): `smiling_vector` must have been computed in an earlier cell
# (not visible in this chunk) — confirm before running.
n_imgs = 5
amt = np.linspace(0, 1.0, n_imgs)
zs = np.array([z[0] + glass_vector * amt_i + unblur_vector * amt_i + amt_i * smiling_vector for amt_i in amt])
g = sess.run(G, feed_dict={Z: zs})
fig, axs = plt.subplots(1, n_imgs, figsize=(20, 4))
for i, ax_i in enumerate(axs):
    ax_i.imshow(np.clip(g[i], 0, 1))
    ax_i.grid('off')
    ax_i.axis('off')
# Well it was worth a try anyway. We can also try with a lot of images and create a gif montage of the result:
n_imgs = 5
amt = np.linspace(0, 1.5, n_imgs)
z = sess.run(Z, feed_dict={X: imgs_p})
imgs = []
# One montage frame per interpolation amount.
for amt_i in amt:
    zs = z + synthetic_unblur_vector * amt_i + amt_i * smiling_vector
    g = sess.run(G, feed_dict={Z: zs})
    m = utils.montage(np.clip(g, 0, 1))
    imgs.append(m)
gif.build_gif(imgs, saveto='celeb.gif')
# Random query string busts the browser cache so the regenerated gif reloads.
ipyd.Image(url='celeb.gif?i={}'.format(
    np.random.rand()), height=1000, width=1000)
# Exploring multiple feature vectors and applying them to images from the celeb dataset to produce animations of a face, saving it as a GIF. Recall you can store each image frame in a list and then use the `gif.build_gif` function to create a gif. Explore your own syntheses and then include a gif of the different images you create as "celeb.gif" in the final submission. Perhaps try finding unexpected synthetic latent attributes in the same way that we created a blur attribute. You can check the documentation in scipy.ndimage for some other image processing techniques, for instance: http://www.scipy-lectures.org/advanced/image_processing/ - and see if you can find the encoding of another attribute that you then apply to your own images. You can even try it with many images and use the `utils.montage` function to create a large grid of images that evolves over your attributes. Or create a set of expressions perhaps. Up to you just explore!
#
# <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
# +
imgs = []
# NOTE(review): the next line is an intentional homework placeholder and is
# NOT valid Python — replace it with your own synthesis code before running.
... DO SOMETHING AWESOME ! ...
gif.build_gif(imgs=imgs, saveto='vaegan.gif')
# -
# <a name="part-4---character-level-recurrent-neural-network"></a>
# # Part 4 - Character Level Recurrent Neural Network
#
# Please visit [session-5-part2.ipynb](session-5-part2.ipynb) for the rest of the homework!
| session-5/session-5-part-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:rbc-dev] *
# language: python
# name: conda-env-rbc-dev-py
# ---
# + [markdown] slideshow={"slide_type": "subslide"}
# # Remote Backend Compiler - RBC
# *by <NAME>*
#
# ## Statement of the problem
#
# *Compile and run user-defined functions in a language-agnostic JIT enabled program semi-remotely.*
#
# ### Semi-remote compilation
#
# - *compiler frontend*: user-written source code is parsed and transformed to LLVM IR in client program
# - the client program sends the LLVM IR over network to a server program
# - *compiler backend*: the server program compiles the LLVM IR to machine code (that can be executed on the server's CPU or GPU devices)
#
# ### Constraints
#
# - A user-defined function is defined in Python or C/C++
# (or any other language with LLVM tools)
# - Client host runs on a 32- or 64-bit OS: Linux, MacOSX, Windows
# - Server host runs on a 64-bit Linux
#
# ## Solution
#
# - *Remote Backend Compiler - RBC*: https://github.com/xnd-project/rbc
# - A Python-to-Python prototype uses Numba for LLVM IR generation and Numba llvmlite for machine code compilation
# - A Python-to-SQL application uses Numba for LLVM IR generation and OmniSciDB for machine code compilation
# * OmniSciDB is a GPU enabled SQL database server
# * OmniSciDB uses LLVM Compiler C++ library for JIT compilation
# * https://www.omnisci.com/
#
# # Demo of the prototype
#
# 1. Start server
#
# ```python
# import rbc
# rbc.RemoteJIT(host='localhost', port=7890).start_server()
# ```
#
# 2. Client example program
# -
from rbc import RemoteJIT
# Client-server connector, RBC jit-decorator
rjit = RemoteJIT(host='localhost', port=7890)
# A user-defined function: the decorator registers the int64 signature;
# LLVM IR is generated locally, machine code is compiled on the server.
@rjit('int64(int64, int64)')
def foo(a, b):
    return a + b
# +
# Generate LLVM IR, useful for debugging
#print(foo)
# -
# Triggers:
# - local frontend compile,
# - remote backend compile,
# - and remote call
foo(1, 3)
foo(2, 4) # reuses remote compile result and triggers remote call
# NOTE(review): only the int64 signature is registered at this point, so the
# float call presumably fails — its error message is printed below.
try:
    foo(1.2, 3.4)
except Exception as msg:
    print(msg)
# update with new signatures
foo.signature('double(double, double)')
foo.signature('complex128(complex128, complex128)');
foo(1, 3.4j) # triggers remote compile and call
foo(1.2, 3) # triggers remote compile and call
# # Demo of the application
#
# Guilherme...
#
# .
#
# .
#
# .
# 1. Start the server:
# ```bash
# $ mkdir data && omnisci_initdb data
# $ omnisci_server --enable-runtime-udf --enable-table-functions
# ```
#
#
# 2. Client example program
import rbc.omniscidb
omnisci = rbc.omniscidb.RemoteOmnisci()
omnisci.version
# Register a UDF with the OmniSciDB server so it can be called from SQL.
@omnisci('int32(int32, int32)')
def foo(a, b):
    return a + b
# sql_execute returns a 2-tuple; element [1] iterates the result rows.
list(omnisci.sql_execute('select foo(1, 2)')[1])
| notebooks/rbc-intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#https://eli5.readthedocs.io/en/latest/tutorials/sklearn_crfsuite.html
# %matplotlib inline
import matplotlib.pyplot as plt
# Use the ggplot style for all plots in this notebook.
plt.style.use('ggplot')
# +
from itertools import chain
import nltk
import sklearn
import scipy.stats
from sklearn.metrics import make_scorer
# `sklearn.cross_validation` and `sklearn.grid_search` were removed in
# scikit-learn 0.20; the same names now live in `sklearn.model_selection`.
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RandomizedSearchCV
import sklearn_crfsuite
from sklearn_crfsuite import scorers
from sklearn_crfsuite import metrics
# -
# Download the CoNLL-2002 NER corpus (a no-op if it is already cached).
nltk.download('conll2002')
nltk.corpus.conll2002.fileids()
# %time
# Sentences are lists of (token, POS-tag, IOB-label) tuples; Spanish splits.
train_sents = list(nltk.corpus.conll2002.iob_sents('esp.train'))
test_sents = list(nltk.corpus.conll2002.iob_sents('esp.testb'))
train_sents[0]
# # Features
# +
def word2features(sent, i):
    """Build the CRF feature dict for the token at position *i* of *sent*.

    *sent* is a sequence of (token, POS-tag, label) tuples; only the token
    and POS tag are read.  Neighbouring tokens contribute context features,
    and sentence boundaries are flagged with 'BOS' / 'EOS'.
    """
    token, tag = sent[i][0], sent[i][1]
    features = {
        'bias': 1.0,
        'word.lower()': token.lower(),
        'word[-3:]': token[-3:],
        'word.isupper()': token.isupper(),
        'word.istitle()': token.istitle(),
        'word.isdigit()': token.isdigit(),
        'postag': tag,
        'postag[:2]': tag[:2],
    }
    if i == 0:
        # First token of the sentence.
        features['BOS'] = True
    else:
        prev_token, prev_tag = sent[i - 1][0], sent[i - 1][1]
        features.update({
            '-1:word.lower()': prev_token.lower(),
            '-1:word.istitle()': prev_token.istitle(),
            '-1:word.isupper()': prev_token.isupper(),
            '-1:postag': prev_tag,
            '-1:postag[:2]': prev_tag[:2],
        })
    if i == len(sent) - 1:
        # Last token of the sentence.
        features['EOS'] = True
    else:
        next_token, next_tag = sent[i + 1][0], sent[i + 1][1]
        features.update({
            '+1:word.lower()': next_token.lower(),
            '+1:word.istitle()': next_token.istitle(),
            '+1:word.isupper()': next_token.isupper(),
            '+1:postag': next_tag,
            '+1:postag[:2]': next_tag[:2],
        })
    return features
def sent2features(sent):
    """Map every token position of *sent* to its CRF feature dict."""
    return [word2features(sent, idx) for idx, _ in enumerate(sent)]
def sent2labels(sent):
    """Extract the gold IOB label (third element) of each token tuple."""
    return [lab for _tok, _pos, lab in sent]
def sent2tokens(sent):
    """Extract the surface token (first element) of each token tuple."""
    return [tok for tok, _pos, _lab in sent]
# -
sent2features(train_sents[0])[0]
# Extract features from the data.
# +
# %time
X_train = [sent2features(s) for s in train_sents]
y_train = [sent2labels(s) for s in train_sents]
X_test = [sent2features(s) for s in test_sents]
y_test = [sent2labels(s) for s in test_sents]
# -
X_train[0][0:2]
y_train[0][0:2]
X_train[2][0:2]
y_train[2][0:2]
# # Training
# %%time
# L-BFGS optimisation with elastic-net regularisation (c1 = L1, c2 = L2).
crf = sklearn_crfsuite.CRF(
    algorithm='lbfgs',
    c1=0.1,
    c2=0.1,
    max_iterations=30,
    all_possible_transitions=False,
)
crf.fit(X_train, y_train)
# # Evaluation
# Score only the entity labels — 'O' dominates the data and would inflate the metric.
labels = list(crf.classes_)
labels.remove('O')
labels
y_pred = crf.predict(X_test)
metrics.flat_f1_score(y_test, y_pred, average='weighted', labels=labels)
# group B and I results
# Sort by entity type first (name without the B-/I- prefix), then by prefix.
sorted_labels = sorted(
    labels,
    key=lambda name: (name[1:], name[0])
)
print(metrics.flat_classification_report(
    y_test, y_pred, labels=sorted_labels, digits=3
))
# # Older tests
# NOTE(review): `eli5` is used below but never imported in this chunk — these
# cells raise NameError unless `import eli5` was run earlier; verify.
eli5.show_weights(crf, top=30)
crf = sklearn_crfsuite.CRF(
    algorithm='lbfgs',
    c1=0.1,
    c2=0.1,
    max_iterations=20,
    all_possible_transitions=True,
)
crf.fit(X_train, y_train);
eli5.show_weights(crf, top=5, show=['transition_features'])
eli5.show_weights(crf, top=10, targets=['O', 'B-ORG', 'I-ORG'])
eli5.show_weights(crf, top=10, feature_re='^word\.is',
                  horizontal_layout=False, show=['targets'])
expl = eli5.explain_weights(crf, top=5, targets=['O', 'B-LOC', 'I-LOC'])
print(eli5.format_as_text(expl))
| crf-python/crf-tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **1**. Connect to the SQLite3 database at `data/faculty.db` using the `sqlite` package or `ipython-sql` magic functions.
# 2. Find the youngest and oldest faculty member of each gender.
# 3. Find the median age of the faculty members who know Python.
#
# As SQLite3 does not provide a median function, you can create a User Defined Function (UDF) to do this. See [documentation](https://docs.python.org/2/library/sqlite3.html#sqlite3.Connection.create_function).
# 4. Arrange countries by the average age of faculty in descending order. Countries are only included in the table if there are at least 3 faculty members from that country.
# 5. Which country has the highest average body mass index (BMI) among the faculty? Recall that BMI is weight (kg) / (height (m))^2.
# 6. Do obese faculty (BMI > 30) know more languages on average than non-obese faculty?
| quiz/Quiz02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="YdTMFzueJXGi"
# + [markdown] id="ITbTJpqKJcNs"
# ## TensorFlow Object Detection API
# The TensorFlow object detection API is the framework for creating a deep learning network that solves object detection problems.
#
# There are already pretrained models in their framework which they refer to as Model Zoo. This includes a collection of pretrained models trained on the COCO dataset, the KITTI dataset, and the Open Images Dataset. These models can be used for inference if we are interested in categories only in this dataset.
#
# They are also useful for initializing your models when training on the novel dataset.
# + [markdown] id="ScnW_GNCJouz"
# ## MobileNet-SSD
# The SSD architecture is a single convolution network that learns to predict bounding box locations and classify these locations in one pass. Hence, SSD can be trained end-to-end. The SSD network consists of base architecture (MobileNet in this case) followed by several convolution layers:
#
# SSD operates on feature maps to detect the location of bounding boxes. Remember – a feature map is of the size Df * Df * M. For each feature map location, k bounding boxes are predicted. Each bounding box carries with it the following information:
#
# 1. 4 corner bounding box offset locations (cx, cy, w, h)
# 2. C class probabilities (c1, c2, …cp)
#
# SSD does not predict the shape of the box, rather just where the box is. The k bounding boxes each have a predetermined shape. The shapes are set prior to actual training. For example, in the figure above, there are 4 boxes, meaning k=4.
# + [markdown] id="kjLwKwIrJ0je"
# ## Loss in MobileNet-SSD
# With the final set of matched boxes, we can compute the loss like this:
#
# ### L = 1/N (L class + L box)
#
# Here, N is the total number of matched boxes. L class is the softmax loss for classification and ‘L box’ is the L1 smooth loss representing the error of matched boxes. L1 smooth loss is a modification of L1 loss which is more robust to outliers. In the event that N is 0, the loss is set to 0 as well.
#
#
#
# ## MobileNet
#
# The MobileNet model is based on depthwise separable convolutions which are a form of factorized convolutions. These factorize a standard convolution into a depthwise convolution and a 1 × 1 convolution called a pointwise convolution.
#
# For MobileNets, the depthwise convolution applies a single filter to each input channel. The pointwise convolution then applies a 1 × 1 convolution to combine the outputs of the depthwise convolution.
#
# A standard convolution both filters and combines inputs into a new set of outputs in one step. The depthwise separable convolution splits this into two layers – a separate layer for filtering and a separate layer for combining. This factorization has the effect of drastically reducing computation and model size.
# + [markdown] id="9heZCL6EJ-Yn"
# ## Install the Model
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="lx2UO9wdJfEh" outputId="6d046e96-52cf-4169-9fac-04004250b4bd"
# !pip install -U --pre tensorflow=="2.*"
# + colab={"base_uri": "https://localhost:8080/"} id="UhilUZgrKNgR" outputId="afe6bb22-0d80-4e35-9d89-33ec89c67830"
# !pip install pycocotools
# + [markdown] id="Cr6B0ongKjHu"
# ## Get tensorflow/models or cd to parent directory of the repository:
# + id="3ctuRlhwKS1T"
import os
import pathlib
# Ensure the TF models repo is reachable from the current directory:
# climb out of it if we are inside, otherwise clone it.
if "models" in pathlib.Path.cwd().parts:
    while "models" in pathlib.Path.cwd().parts:
        os.chdir('..')
elif not pathlib.Path('models').exists():
    # NOTE(review): `!git clone` is an IPython shell escape that only runs in a
    # notebook; as plain Python this elif body is empty and is a SyntaxError.
    # !git clone --depth 1 https://github.com/tensorflow/models
# + [markdown] id="lDm7XXmQKm0M"
# ### Compile protobufs and install the object_detection package:
# + id="6jGvTVR-KmI-" language="bash"
# cd models/research/
# protoc object_detection/protos/*.proto --python_out=.
# + colab={"base_uri": "https://localhost:8080/"} id="qpgskLFlKp9j" outputId="5e66dd2e-4885-44c6-abc0-0b805ad0a3dd" language="bash"
# cd models/research
# pip install .
# + [markdown] id="p3qSR8qmKwAY"
# ## Import the Required Libraries
# + id="M77daVA0KtQY"
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from IPython.display import display
# + colab={"base_uri": "https://localhost:8080/"} id="erUAXdKkLaof" outputId="57a373d3-6f71-4568-d71e-262e8622a50d"
# !pip install tensorflow-object-detection-api
# + [markdown] id="OWMOzKsdLFOt"
# ### Import the object detection module:
# + id="BUh2hG6EKz9S"
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# + [markdown] id="--4PF9KwLLeI"
# ### Model Preparation
# ### Loader
# + id="_WGibYt6LINJ"
def load_model(model_name):
    """Download (cached) a TF Object Detection model archive and return its
    default serving signature."""
    base_url = 'http://download.tensorflow.org/models/object_detection/'
    archive_name = model_name + '.tar.gz'
    # get_file caches the download locally and un-tars the archive.
    extracted_dir = tf.keras.utils.get_file(
        fname=model_name,
        origin=base_url + archive_name,
        untar=True)
    saved_model_dir = pathlib.Path(extracted_dir) / "saved_model"
    loaded = tf.saved_model.load(str(saved_model_dir))
    return loaded.signatures['serving_default']
# + [markdown] id="LfLfJ1erLl46"
# ### Loading label map
# Label maps map indices to category names so that when our convolution network predicts 5, we know that this corresponds to an airplane:
# + id="IrRran_uPIAk"
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/", "height": 340} id="oCRiN1_lP3ya" outputId="bf1fc850-4c71-4522-d6c9-8883a2c3d8c7"
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = 'models/research/object_detection/data/mscoco_label_map.pbtxt'
# Maps class id -> {'id': ..., 'name': ...} for the COCO label set.
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
# + [markdown] id="P40eRCm8P1Wa"
# ### For the sake of simplicity, we will test on 2 images:
# + colab={"base_uri": "https://localhost:8080/"} id="VbfrKPPwLpm3" outputId="531727e1-6533-46e2-a96b-6d3bdeb3e9fe"
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images')
TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob("*.jpg")))
TEST_IMAGE_PATHS
# + [markdown] id="ZYkcgAXdQDHo"
# ### Object Detection Model using TensorFlow API
# Load an object detection model:
# + colab={"base_uri": "https://localhost:8080/"} id="rNJR5xvPP_XT" outputId="820b3f68-bff6-4bf1-c17f-4ed08e17e3ed"
model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
detection_model = load_model(model_name)
# + [markdown] id="Fje4XGj8QNU-"
# ### Check the model’s input signature (it expects a batch of 3-color images of type int8):
# + colab={"base_uri": "https://localhost:8080/"} id="GfZOPaa3QGHy" outputId="6b92ec86-7b20-4940-9016-87a5a805f79a"
print(detection_model.inputs)
detection_model.output_dtypes
# + [markdown] id="fGNYBSIhQSUJ"
# ### Add a wrapper function to call the model and cleanup the outputs:
# + id="oHpoJ6Y6QQEo"
def run_inference_for_single_image(model, image):
    """Run the detection signature on a single image array.

    Returns a dict of numpy arrays with the batch dimension removed:
    'detection_boxes', 'detection_classes' (int64), 'detection_scores',
    'num_detections', and 'detection_masks_reframed' when masks are present.
    """
    image = np.asarray(image)
    # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis,...]
    # Run inference
    output_dict = model(input_tensor)
    # All outputs are batches tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(output_dict.pop('num_detections'))
    output_dict = {key:value[0, :num_detections].numpy()
                   for key,value in output_dict.items()}
    output_dict['num_detections'] = num_detections
    # detection_classes should be ints.
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
    # Handle models with masks:
    if 'detection_masks' in output_dict:
        # Reframe the bbox mask to the image size.
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            output_dict['detection_masks'], output_dict['detection_boxes'],
            image.shape[0], image.shape[1])
        # Threshold the soft masks at 0.5 to get binary uint8 masks.
        detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
                                           tf.uint8)
        output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
    return output_dict
# + [markdown] id="YO8JHYEPQX-C"
# ### Run it on each test image and show the results:
# + id="TdjllTJOQVnc"
def show_inference(model, image_path):
    """Detect objects in the image at *image_path* and display the annotated result."""
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = np.array(Image.open(image_path))
    # Actual detection.
    output_dict = run_inference_for_single_image(model, image_np)
    # Visualization of the results of a detection (boxes are drawn in place on image_np).
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)
    display(Image.fromarray(image_np))
# + [markdown] id="Ce4W3mURQtkv"
# ### Below is the example image tested on ssd_mobilenet_v1_coco (MobileNet-SSD trained on the COCO dataset):
#
# + id="ct9CQ6wHQbvJ"
# Run detection and display the annotated result for every bundled test image.
for image_path in TEST_IMAGE_PATHS:
    show_inference(detection_model, image_path)
# + id="82gOnvd-QfaY"
| TensorFlow_Object_Detection_API.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # numpy.random Package Investigation¶
# ### This is a Solution to the first assessment on Programming for Data Analysis¶
# ### Author: <NAME> (G00364694)¶ using my github account dewaledr
# #### Start Date: October 12, 2018
# ### Tasks in this assignment:
# 1. Explain the overall purpose of the package.
# 2. Explain the use of the “Simple random data” and “Permutations” functions.
# 3. Explain the use and purpose of at least five “Distributions” functions.
# 4. Explain the use of seeds in generating pseudorandom numbers.
# ##### Installation Instructions:¶
# - It is highly recommended you install Python using the Anaconda distribution to make sure all underlying dependencies (such as Linear Algebra libraries) all sync up. It can also be installed onto an existing system with: conda install numpy
#
# ###### Once you've installed NumPy you can import it as a library: Import numpy as np
#
# # 1. NumPy¶
# NumPy (or Numpy) is a Linear Algebra Library for Python. Numpy is incredibly fast, as it has bindings to C libraries. NumPy's arrays are more compact than Python lists -- a list of lists as you describe, in Python, would take at least 20 MB or so, while a NumPy 3D array with single-precision floats in the cells would fit in 4 MB. Access in reading and writing items is also faster with NumPy [1].
# NumPy is the fundamental package for scientific computing with Python. It contains among other things: a powerful N-dimensional array object sophisticated (broadcasting) functions tools for integrating C/C++ and Fortran code useful linear algebra, Fourier transform, and random number capabilities Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can also be defined.
# [2].
#
# Broadcasting is the term used to describe the implicit element-by-element behavior of operations; generally speaking, in NumPy all operations, not just arithmetic operations, but logical, bit-wise, functional, etc., behave in this implicit element-by-element fashion, i.e., they broadcast.
# For example a and b could be multidimensional arrays of the same shape, or a scalar and an array, or even two arrays with different shapes, provided that the smaller array is “expandable” to the shape of the larger in such a way that the resulting broadcast is unambiguous. For detailed “rules” of broadcasting see numpy.doc.broadcasting.
# NumPy fully supports an object-oriented approach, starting, once again, with ndarray. For example, ndarray is a class, possessing numerous methods and attributes. Many of its methods mirror functions in the outer-most NumPy namespace, giving the programmer complete freedom to code in whichever paradigm she prefers and/or which seems most appropriate to the task at hand.
# [3].
#
# #### Numpy has many built-in functions and capabilities. In this notebook, I will focus on some of the most important aspects of Numpy: vectors, arrays, matrices, and number generation.
# At the core of the NumPy package, is the ndarray object. This encapsulates n-dimensional arrays of homogeneous data types, with many operations being performed in compiled code for performance. There are several important differences between NumPy arrays and the standard Python sequences:
# 1. NumPy arrays have a fixed size at creation, unlike Python lists (which can grow dynamically). Changing the size of an ndarray will create a new array and delete the original.
# 2. The elements in a NumPy array are all required to be of the same data type, and thus will be the same size in memory. The exception: one can have arrays of objects, thereby allowing for arrays of different sized elements.
# 3. NumPy arrays facilitate advanced mathematical and other types of operations on large numbers of data. Typically, such operations are executed more efficiently and with less code than is possible using Python’s built-in sequences.
# 4. A growing plethora of scientific and mathematical Python-based packages are using NumPy arrays; though these typically support Python-sequence input, they convert such input to NumPy arrays prior to processing, and they often output NumPy arrays. In other words, in order to efficiently use much (perhaps even most) of today’s scientific/mathematical Python-based software, just knowing how to use Python’s built-in sequence types is insufficient - one also needs to know how to use NumPy arrays.
#
# ### Numpy Arrays¶
# Numpy arrays essentially come in two flavors: vectors and matrices. Vectors are strictly 1-d arrays and matrices are 2-d (note: a matrix can still have only one row or one column).
# #### Creating NumPy Arrays¶
# 1. From a Python List: We can create an array by directly converting a list or list of lists:
import numpy as np
# A plain Python list to convert below.
my_list = [1,2,3]
my_list
# 2. By casting a list to numpy array
np.array(my_list)
# A list of lists casts to a 2-D array (matrix).
my_matrix = [[3,2,1],[4,6,5],[7,8,9]]
np.array(my_matrix)
# ### There are lots of built-in ways to generate Arrays
# 1. arange: Return evenly spaced values within a given interval.
# 2. zeros and
# 3. ones: Generate arrays of zeros or ones
# 4. linspace: Return evenly spaced numbers over a specified interval.
# 5. eye: Creates an identity matrix. Usually a square matrix useful in Linear algebra
#arange - Return evenly spaced values within a given interval 1 default when not specified.
np.arange(0,10)
np.arange(0,11,2) #Return evenly spaced values within a given interval, 2 specified.
#2. zeros - Generate arrays of zeros, shape given
np.zeros(3)
np.zeros((5,5)) #Shape given 5x5
#3. ones - Generate arrays of ones, shape given
np.ones(3)
np.ones((3,3))
#4. linspace: Return evenly spaced numbers over a specified interval.
np.linspace(0,10,3)
np.linspace(0,10,50) #Returns 50 1D array of evenly spaced numbers from 0 - 10
#5. eye: Creates an identity matrix. Usually a square matrix useful in Linear algebra
np.eye(4)
# ### Array Attributes and Methods
# 1. reshape - One of the most useful methods that could be used on an array is the reshape method. Reshape will return an array containing the same data type in new shape.
# 2. max - These are all useful methods for finding max or min values or to find their index locations using argmin or argmax
# 3. min
# 4. argmax
# 5. argmin
# 6. shape - Shape is an attribute that arrays have (not a method):
# 7. dtype - We can also grab the data type of the object in the array:
# Demonstrate common ndarray attributes/methods on a 0..24 vector.
arr = np.arange(25) # Return a 1D array values between 0 and 24
arr
ranarr = np.random.randint(0,50,10) #Return an array of 10 random integers between 0 and 50
ranarr
# Will return a 5x5 2D array from the original array (arr).
# Note that arr must have 25 members (5x5), else reshape will fail.
arr.reshape(5,5)
ranarr.max() #return the maximum value in ranarr
ranarr.min() #return the minimum value in ranarr
ranarr.argmin() #return the index location of min value in ranarr
ranarr.argmax() # return the index location of the max value in ranarr
# Will return a 5x5 2D array from the original array (arr).
# Note that arr must have 25 members (i.e. rows of 5 x columns of 5 in this case), else reshape will fail.
arr.reshape(5,5)
print('The code shown next will generate an error if run because the shape size does not match the array size...')
# +
# arr.reshape(5,3) #--- This will FAIL
# -
#Shape: Shape is an attribute that arrays have (not a method):
# Vector:
arr.shape
# Notice here I get 25 comma and that's indicating that arr array was just a one dimensional vector.
# Notice the two sets of brackets
arr.reshape(1,25)
arr.reshape(1,25).shape # (1, 25) returns a 1x25 array
# arr.reshape(25,1)
arr.reshape(25,1).shape # (25, 1) returns a - 25x1 array
#arr.reshape(5,5)
arr.reshape(5,5).shape # (5, 5) returns a - 5x5 array
# You can also grab the data type of the object in the array:
arr.dtype
# ## ... Please NOTE:
# ###### Rather than saying import
# ###### =====> numpy.random as np
# #### We can use a specific import option as shown here, i.e.:
# ###### =====> from numpy.random import rand, randint, randn
# - So, you may just use for example, randint(4,50) rather than the longer np.random.randint(4,50).[4]
#
# # 2. Simple random data
# ## 2.1. Random
# Numpy also has lots of ways to create random number arrays, for example:
# 1. rand: numpy.random.rand(d0, d1, ..., dn)
# 2. numpy.random.randn(d0, d1, ..., dn)
# 3. numpy.random.randint(low, high=None, size=None, dtype='l')
# ### 1: rand(d0, d1, ..., dn)
# Create an array of the given shape and populate it with random samples from a uniform distribution over "0, 1" Mathematically, this convention [ [0, 1) ]means first element (0) is inclusive while the second element (1) is not.
#
# ##### NOTE: np.random + TAB shows the existing methods for np.random
# np.random.rand(2) will create an array of shape 2 and populate the array with a random sample
# from a uniform distribution over 0 to 1
# (no seed is set here, so the values differ on every run)
np.random.rand(2)
# And that means if I want just five one dimensional array of random numbers uniformly distributed from 0 to 1
# I can pass in a single digit and I'll get back that much one-dimensional array, i.e 5 as shown next
np.random.rand(5)
# And if I want a 3 by 4 matrix of random numbers I just pass and 3,4
np.random.rand(3,4)
# ### 2: randn(d0, d1, …, dn).
# If we want to return a sample or many samples from the standard normal distribution or a Gaussian distribution instead of using rand we can use randn
# Return a sample (or samples) from the "standard normal" distribution. Unlike rand which is uniform:
# and this will return numbers not from a uniform distribution from 0 to 1 but instead from a
# standard normal distribution center around zero.
# And that means I can go ahead and pass in 9 and I get nine random numbers from a standard normal distribution.
np.random.randn(9)
# +
# Again if I want this to be two dimensional I can just pass in two dimensions.
# So for a four by five I'll pass in 4,5.
# Note: we’re not passing a tuple here separate argument separated by a comma.
# And we can see by the set of two brackets is a two dimensional matrix as an output.
np.random.randn(4,5)
# +
# A plot of a normal distribution curve or a Gaussian distribution curve is shown next:
# REF: https://stackoverflow.com/questions/14873203/plotting-of-1-dimensional-gaussian-distribution-function
# +
from matplotlib import pyplot as mp
# import numpy as np
def gaussian(x, mu, sig):
    """Unnormalized Gaussian curve: exp(-(x - mu)^2 / (2 * sig^2)).

    Works elementwise on scalars or numpy arrays; peak value is 1 at x == mu.
    """
    return np.exp(-((x - mu) ** 2) / (2.0 * sig ** 2))
# Plot the standard normal curve (mu=0, sigma=1) sampled at 100 points on [-5, 5];
# the loop makes it easy to overlay additional (mu, sig) pairs later.
for mu, sig in [(0, 1)]:
    mp.plot(gaussian(np.linspace(-5, 5, 100), mu, sig))
mp.show()
# -
# ### 3: randint(low, high=None, size=None, dtype='l')
# randint(low[, high, size, dtype]) Returns random integers from low (inclusive) to high (exclusive).
# - np.random.randint(SHIFT + TAB … will show the description of expected arguments…
#
# This will produce just one random integer between 1 and 100
# A single random integer from [1, 100): low is inclusive, high is exclusive
np.random.randint(1,100)
# The optional third argument is the number of samples: here 10 random
# integers drawn from [1, 100) — inclusive on the low end, exclusive on the high end.
np.random.randint(1,100,10)
# ## 2.2. Permutations
# A permutation of a set of objects is an arrangement of the objects in a certain order.
# For example:
#
# The possible permutations of letters a, b and c is: abc/ acb/ bac/ bca/ cab/ cba/
#
# Numpy also has two functions to handle elements under permutations:
# 1. numpy.random.shuffle(x)
# 2. numpy.random.permutation(x)
# ### shuffle:
# numpy.random.shuffle(x) modify a sequence in-place by shuffling its contents.
# This function only shuffles the array along the first axis of a multi-dimensional array.
# The order of sub-arrays is changed but their contents remains the same.
# Example:
# Build a 1-D array [0..9] to demonstrate in-place shuffling
arr = np.arange(10)
arr
# shuffle modifies the array in place and returns None
np.random.shuffle(arr)
arr
# Create an array of 9 elements, then reshape it to a 3x3 matrix, then shuffle...
arr = np.arange(9).reshape((3, 3))
arr
# For a 2-D array only the rows (first axis) are reordered;
# the contents of each row are left untouched
np.random.shuffle(arr)
arr
# ### permutation:
# numpy.random.permutation(x)¶
# Randomly permute a sequence, or return a permuted range.
# Example:
# permutation(10) returns a shuffled copy of np.arange(10);
# unlike shuffle, the input is not modified in place
np.random.permutation(10)
# If x is a multi-dimensional array, it is only shuffled along its first index
# Example: only the rows of the 3x3 matrix are permuted
arr = np.arange(9).reshape((3, 3))
np.random.permutation(arr)
# # 3. Distributions
# ## Discrete Distributions:
# Discrete probability distributions are also called probability mass functions. Examples are:
# 1. Uniform Distribution
# 2. Binomial Distribution
# 3. Poisson Distribution
#
# ## Continuous Distributions:
# Continuous probability distributions are also called probability density functions. Examples are:
# 1. Normal Distribution
# 2. Exponential Distribution
# 3. Beta Distribution
#
# ### 3.1. uniform([low, high, size])
# +
# Draw samples from a uniform distribution.
# Samples are uniformly distributed over the half-open interval [low, high) (includes low, but excludes high).
# In other words, any value within the given interval is equally likely to be drawn by uniform.
# size=None returns a single float drawn uniformly from [0.0, 1.0)
np.random.uniform(low=0.0, high=1.0, size=None)
# -
# Draw 1000 samples uniformly from the half-open interval [-1, 0)
s = np.random.uniform(-1,0,1000)
# Display the histogram of the samples, along with the probability density function:
import matplotlib.pyplot as plt
count, bins, ignored = plt.hist(s, 10, density=True)
# The uniform density over an interval of width 1 is the constant 1 (red line)
plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
plt.show()
# ### 3.2. binomial(n, p, size=None)
# “Binomial” means there are two discrete, mutually exclusive outcomes of a trial. heads or tails/ on or off/ success or failure
# A Bernoulli Trial is a random experiment in which there are only two possible outcomes - success or failure
# A series of trials n will follow a binary distribution so long as:
# a) the probability of success p is constant
# b) trials are independent of one another
# numpy.random.binomial(n, p, size=None)...Draw samples from a binomial distribution.
# Samples are drawn from a binomial distribution with specified parameters,
# n trials and p probability of success where n an integer >= 0 and p is in the interval [0,1].
# (n may be input as a float, but it is truncated to an integer in use)
n, p = 10, 0.5  # n: number of trials per experiment, p: success probability per trial
# Each of the 1000 entries is the number of successes (heads) in 10 fair-coin flips
np.random.binomial(n, p, 1000)
# ### 3.3. poisson(lam=1.0, size=None)
# A binomial distribution considers the number of successes out of n trials
# A Poisson Distribution considers the number of successes per unit of time*
# over the course of many units or any other continuous unit, e.g. distance
# Poisson distribution draw samples from a Poisson distribution.
# The Poisson distribution is the limit of the binomial distribution for large N.
# import numpy as np
s = np.random.poisson(10, 10000)  # 10000 counts centered around the rate lam=10
# Display histogram of the sample:
import matplotlib.pyplot as plt
count, bins, ignored = plt.hist(s, 14, density=True)
plt.show()
# ### 3.4 normal(loc=0.0, scale=1.0, size=None)
# Draw random samples from a normal (Gaussian) distribution.
# The probability density function of the normal distribution, first derived by De Moivre and 200 years later by both Gauss and Laplace independently [2], is often called the bell curve because of its characteristic shape (see the example below).
# The normal distributions occurs often in nature. For example, it describes the commonly occurring distribution of samples influenced by a large number of tiny, random disturbances, each with its own unique distribution [5]
# +
# Example: Draw samples from the distribution:
mu, sigma = 0, 0.1  # mean and standard deviation of the distribution
s = np.random.normal(mu, sigma, 1000)
# Verify the mean and the variance:
# NOTE(review): only the last expression of a notebook cell is echoed, so the
# first comparison's result is silently dropped — wrap both in print() to see them.
abs(mu - np.mean(s)) < 0.01
abs(sigma - np.std(s, ddof=1)) < 0.01
# Display the histogram of the samples, along with the probability density function:
# import matplotlib.pyplot as plt
count, bins, ignored = plt.hist(s, 30, density=True)
# Overlay the analytical normal PDF evaluated at the bin edges (red line)
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu)**2 / (2 * sigma**2) ), linewidth=2, color='r')
plt.show()
# -
# ### 3.4 laplace([loc, scale, size])
# numpy.random.laplace(loc=0.0, scale=1.0, size=None)¶
# Draw samples from the Laplace or double exponential distribution with specified location (or mean) and scale (decay).
# The Laplace distribution is similar to the Gaussian/normal distribution, but is sharper at the peak and has fatter tails. It represents the difference between two independent, identically distributed exponential random variables
# The first law of Laplace, from 1774, states that the frequency of an error can be expressed as an exponential function of the absolute magnitude of the error, which leads to the Laplace distribution. For many problems in economics and health sciences, this distribution seems to model the data better than the standard Gaussian distribution
# Example: Draw samples from the distribution
loc, scale = 0., 1.  # location (mean) and scale (decay) parameters
s = np.random.laplace(loc, scale, 1000)
# Display the histogram of the samples, along with the probability density function:
# import matplotlib.pyplot as plt
count, bins, ignored = plt.hist(s, 30, density=True)
x = np.arange(-8., 8., .01)
# Laplace PDF: exp(-|x - loc| / scale) / (2 * scale), drawn in red
pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
plt.plot(x, pdf, linewidth=3, color='r')
# Plot Gaussian for comparison (yellow): Laplace's sharper peak and fatter tails stand out
g = (1/(scale * np.sqrt(2 * np.pi)) * np.exp(-(x - loc)**2 / (2 * scale**2)))
plt.plot(x,g, linewidth=3, color='y')
# # 4… The use of seeds in generating pseudorandom numbers
# Pseudo-random number generators work by performing some operation on a value.
# Generally this value is the previous number generated by the generator. However, the first time you use the generator, there is no previous value.
# Seeding a pseudo-random number generator gives it its first "previous" value. Each seed value will correspond to a sequence of generated values for a given random number generator. That is, if you provide the same seed twice, you get the same sequence of numbers twice. Generally, you want to seed your random number generator with some value that will change each execution of the program. For instance, the current time is a frequently-used seed. The reason why this doesn't happen automatically is so that if you want, you can provide a specific seed to get a known sequence of numbers, for example by using the decimal portion of the irrational number pi.[6]
#
# #### Seeding will enable me get the same result each time I run this dataframe
import pandas as pd
#import numpy as np
from numpy.random import randn
np.random.seed(101)  # fixing the seed makes the random DataFrame reproducible across runs
df = pd.DataFrame(randn(5,4), index='A B C D E'.split(), columns='W X Y Z'.split())
print("DataFrame from above")
print(df)
print("\nGet a column 'X' from above dataframe. This is actually a Series")
print (df['X'])
# ### References:
# - [1][https://stackoverflow.com/questions/993984/what-are-the-advantages-of-numpy-over-regular-python-lists]
# - [2][https://cn.bing.com/search?q=what+is+python+numpy.random+package&form=EDGEAR&qs=PF&cvid=886e21aff31446b69a150e0ea3e75902&cc=CN&setlang=en-US&PC=LCTS]
# - [3] [https://docs.scipy.org/doc/numpy/user/whatisnumpy.html]
# - [4] [Python for Data Analysis (<NAME>), 2nd Edition. Pages 85 – 110]
# - [5] [ <NAME>r., “Central Limit Theorem” in “Probability, Random Variables and Random Signal Principles”, 4th ed., 2001, pp. 51, 51, 125.]
# - [6] [https://stackoverflow.com/questions/22639587/random-seed-what-does-it-do]
#
#
# ## END...
| numpyRandom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + init_cell=true
# %logstop
# %logstart -rtq ~/.logs/PY_Intro.py append
import seaborn as sns
sns.set()
# -
# # Introduction to Data Science
# Welcome! We will be taking a deep dive into Data Science over the next few months. This course will be divided into two parts, the first component will cover basic Python and programming while the second component will cover material in Data Science ranging from basic programming to data cleaning and aggregation. The basic syllabus is as follows
#
# |Week|Material|
# |---|---|
# |1| Programming and Python fundamentals|
# |2| Data structures|
# |3| Algorithms, object-oriented programming, & Pythonic style|
# |4| Reading and writing data|
# |5| Python data science packages|
# |6| SQL|
# |7| Data munging|
# |8| Object-relation mapping|
#
# The second component is a series of miniprojects which will test your mastery of the lecture subjects. Data Science is not a spectator sport, so it's important to practice; the miniprojects are a great way to do that. There will be a series of five miniprojects which will need to be completed by the end of the course.
#
# There is an online discussion board where we can help each other learn, share code snippets, and ask questions. The instructor will be monitoring the board and watching for questions, but it is also important that you interact with your peers. Remember the best learning often happens when you need to explain something to someone else.
# ## Learning Platform
#
# We will be using the Jupyter notebook interface for all of our work. The Jupyter notebook is a great tool for Data Science, especially for exploratory Data Science. Lets go over a few things about the notebook.
#
# The notebook is divided into cells, some of which are markdown.
# Running this code cell sends it to the kernel and echoes the output below it.
print('some of which are code')
# The code you are writing in your web browser gets sent to a Python "kernel" living on a cloud server which will execute your code and return the result back to the notebook. To run a cell we can either click the run button at the top of the screen, or use `shift+enter`.
#
# If I define a variable in one cell
a = 5  # variables defined in one cell live on in the shared kernel session
# The value is still accessible in another cell:
print(a)
# Jupyter notebooks do have some autosave functionality, but please remember to save your notebooks manually often to make sure you don't lose any work. If you are familiar with a version control tool like `git`, it is not a bad idea to version control the notebooks, although we do ask you don't push the material to a public repository.
# ## Exercises
#
# These notebooks contain many small exercises which help practice the material being discussed. Some of the exercises will be writing a bit of code, others will be written. Some of the exercises will be covered in the lecture and some will be left as practice. Exercises are a great topic to discuss on the forum and please feel free to help each other solve them.
#
# Now a few exercises for the introduction:
#
# 1. Make a few cells in the Jupyter notebook and execute them
# 2. Save your Jupyter notebook
# *Copyright © 2021 WorldQuant University. This content is licensed solely for personal use. Redistribution or publication of this material is strictly prohibited.*
| DS_1/PY_Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# Fill in any place that says `# YOUR CODE HERE` or YOUR ANSWER HERE, as well as your name and collaborators below.
# Grading for pre-lecture assignments is all or nothing. Partial credit is available for in-class assignments and checkpoints, but **only when code is commented**.
# -
NAME = ""
COLLABORATORS = ""
# ---
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "48239f3c8e790de4ea2b65d4e1dd902f", "grade": false, "grade_id": "cell-302a8d68dcf35b8a", "locked": true, "schema_version": 3, "solution": false}
import grading_helper as _test
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "fed7d9040084bfda2e9a3fb4fcddcc55", "grade": false, "grade_id": "cell-5ac865fe6046b997", "locked": true, "schema_version": 3, "solution": false}
# # Altitude of a Satellite
#
# (Adapted from textbook exercise 2.2)
#
# A satellite is to be launched into a circular orbit around the Earth so that it orbits the planet once every $T$ seconds. The altitude $h$ above the Earth's surface that the satellite must have is
# $$h = \biggl( {GMT^2\over4\pi^2} \biggr)^{1/3} - R\,,$$
# where $G=6.67\times10^{-11}$ m$^3$kg$^{-1}$s$^{-2}$ is Newton's gravitational constant, $M=5.97\times10^{24}$ kg is the mass of the Earth, and $R=6371$ km is its radius.
#
# Import the `math` module to get access to `pi`.
#
# Calculate and print the altitude in meters for a satellite with a period of 24 hours. Be careful with the units.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "5a064da6a51ae41db12846dd3d1a08dc", "grade": false, "grade_id": "cell-702bb317365f16c0", "locked": false, "schema_version": 3, "solution": true}
# %%graded # 5 points
# YOUR CODE HERE
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "336718bcacc4ee2ded337cfdd8e388e0", "grade": true, "grade_id": "cell-19c2c0e02f4aa1c6", "locked": true, "points": 5, "schema_version": 3, "solution": false}
# %%tests
# Autograder cell: verifies the required import, variable names, and printed altitude.
_test.code_contains("import", "math")
_test.code_contains("G", "M", "R", "T", "h") # use sensible variable names
_you_printed = float(_test._LAST_OUT)
_test.similar(_you_printed, 35900000) # check that the value you print is close to this number
# FYI these cells can have hidden tests, so don't just print(35900000) in your code!
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "64b383f007230ab9c3be642aead751fb", "grade": false, "grade_id": "cell-b47dfaa81baeb4ab", "locked": true, "schema_version": 3, "solution": false}
# # Catalan Numbers
#
# (Adapted from textbook exercise 2.7)
#
# The Catalan numbers $C_n$ are a sequence of integers 1, 1, 2, 5, 14, 42, 132... that play an important role in quantum mechanics and the theory of disordered systems. (They were central to Eugene Wigner's proof of the so-called [semicircle law](https://en.wikipedia.org/wiki/Wigner_semicircle_distribution).) They are given by
#
# $$C_0 = 1,\qquad C_{n+1} = \frac{4n+2}{n+2}C_n\,.$$
#
# Print all Catalan numbers less than or equal to one billion. Note that the Catalan numbers are all integers.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "44a6fc28907cf576224cd4d9bd532b81", "grade": false, "grade_id": "cell-272f0469717e7873", "locked": false, "schema_version": 3, "solution": true}
# %%graded # 5 points
# YOUR CODE HERE
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "723c92053b7eb9bf6329487429d83986", "grade": true, "grade_id": "cell-08dcb55a83caefa0", "locked": true, "points": 5, "schema_version": 3, "solution": false}
# %%tests
# Autograder cell: spot-checks printed Catalan numbers and enforces the one-billion cap.
_test.printed("1", "4862", "477638700") # Check some of the printed values. Hidden test checks others.
# Did you go over one billion?
_values = [int(v) for v in _test._LAST_OUT.split()]
assert max(_values) < 1e9, f"You went over one billion. Your largest value is {max(_values)}"
| Assignments/01.2 Assignment - Python Intro 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to the Memory Data
#
# ##### This is a script to load and work with data that was collected to analyze memorability of sounds. Please see the below paper for details.
#
# #### please cite:
#
# Ramsay, David, <NAME>, and <NAME>. "The Intrinsic Memorability of Everyday Sounds." Audio Engineering Society Conference: 2019 AES International Conference on Immersive and Interactive Audio. Audio Engineering Society, 2019.
#
# ### Getting Started
#
# This will just demonstrate how to load the included data for use in your analysis.
#
#
# We have 2 sets of data; (1) memory games associated with Amazon Turk user ids, and (2) surveys to collect background information from a unique token we track using cookies.
#
# Turk IDs from Amazon are called *WorkerIDs* and are an uppercase string that starts with A and is 14 characters long.
# Our user ID is called *uid* and is a 32 character string.
#
# It turns out some workers log out and log in as 'different people', but they don't refresh their browser so they retain our UID across multiple WorkerIDs. There are others that might have refreshed the browser and gotten a new UID despite the same WorkerID. We also have a lot of people that might've started the task but never finished it, filling out a survey and getting a UID without ever getting far enough to trigger data collection and a WorkerID.
#
import pandas as pd
# ## Processed Memory/Confusability Scores for Simple Analysis
#
# Most people will just want *memory scores* (how likely to be selected when it was played 60 samples ago) and *confusability* scores (how likely to be selected when it was not presented) associated with each sample; we've done that. To make confusability a bit more reliable, we focus on a version calculated when a sound falls in the last 10 samples of the test (after hearing 60 other random sounds) for its first presentation, as this is closer to the conditions under which memory is being assessed for the target sounds. Certainly you are more likely to confuse a sound after hearing 60 other sounds than hearing none.
#
# In the data you can choose whether to view data from all games, or data only from games that pass a vigilance criteria of a false positive rate for new sounds < 0.4 and a vigilance sound second presentation true positive rate > 0.6 (filtered games). In our paper, we consider memorability normalized by confusability (in other words, how much does the presentation of the sound earlier *increase the likelihood* that you will click it? It's possible that some sounds are ambiguous and people naturally *think* they've heard them before; these will have a high memorability score even though they are definitely *not* memorable sounds. To address this, we subtract the confusability score from the memorability score before trying to assess any trends in the data.
#
# ```
# Confusability = how likely you are to click on a sound when you haven't heard before
# Confusability_last10 = how likely you are to click on a sound when you haven't heard it before, in the last 10 presentations (having heard 60 other sounds)
# Memorability = how likely you are to remember a sound that is separated by 60 other random sounds
# Memorability_norm = Memorability - Confusability
# Memorability_norm_last10 = Memorability - Confusability_last10
# ```
#
# It may also be possible to draw some conclusions given the vigilance score, which has hundreds of presentations, but are only separated by 2-3 other sounds.
# #### loading memory scores
# Load the per-sound memorability/confusability scores (pickled pandas DataFrame)
memory_scores = pd.read_pickle('memory_scores_by_sound.pkl')
memory_scores.head()  # preview the first rows
# ## Raw Memory Data, Survey Data and User IDs
#
# For those looking to do more advanced analysis, we can also look at individual memory game data in raw form. We can compare this against survey data from each participant (or examine survey data alone).
#
# As noted above, cross-validation of who is who is quite messy (Turkers changing their Turk ID while retaining our cookie/ID in browser, rejecting the cookie with the same Turk ID, not making it far enough after doing the survey and creating our cookie/ID). We've done our best to match these IDs, and you'll find a single USER_ID column which is our best matching of Amazon IDs (which frequently have many of our user ids), falling back to our IDs if there is no Amazon match. There might be a little bit of noise in the user matching, but we've done the hard work of cross-referencing submitted codes to Turk and browser tokens to try and uniquely identify individuals. This ID is constant across survey results and game data.
#
# We also collected a small amount of test data from MIT community members through a different portal. The email field will show 'mturker' if they were directed to the game through Amazon Turk, and either 'NA' if they filled out the survey anonymously or 'XXX.mit.edu' if they filled it out with their email (incentivized by a giftcard). No submitted emails were the same.
#
# For the user surveys, answers capture the amount of waking time allocated for each location/activity:
#
# ```
# 0 = never
# 1 = once a month
# 2 = once a week
# 3 = a little each day
# 4 = many hours each day
# ```
#
# and their situation was marked as
#
# ```
# 0 = urban
# 1 = suburban
# 2 = rural
# ```
#
# #### loading raw games
raw_games = pd.read_pickle('raw_game_data.pkl')
raw_games.head()
# +
## we suggest filtering these, as we did in our publication, for people who paid attention
# Vigilance criteria from the paper: false-positive rate for new sounds < 0.4
# and vigilance-sound second-presentation true-positive rate > 0.6.
FALSE_POS_MAX = 0.4  # upper bound on the false-positive rate (old name FALSE_NEG_MAX was misleading)
VIGIL_POS_MIN = 0.6  # lower bound on the vigilance true-positive rate
filtered_games = raw_games.loc[
    (raw_games['falsePositives'] < FALSE_POS_MAX)
    & (raw_games['vPercent'] > VIGIL_POS_MIN)
]
print(f'This operation filters the data from {len(raw_games)} to {len(filtered_games)}.')
# -
# #### loading user surveys
# Load the per-user background survey responses (pickled pandas DataFrame)
user_surveys = pd.read_pickle('user_surveys.pkl')
user_surveys.head()  # preview the first rows
| An Introduction to the Included Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2 as cv
import tensorflow as tf
import numpy as np
import scipy, os, sys, time, functools
from scipy import signal
from functools import reduce
import matplotlib.pylab as plt
gray_ = 'gray'  # default matplotlib colormap name used by plot_n
img = cv.imread('./image.jpg', 0)  # flag 0 loads the image as single-channel grayscale
def plot_n(imgs, infos=None):
    """Display a list of grayscale images with optional titles.

    Layouts preserve the original behavior: one image is drawn alone on an
    8x8-inch figure, 2-3 images in a single row, and 4 images on a 2x2 grid.
    More than 4 images now also works — they are arranged on a near-square
    grid instead of printing a '!!!!!!!' placeholder and showing nothing.

    Parameters
    ----------
    imgs : sequence of 2-D arrays
        Images to show (rendered with the gray colormap).
    infos : sequence of str, optional
        One title per image; defaults to empty titles.
    """
    m = len(imgs)
    if infos is None:
        infos = [""] * m
    if m == 1:
        plt.figure(figsize=(8, 8))
        plt.imshow(imgs[0], cmap=gray_)
        plt.title(infos[0])
        plt.axis('off')
    else:
        # 2-3 images: one row; exactly 4: 2x2; more: near-square grid
        if m == 4:
            rows, cols = 2, 2
        elif m < 4:
            rows, cols = 1, m
        else:
            cols = int(m ** 0.5)
            if cols * cols < m:
                cols += 1
            rows = (m + cols - 1) // cols
        f = plt.figure(figsize=(18, 18))
        f.subplots_adjust(wspace=0.1)
        for i in range(m):
            ax = f.add_subplot(rows, cols, i + 1, title=infos[i])
            ax.axis('off')
            ax.imshow(imgs[i], cmap='gray')
    plt.show()
# Show the original grayscale image (the Chinese title means "original image")
plot_n([img], ['原图'])
# Ke: Laplacian-style omnidirectional edge kernel (entries sum to 0, high-pass)
Ke = np.array([
    [0, -4, 0],
    [-4, 16, -4],
    [0, -4, 0]
])
# Kh: Sobel-style kernel responding to vertical intensity gradients,
# i.e. it highlights horizontal edges (matches the Chinese label used below)
Kh = np.array([
    [1,2,1],
    [0,0,0],
    [-1,-2,-1]
])
# Kv: Sobel-style kernel responding to horizontal intensity gradients,
# i.e. it highlights vertical edges
Kv = np.array([
    [1,0,-1],
    [2,0,-2],
    [1,0,-1]
])
plot_n([cv.filter2D(img, -1, -Ke),cv.filter2D(img, -1, -Kh),cv.filter2D(img, -1, -Kv)],
['整体边缘滤波器', '横向边缘滤波器', '纵向边缘滤波器'])
# 汇合操作降采样, 相当于用$p-$范数作为非线性映射的“卷积”,当$p$趋于正无穷时就是常见的最大值汇合。
# 汇合层通常有三种功效:
# 1. 特征不变形($\mathrm{feature\ invariant}$)
# 2. 特征降维,维数约减
| CV/CNN_book.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from spellchecker import SpellChecker
spell = SpellChecker()
# find those words that may be misspelled (unknown() returns the ones not in the dictionary)
misspelled = spell.unknown(['yes','encyclopeddia','dictionary','favorute','analysiz','woden','dog','forrrest'])
for word in misspelled:
    # Get the one `most likely` answer
    print(spell.correction(word))
    # Get a list of `likely` options
    print(spell.candidates(word))
# Because the word 'pokemon' is a compound word, it cannot be identified. In other words,
# there are no misspelled words for it in our database. So we pick some other words to do this part.
| project/code/misspelling words.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Solution for migrating all user data between ArcGIS Online/Enterprise Orgs.**
#
# Requirements:
# - Administrator level permissions on the "Source Org"
#
# Required Steps:
# - add source org credentials to "credentials/credentials_source.json"
# - add target org credentials to "credentials/credentials_target.json"
#
# Enjoy :)
# +
from arcgis.gis import GIS, Item
from arcgis.env import active_gis
from arcgis.features import FeatureLayerCollection
from arcgis.mapping import WebMap
from common_lib import get_user_items, print_user_inventory
from pathlib import Path
import sys
try:
from ujson import loads
except ModuleNotFoundError:
from json import loads
def authenticate_gis(in_json, verify_cert=False):
    """Create an authenticated GIS connection from a JSON credentials file.

    Parameters
    ----------
    in_json : str
        Path to a JSON file containing 'portal', 'username' and 'password' keys.
    verify_cert : bool, optional
        Whether to verify the portal's SSL certificate (default False).

    Returns
    -------
    GIS
        An authenticated arcgis.gis.GIS object.
    """
    # 'with' guarantees the file is closed even if reading or parsing raises,
    # unlike the previous open()/close() pair which leaked the handle on error
    with open(in_json, "r") as f:
        creds = loads(f.read())
    # Authenticate
    return GIS(creds['portal'], creds['username'], creds['password'], verify_cert=verify_cert)
# Authenticate against both orgs using the JSON files under credentials/
source = authenticate_gis('credentials/credentials_source.json', verify_cert=False)
print("Source Org: {0}".format(source))
print("Source Access User: {0} \n".format(source.users.me))
target = authenticate_gis('credentials/credentials_target.json', verify_cert=False)
print("Target Org: {0}".format(target))
print("Target Access User: {0}".format(target.users.me))
print("\n **Proceed only if the above information is correct**")
# + pycharm={"name": "#%%\n"}
# Clone Content from all users
# Collect up to 1000 users from the source org; their items will be copied below
users = source.users.search(max_users=1000)
print("Detected Source Org Users for Copying items from as: \n")
for n in [user.username for user in users]:
    print(n)
print("\n **Proceed only if the above information is correct**")
# + pycharm={"name": "#%%\n"}
bypass_items_list = ["Points From Inbox"]  # item titles to skip entirely
# Iteration 1 for base features & Service Definitions
# (data items are cloned first so maps/scenes cloned later can resolve their layers)
for user in users:
    username = user.username
    # TODO: Get list of all user items.
    user_inventory = source.content.search("owner:{0}".format(username))
    if user_inventory:
        for i in user_inventory:
            print(i.type)
            # Note: Feature Layer Collections appears as Feature Service on i.type
            if i.type not in ["Web Map", "Web Scene", "Form", "Web Mapping Application"] and "type:Feature Layer Collection" not in str(i):
                if i.title not in bypass_items_list:
                    # Ensure item does not exist on target org (matched by title + type)
                    if len(target.content.search("title:{0}".format(i.title), item_type=i.type, max_items=1000)) > 0:
                        print("detected item {0} already on target machine... skipping".format(i))
                    else: # Upload item to target org
                        print("transferring {0}".format(i))
                        cloned_items = target.content.clone_items(items=[i])
'''
# Iteration 2 for Feature Layer Collection
for user in users:
username = user.username
# TODO: Get list of all user items.
user_inventory = source.content.search("owner:{0}".format(username))
if user_inventory:
for i in user_inventory:
print(i.type)
# Note: Feature Layer Collections appears as Feature Service on i.type
if "type:Feature Layer Collection" in str(i):
# Ensure item does not exist on target org
if len(target.content.search("title:{0}".format(i.title), item_type=i.type, max_items=1000)) > 0:
print("detected item {0} already on target machine... skipping".format(i))
else: # Upload item to target org
print("transferring {0}".format(i))
cloned_items = target.content.clone_items(items=[i])
'''
# Iteration 3 for Maps and Scenes
# (runs after the data items exist on the target so cloned maps can re-point to them;
#  same loop shape as Iteration 1 with an inverted type filter)
for user in users:
    username = user.username
    # TODO: Get list of all user items.
    user_inventory = source.content.search("owner:{0}".format(username))
    if user_inventory:
        for i in user_inventory:
            print(i.type)
            # Note: Feature Layer Collections appears as Feature Service on i.type
            if i.type in ["Web Map", "Web Scene"] and "type:Feature Layer Collection" not in str(i):
                if i.title not in bypass_items_list:
                    # Ensure item does not exist on target org (matched by title + type)
                    if len(target.content.search("title:{0}".format(i.title), item_type=i.type, max_items=1000)) > 0:
                        print("detected item {0} already on target machine... skipping".format(i))
                    else: # Upload item to target org
                        print("transferring {0}".format(i))
                        cloned_items = target.content.clone_items(items=[i])
# Iteration 4 for all other Apps
# + pycharm={"name": "#%%\n"}
# Final report: list everything now owned by the signed-in target user
print("Data in Target User Directory")
target_user_inventory = get_user_items(user=target.users.me, active_gis=target)
print_user_inventory(inventory=target_user_inventory)
| transfer_data_between_orgs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="_Pkmfqg8zWco" colab_type="code" colab={}
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# import data from the github page of the book (semicolon-separated CSV)
data = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter02/data/Absenteeism_at_work.csv', sep=";")
# + id="iSLL4gk5zWct" colab_type="code" outputId="f0d50c8b-ba9e-4aa1-ea10-21e869f7aa9c" colab={"base_uri": "https://localhost:8080/", "height": 391}
# print dimensionality of the data, columns, types and missing values
print(f"Data dimension: {data.shape}")
# one line per column: padded name, dtype, and NaN count
for col in data.columns:
    print(f"Column: {col:35} | type: {str(data[col].dtype):7} | missing values: {data[col].isna().sum():3d}")
# + colab_type="code" outputId="cc22ea2d-0cda-4793-e7d5-067455db58d3" id="yYBokFuztYyt" colab={"base_uri": "https://localhost:8080/", "height": 700}
# compute statistics on numerical features
data.describe().T
# + id="KM11JNkzzWc0" colab_type="code" colab={}
# define encoding dictionaries: map the dataset's numeric codes back to labels
month_encoding = {1: "January", 2: "February", 3: "March", 4: "April",
                  5: "May", 6: "June", 7: "July", 8: "August",
                  9: "September", 10: "October", 11: "November", 12: "December", 0: "Unknown"}
dow_encoding = {2: "Monday", 3: "Tuesday", 4: "Wednesday", 5: "Thursday", 6: "Friday"}
season_encoding = {1: "Spring", 2: "Summer", 3: "Fall", 4: "Winter"}
education_encoding = {1: "high_school", 2: "graduate", 3: "postgraduate", 4: "master_phd"}
yes_no_encoding = {0: "No", 1: "Yes"}

# backtransform numerical variables to categorical.
# Driving the conversion from a single column -> encoding table removes the
# seven near-identical .apply(lambda ...) statements the cell used to contain.
# Using dict.__getitem__ still raises KeyError on an unknown code, exactly as
# the original lambdas did (unlike Series.map, which would yield NaN).
categorical_encodings = {
    "Month of absence": month_encoding,
    "Day of the week": dow_encoding,
    "Seasons": season_encoding,
    "Education": education_encoding,
    "Disciplinary failure": yes_no_encoding,
    "Social drinker": yes_no_encoding,
    "Social smoker": yes_no_encoding,
}
preprocessed_data = data.copy()
for column, encoding in categorical_encodings.items():
    preprocessed_data[column] = preprocessed_data[column].apply(encoding.__getitem__)
# + id="E7G0sSqtzWc3" colab_type="code" outputId="99286e6c-6570-4fb1-a9d4-45f747b27dd2" colab={"base_uri": "https://localhost:8080/", "height": 700}
# transform columns
preprocessed_data.head().T
# + id="bcSvhsJNzWc8" colab_type="code" outputId="936a58df-3570-4ba1-8ca7-6fda96b34006" colab={"base_uri": "https://localhost:8080/", "height": 499}
# define function, which checks if the provided integer value
# is contained in the ICD or not
def in_icd(val):
    """Return "Yes" if *val* is an ICD disease category (1-21), else "No".

    Codes 1-21 of the "Reason for absence" column are ICD disease
    categories; 0 and codes above 21 are non-disease reasons.
    """
    # Idiom fix: chained comparison replaces `val >= 1 and val <= 21`.
    return "Yes" if 1 <= val <= 21 else "No"
# add Disease column
# "Yes" when the absence reason is an ICD disease code (1-21), "No" otherwise.
preprocessed_data["Disease"] = preprocessed_data["Reason for absence"]\
    .apply(in_icd)
# plot value counts
plt.figure(figsize=(10, 8))
sns.countplot(data=preprocessed_data, x='Disease')
plt.savefig('figs/disease_plot.png', format='png', dpi=300)
# + [markdown] colab_type="text" id="JgJuyfIquqyV"
# # Initial analysis on the reason for absence
# + colab_type="code" outputId="6e2062ae-4c51-45e3-a423-6b63699b47ab" id="mVe_vMm7upJm" colab={"base_uri": "https://localhost:8080/", "height": 334}
# get the number of entries for each reason for absence
plt.figure(figsize=(10, 5))
ax = sns.countplot(data=preprocessed_data, x="Reason for absence")
ax.set_ylabel("Number of entries per reason of absence")
plt.savefig('figs/absence_reasons_distribution.png', format='png', dpi=300)
# + [markdown] colab_type="text" id="g17FCIDmuofe"
# #### Social drinkers and smokers analysis
# + colab_type="code" outputId="3e5f0ff5-41f6-4f47-9a7e-1d1dc5b5c5c5" id="pYBpeU4GumOF" colab={"base_uri": "https://localhost:8080/", "height": 759}
# plot reasons for absence against being a social drinker/smoker
plt.figure(figsize=(8, 6))
sns.countplot(data=preprocessed_data, x="Reason for absence",
              hue="Social drinker", hue_order=["Yes", "No"])
plt.savefig('figs/absence_reasons_drinkers.png', format='png', dpi=300)
plt.figure(figsize=(8, 6))
sns.countplot(data=preprocessed_data, x="Reason for absence",
              hue="Social smoker", hue_order=["Yes", "No"])
plt.savefig('figs/absence_reasons_smokers.png', format='png', dpi=300)
# + colab_type="code" outputId="9405b4fb-6522-45fe-dcb5-d93dceef377f" id="32EkWWuYul1s" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Share of drinkers/smokers in the dataset (normalize=True gives fractions).
print(preprocessed_data["Social drinker"].value_counts(normalize=True))
print(preprocessed_data["Social smoker"].value_counts(normalize=True))
# + colab_type="code" outputId="96a8b827-1dec-47e1-e8ae-f376efd7348e" id="8s3cb__CulDS" colab={"base_uri": "https://localhost:8080/", "height": 34}
# computation of conditional probability
# Classic two-children example: P(two boys | at least one boy).
# Each outcome has probability 0.25, so the 0.25 factors cancel and the
# result is |A ∩ B| / |B| = 1/3.
sample_space = set(["BB", "BG", "GB", "GG"])
event_a = set(["BB"])
event_b = set(["BB", "BG", "GB"])
cond_prob = (0.25*len(event_a.intersection(event_b))) / (0.25*len(event_b))
print(round(cond_prob, 4))
# + [markdown] colab_type="text" id="B3HLb_N8tj9U"
# **Exercise 2.02: Identifying Disease Reasons with Higher Probability among Drinkers and Smokers**
# + colab_type="code" outputId="6b76d43f-ee02-428a-fef0-c4fc8287a410" id="RplQu9eitZVM" colab={"base_uri": "https://localhost:8080/", "height": 411}
# compute probabilities of being a drinker and smoker
drinker_prob = preprocessed_data["Social drinker"]\
    .value_counts(normalize=True)["Yes"]
smoker_prob = preprocessed_data["Social smoker"]\
    .value_counts(normalize=True)["Yes"]
print(f"P(social drinker) = {drinker_prob:.3f} | P(social smoker) = {smoker_prob:.3f}")
# create mask for social drinkers/smokers
drinker_mask = preprocessed_data["Social drinker"] == "Yes"
smoker_mask = preprocessed_data["Social smoker"] == "Yes"
# compute probabilities of absence reasons and being a social drinker/smoker
# i.e. the joint probabilities P(reason ∩ drinker) and P(reason ∩ smoker).
total_entries = preprocessed_data.shape[0]
absence_drinker_prob = preprocessed_data["Reason for absence"]\
    [drinker_mask].value_counts()/total_entries
absence_smoker_prob = preprocessed_data["Reason for absence"]\
    [smoker_mask].value_counts()/total_entries
# compute conditional probabilities
# Bayes-style: P(reason | drinker) = P(reason ∩ drinker) / P(drinker).
# Index 0-28 covers every possible "Reason for absence" code.
cond_prob = pd.DataFrame(index=range(0,29))
cond_prob["P(Absence | social drinker)"] = absence_drinker_prob/drinker_prob
cond_prob["P(Absence | social smoker)"] = absence_smoker_prob/smoker_prob
# plot probabilities
plt.figure()
ax = cond_prob.plot.bar(figsize=(10,6))
ax.set_ylabel("Conditional probability")
plt.savefig('figs/conditional_probabilities.png', format='png', dpi=300)
| Chapter02/Exercise2.02/Exercise2.02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NLP Part 2
#
# ## Stop Words
# - Youtube tutorial [Stop Words - Natural Language Processing With Python and NLTK p.2](https://www.youtube.com/watch?v=w36-U-ccajM&index=2&list=PLQVvvaa0QuDf2JswnfiGkliBInZnIC4HL)
#
# > Stop words are the words such as 'a', or 'the', it's there for humans to make sense, but not necessary to
# understand the meaning of the context.
# +
# NLTK tokenizer and the bundled stop-word corpus
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

example_sentence = 'This is an example showing off stop words filteration.'

# Load the English stop-word list; other languages work the same way.
stop_words = set(stopwords.words('english'))
print('stop words list: \n', stop_words)

# Tokenize the sentence, then keep only the tokens that are not stop words.
words = word_tokenize(example_sentence)
print('Original sentence tokenized: \n', words)

filtered_sentence = [token for token in words if token not in stop_words]
print('Filtered sentence: \n', filtered_sentence)
# -

# The corpus also ships non-English stop-word lists, e.g. Norwegian.
N_stop_words = set(stopwords.words('norwegian'))
print('Norwegian stop words list: \n', N_stop_words)
| nlp/NLP - Part 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import os
import glob
from analysis_helper_exp3 import *
from IPython.display import clear_output
# %load_ext autoreload
# %autoreload 2
# Experiment configuration: compare two selection strategies over 50
# active-learning iterations, clustering compounds by Butina 0.4 cluster id.
iter_max=50
task_col=None
cluster_col='BT_0.4 ID'
hs_ids = ['ClusterBasedWCSelector_609', 'MABSelector_exploitive']
# NOTE(review): this path mixes '/' and an escaped '\\' separator, and later
# cells split paths on '\\' — the notebook appears Windows-specific; confirm
# before running elsewhere.
root_dir = '../../../aldd_results/aldd_exp_3_final//params_results\\'
a_dir = glob.glob(root_dir+'sampled_hyparams/ClusterBasedWCSelector_609/*/*/*/')
b_dir = glob.glob(root_dir+'benchmarks/MABSelector_exploitive/*/*/*/')
# When True, the per-task comparison DataFrame is loaded from disk instead of
# being recomputed from the raw iteration CSVs.
df_from_file = True
# +
# Either recompute per-task hit statistics from the raw dataset CSVs, or load
# a cached dict of {task: (hit_limit, unique_hit_limit, cpd_count)}.
recompute_task_info=False
if recompute_task_info:
    # Directory names look like '<task>_cv_96'; strip the trailing 6 chars.
    task_names = [r.split('\\')[-2][:-6] for r in glob.glob('../datasets/pcba/*_cv_96/')]
    task_hit_dict = {}
    for task_col in task_names:
        task_df = pd.concat([pd.read_csv(x) for x in glob.glob('../datasets/pcba/{}_cv_96/unlabeled_*.csv'.format(task_col))])
        cpd_count = task_df.shape[0]
        # Total number of actives (label == 1) for this task.
        hit_limit = task_df[task_col].sum()
        # Number of distinct clusters that contain at least one active.
        unique_hit_limit = task_df[task_df[task_col] == 1][cluster_col].unique().shape[0]
        task_hit_dict[task_col] = (hit_limit, unique_hit_limit, cpd_count)
else:
    import pickle
    with open('task_info_dict.pickle', 'rb') as handle:
        task_hit_dict = pickle.load(handle)
# Derive the task list from the result directories found earlier.
task_list = np.unique([af.split('\\')[-4] for af in a_dir])
task_info_list = []
for tcol in task_list:
    a, b, c = task_hit_dict[tcol]
    task_info_list.append([tcol, a, b, c])
full_task_info = pd.DataFrame(data=task_info_list, columns=['task_col', 'hit_limit', 'unique_hit_limit', 'cpd_count'])
# Percentage of actives per task, rounded to two decimals.
full_task_info['active_ratio'] = np.around(100.0 * full_task_info['hit_limit'] / full_task_info['cpd_count'], decimals=2)
full_task_info['hit_limit'] = full_task_info['hit_limit'].astype(int)
# -
# Drop tasks excluded from the analysis, then order the rest by active ratio.
excluded_tasks = ['pcba-aid588342','pcba-aid1030', 'pcba-aid504332',
                  'pcba-aid686979', 'pcba-aid686978']
task_info = full_task_info[~full_task_info['task_col'].isin(excluded_tasks)]
sorted_task_info = task_info.sort_values('active_ratio')
task_list = task_info['task_col'].tolist()
# Build (or load) the per-task/per-replicate comparison of actives found by
# the two strategies: counts, unique-cluster counts, and the set overlap
# (intersection / union / symmetric difference) of the retrieved actives.
if not df_from_file:
    cluster_col = 'BT_0.4 ID'
    rf_ids = ['{}'.format(i) for i in range(10)]
    data = []
    for task_col in task_list:
        for rf_id in rf_ids:
            task_data = task_info[task_info['task_col'] == task_col].iloc[0].tolist()[1:]
            af = root_dir+'sampled_hyparams/ClusterBasedWCSelector_609/{}/{}/batch_size_96/'.format(task_col, rf_id)
            bf = root_dir+'benchmarks/MABSelector_exploitive/{}/{}/batch_size_96/'.format(task_col, rf_id)
            # Concatenate all iterations' training data for each strategy.
            adf = pd.concat([pd.read_csv(af+'/training_data/iter_{}.csv'.format(i)) for i in range(1, iter_max+1)])
            bdf = pd.concat([pd.read_csv(bf+'/training_data/iter_{}.csv'.format(i)) for i in range(1, iter_max+1)])
            a_actives = adf[adf[task_col] == 1]
            b_actives = bdf[bdf[task_col] == 1]
            a_actives_idx, b_actives_idx = a_actives['Index ID'].values, b_actives['Index ID'].values
            a_uactives, b_uactives = a_actives[cluster_col].unique(), b_actives[cluster_col].unique()
            a_hits, b_hits = a_actives.shape[0], b_actives.shape[0]
            a_uhits, b_uhits = a_uactives.shape[0], b_uactives.shape[0]
            # Set algebra on compound ids (and on cluster ids for the *_u columns).
            intersect_actives = np.intersect1d(a_actives_idx, b_actives_idx)
            union_actives = np.union1d(a_actives_idx, b_actives_idx)
            symmetric_diff_actives = np.setdiff1d(union_actives, intersect_actives)
            intersect_uactives = np.intersect1d(a_uactives, b_uactives)
            union_uactives = np.union1d(a_uactives, b_uactives)
            symmetric_diff_uactives = np.setdiff1d(union_uactives, intersect_uactives)
            data.append([task_col, rf_id, a_hits, b_hits, a_uhits, b_uhits,
                         intersect_actives.shape[0], union_actives.shape[0], symmetric_diff_actives.shape[0],
                         intersect_uactives.shape[0], union_uactives.shape[0], symmetric_diff_uactives.shape[0]] + task_data)
    data_df = pd.DataFrame(data=data,
                           columns=['task_col', 'rf_id', '609_hits', 'MABE_hits', '609_uhits', 'MABE_uhits',
                                    'intersect', 'union', 'sym_diff',
                                    'intersect_u', 'union_u', 'sym_diff_u',
                                    'hit_limit', 'unique_hit_limit', 'cpd_count', 'active_ratio'])
else:
    data_df = pd.read_csv('./exp3/exp3_vs_data_df.csv.gz')
# Aggregate across replicates, keeping tasks ordered by active ratio.
sorted_tasks = task_info.sort_values('active_ratio')['task_col'].tolist()
task_means = data_df.groupby('task_col').mean().loc[sorted_tasks]
task_max = data_df.groupby('task_col').max().loc[sorted_tasks]
task_min = data_df.groupby('task_col').min().loc[sorted_tasks]
# Fraction of retrieved actives that only one of the two strategies found.
task_means['disagree %'] = task_means['sym_diff'] / task_means['union']
# +
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_context("paper")
sns.set(font_scale=1.5)
figsize=(32, 8)
# Per-task mean symmetric difference of retrieved actives (by compound and by
# unique cluster), with tasks on the x-axis in active-ratio order.
plt.figure(figsize=figsize)
sns.lineplot(x=task_means.index, y=task_means['sym_diff'].values, sort=False)
sns.lineplot(x=task_means.index, y=task_means['sym_diff_u'].values, sort=False)
plt.xticks(rotation=90);
plt.legend(['sym_diff', 'sym_diff_u'])
# -
# Assemble a readable summary table of per-task mean hit counts for the two
# strategies, their union / symmetric difference, disagreement ratio and hit %.
cols = ['609_hits', 'MABE_hits', 'union', 'sym_diff', 'disagree %', 'active_ratio']
tmp_df = task_means[cols]
# Fix: corrected the typo "Symmatric" -> "Symmetric" in the displayed label.
tmp_df.columns = ['CBWS_609 Mean Total Hits', 'MABSel_exploitive Mean Total Hits',
                  'Union Mean Total Hits', 'Symmetric Difference Mean Total Hits', 'Mean Disagreement Ratio', 'Hit %']
tmp_df
# Emit a LaTeX longtable version of the summary for the paper.
print(tmp_df.round(2).to_latex(longtable=True))
# Tasks where the two strategies disagree on more than half of their hits.
tmp_df[tmp_df['Mean Disagreement Ratio'] > 0.5]
| analysis_notebooks/MABSelector_exploitive vs ClusterBasedWCSelector_609.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Tensorflow GPU (tf-gpu)
# language: python
# name: tf-gpu
# ---
# ## Keras GAN
# A simple GAN for generating digits from MNIST.
#
# Note keras_adverserial seems broken on later versions of Keras (use 2.0.0)
# +
import os
import numpy as np
import pandas as pd
from time import time
from scipy.misc import imread
import keras
from keras_adversarial import AdversarialModel, simple_gan, gan_targets
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Reshape, InputLayer
from keras.regularizers import L1L2
from keras.utils import np_utils
import matplotlib.pyplot as plt
# -
# Load the mnist data and show a few samples
# To stop potential randomness
# NOTE(review): `rng` is not used anywhere in the visible cells — confirm it
# is needed further down, otherwise it can be dropped.
seed = 128
rng = np.random.RandomState(seed)
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# Preview the first nine training digits in a 3x3 grid.
fig=plt.figure(figsize=(8, 8))
for i in range(9):
    plt.subplot(3,3,i+1)
    plt.title("Class {}".format(y_train[i]))
    plt.imshow(X_train[i], cmap='gray', interpolation='none')
plt.tight_layout()
# The pixel values are gray scale between 0 and 255. It is almost always a good idea to perform some scaling of input values when using neural network models. Because the scale is well known and well behaved, we can very quickly normalize the pixel values to the range 0 and 1 by dividing each value by the maximum of 255.
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# Finally, the output variable is an integer from 0 to 9. As this is a multi-class classification problem we need to one hot encoding of the class values, transforming the vector of class integers into a binary matrix.
#
# We can easily do this using the built-in np_utils.to_categorical() helper function in Keras.
# one hot encode outputs and save original classes
print(y_train.shape)
print(y_test.shape)
print(y_train[:10])
# Keep the integer class labels around before replacing y_* with one-hot rows.
y_train_classes = y_train.copy()
y_test_classes = y_test.copy()
y_train = np_utils.to_categorical(y_train_classes)
y_test = np_utils.to_categorical(y_test_classes)
num_classes = y_test.shape[1]
print(y_train.shape)
print(y_test.shape)
print(y_train[:3])
# ## GAN
# The training dataset is structured as a 3-dimensional array of (instance, image width and image height). Our neural-network is going to take a single vector for each training example, so we need to reshape the input so that each 28x28 image becomes a single 784 dimensional vector.
#
# We can do this transform easily using the reshape() function on the NumPy array. We can also reduce our memory requirements by forcing the precision of the pixel values to be 32 bit, the default precision used by Keras anyway.
# flatten 28*28 images to a 784 vector for each image
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train_FF = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test_FF = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')
print(X_train_FF.shape)
print(X_test_FF.shape)
# Define our generator and discriminator networks.
# define vars
# Generator maps a 100-dim latent vector to a 28x28 image; the discriminator
# takes a 28x28 image and outputs a single real/fake probability.
g_input_shape = 100
d_input_shape = (28, 28)
hidden_1_num_units = 500
hidden_2_num_units = 500
g_output_num_units = 784
d_output_num_units = 1
# NOTE(review): `epochs = 25` is defined here but the fit call below passes
# epochs=10 — confirm which value is intended.
epochs = 25
batch_size = 128
# +
# generator: latent vector -> 784 sigmoid outputs, reshaped to a 28x28 image.
model_1 = Sequential([
    Dense(units=hidden_1_num_units, input_dim=g_input_shape, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=g_output_num_units, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Reshape(d_input_shape),
])
# discriminator: 28x28 image -> single sigmoid real/fake score.
model_2 = Sequential([
    InputLayer(input_shape=d_input_shape),
    Flatten(),
    Dense(units=hidden_1_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units, activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=d_output_num_units, activation='sigmoid', kernel_regularizer=L1L2(1e-5, 1e-5)),
])
# gan: wire generator and discriminator together, sampling latents from a
# standard normal of dimension 100.
gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
# final model: adversarial wrapper that trains both players simultaneously,
# each with its own Adam optimizer, under binary cross-entropy.
model = AdversarialModel(base_model=gan,player_params=[model_1.trainable_weights, model_2.trainable_weights])
model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(), player_optimizers=['adam', 'adam'], loss='binary_crossentropy')
# -
print(model_1.summary())
print(model_2.summary())
print(gan.summary())
# Fit the model.
# BUG FIX: the original called model.fit(x=train_x, ...) but no `train_x`
# exists anywhere in this notebook (a likely copy-paste from a tutorial).
# The real training input is the normalized 28x28 `X_train` array, which
# matches the discriminator's (28, 28) input shape.
history = model.fit(x=X_train, y=gan_targets(X_train.shape[0]), epochs=10, batch_size=batch_size)
# Plot loss for the model to see if more epochs would have helped and to ensure that we don't start overfitting.
plt.plot(history.history['player_0_loss'])
plt.plot(history.history['player_1_loss'])
plt.plot(history.history['loss'])
# ### Generate Images
# Sample ten latent vectors and render the generator's output for each.
zsamples = np.random.normal(size=(10, 100))
pred = model_1.predict(zsamples)
for i in range(pred.shape[0]):
    plt.imshow(pred[i, :], cmap='gray')
    plt.show()
| GAN/GAN Basics/Keras GAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# \# Developer: <NAME> (<EMAIL>) <br>
# \# 3rd February 2019 <br>
#
# \# © 2019 initOS GmbH <br>
# \# License MIT <br>
#
#
# \# Library for TSVM and SelfLearning taken from https://github.com/tmadl/semisup-learn <br>
# \# Thanks to the authors for brilliant work
#
from sklearn.svm import SVC
import pandas as pd
import numpy as np
from __future__ import division
import re
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from frameworks.SelfLearning import *
from collections import Counter
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib
import time
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
# NOTE: this notebook is Python 2 (print statements without parentheses below).
# Strip literal '[' and ']' characters from the free-text fields.
regex_square_brackets = re.compile(r'(\[)|(\])')
bugs = pd.read_csv('../datasets/lexical_semantic_preprocessed_mantis_bugs_less_columns_with_class_expansion.csv')
bug_notes = pd.read_csv('../datasets/lexical_semantic_preprocessed_mantis_bugnotes.csv')
bug_notes['bug_note'] = bug_notes['bug_note'].str.replace(regex_square_brackets, '')
bugs['additional_information'] = bugs['additional_information'].str.replace(regex_square_brackets, '')
bugs['description'] = bugs['description'].str.replace(regex_square_brackets, '')
bugs['summary'] = bugs['summary'].str.replace(regex_square_brackets, '')
# Join all notes of each bug into one comma-separated string, then attach
# them to the bug rows (left merge keeps bugs without notes).
df_bug_note_table = bug_notes.groupby(['bug_id'])['bug_note'].apply(','.join).to_frame('bug_notes').reset_index()
result = pd.merge(bugs, df_bug_note_table, how='left', left_on='id', right_on='bug_id')
# Concatenate every text field into a single feature column.
result['textual_data'] = result['summary'].fillna('') + ',' + result['description'].fillna('') + ',' + result['additional_information'].fillna('') + ',' + result['bug_notes'].fillna('')
result['textual_data'] = result['textual_data'].str.replace(" ", "")
# Sort so labelled rows come first, then map labels to integers:
# critical -> 0, non-critical -> 1; NaN class marks unlabelled rows.
result.sort_values(by=['class'], inplace=True)
result.reset_index(drop=True, inplace= True)
result.loc[result['class']=='critical', 'class'] = 0
result.loc[result['class']=='non-critical', 'class'] = 1
unlabelled_index = result[(result['class'].isnull())].index
labelled_index = result[~(result['class'].isnull())].index
print result['class'].value_counts()
def apply_self_learning_model(X_train, Y_train, X_test, Y_test, X_U, sklearn_classifier, classifier, n_o_f, iteration):
    """Fit a self-learning model on labelled + unlabelled data and score it.

    Stacks the labelled training rows with the unlabelled pool X_U (whose
    labels are set to -1, the semisup-learn convention for "unknown"),
    optionally grid-searches SVM hyper-parameters first, fits a
    SelfLearningModel, persists model and test split to disk, and returns
    the accuracy on (X_test, Y_test).
    """
    X = np.vstack((X_train, X_U))
    # -1 marks unlabelled samples for the semi-supervised learner.
    Y_U = np.full((X_U.shape[0], ), -1, dtype=float)
    Y = np.concatenate((Y_train, Y_U)).astype(float)
    if classifier=='SVM':
        # True division is in effect via `from __future__ import division`,
        # so 1/100 and 1/X.shape[1] are floats even under Python 2.
        C=[1/100, 1/10, 1, 10, 100]
        gamma=[]
        gamma += [1/X.shape[1]]
        kernel=['rbf']
        params_grid = [
            {'C': C,
             'gamma': gamma,
             'kernel': kernel}
        ]
        # NOTE(review): the grid search is fitted on X/Y which still contain
        # the -1 "unlabelled" targets — confirm this is intended.
        gs = GridSearchCV(SVC(probability=True), params_grid, cv=3)
        gs.fit(X, Y)
        sklearn_classifier = gs.best_estimator_
    slm = SelfLearningModel(sklearn_classifier)
    slm.fit(X, Y)
    # Persist the fitted model and the held-out split for later analysis
    # (see get_results below, which reloads these files).
    joblib.dump(slm, 'models/selfLearning_{}_{}_{}.pkl'.format(classifier, n_o_f, iteration))
    joblib.dump(X_test, 'models/X_test_{}_{}_{}.pkl'.format(classifier, n_o_f, iteration))
    joblib.dump(Y_test, 'models/Y_test_{}_{}_{}.pkl'.format(classifier, n_o_f, iteration))
    score = slm.score(X_test, Y_test.astype(float))
    return score
def SLM(X_l, Y_l, X_U, sklearn_classifier, classifier, n_o_f):
    """Run apply_self_learning_model over 3 stratified shuffle splits.

    Splits the labelled data (40% train / 60% test per split), trains one
    self-learning model per split, and prints the mean test accuracy.
    Python 2 print statement below.
    """
    stratified_shuffle_split = StratifiedShuffleSplit(n_splits=3, test_size=0.6, random_state=0)
    scores = []
    iteration = 1
    for train_index, test_index in stratified_shuffle_split.split(X_l, Y_l):
        X_train = X_l[train_index].copy()
        Y_train = Y_l[train_index].copy()
        X_test = X_l[test_index].copy()
        Y_test = Y_l[test_index].copy()
        score = apply_self_learning_model(X_train, Y_train, X_test, Y_test, X_U,
                                          sklearn_classifier, classifier, n_o_f, iteration)
        scores.append(score)
        iteration += 1
    print "self-learning {} average score: {}".format(classifier, np.mean(scores))
def main(df, sklearn_classifier, features_list, classifier, unlabelled_index, labelled_index):
    """Vectorize the text and run self-learning for each feature count.

    For every TF-IDF vocabulary size in *features_list*, vectorizes
    df['textual_data'], splits labelled vs unlabelled rows by the given
    index sets, and runs SLM. Oversampling/undersampling variants are
    kept commented out.
    NOTE(review): `results` is populated only by commented-out code and the
    `return` is commented out, so this function currently returns None —
    the callers' `*_results = main(...)` variables are therefore None.
    """
    results = {}
    for no_of_features in features_list:
        print("""####################### Running for Number of features {} ############################""".format(no_of_features))
        tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=no_of_features, stop_words='english')
        X = tfidf_vectorizer.fit_transform(df['textual_data']).toarray()
        Y = np.array(df['class'])
        #print("members for classes {}".format(",".join("(%s,%s)" % tup for tup in sorted(Counter(Y).items()))))
        X_U = X[unlabelled_index]
        X_l = X[labelled_index]
        Y_l = Y[labelled_index]
        # without sample
        print("####################### Running without Sampling ############################")
        SLM(X_l.copy(), Y_l.copy(), X_U.copy(), sklearn_classifier, classifier, no_of_features)
        # with oversampling
        #print("#######################Running with oversampling############################")
        #smote = SMOTE('minority')
        #x_OS, y_OS = smote.fit_sample(X.copy(), Y.copy())
        #slm_OS = SLM(x_OS, y_OS, sklearn_classifier, classifier)
        # with undersampling
        #print("#######################Running with undersampling############################")
        #enn = RepeatedEditedNearestNeighbours()
        #x_US, y_US = enn.fit_resample(X.copy(), Y.copy())
        #slm_US = SLM(x_US, y_US, sklearn_classifier, classifier)
        #results[str(no_of_features)] = [slm]#, slm_OS, slm_US]
    #return results
# Run the self-learning pipeline with three base classifiers, timing each run.
start_time = time.time()
sklearn_mnb = MultinomialNB(alpha=.01)
mnb_results = main(result, sklearn_mnb, [100, 500, 1000], 'Multinomial Naive Bayes',
                   unlabelled_index, labelled_index)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
sklearn_lr = LogisticRegression(n_jobs=-1, class_weight='balanced')
lr_results = main(result, sklearn_lr, [100, 500, 1000], 'Logistic Regression', unlabelled_index, labelled_index)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
# BUG FIX: the original used `sklearn.svm.SVC(...)`, but the `sklearn` module
# is never imported in this notebook (only `from sklearn.svm import SVC`),
# which raises NameError. Use the imported SVC directly.
sklearn_svm = SVC(kernel="rbf", probability=True, class_weight='balanced')
svm_results = main(result, sklearn_svm, [100, 500, 1000], 'SVM', unlabelled_index, labelled_index)
print("--- %s seconds ---" % (time.time() - start_time))
def get_results(classifier, data_type):
    """Reload a persisted self-learning model, score it and plot the report.

    Loads the model and test split saved for split #3 with 500 TF-IDF
    features, builds a per-class precision/recall/F1 table, writes it to
    CSV, plots it as a bar chart and returns the plotted DataFrame.
    NOTE(review): `features` is used after the `for` loop ends — this only
    works because the loop iterates over the single value [500].
    NOTE(review): the local `result` shadows the module-level `result`
    DataFrame inside this function.
    """
    dict_features = {}
    dict_acc = {}
    for features in [500]:
        model = joblib.load('models/selfLearning_{}_{}_{}.pkl'.format(classifier, features, 3))
        x_tst = joblib.load('models/X_test_{}_{}_{}.pkl'.format(classifier, features, 3))
        y_tst = joblib.load('models/Y_test_{}_{}_{}.pkl'.format(classifier, features, 3))
        acc = model.score(x_tst, y_tst.astype(data_type))
        y_pred = model.predict(x_tst)
        result = classification_report(y_tst.astype(data_type), y_pred.astype(data_type), output_dict=True)
        dict_features[str(features)] = pd.DataFrame(result)
        dict_features[str(features)].transpose().to_csv('ST_{}_{}_latex_table_report.csv'.format(classifier, features))
        dict_acc[str(features)] = acc
    #vals = [[round(dict_features['500'].loc['precision', 'weighted avg'], 2),
    #         round(dict_features['500'].loc['recall', 'weighted avg'], 2),
    #         round(dict_features['500'].loc['f1-score', 'weighted avg'], 2),
    #         round(dict_acc['500'],2)]]
    #columns=["Precision", "Recall", "F1-score", "Accuracy"]
    # Two-level index: measure x class, for a grouped bar chart.
    arrays = [["Precision", "Precision", "Precision", "Recall", "Recall", "Recall", "F1-score", "F1-score",
               "F1-score"], ['Critical', 'Non-Critical', 'Weighted Avg.', 'Critical', 'Non-Critical',
                             'Weighted Avg.', 'Critical', 'Non-Critical', 'Weighted Avg.']]
    MI = pd.MultiIndex.from_arrays(arrays, names=('Measures', 'Classes'))
    # Class 0.0 = critical, 1.0 = non-critical (see the label mapping above).
    vals = [[round(dict_features[str(features)].loc['precision', '0.0'], 2)],
            [round(dict_features[str(features)].loc['precision', '1.0'], 2)],
            [round(dict_features[str(features)].loc['precision', 'weighted avg'], 2)],
            [round(dict_features[str(features)].loc['recall', '0.0'], 2)],
            [round(dict_features[str(features)].loc['recall', '1.0'], 2)],
            [round(dict_features[str(features)].loc['recall', 'weighted avg'], 2)],
            [round(dict_features[str(features)].loc['f1-score', '0.0'], 2)],
            [round(dict_features[str(features)].loc['f1-score', '1.0'], 2)],
            [round(dict_features[str(features)].loc['f1-score', 'weighted avg'], 2)]]
    #[round(dict_acc[str(features)],2)]]
    df = pd.DataFrame(vals, index=MI, columns=['vals'])
    df = df.unstack().transpose().reset_index(level=0, drop=True)
    ax = df.plot(kind='bar', figsize=(8,6), rot=False)
    patches, labels = ax.get_legend_handles_labels()
    ax.legend(patches, labels, loc='best')
    plt.xlabel('Evaluation Measure ST MNB', fontsize=12)
    plt.savefig('{}_results.pdf'.format(classifier), dpi=720)
    return df
# Collect the per-classifier report tables.
self_training_mnb = get_results('Multinomial Naive Bayes', float)
self_training_lr = get_results('Logistic Regression', float)
self_training_svm = get_results('SVM', float)
# +
#self_training_mnb = self_training_mnb.rename({'accuracy': 'Self-Training MNB Accuracy',
#                                              'f1-score': 'Self-Training MNB f1-score'}, axis=1)
#self_training_svm = self_training_svm.rename({'accuracy': 'Self-Training SVM Accuracy',
#                                              'f1-score': 'Self-Training SVM f1-score'}, axis=1)
#self_training_lr = self_training_lr.rename({'accuracy': 'Self-Training Logistic Regression Accuracy',
#                                            'f1-score': 'Self-Training Logistic Regression f1-score'}, axis=1)
# -
# Rename the single value column of each table after its classifier, then
# concatenate side by side for the combined comparison plot.
self_training_mnb = self_training_mnb.rename({0: 'Multinomial Naive Bayes'}, axis=1)
self_training_svm = self_training_svm.rename({0: 'SVMs'}, axis=1)
self_training_lr = self_training_lr.rename({0: 'Logistic Regression'}, axis=1)
st_result = pd.concat([self_training_mnb, self_training_svm, self_training_lr], axis=1)
st_result
ax = st_result.plot(kind='bar', figsize=(8,6), rot=False)
plt.xlabel('Features', fontsize=12)
plt.savefig('{}_results.pdf'.format('SelfTraining'), dpi=720)
# # Get predictions from Logistic Regression Self training for Ensemble
# NOTE(review): `self_training_lr` is rebound here from the report DataFrame
# above to a LogisticRegression estimator — consider a distinct name.
self_training_lr = LogisticRegression(n_jobs=-1, class_weight='balanced')
df = result.copy()
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=500, stop_words='english')
X_tf = tfidf_vectorizer.fit_transform(df['textual_data']).toarray()
Y_tf = np.array(df['class'])
X_U = X_tf[unlabelled_index]
X_l = X_tf[labelled_index]
Y_l = Y_tf[labelled_index]
stratified_shuffle_split = StratifiedShuffleSplit(n_splits=3, test_size=0.6, random_state=0)
scores = []
iteration = 1
# Refit a self-learning LR on each split; the model from the LAST split is
# the one used for the ensemble predictions below. Python 2 print statement.
for train_index, test_index in stratified_shuffle_split.split(X_l, Y_l):
    X_train = X_l[train_index].copy()
    Y_train = Y_l[train_index].copy()
    X_test = X_l[test_index].copy()
    Y_test = Y_l[test_index].copy()
    X = np.vstack((X_train, X_U))
    Y_U = np.full((X_U.shape[0], ), -1, dtype=float)
    Y = np.concatenate((Y_train, Y_U)).astype(float)
    slm = SelfLearningModel(self_training_lr)
    slm.fit(X, Y)
    print slm.score(X_test, Y_test.astype(float))
# Predict over the whole corpus and store alongside the TSVM predictions.
Y_Pred = slm.predict(X_tf)
df['ST_MNB'] = Y_Pred
tsvm_model = joblib.load('models/TSVM_500_3.pkl')
Y_Pred_tsvm = tsvm_model.predict(X_tf)
| 3) semi-supervised_text/3.SELF_LERANING.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import dill
import os
# (exact-duplicate `import dill` / `import os` statements removed)

# Result directories, one per model; `chose_dir_idx` selects which model's
# training history to inspect (-2 -> 'NEW_GAMENet').
dir_name = ['LR', 'Leap', 'seq2seq', 'Retain', 'DMNC', 'NEW_GAMENet', 'GAMENet_no_neg']
prefix_path = 'saved'
chose_dir_idx = -2
# FIX: the original passed a bare open(...) to dill.load, leaking the file
# handle; a with-block closes it deterministically.
history_path = os.path.join(prefix_path, dir_name[chose_dir_idx], 'history.pkl')
with open(history_path, 'rb') as history_file:
    history = dill.load(history_file)
print(dir_name[chose_dir_idx])
# Dump the first 20 epochs of validation metrics for the chosen model.
for epoch in range(20):
    # Index 1 ('Leap') reports 0 for Jaccard — presumably that model logs
    # no 'ja' metric; confirm against the training script.
    jaccard = history['ja'][epoch] if chose_dir_idx != 1 else 0
    print(f"epoch:{epoch:d}, JA:{jaccard:.4f}, DDI RATE:{history['ddi_rate'][epoch]:.4f}, "
          f"P:{history['avg_p'][epoch]:.4f}, R:{history['avg_r'][epoch]:.4f}, "
          f"F1:{history['avg_f1'][epoch]:.4f}, PRAUC:{history['prauc'][epoch]:.4f}")
history['ddi_rate'][10]
# # plot
# FIX: `run ddi_rate_plot.py` is an IPython magic, not valid Python — as
# written it breaks the jupytext .py script. Jupytext's convention (see the
# `# %matplotlib inline` escape above) is to comment magics out:
# %run ddi_rate_plot.py
| code/deplicated/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import glob
import re
import numpy as np
files = glob.glob('/Users/BP2317/Downloads/archivosnatalia/*.csv')
files
# Load the first precipitation CSV (Spanish column names: Fecha=date,
# Mes=month, Dia=day; station columns like Moche, San_Carlos hold readings).
precipitacion = pd.read_csv(files[0], parse_dates=True)
precipitacion.head()
precipitacion.describe()
# +
precipitacion.isnull().sum()
# -
precipitacion.head()
# Split 'Fecha' on '/', assuming a day/month/... format — index 0 is the day
# and index 1 the month (both kept as zero-padded strings).
precipitacion['Mes'] = precipitacion['Fecha'].apply(lambda x : x.split('/')[1])
precipitacion['Dia'] = precipitacion['Fecha'].apply(lambda x : x.split('/')[0])
precipitacion.dtypes
precipitacion.head()
precipitacion.describe()
# Mean reading per (month, day) pair, used later to impute missing values.
medias=precipitacion.groupby(['Mes','Dia']).mean()
medias.head()
medias.loc[('01','01'),'Moche']
medias.describe()
precipitacion.describe()
# groupedSan_Carlos = precipitacion.groupby('Mes')['San_Carlos']
# lstGroups =[]
# for cname in precipitacion.columns[1:]:
# lstGroups.append(precipitacion.groupby('Mes')[cname])
# lstGroups
# Rows of *df* that contain at least one NaN.
# FIX: assigning a lambda to a name is discouraged (PEP 8 E731) — a def gives
# the function a proper name for tracebacks and allows a docstring.
def nans(df):
    """Return only the rows of *df* containing at least one NaN value."""
    return df[df.isnull().any(axis=1)]
# Preview the rows with missing readings.
nans(precipitacion).head()
# for index,row in precipitacion.iterrows():
#     if np.isnan(row['San_Carlos']):
#         precipitacion.loc[index,'San_Carlos']=medias.loc[(row['Mes'],row['Dia']),'San_Carlos']
#         print('rep San Carlos')
#     if np.isnan(row['San_Jose']):
#         precipitacion.loc[index,'San_Jose']=medias.loc[(row['Mes'],row['Dia']),'San_Jose']
#         print('rep San José')
#     if np.isnan(row['Moche']):
#         precipitacion.loc[index,'Moche']=medias.loc[(row['Mes'],row['Dia']),'Moche']
#         print('rep Moche')
#     if np.isnan(row['Casa_Grande']):
#         precipitacion.loc[index,'Casa_Grande']=medias.loc[(row['Mes'],row['Dia']),'Casa_Grande']
#         print('rep Casa Grande')
# precipitacion.isnull().sum()
# precipitacion.head()
precipitacion.describe()
# Extract bare file names for the output CSVs.
# NOTE(review): splitting on '\\' assumes Windows paths, but `files` above
# was globbed from a '/Users/...' macOS path where there is no backslash —
# consider os.path.basename(f) for a platform-independent result; confirm
# which platform this actually runs on.
names=[]
for f in files:
    names.append(f.split('\\')[1])
print (names)
# For every input CSV: derive month/day, impute each station's missing
# readings with the (month, day) mean rounded to 2 decimals, then write the
# cleaned frame (without the helper columns) to the output directory.
dfs=[]
i =0
root = 'C:\\Users\\BP2317\\Downloads\\archivosnatalia\\output\\'
for f in files:
    df=pd.read_csv(f, parse_dates=True)
    df['Mes'] = df['Fecha'].apply(lambda x : x.split('/')[1])
    df['Dia'] = df['Fecha'].apply(lambda x : x.split('/')[0])
    medias=df.groupby(['Mes','Dia']).mean()
    for index,row in df.iterrows():
        if np.isnan(row['San_Carlos']):
            df.loc[index,'San_Carlos']=float(format(medias.loc[(row['Mes'],row['Dia']),'San_Carlos'],'.2f'))
        if np.isnan(row['San_Jose']):
            df.loc[index,'San_Jose']=float(format(medias.loc[(row['Mes'],row['Dia']),'San_Jose'],'.2f'))
        if np.isnan(row['Moche']):
            df.loc[index,'Moche']=float(format(medias.loc[(row['Mes'],row['Dia']),'Moche'],'.2f'))
        if np.isnan(row['Casa_Grande']):
            df.loc[index,'Casa_Grande']=float(format(medias.loc[(row['Mes'],row['Dia']),'Casa_Grande'],'.2f'))
    # Drop the helper columns before writing the output file.
    del df['Mes']
    del df['Dia']
    df.to_csv(root+names[i],index=False)
    i+=1
    print(df.isnull().sum())
# NOTE(review): nothing in this notebook ever appends to `dfs`, so this loop
# does nothing as written.
for df in dfs:
    print(df.isnull().sum())
| Natalia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Web App BackEnd
# Minimal Flask front-end over an Elasticsearch product index: one page with
# a search form, one endpoint that runs the query and renders the results.
from flask import Flask, render_template, request
from elasticsearch import Elasticsearch
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the search form page."""
    return render_template('search.html')
@app.route('/search/results', methods=['GET', 'POST'])
def search_request():
    """Run the submitted query against the product index and render results.

    Reads the form field 'input', matches it against Cleaned_Name in the
    'bestgadgetfinder' index, and returns up to 25 hits with the name,
    per-store sales prices and product links.
    """
    query = request.form["input"]
    # NOTE(review): a new Elasticsearch client is created on every request —
    # consider constructing it once at module level.
    es = Elasticsearch('localhost', port=9200)
    results = es.search(
        index="bestgadgetfinder",
        size=25,
        body={
            "_source": [
                "New_Name",
                "Amazon_SalesPrice",
                "Flipkart_SalesPrice",
                "Snapdeal_SalesPrice",
                "Amazon_ProductLink",
                "Flipkart_ProductLink",
                "Snapdeal_ProductLink",
            ],
            'query': {
                "match": {
                    "Cleaned_Name": query
                }
            }
        }
    )
    return render_template('result.html', results=results)
if __name__ == '__main__':
    app.run(port=5000)
| Code & Dataset/6.UI Development/app.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overlap Tomography Code
#
# This code performs the tomographic reconstruction from the overlap tomography experiment. Specifically, this code is for the overlap tomography of a single-photon Fock state. The basis for this reconstruction is a convex-optimization semidefinite programming (SDP) algorithm used to solve the matrix equation $M=CP$, where $M$ is a column vector containing all of the overlap measurements, $C$ is a coefficient matrix that can be calculated from the coherent state calibration measurements, and $P$ is the unknown density operator in Liouville vector representation. Instead of directly inverting $C$, which could cause problems due to even small errors in the calibration blowing up upon matrix inversion, the more robust minimization of the $L2$ norm, $||M-CP||_2$ can be performed. Minimizing this quantity yields a global minimum that this computationally efficient to solve. Additionally, we can use physicality constraints on the density matrix $\rho=\rho^\dagger$ and $\text{Tr}[\rho]=1$ to ensure a physical reconstruction. This code uses the Python libraries CVXPY and CVXOPT to perform the optimization.
#
# Once the reconstruction is performed, a second optimization problem can be solved to compensate for calibrated loss, as the loss process is just a different matrix mapping from the pure state to the loss-degraded state.
# +
import time
from matplotlib import rc
import math
from qutip import *
#from qutip.ipynbtools import plot_animation
#import qutip
import numpy as np
# %matplotlib inline
import matplotlib.pylab as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from IPython.display import display, Math, Latex
import cmath
from mpl_toolkits.axes_grid1 import AxesGrid
from scipy.special import factorial
from scipy.special import binom
from scipy import linalg
import scipy as scp
import cvxpy as cp
import cvxopt
# Module-level configuration shared by every cell below.
# NOTE(review): `global` at module scope is a no-op; kept as-is.
global N_dim
global N_trun
N_dim = 10;   # Hilbert-space (Fock basis) dimension used for the reconstruction
N_trun = 6;   # photon-number truncation when summing measured distributions
# -
# First we'll calculate the coefficient matrix from the measured coherent states. We could use directly measured results, but we can improved our results by including some additional calibrations such as imperfect visibility between the signal photon and the coherent state. If we assume that the fiber collects everything, then the only mode we should care about is the portion of the signal that correctly interferes with the coherent state, but in actuality, we also measure the non-interferring photons. Additionally, when calibrating the coherent state, we collect more light than actually interferes with the signal, as some of this coherent state will not overlap. Thus, when calculating the coefficient matrix, the value of the coherent state used should actually be smaller than what is measured by a factor of the visibility. Below, we have the measured amplitudes of the five non-zero coherent state values which were calibrated before and after each experimental run. The values used for the reconstruction is the average of the measurements scaled by the visibility.
'''Coherent State calibrations'''
# Interference visibility between the signal photon and the LO coherent state.
vis=0.9
# Fraction of the LO intensity that actually interferes with the signal.
M=vis**2
alpha_LO1 = np.sqrt(2)*np.array([0,0.2392,0.2577, 0.2877, 0.3158, 0.50774]) #Measured values at start
alpha_LO2 =np.sqrt(2)*np.array([0,0.2653,0.2728, 0.293 ,0.3198, 0.5054]) #Measured values at end
# Average the before/after calibrations, then scale by the visibility to get
# the effective coherent-state amplitude that interferes with the signal.
alpha_LO=(alpha_LO1+alpha_LO2)/2
alpha_LO_new=alpha_LO*vis
# Displayed for reference: amplitude fraction of the LO that does NOT interfere.
np.sqrt(1-M)
# The measured amplitudes are only part of the calibrations. Additionally, we need the phase of each measurement. In this experiment, I used a mirror-mounted PZT to vary the phase of the overlapping coherent states. Each shift to the piezo applied a 0.58 radian phase. Additionally, because the EOM was used to control the amplitudes of the coherent states, the phase also depends on the amplitude since changing the voltage on the EOM will change the phase. The phases were all calibrated with classical beams before the experimental data collection. At the end of the cell below, the variable 'Final_prob' is now a 2D array that contains all of the coefficients needed to solve the SDP problem.
# +
'''Create a matrix of the coefficients of all of the
coherent states used to probe the state to tomograph'''
# Each appended row is a flattened coherent-state density matrix |alpha><alpha|;
# stacked together they form the coefficient matrix C of the SDP (M = C P).
prob_matrix_tomo=[]
phase_step=0.58 #the PZT changes the phase by 0.58 rads
EOM_phase_step=0.277 #phase change due to EOM in rads
'''In this set of data, I did i=24;16;8;1 for the EOM values, so I need to assign EOM values to negative phases.
Hence the minus sign on the EOM_phase_step in line 22 of this cell'''
amp_num=5 #number of coherent state probe amplitudes (not including vacuum)
for i in range(amp_num):
    amp=alpha_LO_new[i+1];
    # EOM phase offset depends on which amplitude setting was used.
    if (i+1)>4:
        EOM_phase=np.e**(-EOM_phase_step*(i-5)*1j)
    elif 5>(i+1)>0:
        EOM_phase=np.e**(-EOM_phase_step*(i)*1j)
    else:
        # NOTE(review): unreachable for i in 0..4 — the two branches above
        # already cover every case; kept for safety.
        EOM_phase=1
    for k in range(10): #(number of phases used)
        phase=np.e**(k*1j*phase_step)
        B = coherent_dm(N_dim,amp*phase*EOM_phase).full()
        ele = B.flatten()
        prob_matrix_tomo.append(ele)
'''Add the vacuum component last'''
B_vac=coherent_dm(N_dim,0).full()
prob_matrix_tomo.append(B_vac.flatten())
Final_prob = np.array(prob_matrix_tomo);
# -
# The cell below contains the measured photon-number probability distributions from the TES for each overlap setting for five values of coherent state amplitude each at 10 phases from 0 to $2\pi$. The first set of 'amp_0' is when the coherent state field is blocked, thus the signal is simply overlapped with vacuum.
# +
'''experimental number distributions go here'''
# Naming convention: amp_<amplitude index>_p<phase index>. Each list is a
# measured photon-number probability distribution [P(0)..P(6)] from the TES.
refl_tot=.4 #This is the loss we determined from the heralding ratio
# NOTE(review): refl_tot is overwritten to 0.6 in the loss-correction cell below.
amp_0_1=[0.8134,0.1791,0.0075,0,0,0,0]
amp_0_2=[0.7974,0.2026,0,0,0,0,0]
amp_0_3=[0.7869,0.2131,0,0,0,0,0]
amp_0=np.zeros(7)
for i in range(len(amp_0_1)): #average the 3 measurements
    amp_0[i]=(amp_0_1[i]+amp_0_2[i]+amp_0_3[i])/3
amp_1_p1 = [0.6619, 0.3094,0.0288,0,0,0,0]
amp_1_p2 = [ 0.6917,0.2667,0.0417,0,0,0,0]
amp_1_p3 = [ 0.6771,0.2396,0.0833,0,0,0,0]
amp_1_p4 = [ 0.6579,0.2895,0.0439,0.0088,0,0,0]
amp_1_p5 = [0.66950,0.3136,0.0169,0,0,0,0]
amp_1_p6 = [0.7254,0.2042,0.0634,0.007,0,0,0]
amp_1_p7 = [0.7109,0.25,0.0391,0,0,0,0]
amp_1_p8 = [0.7284,0.2407,0.0309,0,0,0,0]
amp_1_p9 = [0.6855,0.2642,0.044,0.0063,0,0,0]
amp_1_p10 = [ 0.7414,0.2241,0.0345,0,0,0,0]
amp_2_p1 = [0.7143,0.2476,0.0381,0,0,0,0]
amp_2_p2 = [ 0.7596,0.2212,0.0192,0,0,0,0]
amp_2_p3 = [ 0.7042,0.2535,0.0352,0.007,0,0,0]
amp_2_p4 = [ 0.731,0.2164,0.0526,0,0,0,0]
amp_2_p5 = [0.7345,0.2373,0.0226,0.0056,0,0,0]
amp_2_p6 = [0.6842,0.25,0.0658,0,0,0,0]
amp_2_p7 = [0.6698,0.283,0.0472,0,0,0,0]
amp_2_p8 = [0.7068,0.2408,0.0419,0.0105,0,0,0]
amp_2_p9 = [0.7341,0.2197,0.0405,0.0058,0,0,0]
amp_2_p10=[0.6587,0.3077,0.024,0.0096,0,0,0]
amp_3_p1 = [0.6863,0.2598,0.049,0.0049,0,0,0]
# NOTE(review): the leading "00." below is a valid float literal (0.7037),
# but looks like a transcription typo — confirm against the raw data.
amp_3_p2 = [ 00.7037,0.2361,0.0602,0,0,0,0]
amp_3_p3 = [ 0.6364,0.298,0.0606,0.0051,0,0,0]
amp_3_p4 = [ 0.6524,0.2866,0.061,0,0,0,0]
amp_3_p5 = [0.6946,0.2635,0.0419,0,0,0,0]
amp_3_p6 = [0.7113,0.232,0.0515,0.0052,0,0,0]
amp_3_p7 = [0.7468,0.1962,0.038,0.019,0,0,0]
amp_3_p8 = [0.6526,0.3105,0.0368,0,0,0,0]
amp_3_p9 = [0.7086,0.2514,0.04,0,0,0,0]
amp_3_p10=[0.6527,0.3054,0.0359,0.006,0,0,0]
amp_4_p1 = [0.6866,0.2687,0.0398,0.005,0,0,0]
amp_4_p2 = [ 0.6842,0.269,0.0409,0.0058,0,0,0]
amp_4_p3 = [0.6258,0.2945,0.0736,0.0061,0,0,0]#check this point - might be wrong
amp_4_p4 = [ 0.6299,0.3052,0.0519,0.013,0,0,0]
amp_4_p5 = [0.6848,0.2446,0.0707,0,0,0,0]
amp_4_p6 = [0.6377,0.3261,0.029,0.0072,0,0,0]
amp_4_p7 = [0.671,0.2645,0.0581,0.0065,0,0,0]
amp_4_p8 = [0.6918,0.2453,0.0503,0.0126,0,0,0]
amp_4_p9 = [0.7299,0.1971,0.073,0,0,0,0]
amp_4_p10=[0.7071,0.2357,0.0571,0,0,0,0]
amp_5_p1 = [0.5798,0.3109,0.1008,0.0084,0,0,0]
amp_5_p2 = [ 0.5783,0.2771,0.0904,0.0482,0.006,0,0]
amp_5_p3 = [0.5333,0.3222,0.1111,0.0167,0.0167,0,0]
amp_5_p4 = [ 0.5839,0.2919,0.0932,0.0311,0,0,0]
amp_5_p5 = [0.549,0.3464,0.0915,0.0131,0,0,0]
amp_5_p6 = [0.6557,0.2842,0.0546,0.0055,0,0,0]
amp_5_p7 = [0.6481,0.2099,0.1049,0.037,0,0,0]
amp_5_p8 = [0.6051,0.2484,0.1274,0.0127,0,0.0064,0]
amp_5_p9 = [0.5988,0.2515,0.0958,0.0359,0.012,0.006,0]
amp_5_p10=[0.6175,0.2568,0.1038,0.0219,0,0,0]
'''Restructure data slightly'''
# Collect the per-phase lists into Amp0..Amp5 (one list of 10 arrays each;
# the vacuum setting amp_0 is repeated for all 10 "phases").
# NOTE(review): eval()/string-built names are fragile — a dict keyed by
# (amplitude, phase) would be safer, but is kept as-is here.
Amp0=[]; Amp1=[]; Amp2=[]; Amp3=[]; Amp4=[]; Amp5 = []
for k in range(6):
    for i in range(10):
        if k==0:
            temp_val=np.asarray(eval('amp_0'))
        else:
            temp_val=np.asarray(eval('amp_'+str(k)+'_p'+str(i+1)))
        eval('Amp'+str(k)).append(temp_val)
# -
# Earlier, it was mentioned that a correction needed to be applied to the coherent state amplitudes to account for the imperfect visibility. Similarly, the extra photons from the coherent state that don't properly interfere with the signal add extra photons to the measured probabilities. Since we know the visibility ahead of time, we can use the measured value and deconvolve the erroneous photons from the data to recover the true measured distribution. This is done by the function 'Amp_correction' below, which simply performs matrix multiplication to map the raw distribution to the deconvoluted distribution. The matrix needed is calculated in the rest of the cell. Note that this matrix is actually a ragged matrix of many submatrices, each designed to correct the mismatched visibility for the different coherent state amplitudes used for the experiment.
#
# First, 'matrix_M' is calculated, which contains all of the matrix maps that convolve the true distributions with non-interfering coherent state photons. These matrices must then be inverted to 'M_inv' which will be applied to the actually measured distributions to attain the true distributions that we can use for further tomographic reconstruction.
# +
def Amp_correction(num_dist,Matrix):
    """Apply a visibility-correction matrix to each measured distribution.

    Parameters
    ----------
    num_dist : sequence of array-like
        Measured photon-number probability distributions, one per setting.
    Matrix : array-like
        Deconvolution matrix (one of ``M_inv``) that removes the
        non-interfering coherent-state photons from the raw counts.

    Returns
    -------
    list of numpy.ndarray
        Corrected distributions ``Matrix @ dist``, in the same order.
    """
    # Comprehension replaces the original append loop; same values and order.
    return [np.dot(Matrix, dist) for dist in num_dist]
#Matrices that describe the convolution of a density matrix with coherent state probabilities
matrix_M=[]
dim_out=N_dim #Hilbert space dimension cutoff of the output (measured) state
leng=len(alpha_LO)
for i in range(leng):
    Prob_vec=[]
    # Non-interfering part of the LO: coherent state of amplitude sqrt(1-M)*alpha.
    state=coherent_dm(N_dim,np.sqrt(1-M)*alpha_LO[i])
    for j in range(7):
        row=[]
        for k in range(7):
        #for k in range(dim_in):
            if k > j: #zero above the diagonal, i.e. the matrix is lower triangular
                val=0
            else:
                #val=(binom(j+k,k)*binom(j+i+k,k))**(1/2)*r**(2*k)*t**(j+(i+j))
                #The above line is correct for the values as they start, but I forgot
                #that there is an offset, so I need to replace k with (k-j)
                val=state.diag()[j-k]
            row.append(val) #appends each value to the k^th position in the j^th row
        #for n in range(j):
        #    row[n]=row[j-1-n]
        Prob_vec.append(row) #appends the j^th row to the i^th matrix
    matrix_M.append(Prob_vec)
M_inv=[] #Invert the above matrix to deconvolve the extra photons from the data
for i in range(len(alpha_LO)):
    inverse=np.linalg.inv(matrix_M[i])
    M_inv.append(inverse)
#New amplitudes that are now the corrected measured probability distributions
Amp0_new=Amp0
for k in range(5):
    vars()['Amp'+str(k+1)+'_new']=Amp_correction(eval('Amp'+str(k+1)),M_inv[k+1])
# -
# Next, take the array of probability distributions and turn it into an array of parities. Remember that the parity for each distribution is directly related to the overlap fidelity of the unknown density matrix with the calibrated coherent states.
#
# The capital P# variables (P0, P1, etc.) hold the parities for all measured data points. The lowercase p# variables hold the averaged measured parity accross all phases for a given amplitude. The final array, 'Fid_array_fin', is the vector of overlap measurements that can go into our SDP optimization algorithm along with the coefficient matrix to solve for the unknown density operator.
# +
def par_array(num_dist_matrix, length, n_trun):
    """Compute the photon-number parity of each measured distribution.

    The parity is the truncated alternating sum sum_n (-1)^n P(n); via the
    Wigner-function relation it is proportional to the overlap fidelity of
    the unknown state with the corresponding probe coherent state.

    Parameters
    ----------
    num_dist_matrix : sequence of array-like
        Photon-number probability distributions, one per measurement setting.
    length : int
        Number of leading distributions to process.
    n_trun : int
        Truncation: only terms n = 0 .. n_trun-1 are summed.

    Returns
    -------
    list of float
        Parity of each of the first ``length`` distributions.
    """
    # (-1)**n replaces math.pow(-1, n); comprehension replaces the append loop.
    return [
        sum((-1) ** n * num_dist_matrix[k][n] for n in range(n_trun))
        for k in range(length)
    ]
# Parities for every amplitude/phase setting. P0..P5 hold one parity per phase;
# p1..p5 replace each list by its phase-averaged value repeated 10x.
# NOTE(review): vars()/eval name construction is fragile; kept as-is.
for k in range(6):
    vars()['P'+str(k)]=par_array(eval('Amp'+str(k)+'_new'),10,N_trun)
for i in range(5):
    temp=eval('P'+str(i+1))
    avg=np.average(temp)
    vars()['p'+str(i+1)]=[avg,avg,avg,avg,avg,avg,avg,avg,avg,avg]
p0=P0
#sets the final distributions used, P# has data for amplitude# at all phases
Fid_array=np.ndarray.flatten(np.array([P1,P2,P3,P4,P5]))
#This is the array that now averages the measurements at each amplitude over all phases instead
Fid_array_averaged=np.ndarray.flatten(np.array([p1,p2,p3,p4,p5]))
'''append the vacuum measurement data'''
# NOTE(review): P0[1] picks one entry of the vacuum parities — all entries of
# P0 are parities of the same averaged amp_0 data, so any index is equivalent.
Fid_array_fin=np.append(Fid_array,P0[1])
Fid_array_fin_averaged=np.append(Fid_array_averaged,p0[1])
# -
# The function below is designed to use the measurements ('Fid_array_fin') and the calibrated coefficients from the coherent state probe matrix ('Final_prob') to solve the semidefinite programming opimization problem. This function minimizes the error function, which is the L2 norm mentioned above, $||CP-M||_2$. In addition, a small parameter $\gamma$ is used as a regularizer that penalizes large elements of the reconstruction. This is a small overall effect but helps minimize the spurious effects of noise that could lead to unrealistically large coherences in the density matrix from experimental errors.
# +
def Convex_optimization_state_tomography(Matrix_prob, Measurements, gamma):
    """Solve the SDP  min ||C @ vec(P) - M||_2 + gamma*||vec(P)||_2  for P.

    Parameters
    ----------
    Matrix_prob : ndarray
        Coefficient matrix C whose rows are flattened probe (coherent-state)
        density matrices.
    Measurements : ndarray
        Vector M of measured overlap fidelities (parities).
    gamma : float
        Small regularization weight penalizing large matrix elements
        (arbitrary, but should be of order 0.01 or less).

    Returns
    -------
    ndarray or None
        The reconstructed N_dim x N_dim density matrix (PSD, unit trace),
        or None if the solver failed.
    """
    #gamma: small regularizing parameter that helps with noise. Arbitrary, but should be small, order of 0.01 or less
    C = Matrix_prob; #This is the input coefficient matrix
    Meas = Measurements; #These are the measured overlap fidelities
    P = cp.Variable((N_dim,N_dim), PSD = True) #unknown density matrix
    #P = cp.Variable((N_dim,N_dim), Hermitian = True) #set hermitian true when I include phase
    Error = cp.norm(C@cp.vec(P) - Meas,2) #second parameter gives norm type
    Obj_detect = cp.Minimize(Error + gamma*cp.norm(cp.vec(P),2))
    constraints = [cp.trace(P)==1]# physicality constraint
    for i in range(N_dim):
        constraints.append(cp.real(P[i][i]) >= 0) #ensure diagonals are real
        if i>5:
            # NOTE(review): this zeroes populations for n >= 6, while the
            # original comment said "zero beyond 4 photons" — confirm cutoff.
            #based on direct photon counting (without overlap tomo), we know the probability distribution for the state is zero beyond 4 photons
            constraints.append(cp.real(P[i][i]) == 0)
    Prob_detect = cp.Problem(Obj_detect,constraints)
    Prob_detect.solve(verbose = False)
    #Prob_detect.solve(cp.CVXOPT) #can choose a different solver
    p_values = (P.value)
    return p_values
# Run the SDP on both data sets and plot the reconstructed P(n).
# NOTE(review): P1/P2 overwrite the parity lists of the same names built
# above — harmless here because Fid_array* were already assembled.
P1 = Convex_optimization_state_tomography(Final_prob, Fid_array_fin, .01) #All data points used
P2 = Convex_optimization_state_tomography(Final_prob, Fid_array_fin_averaged, .01) #averaged phases
P_arr1 = np.array(P1).reshape(N_dim,N_dim)
P_arr2 = np.array(P2).reshape(N_dim,N_dim)
#Plots show the reconstructed photon number distributions
fig, ax = plt.subplots(1,2, sharey=True,figsize=(9,4))
ax[0].bar(range(N_dim),P_arr1.diagonal(0))
ax[0].set_title('Reconstruction with all data')
ax[0].set_ylabel('P(n)',fontsize=14)
ax[0].set_xlabel('n',fontsize=14)
ax[0].set_xlim([-0.5,6])
#ax1.xlabel('n',fontsize=12)
ax[1].bar(range(N_dim),P_arr2.diagonal(0))
ax[1].set_title('Reconstruction with averaged phases')
ax[1].set_xlabel('n',fontsize=14)
ax[1].set_xlim([-0.5,6])
plt.tight_layout()
plt.show()
# -
# Looking at the photon-number distributions, we see that for the most part we just have vacuum and single photon probabilities. This makes sense, since we send a single photon through a lossy channel. Now let's plot the Wigner functions for both reconstructions.
# +
# Wigner functions of both reconstructions: contour + 3D surface for each.
xvec = np.arange(-20.,20.)*5./40
# NOTE(review): yvec/X1/Y1 are computed but never used in this cell.
yvec = np.arange(-50.,50)*5/40
X,Y = np.meshgrid(xvec, xvec)
X1,Y1 = np.meshgrid(yvec,yvec)
q_tomo1=Qobj(P_arr1)
q_tomo2=Qobj(P_arr2)
W1=wigner(q_tomo1,xvec,xvec)
W2=wigner(q_tomo2,xvec,xvec)
fig = plt.figure(figsize=(16,10))
# `ax` is a 3D-aware axis instance, because of the projection='3d' keyword argument to add_subplot
ax = fig.add_subplot(2, 2, 2)
p = ax.contourf(X, Y, W1, 80, cmap=cm.seismic_r,vmin=-0.32,vmax=0.32)
#cb = fig.colorbar(p, shrink = 0.7)
cb = fig.colorbar(p, shrink = 1)
cb.ax.tick_params(labelsize=16)
cb.set_ticks([-0.3,-0.2,-0.1, 0,0.1,0.2,0.3]);
# surface_plot with color grading and color bar
ax = fig.add_subplot(2, 2, 1, projection='3d')
p = ax.plot_surface(X, Y, W1, rstride=1, cstride=1, cmap=cm.seismic_r,vmin=-0.32,vmax=0.32, linewidth=0.5)
cb = fig.colorbar(p,shrink = .7)
plt.title('Reconstruction with all data')
plt.show()
print('Fidelity with a single photon is ',fidelity(q_tomo1,fock(N_dim,1))**2)
fig = plt.figure(figsize=(16,10))
# `ax` is a 3D-aware axis instance, because of the projection='3d' keyword argument to add_subplot
ax = fig.add_subplot(2, 2, 2)
p = ax.contourf(X, Y, W2, 80, cmap=cm.seismic_r,vmin=-0.32,vmax=0.32)
#cb = fig.colorbar(p, shrink = 0.7)
cb = fig.colorbar(p, shrink = 1)
cb.ax.tick_params(labelsize=16)
cb.set_ticks([-0.3,-0.2,-0.1, 0,0.1,0.2,0.3]);
# surface_plot with color grading and color bar
ax = fig.add_subplot(2, 2, 1, projection='3d')
p = ax.plot_surface(X, Y, W2, rstride=1, cstride=1, cmap=cm.seismic_r,vmin=-0.32,vmax=0.32, linewidth=0.5)
cb = fig.colorbar(p,shrink = .7)
plt.title('Reconstruction with phase-averaged data')
plt.show()
print('Fidelity with a single photon is ',fidelity(q_tomo2,fock(N_dim,1))**2)
# -
# The above Wigner functions do not dip negative since the overall loss in this run of the experiment slightly exceed 50%. While the phase-averaged data more closely resembles the symmetry of the Fock state, the whole data set is not too bad. The asymmetries originate from experimental imperfections such as finite sampling and amplitude fluctuations over the course of data collection (~6 hours).
#
# Fortunately, loss is a statistical process that uniquely maps one density operator to another loss-degraded density operator. As discussed in the main text, another SDP optimization problem can be solved to determine the true density matrix before loss. First, define 'matrix_L' which is the matrix that applies loss to the density matrix. Here, loss was as high as 60%.
# +
# Overwrites the earlier refl_tot=0.4; total calibrated loss for this run.
refl_tot=0.6
'''Define the Matrix to correct the loss on the reconstructed \rho' '''
#dim_in=N_dim #Hilbert space dimension cuttoff of the input quantum state
dim_out=N_dim #Hilbert space dimension cutoff of the output (measured) state
# NOTE(review): r and t are defined but the val formula below uses refl_tot
# directly; t is never used.
r=np.sqrt(refl_tot) #loss BS reflection coefficient
t=np.sqrt(1-refl_tot) #loss BS transmission coefficient
#Define my 'ragged' matrix that gives me the transformation between my initial state
#and my final state after the BS and trace. Here, each layer gives a matrix that
#maps ones of the diagonals of my new, measured density matrix to the same
#diagonal of my original matrix.
matrix_L=[]
for i in range(N_dim):
    # NOTE(review): M shadows the visibility factor M=vis**2 defined earlier;
    # safe only because matrix_M was already built above.
    M=[]
    for j in range(dim_out-i):
        row=[]
        for k in range(dim_out-i):
            if k < j: #ensures the result is upper triangular
                val=0
            else:
                val=(binom(k,k-j)*binom(i+k,k-j))**(1/2)*(refl_tot)**((k-j))*(1-refl_tot)**(j+(i/2))
            row.append(val) #appends each value to the k^th position in the j^th row
        M.append(row) #appends the j^th row to the i^th matrix
    matrix_L.append(M)
#For example, the main diagonals map according to
#rho_out.diagonal(0)=np.matmul(matrix_M[0],rho_in.diagonal(0))
#Note, this is NOT YET NORMALIZED! I still need to do that in the optimization algorithm
#rearrange the variables into the same form that works with the SDP problem
rho_temp1=np.zeros((dim_out,dim_out),complex) #all data points
rho_temp2=np.zeros((dim_out,dim_out),complex) #phase-averaged data
for i in range(N_dim):
    for j in range(N_dim):
        rho_temp1[i][j]=P_arr1[i][j]
        rho_temp2[i][j]=P_arr2[i][j]
# -
# The next cell has the SDP algorithm used to correct for loss. As a different matrix must map each diagonal (not just the main diagonal) of the density matrix into a loss-degraded density matrix, this SDP problem actually optimizes over several matrix inversions. It is thus a bit more messy than before.
# +
'''Method using SDPs to reconstruct for loss, which uses the first matrix, matrix_L as the loss matrix
which we need to find the inverse of'''
from cvxopt import blas, lapack, solvers
def Convex_optimization_loss_reconstruct(Matrix_map, Rho_measured, gamma,delta):
    """Invert the calibrated loss channel by a second SDP.

    Each diagonal of the pre-loss density matrix P maps to the corresponding
    diagonal of the measured Rho through one of the matrices in Matrix_map
    (matrix_L). The SDP minimizes the squared residuals of those mappings
    (upper diagonals -> Error1, lower diagonals via conjugates -> Error2).

    Parameters
    ----------
    Matrix_map : list
        Ragged list of loss matrices, one per diagonal offset (matrix_L).
    Rho_measured : ndarray
        The loss-degraded density matrix from the first reconstruction.
    gamma : float
        Regularization weight on ||P||_2.
    delta : float
        Slack added to the per-element magnitude bound constraint.

    Returns
    -------
    ndarray or None
        The loss-compensated density matrix, or None if the solver failed.
    """
    # NOTE(review): np.asarray on a ragged list yields an object array;
    # NumPy >= 1.24 raises unless dtype=object is given — confirm environment.
    M = np.asarray(Matrix_map);
    Rho = Rho_measured;
    Error1=0
    Error2=0
    P = cp.Variable((N_dim,N_dim), PSD = True) #set hermitian true when I include phase
    #P = cp.Variable((N_dim,N_dim), complex = True)
    #P = cp.Variable((N_dim,N_dim), hermitian=True)
    # NOTE(review): `norm`, `Error_vec`, `vec` and `positive_diag` below are
    # assembled but never used in the objective; kept as-is.
    norm=0
    shifted=[]
    for i in range(N_dim):
        row=[]
        for j in range(N_dim-i):
            row.append(P[j][i+j])
        shifted.append(row)
    #The above loop rearranges the variable matrix P into a new matrix where
    #the new rows are now the different diagonals of the original P matrix
    Error_vec=[]
    for k in range(N_dim):
        vec=[]
        for i in range(N_dim-k):
            num=0
            for j in range(N_dim-k):
                num=num+M[k][i][j]*shifted[k][j]
                #inner loop performs matrix multiplication between
                #one of the rows from the new shifted P matrix and
                #one of the matrices that needs to be inverted, M[k].
            vec.append(num)
            #This inner multiplications results in the vector vec
            #I also use this number to subract element-wise the corresponding
            #values of the diagonals of the measured Rho
            temp_error=cp.abs(num-np.diagonal(Rho,k)[i])
            Error1=Error1+cp.power(temp_error,2)
    # Repeat for the lower diagonals (conjugate side of the Hermitian matrix).
    shifted2=[]
    for i in range(N_dim):
        row=[]
        for j in range(N_dim-i):
            row.append(P[j+i][j])
        shifted2.append(row)
    for k in range(N_dim):
        vec=[]
        for i in range(N_dim-k):
            num=0
            for j in range(N_dim-k):
                num=num+M[k][i][j]*shifted2[k][j]
                #inner loop performs matrix multiplication between
                #one of the rows from the new shifted P matrix and
                #one of the matrices that needs to be inverted, M[k].
            vec.append(num)
            #This inner multiplications results in the vector vec
            #I also use this number to subract element-wise the corresponding
            #values of the diagonals of the measured Rho
            temp_error=cp.abs(num-np.diagonal(Rho,k)[i].conj())
            Error2=Error2+cp.power(temp_error,2)
    Obj_detect = cp.Minimize(Error1+Error2+ gamma*cp.norm(P,2))
    positive_diag=[]
    constraints = [cp.trace(P)==1,cp.diag(cp.real(P))>=0] #constrains the density matrix to be physical
    for i in range(N_dim):
        for j in range(N_dim):
            if i==j:
                # Bound each population by the maximum the loss channel could
                # have attenuated it from, plus the slack delta.
                constraints.append(cp.abs(P[i][j]) <= (1/np.sqrt(1-refl_tot)**(i+j))*cp.abs(Rho[i][j])+delta)
    Prob_detect = cp.Problem(Obj_detect,constraints)
    Prob_detect.solve(verbose = False) #set verbose = True to see output logs
    #Prob_detect.solve(cp.CVXOPT,verbose = False)
    p_values = (P.value)
    return p_values
# Loss-compensated reconstructions (no regularization, no slack).
Rho1=Convex_optimization_loss_reconstruct(matrix_L,rho_temp1,0.0,0.0) #all data
Rho2=Convex_optimization_loss_reconstruct(matrix_L,rho_temp2,0.0,0.0) #phase-averaged data
# +
#Plots show the reconstructed photon number distributions
fig, ax = plt.subplots(1,2, sharey=True,figsize=(9,4))
ax[0].bar(range(N_dim),Rho1.diagonal(0))
ax[0].set_title('Reconstruction with all data')
ax[0].set_ylabel('P(n)',fontsize=14)
ax[0].set_xlabel('n',fontsize=14)
ax[0].set_xlim([-0.5,6])
#ax1.xlabel('n',fontsize=12)
ax[1].bar(range(N_dim),Rho2.diagonal(0))
ax[1].set_title('Reconstruction with averaged phases')
ax[1].set_xlabel('n',fontsize=14)
ax[1].set_xlim([-0.5,6])
plt.tight_layout()
plt.show()
# -
# From the above probability distributions, it is clear that the loss reconstruction removed the vacuum component and produced a state that is considerably closer to the pure single-photon we expect. However, the relatively large 2 photon component may not be erroneous after all. In fact, the pump power was too high, so the 2 photon probability in reality was likely non-negligible. Thus, the tomography protocol was able to provide us information on the generated state.
#
# We can also plot the density matrix elements, followed by Wigner functions of the full reconstruction with loss compensation.
# +
# Plot the 6x6 upper-left corner of each reconstructed density matrix.
# NOTE(review): s1 uses np.abs while s2 uses np.real — confirm the
# asymmetry is intended.
s1=np.abs(Rho1)
temp1=np.delete(s1,np.s_[6:],0)
state1=np.delete(temp1,np.s_[6:],1)
s2=np.real(Rho2)
temp2=np.delete(s2,np.s_[6:],0)
state2=np.delete(temp2,np.s_[6:],1)
fig = plt.figure(figsize=(5,3))
plt.pcolormesh(state1, vmin=0.0, vmax=.9,edgecolor='k', linewidth=1)
cbar=plt.colorbar()
cbar.ax.tick_params(labelsize=16)
plt.xticks([0,2,4,6])
plt.tick_params(labelsize=16)
plt.title('Reconstruction with all data')
plt.show()
#plt.imshow(state)
fig = plt.figure(figsize=(5,3))
plt.pcolormesh(state2, vmin=0.0, vmax=.9,edgecolor='k', linewidth=1)
cbar=plt.colorbar()
cbar.ax.tick_params(labelsize=16)
plt.xticks([0,2,4,6])
plt.tick_params(labelsize=16)
plt.title('Reconstruction with averaged phases')
plt.show()
# +
# Wigner functions of the loss-compensated reconstructions.
xvec = np.arange(-20.,20.)*5./40
# NOTE(review): yvec/X1/Y1 are computed but never used in this cell.
yvec = np.arange(-50.,50)*5/40
X,Y = np.meshgrid(xvec, xvec)
X1,Y1 = np.meshgrid(yvec,yvec)
q_tomo1=Qobj(Rho1)
q_tomo2=Qobj(Rho2)
W1=wigner(q_tomo1,xvec,xvec)
W2=wigner(q_tomo2,xvec,xvec)
fig = plt.figure(figsize=(16,10))
# `ax` is a 3D-aware axis instance, because of the projection='3d' keyword argument to add_subplot
ax = fig.add_subplot(2, 2, 2)
p = ax.contourf(X, Y, W1, 80, cmap=cm.seismic_r,vmin=-0.32,vmax=0.32)
#cb = fig.colorbar(p, shrink = 0.7)
cb = fig.colorbar(p, shrink = 1)
cb.ax.tick_params(labelsize=16)
cb.set_ticks([-0.3,-0.2,-0.1, 0,0.1,0.2,0.3]);
# surface_plot with color grading and color bar
ax = fig.add_subplot(2, 2, 1, projection='3d')
p = ax.plot_surface(X, Y, W1, rstride=1, cstride=1, cmap=cm.seismic_r,vmin=-0.32,vmax=0.32, linewidth=0.5)
cb = fig.colorbar(p,shrink = .7)
plt.title('Reconstruction with all data')
plt.show()
print('Fidelity with a single photon is ',fidelity(Qobj(Rho1),fock(N_dim,1))**2)
fig = plt.figure(figsize=(16,10))
# `ax` is a 3D-aware axis instance, because of the projection='3d' keyword argument to add_subplot
ax = fig.add_subplot(2, 2, 2)
p = ax.contourf(X, Y, W2, 80, cmap=cm.seismic_r,vmin=-0.32,vmax=0.32)
#cb = fig.colorbar(p, shrink = 0.7)
cb = fig.colorbar(p, shrink = 1)
cb.ax.tick_params(labelsize=16)
cb.set_ticks([-0.3,-0.2,-0.1, 0,0.1,0.2,0.3]);
# surface_plot with color grading and color bar
ax = fig.add_subplot(2, 2, 1, projection='3d')
p = ax.plot_surface(X, Y, W2, rstride=1, cstride=1, cmap=cm.seismic_r,vmin=-0.32,vmax=0.32, linewidth=0.5)
cb = fig.colorbar(p,shrink = .7)
plt.title('Reconstruction with phase-averaged data')
plt.show()
print('Fidelity with a single photon is ',fidelity(Qobj(Rho2),fock(N_dim,1))**2)
| Fock_state_overlaptomo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 2: Bias
import numpy as np
import pandas as pd
import json
import requests
# Filter out the rows in the page_data data frame that contain "Template:" in the "page" column.
page_data = pd.read_csv('country/data/page_data.csv')
# Keep only real articles: negate the match mask directly (`~mask`) instead of
# comparing a boolean Series to 0 — identical rows kept, clearer intent.
page_data = page_data[~page_data['page'].str.contains('Template:', na=False)]
page_data
# Filter out the data frame to fields without capital letters, and store the capital-only fields in a separate variable for later analysis.
wpds = pd.read_csv('WPDS_2020_data.csv')
# All-caps "Name" rows are regional aggregates (e.g. AFRICA): keep them aside
# for the regional analysis, and drop them from the per-country table using
# `~mask` rather than comparing the boolean Series to 0.
wpds_caps = wpds[wpds['Name'].str.isupper()]
wpds = wpds[~wpds['Name'].str.isupper()]
# Write the grouping function that will batch the API call into 50 at a time.
def grouping(count, lst):
    """Yield consecutive batches of at most *count* items from *lst*."""
    start = 0
    while start < len(lst):
        yield lst[start:start + count]
        start += count
# Write the API call function that uses the endpoint to access the score predictions group them.
def api_call(rev_id):
    """Fetch ORES article-quality predictions for a batch of revision ids.

    Parameters
    ----------
    rev_id : str
        Pipe-separated revision ids (up to 50), e.g. "123|456|789".

    Returns
    -------
    list of dict
        One {'rev_id': int, 'prediction': str} per revision that ORES scored
        successfully; revisions whose score contains an "error" are skipped.
    """
    headers = {
        'User-Agent': 'https://github.com/anantr98',
        'From': '<EMAIL>'
    }
    endpoint = 'https://ores.wikimedia.org/v3/scores/enwiki/?models=articlequality&revids={rev_id}'
    call = requests.get(endpoint.format(rev_id=rev_id), headers=headers)
    response = call.json()
    qual_preds = []
    # Iterate under a new name so the rev_id parameter is not shadowed.
    for scored_id, val in response['enwiki']['scores'].items():
        val_dict = val['articlequality']
        if "error" not in val_dict:
            qual_preds.append({
                'rev_id': int(scored_id),
                'prediction': val_dict['score']['prediction']
            })
    return qual_preds
# Get the predictions from the call.
# Batch the revision ids 50 at a time (ORES accepts up to 50 per request).
id_group = list(grouping(50,page_data['rev_id']))
predictions=[]
for id_val in id_group:
    predictions.append(api_call("|".join(str(x) for x in id_val)))
# Create a data frame with solely the rev_ids and the prediction scores for that particular ID.
# Flatten the per-batch lists returned by api_call into two parallel columns.
rev_id = []
prediction = []
for val in predictions:
    for innerVal in val:
        rev_id.append(innerVal['rev_id'])
        prediction.append(innerVal['prediction'])
wiki_data = pd.DataFrame({'rev_id' : rev_id,'prediction':prediction})
# Merge the wiki data and the population data together.
merge1 = pd.merge(wiki_data,page_data,on='rev_id',how='left')
merge1 = merge1.rename(columns={'country':'Name'})
merge2 = pd.merge(merge1, wpds, on = 'Name', how = 'left')
# Separate the data frame into two separate data frames, those with matches and those without matches for population data.
# Rows with any NaN come from countries absent in the WPDS population table.
wp_wpds_politicians_by_country = merge2.dropna()
wp_wpds_countries_no_match = merge2[merge2.isna().any(axis=1)]
# Filter out the data frame to include only the 5 columns of concern.
wp_wpds_politicians_by_country = wp_wpds_politicians_by_country[['Name', 'page', 'rev_id', 'prediction', 'Population']]
wp_wpds_politicians_by_country = wp_wpds_politicians_by_country.rename(columns={'Name':'country',
                                                                               'page':'article_name',
                                                                               'rev_id':'revision_id',
                                                                               'prediction': 'article_quality_est.',
                                                                               'Population': 'population'})
#wp_wpds_politicians_by_country.head()
# Write the two new data frames to the csv.
wp_wpds_countries_no_match.to_csv('wp_wpds_countries_no_match.csv')
wp_wpds_politicians_by_country.to_csv('wp_wpds_politicians_by_country.csv')
# Top 10 countries by coverage: 10 highest-ranked countries in terms of number of politician articles as a proportion of country population
# Articles per capita for each country: article count / population.
countries = {}
for country in wp_wpds_politicians_by_country['country'].unique():
    countries[country] = wp_wpds_politicians_by_country['country'].value_counts()[country]/wp_wpds_politicians_by_country['population'][wp_wpds_politicians_by_country['country']==country].unique()[0]
top_ten_countries_by_proportion = pd.DataFrame(countries, index=[0]).T.sort_values(by=[0], ascending=False)[0:10]
top_ten_countries_by_proportion
# Bottom 10 countries by coverage: 10 lowest-ranked countries in terms of number of politician articles as a proportion of country population
bottom_ten_countries_by_proportion = pd.DataFrame(countries, index=[0]).T.sort_values(by=[0], ascending=True)[0:10]
bottom_ten_countries_by_proportion
# Top 10 countries by relative quality: 10 highest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality
# "Good" articles are those ORES rated GA (good article) or FA (featured article).
good_quality_by_country = wp_wpds_politicians_by_country[(wp_wpds_politicians_by_country['article_quality_est.']=='GA') | (wp_wpds_politicians_by_country['article_quality_est.']=='FA')]
countries = {}
for country in good_quality_by_country['country'].unique():
    good_count = len(good_quality_by_country[good_quality_by_country['country']==country])
    total = len(wp_wpds_politicians_by_country[wp_wpds_politicians_by_country['country']==country])
    countries[country] = good_count/total
top_ten_countries_by_relative_quality = pd.DataFrame(countries, index=[0]).T.sort_values(by=[0], ascending=False)[0:10]
top_ten_countries_by_relative_quality
# Bottom 10 countries by relative quality: 10 lowest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality
bottom_ten_countries_by_relative_quality = pd.DataFrame(countries, index=[0]).T.sort_values(by=[0], ascending=True)[0:10]
bottom_ten_countries_by_relative_quality
# Geographic regions by coverage: Ranking of geographic regions (in descending order) in terms of the total count of politician articles from countries in each region as a proportion of total regional population
# +
wp_wpds_politicians_by_country = wp_wpds_politicians_by_country.reset_index(drop=False)
## Define the regions
# NOTE(review): the row slices below are hard-coded positions in the 2020 WPDS
# CSV (regions appear as all-caps header rows); they break if the file changes.
wpds_original = pd.read_csv('WPDS_2020_data.csv')
northern_africa = wpds_original[3:10]
western_africa = wpds_original[11:27]
eastern_africa = wpds_original[28:48]
middle_africa = wpds_original[49:58]
southern_africa = wpds_original[59:64]
northern_america = wpds_original[65:67]
central_america = wpds_original[69:77]
caribbean = wpds_original[78:95]
south_america = wpds_original[96:110]
western_asia = wpds_original[111:129]
central_asia = wpds_original[130:135]
south_asia = wpds_original[136:145]
southeast_asia = wpds_original[146:157]
east_asia = wpds_original[158:166]
northern_europe = wpds_original[168:179]
western_europe = wpds_original[180:189]
eastern_europe = wpds_original[190:200]
southern_europe = wpds_original[201:216]
oceania = wpds_original[217:233]
sub_regions = ['NORTHERN AFRICA', 'WESTERN AFRICA',
               'EASTERN AFRICA', 'MIDDLE AFRICA', 'SOUTHERN AFRICA',
               'NORTHERN AMERICA','CENTRAL AMERICA', 'CARIBBEAN', 'SOUTH AMERICA',
               'WESTERN ASIA', 'CENTRAL ASIA', 'SOUTH ASIA', 'SOUTHEAST ASIA',
               'EAST ASIA', 'NORTHERN EUROPE', 'WESTERN EUROPE',
               'EASTERN EUROPE', 'SOUTHERN EUROPE', 'OCEANIA']
subsets = [northern_africa, western_africa, eastern_africa, middle_africa,
           southern_africa, northern_america,central_america, caribbean,
           south_america, western_asia, central_asia, south_asia,
           southeast_asia, east_asia, northern_europe, western_europe,
           eastern_europe, southern_europe, oceania]
# Build one region label per country row, in the same order as wpds.
# NOTE(review): assigning wpds['region'] = region requires len(region) to
# equal len(wpds) exactly — relies on the slices above being complete.
region = []
for i in range(0,len(subsets)):
    for j in range(0,len(subsets[i])):
        region.append(sub_regions[i])
wpds['region'] = region
wpds = wpds.rename(columns={'Name':'country'})
wpds_merged = pd.merge(wp_wpds_politicians_by_country, wpds[['country', 'region']],on='country',how='left')
# Articles per capita for each region, using the all-caps aggregate rows.
sub_region_counts = {}
for subreg in wpds_merged['region'].unique():
    sub_region_counts[subreg] = wpds_merged['region'].value_counts()[subreg]/int(wpds_caps['Population'][wpds_caps['Name']==subreg])
top_ten_subregions_by_proportion = pd.DataFrame(sub_region_counts, index=[0]).T.sort_values(by=[0], ascending=False)[0:10]
top_ten_subregions_by_proportion
# -
# Geographic regions by coverage: Ranking of geographic regions (in descending order) in terms of the relative proportion of politician articles from countries in each region that are of GA and FA-quality
# +
good_quality_by_subregion = wpds_merged[(wpds_merged['article_quality_est.']=='GA') | (wpds_merged['article_quality_est.']=='FA')]
good_quality_subregion = {}
# NOTE(review): the loop variable is named `country` but iterates regions.
for country in good_quality_by_subregion['region'].unique():
    good_quality_subregion[country] = good_quality_by_subregion['region'].value_counts()[country]/wpds_merged['region'].value_counts()[country]
top_ten_subregions_by_quality = pd.DataFrame(good_quality_subregion, index=[0]).T.sort_values(by=[0], ascending=False)[0:10]
top_ten_subregions_by_quality
| hcds-a2-bias.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="HOtxmevmzwOh" colab_type="text"
# ### Singular Value Decomposition (SVD)
# + id="0eF7mUIZzwOh" colab_type="code" colab={}
from numpy.linalg import svd
# + id="F9PzpwXzzwOj" colab_type="code" colab={}
A = np.array([[3,-1],[1,3],[1,1]])
U, S, VT = svd(A)
# + id="4vm-PdtczwOm" colab_type="code" outputId="d9bbd11b-6192-499e-ffa0-78d962b63a86" colab={}
U
# + id="71QZ-prWzwOo" colab_type="code" outputId="a653aa8c-d1c7-4603-ec71-b059896bcafa" colab={}
S
# + id="cI2j_y6VzwOq" colab_type="code" outputId="1dc71a60-31fc-4373-8bfa-07a6d5a5a28a" colab={}
VT
# + id="Q7gsGTirzwOs" colab_type="code" outputId="cd846546-6475-4289-93f2-ab9b8e630005" colab={}
# 다이어분할을 지정하는 1 == 대각화를 실행할 때 한 칸 올려서 실행해라.
np.diag(S,1)[:,1:]
# + id="BDt-btZ7zwOu" colab_type="code" outputId="8c57f197-553b-41db-a9f2-91e10723de9a" colab={}
U @ np.diag(S,1)[:,1:] @ VT
# + id="ZlAlLnP7zwOx" colab_type="code" colab={}
U2, S2, VT2 = svd(A, full_matrices=False)
# + id="gwLyTdkEzwOz" colab_type="code" outputId="f0024820-cf2a-4e6c-c022-c95d1e2fc68a" colab={}
U2
# + id="i59OTNakzwO1" colab_type="code" outputId="f54fe989-76d4-465a-c396-f3be0eca9e1e" colab={}
S2
# + id="LL_D2L6SzwO4" colab_type="code" outputId="2de00e16-6742-4e7c-c020-f87de442f010" colab={}
VT2
# + id="Ibn7ID42zwO6" colab_type="code" outputId="9cd0d972-c1f5-4f98-caa5-ea0791390c5b" colab={}
U2 @ np.diag(S2) @ VT2
# + id="WT3x9JUpzwO9" colab_type="code" colab={}
B = np.array([[3,2,2],[2,3,-2]])
C = np.array([[2,4],[1,3],[0,0],[0,0]])
# + id="_zDPLQpqzwO_" colab_type="code" colab={}
U3, S3, VT3 = svd(B)
U4, S4, VT4 = svd(B, full_matrices=False)
# + id="Vagyg22DzwPB" colab_type="code" outputId="8b043d30-994e-4658-f3fb-b3cfd990e2a5" colab={}
U3
# + id="HegQVtOOzwPD" colab_type="code" outputId="bc07b566-df72-4778-c7dc-d3e87b385c80" colab={}
U4
# + id="sYlSSapUzwPG" colab_type="code" outputId="b42e8146-441a-40ff-a72b-b0cfa61dcb77" colab={}
np.diag(S3,-1)[1:,:]
# + id="crtainenzwPI" colab_type="code" outputId="780232a4-e7d6-4f8d-8167-5c3a4eeebb38" colab={}
S4
# + id="ta56c_oLzwPK" colab_type="code" outputId="f94be3fa-5263-4b16-c3b8-f2f91e328b8f" colab={}
VT3
# + id="pPhS8w8IzwPM" colab_type="code" outputId="d251b48f-9431-408c-f71c-140a295723ba" colab={}
VT4
# + id="D1Iya-mszwPN" colab_type="code" outputId="210dd48e-ff60-4d4a-b193-3bc0a562c1cc" colab={}
np.diag(S3)
# + id="XoyaRf9_zwPR" colab_type="code" outputId="8c0ee089-3909-470c-c57d-20323a40db81" colab={}
U3 @ np.diag(S3,-1)[1:,:] @ VT3
# + id="_eVxPX7_zwPU" colab_type="code" outputId="28c4560b-4824-472f-99ed-80961219d455" colab={}
U4 @ np.diag(S4) @ VT4
# + id="chM_Mmz9zwPX" colab_type="code" colab={}
U5, S5, VT5 = svd(C)
U6, S6, VT6 = svd(C, full_matrices=False)
# + id="krw0BJnnzwPb" colab_type="code" outputId="4b49da9c-f77f-43f4-b52c-7ca7616bb2dd" colab={}
print(U5)
print(np.diag(S5,2)[:,2:])
print(VT5)
print(U5 @ np.diag(S5,2)[:,2:]@VT5)
# + id="PcmZwvCmzwPe" colab_type="code" outputId="32323271-98be-4083-80d7-473bb10d8c8e" colab={}
print(U6)
print(np.diag(S6))
print(VT6)
print(U6 @ np.diag(S6) @ VT6)
# + id="urckprP5zwPg" colab_type="code" colab={}
| MATH/04_singular_value_decomposition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Below is a table schema for a P2P messaging application.
# The table contains send/receive message data for the application's users.
# Column Name Data Type Description
# date string date of the message sent/received, format is 'YYYY-mm-dd'
# timestamp integer timestamp of the message sent/received, epoch seconds
# sender_id integer id of the message sender
# receiver_id integer id of the message receiver
# Question: Using Python and the Pandas library,
# how would you find the fraction of messages that get a response within 5 minutes?
# For simplicity, let's limit data to Jan 1, 2019.
import io

import pandas as pd
# Mock-up data: of the 7 messages sent on 2019-01-01, 3 receive a reply
# within 5 minutes (the two 1->2 messages are both answered by 2->1 at
# t=1546322100, and 3->2 is answered by 2->3 100s later), so the
# expected fraction is 3/7.
s = """date,timestamp,sender_id,receiver_id
2019-01-01,1546322000,1,2
2019-01-01,1546322050,1,2
2019-01-01,1546322100,2,1
2019-01-01,1546325500,3,1
2019-01-01,1546329200,1,3
2019-01-01,1546329300,3,2
2019-01-01,1546329400,2,3
2019-03-09,1552111656,1,3
"""
# pd.compat.StringIO was removed in pandas 1.0; io.StringIO is the
# supported way to read CSV text from memory.
df = pd.read_csv(io.StringIO(s))
# -
# Restrict to the day of interest.
d = df[df["date"]=='2019-01-01']
# A response to message X is a later message Y flowing in the opposite
# direction: Y.sender == X.receiver AND Y.receiver == X.sender, within
# 5 minutes.  Self-merge on both endpoints reversed; matching only
# sender_x == receiver_y (as the original did) would also accept
# replies addressed to a third user.
pairs = (
    d.merge(d,
            left_on=['sender_id', 'receiver_id'],
            right_on=['receiver_id', 'sender_id'])
    .query("timestamp_y > timestamp_x")
    .query("timestamp_y <= timestamp_x + 5*60")
)
# Count each original message at most once, even if it drew several
# in-window replies.
msg_responded = len(
    pairs.drop_duplicates(subset=['timestamp_x', 'sender_id_x', 'receiver_id_x'])
)
msg_sent = len(d)
print('{} msg sent'.format(msg_sent))
print('{} msg responded'.format(msg_responded))
print('{:.2f} fraction of msg responded to within 5m'.format(msg_responded / msg_sent))
d
| interviewq_exercises/q009_pandas_msg_response_time.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Author: <NAME> - Undergraduate on electrical engineering at UFMA
# ## This code employs a model for face recognition by using CNN Net
# Dataset used was 'The ORL Database of Faces'(retrieved from http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html)
# Part of this code was referenced from Yunjey's Tutorials(https://github.com/yunjey/pytorch-tutorial)
import torch
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision import transforms, datasets, utils
import torch.nn as nn
#%% Defining the processor device:
# Everything below runs on the CPU; no GPU is requested anywhere.
device = torch.device('cpu')
#%% Hyper-parameters:
num_epochs = 3  # full passes over the training set
num_classes = 40  # the ORL face database has 40 subjects
learning_rate = 0.0001  # Adam step size
batch_size=8  # training mini-batch size (also reused as the log interval below)
test_batch=4  # evaluation mini-batch size
# +
#%% Loading the data:
# ImageFolder infers each image's class label from its parent directory
# name; ToTensor() converts to CHW float tensors scaled to [0, 1].
dataset = datasets.ImageFolder(root='data/faces/', transform=transforms.ToTensor())
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4)
# Sanity check: print every batch's tensor shape and display the 4th
# batch (i == 3) as an image grid together with its integer labels.
for i, batch in enumerate(train_loader):
    print(i, batch[0].size())
    if i==3:
        grid=utils.make_grid(batch[0])
        # make_grid returns CHW; transpose to HWC for matplotlib.
        plt.imshow(grid.numpy().transpose((1, 2, 0)))
        plt.title('4th Batch')
        plt.show()
        print('Labels:', batch[1].numpy())
        break
# -
#%% Convnet Architecture:
class ConvNet(nn.Module):
    """CNN classifier for the ORL face-recognition task.

    Four Conv -> BatchNorm -> ReLU stages (only the second one
    max-pools), followed by a single fully-connected layer over the
    flattened feature map.  The classifier expects a 64 x 56 x 46
    feature map, which corresponds to 112 x 92 inputs halved once by
    the pool -- assumes ORL-sized images; TODO confirm.
    """

    def __init__(self, num_classes=num_classes):
        super().__init__()
        # Attribute names (layer1..layer4, fc) are kept stable so that
        # previously saved state_dicts remain loadable.
        self.layer1 = self._stage(3, 12, kernel_size=3, padding=1)
        self.layer2 = self._stage(12, 24, kernel_size=3, padding=1, pool=True)
        self.layer3 = self._stage(24, 64, kernel_size=5, padding=2)
        self.layer4 = self._stage(64, 64, kernel_size=5, padding=2)
        self.fc = nn.Linear(56 * 46 * 64, out_features=num_classes)

    @staticmethod
    def _stage(in_channels, out_channels, kernel_size, padding, pool=False):
        """Build one Conv -> BatchNorm -> ReLU stage, optionally ending in 2x2 max-pooling."""
        modules = [
            nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                      stride=1, padding=padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        ]
        if pool:
            modules.append(nn.MaxPool2d(kernel_size=2))
        return nn.Sequential(*modules)

    def forward(self, x):
        """Return raw class scores (logits) for a batch of images."""
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        # Flatten the feature maps before the linear classifier.
        out = x.view(-1, 56 * 46 * 64)
        return self.fc(out)
# Instantiate the network and move it to the chosen device.
model = ConvNet(num_classes).to(device)
#%% Calling the optimizer and Cross Entropy:
# CrossEntropyLoss expects raw logits, which is what the model outputs.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
#%% Training the model:
model.train()  # training mode (batch-norm updates its running stats)
total_step = len(train_loader)  # mini-batches per epoch
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Log every `batch_size` steps (batch_size doubles as the log interval).
        if (i + 1) % batch_size == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
#%% Loading the test set:
test_set = datasets.ImageFolder(root='data/test_faces/', transform=transforms.ToTensor() )
test_loader = DataLoader(test_set, batch_size=test_batch,shuffle=True)
#%% Evaluating the performance of the model:
model.eval()  # inference mode (batch-norm uses its running stats)
with torch.no_grad():  # no gradients needed for evaluation
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Predicted class = index of the largest logit per sample.
        predicted = torch.argmax(outputs.data, dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Test accuracy of the model on the test set: {} %' .format(100*correct/total))
#%% Showing some images with their labels and predictions:
# `images`, `labels`, `predicted` still refer to the LAST batch of the
# evaluation loop above.
grid=utils.make_grid(images)
plt.imshow(grid.numpy().transpose((1, 2, 0)))
plt.show()
print('Labels:', labels)
print('Predicted:', predicted)
#%% Show all labels and predicted labels of the test set:
model.eval()
# Materialize the loader so individual batches can be indexed below.
batch=list(test_loader)
for i in range(len(test_loader)):
    image=batch[i][0]
    label=batch[i][1]
    image=image.to(device)
    label=label.to(device)
    output=model(image)
    predicted = torch.argmax(output.data, dim=1)
    print('{} P:' .format(i), predicted)
    print('{} L:' .format(i),label)
#%% Show a batch:
# NOTE(review): hard-coded 5th batch -- assumes the test set yields at
# least 5 batches; verify against test-set size / test_batch.
image=batch[4][0]
label=batch[4][1]
grid=utils.make_grid(image)
plt.imshow(grid.numpy().transpose((1, 2, 0)))
plt.show()
output=model(image)
predicted = torch.argmax(output.data,dim=1)
print('Predicted:', predicted)
print('Label:',label)
#%% Show a unique image:
model.eval()
# Take the first image of the batch above and re-add a batch dimension
# so the model receives a 4-D tensor.
test_image=image[0]
test_image=test_image.unsqueeze(0)
test_label=label[0]
output=model(test_image)
predicted = torch.argmax(output)
grid=utils.make_grid(test_image)
plt.imshow(grid.numpy().transpose((1,2,0)))
plt.show()
print("Predicted:",predicted)
print("Label:",test_label)
#%% Showing the indexes:
# Mapping from class-folder name to integer label used by ImageFolder.
dataset.class_to_idx
#%% Save the model:
torch.save(model.state_dict(), 'cnn.ckpt')
| Faces.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import os

sys.path.append(".")
os.chdir("../")
# -
os.getcwd()
import pathlib
import numpy
# Randomly sample up to 2M of the parsed .txt documents and record the
# chosen paths, one per line, in data/parsed/samples.list.
indir = "data/parsed"
path_list = pathlib.Path(indir).glob("doc/**/*.txt")
path_list
# Materialize the generator into an array so a boolean mask can select from it.
l = numpy.array([p for p in path_list])
n = len(l)
n_samples = min(n, 2*1000*1000)  # cap the sample at 2 million paths
# Per-path Bernoulli draw with p = n_samples/n, so the realized sample
# size is only approximately n_samples.  (Assumes n > 0.)
filter_flags = numpy.random.binomial(1, n_samples/n, n)
print(filter_flags.sum(), n_samples)
# numpy.bool8 was deprecated in NumPy 1.24 and removed in 2.0; the
# builtin bool is the supported boolean-mask dtype.
filter_flags = filter_flags.astype(bool)
filter_flags
ll = l[filter_flags]
assert len(ll) == filter_flags.sum()
len(ll)
ll[0]
p = pathlib.Path(indir)
# Write one sampled path per line.  Use "\n" instead of os.linesep:
# text mode already translates "\n" to the platform terminator, so
# os.linesep would produce "\r\r\n" on Windows.  write() (not
# writelines()) is the right call for a single string.
with p.joinpath("samples.list").open("w") as f:
    for elm in ll:
        line = f"{elm}\n"
        f.write(line)
indir
| notebook/glob-sampling.ipynb |