code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.1
# language: julia
# name: julia-1.7
# ---
# # ME 617 HW 3
#
# **Author: <NAME>**
#
# _Note: This source code was implemented in Julia. This notebook requires a Julia 1.7.x kernel as well as an environment that includes the necessary dependencies. See the [repo](https://github.com/camirmas/DesignAutomation) for full implementations, testing, and dependency information. All relevant code has been copied into this notebook, so no importing of individual modules is necessary._
using LinearAlgebra
using Optim
"""
    levy3(x)

Levy test function (a standard multimodal optimization benchmark) evaluated
at the vector `x`. The global minimum is 0, attained at `x = [1, 1, ..., 1]`.
"""
function levy3(x)
    n = length(x)
    y(x_i) = 1 + (x_i - 1)/4
    term1 = sin(π*y(x[1]))^2
    # Fixed: the last term is (y(x_n) - 1)^2 per the Levy definition; the
    # original computed y(x_n - 1)^2, which shifts the minimum away from x = 1.
    term3 = (y(x[n]) - 1)^2 * (1 + sin(2π*y(x[n]))^2)
    acc = 0.0  # accumulator named `acc` so Base.sum is not shadowed
    for i = 1:n-1
        acc += (y(x[i]) - 1)^2 * (1 + 10sin(π*y(x[i]) + 1)^2)
    end
    return term1 + acc + term3
end
# ## 1 Exhaustive Search
"""
Performs an exhaustive search for a function using a discretized search space.
The total number of function calls will be d^n, where d is the number of
dimensions, and n is the length of the discretized search space.
Args:
fn (Function): Objective function
x_range (Array): Range of x values to explore
dimensions (Int64): Dimensionality of search space
spacing (Float64, optional): Discretization
verbose (Bool, optional): Print results
Returns:
Tuple: (x, fx, f_calls) where x is the minimizer, fx is the minimum, and
f_calls is the number of function calls made.
"""
function exhaustive_search(fn::Function, x_range::Array, dimensions::Int64;
spacing=.1, verbose=true)
f_calls = 1
i = 1
k = [1 for _ in 1:dimensions]
min_x, max_x = x_range
x_values = collect(min_x:spacing:max_x)
n_x_values = length(x_values)
x = [x_values[begin] for _ in 1:dimensions]
x_star = copy(x)
f_star = fn(x_star)
n = length(x)
while i <= n
k[i] += 1
if k[i] > n_x_values
x[i] = min_x
k[i] = 1
i += 1
continue
end
x[i] = x_values[k[i]]
fnew = fn(x)
f_calls += 1
if verbose && (f_calls % 1000000 == 0)
println("Iterations: $(f_calls)\nx*: $(x_star)\nf*: $(f_star)\n")
end
if fnew < f_star
f_star = fnew
x_star = copy(x)
end
i = 1
end
if verbose
println("Iterations: $(f_calls)\nx*: $(x_star)\nf*: $(f_star)\n")
end
return x_star, f_star, f_calls
end
# Exhaustive search of the 4-D Levy-3 function over the box [-2, 6]^4.
(x, fx, f_calls) = exhaustive_search(levy3, [-2, 6], 4; verbose=true)
# ## 2 Random Hill Climbing
"""
Performs a random hill climb solution for a discretized search space.
Args:
fn (Function): Objective function
x0 (Array{Any}): Starting point
h (Float64, optional): Discretization
max_failed (Int64, optional): Max number of failed moves before exiting.
Defaults to the dimensionality of the design space.
verbose (Bool, optional): Optionally print results
Returns:
tuple: (x, fx, f_calls) where x is the minimizer, fx is the minimum, and
f_calls is the number of function calls made.
"""
function random_hill(fn::Function, x0::Array{Float64};
h=.1, max_failed=nothing, verbose=false)
x = copy(x0)
fx = fn(x)
n = length(x)
failed = 0
f_calls = 1
# use an I matrix to randomly determine direction
e_hat = Matrix(1I, n, n)
if isnothing(max_failed)
max_failed = n
end
while failed < max_failed
d = e_hat[:, rand(1:n)] # randomly choose basis vec
d *= rand([-1, 1]) # randomly choose pos/neg
xnew = x + h*d
fnew = fn(xnew)
f_calls += 1
if fnew < fx
fx = fnew
x = xnew
failed = 0
else
failed += 1
end
end
if verbose
println("Iterations: $(f_calls)\nx*: $(x)\nf*: $(fx)\n")
end
return x, fx, f_calls
end
# Random hill climbs from several starting points, each at two step sizes
# (h = 0.1 and h = 0.01), all capped at 100 consecutive failed moves.
x0 = [6.0, -2.0, -2.0, 6.0]
random_hill(levy3, x0; max_failed=100, verbose=true)
random_hill(levy3, x0; h=.01, max_failed=100, verbose=true)
x0 = [4.2, -1.2, 2.4, -3.8]
random_hill(levy3, x0; max_failed=100, verbose=true)
random_hill(levy3, x0; h=.01, max_failed=100, verbose=true)
x0 = [0.0, 0.0, 0.0, 3.2]
random_hill(levy3, x0; max_failed=100, verbose=true)
random_hill(levy3, x0; h=.01, max_failed=100, verbose=true)
# Start from all-ones (the Levy global minimizer per the standard definition).
x0 = [1.0, 1.0, 1.0, 1.0]
random_hill(levy3, x0; max_failed=100, verbose=true)
random_hill(levy3, x0; h=.01, max_failed=100, verbose=true)
# ## 3 Simulated Annealing
# Optim.jl's SimulatedAnnealing from four starting points, capped at 1000 iterations.
max_iter = 1000
SA_options = Optim.Options(iterations=max_iter)
x0 = [6.0, -2.0, -2.0, 6.0]
res = optimize(levy3, x0, SimulatedAnnealing(), SA_options)
display(res)
println("x*: $(res.minimizer)")
println("f*: $(res.minimum)")
x0 = [4.2, -1.2, 2.4, -3.8]
res = optimize(levy3, x0, SimulatedAnnealing(), SA_options)
display(res)
println("x*: $(res.minimizer)")
println("f*: $(res.minimum)")
x0 = [0.0, 0.0, 0.0, 3.2]
res = optimize(levy3, x0, SimulatedAnnealing(), SA_options)
display(res)
println("x*: $(res.minimizer)")
println("f*: $(res.minimum)")
x0 = [1.0, 1.0, 1.0, 1.0]
res = optimize(levy3, x0, SimulatedAnnealing(), SA_options)
display(res)
println("x*: $(res.minimizer)")
println("f*: $(res.minimum)")
# ## 4 Particle Swarm
# Optim.jl's ParticleSwarm from the same four starting points, 1000 iterations.
max_iter = 1000
PS_options = Optim.Options(iterations=max_iter)
x0 = [6.0, -2.0, -2.0, 6.0]
res = optimize(levy3, x0, ParticleSwarm(), PS_options)
display(res)
println("x*: $(res.minimizer)")
println("f*: $(res.minimum)")
x0 = [4.2, -1.2, 2.4, -3.8]
res = optimize(levy3, x0, ParticleSwarm(), PS_options)
display(res)
println("x*: $(res.minimizer)")
println("f*: $(res.minimum)")
x0 = [0.0, 0.0, 0.0, 3.2]
res = optimize(levy3, x0, ParticleSwarm(), PS_options)
display(res)
println("x*: $(res.minimizer)")
println("f*: $(res.minimum)")
x0 = [1.0, 1.0, 1.0, 1.0]
res = optimize(levy3, x0, ParticleSwarm(), PS_options)
display(res)
println("x*: $(res.minimizer)")
println("f*: $(res.minimum)")
# ## 5 Analysis
# **Discuss differences in the four approaches above. Consider observations based on the robustness, generality, accuracy, ease of use, and efficiency of the methods.**
#
# **Robustness:**
#
# In general, the stochastic methods utilized in this notebook are robust in the sense that they can typically find at least a local minimum within the search space. However, for the case described, some clearly had more trouble than others in finding the global minimum, particularly Random Hill Climbing. This is expected, as this method makes no particular efforts to escape falling into local minima.
#
# **Generality:**
#
# All the methods utilized are generally applicable to discretized optimization problems, though for a multimodal objective function, Random Hill climbing stands out as a poor fit. Conversely, if this problem had been unimodal, Simulated Annealing and Particle Swarm may have proved to be overkill. Exhaustive Search is arguably the most applicable, though only for problems that are not intractably large.
#
# **Accuracy:**
#
# As expected, Exhaustive Search is the most accurate method, as it completely trades off performance for accuracy. Among the other methods, PSO performed the best, often (but not always) achieving the global optimum. Simulated Annealing had some difficulties with local minima, and Random Hill Climbing had significant difficulties.
#
# **Ease of use:**
#
# Exhaustive Search is particularly easy to use, though its performance would become prohibitive for a larger search space. Random Hill climbing is also easy to use, with fairly intuitive knobs for discretization and convergence that can easily be experimented with and iterated upon to achieve better results. Simulated Annealing and Particle Swarm are relatively more complicated; though their analogies to physical systems are useful for conceptual understanding, the effects of particular knobs like Temperature and Population on results is less clear.
#
# **Efficiency:**
#
# While all of the methods described require a significant number of function calls, Exhaustive Search is, as expected, the least efficient, as it searches the entire search space, even if it happens to find the optimal result along the way. Simulated Annealing (SA) and Particle Swarm (PSO) also require many function calls: SA is often exploring to escape local minima; and PSO is informed via function calls for all particles in the population. Random hill climbing is presumably the most efficient, though it trades off accuracy/precision, as random steps can lead to very different outcomes depending on their directions.
| src/notebooks/hw3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
df = pd.read_csv("drug200.csv")
df.head()
df.describe()
df.info()
df.Sex.value_counts()
plt.figure(figsize=(9,5))
sns.countplot(x = df.Sex)
plt.show()
df.Cholesterol.value_counts()
print("Max Na_to_K:",df.Na_to_K.max())
print("Min Na_to_K:",df.Na_to_K.min())
print("Mean Na_to_K:",df.Na_to_K.mean())
sns.heatmap(df.isnull(),cbar=False,cmap='viridis')
plt.figure(figsize = (9,5))
sns.countplot(df.Drug)
plt.show()
print("Minimum Age of DrugB",df.Age[df.Drug == "drugB"].min())
print("Maximum Age of DrugA",df.Age[df.Drug == "drugA"].max())
df_Sex_Drug = df.groupby(["Drug","Sex"]).size().reset_index(name = "Count")
df_Sex_Drug
df_BP_Drug = df.groupby(["Drug","BP"]).size().reset_index(name = "Count")
df_BP_Drug
df_CH_Drug = df.groupby(["Drug","Cholesterol"]).size().reset_index(name = "Count")
df_CH_Drug
plt.figure(figsize = (9,5))
sns.swarmplot(x = "Drug", y = "Na_to_K",hue="BP",data = df)
plt.legend()
plt.title("Na_to_K -- BP -- Drug")
plt.show()
# +
#Drug A and Drug B if Na_to_K is less than 15 and high BP
#Drug C if Na_to_K is less than 15 and low BP
# -
df['Na_to_K_Bigger_Than_15'] = [1 if i >=15.015 else 0 for i in df.Na_to_K]
df.head()
| Machine Learning/01 Data Visualization/Data Visualization_Drug Analysis/Drug_Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Entity Extraction+Linking using modern SciSpaCy models
#
# ## Initialize Dask Clusters
# +
# Inspect the Saturn Cloud instance sizes available for Dask clusters.
from dask_saturn.core import describe_sizes
describe_sizes()
# +
from dask.distributed import Client, wait
from dask_saturn import SaturnCluster
import time
# Provision a 5-worker Dask cluster; each worker runs 16 threads.
n_workers = 5
cluster = SaturnCluster(n_workers=n_workers,
                        scheduler_size='2xlarge',
                        worker_size='4xlarge',
                        nthreads=16)
client = Client(cluster)
cluster
# -
# Block until all requested workers have joined the scheduler.
while len(client.scheduler_info()['workers']) < n_workers:
    print('Waiting for workers, got', len(client.scheduler_info()['workers']))
    time.sleep(30)
print('Done!')
# +
import dask.dataframe as dd
import json
import numpy as np
import pandas as pd
import s3fs
import spacy
import scispacy
from dask.distributed import Client, progress, get_worker
from scispacy.abbreviation import AbbreviationDetector
from scispacy.linking import EntityLinker
# -
# ## Processing
# Knowledge base to link entities against; switch by editing which line is
# uncommented (umls, mesh, go, hpo, or rxnorm).
MODEL_KB = "umls"
# MODEL_KB = "mesh"
# MODEL_KB = "go"
# MODEL_KB = "hpo"
# MODEL_KB = "rxnorm"
# +
# S3 locations: input sentences, and the per-KB output folder for entities.
BUCKET_NAME = "saturn-elsevierinc"
SENTENCE_FOLDER = "/".join(["s3:/", BUCKET_NAME, "incremental", "add-sents"])
ENTITIES_FOLDER = "/".join(["s3:/", BUCKET_NAME, "incremental",
                            "add-ents-{:s}".format(MODEL_KB)])
# -
# Lazily load the sentence dataset and peek at it.
sentences_df = dd.read_parquet(SENTENCE_FOLDER, engine="pyarrow")
sentences_df.head()
len(sentences_df)
# +
def build_nlp_pipeline(model_kb):
    """Load the SciSpaCy model and attach abbreviation detection plus entity
    linking against the knowledge base named by `model_kb`.

    NOTE(review): component objects are passed directly to `add_pipe`, which is
    the spaCy v2 API — spaCy v3 expects registered component names. Confirm the
    pinned spacy/scispacy versions.
    """
    nlp = spacy.load("en_core_sci_md")
    abbr_detector = AbbreviationDetector(nlp)
    nlp.add_pipe(abbr_detector)
    linker = EntityLinker(resolve_abbreviations=True,
                          filter_for_definitions=False,
                          name=model_kb)
    nlp.add_pipe(linker)
    return nlp
def nlp_workers():
    """Build the NLP pipeline on the current Dask worker and cache it as
    `worker.nlp`.

    Returns 0 on success, or the formatted traceback string on any failure, so
    errors surface in the `client.run` result instead of crashing the worker.
    """
    import traceback
    try:
        worker = get_worker()
        nlp = build_nlp_pipeline(MODEL_KB)
        worker.nlp = nlp
    except:
        # deliberate broad catch: report the traceback to the client
        return traceback.format_exc()
    return 0
def check_nlp_workers():
    """Return the repr of the pipeline cached on this worker (sanity check)."""
    worker = get_worker()
    return str(worker.nlp)

# Build the pipeline on every worker, then verify each one has it.
# %time client.run(nlp_workers)
# -
client.run(check_nlp_workers)
# +
def handle_batch(sents, nlp, model_kb):
    """Run `sents` through the spaCy pipeline and collect linked-entity tuples.

    Returns one list per processed document; each element is
    (ent_index, model_kb, surface_text, concept_id, score, start_char, end_char).
    KeyErrors from the linker are tolerated: per-entity errors skip that entity,
    a KeyError while iterating documents stops early with what was gathered.
    """
    parsed = nlp.pipe(sents, n_threads=16, batch_size=len(sents))
    results = []
    try:
        for doc in parsed:
            doc_ents = []
            for idx, span in enumerate(doc.ents):
                try:
                    for concept_id, score in span._.kb_ents:
                        doc_ents.append((idx, model_kb, span.text, concept_id,
                                         score, span.start_char, span.end_char))
                except KeyError:
                    continue
            results.append(doc_ents)
    except KeyError:
        pass
    return results
def handle_partition(part):
    """Entity-link every sentence in one Dask dataframe partition.

    Sentences are accumulated into batches of `batch_size` and sent through the
    worker-cached pipeline. Returns one entity list per successfully read row
    (rows whose `stext` raises ValueError are skipped).
    """
    worker = get_worker()
    nlp = worker.nlp
    batch_size = 32
    sent_batch, ent_batch, entities = [], [], []
    for _, row in part.iterrows():
        # Flush once the batch is full. Checked before appending; since the
        # batch is cleared on each flush, its length never exceeds batch_size,
        # so the modulo test is equivalent to == batch_size.
        if len(sent_batch) % batch_size == 0 and len(sent_batch) > 0:
            ent_batch = handle_batch(sent_batch, nlp, MODEL_KB)
            entities.extend(ent_batch)
            sent_batch = []
        try:
            sent_batch.append(row.stext)
        except ValueError:
            # skip rows whose sentence text cannot be read
            continue
    # flush the final partial batch
    if len(sent_batch) > 0:
        ent_batch = handle_batch(sent_batch, nlp, MODEL_KB)
        entities.extend(ent_batch)
    return entities
# -
# Start from the sentence frame; entities are computed per partition on workers.
entities_df = sentences_df.copy()
# +
entities_df["entities"] = entities_df.map_partitions(
    lambda part: handle_partition(part), meta=("object"))
# raw sentence text is no longer needed once entities are extracted
entities_df = entities_df.drop(columns=["stext"])
# one row per (sentence, entity); rows with no entities become NaN and are dropped
entities_df = entities_df.explode("entities")
entities_df = entities_df.dropna()
# unpack the 7-field tuple produced by handle_batch into named columns
entities_df["eid"] = entities_df.apply(
    lambda row: row.entities[0], meta=("int"), axis=1)
entities_df["eclass"] = entities_df.apply(
    lambda row: row.entities[1], meta=("str"), axis=1)
entities_df["etext"] = entities_df.apply(
    lambda row: row.entities[2], meta=("str"), axis=1)
entities_df["elabel"] = entities_df.apply(
    lambda row: row.entities[3], meta=("str"), axis=1)
entities_df["escore"] = entities_df.apply(
    lambda row: row.entities[4], meta=("float"), axis=1)
entities_df["ent_start_char"] = entities_df.apply(
    lambda row: row.entities[5], meta=("int"), axis=1)
entities_df["ent_end_char"] = entities_df.apply(
    lambda row: row.entities[6], meta=("int"), axis=1)
entities_df = entities_df.drop(columns=["entities"])
# -
# normalize dtypes before writing parquet
entities_df.cord_uid = entities_df.cord_uid.astype(str)
entities_df.pid = entities_df.pid.astype(str)
entities_df.sid = entities_df.sid.astype(np.int32)
entities_df.eid = entities_df.eid.astype(np.int32)
entities_df.eclass = entities_df.eclass.astype(str)
entities_df.etext = entities_df.etext.astype(str)
entities_df.elabel = entities_df.elabel.astype(str)
entities_df.escore = entities_df.escore.astype(np.float32)
entities_df.ent_start_char = entities_df.ent_start_char.astype(np.int32)
entities_df.ent_end_char = entities_df.ent_end_char.astype(np.int32)
# replace any previous output for this KB, then write
fs = s3fs.S3FileSystem()
if fs.exists(ENTITIES_FOLDER):
    fs.rm(ENTITIES_FOLDER, recursive=True)
# %%time
entities_df.to_parquet(ENTITIES_FOLDER, engine="pyarrow", compression="snappy")
# ## Verify Result
ENTITIES_FOLDER
# output size in MB
fs.du(ENTITIES_FOLDER) / 1e6
# read the output back to confirm the dataset is loadable
entities_df = dd.read_parquet(ENTITIES_FOLDER, engine="pyarrow")
entities_df.head()
len(entities_df)
# shut the cluster down when you're done using it
cluster.close()
| notebooks/incremental/04ai-extract-link-ner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Pointer
# [Fruit Into Baskets](https://leetcode.com/problems/fruit-into-baskets/)。给一数组表示一片小树林,数组中的数字表示树上结的果子。只能摘取种植区域相邻的两种果子,而且只能从左往右走,问最多能摘取多少个果子。
#
# 思路:使用双指针来指示当前摘果子的区域,并使用字典来表示当前篮子中的情况。每次移动后更新篮子情况,然后判断篮子中的果子种类是不是大于$2$,若是则需要移动左指针同时丢掉果子。
def totalFruit(tree) -> int:
    """Length of the longest contiguous run of `tree` containing at most two
    distinct values (LeetCode 904), via a sliding window with a count map."""
    counts = dict()
    best = 0
    lo = 0
    for hi, fruit in enumerate(tree):
        counts[fruit] = counts.get(fruit, 0) + 1
        # shrink from the left until at most two kinds remain in the window
        while len(counts) > 2:
            dropped = tree[lo]
            counts[dropped] -= 1
            if counts[dropped] == 0:
                del counts[dropped]
            lo += 1
        best = max(best, hi - lo + 1)
    return best
# [Valid Triangle Number](https://leetcode.com/problems/valid-triangle-number/)。给一正整数数组,每一个数字代表一条边的边长,求这些边能构成多少个三角形。不同元素(即使长度相同)视为不同边。
#
# 思路:构成三角形的充要条件,任意两边之和大于第三边。将数组排序,由大往小固定住最长的一条边$c$,然后在前面使用首尾双指针寻找两条边$a$和$b$。只要$a+b>c$就说明$a\in{[a,b-1]}$的边都符合条件。
def triangleNumber(nums) -> int:
    """Count index triples whose side lengths can form a triangle (LeetCode 611).

    Sorts `nums` in place (matching the original's side effect), fixes the
    largest side from the right, and scans the remainder with two pointers:
    whenever nums[lo] + nums[hi] beats the fixed side, every lo' in [lo, hi-1]
    also works, contributing hi - lo triples at once.
    """
    nums.sort()
    total = 0
    for top in range(len(nums) - 1, 1, -1):
        lo, hi = 0, top - 1
        while lo < hi:
            if nums[lo] + nums[hi] > nums[top]:
                total += hi - lo
                hi -= 1
            else:
                lo += 1
    return total
# [Subarray Product Less Than K](https://leetcode.com/problems/subarray-product-less-than-k/)。**2019作业帮手撕代码题**。给一正整数数组,求有多少个连续子数组的累乘积小于$k$。
#
# 思路:暴力思路超时。使用滑动窗口优化,左指针$i$初始化为$0$,右指针$j$线性扫描数组,每次循环都计算出以$j$结尾且包含```nums[j]```的子数组数量。
def numSubarrayProductLessThanK(nums, k: int) -> int:
    """Count contiguous subarrays of positive ints with product < k (LeetCode 713).

    Sliding window: for each right edge, shrink the left edge until the window
    product drops below k; the window size then counts all qualifying subarrays
    ending at the right edge.
    """
    if k <= 1:
        return 0
    count = 0
    window = 1
    lo = 0
    for hi, value in enumerate(nums):
        window *= value
        while window >= k:
            window /= nums[lo]  # true division, as in the original
            lo += 1
        count += hi - lo + 1
    return count
# [Count and Say](https://leetcode.com/problems/count-and-say/)。假定初始数字为$1$,其读法为$1$个$1$,写成$11$;$11$读作$2$个$1$,写成$21$;...。求第$n$个数字。
#
# 思路:初始结果为'1',然后迭代$n-1$次。每轮迭代中需要对当前结果的所有数字计数。
def countAndSay(n: int) -> str:
    """Return the n-th term of the count-and-say sequence (LeetCode 38).

    Starts from '1'; each iteration reads the previous term as runs of equal
    digits and writes "<run length><digit>" for every run.
    """
    term = '1'
    for _ in range(n - 1):
        pieces = []
        i = 0
        while i < len(term):
            # advance j to the end of the current run of equal digits
            j = i
            while j < len(term) and term[j] == term[i]:
                j += 1
            pieces.append(str(j - i))
            pieces.append(term[i])
            i = j
        term = ''.join(pieces)
    return term
# [Sort Colors](https://leetcode.com/problems/sort-colors/)。
#
# 思路:
def sortColors(nums) -> None:
    """Sort a list of 0/1/2 values in place (LeetCode 75, Dutch national flag).

    Single pass, O(1) extra space: 0s are swapped to the front, 2s to the back,
    1s settle in the middle. Returns None, matching the original stub.
    """
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        value = nums[mid]
        if value == 0:
            nums[low], nums[mid] = nums[mid], nums[low]
            low += 1
            mid += 1
        elif value == 1:
            mid += 1
        else:
            # value == 2: swap to the back; re-examine the swapped-in element
            nums[mid], nums[high] = nums[high], nums[mid]
            high -= 1
# ## Index
# [Array Nesting](https://leetcode.com/problems/array-nesting/)。给一非重复数组,每一个数字都表示下一位置的索引,找出最长非重复连续访问序列的长度。
#
# 思路:令某一连续访问序列的起始值为```start```,若某一时刻访问的下一位置的值等于```start```,则说明产生了环,需要停止。以每一个位置为起点,连续访问,随时维护一个最大长度即可。但是直接用该方法会超时,下面的优化才是难点:因为**数组中没有重复值**,所以若存在多条路径,则**路径之间是独立**的。访问一条路径时,将路径上所有点全部做标记,时间复杂度由$O(n^{2})$降为$O(n)$。
def arrayNesting(nums) -> int:
    """Longest cycle length when following nums[i] -> nums[nums[i]] -> ...
    (LeetCode 565, input is a permutation of 0..n-1).

    Because the values are distinct, the cycles are disjoint; each visited slot
    is overwritten with -1 (mutating `nums`, as the original does) so every
    element is walked at most once — O(n) overall.
    """
    longest = 0
    for start in range(len(nums)):
        if nums[start] == -1:
            continue
        length = 0
        pos = start
        while nums[pos] != -1:
            following = nums[pos]
            nums[pos] = -1  # mark as visited
            pos = following
            length += 1
        longest = max(longest, length)
    return longest
# [Max Chunks To Make Sorted](https://leetcode.com/problems/max-chunks-to-make-sorted/)。给一无序自然数数组,将其分成若干块,允许对块内元素排序,但不能更改块间顺序。问最多能将该数组分成几块。
#
# 思路:该题思路比较难。由于不允许调整块间顺序,所以每一块所包含的元素必须是该区间对应的值,如区间$[i,j]$中只能包含$[i,j]$的值。因为要求尽量多的切分,易得若$nums[i]=i$时,该位置就可单独成块。若索引与值不对应,则需要记录某段区间的最大值,直到扫描到最大值对应的索引位置,然后才可成块。
def maxChunksToSorted(arr) -> int:
    """Maximum number of chunks that can be sorted independently to sort `arr`
    (LeetCode 769, `arr` is a permutation of 0..n-1).

    A chunk can end at position i exactly when max(arr[0..i]) == i; count those
    positions in one pass.
    """
    chunks = 0
    running_max = -0x80000000
    for pos, value in enumerate(arr):
        if value > running_max:
            running_max = value
        if running_max == pos:
            chunks += 1
    return chunks
# [Global and Local Inversions](https://leetcode.com/problems/global-and-local-inversions/)。给一无序的自然数数组,令局部逆序对是相邻元素的大小逆序,而全部逆序对是任何位置元素对的大小逆序。判断该数组局部逆序对的数量是否等于全部逆序对的数量。
#
# 思路:易得全局逆序对包含了局部逆序对,所以该题实质要判断该数组是否只存在局部逆序对。可以发现规律,若对所有位置的元素,都有$\vert nums[i]-i \vert\le{1}$,则该数组只存在局部逆序对。
def isIdealPermutation(A) -> bool:
    """True iff the number of global inversions equals the number of local
    inversions (LeetCode 775): equivalently, every value sits within one slot
    of its index, so no value ever jumps two or more positions."""
    return all(abs(value - pos) <= 1 for pos, value in enumerate(A))
# [Circular Array Loop](https://leetcode.com/problems/circular-array-loop/)。给以循环数组,假设其首尾相接。数组中的每个值都代表往前或往后所走的步数,判断该循环数组中是否有长度大于$2$的单向环。
#
# 思路:判断有环使用快慢指针法。用一个```for```循环尝试所有可能的起点,对于每一个起点使用死循环判断是否有环。注意当出现反向时或者单点循环时都表明不满足题意。
def circularArrayLoop(nums) -> bool:
    """Detect a single-direction cycle of length > 1 in a circular array
    (LeetCode 457): each value is a signed step count, indices wrap around.

    Classic slow/fast pointer walk from every starting index; a start is
    abandoned as soon as the walk changes sign (mixed directions) or collapses
    to a one-element self-loop.
    """
    size = len(nums)
    if size < 2:
        return False

    def advance(pos):
        return (pos + nums[pos]) % size

    for origin in range(size):
        slow = origin
        fast = advance(origin)
        while True:
            # abandon this origin if either pointer's next hop flips direction
            if nums[slow] * nums[advance(slow)] < 0:
                break
            if nums[advance(fast)] * nums[advance(advance(fast))] < 0:
                break
            if slow == fast:
                if advance(slow) == fast:
                    break  # one-element self-loop: not a valid cycle
                return True
            slow = advance(slow)
            fast = advance(advance(fast))
    return False
| Algorithm/OnlineExam/Python/Idx&Ptr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Lesson 07 - Outliers
#
# ## What causes outliers?
# - Sensor malfunctions
# - data entry errors
# - freak events (events that happen very rarely but do happen)
#
# Some outliers need to be ignored. Sensor problem and data entry can be ignored in robotics but in some fields the freak events are very important like fraud detection.
# ## Outlier Detection
# - Train your regression
# - Remove points with largest residual errors
# - Train again
# ## Residual Error
# - The error left after fitting the regression
| udacity_data_science_notes/intro_machine_learning/lesson_07/lesson_07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# 说明:
# 给你一个 rows x cols 的矩阵 grid 来表示一块樱桃地。 grid 中每个格子的数字表示你能获得的樱桃数目。
# 你有两个机器人帮你收集樱桃,机器人 1 从左上角格子 (0,0) 出发,机器人 2 从右上角格子 (0, cols-1) 出发。
# 请你按照如下规则,返回两个机器人能收集的最多樱桃数目:
# 1、从格子 (i,j) 出发,机器人可以移动到格子 (i+1, j-1),(i+1, j) 或者 (i+1, j+1) 。
# 2、当一个机器人经过某个格子时,它会把该格子内所有的樱桃都摘走,然后这个位置会变成空格子,即没有樱桃的格子。
# 3、当两个机器人同时到达同一个格子时,它们中只有一个可以摘到樱桃。
# 4、两个机器人在任意时刻都不能移动到 grid 外面。
# 5、两个机器人最后都要到达 grid 最底下一行。
#
# 提示:
# 1、rows == grid.length
# 2、cols == grid[i].length
# 3、2 <= rows, cols <= 70
# 4、0 <= grid[i][j] <= 100
# -
# <img src='https://assets.leetcode-cn.com/aliyun-lc-upload/uploads/2020/05/30/sample_1_1802.png'>
# + active=""
# 输入:grid = [[3,1,1],[2,5,1],[1,5,5],[2,1,1]]
# 输出:24
# 解释:
# 机器人 1 和机器人 2 的路径在上图中分别用绿色和蓝色表示。
# 机器人 1 摘的樱桃数目为 (3 + 2 + 5 + 2) = 12 。
# 机器人 2 摘的樱桃数目为 (1 + 5 + 5 + 1) = 12 。
# 樱桃总数为: 12 + 12 = 24 。
# -
# <img src='https://assets.leetcode-cn.com/aliyun-lc-upload/uploads/2020/05/30/sample_2_1802.png'>
# + active=""
# 输入:grid = [[1,0,0,0,0,0,1],[2,0,0,0,0,3,0],[2,0,9,0,0,0,0],[0,3,0,5,4,0,0],[1,0,2,3,0,0,6]]
# 输出:28
# 解释:
# 机器人 1 和机器人 2 的路径在上图中分别用绿色和蓝色表示。
# 机器人 1 摘的樱桃数目为 (1 + 9 + 5 + 2) = 17 。
# 机器人 2 摘的樱桃数目为 (1 + 3 + 4 + 3) = 11 。
# 樱桃总数为: 17 + 11 = 28 。
# +
import copy
class Solution:
    def cherryPickup(self, grid) -> int:
        """Maximum cherries two robots can collect walking from the top corners
        to the bottom row (LeetCode 1463). Robot 1 starts at (0, 0), robot 2 at
        (0, cols-1); each step moves one row down and at most one column
        sideways; a shared cell is counted once.

        Rolling DP: dp[c1][c2] is the best total with robot 1 at column c1 and
        robot 2 at column c2 on the current row.
        """
        rows, cols = len(grid), len(grid[0])
        dp = [[-float('inf')] * cols for _ in range(cols)]
        dp[0][-1] = grid[0][0] + grid[0][-1]  # fixed start positions
        # Fix: seed the answer with the first row so a 1-row grid does not
        # return -inf (res previously started at -float('inf')).
        res = dp[0][-1]
        for r in range(1, rows):
            dp_old = copy.deepcopy(dp)
            for c1 in range(cols):
                for c2 in range(cols):
                    # each robot arrived from directly above or a diagonal
                    for i in range(c1 - 1, c1 + 2):
                        # Fix: robot 2's predecessor range was range(c1-1, c2+2),
                        # which allowed illegal (non-adjacent) moves for robot 2.
                        for j in range(c2 - 1, c2 + 2):
                            if i < 0 or j < 0 or i >= cols or j >= cols:
                                continue
                            if c1 != c2:
                                dp[c1][c2] = max(dp[c1][c2], dp_old[i][j] + grid[r][c1] + grid[r][c2])
                            else:
                                # same cell: cherries counted once
                                dp[c1][c2] = max(dp[c1][c2], dp_old[i][j] + grid[r][c1])
                    res = max(res, dp[c1][c2])
        return res
# -
# Smoke test on LeetCode example 1; the expected answer is 24.
solution = Solution()
solution.cherryPickup([[3,1,1],[2,5,1],[1,5,5],[2,1,1]])
# ### 12月20日
# +
import copy
class Solution:
    def cherryPickup(self, grid) -> int:
        """Maximum cherries two robots can collect walking top corners -> bottom
        row (LeetCode 1463), one row per step, at most one column sideways; a
        shared cell counts once.

        Rolling DP over column pairs: best[a][b] is the best total with robot 1
        at column a and robot 2 at column b on the current row.
        """
        height, width = len(grid), len(grid[0])
        NEG = -float('inf')
        best = [[NEG] * width for _ in range(width)]
        # fixed starts: robot 1 at column 0, robot 2 at the last column
        best[0][-1] = grid[0][0] + grid[0][-1]
        answer = best[0][-1]
        for r in range(1, height):
            previous = [row[:] for row in best]  # snapshot of the row above
            for a in range(width):
                for b in range(width):
                    # each robot arrived from one of three columns above
                    for pa in (a - 1, a, a + 1):
                        for pb in (b - 1, b, b + 1):
                            if not (0 <= pa < width and 0 <= pb < width):
                                continue
                            gain = grid[r][a] if a == b else grid[r][a] + grid[r][b]
                            if previous[pa][pb] + gain > best[a][b]:
                                best[a][b] = previous[pa][pb] + gain
                    answer = max(answer, best[a][b])
        return answer
# +
from functools import lru_cache
class Solution:
    def cherryPickup(self, grid) -> int:
        """Memoized top-down variant of LeetCode 1463: collect(r, a, b) is the
        best total from row r onward with the robots at columns a and b."""
        height, width = len(grid), len(grid[0])

        @lru_cache(None)
        def collect(row, a, b):
            # out-of-bounds states are unreachable
            if not (0 <= a < width and 0 <= b < width):
                return -float('inf')
            gained = grid[row][a] if a == b else grid[row][a] + grid[row][b]
            if row == height - 1:
                return gained
            shifts = (-1, 0, 1)
            return gained + max(collect(row + 1, a + da, b + db)
                                for da in shifts for db in shifts)

        return collect(0, 0, width - 1)
# -
# Smoke test on LeetCode example 1; the expected answer is 24.
solution = Solution()
solution.cherryPickup([[3,1,1],[2,5,1],[1,5,5],[2,1,1]])
# Scratch check of tuple iteration: i=2, so this prints 1 then 3 (the tuple
# (i-1, i+1) is built before the loop rebinds i).
i = 2
for i in i-1, i+1:
    print(i)
| Dynamic Programming/1029/1463. Cherry Pickup II.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 8.3
# language: ''
# name: sagemath
# ---
# # CÁLCULO DIFERENCIAL E INTEGRAL
# # FUNCIONES Y SUS LÍMITES
# ## LÍMITES QUE INVOLUCRAN INFINITOS
#
# En esta lección, investigamos el comportamiento global de funciones y, en particular, cuando sus gráficas aproximan *asíntotas* verticales u horizontales.
# ### Límites infinitos
#
# Como pudimos observar anteriormente, el límite de $$f(x)=\dfrac{1}{x^2}$$ cuando $x\to 0$ *no existe*:
# Plot 1/x^2 near x = 0: the function grows without bound on both sides.
f(x)=1/x^2
intervalo = (x,-1,1)
plot(f, intervalo, ymin=0, ymax=1000)
# Plot 1/x near x = 0: it diverges with opposite signs on each side.
f(x)=1/x
intervalo = (x,-1,1)
plot(f, intervalo, ymin=-100, ymax=100)
# Sin embargo, como podemos observar, cuando aproximamos $x=0$ por ambos lados, observamos un patrón:
# +
import numpy as np
f(x) = 1/x^2
# Sample points approaching 0 from the left; f(x) keeps growing.
izquierda = np.linspace(-0.01,0,11)
show(izquierda)
for x in izquierda:
    try:
        print(x, f(x))
    except:
        # x == 0 raises a division-by-zero error
        print("División entre cero")
# +
# Same experiment approaching 0 from the right.
derecha = np.linspace(1,0,11)
for x in derecha:
    try:
        print(x, f(x))
    except:
        print("División entre cero")
# -
# #### Ejercicio
# Repita el ejercicio, comenzando ahora en $x=-0.1$ por la izquierda, y en $x=0.1$ por la derecha.
# Ya sea por la izquierda o por la derecha, la función sigue creciendo de manera arbitraria cuando $x\to0$.
#
# Esto motiva la siguiente definición
#
# #### Definición
# Diremos que $f(x) \to \infty$ cuando $x\to c$ si los valores de $f(x)$ pueden hacerse arbitrariamente grande si $x$ es suficientemente cercano a $c$ en este caso escribimos
# $$
# \lim_{x\to c} f(x) = \infty
# $$
# #### Ejercicio
# ¿Cuál sería el límite de $$f(x)=-\dfrac{1}{x^2}$$ cuando $x\to 0$?
# #### Límites laterales
# 
#
# Si $f(x)\to \pm\infty$ cuando $x\to a^{\pm}$, diremos que la recta vertical $x=a$ es una **asíntota vertical**.
# ### Ejercicio
# * Trace la gráfica de la función
# $$f(x) = \dfrac{2x}{x-3} $$
# * Determine
# $$\lim_{x\to 3^+}f(x)$$
# * Determine
# $$\lim_{x\to 3^-}f(x)$$
# #### Ejercicio
# Trace la gráfica de $\tan(x)$ y determine sus asíntotas verticales.
# *Sugerencia: Recuerde que $\tan(x)=\sin(x)/\cos(x)$*
# ### Límites al infinito
# Consideremos la función
# $$f(x)=\dfrac{x^2-1}{x^2+1}$$
#
# Primero, tracemos su gráfica
# Rational function whose graph levels off as x -> ±infinity.
f(x) = (x^2-1)/(x^2+1)
intervalo=(x,-1000,1000)
grafica = plot(f, intervalo)
show(grafica)
# La gráfica nos sugiere que la función tiene un "tope" superior, al que se acerca cuando nos alejamos hacias los extremos.
#
# Verifiquemos este comportamiento:
# Tabulate f at increasingly large |x| to see the values approach 1.
for x in np.linspace(0,100,11):
    print(x,f(x))
for x in np.linspace(0,-100,11):
    print(x,f(x))
# En efecto, los cálculos numéricos nos sugieren que cuando $x\to \pm \infty$, es decir, $x$ tiende a los extremos, $f(x) \to 1$.
# Overlay the horizontal asymptote y = 1 on the previous plot.
g(x)=1
asintota = plot(g, intervalo, color="red")
show(grafica+asintota)
# #### Definición
#
# Sea $f$ una función definida en algún intervalo $(a,\infty)$. Entonces
# $$
# \lim_{x\to \infty} f(x) = L
# $$
# significa que los valores de $f(x)$ se pueden hacer arbitrariamente cercanos a $L$ si $x$ es suficientemente grande.
# Hagamos un experimento numérico para ilustrar la definición. Vamos a encontrar $x$ tal que
# $$
# |1-f(x)|< \epsilon
# $$
# donde $\epsilon$ es un margen de error dado.
# +
# Find the first integer x with |1 - f(x)| below the chosen error margin.
error = 0.001
x = 0
while(np.abs(1-f(x))>=error):
    x+=1
print(x, f(x).n())
# -
# Grafiquemos la función a partir de este número
# Plot f together with the asymptote from that point onward.
infinito = 10^3
al_infinito = (x,45,infinito)
grafica_2 = plot(f, al_infinito)
asintota_2 = plot(g, al_infinito, color="red")
show(grafica_2+asintota_2)
# ##### Ejercicio
# Determine gráfica y numéricamente el comportamiento asintótico ($x\to \pm \infty$) de las siguientes funciones:
# 1. $f(x)=\frac{1}{x} $
# 2. $g(x)=\frac{1}{x^2} $
# 3. $h(x)=\frac{1}{x^3} $
#
# ¿Qué comportamiento observa para las funciones de la forma $\frac{1}{x^n}$ con $n$ entero positivo?
# ##### Ejercicio
# Determine gráfica y numéricamente el comportamiento asintótico ($x\to \pm \infty$) de las siguientes funciones:
# 1. $$ f(x) = \dfrac{3x^2-x-2}{5x^2+4x+1} $$
#
# 2. $$ g(x) = \sqrt{x^2+1}-x$$
#
# 3. $$ h(x) = \sin\left(\dfrac{1}{x}\right) $$
# ### Límites infinitos en el infinito
#
# ##### Ejercicio
# Determine gráfica y numéricamente el comportamiento asintótico ($x\to \pm \infty$) de las siguientes funciones:
#
# 1. $f(x)=x^3$
# 2. $g(x)=x^2-x$
# 3. $h(x)=\dfrac{x^2+x}{3-x}$
# #### Definición
#
# Sea $f$ una función definida en algún intervalo $(a,\infty)$. Entonces
#
# $$\lim_{x\to \infty} f(x) = \infty $$
#
# significa que para cada número positivo $M$ existe un correspondiente número positivo $N$ tal que si $x>N$, entonces $f(x) > M$
# ### Ejemplo
#
# Sea $f(x)= x^3$ y elijamos $M=10,000$. Determinaremos cual es la $N$ correspondiente:
# For f(x) = x^3 and M = 10,000, find the first integer N with f(N) > M.
f(x) = x^3
x = 0
M = 10000
while(f(x)<=M):
    x+=1
print(x, f(x))
# Plot f past that threshold.
infinito = 30
intervalo = (x, 22, infinito)
plot(f, intervalo, ymin=0)
| CALCULO 101 LECCION 6 LIMITES AL INFINITO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Periscope + Sagemaker
# Hello! Welcome to Periscope + Sagemaker!
#
# ## Introduction
# In this demo, we will be using the XGBoost library on Sagemaker to predict the lifetime revenue of Toto customers. If you are new to Jupyter notebooks, just press the Run button at the top to run a code block.
# ## Getting Started
# Let's start by specifying:
#
# * The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
#
# * The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp with a the appropriate full IAM role arn string(s).
#
# **Note:** This notebook was created and tested on an ml.t2.medium notebook instance.
# +
# Define IAM role
import boto3
import re
from sagemaker import get_execution_role
role = get_execution_role()
# S3 location of the Periscope-exported training data.
bucket='sagemaker-periscopedata-demo-nyc'
data_key = 'enhancedtotodataset.csv'
data_location = 's3://{}/{}'.format(bucket, data_key)
# set prefix for this instance
# please input your name in the following set of square brackets, making sure to use appropriate directory characters
prefix = 'sagemaker/[your-name-here]-xgboost-batch-dm'
# -
# Now we'll import the Python libraries we'll need.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from io import BytesIO
import os
import time
import json
import sagemaker.amazon.common as smac
import sagemaker
from sagemaker.predictor import csv_serializer, json_deserializer
# ## Data Import
# Because Periscope has already brought the data into S3 as a CSV, importing it into a dataframe requires only a single line of Python. Here we bring the data in from S3 to a pandas dataframe and confirm the information by printing the top 5 records.
# +
# read the csv from S3
df = pd.read_csv(data_location)
# display the first 5 records to verify the import
df.head(5)
# -
# ## Data Preparation
# Most of the data preparation and feature engineering has already been performed in Periscope Data. There is one final step that is best done in Python: one-hot encoding the categorical variables. After importing the data from Periscope, this is the last step needed before running the training data through an ML model.
# +
# some of the categorical variables are currently encoded as numeric. The number of categories is low and can easily be one-hot encoded using get_dummy()
# categorical columns = max_dog_size, min_dog_size, requester_gender, provider_gender, experience
# continuous = walk_count, dog_count, requester_fee, previous_client_count, price_per_walk, provider_fee, percent_morning_walks, percent_afternoon_walks, percent_evening_walks
df = pd.get_dummies(df, columns = ["max_dog_size", "min_dog_size", "requester_gender", "provider_gender", "experience"])
#verify that the one-hot encoding (creation of boolean for each categorical variable) succeeded
df.head(5)
# -
# ## Building Models
#
# The most common way of preventing overfitting is to build models with the concept that a model shouldn't only be judged on its fit to the data it was trained on, but also on "new" data. There are several different ways of operationalizing this, holdout validation, cross-validation, leave-one-out validation, etc. For our purposes, we'll simply randomly split the data into 3 uneven groups. The model will be trained on 70% of data, it will then be evaluated on 20% of data to give us an estimate of the accuracy we hope to have on "new" data, and 10% will be held back as a final testing dataset which will be used later on.
train_data, validation_data, test_data = np.split(df.sample(frac=1, random_state=1729), [int(0.7 * len(df)), int(0.9 * len(df))])
# Amazon SageMaker's XGBoost container expects data in the libSVM or CSV data format. For this example, we'll stick to CSV. Note that the first column must be the target variable and the CSV should not include headers. Also, notice that although repetitive it's easiest to do this after the train|validation|test split rather than before. This avoids any misalignment issues due to random reordering.
pd.concat([train_data['lifetime_revenue'], train_data.drop(['lifetime_revenue'], axis=1)], axis=1).to_csv('train.csv', index=False, header=False)
pd.concat([validation_data['lifetime_revenue'], validation_data.drop(['lifetime_revenue'], axis=1)], axis=1).to_csv('validation.csv', index=False, header=False)
test_data.drop(['lifetime_revenue'], axis=1).to_csv('test.csv', index=False, header=False)
# Now we'll copy the files to S3 for Amazon SageMaker's managed training to pickup.
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train/train.csv')).upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'validation/validation.csv')).upload_file('validation.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'test/test.csv')).upload_file('test.csv')
# ## Train
# There are several intricacies to understanding the algorithm, but at a high level, gradient boosted trees works by combining predictions from many simple models, each of which tries to address the weaknesses of the previous models. By doing this the collection of simple models can actually outperform large, complex models. Other Amazon SageMaker notebooks elaborate on gradient boosting trees further and how they differ from similar algorithms.
#
# xgboost is an extremely popular, open-source package for gradient boosted trees. It is computationally powerful, fully featured, and has been successfully used in many machine learning competitions. Let's start with a simple xgboost model, trained using Amazon SageMaker's managed, distributed training framework.
#
# First we'll need to specify the ECR container location for Amazon SageMaker's implementation of XGBoost.
# region -> ECR image URI for the SageMaker built-in XGBoost algorithm
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/xgboost:latest',
              'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/xgboost:latest',
              'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/xgboost:latest',
              'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/xgboost:latest',
              'ap-northeast-1': '501404015308.dkr.ecr.ap-northeast-1.amazonaws.com/xgboost:latest',
              'ap-northeast-2': '306986355934.dkr.ecr.ap-northeast-2.amazonaws.com/xgboost:latest'}
# Then, because we're training with the CSV file format, we'll create s3_inputs that our training function can use as a pointer to the files in S3, which also specify that the content type is CSV.
# NOTE(review): sagemaker.s3_input is the Python SDK v1 API; in SDK v2 it was renamed
# sagemaker.inputs.TrainingInput — confirm which SDK version this notebook targets
s3_input_train = sagemaker.s3_input(s3_data='s3://{}/{}/train'.format(bucket, prefix), content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data='s3://{}/{}/validation/'.format(bucket, prefix), content_type='csv')
# First we'll need to specify training parameters to the estimator. This includes:
#
# 1. The xgboost algorithm container
# 2. The IAM role to use
# 3. Training instance type and count
# 4. S3 location for output data
# 5. Algorithm hyperparameters
# And then a .fit() function which specifies:
#
# 1. S3 location for output data. In this case we have both a training and validation set which are passed in.
# +
sess = sagemaker.Session()
# pick the XGBoost image for whichever region this notebook runs in
xgb = sagemaker.estimator.Estimator(containers[boto3.Session().region_name],
                                    role,
                                    train_instance_count=1,
                                    train_instance_type='ml.m4.xlarge',
                                    output_path='s3://{}/{}/output'.format(bucket, prefix),
                                    sagemaker_session=sess)
xgb.set_hyperparameters(max_depth=5,
                        eta=0.2,
                        gamma=4,
                        min_child_weight=6,
                        subsample=0.8,
                        silent=0,
                        objective='reg:linear', # use linear regression to create a continuous output
                        num_round=100)
# launch the managed training job (blocks until the job completes)
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
# -
# ## Hosting
# Now that we've trained the xgboost algorithm on our data, let's deploy a model that's hosted behind a real-time endpoint.
# create a real-time endpoint backed by a single ml.m4.xlarge instance
xgb_predictor = xgb.deploy(initial_instance_count=1,
                           instance_type='ml.m4.xlarge')
# ## Evaluation
# There are many ways to compare the performance of a machine learning model, but let's start by simply comparing actual to predicted values.
#
# Let's use SageMaker's newly announced bulk inferencing functionality to make the predictions.
# +
# %%time
from time import gmtime, strftime
# S3 locations for the batch transform input (test.csv) and its output
input_prefix = prefix + '/test'
csv_file = 'test.csv'
input_data = 's3://{}/{}'.format(bucket, input_prefix)
output_prefix = prefix + '/xgboost-batch-test-output'
output_data = 's3://{}/{}'.format(bucket, output_prefix)
# Important
# Update this value with the model name from the output of the hosting step
# NOTE(review): hard-coded model name from a previous run — it must be refreshed on
# every re-run or create_transform_job will fail with a missing-model error
model_name = 'xgboost-2018-07-17-09-08-32-655'
job_name = model_name
# timestamp the transform job name so repeated runs don't collide
batch_job_name = 'xgboost-batch' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
batch = boto3.client('sagemaker')
# request payload for the batch transform job: CSV input split per line, results
# re-assembled line-by-line into <input>.out under the output prefix
create_params = \
{
    "TransformJobName": batch_job_name,
    "ModelName": model_name,
    "MaxConcurrentTransforms": 8,
    "BatchStrategy": 'MultiRecord',
    "TransformInput": {
        "ContentType": "text/csv",
        "SplitType": "Line",
        "CompressionType": "None",
        "DataSource": {
            "S3DataSource": {
                "S3DataType": "S3Prefix",
                "S3Uri": input_data
            }
        }
    },
    "TransformOutput": {
        "S3OutputPath": output_data,
        "AssembleWith": "Line"
    },
    "TransformResources": {
        "InstanceCount": 1,
        "InstanceType": "ml.m4.xlarge"
    }
}
print("Job name is " + job_name)
batch.create_transform_job(**create_params)
# ### Wait for it to Finish
# +
import time
def describe(job_name):
    """Fetch the transform job description, stripping boto3 response metadata."""
    description = batch.describe_transform_job(TransformJobName=job_name)
    del description['ResponseMetadata']
    return description
def wait_for(job_name, sleep_time=30):
    """Poll the transform job every `sleep_time` seconds until it leaves
    'InProgress', printing each observed status, and return the final description."""
    desc = describe(job_name)
    print('Status: {}'.format(desc['TransformJobStatus']))
    while desc['TransformJobStatus'] == 'InProgress':
        time.sleep(sleep_time)
        desc = describe(job_name)
        print('Status: {}'.format(desc['TransformJobStatus']))
    return desc
# +
# %%time
import yaml
# block until the batch job finishes, then pretty-print its final description
desc = wait_for(batch_job_name)
print()
print(yaml.dump(desc, default_flow_style=False))
# -
# #### Retrieve the data
#
# The output is written to S3 and we can recover it from there.
# +
# batch transform names its output after the input file with a '.out' suffix
part_file = csv_file + '.out'
boto3.resource('s3').Bucket(bucket).Object('{}/{}'.format(output_prefix,part_file)).download_file(part_file)
import pandas as pd
# one prediction per line, no header row
predictions = pd.read_csv(part_file, header=None)
predictions.columns = ['predictions']
predictions.head(5)
# -
# ## Saving the Predictions
#
# Let's use another method to make predictions on the training data so we can compare how the model fits the test data and the training data.
#
# First we'll need to determine how we pass data into and receive data from our endpoint. Our data is currently stored as NumPy arrays in memory of our notebook instance. To send it in an HTTP POST request, we'll serialize it as a CSV string and then decode the resulting CSV.
#
# _Note: For inference with CSV format, SageMaker XGBoost requires that the data does NOT include the target variable._
# configure the endpoint predictor to send CSV payloads
xgb_predictor.content_type = 'text/csv'
xgb_predictor.serializer = csv_serializer
# Now, we'll use a simple function to:
#
# 1. Loop over our test dataset
# 2. Split it into mini-batches of rows
# 3. Convert those mini-batches to CSV string payloads (notice, we drop the target variable from our dataset first)
# 4. Retrieve mini-batch predictions by invoking the XGBoost endpoint
# 5. Collect predictions and convert from the CSV output our model provides into a NumPy array
# +
def predict(data, rows=500):
    """Score `data` against the hosted XGBoost endpoint in mini-batches.

    Args:
        data: 2-D array of feature rows (target column already dropped).
        rows: approximate number of rows per request payload.

    Returns:
        1-D numpy array of float predictions, one per input row.
    """
    split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1))
    # collect each mini-batch's CSV response and join once at the end; parsing with
    # np.array(...split(',')) replaces the deprecated np.fromstring used previously
    responses = [xgb_predictor.predict(array).decode('utf-8') for array in split_array]
    return np.array(','.join(responses).split(','), dtype=float)
# export test predictions
# copy first so the original test_data split is not mutated by insert()
test_data_with_pred = test_data.copy()
# use the underlying values: `predictions` has a fresh RangeIndex while test_data
# keeps its shuffled index, so inserting the frame/series directly would misalign rows
test_data_with_pred.insert(2, 'predictions', predictions['predictions'].values)
test_data_with_pred.head()
test_data_with_pred.to_csv('toto_test_predictions.csv', index=False, header=True)
# export train data with predictions
train_predictions = predict(train_data.drop(['lifetime_revenue'], axis=1).values)
train_data_with_pred = train_data.copy()
train_data_with_pred.insert(2, 'predictions', train_predictions)
train_data_with_pred.head()
train_data_with_pred.to_csv('toto_train_predictions.csv', index=False, header=True)
# -
# ## RMSE
#
# The root mean square error helps us understand the difference between our predictions and the actual lifetime revenue. It aggregates the error/residuals in a way and helps us better evaluate the performance of our model.
# +
def rmse(predictions, actuals):
    """Root mean square error between predicted and actual values."""
    squared_errors = (predictions - actuals) ** 2
    return squared_errors.mean() ** 0.5
rmse(predictions = np.round(predictions['predictions']), actuals = test_data['lifetime_revenue'])
# -
# In this case, the RMSE is $1932.86. Although this is a large number, it does make sense given the range of our dataset.
# ### Visualizing Predictions
#
# Visualizing predictions is a helpful way to evaluate the efficacy of the model.
# +
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# square plot so the y = x reference line sits at 45 degrees
plt.figure(figsize=(7,7))
plt.gca().set_aspect('equal', adjustable='box')
# axis limit covering both the predicted and the actual ranges
max_lim = max(int(np.max(np.round(predictions['predictions']))), int(np.max(test_data['lifetime_revenue'])))
x = np.linspace(0, max_lim, 10)
# perfect-prediction reference line (y = x)
plt.plot(x, x, linewidth = 2.5, linestyle = '-.', alpha = 0.5, label = 'Actual')
#regression part
sns.regplot(x=np.round(predictions['predictions']), y=test_data['lifetime_revenue'], color = 'purple', label = 'Prediction')
plt.xlabel('Predictions')
plt.ylabel('Actual')
plt.title('Predictive vs Actual Lifetime Revenue')
plt.legend()
plt.show()
# -
# Here, we see that predictions are more accurate in the lower ranges from 0 to \$150.00 since this is where we have the most data. For larger values and predictions, it becomes more difficult for the model to extrapolate. Looking at the graph, it appears that the model tends to overpredict for values over $150.00. Another graph that investigates this widening margin of error can be created with a simple linear regression model plot:
import seaborn as sns
# residuals of predictions vs actuals; a widening spread indicates growing error at higher LTV
plt.figure(figsize=(5,5))
sns.residplot(x=np.round(predictions['predictions']), y=test_data['lifetime_revenue'], color = 'purple')
plt.xlabel('LTV')
plt.ylabel('Residual')
plt.title('Residual Plot')
# ## Extensions
# This example analyzed a relatively small dataset, but utilized Amazon SageMaker features such as distributed, managed training and real-time model hosting, which could easily be applied to much larger problems. In order to improve predictive accuracy further, we could tweak the value we threshold our predictions at to alter the mix of false-positives and false-negatives, or we could explore techniques like hyperparameter tuning. In a real-world scenario, we would also spend more time engineering features by hand and would likely look for additional datasets to include which contain additional information not available in our initial dataset.
#
# ### Clean-up
# If you are done with this notebook, please run the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.
sagemaker.Session().delete_endpoint(xgb_predictor.endpoint)
| Toto Dog Walking Predictions-Batch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quantum Gates
from qiskit import *
# To manipulate an input state we need to apply the basic operations of quantum computing. These are known as quantum gates. Here we'll give an introduction to some of the most fundamental gates in quantum computing. Most of those we'll be looking at act only on a single qubit. This means that their actions can be understood in terms of the Bloch sphere.
#
#
# ### The Pauli operators
#
# The simplest quantum gates are the Paulis: $X$, $Y$ and $Z$. Their action is to perform a half rotation of the Bloch sphere around the x, y and z axes. They therefore have effects similar to the classical NOT gate or bit-flip. Specifically, the action of the $X$ gate on the states $|0\rangle$ and $|1\rangle$ is
#
# $$
# X |0\rangle = |1\rangle,\\\\ X |1\rangle = |0\rangle.
# $$
#
# The $Z$ gate has a similar effect on the states $|+\rangle$ and $|-\rangle$:
#
# $$
# Z |+\rangle = |-\rangle, \\\\ Z |-\rangle = |+\rangle.
# $$
#
# These gates are implemented in Qiskit as follows (assuming a circuit named `qc`).
#
# ```python
# qc.x(0) # x on qubit 0
# qc.y(0) # y on qubit 0
# qc.z(0) # z on qubit 0
# ```
#
# The matrix representations of these gates have already been shown in a previous section.
#
# $$
# X= \begin{pmatrix} 0&1 \\\\ 1&0 \end{pmatrix}\\\\
# Y= \begin{pmatrix} 0&-i \\\\ i&0 \end{pmatrix}\\\\
# Z= \begin{pmatrix} 1&0 \\\\ 0&-1 \end{pmatrix}
# $$
#
# There, their job was to help us make calculations regarding measurements. But since these matrices are unitary, and therefore define a reversible quantum operation, this additional interpretation of them as gates is also possible.
#
# Note that here we referred to these gates as $X$, $Y$ and $Z$ and `x`, `y` and `z`, depending on whether we were talking about their matrix representation or the way they are written in Qiskit. Typically we will use the style of $X$, $Y$ and $Z$ when referring to gates in text or equations, and `x`, `y` and `z` when writing Qiskit code.
# ### Hadamard and S
#
# The Hadamard gate is one that we've already used. It's a key component in performing an x measurement:
#
# ```python
# measure_x = QuantumCircuit(1,1)
# measure_x.h(0)
# measure_x.measure(0,0);
# ```
#
# Like the Paulis, the Hadamard is also a half rotation of the Bloch sphere. The difference is that it rotates around an axis located halfway between x and z. This gives it the effect of rotating states that point along the z axis to those pointing along x, and vice versa.
#
# $$
# H |0\rangle = |+\rangle, \, \, \, \, H |1\rangle = |-\rangle,\\\\
# H |+\rangle = |0\rangle, \, \, \, \, H |-\rangle = |1\rangle.
# $$
#
# This effect makes it an essential part of making x measurements, since the hardware behind quantum computing typically only allows the z measurement to be performed directly. By moving x basis information to the z basis, it allows an indirect measurement of x.
#
# The property that $H |0\rangle = |+\rangle $ also makes the Hadamard our primary means of generating superposition states. Its matrix form is
#
# $$
# H = \frac{1}{\sqrt{2}} \begin{pmatrix} 1&1 \\\\ 1&-1 \end{pmatrix}.
# $$
#
# The $S$ and $S^\dagger$ gates have a similar role to play in quantum computation.
#
# ```python
# qc.s(0) # s gate on qubit 0
# qc.sdg(0) # s† on qubit 0
# ```
#
# They are quarter turns of the Bloch sphere around the z axis, and so can be regarded as the two possible square roots of the $Z$ gate,
#
# $$
# S = \begin{pmatrix} 1&0 \\\\ 0&i \end{pmatrix}, \, \, \, \, S^\dagger = \begin{pmatrix} 1&0 \\\\ 0&-i \end{pmatrix}.
# $$
#
# The effect of these gates is to rotate between the states of the x and y bases.
#
# $$
# S |+\rangle = |\circlearrowright\rangle, \, \, \, \, S |-\rangle = |\circlearrowleft\rangle,\\\\
# S^\dagger |\circlearrowright\rangle = |+\rangle, \, \, \, \, S^\dagger |\circlearrowleft\rangle = |-\rangle.
# $$
#
# They are therefore used as part of y measurements.
#
# ```python
# measure_y = QuantumCircuit(1,1)
# measure_y.sdg(0)
# measure_y.h(0)
# measure_y.measure(0,0);
# ```
#
# The $H$, $S$ and $S^\dagger$ gates, along with the Paulis, form the so-called 'Clifford group' for a single qubit, which will be discussed more in later sections. These gates are extremely useful for many tasks in making and manipulating superpositions, as well as facilitating different kinds of measurements. But to unlock the full potential of qubits, we need the next set of gates.
#
# ### Other single-qubit gates
#
# We've already seen the $X$, $Y$ and $Z$ gates, which are rotations around the x , y and z axes by a specific angle. More generally we can extend this concept to rotations by an arbitrary angle $\theta$. This gives us the gates $R_x(\theta)$, $R_y(\theta)$ and $R_z(\theta)$. The angle is expressed in radians, so the Pauli gates correspond to $\theta=\pi$ . Their square roots require half this angle, $\theta=\pm \pi/2$, and so on.
#
# In Qiskit, these rotations can be implemented with `rx`, `ry`, and `rz` as follows.
#
# ```python
# qc.rx(theta,0) # rx rotation on qubit 0
# qc.ry(theta,0) # ry rotation on qubit 0
# qc.rz(theta,0) # rz rotation on qubit 0
# ```
#
# Two specific examples of $R_z(\theta)$ have their own names: those for $\theta=\pm \pi/4$. These are the square roots of $S$, and are known as $T$ and $T^\dagger$.
#
# ```python
# qc.t(0) # t gate on qubit 0
# qc.tdg(0) # t† on qubit 0
# ```
#
# Their matrix form is
#
# $$
# T = \begin{pmatrix} 1&0 \\\\ 0&e^{i\pi/4}\end{pmatrix}, \, \, \, \, T^\dagger = \begin{pmatrix} 1&0 \\\\ 0&e^{-i\pi/4} \end{pmatrix}.
# $$
#
#
# All single-qubit operations are compiled down to gates known as $U_1$ , $U_2$ and $U_3$ before running on real IBM quantum hardware. For that reason they are sometimes called the _physical gates_. Let's have a more detailed look at them. The most general is
#
# $$
# U_3(\theta,\phi,\lambda) = \begin{pmatrix} \cos(\theta/2) & -e^{i\lambda}\sin(\theta/2) \\\\ e^{i\phi}\sin(\theta/2)
# & e^{i\lambda+i\phi}\cos(\theta/2) \end{pmatrix}.
# $$
#
# This has the effect of rotating a qubit in the initial $|0\rangle$ state to one with an arbitrary superposition and relative phase:
#
# $$
# U_3|0\rangle = \cos(\theta/2)|0\rangle + \sin(\theta/2)e^{i\phi}|1\rangle.
# $$
#
# The $U_1$ gate is known as the phase gate and is essentially the same as $R_z(\lambda)$. Its relationship with $U_3$ and its matrix form are,
#
# $$
# U_1(\lambda) = U_3(0,0,\lambda) = \begin{pmatrix} 1 & 0 \\\\ 0 & e^{i\lambda} \end{pmatrix}.
# $$
#
# In IBM Q hardware, this gate is implemented as a frame change and takes zero time.
#
# The second gate is $U_2$, and has the form
#
# $$
# U_2(\phi,\lambda) = U_3(\pi/2,\phi,\lambda) = \frac{1}{\sqrt{2}}\begin{pmatrix} 1 & -e^{i\lambda} \\\\ e^{i\phi} & e^{i\lambda+i\phi} \end{pmatrix}.
# $$
#
# From this gate, the Hadamard is done by $H= U_2(0,\pi)$. In IBM Q hardware, this is implemented by a pre- and post-frame change and an $X_{\pi/2}$ pulse.
# ### Multiqubit gates
#
# To create quantum algorithms that beat their classical counterparts, we need more than isolated qubits. We need ways for them to interact. This is done by multiqubit gates.
#
# The most prominent multiqubit gates are the two-qubit CNOT and the three-qubit Toffoli. These have already been introduced in 'The atoms of computation'. They essentially perform reversible versions of the classical XOR and AND gates, respectively.
#
# ```python
# qc.cx(0,1) # CNOT controlled on qubit 0 with qubit 1 as target
# qc.ccx(0,1,2) # Toffoli controlled on qubits 0 and 1 with qubit 2 as target
# ```
# Note that the CNOT is referred to as ```cx``` in Qiskit.
#
# We can also interpret the CNOT as performing an $X$ on its target qubit, but only when its control qubit is in state $|1\rangle$, and doing nothing when the control is in state $|0\rangle$. With this interpretation in mind, we can similarly define gates that work in the same way, but instead peform a $Y$ or $Z$ on the target qubit depending on the $|0\rangle$ and $|1\rangle$ states of the control.
#
# ```python
# qc.cy(0,1) # controlled-Y, controlled on qubit 0 with qubit 1 as target
# qc.cz(0,1) # controlled-Z, controlled on qubit 0 with qubit 1 as target
# ```
#
# The Toffoli gate can be interpreted in a similar manner, except that it has a pair of control qubits. Only if both are in state $|1\rangle$ is the $X$ applied to the target.
# ### Composite gates
#
# When we combine gates, we make new gates. If we want to see the matrix representation of these, we can use the 'unitary simulator' of Qiskit.
#
# For example, let's try something simple: a two qubit circuit with an `x` applied to one and a `z` to the other. Using tensor products, we can expect the result to be,
#
# $$
# Z \otimes X= \begin{pmatrix} 1&0 \\\\ 0&-1 \end{pmatrix} \otimes \begin{pmatrix} 0&1 \\\\ 1&0 \end{pmatrix} = \begin{pmatrix} 0&1&0&0 \\\\ 1&0&0&0\\\\0&0&0&-1\\\\0&0&-1&0 \end{pmatrix}.
# $$
#
# This is exactly what we find when we analyze the circuit with this tool.
# +
# set up circuit (no measurements required)
qc = QuantumCircuit(2)
qc.x(0) # qubits numbered from the right, so qubit 0 is the qubit on the right
qc.z(1) # and qubit 1 is on the left
# set up simulator that returns the full unitary of the circuit
backend = Aer.get_backend('unitary_simulator')
# run the circuit to get the matrix
gate = execute(qc,backend).result().get_unitary()
# now we use some fanciness to display it in latex
from IPython.display import display, Markdown, Latex
gate_latex = '\\begin{pmatrix}'
for line in gate:
    for element in line:
        gate_latex += str(element) + '&'
    gate_latex = gate_latex[0:-1]  # drop the trailing '&' after the last column
    gate_latex += '\\\\'           # LaTeX row separator: two backslash characters at runtime
# strip the final row separator: it is 2 characters at runtime, not 4;
# slicing off 4 characters also truncated the last matrix element
gate_latex = gate_latex[0:-2]
gate_latex += '\\end{pmatrix}'
display(Markdown(gate_latex))
| ch-gates/quantum-gates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Data and Visualize
from IPython.display import clear_output
import numpy as np
import pandas as pd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
# Preprocessing
from sklearn.model_selection import train_test_split
# NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 1.0 and removed in
# 1.2 (replacement: ConfusionMatrixDisplay) — confirm the pinned sklearn version
from sklearn.metrics import accuracy_score, classification_report, plot_confusion_matrix
# Modelling
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
import optuna
# -
data = pd.read_csv("../input/heart-failure-prediction/heart.csv")
# ## Attribute Information
# ---
# * **Age**: age of the patient [years]
# * **Sex**: sex of the patient
# * M: Male,
# * F: Female
# * **ChestPainType**: chest pain type
# * TA: Typical Angina,
# * ATA: Atypical Angina,
# * NAP: Non-Anginal Pain,
# * ASY: Asymptomatic
# * **RestingBP**: resting blood pressure [mm Hg]
# * **Cholesterol**: serum cholesterol [mm/dl]
# * **FastingBS**: fasting blood sugar
# * 1: if FastingBS > 120 mg/dl,
# * 0: otherwise
# * **RestingECG**: resting electrocardiogram results
# * Normal: Normal,
# * ST: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV),
# * LVH: showing probable or definite left ventricular hypertrophy by Estes' criteria
# * **MaxHR**: maximum heart rate achieved [Numeric value between 60 and 202]
# * **ExerciseAngina**: exercise-induced angina
# * Y: Yes,
# * N: No
# * **Oldpeak**: oldpeak = ST [Numeric value measured in depression]
# * **ST_Slope**: the slope of the peak exercise ST segment
# * Up: upsloping,
# * Flat: flat,
# * Down: downsloping
# * **HeartDisease**: output class
# * 1: heart disease,
# * 0: Normal
# quick look at the rows, dtypes and non-null counts
data
data.info()
# # Attributes
# per-feature exploratory analysis: describe()/value_counts() plus a plotly
# histogram (continuous) or pie chart (categorical) for each column
# ## Age
# *Age of the patient [years]*
data[["Age"]].describe().T
fig = px.histogram(data["Age"], x="Age", width=600, height=400)
fig.show()
# ## Sex
# *Sex of the patient*
# * M: Male
# * F: Female
data["Sex"].value_counts()
fig = px.pie(data["Sex"], names="Sex", width=600, height=400)
fig.show()
# ## Chest Pain Type
# *Chest pain type*
# * TA: Typical Angina
# * ATA: Atypical Angina
# * NAP: Non-Anginal Pain
# * ASY: Asymptomatic
fig = px.histogram(data["ChestPainType"], x="ChestPainType", width=600, height=400)
fig.show()
# ## Resting Blood Pressure
# *Resting blood pressure [mm Hg]*
data[["RestingBP"]].describe().T
# +
fig = px.histogram(data["RestingBP"], x="RestingBP", width=600, height=400)
fig.show()
# Looks like there are incorrectly entered value
# -
# ## Cholesterol
# *Serum cholesterol [mm/dl]*
data[["Cholesterol"]].describe().T
# +
fig = px.histogram(data["Cholesterol"], x="Cholesterol", width=600, height=400)
fig.show()
# Looks like there are incorrectly entered value
# -
# ## Fasting Blood Sugar
# *Fasting blood sugar*
# * 1: if FastingBS > 120 mg/dl,
# * 0: otherwise
data[["FastingBS"]].value_counts()
fig = px.pie(data["FastingBS"], names="FastingBS", width=600, height=400)
fig.show()
# ## RestingECG:
# *Resting electrocardiogram results*
# * Normal: Normal
# * ST: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
# * LVH: showing probable or definite left ventricular hypertrophy by Estes' criteria
data[["RestingECG"]].value_counts()
fig = px.histogram(data["RestingECG"], x="RestingECG", width=600, height=400)
fig.show()
# ## MaxHR
# *Maximum heart rate achieved [Numeric value between 60 and 202]*
data[["MaxHR"]].describe().T
fig = px.histogram(data["MaxHR"], x="MaxHR", width=600, height=400)
fig.show()
# ## ExerciseAngina
# *Exercise-induced angina*
# * Y: Yes
# * N: No
data["ExerciseAngina"].value_counts()
fig = px.pie(data["ExerciseAngina"], names="ExerciseAngina", width=600, height=400)
fig.show()
# ## Oldpeak
# *oldpeak = ST [Numeric value measured in depression]*
data[["Oldpeak"]].describe().T
fig = px.histogram(data["Oldpeak"], x="Oldpeak", width=600, height=400)
fig.show()
# ## ST_Slope
# *The slope of the peak exercise ST segment*
# * Up: upsloping
# * Flat: flat
# * Down: downsloping
data["ST_Slope"].value_counts()
fig = px.pie(data["ST_Slope"], names="ST_Slope", width=600, height=400)
fig.show()
# ## HeartDisease
# *Output class*
# * 1: heart disease,
# * 0: Normal
data["HeartDisease"].value_counts()
fig = px.pie(data["HeartDisease"], names="HeartDisease", width=600, height=400)
fig.show()
# correlation heatmap with the upper triangle masked so each pair appears once.
# numeric_only=True: at this point `data` still holds string columns (Sex, etc.);
# pandas >= 2.0 raises on DataFrame.corr() over non-numeric columns, while older
# pandas silently selected numeric columns — this keeps that behavior explicitly.
plt.figure(figsize=(10,10))
matrix = np.triu(data.corr(numeric_only=True))
sns.heatmap(data.corr(numeric_only=True), annot=True, mask=matrix, cmap="twilight")
# # Preprocessing
# ## Outlier Suppression
# +
# clip (winsorize) extreme values to fixed bounds instead of dropping rows
# NOTE(review): the cut-offs below look hand-picked from the histograms above — confirm
data.loc[data["RestingBP"] < 100, "RestingBP"] = 100
data.loc[data["RestingBP"] > 170, "RestingBP"] = 170
data.loc[data["Cholesterol"] < 100, "Cholesterol"] = 100
data.loc[data["Cholesterol"] > 320, "Cholesterol"] = 320
data.loc[data["MaxHR"] < 60, "MaxHR"] = 60
data.loc[data["MaxHR"] > 202, "MaxHR"] = 202
data.loc[data["Oldpeak"] < 0, "Oldpeak"] = 0
data.loc[data["Oldpeak"] > 2.5, "Oldpeak"] = 2.5
# -
# Getting Dummy Variables
# one-hot encode categorical columns, dropping the first level of each to avoid collinearity
data = pd.get_dummies(data, columns=["Sex", "ChestPainType", "RestingECG", "ExerciseAngina", "ST_Slope"], drop_first=True)
# # Modelling
# +
# features / target and a 90/10 train-test split
x = data.drop("HeartDisease", axis=1)
y = data["HeartDisease"]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=44, shuffle=True)
# -
# candidate classifier classes, all trained with default hyperparameters
algorithms = [RidgeClassifier,
              SVC,
              LinearSVC,
              RandomForestClassifier,
              KNeighborsClassifier,
              DecisionTreeClassifier,
              LGBMClassifier,
              XGBClassifier,
              ]
# leaderboard collecting one row per algorithm
df_algorithms = pd.DataFrame(columns=["Model", "Train Accuracy", "Test Accuracy"])
def autoML(algorithm):
    """Fit `algorithm` (a classifier class) with default settings on the global
    train split and return a (name, train accuracy, test accuracy) tuple."""
    fitted = algorithm().fit(x_train, y_train)
    accuracy_on_train = fitted.score(x_train, y_train)
    accuracy_on_test = accuracy_score(y_test, fitted.predict(x_test))
    return algorithm.__name__, accuracy_on_train, accuracy_on_test
# + _kg_hide-output=true
# train every candidate and record its accuracies on the leaderboard
for alg in algorithms:
    model_name, train_acc, test_acc = autoML(alg)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # build a one-row frame and concatenate instead
    result_row = pd.DataFrame([{"Model": model_name,
                                "Train Accuracy": train_acc,
                                "Test Accuracy": test_acc}])
    df_algorithms = pd.concat([df_algorithms, result_row], ignore_index=True)
# -
# rank the candidate models by held-out accuracy, then by train accuracy
df_algorithms.sort_values(by=["Test Accuracy", "Train Accuracy"],ascending=False)
# refit the best-performing algorithm and evaluate it on the test split
model = LGBMClassifier().fit(x_train, y_train)
y_pred = model.predict(x_test)
accuracy_score(y_test, y_pred)
# classification_report expects (y_true, y_pred); the original call had them
# swapped, which mislabels per-class precision/recall in the report
print(classification_report(y_test, y_pred))
# plot_confusion_matrix was removed in scikit-learn 1.2; use ConfusionMatrixDisplay
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_estimator(model,
                                      x_test,
                                      y_test,
                                      display_labels=["Normal", "Heart Disease"],
                                      cmap=plt.cm.Blues,
                                      )
| heart-disease-prediction-lightgbm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import libraries
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
# Dataset lives in torch.utils.data; torch.data does not exist and the original
# import raised ModuleNotFoundError
from torch.utils.data import Dataset
# +
class dataset(Dataset):
    """CSV-backed torch Dataset.

    Loads columns 0, 2, 3 and 4 of a headered CSV; the first three selected
    columns become the features `x` and the last one the target `y`.
    """
    def __init__(self, transform=None, path=None):
        super(dataset, self).__init__()
        # `path` was previously read from an undefined global name; require it explicitly
        if path is None:
            raise ValueError("path to the CSV file must be provided")
        xy = torch.from_numpy(np.loadtxt(path, dtype=np.float32, delimiter=",", skiprows=1, usecols=(0, 2, 3, 4)))
        self.x = xy[:, :-1]            # feature columns
        self.y = xy[:, -1]             # target column
        self.n_samples = xy.shape[0]   # number of rows loaded
        self.transform = transform

    def __getitem__(self, index):
        sample = (self.x[index], self.y[index])
        # the transform was previously stored but never applied
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        # was declared without `self`, so len(ds) raised a TypeError
        return self.n_samples
def call_data():
    """Prompt for a dataset name and desired format, then load it.

    Returns either a torch `dataset` instance ("tensor") or a pandas
    DataFrame ("pandas"); raises ValueError for any other answer.
    """
    intdata = input("enter name of data:" )
    path = os.path.join("./", intdata+".csv")
    type_data = input("tensor or pandas: ")
    if type_data == "tensor":
        # NOTE(review): dataset() needs to know which file to read — confirm how
        # `path` is meant to reach it
        data = dataset()
    elif type_data == "pandas":
        # `pandas` was a bare undefined name here; compare against the string literal
        data = pd.read_csv(path)
    else:
        # previously `data` was unbound here, raising UnboundLocalError at `return`
        raise ValueError("expected 'tensor' or 'pandas', got {!r}".format(type_data))
    return data
# +
# load data
"""def load_data():
intdata = input("enter name of data:" )
path = os.path.join("./", intdata+".csv")
data = pd.read_csv(path)
return data
dataset = load_data
"""
data = call_data()
# +
# preprocess data
def data_assignment():
    # TODO: not yet implemented
    pass
# +
# build the model
def model():
    # TODO: not yet implemented
    pass
# +
# fit the model
def fit():
    # TODO: not yet implemented
    pass
# +
# making predictions
def make_prediction():
    # TODO: not yet implemented
    pass
# +
# visualize the result
def visualize():
    # TODO: not yet implemented
    pass
# +
# analysis
def anlyse():
    # TODO: not yet implemented (name looks like a typo for "analyse"; kept to preserve the interface)
    pass
| project somo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modeling and Simulation in Python
#
# SymPy code for Chapter 16
#
# Copyright 2017 <NAME>
#
# License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
#
# ### Mixing liquids
# We can figure out the final temperature of a mixture by setting the total heat flow to zero and then solving for $T$.
# +
from sympy import *
init_printing()
# +
# heat capacities and temperatures of the two liquids, plus the mixture temperature T
C1, C2, T1, T2, T = symbols('C1 C2 T1 T2 T')
# total heat flow into the mixture is zero at equilibrium
eq = Eq(C1 * (T - T1) + C2 * (T - T2), 0)
eq
# -
solve(eq, T)
# ### Analysis
# We can use SymPy to solve the cooling differential equation.
# +
T_init, T_env, r, t = symbols('T_init T_env r t')
T = Function('T')
# Newton's law of cooling: dT/dt = -r (T - T_env)
eqn = Eq(diff(T(t), t), -r * (T(t) - T_env))
eqn
# -
# Here's the general solution:
solution_eq = dsolve(eqn)
solution_eq
general = solution_eq.rhs
general
# We can use the initial condition to solve for $C_1$. First we evaluate the general solution at $t=0$
at0 = general.subs(t, 0)
at0
# Now we set $T(0) = T_{init}$ and solve for $C_1$
# note: dsolve's integration constant renders as C1, the same Symbol('C1')
# created for the mixing example above, which is why we can reuse it here
solutions = solve(Eq(at0, T_init), C1)
value_of_C1 = solutions[0]
value_of_C1
# Then we plug the result into the general solution to get the particular solution:
particular = general.subs(C1, value_of_C1)
particular
# We use a similar process to estimate $r$ based on the observation $T(t_{end}) = T_{end}$
t_end, T_end = symbols('t_end T_end')
# Here's the particular solution evaluated at $t_{end}$
at_end = particular.subs(t, t_end)
at_end
# Now we set $T(t_{end}) = T_{end}$ and solve for $r$
solutions = solve(Eq(at_end, T_end), r)
value_of_r = solutions[0]
value_of_r
# We can use `evalf` to plug in numbers for the symbols. The result is a SymPy float, which we have to convert to a Python float.
# coffee example: 90C coffee in a 22C room measured at 70C after 30 minutes
subs = dict(t_end=30, T_end=70, T_init=90, T_env=22)
r_coffee2 = value_of_r.evalf(subs=subs)
type(r_coffee2)
r_coffee2 = float(r_coffee2)
r_coffee2
| code/chap16sympy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Prepare train_data and test_data
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split
from hypergbm import make_experiment
from hypernets.tabular.metrics import metric_to_scoring
from sklearn.metrics import get_scorer
# breast-cancer binary classification dataset, as pandas frames
X,y = datasets.load_breast_cancer(as_frame=True,return_X_y=True)
X_train,X_test,y_train,y_test = train_test_split(X,y,train_size=0.7,random_state=335)
# HyperGBM experiments take a single frame with the target column included
train_data = pd.concat([X_train,y_train],axis=1)
# # Add your own estimator(svm)
from hypergbm.sklearn.sklearn_ops import numeric_pipeline_simple,categorical_pipeline_simple
from hypergbm.estimators import HyperEstimator
from hypernets.pipeline.base import DataFrameMapper
from hypernets.core.ops import ModuleChoice, HyperInput
from hypernets.core.search_space import Choice, Int,Real
from hypernets.core.search_space import HyperSpace
from hypernets.tabular.column_selector import column_object
from hypernets.utils import const
from sklearn import svm
# ## Define SVMEstimator
class SVMEstimator(HyperEstimator):
    """HyperGBM search-space estimator wrapping sklearn's SVM models.

    Hyperparameters passed to the constructor (C, kernel, gamma, degree, ...)
    are collected into ``kwargs`` so the surrounding search space can tune
    them; ``_build_estimator`` then instantiates the task-appropriate wrapper.
    """

    def __init__(self, fit_kwargs, C=1.0, kernel='rbf', gamma='auto',degree=3,random_state=666,probability=True,
                 decision_function_shape=None,space=None, name=None, **kwargs):
        # Only forward hyperparameters that were explicitly provided
        # (non-None), so sklearn defaults apply otherwise.
        if C is not None:
            kwargs['C'] = C
        if kernel is not None:
            kwargs['kernel'] = kernel
        if gamma is not None:
            kwargs['gamma'] = gamma
        if degree is not None:
            kwargs['degree'] = degree
        if random_state is not None:
            kwargs['random_state'] = random_state
        if decision_function_shape is not None:
            kwargs['decision_function_shape'] = decision_function_shape
        # probability is always set; SVC needs it enabled for predict_proba.
        kwargs['probability'] = probability
        HyperEstimator.__init__(self, fit_kwargs, space, name, **kwargs)

    def _build_estimator(self, task, kwargs):
        # Dispatch on the ML task to pick the concrete sklearn wrapper.
        # NOTE(review): SVMRegressorWrapper subclasses svm.SVC in this file,
        # so regression tasks currently get a classifier — see that class.
        if task == const.TASK_REGRESSION:
            hsvm = SVMRegressorWrapper(**kwargs)
        else:
            hsvm = SVMClassifierWrapper(**kwargs)
        # Stash the task on the instance; presumably read by the HyperGBM
        # framework downstream — TODO confirm.
        hsvm.__dict__['task'] = task
        return hsvm
class SVMClassifierWrapper(svm.SVC):
    """SVC subclass whose ``fit`` tolerates (and discards) extra keyword args
    supplied by the HyperGBM framework, which plain SVC.fit would reject."""

    def fit(self, X, y=None, **kwargs):
        # Intentionally drop **kwargs; delegate to the sklearn implementation.
        return svm.SVC.fit(self, X, y)
class SVMRegressorWrapper(svm.SVR):
    """SVR subclass used for regression tasks.

    Bug fix: the original subclassed ``svm.SVC``, so regression tasks were
    silently fitted with a *classifier*. ``svm.SVR`` is the correct regression
    model, but it does not accept the classifier-only keywords the search
    space supplies, so those are stripped before delegating to SVR.
    """

    def __init__(self, **kwargs):
        # Drop classifier-only options that SVR.__init__ would reject.
        for key in ('probability', 'random_state', 'decision_function_shape'):
            kwargs.pop(key, None)
        super().__init__(**kwargs)

    def fit(self, X, y=None, **kwargs):
        # Discard framework-supplied fit kwargs SVR.fit does not accept.
        return super().fit(X, y)
# ## Define search_space
def search_space():
    """Build a HyperSpace whose only estimator choice is the custom SVM.

    Numeric and categorical columns go through the simple default pipelines,
    are merged by a DataFrameMapper, and feed an SVMEstimator with tunable
    C / kernel / degree / gamma ranges.

    Returns:
        HyperSpace: the assembled search space with its input set.
    """
    space = HyperSpace()
    with space.as_default():
        input = HyperInput(name='input1')
        # Default preprocessing for numeric and categorical features.
        num_pipeline = numeric_pipeline_simple()(input)
        cat_pipeline = categorical_pipeline_simple()(input)
        # Merge the two branches back into one dataframe; object-dtype
        # columns are cast to int.
        union_pipeline = DataFrameMapper(default=False, input_df=True, df_out=True,
                                         df_out_dtype_transforms=[(column_object, 'int')])([num_pipeline, cat_pipeline])
        # Hyperparameter search ranges for the SVM estimator.
        svm_init_kwargs = {
            'C': Real(0.1,5,0.1),
            'kernel':Choice(['rbf','poly','sigmoid']),
            'degree':Int(1,5),
            'gamma':Real(0.0001,5,0.0002)
        }
        svm_est =SVMEstimator(fit_kwargs={},**svm_init_kwargs)
        ModuleChoice([svm_est], name='estimator_options')(union_pipeline)
        space.set_inputs(input)
    return space
experiment = make_experiment(train_data.copy(),target='target',test_data=X_test,
search_space=search_space)
estimator = experiment.run()
scorer = get_scorer(metric_to_scoring('precision',pos_label=1))
score = scorer(estimator, X_test, y_test)
score
# # Add SVM into GeneralSearchSpaceGenerator
# +
from hypergbm.search_space import GeneralSearchSpaceGenerator
from hypergbm.estimators import XGBoostEstimator,LightGBMEstimator,CatBoostEstimator
class GeneralSearchSpaceGeneratorWithSVM(GeneralSearchSpaceGenerator):
    """General search-space generator extended with an optional SVM estimator.

    When ``enable_svm`` is true, an 'svm' entry is appended to the estimators
    inherited from GeneralSearchSpaceGenerator.
    """

    def __init__(self, enable_svm=True, **kwargs):
        super().__init__(**kwargs)
        self.enable_svm = enable_svm

    @property
    def default_svm_init_kwargs(self):
        # Hyperparameter search ranges for the SVM estimator.
        return dict(
            C=Real(0.1, 5, 0.1),
            kernel=Choice(['rbf', 'poly', 'sigmoid']),
            degree=Int(1, 5),
            gamma=Real(0.0001, 5, 0.0002),
        )

    @property
    def default_svm_fit_kwargs(self):
        # No extra fit-time arguments for the SVM.
        return dict()

    @property
    def estimators(self):
        available = super().estimators
        if self.enable_svm:
            available['svm'] = (SVMEstimator,
                                self.default_svm_init_kwargs,
                                self.default_svm_fit_kwargs)
        return available
generalSearchSpaceGeneratorWithSVM = GeneralSearchSpaceGeneratorWithSVM()
# -
experiment = make_experiment(train_data.copy(),target='target',test_data=X_test,
search_space=generalSearchSpaceGeneratorWithSVM)
estimator = experiment.run()
scorer = get_scorer(metric_to_scoring('precision',pos_label=1))
score = scorer(estimator, X_test, y_test)
score
| hypergbm/examples/32.customize_estimator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Prep
#
# For this vignette, we'll be using acquisition loan data from [<NAME>'s public datasets](https://loanperformancedata.fanniemae.com/lppub/index.html#Portfolio). There is sadly not yet a Python script to facilitate the reading in of all the data—which are stored as pipe-delimited .txt files, released quarterly on ~1 year lag—so I had to infer some of my data cleaning steps from the [R code that Fannie made available](https://loanperformancedata.fanniemae.com/lppub-docs/FNMA_SF_Loan_Performance_r_Primary.zip) to download and clean the data.
#
# If you are not immediately familiar with these data (or mortgage finance-related data in general), I highly recommend referencing the 1st table—"Acquisition File Layout"—in [this dictionary file here](https://loanperformancedata.fanniemae.com/lppub-docs/FNMA_SF_Loan_Performance_File_layout.pdf), along with the [corresponding glossary here](https://loanperformancedata.fanniemae.com/lppub-docs/FNMA_SF_Loan_Performance_Glossary.pdf), which provides an explanation of the meaning of the values for each variable.
#
# For our purposes here, I will only be using acquisition data from the 1st and 4th quarter of 2017; thus, I will need to read in 2 .txt files. The code below is generalizable up to however many text files you want to use; you need only change the path_to_data variable to point at whatever path stores your .txt files.
#
# ## Setup
# +
# basic packages
import numpy as np
import pandas as pd
import datetime
# for data importing
import os
import csv
# for data cleaning
from janitor import clean_names, remove_empty
# -
# store the datetime of the most recent running of this notebook as a form of a log
most_recent_run_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
f"This notebook was last executed on {most_recent_run_datetime}"
# ## Reading in the Data
# +
# location of my .txt data files
path_to_data = '../data/'
# instantiate an empty list to store the file names to read in
filelist = []
# loop over each filename in the specified directory
for filename in os.listdir(path_to_data):
# check if the file path corresponds to a file
# ensure the file in question is a .txt file
# ensure we haven't already added that file to the list
if os.path.isfile(path_to_data + filename) \
and filename.endswith(".txt") \
and filename not in filelist:
# append the file to our list of files
filelist.append(path_to_data+filename)
# -
# take a look at the files we'll be reading in
filelist
# +
# instantiate an empty df that we'll use to store all our data
acq_df = pd.DataFrame()
# list of variable names for all the fields in the .txt files (adapted from the aforementioned docs)
acq_var_names = ['LOAN_ID', 'ORIG_CHN', 'Seller.Name', 'ORIG_RT', 'ORIG_AMT', 'ORIG_TRM', 'ORIG_DTE','FRST_DTE',
'OLTV', 'OCLTV', 'NUM_BO', 'DTI', 'CSCORE_B', 'FTHB_FLG', 'PURPOSE', 'PROP_TYP', 'NUM_UNIT',
'OCC_STAT', 'STATE', 'ZIP_3', 'MI_PCT', 'Product.Type', 'CSCORE_C', 'MI_TYPE', 'RELOCATION_FLG']
# loop over the .txt files, read them in, and append them to make our master acquisitions df
for f in filelist:
# specify that our delimiter is a pipe, ignore the header, and use pre-specified variable names
temp_df = pd.read_csv(filepath_or_buffer = f, sep="|", header=None, names=acq_var_names)
# ensure that concatenation is row-wise and ignore the index values as they don't convey meaning here
acq_df = pd.concat(objs=[acq_df, temp_df], axis=0, ignore_index=True)
# -
# taking a look at the data structure we have so far
acq_df.head()
# ## Cleaning the Data
#
# For the most part, our data here are pretty clean. In the section below, we'll just make a few convenience changes, execute a couple checks, and create a few new variables.
# use pyjanitor package to take care of basic data cleaning
acq_df = (
acq_df
# clean the column names, remove any leading/trailing underscores
.clean_names(strip_underscores=True)
# remove any rows that are entirely NA
.remove_empty()
)
# +
# create a few new fields as recommended by the aforementioned docs
# find minimum credit score of borrower and co-borrower
acq_df['cscore_min'] = (
acq_df[['cscore_b','cscore_c']].min(axis=1)
)
# find origination value = origination amount / origination loan-to-value ratio
acq_df['orig_val'] = (
acq_df['orig_amt'] / (acq_df['oltv']/100)
)
# check if the ocltv is null; if it is, set it to the oltv
acq_df['ocltv'] = (
np.where(acq_df['ocltv'].isnull(), acq_df['oltv'], acq_df['ocltv'])
)
# -
# inspect our final cleaned data
acq_df.head()
# # Final Data Trimming
#
# For the sake of size / efficiency, I am going to create a dataset composed of only loans originated in two months: January 2017 and December 2017.
# +
# filter original acq_df to just loans with origination dates in jan or dec 2017
jan_and_dec_17_acqs = acq_df.loc[
(acq_df['orig_dte'] == '01/2017') | (acq_df['orig_dte'] == '12/2017')
]
# inspect the features of the resulting dataset
row_count, column_count = jan_and_dec_17_acqs.shape
f"The final dataset filtered to just Jan2017 and Dec2017 originations has {row_count} rows and {column_count} columns."
# -
# lastly, we'll save out this dataset for use elsewhere
jan_and_dec_17_acqs.to_csv(path_or_buf='../data/jan_and_dec_17_acqs.csv', index=False)
| notebooks/data_prep_nb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.utils import shuffle
from time import time
import cv2
# %matplotlib inline
# ## Constants
# +
# Per-color (lower, upper) pixel bounds used to classify k-means cluster
# centers.  The image is read with cv2.imread, which yields BGR channel
# order, so each bound is (blue, green, red) — note the 'red' range peaks in
# the last channel and 'blue' in the first.  TODO(review): bounds look
# hand-tuned; confirm against sample images.
boundaries_of = {
    'red': (
        np.array([50, 56, 179], dtype='uint8'),
        np.array([101, 101, 255], dtype='uint8'),
    ),
    'blue': (
        np.array([156, 0, 0], dtype='uint8'),
        np.array([255, 101, 101], dtype='uint8'),
    )
}

# Steering decision associated with each detected marker color.
direction_when_see = {
    'red': 'left',
    'blue': 'right',
}
# -
# ## Image processing utils
# +
def is_color_in_range(color, lower, upper):
    """Return True when every channel of ``color`` lies strictly between
    ``lower`` and ``upper`` (element-wise).

    Bug fix: the original body compared against the *global* variable
    ``cluster_center`` instead of the ``color`` parameter, so the argument
    was silently ignored.
    """
    return bool(np.all(lower < color) and np.all(upper > color))
def crop_image(image, w_crop_size, h_crop_size=None):
    """Crop a fractional border off a (w, h, d) image array.

    Args:
        image: 3-D array; first two axes are cropped, channels kept intact.
        w_crop_size: fraction (0..0.5) removed from each side of axis 0.
        h_crop_size: fraction removed from each side of axis 1; defaults to
            ``w_crop_size`` when not given.

    Returns:
        A view of the central region of ``image``.

    Bug fix: the original source was corrupted — a stray dict fragment
    ("'blue': 'righ...") replaced the ``h_upper_bound`` assignment, making
    the function a SyntaxError.  Reconstructed symmetrically with the other
    three bounds.
    """
    if h_crop_size is None:
        h_crop_size = w_crop_size
    w, h, d = tuple(image.shape)
    w_lower_bound = int(w * w_crop_size)
    w_upper_bound = int(w * (1 - w_crop_size))
    h_lower_bound = int(h * h_crop_size)
    h_upper_bound = int(h * (1 - h_crop_size))
    return image[
        w_lower_bound: w_upper_bound,
        h_lower_bound: h_upper_bound,
        :
    ]
def train_k_means(image_array, n_colors):
    """Fit a KMeans model with ``n_colors`` clusters on a fixed-seed random
    sample of at most 500 pixels from ``image_array``."""
    sample = shuffle(image_array, random_state=0)[:500]
    model = KMeans(n_clusters=n_colors, random_state=0)
    return model.fit(sample)
def recreate_image(codebook, labels, w, h):
    """Rebuild a (w, h, d) float image from per-pixel cluster labels.

    Args:
        codebook: (n_clusters, d) array of cluster-center colors.
        labels: flat array of length w*h, row-major (h varies fastest),
            mapping each pixel to a codebook row.
        w, h: output spatial dimensions.

    Returns:
        (w, h, d) float array, as in the original loop version (the loop
        assigned into ``np.zeros``, which is float64).

    Improvement: the original O(w*h) Python double loop is replaced by numpy
    fancy indexing + reshape; C-order reshape matches the original loop's
    label consumption order exactly.
    """
    codebook = np.asarray(codebook, dtype=float)
    d = codebook.shape[1]
    return codebook[np.asarray(labels)].reshape(w, h, d)
# -
# ## Clusterize the image
# +
# %%time
image = cv2.imread('test_blue.jpeg')

# Uncomment if you need crop
# cropped_image = crop_image(image, 0.2)
cropped_image = image

# Flatten the image to a (n_pixels, n_channels) matrix for clustering.
w, h, d = tuple(cropped_image.shape)
image_array = np.reshape(cropped_image, (w * h, d))

kmeans = train_k_means(image_array, 2)
labels = kmeans.predict(image_array)
recreated_image = recreate_image(
    kmeans.cluster_centers_,
    labels,
    w,
    h
)

# Fix: use the print() function form, which works under both Python 2
# (single argument) and Python 3; the original py2-only statement form
# breaks the notebook on modern kernels.
print(kmeans.cluster_centers_)

plt.figure(1, figsize=(10, 10))
# cv2 images are BGR; convert for matplotlib display.
plt.imshow(cv2.cvtColor(np.uint8(recreated_image), cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
# -
# ## Find out the direction to go
# +
direction_to_go = None
needed_colors = boundaries_of.keys()

# Check every cluster center against every color range; the last matching
# cluster wins (original behavior preserved — no early break).
for cluster_center in kmeans.cluster_centers_:
    for needed_color in needed_colors:
        if is_color_in_range(
            cluster_center,
            boundaries_of[needed_color][0],
            boundaries_of[needed_color][1],
        ):
            direction_to_go = direction_when_see[needed_color]

# Fix: print() function form is valid in both Python 2 and 3; the original
# py2-only statement form breaks on modern kernels.
print(direction_to_go)
| ColorDetection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from keras import models
from keras import layers
def build_network(hidden_layer_size, output_size=10):
    """Create and compile a two-layer dense classifier for flattened 28x28
    inputs: a relu hidden layer of the given size and a softmax output layer,
    compiled with rmsprop / categorical crossentropy / accuracy."""
    net = models.Sequential([
        layers.Dense(hidden_layer_size, activation='relu', input_shape=(28 * 28,)),
        layers.Dense(output_size, activation='softmax'),
    ])
    net.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return net
# +
from keras.datasets import mnist
from keras.utils import to_categorical
def prepare_data():
    """Load MNIST, flatten each image to a 784-vector scaled to [0, 1], and
    one-hot encode the labels.

    Returns:
        (x_train, y_train, x_test, y_test)
    """
    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    train_x = train_x.reshape((-1, 28 * 28)).astype('float32') / 255
    test_x = test_x.reshape((-1, 28 * 28)).astype('float32') / 255
    return train_x, to_categorical(train_y), test_x, to_categorical(test_y)
# -
network = build_network(512)
x_train, y_train, x_test, y_test = prepare_data()
history = network.fit(x_train, y_train, epochs=20, batch_size=128, validation_split=0.2)
# +
from util import plot_history
plot_history(history)
# -
test_loss, test_acc = network.evaluate(x_test, y_test)
print('test acc:', test_acc)
network = build_network(256)
history2 = network.fit(x_train, y_train, epochs=20, batch_size=128, validation_split=0.2)
plot_history(history2)
test_loss, test_acc = network.evaluate(x_test, y_test)
print('test acc:', test_acc)
# +
from util import compare_history
compare_history(history, history2)
# -
| mnist_nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS-109B Data Science 2: Advanced Topics in Data Science
# ## Lab 7: Autoencoders
#
# **Harvard University**<br>
# **Spring 2020**<br>
# **Instructors:** <NAME>, <NAME>, and <NAME><br>
# **Lab Instructors:** <NAME> and <NAME><br>
# **Content:** <NAME>, <NAME>, <NAME>, and <NAME>
#
# ---
# RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text
HTML(styles)
# <div class="discussion"><b>Welcome to our New Virtual Classroom!!</b>
#
# <NAME>, myself, and the lab TFs, have very much enjoyed your participation during our previous on-campus lab meetings, and will try to maintain interactivity via this new medium as best as we can. You can also do your part by:
#
# - using your real name, if possible, so as to recreate a classroom feeling :)
# - turning off your video to conserve bandwidth
# - muting your microphone unless you are invited to speak
# - **raising your hand** in the Chat when we invite questions
# - writing comments and questions in the Chat
#
# If you have any questions after the lab is done, please post them on **Ed**, in the Lab7 section.
#
# </div>
# ## Learning Goals
#
# By the end of this lab, you should be able to:
#
# - Connect the representation that Principal Component Analysis produces to that of an autoencoder (AE).
# - Add tf.keras Functional API into your machine learning arsenal.
# - Implement an autoencoder using `tf.keras`:
# - build the encoder network/model
# - build the decoder network/model
# - decide on the latent/bottleneck dimension
# - train your AE
# - predict on unseen data
#
# ### Note: To see solutions, uncomment and run the following:
#
# ```
# # # %load solutions/exercise2.py
# ```
#
# First time you run will load solution, then you need to **run the cell again** to actually run the code.
# <a id=top></a>
#
# ## Table of Contents
#
# - **Part 1**: [Autoencoders and their connection to Principal Component Analysis](#part1).
# - **Part 2**: [Denoising Images using AEs](#part2).
# - **Part 3**: [Visualizing Intermediate Layers of an AE](#part3).
# +
from __future__ import annotations
import numpy as np
import seaborn as sns
import os
import datetime
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (5,5)
# %matplotlib inline
# -
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, Conv1D, MaxPooling2D, MaxPooling1D,\
Dropout, Flatten, Activation, Input, UpSampling2D
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.metrics import AUC, Precision, Recall, FalsePositives, \
FalseNegatives, TruePositives, TrueNegatives
from tensorflow.keras.preprocessing import image
from tensorflow.keras.regularizers import l2
tf.keras.backend.clear_session() # For easy reset of notebook state.
print(tf.__version__) # You should see a > 2.0.0 here!
from tf_keras_vis.utils import print_gpus
print_gpus()
# set the seed for reproducability of results
seed = 109
np.random.seed(seed)
tf.random.set_seed(seed)
# install this if you want to play around with Tensorboard
# #!pip install tf-keras-vis tensorflow
# %load_ext tensorboard
# remove old logs
# !rm -rf ./logs/
# <a id=part1></a>
#
# ## Part 1: Autoencoders and the connection to Principal Component Analysis
#
# #### Principal Component Analysis (PCA)
#
# **PCA** decomposes a multivariate dataset into a set of eigenvectors - successive orthogonal coefficients that explain a large amount of the variance. By using only a number of the highest valued vectors, let's say $N$ of them, we effectively reduce the dimensionality of our data to $N$ with minimal loss of information as measured by RMS (Root Mean Squared) Error.
#
# PCA in `sklearn` is a transformer that learns those $N$ components via its `.fit` method. It can then be used to project a new data object in these components. Remember from 109a that we always `.fit` only to the training set and `.transform` both training and test set.
#
# ```
# from sklearn.decomposition import PCA
# k = 2 # number of components that we want to keep
#
# X_train, X_test = load_data()
# pca = PCA(n_components=k)
#
# principal_components = pca.fit_transform(X_train)
# principal_components = pca.transform(X_test)
# ```
#
# #### Autoencoders (AE)
#
# 
#
# *image source: Deep Learning by <NAME>*
#
# An **AE** maps its input, usually an image, to a latent vector space via an encoder function, and then decodes it back to an output that is the same as the input, via a decoder function. It’s effectively being trained to reconstruct the original input. By trying to minimize the reconstruction MSE error, on the output of the encoder, you can get the autoencoder to learn interesting latent representations of the data. Historically, autoencoders have been used for tasks such as dimensionality reduction, feature learning, and outlier detection.
#
# One type of architecture for an AE is to have the decoder network be a 'mirror image' of the encoder. It makes more sense this way but it is not necessary.
#
# We can say that AEs are self-supervised learning networks!
#
#
# #### Understanding the connection between PCA and AEs
#
# If the hidden and output layers of an autoencoder are linear, the autoencoder will learn hidden units that are linear representations of the data, just like PCA does. If we have $M$ hidden units in our AE, those will span the same space as the $M$ first principal components. The hidden layers of the AE will not produce orthogonal representations of the data as PCA would but if we add non-linear components in our encoder-decoder networks we can represent a non-linear space/manifold;
# #### Fashion-MNIST
#
# We will use the dataset of clothing article images (created by [Zalando](https://github.com/zalandoresearch/fashion-mnist)), consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a **28 x 28** grayscale image, associated with a label from **10 classes**. The names of the classes corresponding to numbers 0-9 are:
# ```
# 'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat','Sandal', 'Shirt', 'Sneaker', 'Bag', and 'Ankle boot'
# ```
# The creators intend Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms. It shares the same image size and structure of training and testing splits. Each pixel is 8 bits so its value ranges from 0 to 255.
#
# Let's load and look at it!
# +
# get the data from keras - how convenient!
fashion_mnist = tf.keras.datasets.fashion_mnist
# load the data splitted in train and test! how nice!
(X_train, y_train),(X_test, y_test) = fashion_mnist.load_data()
# normalize the data by dividing with pixel intensity
# (each pixel is 8 bits so its value ranges from 0 to 255)
X_train, X_test = X_train / 255.0, X_test / 255.0
print(f'X_train shape: {X_train.shape}, X_test shape: {X_test.shape}')
print(f'y_train shape: {y_train.shape}, and y_test shape: {y_test.shape}')
# classes are named 0-9 so define names for plotting clarity
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(X_train[i], cmap=plt.cm.binary)
plt.xlabel(class_names[y_train[i]])
plt.show()
# -
# choose one image to look at
i = 5
plt.imshow(X_train[i], cmap='gray');
plt.xlabel(class_names[y_train[i]]);
# <div class="exercise"><b>Exercise 1:</b> Calculate the dimensionality of the Fashion dataset. Then flatten it to prepare for PCA.</div>
## your code here
# +
# # %load solutions/exercise1.py
n_samples, h, w = X_train.shape
print(f'We have {n_samples} data sample images, each with height: {h} and width: {w}')
print(f'Data dimensionality: {h*w}')
print(f'X_train shape: {X_train.shape}, X_test shape: {X_test.shape}')
print(f'y_train shape: {y_train.shape}, and y_test shape: {y_test.shape}')
# Flatten images for PCA
X_train_flat = X_train.reshape(X_train.shape[0], -1)
X_test_flat = X_test.reshape(X_test.shape[0], -1)
print(f'X_train flattened shape: {X_train_flat.shape}, X_test flattened shape: {X_test_flat.shape}')
# -
from sklearn.decomposition import PCA
# <div class="exercise"><b>Exercise 2:</b> Find the 2 first principal components by fitting on the train set. Print the shape of the components matrix.</div>
# your code here
# +
# # %load solutions/exercise2.py
# Let's find the first 2 PCA components
num_components = 2
pca = PCA(n_components=num_components).fit(X_train_flat)

# take a look at them
print(f'Shape of the 2 component vector (First and Second): {pca.components_.shape}')
print(f'Shape of first PC: {pca.components_[0].shape}')
# Bug fix: the original printed components_[0] again for the "second PC".
print(f'Shape of second PC: {pca.components_[1].shape}')

# reshape so they resemble images and we can print them
eigenclothes = pca.components_.reshape((num_components, h, w))
print(f'Shape of reshaped eigenclothes: {eigenclothes.shape}, shape of the first article of eigenclothing: {eigenclothes[0].shape}')

# show the reshaped principal components (eigenclothes)
f, ax = plt.subplots(1,2)
ax[0].imshow(eigenclothes[0], cmap='gray');
ax[0].set_xlabel('First Principal Component');
ax[1].imshow(eigenclothes[1], cmap='gray');
ax[1].set_xlabel('Second Principal Component');
# -
# print the variance explained by those components
pca.explained_variance_
# **Note:** The first two components explain ~19+12 = 31% of the variance.
# <div class="discussion"><b>Discussion: </b> Comment on what you see here.</div>
# transform the train and test set
X_train_pca = pca.transform(X_train_flat)
X_test_pca = pca.transform(X_test_flat)
X_train_pca.shape, X_train_pca[1:5,0].shape
# +
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
fig, ax1 = plt.subplots(1,1, figsize=(9,9))
sns.scatterplot(x=X_train_pca[:,0], y=X_train_pca[:,1], hue=y_train,
palette=sns.color_palette("deep", 10), ax=ax1)
ax1.set_title("FASHION MNIST, First 2 principal components");
# -
# <a id=part2></a>
#
# ## Part 2: Denoise Images using AEs
#
# We will create an autoencoder which will accept "noisy" images as input and try to produce the original images. Since we do not have noisy images to start with, we will add random noise to our Fashion-MNIST images. To do this we will use the image augmentation library [imgaug docs](https://imgaug.readthedocs.io/en/latest/source/api_augmenters_arithmetic.html).
#
# From this library we will use `SaltAndPepper`, an augmenter which replaces pixels in images with salt/pepper noise (white/black-ish colors), randomly with probability passed as parameter `p`. Use the code below to install the library in your virtual environment.
# +
# # !conda install imgaug
# ### OR
# # !pip install imgaug
# -
from imgaug import augmenters
# +
# NNs want the inputs to be 4D
X_train = X_train.reshape(-1, h, w, 1)
X_test = X_test.reshape(-1, h, w, 1)
# Lets add sample noise - Salt and Pepper
noise = augmenters.SaltAndPepper(p=0.1, seed=seed)
seq_object = augmenters.Sequential([noise])
# Augment the data (add the noise)
X_train_n = seq_object.augment_images(X_train * 255) / 255
X_test_n = seq_object.augment_images(X_test * 255) / 255
# +
f, ax = plt.subplots(1,5, figsize=(20,10))
for i in range(5,10):
ax[i-5].imshow(X_train_n[i, :, :, 0].reshape(28, 28), cmap=plt.cm.binary)
ax[i-5].set_xlabel('Noisy '+class_names[y_train[i]])
f, ax = plt.subplots(1,5, figsize=(20,10))
for i in range(5,10):
ax[i-5].imshow(X_train[i, :, :, 0].reshape(28, 28), cmap=plt.cm.binary)
ax[i-5].set_xlabel('Clean '+class_names[y_train[i]])
# -
# #### `tf.keras.Sequential` API
#
# This is what we have been using so far for building our models. Its pros are: it's simple to use, it allows you to create models layer-by-layer. Its basic con is: it is not very flexible, and although it includes layers such as Merge, Concatenate, and Add that allow for a combination of models, it is difficult to make models with many inputs or shared-layers. All layers, as the name implies, are connected sequentially.
#
# #### Intro to `tf.keras.Functional` API
#
# https://www.tensorflow.org/guide/keras/functional.
#
# In this API, layers are built as graphs, with each layer indicating to which layer it is connected. Functional API helps us make more complex models which include non-sequential connections and multiple inputs or outputs.
#
# Let's say we have an image input with a shape of (28, 28, 1) and a classification task:
#
# ```
# num_classes = 10
#
# inputs = keras.Input(shape=(h, w, 1))
# x = Dense(64, activation='relu')(inputs)
# x = layers.Dense(64, activation='relu')(x)
# outputs = Dense(num_classes, activation='softmax', name='output')(x)
#
# ae_model = Model(inputs=inputs, outputs=outputs, name='autoencoder')
#
# ae_model.summary()
# ```
#
#
# #### Create the Encoder
# input layer
input_layer = Input(shape=(h, w, 1))
# <div class="exercise"><b>Exercise 3:</b> Create your "encoder" as a 2D CNN a follows: </div>
#
# - Use the Functional API
# - Create a pair of layers consisting of a `Conv2D` and a `MaxPool` layer which takes in our `input_layer`. Choose the number of filters.
# - Stack 3 of these layers, one after the other.
# - Give this model the name `latent_model` (it's not your final model).
#
# your code here
# +
# # %load solutions/exercise3.py
# encoding architecture
encoded_layer1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_layer)
encoded_layer1 = MaxPooling2D( (2, 2), padding='same')(encoded_layer1)
encoded_layer2 = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded_layer1)
encoded_layer2 = MaxPooling2D( (2, 2), padding='same')(encoded_layer2)
encoded_layer3 = Conv2D(4, (3, 3), activation='relu', padding='same')(encoded_layer2)
latent_view = MaxPooling2D( (2, 2), padding='same', name='latent_view')(encoded_layer3)
# call this the "encoder", we will use it later
encoder = Model(input_layer, latent_view, name='encoder_model')
# -
# <div class="exercise"><b>Exercise 4:</b> Create your "decoder" as a 2D CNN as follows: </div>
#
# - repeat the structure of your encoder but in "reverse".
# - What is the output layer activation function? What are the dimensions of the output?
#
# your code here
# # %load solutions/exercise4.py
# decoding architecture
decoded_layer1 = Conv2D(4, (3, 3), activation='relu', padding='same')(latent_view)
decoded_layer1 = UpSampling2D((2, 2))(decoded_layer1)
decoded_layer2 = Conv2D(32, (3, 3), activation='relu', padding='same')(decoded_layer1)
decoded_layer2 = UpSampling2D((2, 2))(decoded_layer2)
decoded_layer3 = Conv2D(64, (3, 3), activation='relu')(decoded_layer2)
decoded_layer3 = UpSampling2D((2, 2))(decoded_layer3)
# Note that the loss will be computed after every batch between the predicted output pixel and
# the ground truth pixel using mean squared error pixel by pixel:
output_layer = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoded_layer3)
# <div class="exercise"><b>Exercise 5:</b> Connect the two parts (encoder, decoder) to create your autoencoder. Compile and then train your autoencoder.</div>
#
# Choose an optimizer and a loss function. Use Early Stopping. To get good results you will need to run this about 20 epochs and this will take a long time depending on your machine. **For the purposes of this lab run only for 2 epochs**.
#
# (Optional: add Tensorboard).
#
# Here is how to connect the two models:
# create the model
ae_model = Model(input_layer, output_layer, name='ae_model')
ae_model.summary()
# your code here
# +
# # %load solutions/exercise5-1.py
loss = keras.losses.mse
optimizer = Adam() #RMSprop(learning_rate=0.001)
metrics = ['accuracy']
ae_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# +
# # %load solutions/exercise5-2.py
batch_size = 1048
epochs = 2 # do 2 for now, 20 when you have time
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
callbacks = [ tf.keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor='val_loss',
# "no longer improving" being further defined as "for at least `patience` epochs
patience=10,
verbose=5, mode='auto'),
tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
]
history = ae_model.fit(X_train_n, X_train, epochs=epochs, batch_size=batch_size,
validation_data=(X_test_n, X_test), callbacks=callbacks)
# -
# Let's see how our AE did
fig, ax = plt.subplots(1,1, figsize=(10,6))
ax.plot(history.history['loss'], label='Train')
ax.plot(history.history['val_loss'], label='Val')
ax.set_xlabel("Epoch", fontsize=20)
ax.set_ylabel("Loss", fontsize=20)
ax.legend()
ax.set_title('Autoencoder Loss')
# +
# start Tensorboard - requires grpcio>=1.24.3
# #%tensorboard --logdir logs
# -
# save your model
ae_model.save_weights('ae_model.h5')
# <a id=part3></a>
#
# ## Part 3: Visualizing Intermediate Layers of AE
# <div class="exercise"><b>Exercise 6:</b> Let's now visualize the latent layer of our encoder network. </div>
#
# This is our "encoder" model which we have saved as:
# ```
# encoder = Model(input_layer, latent_view, name='encoder_model')
# ```
# your code here
# +
# # %load solutions/exercise6.py
# plot the original noisy images
n = np.random.randint(0,len(X_test)-5)
f, ax = plt.subplots(1,5,figsize=(20,10))
for i,a in enumerate(range(n,n+5)):
ax[i].imshow(X_test_n[a, :, :, 0].reshape(28, 28), cmap='gray')
plt.show()
# print the predictions
preds = encoder.predict(X_test_n[n:n+5])
print(f'Shape of the predictions matrix: {preds.shape}')
latent_channels = 4
f, ax = plt.subplots(latent_channels,5, figsize=(20,10))
ax = ax.ravel()
for j in range(latent_channels):
for i,a in enumerate(range(n,n+5)):
ax[j*5 + i].imshow(preds[i, :, :, j], cmap='gray')
plt.show()
# -
# <div class="discussion"><b>Discussion</b>:
# <li> What do you see in the little images above?</li>
# <li> Could we have included Dense layers as bottleneck instead of just Conv2D and MaxPoool/upsample?</li></div>
# #### possible answers
# We can have bottleneck layers in convolutional autoencoders that are not dense but simply a few stacked featuremaps such as above. They might have better generalizability due to only using shared weights. One interesting consequence is that without the dense layer you'll force translational equivariance on the latent representation (a particular feature in the top right corner will appear as an activation in the top right corner of the featuremaps at the level of the bottleneck, and if the feature is moved in the original image the activation in the bottleneck will move proportionally in the same direction). This isn't necessarily a problem, but you are enforcing some constraints on the relationships between the latent space directions that you wouldn't be with the presence of a dense layer.
# ### Visualize Samples reconstructed by our AE
n = np.random.randint(0,len(X_test)-5)
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i,a in enumerate(range(n,n+5)):
ax[i].imshow(X_test[a, :, :, 0].reshape(28, 28), cmap='gray')
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i,a in enumerate(range(n,n+5)):
ax[i].imshow(X_test_n[a, :, :, 0].reshape(28, 28), cmap='gray')
preds = ae_model.predict(X_test_n[n:n+5])
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i,a in enumerate(range(n,n+5)):
ax[i].imshow(preds[i].reshape(28, 28), cmap='gray')
plt.show()
# <div class="discussion"><b>Discussion:</b> Comment on the predictions.</div>
| content/labs/lab07/notebook/cs109b_lab07_AE_2020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from chempy.kinetics.arrhenius import ArrheniusParamWithUnits
from chempy.kinetics.rates import MassAction
from chempy.kinetics._rates import RTPoly
from chempy.reactionsystem import ReactionSystem
from chempy.units import patched_numpy, Backend, unit_of, simplified, default_constants as dc, default_units as u
from chempy.util._expr import Expr, Constant
from chempy.util.parsing import get_parsing_context
def log10recipoly(*args):
    # Wrap a reciprocal-temperature polynomial p(1/T) as 10**p(1/T), i.e. the
    # tabulated coefficients model the base-10 log of the rate factor.
    poly = RTPoly(*args)
    return 10**poly
# Parsing context for ReactionSystem.from_string: expose the units registry and
# our helper expressions to the rate-expression parser.
rpkw = dict(globals_=dict(u=u, log10recipoly=log10recipoly, Constant=Constant, MassAction=MassAction))
# Mass-action rate coefficient: 10**poly(1/T) with tabulated log10 coefficients
# (the i-th coefficient carries units of K**i), scaled to 1/(M*s).
lgRTp = "MassAction([Constant(1/u.M/u.s) * log10recipoly([x*u.K**i for i, x in enumerate([13.339, -2.22e3, 7.333e5, -1.065e8])])])"
rs1 = ReactionSystem.from_string("OH + OH- -> H2O + O-; %s; name='R27f'" % lgRTp, rxn_parse_kwargs=rpkw)
rs1
# Sanity check: evaluate the rate coefficient at 298 K.
rs1.rxns[0].param.rate_coeff({'temperature': 298*u.K})
be = Backend()
variables = {'temperature': 298.15*u.kelvin}
# Per-substance rates at the given concentrations; every substance must get an
# entry with concentration-per-time units.
rates = rs1['R27f'].rate(dict(variables, **{'OH': 1e-11*u.molar, 'OH-': 1e-7*u.molar}), backend=be)
for sk in rs1.substances:
    assert sk in rates
    assert unit_of(rates[sk]) == u.molar/u.second
rates
# +
class GibbsExpr(Expr):
    # Temperature-dependent equilibrium-style factor built from four stored
    # arguments: dS/R, dCp/R, dH/R and a reference temperature Tref.
    nargs = 4
    parameter_keys = ('temperature',)
    def __call__(self, variables, backend=patched_numpy, **kwargs):
        # Unpacking order must match the constructor-argument order used below:
        # (dS_over_R, dCp_over_R, dH_over_R, Tref).
        dS_over_R, dCp_over_R, dH_over_R, Tref = map(simplified, self.all_args(variables, backend=backend))
        T, = self.all_params(variables, backend=backend)
        # exp(dS/R) * (T/Tref)**(dCp/R) * exp(-dH/(R*T))
        return backend.exp(dS_over_R)*(T/Tref)**dCp_over_R*backend.exp(-dH_over_R/T)
# Gibbs factor for NH3(aq): entropy-, heat-capacity- and enthalpy-like terms
# (all divided by the molar gas constant) plus the 298.15 K reference.
GeNH3 = GibbsExpr([
    18.8*u.cal/u.K/u.mol/dc.molar_gas_constant,
    52*u.cal/u.K/u.mol/dc.molar_gas_constant,
    -0.87e3*u.cal/u.mol/dc.molar_gas_constant,
    298.15*u.K
])
# -
# NOTE(review): this is named dCp_R but reuses the 18.8 cal/K/mol value that was
# passed as the *first* GibbsExpr argument (dS_over_R) — confirm which quantity
# 18.8 actually represents.
dCp_R = 18.8*u.cal/u.K/u.mol/dc.molar_gas_constant
dCp_R, dCp_R.simplified
# Forward reaction reuses rs1's rate parameter; the reverse rate divides the
# forward mass-action coefficient by the Gibbs factor (detailed balance).
str_rs2 = """
NH4+ + OH- -> NH3 + H2O; rs1['R27f'].param; name='ammonium_hydroxide'
NH3 + H2O -> NH4+ + OH-; MassAction(rs1['R27f'].param.args[0]/GeNH3); name='ammonia_water'
"""
globals_ = get_parsing_context()
globals_['rs1'] = rs1
globals_['GeNH3'] = GeNH3
globals_['Arrh'] = ArrheniusParamWithUnits
rs2 = ReactionSystem.from_string(str_rs2, rxn_parse_kwargs=dict(globals_=globals_))
rs2
rs2.rxns[-1].rate_expr()
rs2.rxns[-1].param.rate_coeff({'temperature': 298*u.K})
# Evaluate both reaction rates at the shared temperature with example
# concentrations.
for rk, cd in dict(ammonium_hydroxide={'NH4+': 1e-3*u.molar, 'OH-': 1e-7*u.molar},
                   ammonia_water={'NH3': 0.42*u.molar, 'H2O': 55.4*u.molar}).items():
    print(rs2[rk].rate(dict(variables, **cd), backend=be))
| examples/_Expr_custom_Gibbs_kinetics_odesys.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predictive Maintenance using Random Forests
# ## Problem Statement
#
# ### Background
#
# > A company has a fleet of devices transmitting daily telemetry readings. They would like to create a predictive
# maintenance solution to proactively identify when maintenance should be performed. This approach promises cost
# savings over routine or time-based preventive maintenance, because tasks are performed only when warranted.
#
# ### Goal
#
# >You are tasked with building a predictive model using machine learning to predict the probability of a device failure.
# When building this model, be sure to minimize false positives and false negatives. The column you are trying to predict
# is called failure with binary value 0 for non-failure and 1 for failure.
# ## Tech Stack
#
# * [Kedro](https://github.com/quantumblacklabs/kedro) - workflow development tool that helps you build data pipelines that are robust, scalable, deployable, reproducible and versioned.
# * [PySpark](https://github.com/apache/spark/tree/master/python) - Python API to Spark (a fast and general cluster computing system for Big Data).
# * [spark-sklearn](https://github.com/databricks/spark-sklearn) - tools to integrate the Spark computing framework with the popular scikit-learn machine learning library.
# * pandas
# * NumPy
# * matplotlib
# * seaborn
# +
# Kedro imports.
from kedro.io import DataCatalog
from kedro.contrib.io.pyspark import SparkDataSet
from kedro.pipeline import Pipeline as KedroPipeline, node
from kedro.config import ConfigLoader
from kedro.runner import SequentialRunner
# PySpark imports.
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.appName('predictive-maintenance') \
.master('local[*]') \
.getOrCreate()
from pyspark.ml.feature import SQLTransformer
from pyspark.ml import Pipeline as SparkPipeline
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.stat import Correlation
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.feature import StandardScaler
# Import Pandas and NumPy.
import pandas as pd
pd.options.display.max_columns = 500
import numpy as np
# Import matplotlib and seaborn.
import matplotlib.pyplot as plt
import seaborn as sns
# -
# ## Read and Clean Data
#
# Load telemetry data into Spark DataFrame using Kedro.
telemetry_sds = SparkDataSet(
filepath='../data/01_raw/predictive_maintenance.csv',
file_format='csv',
load_args={'sep': ',', 'header': True},
save_args={'sep': ',', 'header': True}
)
catalog = DataCatalog({'telemetry': telemetry_sds})
telemetry_sdf = catalog.load('telemetry')
# Create Kedro node (just a function) to clean telemetry data.
# +
def clean_telemetry(telemetry):
    """Clean telemetry data.

    Casts every column to an explicit type and renames ``failure`` to
    ``label`` (0 = healthy reading, 1 = failure) for downstream Spark ML
    stages.

    Args:
        telemetry: Spark DataFrame containing raw telemetry data.

    Returns:
        Cleaned telemetry Spark DataFrame.
    """
    # Register a temp view so the cleaning step can be written as plain SQL.
    telemetry.createOrReplaceTempView('telemetry')
    telemetry = spark.sql(
        '''
        SELECT
            CAST(date AS DATE) AS date,
            id,
            CAST(failure AS INTEGER) AS label,
            CAST(metric1 AS DOUBLE) AS metric1,
            CAST(metric2 AS DOUBLE) AS metric2,
            CAST(metric3 AS DOUBLE) AS metric3,
            CAST(metric4 AS DOUBLE) AS metric4,
            CAST(metric5 AS DOUBLE) AS metric5,
            CAST(metric6 AS DOUBLE) AS metric6,
            CAST(metric7 AS DOUBLE) AS metric7,
            CAST(metric8 AS DOUBLE) AS metric8,
            CAST(metric9 AS DOUBLE) AS metric9
        FROM telemetry
        ''')
    # Drop the temp view so repeated runs don't leak state into the catalog.
    spark.catalog.dropTempView('telemetry')
    return telemetry
clean_telemetry_sdf = clean_telemetry(telemetry_sdf)
clean_telemetry_sdf.printSchema()
# -
# ## Create Data Pipeline
#
# That's it! We've created an (admittedly simple) data pipeline with Kedro. We'll create a machine learning pipeline with PySpark in a bit, but first let's explore our data to figure out what features to build!
kedro_pipeline = KedroPipeline([
node(func=clean_telemetry, inputs='telemetry', outputs='clean_telemetry')
])
clean_telemetry_sdf = SequentialRunner().run(kedro_pipeline, catalog)['clean_telemetry']
clean_telemetry_sdf.select('*').limit(5).toPandas()
# ## Explore Data
clean_telemetry_sdf.createOrReplaceTempView('clean_telemetry')
# Drift, 5-day moving average, 5-day moving average z-score, standard deviation, age
failures_df = spark.sql(
'''
SELECT *
FROM clean_telemetry
WHERE id IN (
SELECT id
FROM clean_telemetry
WHERE label = 1
)
ORDER BY id, date
''').toPandas()
failures_df['date'] = pd.to_datetime(failures_df['date'])
# +
failure_id = np.random.choice(failures_df['id'].unique())
failure_df = failures_df.loc[failures_df['id']==failure_id]
sns.set(font_scale=1.25, style='whitegrid')
fig, axs = plt.subplots(nrows=10, ncols=1, sharex=True, figsize=(12,12))
axs[0].set_title('Time Histories for Device ' + failure_id)
axs[0].plot('date','label', data=failure_df, linestyle='-', marker='o', markersize=3); axs[0].set_ylabel('label')
axs[1].plot('date','metric1', data=failure_df, linestyle='-', marker='o', markersize=3); axs[1].set_ylabel('metric1')
axs[2].plot('date','metric2', data=failure_df, linestyle='-', marker='o', markersize=3); axs[2].set_ylabel('metric2')
axs[3].plot('date','metric3', data=failure_df, linestyle='-', marker='o', markersize=3); axs[3].set_ylabel('metric3')
axs[4].plot('date','metric4', data=failure_df, linestyle='-', marker='o', markersize=3); axs[4].set_ylabel('metric4')
axs[5].plot('date','metric5', data=failure_df, linestyle='-', marker='o', markersize=3); axs[5].set_ylabel('metric5')
axs[6].plot('date','metric6', data=failure_df, linestyle='-', marker='o', markersize=3); axs[6].set_ylabel('metric6')
axs[7].plot('date','metric7', data=failure_df, linestyle='-', marker='o', markersize=3); axs[7].set_ylabel('metric7')
axs[8].plot('date','metric8', data=failure_df, linestyle='-', marker='o', markersize=3); axs[8].set_ylabel('metric8')
axs[9].plot('date','metric9', data=failure_df, linestyle='-', marker='o', markersize=3); axs[9].set_ylabel('metric9'); axs[9].set_xlabel('date');
# -
# ## Engineer Features
#
# Drift, 5-day moving average, 5-day moving average z-score, standard deviation, age
# +
summary_sql = \
'''
SELECT
*,
-- Calculate device level label.
MAX(date_level_label) OVER (PARTITION BY id) AS label,
-- Calculate device level age in days.
CAST(DATEDIFF(max_date, min_date) + 1 AS DOUBLE) AS age,
-- Calculate device level 5-day moving average z-scores.
COALESCE(AVG((metric1 - mean_metric1) / sd_metric1) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric1,
COALESCE(AVG((metric2 - mean_metric2) / sd_metric2) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric2,
COALESCE(AVG((metric3 - mean_metric3) / sd_metric3) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric3,
COALESCE(AVG((metric4 - mean_metric4) / sd_metric4) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric4,
COALESCE(AVG((metric5 - mean_metric5) / sd_metric5) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric5,
COALESCE(AVG((metric6 - mean_metric6) / sd_metric6) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric6,
COALESCE(AVG((metric7 - mean_metric7) / sd_metric7) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric7,
COALESCE(AVG((metric8 - mean_metric8) / sd_metric8) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric8,
COALESCE(AVG((metric9 - mean_metric9) / sd_metric9) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW),0) AS zs_metric9,
-- Calculate device level first to last value drifts.
(last_metric1 - first_metric1) AS drift_metric1,
(last_metric2 - first_metric2) AS drift_metric2,
(last_metric3 - first_metric3) AS drift_metric3,
(last_metric4 - first_metric4) AS drift_metric4,
(last_metric5 - first_metric5) AS drift_metric5,
(last_metric6 - first_metric6) AS drift_metric6,
(last_metric7 - first_metric7) AS drift_metric7,
(last_metric8 - first_metric8) AS drift_metric8,
(last_metric9 - first_metric9) AS drift_metric9
FROM (
SELECT
id,
date,
label AS date_level_label,
metric1,
metric2,
metric3,
metric4,
metric5,
metric6,
metric7,
metric8,
metric9,
-- Calculate device level averages.
AVG(metric1) OVER (PARTITION BY id) AS mean_metric1,
AVG(metric2) OVER (PARTITION BY id) AS mean_metric2,
AVG(metric3) OVER (PARTITION BY id) AS mean_metric3,
AVG(metric4) OVER (PARTITION BY id) AS mean_metric4,
AVG(metric5) OVER (PARTITION BY id) AS mean_metric5,
AVG(metric6) OVER (PARTITION BY id) AS mean_metric6,
AVG(metric7) OVER (PARTITION BY id) AS mean_metric7,
AVG(metric8) OVER (PARTITION BY id) AS mean_metric8,
AVG(metric9) OVER (PARTITION BY id) AS mean_metric9,
-- Calculate device level 5-day moving averages.
AVG(metric1) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric1,
AVG(metric2) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric2,
AVG(metric3) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric3,
AVG(metric4) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric4,
AVG(metric5) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric5,
AVG(metric6) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric6,
AVG(metric7) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric7,
AVG(metric8) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric8,
AVG(metric9) OVER (PARTITION BY id ORDER BY date ASC ROWS BETWEEN 4 PRECEDING AND CURRENT ROW) AS ma_metric9,
-- Calculate device level standard deviations.
STDDEV_SAMP(metric1) OVER (PARTITION BY id) AS sd_metric1,
STDDEV_SAMP(metric2) OVER (PARTITION BY id) AS sd_metric2,
STDDEV_SAMP(metric3) OVER (PARTITION BY id) AS sd_metric3,
STDDEV_SAMP(metric4) OVER (PARTITION BY id) AS sd_metric4,
STDDEV_SAMP(metric5) OVER (PARTITION BY id) AS sd_metric5,
STDDEV_SAMP(metric6) OVER (PARTITION BY id) AS sd_metric6,
STDDEV_SAMP(metric7) OVER (PARTITION BY id) AS sd_metric7,
STDDEV_SAMP(metric8) OVER (PARTITION BY id) AS sd_metric8,
STDDEV_SAMP(metric9) OVER (PARTITION BY id) AS sd_metric9,
-- Calculate device level first values.
FIRST_VALUE(metric1) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric1,
FIRST_VALUE(metric2) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric2,
FIRST_VALUE(metric3) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric3,
FIRST_VALUE(metric4) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric4,
FIRST_VALUE(metric5) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric5,
FIRST_VALUE(metric6) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric6,
FIRST_VALUE(metric7) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric7,
FIRST_VALUE(metric8) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric8,
FIRST_VALUE(metric9) OVER (PARTITION BY id ORDER BY date ASC) AS first_metric9,
-- Calculate device level last values.
LAST_VALUE(metric1) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric1,
LAST_VALUE(metric2) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric2,
LAST_VALUE(metric3) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric3,
LAST_VALUE(metric4) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric4,
LAST_VALUE(metric5) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric5,
LAST_VALUE(metric6) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric6,
LAST_VALUE(metric7) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric7,
LAST_VALUE(metric8) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric8,
LAST_VALUE(metric9) OVER (PARTITION BY id ORDER BY date ASC) AS last_metric9,
-- Calculate device level minimum date.
MIN(date) OVER (PARTITION BY id) AS min_date,
-- Calculate device level maximum date.
MAX(date) OVER (PARTITION BY id) AS max_date,
-- Count device level number of readings.
CAST(COUNT(*) OVER (PARTITION BY id) AS DOUBLE) AS readings
FROM clean_telemetry
)
WHERE date = max_date
'''
summary_transformer = SQLTransformer(statement=summary_sql.replace('clean_telemetry','__THIS__'))
summary_sdf = summary_transformer.transform(clean_telemetry_sdf)
summary_sdf.select('*').limit(5).toPandas()
# -
spark.sql(
'''
SELECT
id,
-- Label indicating device failure.
MAX(label) AS label,
-- Min and max date for each device.
MIN(date) AS min_date,
MAX(date) AS max_date,
-- Age of device in days.
CAST(DATEDIFF(MAX(date), MIN(date)) + 1 AS DOUBLE) AS age,
-- Number of telemetry readings.
CAST(COUNT(*) AS DOUBLE) AS readings,
-- Mean values for each device.
AVG(metric1) AS mean_metric1,
AVG(metric2) AS mean_metric2,
AVG(metric3) AS mean_metric3,
AVG(metric4) AS mean_metric4,
AVG(metric5) AS mean_metric5,
AVG(metric6) AS mean_metric6,
AVG(metric7) AS mean_metric7,
AVG(metric8) AS mean_metric8,
AVG(metric9) AS mean_metric9,
-- Standard deviation values for each device.
STDDEV_SAMP(metric1) AS sd_metric1,
STDDEV_SAMP(metric2) AS sd_metric2,
STDDEV_SAMP(metric3) AS sd_metric3,
STDDEV_SAMP(metric4) AS sd_metric4,
STDDEV_SAMP(metric5) AS sd_metric5,
STDDEV_SAMP(metric6) AS sd_metric6,
STDDEV_SAMP(metric7) AS sd_metric7,
STDDEV_SAMP(metric8) AS sd_metric8,
STDDEV_SAMP(metric9) AS sd_metric9
FROM clean_telemetry
GROUP BY id
LIMIT 5
''').toPandas()
summary_sql = \
'''
SELECT
summary.*,
-- Drift or difference between first and last values for each device.
(t2.metric1 - t1.metric1) AS drift_metric1,
(t2.metric2 - t1.metric2) AS drift_metric2,
(t2.metric3 - t1.metric3) AS drift_metric3,
(t2.metric4 - t1.metric4) AS drift_metric4,
(t2.metric5 - t1.metric5) AS drift_metric5,
(t2.metric6 - t1.metric6) AS drift_metric6,
(t2.metric7 - t1.metric7) AS drift_metric7,
(t2.metric8 - t1.metric8) AS drift_metric8,
(t2.metric9 - t1.metric9) AS drift_metric9,
-- Last reading for each device.
t2.metric1 AS last_metric1,
t2.metric2 AS last_metric2,
t2.metric3 AS last_metric3,
t2.metric4 AS last_metric4,
t2.metric5 AS last_metric5,
t2.metric6 AS last_metric6,
t2.metric7 AS last_metric7,
t2.metric8 AS last_metric8,
t2.metric9 AS last_metric9
FROM (
SELECT
id,
-- Label indicating device failure.
MAX(label) AS label,
-- Min and max date for each device.
MIN(date) AS min_date,
MAX(date) AS max_date,
-- Age of device in days.
CAST(DATEDIFF(MAX(date), MIN(date)) + 1 AS DOUBLE) AS age,
-- Number of telemetry readings.
CAST(COUNT(*) AS DOUBLE) AS readings,
-- Mean values for each device.
AVG(metric1) AS mean_metric1,
AVG(metric2) AS mean_metric2,
AVG(metric3) AS mean_metric3,
AVG(metric4) AS mean_metric4,
AVG(metric5) AS mean_metric5,
AVG(metric6) AS mean_metric6,
AVG(metric7) AS mean_metric7,
AVG(metric8) AS mean_metric8,
AVG(metric9) AS mean_metric9,
-- Standard deviation values for each device.
STDDEV_SAMP(metric1) AS sd_metric1,
STDDEV_SAMP(metric2) AS sd_metric2,
STDDEV_SAMP(metric3) AS sd_metric3,
STDDEV_SAMP(metric4) AS sd_metric4,
STDDEV_SAMP(metric5) AS sd_metric5,
STDDEV_SAMP(metric6) AS sd_metric6,
STDDEV_SAMP(metric7) AS sd_metric7,
STDDEV_SAMP(metric8) AS sd_metric8,
STDDEV_SAMP(metric9) AS sd_metric9
FROM telemetry
GROUP BY id
) AS summary
LEFT JOIN telemetry AS t1 ON summary.id = t1.id AND summary.min_date = t1.date
LEFT JOIN telemetry AS t2 ON summary.id = t2.id AND summary.max_date = t2.date
WHERE summary.sd_metric1 != 'NaN'
AND summary.sd_metric2 != 'NaN'
AND summary.sd_metric3 != 'NaN'
AND summary.sd_metric4 != 'NaN'
AND summary.sd_metric5 != 'NaN'
AND summary.sd_metric6 != 'NaN'
AND summary.sd_metric7 != 'NaN'
AND summary.sd_metric8 != 'NaN'
AND summary.sd_metric9 != 'NaN'
'''
summary_transformer = SQLTransformer(statement=summary_sql.replace('telemetry','__THIS__'))
summary_sdf = summary_transformer.transform(clean_telemetry_sdf)
summary_sdf.select('*').limit(5).toPandas()
# +
# Assemble the Spark ML pre-processing stages used by the model pipelines.
transformers = [
    summary_transformer
]
# Fix: bare `Pipeline` is undefined in this notebook — the Spark ML Pipeline
# was imported as `SparkPipeline` (and Kedro's as `KedroPipeline`), so the
# original line raised a NameError.
transformed_sdf = SparkPipeline(stages=transformers).fit(telemetry_sdf).transform(telemetry_sdf)
transformed_sdf.createOrReplaceTempView('transformed')
transformed_cols = transformed_sdf.columns
transformed_sdf.printSchema()
transformed_sdf.toPandas().head(5)
# +
def engineer_features(clean_telemetry):
    """Engineer features for machine learning.

    Args:
        clean_telemetry: Spark DataFrame containing cleaned telemetry data
            (already typed, with ``failure`` renamed to ``label``).

    Returns:
        Features Spark DataFrame.
    """
    clean_telemetry.createOrReplaceTempView('clean_telemetry')
    # Fix: the original selected FROM the stale `telemetry` view and re-cast a
    # `failure` column that no longer exists after cleaning (it is `label`,
    # and the metric columns are already DOUBLE). Query the view registered
    # above instead.
    features = spark.sql(
        '''
        SELECT
            date,
            id,
            label,
            metric1,
            metric2,
            metric3,
            metric4,
            metric5,
            metric6,
            metric7,
            metric8,
            metric9
        FROM clean_telemetry
        ''')
    # Clean up the temp view so re-running the cell is idempotent.
    spark.catalog.dropTempView('clean_telemetry')
    return features
features_sdf = engineer_features(clean_telemetry_sdf)
features_sdf.printSchema()
features_sdf.select('*').limit(5).toPandas()
# -
# ## Correlations
# As can be seen in the correlation table and plot below, metrics 7 and 8 are highly (perfectly) correlated. So we should only consider one of these features in modeling. We'll pick metric 7 and drop metric 8.
correlation_cols = sorted(list(set(transformed_cols) - {'id','min_date','max_date'}))
correlation_assembler = VectorAssembler(inputCols=correlation_cols, outputCol='features')
correlation_features = correlation_assembler.transform(transformed_sdf)
correlation_sdm = Correlation.corr(correlation_features, 'features').head()[0]
correlation_array = correlation_sdm.toArray()
correlation_df = pd.DataFrame(data=correlation_array, index=correlation_cols, columns=correlation_cols)
# +
correlation_mask = np.zeros_like(correlation_df.values)
correlation_mask[np.triu_indices_from(correlation_mask)] = True
sns.set(font_scale=1, style='whitegrid')
fig, ax = plt.subplots(figsize=(14,10))
ax.set_title('Correlation of response and predictor variables\n')
sns.heatmap(correlation_df, mask=correlation_mask, center=0, vmin=0, vmax=1, linewidths=.1, cmap='YlGnBu', square=True, ax=ax);
# +
correlation_mask = np.zeros_like(correlation_df.values)
correlation_mask[correlation_df.values < 0.4] = True
correlation_mask[np.triu_indices_from(correlation_mask)] = True
sns.set(font_scale=1, style='whitegrid')
fig, ax = plt.subplots(figsize=(14,10))
ax.set_title('Correlation of response and predictor variables\n')
sns.heatmap(correlation_df, mask=correlation_mask, center=0, vmin=0, vmax=1, linewidths=.1, linecolor='gray', cmap='YlGnBu', square=True, ax=ax);
# -
# ## Stratified Train / Test Split
# +
train_pct = 0.8
test_pct = 1 - train_pct
telemetry_0 = spark.sql(
'''
SELECT
*
FROM telemetry
WHERE label = 0
''')
telemetry_0_train, telemetry_0_test = telemetry_0.randomSplit([train_pct, test_pct])
telemetry_1 = spark.sql(
'''
SELECT
*
FROM telemetry
WHERE label = 1
''')
telemetry_1_train, telemetry_1_test = telemetry_1.randomSplit([train_pct, test_pct])
train_sdf = telemetry_0_train.union(telemetry_1_train)
test_sdf = telemetry_0_test.union(telemetry_1_test)
# -
train_sdf.groupBy('label').count().toPandas()
test_sdf.groupBy('label').count().toPandas()
# ## Machine Learning Pipeline
feature_cols = sorted(list(set(transformed_cols) - {'id','min_date','max_date','label','readings'} - set([col for col in transformed_cols if 'metric8' in col])))
feature_assembler = VectorAssembler(
inputCols=feature_cols,
outputCol='features'
)
# +
# Random forest over the engineered feature vector. 500 trees for stable
# probability estimates; column names match the evaluators used below.
rf = RandomForestClassifier(
    featuresCol='features',
    labelCol='label',
    predictionCol='prediction',
    probabilityCol='probability',
    rawPredictionCol='raw_prediction',
    numTrees=500
)
# Fix: bare `Pipeline` is undefined in this notebook; the Spark ML Pipeline
# was imported as `SparkPipeline`, so the original line raised a NameError.
rf_pipeline = SparkPipeline(stages=transformers + [feature_assembler, rf])
# +
rf_model = rf_pipeline.fit(train_sdf)
# rf predictions on test data.
rf_predict_test_sdf = rf_model.transform(test_sdf)
rf_predict_test_sdf.createOrReplaceTempView('predictions')
rf_predict_test_sdf.toPandas().head(5)
# +
from pyspark.sql.functions import udf, col
from pyspark.sql.types import ArrayType, DoubleType
def to_array(col):
    # Convert a Spark ML Vector column into a plain ArrayType(DoubleType())
    # column so individual class probabilities can be selected by index.
    return udf(lambda v: v.toArray().tolist(), ArrayType(DoubleType()))(col)
rf_predict_test_sdf = rf_predict_test_sdf \
.withColumn('probability', to_array(col('probability'))) \
.select(['*'] + [col('probability')[i] for i in range(2)])
rf_predict_test_sdf.createOrReplaceTempView('predictions')
# +
def confusion_summary(thresholds=[0.5], predictions_tbl='predictions'):
    """Compute confusion-matrix counts and derived metrics per threshold.

    Args:
        thresholds: Probability cutoffs applied to ``probability[1]``.
        predictions_tbl: Name of the temp view holding label/probability rows.

    Returns:
        pandas DataFrame with one row per threshold: TP, TN, FP, FN,
        Accuracy, Precision, Recall, F1.
    """
    columns = ['Threshold','TP','TN','FP','FN','Accuracy','Precision','Recall','F1']
    summaries = []
    for threshold in thresholds:
        confusion_summary_sql = \
        '''
        SELECT
            {threshold} AS Threshold,
            TP,
            TN,
            FP,
            FN,
            (TP + TN) / (TP + TN + FP + FN) AS Accuracy,
            TP / (TP + FP) AS Precision,
            TP / (TP + FN) AS Recall,
            2*TP / (2*TP + FP + FN) AS F1
        FROM (
            SELECT
                SUM(CASE WHEN prediction = 1 AND label = 1 THEN 1 ELSE 0 END) AS TP,
                SUM(CASE WHEN prediction = 0 AND label = 0 THEN 1 ELSE 0 END) AS TN,
                SUM(CASE WHEN prediction = 1 AND label = 0 THEN 1 ELSE 0 END) AS FP,
                SUM(CASE WHEN prediction = 0 AND label = 1 THEN 1 ELSE 0 END) AS FN
            FROM (
                SELECT
                    label,
                    CASE WHEN probability[1] > {threshold} THEN 1 ELSE 0 END AS prediction
                FROM {predictions}
            )
        )
        '''.format(**{'threshold':threshold,'predictions':predictions_tbl})
        summaries.append(spark.sql(confusion_summary_sql).toPandas())
    # Fix: DataFrame.append was deprecated and removed in pandas >= 2.0;
    # collect the per-threshold frames and concatenate once instead.
    if not summaries:
        return pd.DataFrame(columns=columns)
    return pd.concat(summaries, ignore_index=True)[columns]
confusion_summary(thresholds=[0.1,0.2,0.3,0.4,0.5])
# +
evaluator = BinaryClassificationEvaluator(metricName='areaUnderROC', rawPredictionCol='raw_prediction', labelCol='label')
area_under_roc = evaluator.evaluate(rf_predict_test_sdf)
print('Area under ROC = %g' % area_under_roc)
evaluator = BinaryClassificationEvaluator(metricName='areaUnderPR', rawPredictionCol='raw_prediction', labelCol='label')
area_under_pr = evaluator.evaluate(rf_predict_test_sdf)
print('Area under PR = %g' % area_under_pr)
# -
# ### Class weight transformer
#
# Create a class weight transformer that adds a class_weight column to be used for weighting classes.
# +
weight_sql = \
'''
SELECT telemetry.*, weights.class_weight
FROM telemetry
LEFT JOIN (
-- Calculate % 0s and 1s and use complement rule to calculate class weights.
SELECT *, (1.0 - (count / SUM(count) OVER ())) AS class_weight
FROM (
-- Count 0s and 1s.
SELECT
label, COUNT(label) AS count
FROM telemetry
GROUP BY label
)
) AS weights
ON telemetry.label = weights.label
'''
weight_transformer = SQLTransformer(statement=weight_sql.replace('telemetry','__THIS__'))
# +
# Pre-processing stages: feature summarization plus the class-weight column.
transformers = [
    summary_transformer,
    weight_transformer
]
# Fix: bare `Pipeline` is undefined in this notebook; the Spark ML Pipeline
# was imported as `SparkPipeline`, so the original line raised a NameError.
transformed_sdf = SparkPipeline(stages=transformers).fit(telemetry_sdf).transform(telemetry_sdf)
transformed_sdf.createOrReplaceTempView('transformed')
transformed_cols = transformed_sdf.columns
# -
# ## Machine Learning Pipeline
feature_cols = sorted(list(set(transformed_cols) - {'id','min_date','max_date','class_weight','label','readings'} - set([col for col in transformed_cols if 'metric8' in col])))
feature_assembler = VectorAssembler(
inputCols=feature_cols,
outputCol='features'
)
scaler = StandardScaler(
inputCol='features',
outputCol='scaled_features',
withStd=True,
withMean=True)
# +
# Fix: LogisticRegression was never imported in this notebook's import cell,
# so the original cell raised a NameError; import it locally so the cell runs
# standalone.
from pyspark.ml.classification import LogisticRegression
# Class-weighted logistic regression on the standardized feature vector.
lr = LogisticRegression(
    featuresCol='scaled_features',
    labelCol='label',
    predictionCol='prediction',
    maxIter=100,
    fitIntercept=True,
    threshold=0.5,
    probabilityCol='probability',
    rawPredictionCol='raw_prediction',
    standardization=False,  # features are already scaled by StandardScaler
    weightCol='class_weight',
    family='binomial'
)
# Fix: bare `Pipeline` is undefined here; the Spark ML Pipeline was imported
# as `SparkPipeline`.
lr_pipeline = SparkPipeline(stages=transformers + [feature_assembler, scaler, lr])
# +
lr_model = lr_pipeline.fit(train_sdf)
# lr predictions on test data.
lr_predict_test_sdf = lr_model.transform(test_sdf)
lr_predict_test_sdf.createOrReplaceTempView('predictions')
lr_predict_test_sdf.toPandas().head(5)
# +
lr_predict_test_sdf = lr_predict_test_sdf \
.withColumn('probability', to_array(col('probability'))) \
.select(['*'] + [col('probability')[i] for i in range(2)])
lr_predict_test_sdf.createOrReplaceTempView('predictions')
confusion_summary(thresholds=[0.1,0.2,0.3,0.4,0.5])
# -
# ## We'll use PySpark, Pandas, NumPy, Matplotlib, and Seaborn
# +
from pyspark.sql import SparkSession
from pyspark.ml.stat import Correlation
from pyspark.ml.feature import SQLTransformer
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import OneHotEncoderEstimator
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import StandardScaler
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
from pyspark.ml.tuning import ParamGridBuilder
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from spark_stratifier import StratifiedCrossValidator
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# -
# ## Start a Spark Session
spark = SparkSession.builder \
.appName('predictive-maintenance') \
.master('local[8]') \
.getOrCreate()
# ## Read and clean data
telemetry_sdf = spark.read.csv('predictive_maintenance.csv', sep=',', header=True)
telemetry_sdf.createOrReplaceTempView('telemetry')
telemetry_sdf.show(5)
telemetry_sdf = spark.sql(
'''
SELECT
CAST(date AS DATE) AS date,
id,
CAST(failure AS INTEGER) AS label,
CAST(metric1 AS DOUBLE) AS metric1,
CAST(metric2 AS DOUBLE) AS metric2,
CAST(metric3 AS DOUBLE) AS metric3,
CAST(metric4 AS DOUBLE) AS metric4,
CAST(metric5 AS DOUBLE) AS metric5,
CAST(metric6 AS DOUBLE) AS metric6,
CAST(metric7 AS DOUBLE) AS metric7,
CAST(metric8 AS DOUBLE) AS metric8,
CAST(metric9 AS DOUBLE) AS metric9
FROM telemetry
''')
telemetry_sdf.createOrReplaceTempView('telemetry')
telemetry_sdf.printSchema()
# ## Exploratory data analysis
# ### How many observations do we have?
#
# We have 124,494 data points. We don't need to use Spark, but we may not always be so lucky. So let's build something scalable!
spark.sql(
'''
SELECT
COUNT(*) AS total
FROM telemetry
''').toPandas()
# ### How imbalanced are we at the observation level?
#
# Very imbalanced! Only ~0.085% of our observations are failures. We could rebalance the data using undersampling, oversampling, or SMOTE. However, I (and most of my clients) prefer to build a better model over engineering new data. So I typically prefer to use **class weighting**. Alternatively, we could see if our data is less imbalanced at the device level.
spark.sql(
'''
SELECT *, (count / SUM(count) OVER ()) AS percent
FROM (
SELECT
label, COUNT(label) AS count
FROM telemetry
GROUP BY label
)
''').toPandas()
# ### How imbalanced are we at the device level?
#
# Less imbalanced indeed! ~9% of our devices fail. We would still need to rebalance the data. However, the class imbalance is much less pronounced at the device level.
spark.sql(
'''
SELECT *, (count / SUM(count) OVER ()) AS percent
FROM (
SELECT
label, COUNT(label) AS count
FROM (
SELECT
id, MAX(label) AS label
FROM telemetry
GROUP BY id
)
GROUP BY label
)
''').toPandas()
# ### Should our model be at the observation or device level?
#
# In a first model, at the observation level, we were able to overcome the class imbalance (achieved high recall). However, we had less luck separating the signal from the noise (achieved low precision).
#
# In my experience working with advertising data, the right amount of aggregation can go a long way to separate the signal from the noise.
#
# **So let's try an aggregate model at the device level.**
#
# We'll have fewer data points to work with, but we'll also be using "summary" features. So we should be able to maintain a reasonable sampling density.
# ### How many unique values in each column and what do they look like?
#
# Metrics 1 and 6 have a large number of unique unequally spaced integer values over large ranges and, therefore, are likely **numeric features**.
#
# Metric 2 has a moderate number of unique equally spaced integer values and, therefore, is likely an **interval feature** (with interval 8). While it could be a categorical or ordinal feature, 558 categories or scales would be a lot. So we've assumed it is an interval feature and, therefore, the difference between two values is assumed to be meaningful.
#
# Metric 5 has a small number of unique almost equally spaced integer values over the range 1 to 98 and, therefore, is likely an **interval feature** (with interval 1) and may represent temperature in degrees Celsius.
#
# Metrics 3, 4, 7, 8, and 9 have a small number of unique unequally spaced integer values over large ranges and, therefore, are probably **numeric features**.
spark.sql(
'''
SELECT
COUNT(DISTINCT id) AS id,
COUNT(DISTINCT metric1) AS metric1,
COUNT(DISTINCT metric2) AS metric2,
COUNT(DISTINCT metric3) AS metric3,
COUNT(DISTINCT metric4) AS metric4,
COUNT(DISTINCT metric5) AS metric5,
COUNT(DISTINCT metric6) AS metric6,
COUNT(DISTINCT metric7) AS metric7,
COUNT(DISTINCT metric8) AS metric8,
COUNT(DISTINCT metric9) AS metric9
FROM telemetry
''').toPandas()
spark.sql(
'''
SELECT
DISTINCT metric9 AS metric9
FROM telemetry
ORDER BY metric9
LIMIT 100
''').toPandas(); # Remove semicolon to view.
# ### How many days old are devices when they fail?
# +
age_sql = \
'''
SELECT telemetry.*, CAST(DATEDIFF(telemetry.date, dates.min_date) AS DOUBLE) AS age
FROM telemetry
LEFT JOIN (
SELECT id, MIN(date) AS min_date
FROM telemetry
GROUP BY id
) AS dates
ON telemetry.id = dates.id
'''
failure_age_df = spark.sql('SELECT * FROM (' + age_sql + ') WHERE label = 1').toPandas()
sns.set(font_scale=2, style='whitegrid')
fig, ax = plt.subplots(figsize=(7,5))
sns.distplot(failure_age_df['age'], bins=25, kde=False, ax=ax)
ax.set_title('Failure Age Distribution')
ax.set_xlabel('age (days)');
# -
# ### What do our min, max, mean, median, and stdev summary statistics tell us?
#
# First, our summary statistics tell us we better **standardize** (center and scale) our features! Our summary statistics vary greatly across our features. We'll need to standardize in order to put our features on the same playing field numerically. We want our model to learn from the patterns in our data and not be skewed by the properties of our data. Another benefit of standardization, is that it makes our model less sensitive to outliers and, therefore, more robust.
#
# Second, our summary statistics suggest that min, max, mean, and standard deviation may be good "summary" features for our aggregate model since some variation appears to exist between failures and non-failures.
#
# Third, unfortunately, it appears that median and age may not be very informative features since little variation appears to exist between failures and non-failures.
# +
def label_summary(agg_template):
    """Aggregate every metric (and age) per label with one SQL aggregate.

    Replaces five hand-copied queries that differed only in the aggregate
    function (MIN / MAX / AVG / PERCENTILE / STDDEV_SAMP).

    Args:
        agg_template (str): SQL aggregate expression with a `{c}` column
            placeholder, e.g. 'MIN({c})' or 'PERCENTILE({c}, 0.5)'.

    Returns:
        pandas.DataFrame: one row per label, one aggregated column per
        metric plus `age`.
    """
    cols = ['metric%d' % i for i in range(1, 10)] + ['age']
    select_list = ',\n        '.join(
        '%s AS %s' % (agg_template.format(c=c), c) for c in cols)
    return spark.sql(
        '''
        SELECT
            label,
            ''' + select_list + '''
        FROM (''' + age_sql + ''')
        GROUP BY label
        ''').toPandas()
# -
# Minimum values per label.
label_summary('MIN({c})')
# Maximum values per label.
label_summary('MAX({c})')
# Mean values per label.
label_summary('AVG({c})')
# Median values per label.
label_summary('PERCENTILE({c}, 0.5)')
# Sample standard deviation per label.
label_summary('STDDEV_SAMP({c})')
# ### How many null values in each column?
#
# Today must be our lucky day! No nulls = no imputation required.
spark.sql(
'''
SELECT
SUM(CASE WHEN id IS NULL THEN 1 ELSE 0 END) AS id,
SUM(CASE WHEN metric1 IS NULL THEN 1 ELSE 0 END) AS metric1,
SUM(CASE WHEN metric2 IS NULL THEN 1 ELSE 0 END) AS metric2,
SUM(CASE WHEN metric3 IS NULL THEN 1 ELSE 0 END) AS metric3,
SUM(CASE WHEN metric4 IS NULL THEN 1 ELSE 0 END) AS metric4,
SUM(CASE WHEN metric5 IS NULL THEN 1 ELSE 0 END) AS metric5,
SUM(CASE WHEN metric6 IS NULL THEN 1 ELSE 0 END) AS metric6,
SUM(CASE WHEN metric7 IS NULL THEN 1 ELSE 0 END) AS metric7,
SUM(CASE WHEN metric8 IS NULL THEN 1 ELSE 0 END) AS metric8,
SUM(CASE WHEN metric9 IS NULL THEN 1 ELSE 0 END) AS metric9
FROM telemetry
''').toPandas()
# ## Transformations / feature engineering
# As we noted earlier, we have a very imbalanced dataset. Rather than undersampling, oversampling, or using SMOTE, we chose class weighting. In order to do this, we'll need to develop a class weight transformer.
#
# Then we'll create 3 feature transformers: (1) age, (2) device class, and (3) summary.
# ### Summary feature transformer
#
# Create a summary feature transformer.
summary_sql = \
'''
SELECT
summary.*,
-- Difference between first and last values for each device.
(t2.metric1 - t1.metric1) AS delta_metric1,
(t2.metric2 - t1.metric2) AS delta_metric2,
(t2.metric3 - t1.metric3) AS delta_metric3,
(t2.metric4 - t1.metric4) AS delta_metric4,
(t2.metric5 - t1.metric5) AS delta_metric5,
(t2.metric6 - t1.metric6) AS delta_metric6,
(t2.metric7 - t1.metric7) AS delta_metric7,
(t2.metric8 - t1.metric8) AS delta_metric8,
(t2.metric9 - t1.metric9) AS delta_metric9
FROM (
SELECT
id,
-- Label indicating device failure.
MAX(label) AS label,
-- Min and max date for each device.
MIN(date) AS min_date,
MAX(date) AS max_date,
-- Age of each device.
CAST(DATEDIFF(MAX(date), MIN(date)) AS DOUBLE) AS age,
-- Minimum values for each device.
MIN(metric1) AS min_metric1,
MIN(metric2) AS min_metric2,
MIN(metric3) AS min_metric3,
MIN(metric4) AS min_metric4,
MIN(metric5) AS min_metric5,
MIN(metric6) AS min_metric6,
MIN(metric7) AS min_metric7,
MIN(metric8) AS min_metric8,
MIN(metric9) AS min_metric9,
-- Maximum values for each device.
MAX(metric1) AS max_metric1,
MAX(metric2) AS max_metric2,
MAX(metric3) AS max_metric3,
MAX(metric4) AS max_metric4,
MAX(metric5) AS max_metric5,
MAX(metric6) AS max_metric6,
MAX(metric7) AS max_metric7,
MAX(metric8) AS max_metric8,
MAX(metric9) AS max_metric9,
-- Mean values for each device.
AVG(metric1) AS mean_metric1,
AVG(metric2) AS mean_metric2,
AVG(metric3) AS mean_metric3,
AVG(metric4) AS mean_metric4,
AVG(metric5) AS mean_metric5,
AVG(metric6) AS mean_metric6,
AVG(metric7) AS mean_metric7,
AVG(metric8) AS mean_metric8,
AVG(metric9) AS mean_metric9,
-- Standard deviation values for each device.
STDDEV_SAMP(metric1) AS sd_metric1,
STDDEV_SAMP(metric2) AS sd_metric2,
STDDEV_SAMP(metric3) AS sd_metric3,
STDDEV_SAMP(metric4) AS sd_metric4,
STDDEV_SAMP(metric5) AS sd_metric5,
STDDEV_SAMP(metric6) AS sd_metric6,
STDDEV_SAMP(metric7) AS sd_metric7,
STDDEV_SAMP(metric8) AS sd_metric8,
STDDEV_SAMP(metric9) AS sd_metric9,
-- Device class indicator for each device.
CASE WHEN SUBSTRING(id, 1, 1) = 'S' THEN 1 ELSE 0 END AS S,
CASE WHEN SUBSTRING(id, 1, 1) = 'W' THEN 1 ELSE 0 END AS W,
CASE WHEN SUBSTRING(id, 1, 1) = 'Z' THEN 1 ELSE 0 END AS Z
FROM telemetry
GROUP BY id
) AS summary
LEFT JOIN telemetry AS t1 ON summary.id = t1.id AND summary.min_date = t1.date
LEFT JOIN telemetry AS t2 ON summary.id = t2.id AND summary.max_date = t2.date
WHERE summary.sd_metric1 != 'NaN'
'''
summary_sdf = spark.sql(summary_sql)
summary_sdf.createOrReplaceTempView('summary')
summary_sdf.printSchema()
summary_transformer = SQLTransformer(statement=summary_sql.replace('telemetry','__THIS__'))
# ### Class weight transformer
#
# Create a class weight transformer that adds a class_weight column to be used for weighting classes.
# +
weight_sql = \
'''
SELECT telemetry.*, weights.class_weight
FROM telemetry
LEFT JOIN (
-- Calculate % 0s and 1s and use complement rule to calculate class weights.
SELECT *, (1.0 - (count / SUM(count) OVER ())) AS class_weight
FROM (
-- Count 0s and 1s.
SELECT
label, COUNT(label) AS count
FROM telemetry
GROUP BY label
)
) AS weights
ON telemetry.label = weights.label
'''
weight_transformer = SQLTransformer(statement=weight_sql.replace('telemetry','__THIS__'))
# -
spark.sql(weight_sql.replace('telemetry','summary') + 'WHERE summary.label = 0 LIMIT 5').toPandas()
spark.sql(weight_sql.replace('telemetry','summary') + 'WHERE summary.label = 1 LIMIT 5').toPandas()
# ### Combining transformers
# +
# Chain the device-level summary aggregation with the class-weight join.
# Fitting the pipeline on the raw telemetry yields the weighted, per-device
# table used for correlation analysis and modeling below.
transformers = [
    summary_transformer,
    weight_transformer
]
prep_model = Pipeline(stages=transformers).fit(telemetry_sdf)
transformed_sdf = prep_model.transform(telemetry_sdf)
transformed_sdf.createOrReplaceTempView('transformed')
transformed_cols = transformed_sdf.columns
# -
# ## Correlations
# As can be seen in the correlation table and plot below, metrics 7 and 8 are highly (perfectly) correlated. So we should only consider one of these feaures in modeling. We'll pick metric 7 and drop metric 8.
# Pairwise (Pearson) correlations over every candidate column, computed in Spark.
correlation_cols = sorted(list(set(transformed_cols) - {'id','min_date','max_date','class_weight'}))
correlation_assembler = VectorAssembler(inputCols=correlation_cols, outputCol='features')
correlation_features = correlation_assembler.transform(transformed_sdf)
correlation_sdm = Correlation.corr(correlation_features, 'features').head()[0]
correlation_array = correlation_sdm.toArray()
correlation_df = pd.DataFrame(data=correlation_array, index=correlation_cols, columns=correlation_cols)
# +
# Full heatmap; the mask hides the redundant upper triangle.
correlation_mask = np.zeros_like(correlation_df.values)
correlation_mask[np.triu_indices_from(correlation_mask)] = True
sns.set(font_scale=1, style='whitegrid')
fig, ax = plt.subplots(figsize=(14,10))
ax.set_title('Correlation of response and predictor variables\n')
sns.heatmap(correlation_df, mask=correlation_mask, center=0, vmin=0, vmax=1, linewidths=.1, cmap='YlGnBu', square=True, ax=ax);
# +
# Same heatmap, additionally masking correlations below 0.4 to highlight
# the strongest relationships.
correlation_mask = np.zeros_like(correlation_df.values)
correlation_mask[correlation_df.values < 0.4] = True
correlation_mask[np.triu_indices_from(correlation_mask)] = True
sns.set(font_scale=1, style='whitegrid')
fig, ax = plt.subplots(figsize=(14,10))
ax.set_title('Correlation of response and predictor variables\n')
sns.heatmap(correlation_df, mask=correlation_mask, center=0, vmin=0, vmax=1, linewidths=.1, linecolor='gray', cmap='YlGnBu', square=True, ax=ax);
# -
# ## Machine Learning Pipeline
# Feature set: everything except identifiers/bookkeeping, the label, all
# metric8-derived columns (perfectly correlated with metric7 — see above),
# and 'Z' (presumably dropped as the redundant third device-class dummy —
# confirm intent).
feature_cols = sorted(list(set(transformed_cols) - {'id','min_date','max_date','class_weight','label','Z'} - set([col for col in transformed_cols if 'metric8' in col])))
feature_assembler = VectorAssembler(
    inputCols=feature_cols,
    outputCol='features'
)
# Center and scale; standardization=False on the model below because scaling
# is an explicit pipeline stage here.
scaler = StandardScaler(
    inputCol='features',
    outputCol='scaled_features',
    withStd=True,
    withMean=True)
# +
# Weighted binomial logistic regression; class_weight offsets the class imbalance.
lr = LogisticRegression(
    featuresCol='scaled_features',
    labelCol='label',
    predictionCol='prediction',
    maxIter=100,
    fitIntercept=True,
    threshold=0.5,
    probabilityCol='probability',
    rawPredictionCol='raw_prediction',
    standardization=False,
    weightCol='class_weight',
    family='binomial'
)
pipeline = Pipeline(stages=transformers + [feature_assembler, scaler, lr])
# -
# ## Train / Test Split
#
# Split each label stratum separately so train and test keep the same class
# balance, then recombine. A fixed seed makes the split reproducible across
# notebook runs (the original unseeded split gave different results each run).
# +
train_pct = 0.8
test_pct = 1 - train_pct
SPLIT_SEED = 42

def split_label(label_value):
    """Return (train, test) DataFrames for rows with the given label."""
    stratum = spark.sql('SELECT * FROM telemetry WHERE label = %d' % label_value)
    return stratum.randomSplit([train_pct, test_pct], seed=SPLIT_SEED)

telemetry_0_train, telemetry_0_test = split_label(0)
telemetry_1_train, telemetry_1_test = split_label(1)
train_sdf = telemetry_0_train.union(telemetry_1_train)
test_sdf = telemetry_0_test.union(telemetry_1_test)
# -
# Sanity check: class counts in each split.
train_sdf.groupBy('label').count().show()
test_sdf.groupBy('label').count().show()
# ## Feature Selection using LASSO
# +
# Full sweep is expensive; 0.01 was kept from an earlier run.
# lasso_reg_params = [0.0, 0.001, 0.01, 0.1, 1.0]
lasso_reg_params = [0.01]
# elasticNetParam=1.0 selects pure L1 (LASSO) regularization.
lasso_param_grid = ParamGridBuilder() \
    .addGrid(lr.elasticNetParam, [1.0]) \
    .addGrid(lr.regParam, lasso_reg_params) \
    .build()
# Stratified CV keeps the rare failure class represented in every fold;
# area under the precision-recall curve suits the imbalanced labels.
lasso_cv = StratifiedCrossValidator(
    estimator=pipeline,
    estimatorParamMaps=lasso_param_grid,
    evaluator=BinaryClassificationEvaluator(metricName='areaUnderPR', rawPredictionCol='raw_prediction', labelCol='label'),
    numFolds=5,
    collectSubModels=True
)
# -
# Fit LASSO cross validation model.
lasso_cv_model = lasso_cv.fit(train_sdf)
# Construct LASSO regularization strength vs area under precision-recall curve dataframe.
# Built in one pass from a list of records; DataFrame.append was removed in pandas 2.0.
lasso_avg_metrics = lasso_cv_model.avgMetrics
lasso_avg_metrics_df = pd.DataFrame(
    [{'LASSO Regularization': reg_param, 'Area Under PR': avg_metric}
     for reg_param, avg_metric in zip(lasso_reg_params, lasso_avg_metrics)],
    columns=['LASSO Regularization','Area Under PR'])
lasso_avg_metrics_df
# Extract nonzero coefficients from best model.
coefficients = lasso_cv_model.bestModel.stages[-1].coefficientMatrix.toArray()[0]
# Keep only features LASSO did not shrink to (numerically) zero.
# Built in one pass; DataFrame.append was removed in pandas 2.0.
lasso_coefficients_df = pd.DataFrame(
    [{'Feature': feature, 'Coefficient': coefficient}
     for feature, coefficient in zip(feature_cols, coefficients)
     if np.abs(coefficient) > 0.000001],
    columns=['Feature','Coefficient'])
lasso_coefficients_df
# Extract intercept from best model.
intercept = lasso_cv_model.bestModel.stages[-1].interceptVector[0]
print('Intercept:',intercept)
# Extract parameters from best model.
param_map = lasso_cv_model.bestModel.stages[-1].extractParamMap()
for param, value in param_map.items():
    print(str(param.name), ' : ', str(value))
# +
# LASSO predictions on test data.
lasso_predict_test_sdf = lasso_cv_model.transform(test_sdf)
lasso_predict_test_df = lasso_predict_test_sdf.toPandas()
lasso_predict_test_df.head();
# P(label=1) for each test device. Series.iteritems was removed in
# pandas 2.0; iterating the column directly yields the same values.
lasso_probability = np.array(
    [value[1] for value in lasso_predict_test_df['probability']])
# +
# Confusion matrix for the LASSO model at a 0.4 probability threshold.
true_labels = lasso_predict_test_df['label'].values
pred_labels = np.zeros_like(lasso_probability)
pred_labels[lasso_probability > 0.4] = True
# True Positive (TP) Predict a label of 1 and the true label is 1.
TP = np.sum(np.logical_and(pred_labels == 1, true_labels == 1))
# True Negative (TN): Predict a label of 0 and the true label is 0.
TN = np.sum(np.logical_and(pred_labels == 0, true_labels == 0))
# False Positive (FP): Predict a label of 1 and the true label is 0.
FP = np.sum(np.logical_and(pred_labels == 1, true_labels == 0))
# False Negative (FN): Predict a label of 0 and the true label is 1.
FN = np.sum(np.logical_and(pred_labels == 0, true_labels == 1))
# NOTE(review): hard-coded counts (TP=64, TN=1036, FP=26, FN=42) from a
# previous run used to clobber the freshly computed values above; removed so
# the printed metrics reflect this run's predictions.
# How often are we right when we predict failure?
precision = TP / (TP + FP)
# How often do we catch failures?
recall = TP / (TP + FN)
print('TP: %i, FP: %i, TN: %i, FN: %i' % (TP,FP,TN,FN))
print('Precision: %f, Recall: %f' % (precision,recall))
# -
# -
# +
from pyspark.ml.classification import RandomForestClassifier
# Random forest baseline on the unscaled feature vector (no scaler stage:
# tree splits are invariant to monotone feature scaling).
rf = RandomForestClassifier(
    featuresCol='features',
    labelCol='label',
    predictionCol='prediction',
    probabilityCol='probability',
    rawPredictionCol='raw_prediction'
)
pipeline_rf = Pipeline(stages=transformers + [feature_assembler, rf])
# -
rf_model = pipeline_rf.fit(train_sdf)
# +
# rf predictions on test data.
rf_predict_test_sdf = rf_model.transform(test_sdf)
rf_predict_test_df = rf_predict_test_sdf.toPandas()
rf_predict_test_df.head();
# P(label=1) for each test device. Series.iteritems was removed in
# pandas 2.0; iterating the column directly yields the same values.
rf_probability = np.array(
    [value[1] for value in rf_predict_test_df['probability']])
# +
# Confusion matrix for the random forest at a 0.5 probability threshold.
# NOTE(review): the original had this computation commented out and instead
# hard-coded TP=64/TN=1036/FP=26/FN=42 — the LASSO model's numbers — so the
# printed "rf" metrics were wrong. Restored the live computation.
true_labels = rf_predict_test_df['label'].values
pred_labels = np.zeros_like(rf_probability)
pred_labels[rf_probability > 0.5] = True
# True Positive (TP) Predict a label of 1 and the true label is 1.
TP = np.sum(np.logical_and(pred_labels == 1, true_labels == 1))
# True Negative (TN): Predict a label of 0 and the true label is 0.
TN = np.sum(np.logical_and(pred_labels == 0, true_labels == 0))
# False Positive (FP): Predict a label of 1 and the true label is 0.
FP = np.sum(np.logical_and(pred_labels == 1, true_labels == 0))
# False Negative (FN): Predict a label of 0 and the true label is 1.
FN = np.sum(np.logical_and(pred_labels == 0, true_labels == 1))
# How often are we right when we predict failure?
precision = TP / (TP + FP)
# How often do we catch failures?
recall = TP / (TP + FN)
print('TP: %i, FP: %i, TN: %i, FN: %i' % (TP,FP,TN,FN))
print('Precision: %f, Recall: %f' % (precision,recall))
# -
# Recorded output from a previous run (was pasted as bare code, which is a
# SyntaxError; kept here as a comment for reference):
# TP: 54, FP: 17, TN: 1054, FN: 52
# Precision: 0.760563, Recall: 0.509434
# +
# prediction_df = prediction_sdf.select(['label','prediction','probability','raw_prediction']).toPandas()
# prediction_df.loc[prediction_df['label'] == 1]
# -
# ## Ridge Regression on LASSO-Selected Features
# +
# Take final features as those with nonzero LASSO coefficients, minus a
# handful of transformed-metric columns excluded by hand (presumably judged
# redundant — confirm rationale).
final_feature_cols = sorted(list(set(lasso_coefficients_df['Feature'].values) - {'ln_metric8','recip_metric8','neg_exp_metric2','recip_metric4','sqrt_metric3'}))
feature_assembler = VectorAssembler(
    inputCols=final_feature_cols,
    outputCol='features'
)
# Rebuild the pipeline with the reduced feature set (reuses scaler and lr).
pipeline = Pipeline(stages=transformers + [feature_assembler, scaler, lr])
# +
# elasticNetParam=0.0 selects pure L2 (ridge) regularization; sweep strength.
ridge_reg_params = [0.0, 0.001, 0.01, 0.1, 1.0]
ridge_param_grid = ParamGridBuilder() \
    .addGrid(lr.elasticNetParam, [0.0]) \
    .addGrid(lr.regParam, ridge_reg_params) \
    .build()
ridge_cv = StratifiedCrossValidator(
    estimator=pipeline,
    estimatorParamMaps=ridge_param_grid,
    evaluator=BinaryClassificationEvaluator(metricName='areaUnderPR', rawPredictionCol='raw_prediction', labelCol='label'),
    numFolds=5,
    collectSubModels=True
)
# -
ridge_cv_model = ridge_cv.fit(train_sdf)
# Construct ridge regularization strength vs area under precision-recall curve dataframe.
# Built in one pass; DataFrame.append was removed in pandas 2.0.
ridge_avg_metrics = ridge_cv_model.avgMetrics
ridge_avg_metrics_df = pd.DataFrame(
    [{'Ridge Regularization': reg_param, 'Area Under PR': avg_metric}
     for reg_param, avg_metric in zip(ridge_reg_params, ridge_avg_metrics)],
    columns=['Ridge Regularization','Area Under PR'])
ridge_avg_metrics_df
# Extract coefficients from best model.
ridge_coefficients = ridge_cv_model.bestModel.stages[-1].coefficientMatrix.toArray()[0]
# Fix: zip against `ridge_coefficients` — the original paired the feature
# names with the stale LASSO `coefficients` array, reporting wrong values.
ridge_coefficients_df = pd.DataFrame(
    [{'Feature': feature, 'Coefficient': coefficient}
     for feature, coefficient in zip(final_feature_cols, ridge_coefficients)],
    columns=['Feature','Coefficient'])
ridge_coefficients_df
# Extract intercept from best model.
intercept = ridge_cv_model.bestModel.stages[-1].interceptVector[0]
print('Intercept:',intercept)
# Extract parameters from best model.
param_map = ridge_cv_model.bestModel.stages[-1].extractParamMap()
for param, value in param_map.items():
    print(str(param.name), ' : ', str(value))
# Ridge predictions on test data.
ridge_predict_test_sdf = ridge_cv_model.transform(test_sdf)
ridge_predict_test_df = ridge_predict_test_sdf.toPandas()
ridge_predict_test_df.head()
# +
# P(label=1) for each test device. Series.iteritems was removed in
# pandas 2.0; iterating the column directly yields the same values.
# (The duplicate toPandas() call was dropped — the frame above is reused.)
probability = np.array(
    [value[1] for value in ridge_predict_test_df['probability']])
# -
probability
# +
# Confusion matrix for the ridge model at a 0.9 probability threshold.
true_labels = ridge_predict_test_df['label'].values
pred_labels = np.zeros_like(probability)
pred_labels[probability > 0.9] = True
# True Positive (TP) Predict a label of 1 and the true label is 1.
TP = np.sum(np.logical_and(pred_labels == 1, true_labels == 1))
# True Negative (TN): Predict a label of 0 and the true label is 0.
TN = np.sum(np.logical_and(pred_labels == 0, true_labels == 0))
# False Positive (FP): Predict a label of 1 and the true label is 0.
FP = np.sum(np.logical_and(pred_labels == 1, true_labels == 0))
# False Negative (FN): Predict a label of 0 and the true label is 1.
FN = np.sum(np.logical_and(pred_labels == 0, true_labels == 1))
# NOTE(review): hard-coded counts (TP=64, TN=1036, FP=26, FN=42) used to
# clobber the computed values above; removed so the printed metrics reflect
# this run's predictions.
# How often are we right when we predict failure?
precision = TP / (TP + FP)
# How often do we catch failures?
recall = TP / (TP + FN)
print('TP: %i, FP: %i, TN: %i, FN: %i' % (TP,FP,TN,FN))
print('Precision: %f, Recall: %f' % (precision,recall))
# -
# Recorded output from a previous run (was pasted as bare code, which is a
# SyntaxError; kept here as a comment for reference):
# TP: 18, FP: 2310, TN: 22729, FN: 6
# Precision: 0.007732, Recall: 0.750000
# +
from pyspark.ml.evaluation import BinaryClassificationEvaluator
# Threshold-free evaluation of the ridge model on the held-out test set.
# Fixes from review: the original evaluated `prediction_sdf`, which is never
# defined in this notebook, and used the default 'rawPrediction' column name
# while the model writes 'raw_prediction'.
evaluator = BinaryClassificationEvaluator(metricName='areaUnderROC', rawPredictionCol='raw_prediction', labelCol='label')
area_under_roc = evaluator.evaluate(ridge_predict_test_sdf)
print('Area under ROC = %g' % area_under_roc)
evaluator = BinaryClassificationEvaluator(metricName='areaUnderPR', rawPredictionCol='raw_prediction', labelCol='label')
area_under_pr = evaluator.evaluate(ridge_predict_test_sdf)
print('Area under PR = %g' % area_under_pr)
# -
# -
pipeline.fit(telemetry_sdf).transform(telemetry_sdf)
model = pipeline.fit(telemetry_train)
prediction = model.transform(telemetry_test)
prediction.toPandas()
selected = prediction.select("id", "text", "probability", "prediction")
for row in selected.collect():
rid, text, prob, prediction = row
print("(%d, %s) --> prob=%s, prediction=%f" % (rid, text, str(prob), prediction))
# ### Natural log feature transformer
# +
ln_sql = \
'''
SELECT
*,
-- Natural log transform. Add 1 to avoid LN(0) errors.
LN(metric1 + 1) AS ln_metric1,
LN(metric2 + 1) AS ln_metric2,
LN(metric3 + 1) AS ln_metric3,
LN(metric4 + 1) AS ln_metric4,
LN(metric5 + 1) AS ln_metric5,
LN(metric6 + 1) AS ln_metric6,
LN(metric7 + 1) AS ln_metric7,
LN(metric8 + 1) AS ln_metric8,
LN(metric9 + 1) AS ln_metric9,
LN(age + 1) AS ln_age
FROM telemetry
'''
ln_transformer = SQLTransformer(statement=ln_sql.replace('telemetry','__THIS__'))
spark.sql(ln_sql.replace('telemetry','(' + age_sql + ')') + 'LIMIT 5').toPandas()
# -
# ### Negative exponential feature transformer
# +
neg_exp_sql = \
'''
SELECT
*,
-- Negative exponential transform.
EXP(-metric1) AS neg_exp_metric1,
EXP(-metric2) AS neg_exp_metric2,
EXP(-metric3) AS neg_exp_metric3,
EXP(-metric4) AS neg_exp_metric4,
EXP(-metric5) AS neg_exp_metric5,
EXP(-metric6) AS neg_exp_metric6,
EXP(-metric7) AS neg_exp_metric7,
EXP(-metric8) AS neg_exp_metric8,
EXP(-metric9) AS neg_exp_metric9,
EXP(-age) AS neg_exp_age
FROM telemetry
'''
neg_exp_transformer = SQLTransformer(statement=neg_exp_sql.replace('telemetry','__THIS__'))
spark.sql(neg_exp_sql.replace('telemetry','(' + age_sql + ')') + 'LIMIT 5').toPandas()
# -
# ### Square root feature transformer
# +
sqrt_sql = \
'''
SELECT
*,
-- Square root transform.
SQRT(metric1) AS sqrt_metric1,
SQRT(metric2) AS sqrt_metric2,
SQRT(metric3) AS sqrt_metric3,
SQRT(metric4) AS sqrt_metric4,
SQRT(metric5) AS sqrt_metric5,
SQRT(metric6) AS sqrt_metric6,
SQRT(metric7) AS sqrt_metric7,
SQRT(metric8) AS sqrt_metric8,
SQRT(metric9) AS sqrt_metric9,
SQRT(age) AS sqrt_age
FROM telemetry
'''
sqrt_transformer = SQLTransformer(statement=sqrt_sql.replace('telemetry','__THIS__'))
spark.sql(sqrt_sql.replace('telemetry','(' + age_sql + ')') + 'LIMIT 5').toPandas()
# -
# ### Reciprocal feature transformer
# +
reciprocal_sql = \
'''
SELECT
*,
-- Reciprocal transform. Add 1 to avoid divide by 0 errors.
POW(metric1 + 1, -1) AS recip_metric1,
POW(metric2 + 1, -1) AS recip_metric2,
POW(metric3 + 1, -1) AS recip_metric3,
POW(metric4 + 1, -1) AS recip_metric4,
POW(metric5 + 1, -1) AS recip_metric5,
POW(metric6 + 1, -1) AS recip_metric6,
POW(metric7 + 1, -1) AS recip_metric7,
POW(metric8 + 1, -1) AS recip_metric8,
POW(metric9 + 1, -1) AS recip_metric9,
POW(age + 1, -1) AS recip_age
FROM telemetry
'''
reciprocal_transformer = SQLTransformer(statement=reciprocal_sql.replace('telemetry','__THIS__'))
spark.sql(reciprocal_sql.replace('telemetry','(' + age_sql + ')') + 'LIMIT 5').toPandas()
# -
# ### Age feature transformer
#
# Create an age feature transformer that adds an "age" column representing the age (days since first telemetry reading) of a device.
# +
spark.sql(age_sql + 'WHERE telemetry.label = 0 LIMIT 5').toPandas()
spark.sql(age_sql + 'WHERE telemetry.label = 1 LIMIT 5').toPandas()
age_transformer = SQLTransformer(statement=age_sql.replace('telemetry','__THIS__'))
# -
# ### Device class feature transformer
# +
device_class_sql = \
'''
SELECT
*,
CASE WHEN SUBSTRING(id, 1, 1) = 'S' THEN 1 ELSE 0 END AS S,
CASE WHEN SUBSTRING(id, 1, 1) = 'W' THEN 1 ELSE 0 END AS W,
CASE WHEN SUBSTRING(id, 1, 1) = 'Z' THEN 1 ELSE 0 END AS Z
FROM telemetry
'''
device_class_transformer = SQLTransformer(statement=device_class_sql.replace('telemetry','__THIS__'))
spark.sql('SELECT SUM(S) AS S, SUM(W) AS W, SUM(Z) AS Z FROM (' + device_class_sql + ') GROUP BY label').toPandas()
# --- end of notebooks/predictive-maintenance.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <p><font size="6"><b>Spatial operations and overlays: creating new geometries</b></font></p>
#
#
# > *DS Python for GIS and Geoscience*
# > *October, 2021*
# >
# > *© 2021, <NAME> and <NAME> (<mailto:<EMAIL>>, <mailto:<EMAIL>>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*
#
# ---
# In the previous notebook we have seen how to identify and use the spatial relationships between geometries. In this notebook, we will see how to create new geometries based on those relationships.
import pandas as pd
import geopandas
import matplotlib.pyplot as plt
# Natural Earth layers: country polygons, populated places, river centerlines.
countries = geopandas.read_file("zip://./data/ne_110m_admin_0_countries.zip")
cities = geopandas.read_file("zip://./data/ne_110m_populated_places.zip")
rivers = geopandas.read_file("zip://./data/ne_50m_rivers_lake_centerlines.zip")
# defining the same example geometries as in the previous notebook
# .item() extracts the single shapely geometry from the one-row selection.
belgium = countries.loc[countries['name'] == 'Belgium', 'geometry'].item()
brussels = cities.loc[cities['name'] == 'Brussels', 'geometry'].item()
# ## Spatial operations
#
# Next to the spatial predicates that return boolean values, Shapely and GeoPandas also provide operations that return new geometric objects.
#
# **Binary operations:**
#
# <table><tr>
# <td> <img src="../img/spatial-operations-base.png"/> </td>
# <td> <img src="../img/spatial-operations-intersection.png"/> </td>
# </tr>
# <tr>
# <td> <img src="../img/spatial-operations-union.png"/> </td>
# <td> <img src="../img/spatial-operations-difference.png"/> </td>
# </tr></table>
#
# **Buffer:**
#
# <table><tr>
# <td> <img src="../img/spatial-operations-buffer-point1.png"/> </td>
# <td> <img src="../img/spatial-operations-buffer-point2.png"/> </td>
# </tr>
# <tr>
# <td> <img src="../img/spatial-operations-buffer-line.png"/> </td>
# <td> <img src="../img/spatial-operations-buffer-polygon.png"/> </td>
# </tr></table>
#
#
# See https://shapely.readthedocs.io/en/stable/manual.html#spatial-analysis-methods for more details.
# For example, using the toy data from above, let's construct a buffer around Brussels (which returns a Polygon):
# + jupyter={"outputs_hidden": false}
# Plot Belgium with a 1-degree buffer polygon around the Brussels point.
geopandas.GeoSeries([belgium, brussels.buffer(1)]).plot(alpha=0.5, cmap='tab10')
# -
# and now take the intersection, union or difference of those two polygons:
# + jupyter={"outputs_hidden": false}
brussels.buffer(1).intersection(belgium)
# + jupyter={"outputs_hidden": false}
brussels.buffer(1).union(belgium)
# + jupyter={"outputs_hidden": false}
brussels.buffer(1).difference(belgium)
# -
# ### Spatial operations with GeoPandas
# Above we showed how to create a new geometry based on two individual shapely geometries. The same operations can be extended to GeoPandas. Given a GeoDataFrame, we can calculate the intersection, union or difference of each of the geometries with another geometry.
#
# Let's look at an example with a subset of the countries. We have a GeoDataFrame with the country polygons of Africa, and now consider a rectangular polygon, representing an area around the equator:
# Subset of the countries dataset with only the African countries.
africa = countries[countries.continent == 'Africa']
from shapely.geometry import LineString
# Rectangular band around the equator: buffer a west-east line by 10 degrees;
# cap_style=3 gives square line ends, so the buffer is a rectangle.
box = LineString([(-10, 0), (50, 0)]).buffer(10, cap_style=3)
# + jupyter={"outputs_hidden": false}
# Plot the African country outlines with the equatorial rectangle on top.
fig, ax = plt.subplots(figsize=(6, 6))
africa.plot(ax=ax, facecolor='none', edgecolor='k')
geopandas.GeoSeries([box]).plot(ax=ax, facecolor='C0', edgecolor='k', alpha=0.5)
# -
# The intersection method of the GeoDataFrame will now calculate the intersection with the rectangle for each of the geometries of the africa GeoDataFrame element-wise. Note that for many of the countries, those that do not overlap with the rectangle, this will be an empty geometry:
# + jupyter={"outputs_hidden": false}
# Element-wise intersection: each country geometry is clipped to the box;
# countries that do not overlap the box produce empty geometries.
africa_intersection = africa.intersection(box)
africa_intersection.head()
# -
# What is returned is a new GeoSeries of the same length as the original dataframe, containing one row per country, but now containing only the intersection. In this example, the last element shown is an empty polygon, as that country was not overlapping with the box.
# + jupyter={"outputs_hidden": false}
# remove the empty polygons before plotting (is_empty flags them)
africa_intersection = africa_intersection[~africa_intersection.is_empty]
# plot the intersection
africa_intersection.plot()
# -
# # Unary union and dissolve
#
# Another useful method is the `unary_union` attribute, which converts the set of geometry objects in a GeoDataFrame into a single geometry object by taking the union of all those geometries.
#
# For example, we can construct a single Shapely geometry object for the Africa continent:
# Select the African countries and merge all their polygons into a single
# (Multi)Polygon geometry with unary_union.
africa_countries = countries[countries['continent'] == 'Africa']
africa = africa_countries.unary_union
# + jupyter={"outputs_hidden": false}
# Display the merged geometry (rendered graphically in the notebook).
africa
# + jupyter={"outputs_hidden": false}
# The WKT representation is huge; show only the first 1000 characters.
print(str(africa)[:1000])
# -
# Alternatively, you might want to take the unary union of a set of geometries but *grouped* by one of the attributes of the GeoDataFrame (so basically doing "groupby" + "unary_union"). For this operation, GeoPandas provides the `dissolve()` method:
# "groupby" + "unary_union" in one step: merge country geometries per continent.
continents = countries.dissolve(by="continent")  # pass e.g. aggfunc="sum" to aggregate attribute columns
# + jupyter={"outputs_hidden": false}
continents
# -
# <div class="alert alert-info" style="font-size:120%">
#
# **REMEMBER**:
#
# GeoPandas (and Shapely for the individual objects) provide a whole lot of basic methods to analyze the geospatial data (distance, length, centroid, boundary, convex_hull, simplify, transform, ....), much more than what we can touch in this tutorial.
#
# An overview of all methods provided by GeoPandas can be found here: https://geopandas.readthedocs.io/en/latest/docs/reference.html
#
#
# </div>
# ## Let's practice!
# <div class="alert alert-success">
#
# **EXERCISE: What are the districts close to the Seine?**
#
# Below, the coordinates for the Seine river in the neighborhood of Paris are provided as a GeoJSON-like feature dictionary (created at http://geojson.io).
#
# Based on this `seine` object, we want to know which districts are located close (maximum 150 m) to the Seine.
#
# * Create a buffer of 150 m around the Seine.
# * Check which districts intersect with this buffered object.
# * Make a visualization of the districts indicating which districts are located close to the Seine.
#
# </div>
# + clear_cell=false
# Paris districts, re-projected to the French Lambert-93 CRS (EPSG:2154, metres).
districts = geopandas.read_file("data/paris_districts.geojson").to_crs(epsg=2154)
# + clear_cell=false
# created a line with http://geojson.io
s_seine = geopandas.GeoDataFrame.from_features({"type":"FeatureCollection","features":[{"type":"Feature","properties":{},"geometry":{"type":"LineString","coordinates":[[2.408924102783203,48.805619828930226],[2.4092674255371094,48.81703747481909],[2.3927879333496094,48.82325391133874],[2.360687255859375,48.84912860497674],[2.338714599609375,48.85827758964043],[2.318115234375,48.8641501307046],[2.298717498779297,48.863246707697],[2.2913360595703125,48.859519915404825],[2.2594070434570312,48.8311646245967],[2.2436141967773438,48.82325391133874],[2.236919403076172,48.82347994904826],[2.227306365966797,48.828339513221444],[2.2224998474121094,48.83862215329593],[2.2254180908203125,48.84856379804802],[2.2240447998046875,48.85409863123821],[2.230224609375,48.867989496547864],[2.260265350341797,48.89192242750887],[2.300262451171875,48.910203080780285]]}}]},
crs='EPSG:4326')
# -
# Re-project the Seine line to EPSG:2154 (RGF93 / Lambert-93 — a projected CRS
# in metres, not UTM) so distance-based operations like a 150 m buffer work.
s_seine_utm = s_seine.to_crs(epsg=2154)
# + jupyter={"outputs_hidden": false}
import matplotlib.pyplot as plt
# Plot the districts in grey with the Seine line on top.
fig, ax = plt.subplots(figsize=(20, 10))
districts.plot(ax=ax, color='grey', alpha=0.4, edgecolor='k')
s_seine_utm.plot(ax=ax)
# -
# access the single geometry object (the GeoDataFrame holds exactly one row)
seine = s_seine_utm.geometry.item()
seine
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays1.py
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays2.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays3.py
# -
# ------
# <div class="alert alert-success">
#
# **EXERCISE: Exploring a Land Use dataset**
#
# For the following exercises, we first introduce a new dataset: a dataset about the land use of Paris (a simplified version based on the open European [Urban Atlas](https://land.copernicus.eu/local/urban-atlas)). The land use indicates for what kind of activity a certain area is used, such as residential area or for recreation. It is a polygon dataset, with a label representing the land use class for different areas in Paris.
#
# In this exercise, we will read the data, explore it visually, and calculate the total area of the different classes of land use in the area of Paris.
#
# * Read in the `'paris_land_use.shp'` file and assign the result to a variable `land_use`.
# * Make a plot of `land_use`, using the `'class'` column to color the polygons. Add a legend with `legend=True`, and make the figure size a bit larger.
# * Add a new column `'area'` to the dataframe with the area of each polygon.
# * Calculate the total area in km² for each `'class'` using the `groupby()` method, and print the result.
#
# <details><summary>Hints</summary>
#
# * Reading a file can be done with the `geopandas.read_file()` function.
# * To use a column to color the geometries, use the `column` keyword to indicate the column name.
# * The area of each geometry can be accessed with the `area` attribute of the `geometry` of the GeoDataFrame.
# * The `groupby()` method takes the column name on which you want to group as the first argument.
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays4.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays5.py
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays6.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays7.py
# -
# <div class="alert alert-success">
#
# **EXERCISE: Intersection of two polygons**
#
# For this exercise, we are going to use 2 individual polygons: the district of Muette extracted from the `districts` dataset, and the green urban area of Boulogne, a large public park in the west of Paris, extracted from the `land_use` dataset. The two polygons have already been assigned to the `muette` and `park_boulogne` variables.
#
# We first visualize the two polygons. You will see that they overlap, but the park is not fully located in the district of Muette. Let's determine the overlapping part:
#
# * Plot the two polygons in a single map to examine visually the degree of overlap
# * Calculate the intersection of the `park_boulogne` and `muette` polygons.
# * Plot the intersection.
# * Print the proportion of the area of the district that is occupied by the park.
#
# <details><summary>Hints</summary>
#
# * To plot single Shapely objects, you can put those in a `GeoSeries([..])` to use the GeoPandas `plot()` method.
# * The intersection of two scalar polygons can be calculated with the `intersection()` method of one of the polygons, and passing the other polygon as the argument to that method.
#
# </details>
#
# </div>
land_use = geopandas.read_file("data/paris_land_use.zip")
# Align the districts CRS with the land use layer so the geometries are comparable.
districts = geopandas.read_file("data/paris_districts.geojson").to_crs(land_use.crs)
# extract polygons
land_use['area'] = land_use.geometry.area
# Largest "Green urban areas" polygon (sort ascending by area, take the last):
# per the exercise text, this is the Boulogne park.
park_boulogne = land_use[land_use['class'] == "Green urban areas"].sort_values('area').geometry.iloc[-1]
muette = districts[districts.district_name == 'Muette'].geometry.item()
# + jupyter={"outputs_hidden": false}
# Plot the two polygons
geopandas.GeoSeries([park_boulogne, muette]).plot(alpha=0.5, color=['green', 'blue'])
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays8.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays9.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays10.py
# -
# <div class="alert alert-success">
#
# **EXERCISE: Intersecting a GeoDataFrame with a Polygon**
#
# Combining the land use dataset and the districts dataset, we can now investigate what the land use is in a certain district.
#
# For that, we first need to determine the intersection of the land use dataset with a given district. Let's take again the *Muette* district as example case.
#
# * Calculate the intersection of the `land_use` polygons with the single `muette` polygon. Call the result `land_use_muette`.
# * Remove the empty geometries from `land_use_muette`.
# * Make a quick plot of this intersection, and pass `edgecolor='black'` to more clearly see the boundaries of the different polygons.
# * Print the first five rows of `land_use_muette`.
#
# <details><summary>Hints</summary>
#
# * The intersection of each geometry of a GeoSeries with another single geometry can be performed with the `intersection()` method of a GeoSeries.
# * The `intersection()` method takes as argument the geometry for which to calculate the intersection.
# * We can check which geometries are empty with the `is_empty` attribute of a GeoSeries.
#
# </details>
#
# </div>
# Re-read the datasets for this exercise and align the districts CRS with the
# land use layer before extracting the Muette polygon.
land_use = geopandas.read_file("data/paris_land_use.zip")
districts = geopandas.read_file("data/paris_districts.geojson").to_crs(land_use.crs)
muette = districts[districts.district_name == 'Muette'].geometry.item()
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays11.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays12.py
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays13.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays14.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays15.py
# -
# You can see in the plot that we now only have a subset of the full land use dataset. The original `land_use_muette` (before removing the empty geometries) still has the same number of rows as the original `land_use`, though. But many of the rows, as you could see by printing the first rows, consist now of empty polygons when it did not intersect with the Muette district.
#
# The `intersection()` method also returned only geometries. If we want to combine those intersections with the attributes of the original land use, we can take a copy of this and replace the geometries with the intersections (you can uncomment and run to see the code):
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays16.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays17.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays18.py
# -
# <div class="alert alert-success">
#
# **EXERCISE: The land use of the Muette district**
#
# Based on the `land_use_muette` dataframe with the land use for the Muette districts as calculated above, we can now determine the total area of the different land use classes in the Muette district.
#
# * Calculate the total area per land use class.
# * Calculate the fraction (in percentage) for the different land use classes.
#
# <details><summary>Hints</summary>
#
# * The area of each geometry can be accessed with the `area` attribute of the geometries of the GeoDataFrame.
# * The `groupby()` method takes the column name on which you want to group as the first argument.
# * The total area for each class can be calculated by taking the `sum()` of the area.
#
# </details>
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays19.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays20.py
# -
# The above was only for a single district. If we want to do this more easily for all districts, we can do this with the overlay operation.
# ## The overlay operation
#
# In a spatial join operation, we are not changing the geometries themselves. We are not joining geometries, but joining attributes based on a spatial relationship between the geometries. This also means that the geometries need to at least overlap partially.
#
# If you want to create new geometries based on joining (combining) geometries of different dataframes into one new dataframe (eg by taking the intersection of the geometries), you want an **overlay** operation.
# ### How does it differ compared to the intersection method?
# With the `intersection()` method introduced in the previous section, we could for example determine the intersection of a set of countries with another polygon, a circle in the example below:
#
# <img width="70%" src="../img/geopandas/chapter3-overlay-countries-circle-intersection-new.png"/>
# However, this method (`countries.intersection(circle)`) also has some limitations.
#
# * Mostly useful when intersecting a GeoSeries with a single polygon.
# * Does not preserve attribute information of the intersecting polygons.
#
# For cases where we require a bit more complexity, it is preferable to use the "overlay" operation, instead of the intersection method.
# Consider the following simplified example. On the left we see again the 3 countries. On the right we have the plot of a GeoDataFrame with some simplified geologic regions for the same area:
#
# <table width="80%"><tr>
# <td> <img src="../img/geopandas/chapter3-overlay-countries.png"/> </td>
# <td> <img src="../img/geopandas/chapter3-overlay-regions.png"/> </td>
# </tr></table>
#
# By simply plotting them on top of each other, as shown below, you can see that the polygons of both layers intersect.
#
# But now, by "overlaying" the two layers, we can create a third layer that contains the result of intersecting both layers: all the intersections of each country with each geologic region. It keeps only those areas that were included in both layers.
#
# <table width="80%"><tr>
# <td> <img src="../img/geopandas/chapter3-overlay-both.png"/> </td>
# <td> <img src="../img/geopandas/chapter3-overlay-overlayed.png"/> </td>
# </tr></table>
#
# This operation is called an intersection overlay, and in GeoPandas we can perform this operation with the `geopandas.overlay()` function.
# Another code example:
africa = countries[countries['continent'] == 'Africa']
# + jupyter={"outputs_hidden": false}
africa.plot()
# + jupyter={"outputs_hidden": false}
# Replace the city point geometries with 2-degree buffers so the cities become
# polygons that can overlap the country polygons.
cities['geometry'] = cities.buffer(2)
# + jupyter={"outputs_hidden": false}
# Overlay: all non-empty intersections of each country with each city region,
# keeping the attributes of both input layers.
intersection = geopandas.overlay(africa, cities, how='intersection')
intersection.plot()
# + jupyter={"outputs_hidden": false}
intersection.head()
# -
# With the overlay method, we pass the full GeoDataFrame with all regions to intersect the countries with. The result contains all non-empty intersections of all combinations of countries and city regions.
#
# Note that the result of the overlay function also keeps the attribute information of both the countries and the city regions. That can be very useful for further analysis.
# + jupyter={"outputs_hidden": false}
# Other `how` modes are available: "difference", "union", "symmetric_difference".
geopandas.overlay(africa, cities, how='intersection').plot()
# -
# <div class="alert alert-info" style="font-size:120%">
# <b>REMEMBER</b> <br>
#
# * **Spatial join**: transfer attributes from one dataframe to another based on the spatial relationship
# * **Spatial overlay**: construct new geometries based on spatial operation between both dataframes (and combining attributes of both dataframes)
#
# </div>
# ## Let's practice!
# <div class="alert alert-success">
#
# **EXERCISE: Overlaying spatial datasets I**
#
# We will now combine both datasets in an overlay operation. Create a new `GeoDataFrame` consisting of the intersection of the land use polygons with each of the districts, but make sure to bring the attribute data from both source layers.
#
# * Create a new GeoDataFrame from the intersections of `land_use` and `districts`. Assign the result to a variable `combined`.
# * Print the first rows of the resulting GeoDataFrame (`combined`).
#
# <details><summary>Hints</summary>
#
# * The intersection of two GeoDataFrames can be calculated with the `geopandas.overlay()` function.
# * The `overlay()` function takes the two GeoDataFrames to combine as its first two arguments, and a third `how` keyword indicating how to combine the two layers.
# * For making an overlay based on the intersection, you can pass `how='intersection'`.
#
# </details>
#
# </div>
# Load the land use data and re-project the districts to the same CRS.
land_use = geopandas.read_file("data/paris_land_use.zip")
districts = geopandas.read_file("data/paris_districts.geojson").to_crs(land_use.crs)
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays21.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays22.py
# -
# <div class="alert alert-success">
#
# **EXERCISE: Overlaying spatial datasets II**
#
# Now that we created the overlay of the land use and districts datasets, we can more easily inspect the land use for the different districts. Let's get back to the example district of Muette, and inspect the land use of that district.
#
# * Add a new column `'area'` with the area of each polygon to the `combined` GeoDataFrame.
# * Create a subset called `land_use_muette` where the `'district_name'` is equal to "Muette".
# * Make a plot of `land_use_muette`, using the `'class'` column to color the polygons.
# * Calculate the total area for each `'class'` of `land_use_muette` using the `groupby()` method, and print the result.
#
# <details><summary>Hints</summary>
#
# * The area of each geometry can be accessed with the `area` attribute of the `geometry` of the GeoDataFrame.
# * To use a column to color the geometries, pass its name to the `column` keyword.
# * The `groupby()` method takes the column name on which you want to group as the first argument.
# * The total area for each class can be calculated by taking the `sum()` of the area.
#
# </details>
#
# </div>
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays23.py
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays24.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays25.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays26.py
# -
# <div class="alert alert-success">
#
# **EXERCISE: Overlaying spatial datasets III**
#
# Thanks to the result of the overlay operation, we can now more easily perform a similar analysis for *all* districts. Let's investigate the fraction of green urban area in each of the districts.
#
# * Based on the `combined` dataset, calculate the total area per district using `groupby()`.
# * Select the subset of "Green urban areas" from `combined` and call this `urban_green`.
# * Now calculate the total area per district for this `urban_green` subset, and call this `urban_green_area`.
# * Determine the fraction of urban green area in each district.
#
# </div>
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays27.py
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays28.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays29.py
# + tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays30.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays31.py
# + jupyter={"outputs_hidden": false} tags=["nbtutor-solution"]
# # %load _solutions/05-spatial-operations-overlays32.py
# -
# An alternative to calculate the area per land use class in each district:
# + jupyter={"outputs_hidden": false}
# Total area per (district, land-use class) pair, returned as a flat DataFrame.
combined.groupby(["district_name", "class"])["area"].sum().reset_index()
# -
| notebooks/05-spatial-operations-overlays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Pandas library for the pandas dataframes
import pandas as pd
from pandas import DataFrame
import numpy as np
import scipy.stats as stats
import scipy
# Import Scikit-Learn library for decision tree models
import sklearn
from sklearn import datasets, linear_model, metrics, tree
from sklearn.utils import resample
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import BaggingRegressor, ExtraTreesRegressor, GradientBoostingRegressor, RandomForestRegressor
from sklearn.linear_model import ElasticNet, Lasso, RidgeCV
from sklearn.neighbors import KNeighborsRegressor
# Dataset splitting
from sklearn.model_selection import train_test_split, LeaveOneOut, KFold, cross_validate, RandomizedSearchCV
# Errors
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, accuracy_score
from matplotlib import pyplot as plt
from tqdm import tqdm
import os
import joblib
# -
# Load the feature-scaled dataset used to predict particle diameter.
df_dia = pd.read_csv("dataset_scaled_diameter.csv")
# Bare expression displays the dataframe in the notebook for a quick sanity check.
df_dia
# +
# Input features for the ML models: reagent amounts and reaction conditions,
# followed by one-hot encoded categorical columns (x0..x8 identify the indium
# source, phosphorus source, solvent, ligands, zinc source, and other additives),
# plus the measured optical properties.
input_col = [
    'in_amount_mmol', 'p_amount_mmol', 'sol_amount_ml', 'TOP_amount_mmol',
    'acid_amount_mmol', 'amine_amount_mmol', 'thiol_amount_mmol',
    'zinc_amount_mmol', 'other_amount_mmol', 'total_volume_ml',
    'temp_c', 'time_min',
    'x0_indium acetate', 'x0_indium bromide', 'x0_indium chloride',
    'x0_indium iodide', 'x0_indium myristate', 'x0_indium trifluoroacetate',
    'x1_bis(trimethylsilyl)phosphine', 'x1_phosphorus trichloride',
    'x1_tris(diethylamino)phosphine', 'x1_tris(dimethylamino)phosphine',
    'x1_tris(trimethylgermyl)phosphine', 'x1_tris(trimethylsilyl)phosphine',
    'x2_None', 'x2_octadecene', 'x2_toluene',
    'x3_None', 'x3_trioctylphosphine',
    'x4_None', 'x4_lauric acid', 'x4_myristic acid', 'x4_oleic acid',
    'x4_palmitic acid', 'x4_stearic acid',
    'x5_None', 'x5_dioctylamine', 'x5_dodecylamine', 'x5_hexadecylamine',
    'x5_octylamine', 'x5_oleylamine',
    'x6_None',
    'x7_None', 'x7_zinc bromide', 'x7_zinc chloride', 'x7_zinc iodide',
    'x7_zinc oleate', 'x7_zinc stearate', 'x7_zinc undecylenate',
    'x8_None', 'x8_acetic acid', 'x8_copper bromide', 'x8_superhydride',
    'x8_tetrabutylammonium myristate', 'x8_trioctylamine',
    'x8_trioctylphosphine oxide', 'x8_water', 'x8_zinc iodide',
    'abs_nm', 'emission_nm',
]
# Regression target.
output_col = ['diameter_nm']
X = df_dia[input_col]
Y = df_dia[output_col]
# -
# Hold out 15% of the rows as a test set; fixed random_state makes the split reproducible.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15, random_state=45, shuffle=True)
# ### 1. Bagging
# +
# Exhaustive grid search over three Bagging hyper-parameters:
# max_depth (of the base decision tree), n_estimators and random_state.
# The combination with the smallest mean absolute error on the test set is kept.
best_mae = 99999
best_depth, best_n, best_seed = 0, 0, 0
for depth in tqdm(range(1, 21)):
    for n_est in range(1, 21):
        for seed in range(5, 50, 2):
            model = BaggingRegressor(
                base_estimator=DecisionTreeRegressor(max_depth=depth),
                n_estimators=n_est,
                random_state=seed,
            )
            model.fit(X_train, np.ravel(Y_train))
            mae = mean_absolute_error(Y_test, model.predict(X_test))
            # Strict comparison: ties keep the earliest combination found.
            if mae < best_mae:
                best_mae = mae
                best_depth, best_n, best_seed = depth, n_est, seed
print(best_mae, best_depth, best_n, best_seed)
# -
# ### 2. Decision Trees
# +
# Exhaustive grid search over three Decision Tree hyper-parameters:
# max_depth, max_features and random_state.
# The combination with the smallest mean absolute error on the test set is kept.
best_mae = 99999
best_depth, best_feat, best_seed = 0, 0, 0
for depth in tqdm(range(1, 21)):
    for feat in range(1, 21):
        for seed in range(5, 60, 2):
            tree_model = DecisionTreeRegressor(max_depth=depth,
                                               max_features=feat,
                                               random_state=seed)
            tree_model.fit(X_train, Y_train)
            mae = mean_absolute_error(Y_test, tree_model.predict(X_test))
            # Strict comparison: ties keep the earliest combination found.
            if mae < best_mae:
                best_mae = mae
                best_depth, best_feat, best_seed = depth, feat, seed
print(best_mae, best_depth, best_feat, best_seed)
# -
# ### 3. Random Forest
# +
# Exhaustive grid search over three Random Forest hyper-parameters:
# max_depth, n_estimators and max_features, with random_state pinned to 45.
# The combination with the smallest mean absolute error on the test set is kept.
best_mae = 99999
best_depth, best_n, best_feat = 0, 0, 0
for depth in tqdm(range(1, 21)):
    for n_est in range(1, 21):
        for feat in range(2, 50, 2):
            forest = RandomForestRegressor(
                max_depth=depth,
                n_estimators=n_est,
                max_features=feat,
                random_state=45,
            )
            forest.fit(X_train, np.ravel(Y_train))
            mae = mean_absolute_error(Y_test, forest.predict(X_test))
            # Strict comparison: ties keep the earliest combination found.
            if mae < best_mae:
                best_mae = mae
                best_depth, best_n, best_feat = depth, n_est, feat
print(best_mae, best_depth, best_n, best_feat)
# +
# Re-fit a Random Forest with the best hyper-parameters found by the search
# above (max_depth=5, a single tree, max_features=26, fixed random_state=45).
RF_regr = RandomForestRegressor(
    max_depth=5, n_estimators=1, max_features=26, random_state=45
)
RF_regr.fit(X_train, np.ravel(Y_train))
RF_Y_pred = RF_regr.predict(X_test)
mae = mean_absolute_error(Y_test, RF_Y_pred)
# Bare expression so the notebook cell displays the test-set MAE.
mae
# -
# ### 4. Extra Trees
# +
# Exhaustive grid search over three Extra Trees hyper-parameters:
# n_estimators, max_features and random_state.
# The combination with the smallest mean absolute error on the test set is kept.
best_mae = 99999
best_n, best_feat, best_seed = 0, 0, 0
for n_est in tqdm(range(1, 21)):
    for feat in range(1, 21):
        for seed in range(2, 50, 2):
            forest = ExtraTreesRegressor(n_estimators=n_est,
                                         max_features=feat,
                                         random_state=seed)
            forest.fit(X_train, np.ravel(Y_train))
            mae = mean_absolute_error(Y_test, forest.predict(X_test))
            # Strict comparison: ties keep the earliest combination found.
            if mae < best_mae:
                best_mae = mae
                best_n, best_feat, best_seed = n_est, feat, seed
print(best_mae, best_n, best_feat, best_seed)
# -
# ### 5. Gradient Boosting
# +
# Exhaustive grid search over four Gradient Boosting hyper-parameters:
# n_estimators, max_depth, learning_rate and random_state.
# The combination with the smallest mean absolute error on the test set is kept.
best_mae = 999
best_n, best_depth, best_lr, best_seed = 0, 0, 0.0, 0
for n_est in tqdm(range(300, 400, 10)):
    for depth in range(2, 40, 2):
        for lr in np.arange(0.04, 0.22, 0.02):
            for seed in range(2, 10, 2):
                booster = GradientBoostingRegressor(
                    n_estimators=n_est, max_depth=depth, learning_rate=lr, random_state=seed
                )
                booster.fit(X_train, np.ravel(Y_train))
                mae = mean_absolute_error(Y_test, booster.predict(X_test))
                # Strict comparison: ties keep the earliest combination found.
                if mae < best_mae:
                    best_mae = mae
                    best_n, best_depth, best_lr, best_seed = n_est, depth, lr, seed
print(best_mae, best_n, best_depth, best_lr, best_seed)
# -
# ### 6. Others
# +
# Baseline comparison: a few off-the-shelf regressors with default settings.
REGRESSIONS = {
    "K-nn": KNeighborsRegressor(),
    "Ridge": RidgeCV(),
    "Lasso": Lasso(),
    "ElasticNet": ElasticNet(random_state=0),
}
# mean absolute error is used to evaluate the performance of all regressions.
for name, reg in REGRESSIONS.items():
    reg.fit(X_train, Y_train)
    Y_pred = pd.DataFrame(reg.predict(X_test))
    print(name)
    mae = mean_absolute_error(Y_test, Y_pred)
    print('   MAE for diameter is ', mae, '\n')
# -
# ### Conclusion
# Bagging gave the best score in the searches above; note, however, that the model trained and saved below is a RandomForestRegressor — confirm this is the intended final model.
# +
# Re-train the chosen Random Forest configuration on the training split and
# persist it to disk for later reuse.
RF_regr = RandomForestRegressor(
    max_depth=5, n_estimators=1, max_features=26, random_state=45
)
RF_regr.fit(X_train, np.ravel(Y_train))
RF_Y_pred = RF_regr.predict(X_test)
joblib.dump(RF_regr, "./model_aug_diameter_RandomForest.joblib")
# -
| HI_85/notebook2/hao_test/3.2. model hao diameter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="rBXrqlGEYA8G"
# # USE Sentence Embeddings with NLU
# The Universal Sentence Encoder encodes text into high dimensional vectors that can be used for text classification, semantic similarity, clustering and other natural language tasks.
#
# ## Sources :
# - https://arxiv.org/abs/1803.11175
# - https://tfhub.dev/google/universal-sentence-encoder/2
#
# ## Paper Abstract :
#
# We present models for encoding sentences into embedding vectors that specifically target transfer learning to other NLP tasks. The models are efficient and result in accurate performance on diverse transfer tasks. Two variants of the encoding models allow for trade-offs between accuracy and compute resources. For both variants, we investigate and report the relationship between model complexity, resource consumption, the availability of transfer task training data, and task performance. Comparisons are made with baselines that use word level transfer learning via pretrained word embeddings as well as baselines do not use any transfer learning. We find that transfer learning using sentence embeddings tends to outperform word level transfer. With transfer learning via sentence embeddings, we observe surprisingly good performance with minimal amounts of supervised training data for a transfer task. We obtain encouraging results on Word Embedding Association Tests (WEAT) targeted at detecting model bias. Our pre-trained sentence encoding models are made freely available for download and on TF Hub.
#
#
#
# # 1. Install Java and NLU
# + id="M2-GiYL6xurJ"
import os
# ! apt-get update -qq > /dev/null
# Install java (NLU requires a JVM to run)
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
# Point JAVA_HOME at the JDK installed above and put its bin/ first on PATH
# so the Java runtime can be found by the Python process.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# ! pip install nlu > /dev/null
# + [markdown] id="N_CL8HZ8Ydry"
# ## 2. Load Model and embed sample string with USE
# + id="j2ZZZvr1uGpx" executionInfo={"status": "ok", "timestamp": 1602033540197, "user_tz": -120, "elapsed": 173254, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="0179efa9-bd97-478e-a02d-6b882f5a2e47" colab={"base_uri": "https://localhost:8080/", "height": 162}
import nlu
# Load the Universal Sentence Encoder pipeline and embed a sample sentence.
pipe = nlu.load('use')
pipe.predict('He was suprised by the diversity of NLU')
# + [markdown] id="BAUFklCqLr3V"
# # 3. Download Sample dataset
# + id="wAFAOUSuLqvn" executionInfo={"status": "ok", "timestamp": 1602033551866, "user_tz": -120, "elapsed": 184913, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="9a1f4f9c-4bae-4578-ed2c-b16f5f208298" colab={"base_uri": "https://localhost:8080/", "height": 606}
import pandas as pd
# Download the dataset
# ! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/sarcasm/train-balanced-sarcasm.csv -P /tmp
# Load dataset to Pandas
df = pd.read_csv('/tmp/train-balanced-sarcasm.csv')
# Trailing expression displays the frame in the notebook output.
df
# + [markdown] id="OPdBQnV46or5"
# # 4.1 Visualize Embeddings with T-SNE
#
#
#
#
# Let's add Sentiment and Part-of-Speech annotators to our pipeline — it's easy, and it lets us hue our T-SNE plots by POS and Sentiment.
# We predict the first 500 comments
# + id="9bujAZtOCfRW" executionInfo={"status": "ok", "timestamp": 1602033602668, "user_tz": -120, "elapsed": 235706, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="956eca37-7cff-44f9-bc3a-5bd0e7cb16e6" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Build a combined pipeline: POS tagging + sentiment + USE embeddings + emotion.
pipe = nlu.load('pos sentiment use emotion') # emotion
# NLU expects the input text in a 'text' column.
df['text'] = df['comment']
# We must set output level to sentence since NLU will infer a different output level for this pipeline composition
# Only the first 500 rows are predicted to keep runtime reasonable.
predictions = pipe.predict(df[['text','label']].iloc[0:500], output_level='sentence')
predictions
# + [markdown] id="_OypFES-8EwY"
# ## 4.2 Check out the sentiment distribution
# + id="ggbC0PxHgc2t" executionInfo={"status": "ok", "timestamp": 1602033603005, "user_tz": -120, "elapsed": 236032, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="586c5a6d-9164-4ea7-f3ac-a8f43650d1a5" colab={"base_uri": "https://localhost:8080/", "height": 332}
# Some Tokens are None which we must drop first
predictions.dropna(how='any', inplace=True)
# Some sentiment are 'na' which we must drop first
predictions = predictions[predictions.sentiment!= 'na']
# Bar chart of predicted sentiment label counts.
predictions.sentiment.value_counts().plot.bar(title='Dataset sentiment distribution')
# + [markdown] id="LZtPxt5c8HlJ"
# ## 4.3 Check out the emotion category distribution
# + id="OA0Er5WA6l7v" executionInfo={"status": "ok", "timestamp": 1602033603007, "user_tz": -120, "elapsed": 236025, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="c63e9acc-f552-4f57-977c-2c91208ff894" colab={"base_uri": "https://localhost:8080/", "height": 329}
# Bar chart of predicted emotion category counts.
predictions.category.value_counts().plot.bar(title='Dataset emotion category distribution')
# + [markdown] id="ZUYHpsHTINsF"
# # 5.Prepare data for T-SNE algorithm.
# We create a Matrix with one row per Embedding vector for T-SNE algorithm
# + id="L_0jefTB6i52"
import numpy as np
# Build a 2-D array with one row per sentence-embedding vector for the T-SNE input.
# The original used `np.matrix`, which NumPy has deprecated in favour of plain
# ndarrays; `np.vstack` stacks the per-row vectors into an equivalent 2-D array.
predictions['np_array'] = predictions.sentence_embeddings.apply(np.array)
mat = np.vstack(predictions.np_array)
# + [markdown] id="pbdi4CY2Iqc0"
# ## 5.1 Fit and transform T-SNE algorithm
#
# + id="fAFGB6iYIqmO" executionInfo={"status": "ok", "timestamp": 1602033607531, "user_tz": -120, "elapsed": 240534, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="8de9fb59-beed-426b-e9d6-bb462b3f2624" colab={"base_uri": "https://localhost:8080/"}
from sklearn.manifold import TSNE
# Project the high-dimensional embeddings down to 2-D for plotting.
model = TSNE(n_components=2) #n_components means the lower dimension
low_dim_data = model.fit_transform(mat)
print('Lower dim data has shape',low_dim_data.shape)
# + [markdown] id="gsi0b0XhImaz"
# ### Set plotting styles
# + id="CsPVw7NHfEgt"
# set some styles for Plotting
import seaborn as sns
# Style Plots a bit
sns.set_style('darkgrid')
sns.set_palette('muted')
sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 2.5})
# %matplotlib inline
# Import pyplot under the conventional `plt` alias. The original imported the
# top-level `matplotlib` package as `plt`, which is misleading; `pyplot` exposes
# the same `rcParams` object, so behaviour is unchanged.
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20, 14)
# + [markdown] id="8tuoCxNPmzbo"
# ##5.2 Plot low dimensional T-SNE USE embeddings with hue for Sarcasm
#
# + id="Fbq5MAv0jkft" executionInfo={"status": "ok", "timestamp": 1602033608582, "user_tz": -120, "elapsed": 241571, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="232b6b38-2290-4c2d-d258-c06454519b15" colab={"base_uri": "https://localhost:8080/", "height": 861}
# Second positional DataFrame argument is the index: human-readable sarcasm
# labels, later used as the hue of the scatter plot.
tsne_df = pd.DataFrame(low_dim_data, predictions.label.replace({1:'sarcasm',0:'normal'}))
tsne_df.columns = ['x','y']
ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index)
ax.set_title('T-SNE USE Embeddings, colored by Sarcasm label')
# + [markdown] id="Snb1gtqrnIJi"
# ## 5.3 Plot low dimensional T-SNE USE embeddings with hue for Sentiment
#
# + id="QET-Y6PdnIJt" executionInfo={"status": "ok", "timestamp": 1602033609367, "user_tz": -120, "elapsed": 242347, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="0fff4a1c-f9db-4ac4-fe8f-e34f8e6edb19" colab={"base_uri": "https://localhost:8080/", "height": 861}
# Same scatter as above, but indexed (and hued) by the predicted sentiment.
tsne_df = pd.DataFrame(low_dim_data, predictions.sentiment)
tsne_df.columns = ['x','y']
ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index)
ax.set_title('T-SNE USE Embeddings, colored by Sentiment')
# + [markdown] id="fv3FIQ7j6eVv"
# # 5.4 Plot low dimensional T-SNE USE embeddings with hue for Emotions
#
# + id="7QNgruV-6eV1" executionInfo={"status": "ok", "timestamp": 1602033611825, "user_tz": -120, "elapsed": 244800, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="0f93df32-b2df-443b-e983-2e2fd72a3027" colab={"base_uri": "https://localhost:8080/", "height": 861}
# Same scatter again, hued by the predicted emotion category.
tsne_df = pd.DataFrame(low_dim_data, predictions.category)
tsne_df.columns = ['x','y']
ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index)
ax.set_title('T-SNE use Embeddings, colored by Emotion')
# + [markdown] id="l3sRcFW9muEZ"
# # 6.1 Plot low dimensional T-SNE USE embeddings with hue for POS
# Because we will have a list of POS labels for each sentence, we need to explode on the pos column and then redo the data preparation for T-SNE before we can visualize with hue for POS
#
# + id="OZ_2DTk9bC-O" executionInfo={"status": "ok", "timestamp": 1602033611826, "user_tz": -120, "elapsed": 244796, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="a4f3d7da-03e6-4945-a439-0fd67d97dadd" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Each sentence row carries a list of POS tags; explode to one row per tag so
# we can hue by POS (duplicates the embedding per tag).
predictions_exploded_on_pos = predictions.explode('pos')
predictions_exploded_on_pos
# + [markdown] id="k1M_a4pmfMGA"
# ## 6.2 Preprocess data for TSNE again
# + id="K0rpmiy6a2UK" executionInfo={"status": "ok", "timestamp": 1602033692373, "user_tz": -120, "elapsed": 325338, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="ffd24097-a2bf-42c1-9232-efeb81b7f9bc" colab={"base_uri": "https://localhost:8080/"}
# We first create a column of type np array
predictions_exploded_on_pos['np_array'] = predictions_exploded_on_pos.sentence_embeddings.apply(np.array)
# Stack the per-row vectors into a 2-D ndarray for T-SNE.
# (`np.matrix` is deprecated by NumPy; `np.vstack` yields a plain ndarray.)
mat = np.vstack(predictions_exploded_on_pos.np_array)
from sklearn.manifold import TSNE
model = TSNE(n_components=2)  # n_components is the target (lower) dimensionality
low_dim_data = model.fit_transform(mat)
print('Lower dim data has shape', low_dim_data.shape)
# + [markdown] id="6ze0HWqqfQDh"
# # 6.3 Plot low dimensional T-SNE USE embeddings with hue for POS
#
# + id="RB1qdDP3fJHN" executionInfo={"status": "ok", "timestamp": 1602033694241, "user_tz": -120, "elapsed": 327200, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="924f811b-1a4c-42e8-fab4-d9ea344592b7" colab={"base_uri": "https://localhost:8080/", "height": 861}
# Scatter of the re-fit 2-D embeddings, hued by POS tag (index).
tsne_df = pd.DataFrame(low_dim_data, predictions_exploded_on_pos.pos)
tsne_df.columns = ['x','y']
ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index)
ax.set_title('T-SNE USE Embeddings, colored by Part of Speech Tag')
# + [markdown] id="uXb-FMA6mX13"
# # 7. NLU has many more embedding models!
# Make sure to try them all out!
# You can change 'use' in nlu.load('use') to bert, xlnet, albert or any other of the **100+ word embeddings** offered by NLU
# + id="9qUF7jPlme-R" executionInfo={"status": "ok", "timestamp": 1602033694242, "user_tz": -120, "elapsed": 327196, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14469489166467359317"}} outputId="4b04f9e7-88a0-423e-c390-6bc58372ddd6" colab={"base_uri": "https://localhost:8080/"}
# List every embedding model NLU can load in place of 'use'.
nlu.print_all_model_kinds_for_action('embed')
# + id="MvSC3rl5-adJ"
| examples/collab/Embeddings_for_Sentences/NLU_USE_Sentence_Embeddings_and_t-SNE_visualization_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/StatisticsProject/AccessingData/spotify-popularity.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# # Spotify Popularity
#
# Using [Spotify data](https://spotifycharts.com/regional) we can see which songs are the most popular.
#
# To look at just Canadian data, use the url `https://spotifycharts.com/regional/ca/daily/latest`
# +
# NOTE(review): the spotifycharts.com download endpoint has historically changed /
# required login — verify the URL still serves raw CSV before relying on this.
csv_url = 'https://spotifycharts.com/regional/global/daily/latest/download'
import pandas as pd
import requests
import io
r = requests.get(csv_url)
# skiprows=1 skips the chart's banner line above the real CSV header.
df = pd.read_csv(io.StringIO(r.text), skiprows=1)
df
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _build/html/_sources/curriculum-notebooks/Mathematics/StatisticsProject/AccessingData/spotify-popularity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: ''
# name: pysparkkernel
# ---
import configparser
cfg = configparser.ConfigParser()
import boto3
# No module named 'boto3'?
#sc.install_pypi_package('boto3')
s3 = boto3.client('s3')
# Fetch an INI-style config file from S3 (Key is redacted here as '<KEY>').
dl_cfg = s3.get_object(Bucket='adzugaiev-sparkify',
                       Key='<KEY>')
#https://stackoverflow.com/a/60232146
# Parse the downloaded bytes in memory instead of writing to disk.
cfg.read_string(dl_cfg['Body'].read().decode())
cfg.sections()
| cfg_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 6.259372, "end_time": "2021-08-25T07:34:43.469231", "exception": false, "start_time": "2021-08-25T07:34:37.209859", "status": "completed"} tags=[]
from transformers import TFAutoModel
# + papermill={"duration": 21.868438, "end_time": "2021-08-25T07:35:05.350727", "exception": false, "start_time": "2021-08-25T07:34:43.482289", "status": "completed"} tags=[]
# Download/load pretrained cased BERT-base as a TF (Keras) model.
bert = TFAutoModel.from_pretrained("bert-base-cased")
# + papermill={"duration": 0.036322, "end_time": "2021-08-25T07:35:05.402428", "exception": false, "start_time": "2021-08-25T07:35:05.366106", "status": "completed"} tags=[]
# Inspect the pretrained encoder's layers/parameter counts.
bert.summary()
# + papermill={"duration": 0.02106, "end_time": "2021-08-25T07:35:05.438667", "exception": false, "start_time": "2021-08-25T07:35:05.417607", "status": "completed"} tags=[]
import tensorflow as tf
# + papermill={"duration": 0.025115, "end_time": "2021-08-25T07:35:05.478054", "exception": false, "start_time": "2021-08-25T07:35:05.452939", "status": "completed"} tags=[]
# Keras inputs matching the tokenizer output: fixed sequence length of 512.
input_ids = tf.keras.layers.Input(shape=(512,), name = "input_ids", dtype="int32")
mask = tf.keras.layers.Input(shape=(512,), name = "attention_mask", dtype="int32")
# + papermill={"duration": 4.021385, "end_time": "2021-08-25T07:35:09.514098", "exception": false, "start_time": "2021-08-25T07:35:05.492713", "status": "completed"} tags=[]
embeddings = bert.bert(input_ids, attention_mask = mask)[1] # index 1 is the pooler output (projected [CLS] embedding), not max pooling
# + papermill={"duration": 0.034939, "end_time": "2021-08-25T07:35:09.563852", "exception": false, "start_time": "2021-08-25T07:35:09.528913", "status": "completed"} tags=[]
# Classification head: one hidden layer, then softmax over 3 classes.
a = tf.keras.layers.Dense(512, activation="relu")(embeddings)
b = tf.keras.layers.Dense(3, activation="softmax", name="outputs")(a)
# + papermill={"duration": 0.03881, "end_time": "2021-08-25T07:35:09.617065", "exception": false, "start_time": "2021-08-25T07:35:09.578255", "status": "completed"} tags=[]
# Assemble the full model: token ids + attention mask in, class probabilities out.
model = tf.keras.Model(inputs = [input_ids, mask], outputs=b)
# + papermill={"duration": 0.034837, "end_time": "2021-08-25T07:35:09.666437", "exception": false, "start_time": "2021-08-25T07:35:09.631600", "status": "completed"} tags=[]
# Summary before freezing — all BERT weights still trainable.
model.summary()
# + papermill={"duration": 0.025832, "end_time": "2021-08-25T07:35:09.707801", "exception": false, "start_time": "2021-08-25T07:35:09.681969", "status": "completed"} tags=[]
# Freeze layer index 2 so only the head trains — presumably the BERT encoder;
# TODO confirm the index against the summary above.
model.layers[2].trainable = False
# + papermill={"duration": 0.03293, "end_time": "2021-08-25T07:35:09.754986", "exception": false, "start_time": "2021-08-25T07:35:09.722056", "status": "completed"} tags=[]
# Summary after freezing — trainable-parameter count should have dropped.
model.summary()
# + papermill={"duration": 0.232079, "end_time": "2021-08-25T07:35:10.002709", "exception": false, "start_time": "2021-08-25T07:35:09.770630", "status": "completed"} tags=[]
# Recommended BERT fine-tuning hyperparameters. The `lr` keyword is deprecated
# in tf.keras in favour of `learning_rate` (same semantics, same value).
optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5, decay=1e-6) # recommended values for bert
loss = tf.keras.losses.CategoricalCrossentropy()
acc = tf.keras.metrics.CategoricalAccuracy('accuracy')
model.compile(optimizer=optimizer, loss=loss, metrics=[acc])
# + papermill={"duration": 0.034863, "end_time": "2021-08-25T07:35:10.052578", "exception": false, "start_time": "2021-08-25T07:35:10.017715", "status": "completed"} tags=[]
# Read the saved (stringified) dataset element spec for reference; the real
# spec is reconstructed by hand in the next cell.
with open("../input/sentiment-analysis-prep-2/element_spec.txt", 'r') as f:
    element_spec = f.read()
print(element_spec)
# + papermill={"duration": 0.025473, "end_time": "2021-08-25T07:35:10.096408", "exception": false, "start_time": "2021-08-25T07:35:10.070935", "status": "completed"} tags=[]
# Element spec of the serialized datasets: batches of 16 tokenized sequences
# (length 512) with one-hot labels over 3 classes.
element_spec = ({'input_ids': tf.TensorSpec(shape=(16, 512), dtype=tf.int64, name=None),
                 'attention_mask': tf.TensorSpec(shape=(16, 512), dtype=tf.int64, name=None)},
                tf.TensorSpec(shape=(16, 3), dtype=tf.uint8, name=None))
# + papermill={"duration": 0.08032, "end_time": "2021-08-25T07:35:10.193449", "exception": false, "start_time": "2021-08-25T07:35:10.113129", "status": "completed"} tags=[]
# Load the pre-tokenized, pre-batched train/validation datasets from disk.
train_ds = tf.data.experimental.load("../input/sentiment-analysis-prep-2/train", element_spec=element_spec)
val_ds = tf.data.experimental.load("../input/sentiment-analysis-prep-2/validation", element_spec=element_spec)
# + papermill={"duration": 0.02561, "end_time": "2021-08-25T07:35:10.234750", "exception": false, "start_time": "2021-08-25T07:35:10.209140", "status": "completed"} tags=[]
# Sanity-check: print the (lazy) first-batch dataset objects.
print(train_ds.take(1))
print(val_ds.take(1))
# + papermill={"duration": 2262.376432, "end_time": "2021-08-25T08:12:52.627710", "exception": false, "start_time": "2021-08-25T07:35:10.251278", "status": "completed"} tags=[]
# Fine-tune the classification head (BERT frozen above) for 20 epochs.
history = model.fit(train_ds,
                    validation_data=val_ds,
                    epochs=20)
# + papermill={"duration": 30.265567, "end_time": "2021-08-25T08:13:24.150315", "exception": false, "start_time": "2021-08-25T08:12:53.884748", "status": "completed"} tags=[]
# Persist the trained model in TF SavedModel format.
model.save("bert-sentiment-model")
| 3_Fin_Sentiment_Analysis_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="O67uhlT4MExK"
# _Lambda School Data Science — Regression 2_
#
# This sprint, your project is Caterpillar Tube Pricing: Predict the prices suppliers will quote for industrial tube assemblies.
#
# # Cross-Validation, Hyperparameter Optimization 🚜
#
#
# ### Objectives
# - Do cross-validation with independent test set
# - Use scikit-learn for hyperparameter optimization
# + [markdown] id="sTI5WqnGv2b2" colab_type="text"
# ### Install libraries
#
# We will continue to use [category_encoders](https://github.com/scikit-learn-contrib/categorical-encoding) and [xgboost](https://xgboost.readthedocs.io/en/latest/).
#
#
# #### category_encoders
# - Anaconda: `conda install -c conda-forge category_encoders`
# - Google Colab: `pip install category_encoders`
#
# #### xgboost
# - Anaconda, Mac/Linux: `conda install -c conda-forge xgboost`
# - Windows: `conda install -c anaconda py-xgboost`
# - Google Colab: already installed
# + id="WsJ7WePuv2b2" colab_type="code" colab={}
# # Uncomment & run for Google Colab
# # !pip install category_encoders
# + [markdown] id="Nh0yfm-0v2b5" colab_type="text"
# ### Get data
#
# We will continue to use the Caterpillar dataset.
#
# #### Option 1. Kaggle web UI
#
# Sign in to Kaggle and go to the [Caterpillar Tube Pricing](https://www.kaggle.com/c/caterpillar-tube-pricing) competition. Go to the Data page. After you have accepted the rules of the competition, use the download buttons to download the data.
#
#
# #### Option 2. Kaggle API
#
# Follow these [instructions](https://github.com/Kaggle/kaggle-api).
#
# #### Option 3. Google Drive
#
# Download [zip file](https://drive.google.com/uc?export=download&id=1oGky3xR6133pub7S4zIEFbF4x1I87jvC) from Google Drive.
# + id="cHdra08yv2b6" colab_type="code" colab={}
# # Uncomment & run for Option 3 on Google Colab
# from google.colab import files
# files.upload()
# + id="COK_XFdqv2b7" colab_type="code" colab={}
# # !unzip caterpillar-tube-pricing.zip
# + id="IiVjpMN7v2b9" colab_type="code" colab={}
# # !unzip data.zip
# + [markdown] id="INGXrsisSXFo" colab_type="text"
# ### Wrangle data
#
# This code is similar to what you've seen in previous notebooks this sprint. We will continue to do the same kind of data wrangling, to prepare our data for cross-validation and hyperparameter optimization. You will likely engineer more features than this!
# + id="FRHBQvDCv2b_" colab_type="code" colab={}
import category_encoders as ce
from glob import glob
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
# Read data
train = pd.read_csv('competition_data/train_set.csv')
test = pd.read_csv('competition_data/test_set.csv')
tube = pd.read_csv('competition_data/tube.csv')
materials = pd.read_csv('competition_data/bill_of_materials.csv')
components = pd.read_csv('competition_data/components.csv')
# Concatenate every per-component-type CSV into one frame; sort=False keeps
# column order and silences the pandas alignment warning.
comp = pd.concat((pd.read_csv(path) for path in glob('competition_data/comp_*.csv')), sort=False)
# Get a tidy list of the component types in each tube assembly
assembly_components = materials.melt(id_vars='tube_assembly_id',
                                     value_vars=[f'component_id_{n}' for n in range(1,9)])
assembly_components = (assembly_components
                       .sort_values(by='tube_assembly_id')
                       .dropna()
                       .rename(columns={'value': 'component_id'}))
# Join each component id to its component type.
assembly_component_types = assembly_components.merge(components, how='left')
# Make a crosstab of the component types for each assembly (one-hot encoding)
table = pd.crosstab(assembly_component_types['tube_assembly_id'],
                    assembly_component_types['component_type_id'])
table = table.reset_index()
# Get features for each component
features = ['component_id', 'component_type_id', 'orientation', 'unique_feature', 'weight']
# NOTE(review): `comp[features]` returns a new object; the assignments below may
# emit SettingWithCopyWarning — harmless here, but `comp = comp[features].copy()`
# would silence it.
comp = comp[features]
comp['orientation'] = (comp['orientation']=='Yes').astype(int)
comp['unique_feature'] = (comp['unique_feature']=='Yes').astype(int)
comp['weight'] = comp['weight'].fillna(comp['weight'].median())
# Get aggregated features for all components in each tube assembly.
# This code is a little complex, but we discussed in detail last lesson.
materials['components_total'] = sum(materials[f'quantity_{n}'].fillna(0) for n in range(1,9))
materials['components_distinct'] = sum(materials[f'component_id_{n}'].notnull().astype(int) for n in range(1,9))
materials['orientation'] = 0
materials['unique_feature'] = 0
materials['weight'] = 0
# Merge the per-component features once per component slot (1..8); colliding
# columns from `comp` get an `_{n}` suffix.
for n in range(1,9):
    materials = materials.merge(comp, how='left',
                                left_on=f'component_id_{n}',
                                right_on='component_id',
                                suffixes=('', f'_{n}'))
# Empty component slots produced NaNs — zero them before summing.
for col in materials:
    if 'orientation' in col or 'unique_feature' in col or 'weight' in col:
        materials[col] = materials[col].fillna(0)
materials['orientation'] = sum(materials[f'orientation_{n}'] for n in range(1,9))
materials['unique_feature'] = sum(materials[f'unique_feature_{n}'] for n in range(1,9))
materials['weight'] = sum(materials[f'weight_{n}'] for n in range(1,9))
features = ['tube_assembly_id', 'orientation', 'unique_feature', 'weight',
            'components_total', 'components_distinct', 'component_id_1']
materials = materials[features]
# Extract year from quote date
train['quote_date_year'] = pd.to_datetime(train['quote_date'], infer_datetime_format=True).dt.year
# BUG FIX: the original parsed train['quote_date'] here as well, so the test
# set's year feature was taken from (and index-aligned to) the *train* frame.
test['quote_date_year'] = pd.to_datetime(test['quote_date'], infer_datetime_format=True).dt.year
# Merge data
train = (train
         .merge(tube, how='left')
         .merge(materials, how='left')
         .merge(table, how='left')
         .fillna(0))
test = (test
        .merge(tube, how='left')
        .merge(materials, how='left')
        .merge(table, how='left')
        .fillna(0))
# Arrange X matrix and y vector.
# Drop `tube_assembly_id` because our goal is to predict unknown assemblies,
# and no tube assembly id's are shared between the train and test sets.
target = 'cost'
features = train.columns.drop([target, 'tube_assembly_id'])
X_train = train[features]
y_train = train[target]
X_test = test[features]
# Log-transform the target so RMSE on the transformed scale equals RMSLE.
y_train_log = np.log1p(y_train)
# Make pipeline: ordinal-encode categoricals, then a random forest.
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    RandomForestRegressor(n_estimators=100, n_jobs=-1, random_state=42)
)
# + [markdown] id="XwrvaIwzv2cA" colab_type="text"
# ## Do cross-validation with independent test set
# + [markdown] id="Hr-Dt67Gv2cB" colab_type="text"
# Let's take another look at [<NAME>'s diagram of model evaluation methods.](https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html) So far we've been using "**train/validation/test split**", but we have more options.
#
# Today we'll learn about "k-fold **cross-validation** with independent test set", for "model selection (**hyperparameter optimization**) and performance estimation."
#
# <img src="https://sebastianraschka.com/images/blog/2018/model-evaluation-selection-part4/model-eval-conclusions.jpg" width="600">
#
# <sup>Source: https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html</sup>
#
#
# + [markdown] id="ozuzFo_Pv2cB" colab_type="text"
# The Scikit-Learn docs show a diagram of how k-fold cross-validation works, and explain the pros & cons of cross-validation versus train/validate/test split.
#
# #### [Scikit-Learn User Guide, 3.1 Cross-validation](https://scikit-learn.org/stable/modules/cross_validation.html)
#
# > When evaluating different settings (“hyperparameters”) for estimators, there is still a risk of overfitting on the test set because the parameters can be tweaked until the estimator performs optimally. This way, knowledge about the test set can “leak” into the model and evaluation metrics no longer report on generalization performance. To solve this problem, yet another part of the dataset can be held out as a so-called “validation set”: training proceeds on the training set, after which evaluation is done on the validation set, and when the experiment seems to be successful, final evaluation can be done on the test set.
#
# > However, **by partitioning the available data into three sets, we drastically reduce the number of samples which can be used for learning the model, and the results can depend on a particular random choice for the pair of (train, validation) sets.**
#
# > **A solution to this problem is a procedure called cross-validation (CV for short). A test set should still be held out for final evaluation, but the validation set is no longer needed when doing CV.**
#
# <img src="https://scikit-learn.org/stable/_images/grid_search_cross_validation.png" width="600">
#
# > In the basic approach, called k-fold CV, the training set is split into k smaller sets. The following procedure is followed for each of the k “folds”:
#
# > - A model is trained using $k-1$ of the folds as training data;
# > - the resulting model is validated on the remaining part of the data (i.e., it is used as a test set to compute a performance measure such as accuracy).
#
# > The performance measure reported by k-fold cross-validation is then the average of the values computed in the loop. **This approach can be computationally expensive, but does not waste too much data (as is the case when fixing an arbitrary validation set).**
# + [markdown] id="o10EvckQv2cC" colab_type="text"
# ### cross_val_score
#
# How do we get started? According to the [Scikit-Learn User Guide](https://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics),
#
# > The simplest way to use cross-validation is to call the [**`cross_val_score`**](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html) helper function
#
# However, this isn't _so_ simple with the Caterpillar dataset, because:
#
# - We want all rows for a given `tube_assembly_id` to go into the same "fold." (Why? [See the discussion here](https://www.fast.ai/2017/11/13/validation-sets/) under _"New people, new boats"_ for a reminder.) We can do this with the `cross_val_score` function, using its `groups` parameter.
# - For scikit-learn's cross-validation [**scoring**](https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter), higher is better. But for regression error metrics, lower is better. So scikit-learn multiplies regression error metrics by -1 to make them negative. That's why the value of the `scoring` parameter is `'neg_mean_squared_error'`.
# - Scikit-learn doesn't implement RMSE, so we take the square root of MSE. First, we must multiply the scores by -1 to make them positive.
# - RMSE with the log-transformed target is equivalent to RMSLE with the original target.
#
# Put it all together, and k-fold cross-validation with the Caterpillar dataset looks like this:
# + id="N-TqB5Hsv2cC" colab_type="code" colab={}
# %%time
from sklearn.model_selection import cross_val_score
k = 3
# Group by assembly so all quotes for one assembly land in the same fold.
# NOTE(review): with an integer `cv`, scikit-learn uses plain KFold, which does
# not consume `groups` — pass GroupKFold(k) as `cv` to actually enforce this.
groups = train['tube_assembly_id']
scores = cross_val_score(pipeline, X_train, y_train_log, cv=k,
                         scoring='neg_mean_squared_error', groups=groups)
# Scores are negated MSE; negate back and take sqrt for RMSLE.
print(f'RMSLE for {k} folds:', np.sqrt(-scores))
# + [markdown] id="7dq-PfpGZSHJ" colab_type="text"
# But the Random Forest has many hyperparameters. We mostly used the defaults, and arbitrarily chose `n_estimators`. Is it too high? Too low? Just right? How do we know?
# + id="YCubg7EbjZyT" colab_type="code" colab={}
# Show the random forest's current (mostly default) hyperparameters.
print('Model Hyperparameters:')
print(pipeline.named_steps['randomforestregressor'])
# + [markdown] id="bk6o8W7Cv2cE" colab_type="text"
# "The universal tension in machine learning is between optimization and generalization; the ideal model is one that stands right at the border between underfitting and overfitting; between undercapacity and overcapacity. To figure out where this border lies, first you must cross it." —[<NAME>](https://books.google.com/books?id=dadfDwAAQBAJ&pg=PA114)
# + [markdown] id="p8uKvR_pv2cG" colab_type="text"
# ### Validation Curve
#
# Let's try different parameter values, and visualize "the border between underfitting and overfitting."
#
# Using scikit-learn, we can make [validation curves](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html), "to determine training and test scores for varying parameter values. This is similar to grid search with one parameter."
# + [markdown] id="hEIxeNXdv2cF" colab_type="text"
# <img src="https://jakevdp.github.io/PythonDataScienceHandbook/figures/05.03-validation-curve.png">
#
# <sup>Source: https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html#Validation-curves-in-Scikit-Learn</sup>
# + [markdown] id="r3bbgaP2c3Pr" colab_type="text"
# Validation curves are awesome for learning about overfitting and underfitting. (But less useful in real-world projects, because we usually want to vary more than one parameter.)
#
# For this example, let's see what happens when we vary the depth of a decision tree. (This will be faster than varying the number of estimators in a random forest.)
# + id="znIz2FPQv2cG" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.model_selection import validation_curve
from sklearn.tree import DecisionTreeRegressor
pipeline = make_pipeline(ce.OrdinalEncoder(), DecisionTreeRegressor())
depth = range(1, 15, 2)
train_scores, val_scores = validation_curve(
pipeline, X_train, y_train_log,
param_name='decisiontreeregressor__max_depth',
param_range=depth, scoring='neg_mean_squared_error',
cv=2, groups=groups)
train_rmsle = np.sqrt(-train_scores)
val_rmsle = np.sqrt(-val_scores)
plt.plot(depth, np.mean(train_rmsle, axis=1), color='blue', label='training error')
plt.plot(depth, np.mean(val_rmsle, axis=1), color='red', label='validation error')
plt.xlabel('depth')
plt.ylabel('RMSLE')
plt.legend();
# + [markdown] id="JUaLgk8Pv2cJ" colab_type="text"
# ## Use scikit-learn for hyperparameter optimization
#
# To vary multiple hyperparameters and find their optimal values, let's try **Randomized Search CV.**
# + [markdown] id="AexbC7fjv2cL" colab_type="text"
# #### [Scikit-Learn User Guide, 3.2 Tuning the hyper-parameters of an estimator](https://scikit-learn.org/stable/modules/grid_search.html)
#
# > Hyper-parameters are parameters that are not directly learnt within estimators. In scikit-learn they are passed as arguments to the constructor of the estimator classes.
#
# > While using a grid of parameter settings is currently the most widely used method for parameter optimization, other search methods have more favourable properties. [`RandomizedSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html) implements a randomized search over parameters, where each setting is sampled from a distribution over possible parameter values.
#
# > Specifying how parameters should be sampled is done using a dictionary. Additionally, a computation budget, being the number of sampled candidates or sampling iterations, is specified using the `n_iter` parameter.
# + [markdown] id="1RZeZd0RsWZL" colab_type="text"
# For the sake of time, let's just do 5 iterations of randomized search, with 2-fold cross-validation.
# + id="ZtZQbJQ5v2cM" colab_type="code" colab={}
from scipy.stats import randint, uniform
from sklearn.model_selection import RandomizedSearchCV
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    RandomForestRegressor(random_state=42)
)
# Distributions to sample: n_estimators ~ U{50..499}, max_features ~ U(0,1),
# min_samples_leaf from a fixed grid.
param_distributions = {
    'randomforestregressor__n_estimators': randint(50, 500),
    'randomforestregressor__max_features': uniform(),
    'randomforestregressor__min_samples_leaf': [1, 10, 100]
}
# 5 sampled candidates x 2-fold CV = 10 fits (kept small for lesson runtime).
search = RandomizedSearchCV(
    pipeline,
    param_distributions=param_distributions,
    n_iter=5,
    cv=2,
    scoring='neg_mean_squared_error',
    verbose=10,
    return_train_score=True,
    n_jobs=-1
)
search.fit(X_train, y_train_log, groups=groups);
# + id="P9M-OOJltM_I" colab_type="code" colab={}
# best_score_ is negated MSE on log targets; sqrt(-score) recovers RMSLE.
print('Best hyperparameters', search.best_params_)
print('Cross-validation RMSLE', np.sqrt(-search.best_score_))
# + [markdown] id="oo9-Kbx6uWM3" colab_type="text"
# The score may be underwhelming to you, but it's just a demo. Try it after the lesson, with all your features, for more iterations.
# + [markdown] id="Q79ipvpgqYwF" colab_type="text"
# ### "Fitting X folds for each of Y candidates, totalling Z fits" ?
#
# What did that mean? What do you think?
#
#
# + [markdown] id="JLjXNObHuTXx" colab_type="text"
# ### Do it with xgboost
# + id="2FabSX50trkd" colab_type="code" colab={}
from xgboost import XGBRegressor
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pipeline = make_pipeline(
ce.OrdinalEncoder(),
XGBRegressor(random_state=42)
)
param_distributions = {
'xgbregressor__n_estimators': randint(500, 1000),
'xgbregressor__max_depth': randint(3, 7)
}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=5,
cv=2,
scoring='neg_mean_squared_error',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train, y_train_log, groups=groups);
# + id="F9NJ7deQuxCC" colab_type="code" colab={}
print('Best hyperparameters', search.best_params_)
print('Cross-validation RMSLE', np.sqrt(-search.best_score_))
# + [markdown] id="2tJr3YZ8xLt-" colab_type="text"
# ### See detailed results
# + id="IGHRUlY3xF1O" colab_type="code" colab={}
pd.DataFrame(search.cv_results_).sort_values(by='rank_test_score')
# + [markdown] id="GDZyu6FNyY2l" colab_type="text"
# ### Make predictions to submit to Kaggle
# + id="OuWqQUk_yIw4" colab_type="code" colab={}
pipeline = search.best_estimator_
y_pred_log = pipeline.predict(X_test)
y_pred = np.expm1(y_pred_log) # Convert from log-dollars to dollars
submission = pd.read_csv('sample_submission.csv')
submission['cost'] = y_pred
submission.to_csv('submission.csv', index=False)
# + [markdown] id="LLh5dBMa7d2c" colab_type="text"
# # ASSIGNMENT
# - Use the Caterpillar dataset (or _any_ dataset of your choice).
# - Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.
# - Add comments and Markdown to your notebook. Clean up your code.
# - Commit your notebook to your fork of the GitHub repo.
#
# ### Stretch Goals
# - Make your final Kaggle submissions. Improve your scores! Look at [Kaggle Kernels](https://www.kaggle.com/c/caterpillar-tube-pricing/kernels) for ideas. **Share your best features and techniques on Slack.**
# - In addition to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.
# - _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:
#
# > You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...
#
# The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?
#
# ### Post-Reads
# - <NAME>, [_Python Data Science Handbook_, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation
# - <NAME>, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)
# - <NAME>, [_A Programmer's Guide to Data Mining_, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation
# - <NAME>, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)
# + [markdown] id="4sQiv9s2kOjn" colab_type="text"
# ## Try adjusting these hyperparameters in your future projects
#
# ### Tree ensembles
#
# #### Random Forest
# - class_weight (for imbalanced classes)
# - max_depth (usually high)
# - max_features (decrease for more variance)
# - min_samples_leaf (increase if overfitting)
# - n_estimators (too low underfits, too high wastes time)
#
# #### Xgboost
# - scale_pos_weight (for imbalanced classes)
# - max_depth (usually low)
# - n_estimators (too low underfits, too high overfits)
#
# For more ideas, see [Notes on Parameter Tuning](https://xgboost.readthedocs.io/en/latest/tutorials/param_tuning.html) and [DART booster](https://xgboost.readthedocs.io/en/latest/tutorials/dart.html).
#
# ### Linear models
#
# #### Logistic Regression
# - C
# - class_weight (for imbalanced classes)
# - penalty
#
# #### Ridge / Lasso Regression
# - alpha
#
# #### ElasticNet Regression
# - alpha
# - l1_ratio
#
# For more explanation, see [**<NAME>'s 9 minute video on Ridge Regression**](https://www.youtube.com/watch?v=XK5jkedy17w)!
#
| module4-hyperparamter-optimization/hyperparameter_optimization_cross_validation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Bayesian Optimization with GPyOpt
#
#
# ### Written by <NAME>, Amazon Research Cambridge
#
# *Last updated Monday, 22 May 2017.*
#
# =====================================================================================================
# 1. **How to use GPyOpt?**
#
# 2. **The Basics of Bayesian Optimization**
# 1. Gaussian Processes
# 2. Acquisition functions
# 3. Applications of Bayesian Optimization
#
# 3. **1D optimization example**
#
# 4. **2D optimization example**
#
# =====================================================================================================
# ## 1. How to use GPyOpt?
# We start by loading GPyOpt and GPy.
# %pylab inline
import GPy
import GPyOpt
from numpy.random import seed
import matplotlib
# GPyOpt is easy to use as a black-box functions optimizer. To start you only need:
#
# * Your favorite function $f$ to minimize. We use $f(x)=2x^2$ in this toy example, whose global minimum is at x=0.
def myf(x):
    """Toy objective f(x) = (2x)^2, with its global minimum at x = 0."""
    doubled = 2 * x
    return doubled ** 2
# * A set of box constrains, the interval [-1,1] in our case. You can define a list of dictionaries where each element defines the name, type and domain of the variables.
bounds = [{'name': 'var_1', 'type': 'continuous', 'domain': (-1,1)}]
# * A budget, or number of allowed evaluations of $f$.
max_iter = 15
# With these three pieces of information GPyOpt has enough to find the minimum of $f$ in the selected region. GPyOpt solves the problem in two steps. First, you need to create a GPyOpt object that stores the problem (f and its box-constraints). You can do it as follows.
myProblem = GPyOpt.methods.BayesianOptimization(myf,bounds)
# Next you need to run the optimization for the given budget of iterations. This bit is a bit slow because many default options are used. In the next notebooks of this manual you can learn how to change other parameters to optimize the optimization performance.
myProblem.run_optimization(max_iter)
# Now you can check the best found location $x^*$ by
myProblem.x_opt
# and the predicted value of $f$ at the optimum $x^*$ by
myProblem.fx_opt
# And that's it! Keep reading to learn how GPyOpt uses Bayesian Optimization to solve this an other optimization problem. You will also learn all the features and options that you can use to solve your problems efficiently.
#
# =====================================================================================================
#
# ## 2. The Basics of Bayesian Optimization
#
# Bayesian optimization (BO) is a strategy for global optimization of black-box functions [(Snoek et al., 2012)](http://papers.nips.cc/paper/4522-practical-bayesian-optimization-of-machine-learning-algorithms.pdf). Let $f: {\mathcal X} \to R$ be a L-Lipschitz continuous function defined on a compact subset ${\mathcal X} \subseteq R^d$. We are interested in solving the global optimization problem of finding
# $$ x_{M} = \arg \min_{x \in {\mathcal X}} f(x). $$
#
# We assume that $f$ is a *black-box* from which only perturbed evaluations of the type $y_i = f(x_i) + \epsilon_i$, with $\epsilon_i \sim\mathcal{N}(0,\psi^2)$, are available. The goal is to make a series of $x_1,\dots,x_N$ evaluations of $f$ such that the *cumulative regret*
# $$r_N= Nf(x_{M})- \sum_{n=1}^N f(x_n),$$
# is minimized. Essentially, $r_N$ is minimized if we start evaluating $f$ at $x_{M}$ as soon as possible.
#
# There are two crucial bits in any Bayesian Optimization (BO) procedure approach.
#
# 1. Define a **prior probability measure** on $f$: this function will capture the our prior beliefs on $f$. The prior will be updated to a 'posterior' using the available data.
#
# 2. Define an **acquisition function** $acqu(x)$: this is a criteria to decide where to sample next in order to gain the maximum information about the location of the global maximum of $f$.
#
# Every time a new data point is collected. The model is re-estimated and the acquisition function optimized again until convergence. Given a prior over the function $f$ and an acquisition function, a BO procedure will converge to the optimum of $f$ under some conditions [(Bull, 2011)](http://arxiv.org/pdf/1101.3501.pdf).
# ### 2.1 Prior probability meassure on $f$: Gaussian processes
# A Gaussian process (GP) is a probability distribution across classes functions, typically smooth, such that each linear finite-dimensional restriction is multivariate Gaussian [(Rasmussen and Williams, 2006)](http://www.gaussianprocess.org/gpml). GPs are fully parametrized by a mean $\mu(x)$ and a covariance function $k(x,x')$. Without loss of generality $\mu(x)$ is assumed to be zero. The covariance function $k(x,x')$ characterizes the smoothness and other properties of $f$. It is known as the
# kernel of the process and has to be continuous, symmetric and positive definite. A widely used kernel is the square exponential, given by
#
# $$ k(x,x') = l \cdot \exp{ \left(-\frac{\|x-x'\|^2}{2\sigma^2}\right)} $$
# where $\sigma^2$ and $l$ are positive parameters.
#
# To denote that $f$ is a sample from a GP with mean $\mu$ and covariance $k$ we write
#
# $$f(x) \sim \mathcal{GP}(\mu(x),k(x,x')).$$
#
# For regression tasks, the most important feature of GPs is that process priors are conjugate to the likelihood from finitely many observations $y= (y_1,\dots,y_n)^T$ and $X =\{x_1,...,x_n\}$, $x_i\in \mathcal{X}$ of the form $y_i = f(x_i) + \epsilon_i $
# where $\epsilon_i \sim \mathcal{N} (0,\sigma^2)$. We obtain the Gaussian posterior $f(x^*)|X, y, \theta \sim \mathcal{N}(\mu(x^*),\sigma^2(x^*))$, where $\mu(x^*)$ and $\sigma^2(x^*)$ have closed form. See [(<NAME>, 2006)](http://www.gaussianprocess.org/gpml) for details.
# ### 2.2 Acquisition Function
#
# Acquisition functions are designed to represent our beliefs over the maximum of $f(x)$. Denote by $\theta$ the parameters of the GP model and by $\{x_i,y_i\}$ the available sample. Three of the most common acquisition functions, all available in GPyOpt, are:
#
# * **Maximum probability of improvement (MPI)**:
#
# $$acqu_{MPI}(x;\{x_n,y_n\},\theta) = \Phi(\gamma(x)), \mbox{where}\ \gamma(x)=\frac{\mu(x;\{x_n,y_n\},\theta)-f(x_{best})-\psi}{\sigma(x;\{x_n,y_n\},\theta)}.$$
#
#
# * **Expected improvement (EI)**:
#
# $$acqu_{EI}(x;\{x_n,y_n\},\theta) = \sigma(x;\{x_n,y_n\},\theta) (\gamma(x) \Phi(\gamma(x))) + N(\gamma(x);0,1).$$
#
# * **Upper confidence bound (UCB)**:
#
# $$acqu_{UCB}(x;\{x_n,y_n\},\theta) = -\mu(x;\{x_n,y_n\},\theta)+\psi\sigma(x;\{x_n,y_n\},\theta).$$
#
# $\psi$ is a tunable parameter that helps to make the acquisition functions more flexible. Also, in the case of the UCB, the parameter $\eta$ is useful to define the balance between the importance we give to the mean and the variance of the model. This is known as the **exploration/exploitation trade off**.
# ### 2.3 Applications of Bayesian Optimization
#
# Bayesian Optimization has been applied to solve a wide range of problems. Among many other, some nice applications of Bayesian Optimization include:
#
#
# * Sensor networks (http://www.robots.ox.ac.uk/~parg/pubs/ipsn673-garnett.pdf),
#
# * Automatic algorithm configuration (http://www.cs.ubc.ca/labs/beta/Projects/SMAC/papers/11-LION5-SMAC.pdf),
#
# * Deep learning (http://www.mlss2014.com/files/defreitas_slides1.pdf),
#
# * Gene design (http://bayesopt.github.io/papers/paper5.pdf),
#
# * and a long etc!
#
# In this Youtube video you can see Bayesian Optimization working in a real time in a robotics example. [(Calandra1 et al. 2008)](http://www.ias.tu-darmstadt.de/uploads/Site/EditPublication/Calandra_LION8.pdf)
from IPython.display import YouTubeVideo
YouTubeVideo('ualnbKfkc3Q')
# ## 3. One dimensional example
#
# In this example we show how GPyOpt works in a one-dimensional example a bit more difficult than the one we analyzed in Section 3. Let's consider here the Forrester function
#
# $$f(x) =(6x-2)^2 \sin(12x-4)$$ defined on the interval $[0, 1]$.
#
# The minimum of this function is located at $x_{min}=0.78$. The Forrester function is part of the benchmark of functions of GPyOpt. To create the true function, the perturbed version and boundaries of the problem you need to run the following cell.
# +
# %pylab inline
import GPy
import GPyOpt
# Create the true and perturbed Forrester function and the boundaries of the problem
f_true= GPyOpt.objective_examples.experiments1d.forrester() # noisy version
bounds = [{'name': 'var_1', 'type': 'continuous', 'domain': (0,1)}] # problem constrains
# -
# We plot the true Forrester function.
f_true.plot()
# As we did in Section 3, we need to create the GPyOpt object that will run the optimization. We specify the function, the boundaries and we add the type of acquisition function to use.
# Creates GPyOpt object with the model and acquisition function
seed(123)
myBopt = GPyOpt.methods.BayesianOptimization(f=f_true.f, # function to optimize
domain=bounds, # box-constrains of the problem
acquisition_type='EI',
exact_feval = True) # Selects the Expected improvement
# Now we want to run the optimization. Apart from the number of iterations you can select
# how do you want to optimize the acquisition function. You can run a number of local optimizers (acqu_optimize_restart) at random or in grid (acqu_optimize_method).
# +
# Run the optimization
max_iter = 15 # evaluation budget
max_time = 60 # time budget
eps = 10e-6 # Minimum allows distance between the las two observations
myBopt.run_optimization(max_iter, max_time, eps)
# -
# When the optimization is done you should receive a message describing if the method converged or if the maximum number of iterations was reached. In one dimensional examples, you can see the result of the optimization as follows.
myBopt.plot_acquisition()
myBopt.plot_convergence()
# In problems of any dimension two evaluations plots are available.
#
# * The distance between the last two observations.
#
# * The value of $f$ at the best location previous to each iteration.
#
# To see these plots just run the following cell.
myBopt.plot_convergence()
# Now let's make a video to track what the algorithm is doing in each iteration. Let's use the LCB in this case with parameter equal to 2.
# ## 4. Two dimensional example
#
# Next, we try a 2-dimensional example. In this case we minimize the Six-hump camel function
#
# $$f(x_1,x_2) = \left(4-2.1x_1^2 + \frac{x_1^4}{3} \right)x_1^2 + x_1x_2 + (-4 +4x_2^2)x_2^2,$$
#
# in $[-3,3]\times [-2,2]$. This function has two global minima, at $(0.0898,-0.7126)$ and $(-0.0898,0.7126)$. As in the previous case we create the function, which is already in GPyOpt. In this case we generate observations of the function perturbed with white noise of $sd=0.1$.
# create the object function
f_true = GPyOpt.objective_examples.experiments2d.sixhumpcamel()
f_sim = GPyOpt.objective_examples.experiments2d.sixhumpcamel(sd = 0.1)
bounds =[{'name': 'var_1', 'type': 'continuous', 'domain': f_true.bounds[0]},
{'name': 'var_2', 'type': 'continuous', 'domain': f_true.bounds[1]}]
f_true.plot()
# We create the GPyOpt object. In this case we use the Lower Confidence bound acquisition function to solve the problem.
# Creates three identical objects that we will later use to compare the optimization strategies
myBopt2D = GPyOpt.methods.BayesianOptimization(f_sim.f,
domain=bounds,
model_type = 'GP',
acquisition_type='EI',
normalize_Y = True,
acquisition_weight = 2)
# We run the optimization for 40 iterations and show the evaluation plot and the acquisition function.
# +
# runs the optimization for the three methods
max_iter = 40 # maximum time 40 iterations
max_time = 60 # maximum time 60 seconds
myBopt2D.run_optimization(max_iter,max_time,verbosity=False)
# -
# Finally, we plot the acquisition function and the convergence plot.
myBopt2D.plot_acquisition()
myBopt2D.plot_convergence()
| manual/GPyOpt_reference_manual.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="vtZ1E_DEVbcb"
# # Mean Shift using Standard Scaler
# + [markdown] id="ua6-SyMeVbcd"
# This Code template is for the Cluster analysis using a simple Mean Shift(Centroid-Based Clustering using a flat kernel) Clustering algorithm along with feature scaling using Standard Scaler and includes 2D and 3D cluster visualization of the Clusters.
# + [markdown] id="YqtyYbbhVbce"
# ### Required Packages
# + id="ELq93vByVbce"
# !pip install plotly
# + id="EbYlgwXJVbcf"
import operator
import warnings
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import plotly.express as px
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import plotly.graph_objects as go
from sklearn.cluster import MeanShift, estimate_bandwidth
warnings.filterwarnings("ignore")
# + [markdown] id="bXGOqmBOVbcf"
# ### Initialization
#
# Filepath of CSV file
# + id="MEWUuv90Vbcg"
file_path = ""
# + [markdown] id="EwRVedBaVbcg"
# List of features which are required for model training
# + id="fEFwGI90Vbch"
features=[]
# + [markdown] id="TouZwbDdVbci"
# ### Data Fetching
#
# Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
#
# We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="w26UDSLqVbcj" outputId="6915b4be-56e5-41e4-96d6-a58822e81ace"
df=pd.read_csv(file_path)
df.head()
# + [markdown] id="-OLXkTh3Vbck"
# ### Feature Selections
#
# It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
#
# We will assign all the required input features to X.
# + id="VCgsW3K1Vbck"
X = df[features]
# + [markdown] id="HdTQNaZHVbck"
# ### Data Preprocessing
#
# Since the majority of the machine learning models in the Sklearn library don't handle string category data and null values, we have to explicitly remove or replace them. The snippet below has functions which remove null values if any exist, and convert string class data in the dataset by encoding it to integer classes.
#
# + id="pfX-zKgzVbcl"
def NullClearner(df):
    """Impute missing values in a pandas Series in-place and return it.

    Numeric (float64/int64) Series are filled with their mean, any other
    Series with their mode; non-Series inputs are returned untouched.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        fill_value = df.mean()
    else:
        fill_value = df.mode()[0]
    df.fillna(fill_value, inplace=True)
    return df
def EncodeX(df):
    """One-hot encode categorical (object/category) columns via pd.get_dummies;
    numeric columns pass through unchanged."""
    return pd.get_dummies(df)
# + [markdown] id="YV9szUBbVbcl"
# Calling preprocessing functions on the feature and target set.
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="V1Wa78Y8Vbcl" outputId="1b4211ff-d038-4217-ca1b-ebde9097319b"
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
X.head()
# + [markdown] id="bwbLaoN5mA4i"
# ####Feature Scaling
# + [markdown] id="REdWAzFqmGZs"
# Standard Scaler - Standardize features by removing the mean and scaling to unit variance
# Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using transform.<br>
# [For more information click here](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
# + id="JUztZilnmAWf"
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# + [markdown] id="EYTOQsVwVbcl"
# ### Model
#
# Mean shift clustering using a flat kernel.
#
# Mean shift clustering aims to discover “blobs” in a smooth density of samples. It is a centroid-based algorithm, which works by updating candidates for centroids to be the mean of the points within a given region. These candidates are then filtered in a post-processing stage to eliminate near-duplicates to form the final set of centroids.
#
# Seeding is performed using a binning technique for scalability.
# [More information](https://analyticsindiamag.com/hands-on-tutorial-on-mean-shift-clustering-algorithm/)
#
# #### Tuning Parameters
#
# 1. bandwidthfloat, default=None
# > Bandwidth used in the RBF kernel.
# If not given, the bandwidth is estimated using sklearn.cluster.estimate_bandwidth
#
# 2. seedsarray-like of shape (n_samples, n_features), default=None
# > Seeds used to initialize kernels. If not set, the seeds are calculated by clustering.get_bin_seeds with bandwidth as the grid size and default values for other parameters.
#
# 3. bin_seedingbool, default=False
# > If true, initial kernel locations are not locations of all points, but rather the location of the discretized version of points, where points are binned onto a grid whose coarseness corresponds to the bandwidth.
#
# 4. min_bin_freqint, default=1
# > To speed up the algorithm, accept only those bins with at least min_bin_freq points as seeds.
#
# 5. cluster_allbool, default=True
# > If true, then all points are clustered, even those orphans that are not within any kernel. Orphans are assigned to the nearest kernel. If false, then orphans are given cluster label -1
#
# 6. n_jobsint, default=None
# > The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors.
#
# 7. max_iterint, default=300
# > Maximum number of iterations, per seed point before the clustering operation terminates
#
# [For more detail on API](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.MeanShift.html)
# <br>
# <br>
# ####Estimate Bandwidth
# Estimate the bandwidth to use with the mean-shift algorithm.
#
# That this function takes time at least quadratic in n_samples. For large datasets, it’s wise to set that parameter to a small value.
# + id="EVxZNJqgVbcm"
# Estimate the RBF kernel bandwidth from the scaled data itself
# (quantile=0.15 gives tighter neighbourhoods than the sklearn default of 0.3,
#  typically yielding more clusters — TODO confirm against the dataset used).
bandwidth = estimate_bandwidth(X_scaled, quantile=0.15)
# bin_seeding=True seeds kernels from binned point locations for speed
# (see the parameter notes in the markdown above).
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X_scaled)
# Cluster label per sample, used below for analysis/visualization.
y_pred = ms.predict(X_scaled)
# + [markdown] id="P8kfr4YdVbcm"
# ### Cluster Analysis
#
# First, we add the cluster labels from the trained model into the copy of the data frame for cluster analysis/visualization.
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="D_irFbchaLRg" outputId="d264c0f9-cfea-45d2-8f1c-405c4d398a19"
ClusterDF = X.copy()
ClusterDF['ClusterID'] = y_pred
ClusterDF.head()
# + [markdown] id="m2AQO04gVbcm"
# #### Cluster Records
# The below bar graphs show the number of data points in each available cluster.
# + colab={"base_uri": "https://localhost:8080/", "height": 398} id="YxRd_eviVbcm" outputId="3eadee0a-f71d-4dd6-e01e-a0b8199c55c8"
ClusterDF['ClusterID'].value_counts().plot(kind='bar')
# + [markdown] id="aYnOKvZTVbcn"
# #### Cluster Plots
# Below written functions get utilized to plot 2-Dimensional and 3-Dimensional cluster plots on the available set of features in the dataset. Plots include different available clusters along with cluster centroid.
# + id="O2PqvJnIVbcn"
def Plot2DCluster(X_Cols, df):
    """Draw a 2-D scatter plot for every pair of feature columns.

    Points are plotted one cluster (by 'ClusterID') at a time so each cluster
    gets its own legend entry; the centroids of the globally fitted MeanShift
    model ``ms`` are overlaid as black triangles.
    """
    for col_x, col_y in itertools.combinations(X_Cols, 2):
        plt.rcParams["figure.figsize"] = (8, 6)
        # Column positions are needed to index into ms.cluster_centers_.
        idx_x = df.columns.get_loc(col_x)
        idx_y = df.columns.get_loc(col_y)
        for cluster_id in df['ClusterID'].unique():
            members = df[df.ClusterID == cluster_id]
            plt.scatter(members[col_x], members[col_y], cmap=plt.cm.Accent, label=cluster_id)
        plt.scatter(ms.cluster_centers_[:, idx_x], ms.cluster_centers_[:, idx_y],
                    marker="^", color="black", label="centroid")
        plt.xlabel(col_x)
        plt.ylabel(col_y)
        plt.legend()
        plt.show()
def Plot3DCluster(X_Cols,df):
    """Render a matplotlib 3-D scatter plot for every 3-combination of feature
    columns, colouring points by 'ClusterID' and overlaying the centroids of
    the globally fitted MeanShift model ``ms`` as black triangles."""
    for i in list(itertools.combinations(X_Cols, 3)):
        # Positional indices of the three columns, used to slice ms.cluster_centers_.
        xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
        fig,ax = plt.figure(figsize = (16, 10)),plt.axes(projection ="3d")
        ax.grid(b = True, color ='grey',linestyle ='-.',linewidth = 0.3,alpha = 0.2)
        # One scatter3D call per cluster so each cluster gets its own legend entry.
        for j in df['ClusterID'].unique():
            DFC=df[df.ClusterID==j]
            ax.scatter3D(DFC[i[0]],DFC[i[1]],DFC[i[2]],alpha = 0.8,cmap=plt.cm.Accent,label=j)
        ax.scatter3D(ms.cluster_centers_[:,xi],ms.cluster_centers_[:,yi],ms.cluster_centers_[:,zi],
                    marker="^",color="black",label="centroid")
        ax.set_xlabel(i[0])
        ax.set_ylabel(i[1])
        ax.set_zlabel(i[2])
        plt.legend()
        plt.show()
def Plotly3D(X_Cols,df):
    """Build an interactive Plotly 3-D scatter for every 3-combination of
    feature columns: one set of traces for the MeanShift centroids (``ms``)
    and one for the data points coloured by 'ClusterID', merged into a single
    titled figure."""
    for i in list(itertools.combinations(X_Cols,3)):
        xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
        # Centroid-only figure.
        fig1 = px.scatter_3d(ms.cluster_centers_,x=ms.cluster_centers_[:,xi],y=ms.cluster_centers_[:,yi],
                    z=ms.cluster_centers_[:,zi])
        # Data points coloured by cluster assignment.
        fig2=px.scatter_3d(df, x=i[0], y=i[1],z=i[2],color=df['ClusterID'])
        # Merge both trace lists into one figure, titled with the axis columns.
        fig3 = go.Figure(data=fig1.data + fig2.data,
            layout=go.Layout(title=go.layout.Title(text="x:{}, y:{}, z:{}".format(i[0],i[1],i[2])))
            )
        fig3.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="mqt4e_SLjWrJ" outputId="e3dca7aa-a6f0-467a-cca3-792071352c37"
sns.set_style("whitegrid")
sns.set_context("talk")
plt.rcParams["lines.markeredgewidth"] = 1
sns.pairplot(data=ClusterDF, hue='ClusterID', palette='Dark2', height=5)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="h1HEMjF7Vbcn" outputId="66377d20-f594-40f5-9f0f-05562fa11d1c"
Plot2DCluster(X.columns,ClusterDF)
# + colab={"base_uri": "https://localhost:8080/", "height": 575} id="j_gbSXZuVbcn" outputId="64c359cc-0cab-42b9-ecfb-2ec373394142"
Plot3DCluster(X.columns,ClusterDF)
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Dj60_FH2Vbco" outputId="d9c50a53-5f30-456f-e7a6-341a8ce81b36"
Plotly3D(X.columns,ClusterDF)
# + [markdown] id="DEFZns4jVbco"
# #### [Created by <NAME>](https://github.com/iamgrootsh7)
| Clustering/MeanShift/MeanShift_StandardScaler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import random
# Seed the RNG so the synthetic dataset is reproducible across runs.
random.seed(42)
# Value ranges: division [0-9], employee [0-99], month [0-11], revenue [0-999]
# (random.randrange excludes its upper bound).
employees = {}
# Assign each of the 100 employees to a random division.
for eid in range(0,100):
    employees[eid] = random.randrange(0, 10)
# populate dataset with rows of [month, employee, division, revenue]
revenue = []
for month in range (0,12):
    for eid in range(0,100):
        revenue.append([month, eid, employees[eid], random.randrange(0, 1000)])
# -
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
df = pd.DataFrame.from_records(revenue, columns = ['month', 'employee', 'division', 'revenue'])
df.sample(10)
# +
# Sum by division
# http://pandas.pydata.org/pandas-docs/stable/groupby.html
# Aggregation functions will not return the groups that you are aggregating over if they are named columns,
# when as_index=True, the default. The grouped columns will be the indices of the returned object.
# Passing as_index=False will return the groups that you are aggregating over, if they are named columns.
print('Conver hierarchical df back to df:')
groupped = df.groupby(['division', 'month'], as_index=False)['revenue'].sum()
groupped.head(6)
# -
# Sum by division
groupped = df.groupby(['division', 'month'])['revenue'].sum()
print(groupped.head(18))
pivot = groupped.reset_index().pivot(index='month', columns='division', values='revenue')
print(pivot)
pivot.plot().legend(bbox_to_anchor=(1.4, 1))
cumsum = groupped.groupby(level=[0]).cumsum()
cumsum.head(18)
# Append column to original dataset
flat = groupped.reset_index()
flat.groupby(['division'])['revenue'].apply(lambda x: x.cumsum())
flat['cumsum'] = flat.groupby(['division'])['revenue'].apply(lambda x: x.cumsum())
flat.head(18)
flat[flat['division']==0].plot(kind='line', x=['month'], y=['revenue', 'cumsum'])
# TODO rolling sum over two month
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.rolling.html
#groupped.groupby(level=[0]).rolling(2).sum()
flat_roll = groupped.copy().reset_index()
flat_roll['roll'] = flat_roll.groupby(['division']).rolling(2, min_periods=1).sum().reset_index(drop=True)['revenue']
flat_roll.head(18)
flat_roll[flat_roll['division']==0].plot(kind='line', x=['month'], y=['revenue', 'roll'])
# Lagged function
shifted = pd.concat([groupped, groupped.groupby(level=[0]).shift(-1), groupped.groupby(level=[0]).shift(1)], axis=1)
shifted.columns = ['current', 'next', 'prev']
shifted.head(18)
| window.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Export All Techniques
# ## Import Libraries
from attackcti import attack_client
# NOTE: json_normalize moved to the top-level pandas namespace in pandas 0.25;
# the old `pandas.io.json` import path was removed in pandas 2.0.
from pandas import json_normalize
# ## Initialize Attack client
lift = attack_client()
# ## Get All Techniques STIX Format
all_techniques = lift.get_techniques()
# Drop techniques revoked by ATT&CK so they don't appear in the export.
all_techniques = lift.remove_revoked(all_techniques)
len(all_techniques)
# ## Export CSV File
# Flatten the nested technique dicts into a tabular DataFrame.
techniques_df = json_normalize(all_techniques)
techniques_df.to_csv('all_techniques_stix.csv', index=False)
# ## Get All Techniques Non-STIX Format
all_techniques = lift.get_techniques(stix_format=False)
all_techniques = lift.remove_revoked(all_techniques)
len(all_techniques)
techniques_df = json_normalize(all_techniques)
techniques_df.to_csv('all_techniques_non_stix.csv', index=False)
| notebooks/Export_All_Techniques.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:spectrum_analysis]
# language: python
# name: conda-env-spectrum_analysis-py
# ---
# ## Spectrum Package Test with EcoFOCI Data
import spectrum
| 2018/SpectrumTesst.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
from __future__ import print_function, division
import sys, os
import json
import math
import itertools, collections
import datetime as dt
import fiona
from shapely.geometry import shape
import geopandas as gp
import numpy as np
import statsmodels.formula.api as smf
from pandas.tools.plotting import plot_frame
from matplotlib import pyplot as plt
import seaborn as sns
#sns.set_context("poster")
# Presentation-sized ("talk") plotting context with enlarged fonts.
sns.set_context("talk", font_scale=1.4)
# Four darker, saturated HLS hues as the default color palette.
current_palette = sns.color_palette(sns.hls_palette(4, l=.35, s=.9))
sns.set_palette(current_palette)
sns.palplot(current_palette)  # preview the palette in the notebook
# Cycling iterator over the palette so successive plots pick successive colors.
palette = itertools.cycle(sns.color_palette())
# We use Alaska Albers shapefiles all around to be able to measure distances in metres. The WGS84 shapefiles from AICC and the Forest Service Active Fire Maps web portal were converted using GDAL:
#
# `ogr2ogr -f "ESRI Shapefile" outfile_AKAlbers.shp infile.shp -s_srs EPSG:4326 -t_SRS EPSG:3338`
# ### Reload saved dataframe from file
# ### Reload saved dataframe from file
# NOTE(review): hard-coded local volume path — this notebook only runs on the author's machine.
datadir = "/Volumes/SCIENCE_mobile_Mac/Fire/DATA_BY_PROJECT/2015VIIRSMODIS/GISout/"
# Cleaned fire-perimeter polygons; per the filename, hotspot counts were already joined.
cleanedfireperimDF = gp.GeoDataFrame.from_file(os.path.join(datadir, "cleanedFirePerimsWithFirepoints20160228.json"))
# Add a hectares column for SI units. Add yes/no black spruce fire column.
cleanedfireperimDF['Hectares'] = cleanedfireperimDF['Acres'] / 2.47105  # 1 ha = 2.47105 acres
cleanedfireperimDF['blacksprucefire'] = cleanedfireperimDF['PRIMARYFUE'] == "Black Spruce"
cleanedfireperimDF.describe()
# Columns to show when displaying perimeter records further below.
displayperimcols = [
    u'Acres', u'Date', u'FireName', 'cleanedID', u'CONTROLDAT',
    u'DISCOVERYD', u'FIREBEHAVI', u'GENERALCAU', u'LATITUDE',
    u'LONGITUDE', u'MGMTOPTION', u'OUTDATE', u'PRIMARYFUE', u'STRUCTURES', 'blacksprucefire',
    'MOD14count', 'VIIRSIcount']
# ### Loading fire detections for later comparison
firehotspots = "/Volumes/SCIENCE_mobile_Mac/Fire/DATA_BY_PROJECT/2015VIIRSMODIS/activefiremaps.fs.fed.us_data_fireptdata/"
mod14dir = "modis_fire_2015_344_ak_shapefile"
mod14shp = "modis_fire_2015_344_ak_AKAlbers.shp"
viirsIdir = "viirs_iband_fire_2015_344_ak_shapefile"
viirsIshp = "viirs_iband_fire_2015_344_ak_AKAlbers.shp"
mod14DF = gp.GeoDataFrame.from_file(os.path.join(firehotspots, mod14dir, mod14shp))
viirsIDF = gp.GeoDataFrame.from_file(os.path.join(firehotspots, viirsIdir, viirsIshp))
# ### We'll add the union of all buffered polygons
# Copy the CRS from the detections so the intersection tests below are consistent
# (filenames indicate both layers are in Alaska Albers).
cleanedfireperimDF.crs = mod14DF.crs
# Single (multi)polygon: union of all perimeters buffered by 1000 map units.
allbufferedpoly = cleanedfireperimDF.buffer(1000).unary_union
# %%timeit
mod14DF.head(100).geometry.intersects(allbufferedpoly)
# Flag every detection that falls inside any buffered fire perimeter.
mod14DF['infireperim'] = mod14DF.geometry.intersects(allbufferedpoly)
viirsIDF['infireperim'] = viirsIDF.geometry.intersects(allbufferedpoly)
# ### Loading climate divisions
gisdir = "/Volumes/SCIENCE_mobile_Mac/GENERAL_GIS/ncdc.noaa.gov/AK_CLIMATE_DIVISIONS/"
climatedivisions = "AK_divisions_AKAlbers_NAD83.shp"
# NOTE(review): this immediately overrides the .shp choice above — only the .gpkg is used.
climatedivisions = "AK_divisions_AKAlbers_NAD83.gpkg"
climatedivisionsDF = gp.GeoDataFrame.from_file(os.path.join(gisdir, climatedivisions))
climatedivisionsDF
# Quick CRS sanity checks between layers.
mod14DF.crs == climatedivisionsDF.crs
climatedivisionsDF.crs
mod14DF.crs
cleanedfireperimDF.sort_values('Acres', ascending=False).head(10).intersection(climatedivisionsDF)
cleanedfireperimDF.sort_values('Acres', ascending=False).head(10)
# ### Plotting
# +
SAVEFIG = False  # set to True to write the figure to disk at the end of the cell
palette = itertools.cycle(sns.color_palette())
xlim = (-10, 105)
ylim = (-10, 205)
# Per-fire VIIRS I-band vs. MODIS detection counts, colored by fire size (ha).
ax1 = plot_frame(cleanedfireperimDF, kind='scatter',
    x="MOD14count",
    y="VIIRSIcount",
    xlim=xlim,
    ylim=ylim,
    s=60,
    c='Hectares',
    cmap="gist_stern_r",
    vmin=0,
    vmax=5000,
    colorbar=True,
    legend=True,
    figsize=(15, 12),
    sharex=False,
    zorder=2
)
# Overlay an OLS regression line (scatter suppressed; drawn below the points).
sns.regplot(
    x="MOD14count",
    y="VIIRSIcount",
    data=cleanedfireperimDF,
    ax=ax1,
    color="grey",
    scatter=False,
    line_kws={'zorder': 1}
)
#ax1.set_aspect("equal")
f = plt.gcf()
#f.subplots_adjust(bottom=0.4)
#x = np.array([xlim[0], xlim[1]])
#y = intercept + slope * x
#plt.plot(x, y, 'r-', color=next(palette))
if SAVEFIG:
    f.savefig('/Users/cwaigl/Desktop/samplefigure_close')
# +
# Two-panel comparison: detection count vs. fire size for MODIS (left panel)
# and VIIRS I-band (right panel), split into black-spruce vs. other fuels,
# each group with its own OLS regression line.
plotpalette = sns.xkcd_palette(['teal', 'dark orange', ])
palette = itertools.cycle(plotpalette)
xlim = (-100, 140000)
ylim = (-100, 4500)
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(20, 8), sharex=True, sharey=True)
# Scatter points — MODIS panel.
plot_frame(cleanedfireperimDF[
    cleanedfireperimDF.PRIMARYFUE == "Black Spruce"],
    kind='scatter', x='Hectares', y="MOD14count", s=50,
    ax=ax0, label="Black spruce - MODIS", color=next(palette))
plot_frame(cleanedfireperimDF[
    cleanedfireperimDF.PRIMARYFUE != "Black Spruce"],
    kind='scatter', x='Hectares', y="MOD14count", s=50,
    ax=ax0, label="Other - MODIS", color=next(palette))
# Restart the color cycle so the VIIRS panel uses the same two colors.
palette = itertools.cycle(plotpalette)
# Scatter points — VIIRS I-band panel.
plot_frame(cleanedfireperimDF[
    cleanedfireperimDF.PRIMARYFUE == "Black Spruce"],
    kind='scatter', x='Hectares', y="VIIRSIcount", s=50,
    ax=ax1, label="Black spruce - VIIRS I-band", color=next(palette))
plot_frame(cleanedfireperimDF[
    cleanedfireperimDF.PRIMARYFUE != "Black Spruce"],
    kind='scatter', x='Hectares', y="VIIRSIcount", s=50,
    ax=ax1, label="Other - VIIRS I-band", color=next(palette))
# Regression lines — MODIS panel (cycle restarted again to match colors).
palette = itertools.cycle(plotpalette)
sns.regplot(
    x="Hectares",
    y="MOD14count",
    data=cleanedfireperimDF.loc[cleanedfireperimDF.blacksprucefire],
    ax=ax0,
    color=next(palette),
    scatter=False,
    line_kws={'zorder': 1, 'lw': 3}
)
sns.regplot(
    x="Hectares",
    y="MOD14count",
    data=cleanedfireperimDF.loc[~cleanedfireperimDF.blacksprucefire],
    ax=ax0,
    color=next(palette),
    scatter=False,
    line_kws={'zorder': 1, 'lw': 3}
)
# Regression lines — VIIRS I-band panel.
palette = itertools.cycle(plotpalette)
sns.regplot(
    x="Hectares",
    y="VIIRSIcount",
    data=cleanedfireperimDF.loc[cleanedfireperimDF.blacksprucefire],
    ax=ax1,
    color=next(palette),
    scatter=False,
    line_kws={'zorder': 1, 'lw': 3}
)
sns.regplot(
    x="Hectares",
    y="VIIRSIcount",
    data=cleanedfireperimDF.loc[~cleanedfireperimDF.blacksprucefire],
    ax=ax1,
    color=next(palette),
    scatter=False,
    line_kws={'zorder': 1, 'lw': 3}
)
# Shared axis limits, labels, and legends for both panels.
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
ax1.set_xlim(xlim)
ax1.set_ylim(ylim)
ax0.set_ylabel('Fire pixel count')
ax0.set_title('Operational MOD14/MYD14 product')
ax1.set_title('Operational VIIRS I-band product')
ax0.legend(loc='upper left')
ax1.legend(loc='upper left')
# -
fig.savefig('/Users/cwaigl/Desktop/pixelcount_fuel.png', dpi=200, bbox_inches='tight')
# ### Inspecting the cleaned perimeter dataset
# Here is the reason I kept these columns. I'll change the case of the management options...
# Frequency tables for the main categorical attributes.
cleanedfireperimDF['PRIMARYFUE'].value_counts()
cleanedfireperimDF['FIREBEHAVI'].value_counts()
cleanedfireperimDF['MGMTOPTION'].value_counts()
cleanedfireperimDF['GENERALCAU'].value_counts()
# Ten largest fires by acreage.
cleanedfireperimDF.sort_values(by="Acres", ascending=False).head(n=10)[displayperimcols]
# +
# Detections per hectare: a per-fire detection density for each sensor.
cleanedfireperimDF['RelativeVIIRSICount'] = cleanedfireperimDF['VIIRSIcount'] / cleanedfireperimDF['Hectares']
cleanedfireperimDF['RelativeMOD14Count'] = cleanedfireperimDF['MOD14count'] / cleanedfireperimDF['Hectares']
# Large fires (> 1000 ha) with the lowest VIIRS detection density.
cleanedfireperimDF[cleanedfireperimDF['Hectares'] > 1000].sort_values(
    by="RelativeVIIRSICount", ascending=True).head(n=20)[
    displayperimcols + ['Hectares', 'RelativeVIIRSICount', 'RelativeMOD14Count']]
# -
# Now let's plot a few of the largest ones
cleanedfireperimDF[cleanedfireperimDF.FireName.isin(['Sushgitit Hills', 'Rock', 'Isahultila', ])].plot()
cleanedfireperimDF[cleanedfireperimDF.FireName.isin(['Sushgitit Hills', 'Rock', 'Isahultila', ])].geometry.buffer(1000).plot()
# ## Cycling through fire perimeters and fire detections (MOD14 and VIIRS I-band)
# How many fires were not detected? Further data inspection
print("MOD14")
print("Number of undetected fires: {}".format(sum(cleanedfireperimDF.MOD14count == 0)))
print(("Percentage of undetected fires: {:.2f}".format(100*sum(cleanedfireperimDF.MOD14count == 0)/len(cleanedfireperimDF))))
print(("Acreage of largest undetected fire: {}".format(max(cleanedfireperimDF.Acres[cleanedfireperimDF.MOD14count == 0]))))
print("VIIRS I-band")
print("Number of undetected fires: {}".format(sum(cleanedfireperimDF.VIIRSIcount == 0)))
print(("Percentage of undetected fires: {:.2f}".format(100*sum(cleanedfireperimDF.VIIRSIcount == 0)/len(cleanedfireperimDF))))
print(("Acreage of largest undetected fire: {}".format(max(cleanedfireperimDF.Acres[cleanedfireperimDF.VIIRSIcount == 0]))))
print("Largest pixel count for individual fire:")
print("MOD14: {}".format(max(cleanedfireperimDF.MOD14count)))
print("VIIRS I-band: {}".format(max(cleanedfireperimDF.VIIRSIcount)))
# Detections inside vs. outside buffered perimeters, and the outside/inside ratio.
print(sum(mod14DF.infireperim), sum(~mod14DF.infireperim), sum(~mod14DF.infireperim)/sum(mod14DF.infireperim))
print(sum(viirsIDF.infireperim), sum(~viirsIDF.infireperim), sum(~viirsIDF.infireperim)/sum(viirsIDF.infireperim))
# Record(s) with the maximum MODIS count; fires named "Sockeye*".
cleanedfireperimDF[cleanedfireperimDF.MOD14count==max(cleanedfireperimDF.MOD14count)][displayperimcols]
cleanedfireperimDF[cleanedfireperimDF.FireName.str.startswith("Sockeye")][displayperimcols]
# Mean detection counts / acreage grouped by fuel type and management option.
cleanedfireperimDF.groupby(["PRIMARYFUE"])["MOD14count"].mean()
cleanedfireperimDF.groupby(["PRIMARYFUE"])["VIIRSIcount"].mean()
cleanedfireperimDF.groupby(["MGMTOPTION"])["MOD14count"].mean()
cleanedfireperimDF.groupby(["MGMTOPTION"])["Acres"].mean()
# ## Now VIIRS I-band
# ## Data inspection for badly detected fires etc.
# Largest fires with fewer than 10 VIIRS detections.
cleanedfireperimDF[
    (cleanedfireperimDF.VIIRSIcount <10)
    #& (cleanedfireperimDF.MOD14count == 0)
].sort_values(
    by='Acres', ascending=False
).head(15)[displayperimcols]
# Big (> 10000 acre) fires with surprisingly few (< 50) VIIRS detections.
cleanedfireperimDF[
    (cleanedfireperimDF.VIIRSIcount < 50)
    & (cleanedfireperimDF.Acres > 10000)
    # & (cleanedfireperimDF.MOD14count > 10)
][displayperimcols]
# Pairwise correlations between size, counts, and location.
cleanedfireperimDF[['Acres', 'MOD14count', 'VIIRSMcount', 'VIIRSIcount', 'LATITUDE', 'LONGITUDE']].corr()
cleanedfireperimDF.blacksprucefire.value_counts()
# ### Statistics!
# Somehow Pandas-style plotting doesn't work on GeoPandas GeoDataFrames. Until I find a better solution, I'm making a copy that's just a regular dataframe:
keepperimcols = list(cleanedfireperimDF.columns)
keepperimcols.remove('geometry')
cleanedfireperimDF_forstats = cleanedfireperimDF[keepperimcols].copy()
type(cleanedfireperimDF_forstats)
# OLS fit: VIIRS count as a function of MODIS count.
results = smf.ols('VIIRSIcount ~ MOD14count', cleanedfireperimDF).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
# OLS fit: MODIS count vs. acreage, black-spruce fires only.
results = smf.ols('MOD14count ~ Acres', cleanedfireperimDF.loc[cleanedfireperimDF['blacksprucefire']]).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
# OLS fit: VIIRS count vs. acreage, black-spruce fires only.
results = smf.ols('VIIRSIcount ~ Acres', cleanedfireperimDF.loc[cleanedfireperimDF['blacksprucefire']]).fit()
print(results.summary())
intercept, slope = results.params
r2 = results.rsquared
print(results.conf_int())
print(slope, intercept, r2)
results.params
# Re-fit the count-vs-count model (repeats the first fit above).
results = smf.ols('VIIRSIcount ~ MOD14count', cleanedfireperimDF).fit()
print(results.summary())
ax1 = cleanedfireperimDF["Acres"].plot(kind='hist', bins=50, legend=True)
# +
# Acres vs. VIIRS count (fires <= 20000 acres) split by management option.
palette = itertools.cycle(sns.color_palette())
ax1 = cleanedfireperimDF[
    (cleanedfireperimDF.MGMTOPTION == "Critical")& (cleanedfireperimDF.Acres <= 20000 )
][['Acres', 'VIIRSIcount']].plot(
    kind='scatter', x='Acres', y="VIIRSIcount", s=50, label="Critical", color=next(palette))
cleanedfireperimDF_forstats[
    (cleanedfireperimDF.MGMTOPTION == "Full")& (cleanedfireperimDF.Acres <= 20000 )
][['Acres', 'VIIRSIcount']].plot(
    kind='scatter', x='Acres', y="VIIRSIcount", ax=ax1, s=50, label="Full", color=next(palette), alpha=0.5)
cleanedfireperimDF_forstats[
    (cleanedfireperimDF.MGMTOPTION == "Modified")& (cleanedfireperimDF.Acres <= 20000 )
][['Acres', 'VIIRSIcount']].plot(
    kind='scatter', x='Acres', y="VIIRSIcount", ax=ax1, s=50, label="Modified", color=next(palette), alpha=0.7)
cleanedfireperimDF_forstats[
    (cleanedfireperimDF.MGMTOPTION == "Limited") & (cleanedfireperimDF.Acres <= 20000 )
][['Acres', 'VIIRSIcount']].plot(kind='scatter', x='Acres', y="VIIRSIcount", ax=ax1, s=50, label="Limited", color=next(palette), alpha=0.7)
# -
# %debug
# Export the perimeters (reprojected to WGS84) as a GeoPackage.
cleanedfireperimDF.drop(['blacksprucefire'], axis=1).to_crs(epsg=4326).to_file(
    os.path.join(datadir, "cleanedFirePerimsWithFirepoints20160228_new.gpkg"),
    driver="GPKG")
cleanedfireperimDF.drop(['blacksprucefire'], axis=1).columns
# Store the in-perimeter flag as 0/1 integers — presumably for shapefile
# attribute-type compatibility (TODO confirm).
mod14DF['inbufferedfireperim'] = 0
viirsIDF['inbufferedfireperim'] = 0
mod14DF.loc[mod14DF.infireperim, 'inbufferedfireperim'] = 1
viirsIDF.loc[viirsIDF.infireperim, 'inbufferedfireperim'] = 1
mod14DF.head(10)
# Write flagged detections back out as shapefiles (boolean column dropped).
mod14DF.drop(['infireperim'], axis=1).to_file(os.path.join(datadir, "mod14_with_flags.shp"))
viirsIDF.drop(['infireperim'], axis=1).to_file(os.path.join(datadir, "viirsI_with_flags.shp"))
| ipynb_viirs1/2015FireGISStats-plots_for_paper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <a href="https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial2.ipynb" target="_parent"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open in Kaggle"/></a>
# -
# # Tutorial 2: Hidden Markov Model
# **Week 3, Day 2: Hidden Dynamics**
#
# **By Neuromatch Academy**
#
# __Content creators:__ <NAME> with help from <NAME> and <NAME>
#
# __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# __Production editor:__ <NAME>
# # Tutorial objectives
#
# *Estimated timing of tutorial: 1 hour, 5 minutes*
#
# The world around us is often changing, but we only have noisy sensory measurements. Similarly, neural systems switch between discrete states (e.g. sleep/wake) which are observable only indirectly, through their impact on neural activity. **Hidden Markov Models** (HMM) let us reason about these unobserved (also called hidden or latent) states using a time series of measurements.
#
# Here we'll learn how changing the HMM's transition probability and measurement noise impacts the data. We'll look at how uncertainty increases as we predict the future, and how to gain information from the measurements.
#
# We will use a binary latent variable $s_t \in \{0,1\}$ that switches randomly between the two states, and a 1D Gaussian emission model $m_t|s_t \sim \mathcal{N}(\mu_{s_t},\sigma^2_{s_t})$ that provides evidence about the current state.
#
# By the end of this tutorial, you should be able to:
# - Describe how the hidden states in a Hidden Markov model evolve over time, both in words, mathematically, and in code
# - Estimate hidden states from data using forward inference in a Hidden Markov model
# - Describe how measurement noise and state transition probabilities affect uncertainty in predictions in the future and the ability to estimate hidden states.
#
# <br>
#
# **Summary of Exercises**
# 1. Generate data from an HMM.
# 2. Calculate how predictions propagate in a Markov Chain without evidence.
# 3. Combine new evidence and prediction from past evidence to estimate hidden states.
# + cellView="form"
# @title Video 1: Introduction
from ipywidgets import widgets

# Bilibili mirror (for viewers who cannot access YouTube).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  # Thin IFrame wrapper that builds the Bilibili embedded-player URL.
  class BiliVideo(IFrame):
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id="BV1Hh411r7JE", width=854, height=480, fs=1)
  print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
  display(video)

# YouTube version of the same lecture.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id="pIXxVl1A4l0", width=854, height=480, fs=1, rel=0)
  print('Video available at https://youtube.com/watch?v=' + video.id)
  display(video)

# Tabbed widget letting the learner pick a video host.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# # Setup
# +
# Imports
import numpy as np
import time
from scipy import stats
from scipy.optimize import linear_sum_assignment
from collections import namedtuple
import matplotlib.pyplot as plt
from matplotlib import patches
# + cellView="form"
#@title Figure Settings
# import ipywidgets as widgets # interactive display
# NOTE(review): `IPython.html.widgets` is a deprecated import path; the
# ipywidgets import on the following line is the modern equivalent.
from IPython.html import widgets
from ipywidgets import interactive, interact, HBox, Layout,VBox
from IPython.display import HTML
# %config InlineBackend.figure_format = 'retina'
# Apply the shared NMA matplotlib style sheet (fetched from GitHub).
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/NMA2020/nma.mplstyle")
# + cellView="form"
# @title Plotting Functions
def plot_hmm1(model, states, measurements, flag_m=True):
  """Plots HMM states and measurements for 1d states and measurements.

  Args:
    model (GaussianHMM1D): model whose `means`/`vars`/`n_components` are used
      to place the state trace and draw the emission densities.
    states (numpy array of ints): sampled latent-state indices (0 or 1).
    measurements (numpy array of floats): sampled 1D measurements.
    flag_m (bool): if True, draw each state's Gaussian measurement density
      sideways at the right-hand edge of the plot (x = T).
  """
  T = states.shape[0]
  nsteps = states.size
  fig, ax1 = plt.subplots(figsize=(8, 4))

  # Map each discrete state index to its emission mean so the state trace sits
  # on the same -1/+1 scale as the measurements.
  states_forplot = list(map(lambda s: model.means[s], states))
  # BUG FIX: the original referenced the undefined name `nstep`; the local
  # variable is `nsteps`.
  ax1.step(np.arange(nsteps), states_forplot, "-", where="mid", alpha=1.0, c="green")
  ax1.set_xlabel("Time")
  ax1.set_ylabel("Latent State", c="green")
  ax1.set_yticks([-1, 1])
  ax1.set_yticklabels(["-1", "+1"])
  ax1.set_xticks(np.arange(0, T, 10))

  # Second y-axis for the measurements.
  ax2 = ax1.twinx()
  ax2.set_ylabel("Measurements", c="crimson")

  if flag_m:
    # Vertical reference line at x = T where the densities are anchored.
    ax2.plot([T, T], ax2.get_ylim(), color="maroon", alpha=0.6)
    for i in range(model.n_components):
      mu = model.means[i]
      scale = np.sqrt(model.vars[i])
      rv = stats.norm(mu, scale)
      num_points = 50
      domain = np.linspace(mu - 3 * scale, mu + 3 * scale, num_points)
      left = np.repeat(float(T), num_points)
      offset = rv.pdf(domain)
      offset *= T / 15  # scale the pdf so it is visible on the time axis
      lbl = "measurement" if i == 0 else ""
      ax2.fill_betweenx(domain, left + offset, left, alpha=0.3, lw=2, color="maroon", label=lbl)
  # BUG FIX: `nstep` -> `nsteps` here as well.
  ax2.scatter(np.arange(nsteps), measurements, c="crimson", s=4)
  ax2.legend(loc="upper left")
  # Align the state axis with the measurement axis.
  ax1.set_ylim(ax2.get_ylim())
  plt.show(fig)
def plot_marginal_seq(predictive_probs, switch_prob):
  """Plot the sequence of marginal predictive state distributions.

  Args:
    predictive_probs (list of numpy vectors): sequence of predictive
      probability vectors, one 2-vector per time step.
    switch_prob (float): Probability of switching states.
  """
  num_steps = len(predictive_probs)
  timeline = np.arange(num_steps)
  # Split the per-step probability vectors into one series per state.
  in_neg_state = [probs[0] for probs in predictive_probs]
  in_pos_state = [probs[1] for probs in predictive_probs]

  fig, ax = plt.subplots()
  ax.plot(timeline, in_neg_state, color="blue")
  ax.plot(timeline, in_pos_state, color="orange")
  ax.legend([
      "prob in state -1", "prob in state 1"
  ])
  # Annotate the figure with the switch probability used for this run.
  annotation = "switching probability={}".format(switch_prob)
  ax.text(num_steps/2, 0.05, annotation, fontsize=12,
          bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.6))
  ax.set_xlabel("Time")
  ax.set_ylabel("Probability")
  ax.set_title("Forgetting curve in a changing world")
  plt.show(fig)
def plot_evidence_vs_noevidence(posterior_matrix, predictive_probs):
  """Plot average posterior probabilities with evidence vs. without evidence.

  Args:
    posterior_matrix (2d numpy array of floats): posterior probabilities of
      state 1 computed from evidence, shape (samples, time).
    predictive_probs (numpy array of floats): predictive probabilities of
      state 1 without evidence, one entry per time step.
  """
  num_samples, num_steps = posterior_matrix.shape
  timeline = np.arange(num_steps)
  # Average across sample trajectories at each time step.
  mean_posterior = posterior_matrix.mean(axis=0)

  fig, ax = plt.subplots(1)
  # Dashed reference line at probability 0.
  ax.plot([0.0, num_steps], [0., 0.], color="red", linestyle="dashed")
  # No-evidence prediction curve.
  ax.plot(timeline, predictive_probs, c="orange", linewidth=2, label="No evidence")
  # Individual posterior samples as a faint scatter cloud.
  sample_times = np.tile(timeline, (num_samples, 1))
  ax.scatter(sample_times, posterior_matrix, s=0.8, c="green", alpha=0.3, label="With evidence(Sample)")
  # Sample-averaged posterior.
  ax.plot(timeline, mean_posterior, c='green', linewidth=2, label="With evidence(Average)")
  ax.legend()
  ax.set_yticks([0.0, 0.25, 0.5, 0.75, 1.0])
  ax.set_xlabel("Time")
  ax.set_ylabel("Probability in State +1")
  ax.set_title("Gain confidence with evidence")
  plt.show(fig)
def plot_forward_inference(model, states, measurements, states_inferred,
                           predictive_probs, likelihoods, posterior_probs,
                           t=None,
                           flag_m=True, flag_d=True, flag_pre=True, flag_like=True, flag_post=True,
                           ):
  """Plot ground truth state sequence with noisy measurements, and ground truth states v.s. inferred ones

  Args:
    model (GaussianHMM1D-like): HMM providing `means`, `vars`, `n_components`
    states (numpy vector): vector of 0 or 1(int or Bool), the sequences of true latent states
    measurements (numpy vector): the Gaussian measurements at each time point
    states_inferred (numpy vector): vector of 0 or 1(int or Bool), the sequences of inferred latent states
    predictive_probs (numpy array): predictive state probabilities per step — assumed shape (T, 2), TODO confirm
    likelihoods (numpy array): per-step measurement likelihoods per state — normalized in place below
    posterior_probs (numpy array): per-step posterior state probabilities
    t (int or None): time step at which prior/likelihood/posterior bars are drawn; defaults to the last step
    flag_m, flag_d, flag_pre, flag_like, flag_post (bool): toggles for the
      measurement densities, data points, prior, likelihood and posterior overlays

  Returns:
    fig (matplotlib Figure)
  """
  T = states.shape[0]
  if t is None:
    t = T-1
  nsteps = states.size
  fig, ax1 = plt.subplots(figsize=(11,6))

  # True state sequence mapped onto the emission means (-1/+1); the segment
  # after time `t` is drawn faded since inference has not reached it yet.
  # BUG FIX: the original used the undefined name `nstep`; the local is `nsteps`.
  states_forplot = list(map(lambda s: model.means[s], states))
  ax1.step(np.arange(nsteps)[:t+1], states_forplot[:t+1], "-", where="mid", alpha=1.0, c="green", label="true")
  ax1.step(np.arange(nsteps)[t+1:], states_forplot[t+1:], "-", where="mid", alpha=0.3, c="green", label="")

  # Posterior curve: interpolate between the two state means weighted by the
  # posterior probability of state 1.
  delta = model.means[1] - model.means[0]
  states_interpolation = model.means[0] + delta * posterior_probs[:,1]
  if flag_post:
    ax1.step(np.arange(nsteps)[:t+1], states_interpolation[:t+1], "-", where="mid", c="grey", label="posterior")

  ax1.set_xlabel("Time")
  ax1.set_ylabel("Latent State", c="green")
  ax1.set_yticks([-1, 1])
  ax1.set_yticklabels(["-1", "+1"])
  ax1.legend(bbox_to_anchor=(0,1.02,0.2,0.1), borderaxespad=0, ncol=2)

  # Second axis for measurements, with limits padded to at least [-1.2, 1.2].
  ax2 = ax1.twinx()
  ax2.set_ylim(
      min(-1.2, np.min(measurements)),
      max(1.2, np.max(measurements))
  )
  if flag_d:
    ax2.scatter(np.arange(nsteps)[:t+1], measurements[:t+1], c="crimson", s=4, label="measurement")
  ax2.set_ylabel("Measurements", c="crimson")

  # Measurement model densities drawn sideways at the right edge (x = T).
  if flag_m:
    for i in range(model.n_components):
      mu = model.means[i]
      scale = np.sqrt(model.vars[i])
      rv = stats.norm(mu, scale)
      num_points = 50
      domain = np.linspace(mu-3*scale, mu+3*scale, num_points)
      left = np.repeat(float(T), num_points)
      offset = rv.pdf(domain)
      offset *= T /15  # scale pdf so it is visible on the time axis
      lbl = ""
      ax2.fill_betweenx(domain, left+offset, left, alpha=0.3, lw=2, color="maroon", label=lbl)

  # Horizontal probability bars at time `t`, grouped around each state mean.
  ymin, ymax = ax2.get_ylim()
  width = 0.1 * (ymax-ymin) / 2.0
  centers = [-1.0, 1.0]
  bar_scale = 15

  # Today's prior (predictive probabilities), left of each state's center.
  data = predictive_probs
  if flag_pre:
    for i in range(model.n_components):
      domain = np.array([centers[i]-1.5*width, centers[i]-0.5*width])
      left = np.array([t,t])
      offset = np.array([data[t,i]]*2)
      offset *= bar_scale
      lbl = "todays prior" if i == 0 else ""
      ax2.fill_betweenx(domain, left+offset, left, alpha=0.3, lw=2, color="dodgerblue", label=lbl)

  # Likelihoods, normalized across states so each step's bars sum to 1.
  # NOTE(review): the in-place `/=` mutates the caller's `likelihoods` array.
  data = likelihoods
  data /= np.sum(data,axis=-1, keepdims=True)
  if flag_like:
    for i in range(model.n_components):
      domain = np.array([centers[i]+0.5*width, centers[i]+1.5*width])
      left = np.array([t,t])
      offset = np.array([data[t,i]]*2)
      offset *= bar_scale
      lbl = "likelihood" if i == 0 else ""
      ax2.fill_betweenx(domain, left+offset, left, alpha=0.3, lw=2, color="crimson", label=lbl)

  # Posteriors, centered on each state mean.
  data = posterior_probs
  if flag_post:
    for i in range(model.n_components):
      domain = np.array([centers[i]-0.5*width, centers[i]+0.5*width])
      left = np.array([t,t])
      offset = np.array([data[t,i]]*2)
      offset *= bar_scale
      lbl = "posterior" if i == 0 else ""
      ax2.fill_betweenx(domain, left+offset, left, alpha=0.3, lw=2, color="grey", label=lbl)

  # Vertical marker at the current inference time (kept as in the original:
  # both conditions may draw it, darkening the line when they overlap).
  if t<T-1:
    ax2.plot([t,t],ax2.get_ylim(), color='black',alpha=0.6)
  if flag_pre or flag_like or flag_post:
    ax2.plot([t,t],ax2.get_ylim(), color='black',alpha=0.6)

  ax2.legend(bbox_to_anchor=(0.4,1.02,0.6, 0.1), borderaxespad=0, ncol=4)
  ax1.set_ylim(ax2.get_ylim())
  return fig
# plt.show(fig)
# -
# ---
# # Section 1: Binary HMM with Gaussian measurements
#
# In contrast to last tutorial, the latent state in an HMM is not fixed, but may switch to a different state at each time step. The time dependence is simple: the probability of the state at time $t$ is wholly determined by the state at time $t-1$. This is called the **Markov property** and the dependency of the whole state sequence $\{s_1,...,s_t\}$ can be described by a chain structure called a Markov Chain. You have seen a Markov chain in the [pre-reqs Statistics day](https://compneuro.neuromatch.io/tutorials/W0D5_Statistics/student/W0D5_Tutorial2.html#section-1-2-markov-chains) and in the [Linear Systems Tutorial 2](https://compneuro.neuromatch.io/tutorials/W2D2_LinearSystems/student/W2D2_Tutorial2.html).
#
#
# **Markov model for binary latent dynamics**
#
# Let's reuse the binary switching process you saw in the [Linear Systems Tutorial 2](https://compneuro.neuromatch.io/tutorials/W2D2_LinearSystems/student/W2D2_Tutorial2.html): our state can be either +1 or -1. The probability of switching to state $s_t=j$ from the previous state $s_{t-1}=i$ is the conditional probability distribution $p(s_t = j| s_{t-1} = i)$. We can summarize these as a $2\times 2$ matrix we will denote $D$ for Dynamics.
#
# \begin{align*}
# D = \begin{bmatrix}p(s_t = +1 | s_{t-1} = +1) & p(s_t = -1 | s_{t-1} = +1)\\p(s_t = +1 | s_{t-1} = -1)& p(s_t = -1 | s_{t-1} = -1)\end{bmatrix}
# \end{align*}
#
# $D_{ij}$ represents the transition probability to switch from state $i$ to state $j$ at the next time step. Please note that this is in contrast to the meaning used in the intro and in Linear Systems (their transition matrices are the transpose of ours) but syncs with the [pre-reqs Statistics day](https://compneuro.neuromatch.io/tutorials/W0D5_Statistics/student/W0D5_Tutorial2.html#section-1-2-markov-chains).
#
# We can represent the probability of the _current_ state as a 2-dimensional vector
#
# $ P_t = [p(s_t = +1), p(s_t = -1)]$
#
# The entries are the probability that the current state is +1 and the probability that the current state is -1, so these must sum up to 1.
#
# We then update the probabilities over time following the Markov process:
#
# \begin{align*}
# P_{t}= P_{t-1}D \tag{1}
# \end{align*}
#
# If you know the state, the entries of $P_{t-1}$ would be either 1 or 0 as there is no uncertainty.
#
# **Measurements**
#
# In a _Hidden_ Markov model, we cannot directly observe the latent states $s_t$. Instead we get noisy measurements $m_t\sim p(m|s_t)$.
# + cellView="form"
# @title Video 2: Binary HMM with Gaussian measurements
from ipywidgets import widgets

# Bilibili mirror (for viewers who cannot access YouTube).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  # Thin IFrame wrapper that builds the Bilibili embedded-player URL.
  class BiliVideo(IFrame):
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id="BV1Sw41197Mj", width=854, height=480, fs=1)
  print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
  display(video)

# YouTube version of the same lecture.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id="z6KbKILMIPU", width=854, height=480, fs=1, rel=0)
  print('Video available at https://youtube.com/watch?v=' + video.id)
  display(video)

# Tabbed widget letting the learner pick a video host.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# ## Coding Exercise 1.1: Simulate a binary HMM with Gaussian measurements
#
# In this exercise, you will implement a binary HMM with Gaussian measurements. Your HMM will start in State +1 and transition between states (both $-1 \rightarrow 1$ and $1 \rightarrow -1$) with probability `switch_prob`. Each state emits measurements drawn from a Gaussian with mean $+1$ for State +1 and mean $-1$ for State -1. The standard deviation of both states is given by `noise_level`.
#
# The exercises in the next cell have three steps:
#
# **STEP 1**. In `create_HMM`, complete the transition matrix `transmat_` (i.e., $D$) in the code.
# \begin{equation*}
# D =
# \begin{pmatrix}
# p_{\rm stay} & p_{\rm switch} \\
# p_{\rm switch} & p_{\rm stay} \\
# \end{pmatrix}
# \end{equation*}
# with $p_{\rm stay} = 1 - p_{\rm switch}$.
#
# **STEP 2**. In `create_HMM`, specify gaussian measurements $m_t | s_t$, by specifying the means for each state, and the standard deviation.
#
# **STEP 3**. In `sample`, use the transition matrix to specify the probabilities for the next state $s_t$ given the previous state $s_{t-1}$.
#
#
# In this exercise, we will use a helper data structure named `GaussianHMM1D`, implemented in the following cell. This allows us to set the information we need about the HMM model (the starting probabilities of state, the transition matrix, the means and variances of the Gaussian distributions, and the number of components) and easily access it. For example, we can set our model using:
#
#
# ```
# model = GaussianHMM1D(
# startprob = startprob_vec,
# transmat = transmat_mat,
# means = means_vec,
# vars = vars_vec,
# n_components = n_components
# )
# ```
# and then access the variances as:
#
# ```
# model.vars
# ```
#
# Also note that we refer to the states as `0` and `1` in the code, instead of as `-1` and `+1`.
# Lightweight container for a binary-state HMM with 1D Gaussian emissions:
# start-state probabilities, state-transition matrix, per-state emission
# means and variances, and the number of discrete states.
GaussianHMM1D = namedtuple('GaussianHMM1D', ['startprob', 'transmat','means','vars','n_components'])
# +
# Exercise skeleton: the `...` placeholders are for the learner to fill in
# (a completed version appears in the "to_remove solution" cell later in
# this notebook). The NotImplementedError guard must be removed once the
# STEP 1 and STEP 2 placeholders are completed.
def create_HMM(switch_prob=0.1, noise_level=1e-1, startprob=[1.0, 0.0]):
  """Create an HMM with binary state variable and 1D Gaussian measurements

  The probability to switch to the other state is `switch_prob`. Two
  measurement models have mean 1.0 and -1.0 respectively. `noise_level`
  specifies the standard deviation of the measurement models.

  Args:
    switch_prob (float): probability to jump to the other state
    noise_level (float): standard deviation of measurement models. Same for
      two components
    startprob (list of float): initial state probabilities

  Returns:
    model (GaussianHMM1D instance): the described HMM

  Raises:
    NotImplementedError: always, until the exercise placeholders are filled in.
  """
  ############################################################################
  # Insert your code here to:
  # * Create the transition matrix, `transmat_mat` so that the odds of
  # switching is `switch_prob`
  # * Set the measurement model variances, to `noise_level ^ 2` for both
  # states
  raise NotImplementedError("`create_HMM` is incomplete")
  ############################################################################
  n_components = 2
  startprob_vec = np.asarray(startprob)
  # STEP 1: Transition probabilities
  transmat_mat = ...  # np.array([[...], [...]])
  # STEP 2: Measurement probabilities
  # Mean measurements for each state
  means_vec = ...
  # Noise for each state
  vars_vec = np.ones(2) * ...
  # Initialize model
  model = GaussianHMM1D(
    startprob = startprob_vec,
    transmat = transmat_mat,
    means = means_vec,
    vars = vars_vec,
    n_components = n_components
  )
  return model
# Exercise skeleton: the `...` placeholder (STEP 3) is for the learner to
# fill in; remove the NotImplementedError guard once completed.
def sample(model, T):
  """Generate samples from the given HMM

  Args:
    model (GaussianHMM1D): the HMM with Gaussian measurement
    T (int): number of time steps to sample

  Returns:
    M (numpy vector): the series of measurements
    S (numpy vector): the series of latent states

  Raises:
    NotImplementedError: always, until the exercise placeholder is filled in.
  """
  ############################################################################
  # Insert your code here to:
  # * take row i from `model.transmat` to get the transition probabilities
  # from state i to all states
  raise NotImplementedError("`sample` is incomplete")
  ############################################################################
  # Initialize S and M
  S = np.zeros((T,),dtype=int)
  M = np.zeros((T,))
  # Calculate initial state
  S[0] = np.random.choice([0,1],p=model.startprob)
  # Latent state at time `t` depends on `t-1` and the corresponding transition probabilities to other states
  for t in range(1,T):
    # STEP 3: Get vector of probabilities for all possible `S[t]` given a particular `S[t-1]`
    transition_vector = ...
    # Calculate latent state at time `t`
    S[t] = np.random.choice([0,1],p=transition_vector)
  # Calculate measurements conditioned on the latent states
  # Since measurements are independent of each other given the latent states, we could calculate them as a batch
  means = model.means[S]
  scales = np.sqrt(model.vars[S])
  M = np.random.normal(loc=means, scale=scales, size=(T,))
  return M, S
# Set random seed so the sampled trajectory is reproducible
np.random.seed(101)

# Set parameters of HMM: 100 steps, 10% switch probability, noisy emissions
T = 100
switch_prob = 0.1
noise_level = 2.0

# Create HMM
model = create_HMM(switch_prob=switch_prob, noise_level=noise_level)

# Sample from HMM and sanity-check the output shapes
M, S = sample(model,T)
assert M.shape==(T,)
assert S.shape==(T,)

# Print values (first five measurements and states)
print(M[:5])
print(S[:5])
# +
# to_remove solution
def create_HMM(switch_prob=0.1, noise_level=1e-1, startprob=[1.0, 0.0]):
    """Build a binary-state HMM with 1D Gaussian measurement models.

    The chain jumps to the other state with probability `switch_prob`;
    the two measurement models are Gaussians with means -1.0 and 1.0 and
    a common standard deviation of `noise_level`.

    Args:
        switch_prob (float): probability to jump to the other state
        noise_level (float): standard deviation of measurement models. Same for
            two components
        startprob (list): initial state distribution

    Returns:
        model (GaussianHMM instance): the described HMM
    """
    n_components = 2
    startprob_vec = np.asarray(startprob)
    # STEP 1: stay with probability 1 - switch_prob, switch otherwise
    stay = 1. - switch_prob
    transmat_mat = np.array([[stay, switch_prob],
                             [switch_prob, stay]])
    # STEP 2: state-dependent measurement means ...
    means_vec = np.array([-1.0, 1.0])
    # ... and a shared measurement variance (std squared) for both states
    vars_vec = np.full(2, noise_level * noise_level)
    # Assemble the model record
    return GaussianHMM1D(
        startprob = startprob_vec,
        transmat = transmat_mat,
        means = means_vec,
        vars = vars_vec,
        n_components = n_components
    )
def sample(model, T):
    """Generate samples from the given HMM

    Args:
        model (GaussianHMM1D): the HMM with Gaussian measurement
        T (int): number of time steps to sample

    Returns:
        M (numpy vector): the series of measurements
        S (numpy vector): the series of latent states
    """
    states = np.zeros((T,), dtype=int)
    # Draw the initial latent state from the start distribution
    states[0] = np.random.choice([0, 1], p=model.startprob)
    # Propagate the chain: S[t] depends only on S[t-1] via the transition row
    for t in range(1, T):
        row = model.transmat[states[t - 1], :]
        states[t] = np.random.choice([0, 1], p=row)
    # Measurements are conditionally independent given the states, so draw
    # the whole batch at once from the state-dependent Gaussians
    mu = model.means[states]
    sd = np.sqrt(model.vars[states])
    measurements = np.random.normal(loc=mu, scale=sd, size=(T,))
    return measurements, states
# Set random seed (same seed as the exercise cell above, so results match)
np.random.seed(101)

# Set parameters of HMM
T = 100
switch_prob = 0.1
noise_level = 2.0

# Create HMM
model = create_HMM(switch_prob=switch_prob, noise_level=noise_level)

# Sample from HMM
M, S = sample(model,T)

# Sanity check: one measurement and one latent state per time step
assert M.shape==(T,)
assert S.shape==(T,)

# Print values
print(M[:5])
print(S[:5])
# -
# You should see that the first five measurements are:
#
# `[-3.09355908 1.58552915 -3.93502804 -1.98819072 -1.32506947]`
#
# while the first five states are:
#
# `[0 0 0 0 0]`
# ## Interactive Demo 1.2: Binary HMM
#
# In the demo below, we simulate and plot a similar HMM. You can change the probability of switching states and the noise level (the standard deviation of the Gaussian distributions for measurements). You can click the empty box to also visualize the measurements.
#
# **First**, think about and discuss these questions:
#
# 1. What will the states do if the switching probability is zero? One?
# 2. What will measurements look like with high noise? Low?
#
#
#
# **Then**, play with the demo to see if you were correct or not.
# + cellView="form"
#@title
#@markdown Execute this cell to enable the widget!

# Number of time steps simulated by the interactive demo
nstep = 100

@widgets.interact
def plot_samples_widget(
    switch_prob=widgets.FloatSlider(min=0.0, max=1.0, step=0.02, value=0.1),
    log10_noise_level=widgets.FloatSlider(min=-1., max=1., step=.01, value=-0.3),
    flag_m=widgets.Checkbox(value=False, description='measurements', disabled=False, indent=False)
):
    # Fixed seed so moving a slider re-simulates a comparable trajectory
    np.random.seed(101)
    model = create_HMM(switch_prob=switch_prob,
                       noise_level=10.**log10_noise_level)
    print(model)
    observations, states = sample(model, nstep)
    plot_hmm1(model, states, observations, flag_m=flag_m)
# +
# to_remove explanation
# Written discussion answers for Interactive Demo 1.2; kept as a bare string
# so executing the cell has no side effects.
"""
1) With 0 switching probability, the HMM stays
in one state the whole time. With switching probability of 1, the HMM switches state
every single step.
2) With high noise, the measurements don't track the underlying latent state very well. With
low noise the latent state plot (green) and measurement plot (blue) look identical
"""
# + cellView="form"
# @title Video 3: Section 1 Exercises Discussion
# Tabbed video player offering a YouTube and a Bilibili mirror.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        # IFrame subclass that embeds the Bilibili player for video `id`.
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1dX4y1F7Fq", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="bDDRgAvQeFA", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

# Present both mirrors in tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
#
# **Applications**. Measurements could be:
# * fish caught at different times as the school of fish moves from left to right
# * membrane voltage when an ion channel changes between open and closed
# * EEG frequency measurements as the brain moves between sleep states
#
# What phenomena can you imagine modeling with these HMMs?
# ----
#
# # Section 2: Predicting the future in an HMM
#
#
# *Estimated timing to here from start of tutorial: 20 min*
#
#
#
#
# + cellView="form"
# @title Video 4: Forgetting in a changing world
# Tabbed video player offering a YouTube and a Bilibili mirror.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        # IFrame subclass that embeds the Bilibili player for video `id`.
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1o64y1s7M7", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="XOec560m61o", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

# Present both mirrors in tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# ### Interactive Demo 2.1: Forgetting in a changing world
#
#
# Even if we know the world state for sure, the world changes. We become less and less certain as time goes by since our last measurement. In this exercise, we'll see how a Hidden Markov Model gradually "forgets" the current state when predicting the future without measurements.
#
# Assume we know that the initial state is -1, $s_0=-1$, so $p(s_0)=[1,0]$. We will plot $p(s_t)$ versus time.
#
# 1. Examine helper function `simulate_prediction_only` and understand how the predicted distribution changes over time.
#
# 2. Using our provided code, plot this distribution over time, and manipulate the process dynamics via the slider controlling the switching probability.
#
# Do you forget more quickly with low or high switching probability? Why? How does the curve look when `prob_switch` $>0.5$? Why?
#
#
#
#
# + cellView="form"
# @markdown Execute this cell to enable helper function `simulate_prediction_only`
def simulate_prediction_only(model, nstep):
    """
    Simulate the diffusion of HMM with no observations

    Starting from `model.startprob`, repeatedly pushes the marginal state
    distribution through the transition matrix; with no measurements the
    distribution gradually "forgets" the initial state.

    Args:
        model (GaussianHMM1D instance): the HMM; only `startprob` and
            `transmat` are used
        nstep (int): total number of time steps to simulate (includes initial time)

    Returns:
        predictive_probs (list of numpy vector): the marginal state
            distribution at each of the `nstep` time steps
    """
    # (Removed an unused `entropy_list` accumulator that was never read.)
    predictive_probs = []
    prob = model.startprob
    for i in range(nstep):
        # Record the current marginal distribution
        predictive_probs.append(prob)
        # One step forward: diffuse through the transition matrix
        prob = prob @ model.transmat
    return predictive_probs
# + cellView="form"
# @markdown Execute this cell to enable the widget!
np.random.seed(101)

# Simulation length and fixed measurement noise for the demo
T = 100
noise_level = 0.5

@widgets.interact(switch_prob=widgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=0.1))
def plot(switch_prob=switch_prob):
    # Re-simulate the no-observation diffusion for the chosen switching prob.
    model = create_HMM(switch_prob=switch_prob, noise_level=noise_level)
    predictive_probs = simulate_prediction_only(model, T)
    plot_marginal_seq(predictive_probs, switch_prob)
# +
# to_remove explanation
# Written discussion answers for Interactive Demo 2.1; kept as a bare string
# so executing the cell has no side effects.
"""
1) You forget more quickly with high switching probability because you become less
certain that the state is the one you know.
2) With switch_prob > 0.5, the predictive probabilities cross over 0 and eventually oscillate.
"""
# + cellView="form"
# @title Video 5: Section 2 Exercise Discussion
# Tabbed video player offering a YouTube and a Bilibili mirror.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        # IFrame subclass that embeds the Bilibili player for video `id`.
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1DM4y1K7tK", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="GRnlvxZ_ozk", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

# Present both mirrors in tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# # Section 3: Forward inference in an HMM
#
# *Estimated timing to here from start of tutorial: 35 min*
# + cellView="form"
# @title Video 6: Inference in an HMM
# Tabbed video player offering a YouTube and a Bilibili mirror.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        # IFrame subclass that embeds the Bilibili player for video `id`.
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV17f4y1571y", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="fErhvxE9SHs", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

# Present both mirrors in tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# ### Coding Exercise 3.1: Forward inference of HMM
#
# As a recursive algorithm, let's assume we already have yesterday's posterior from time $t-1$: $p(s_{t-1}|m_{1:t-1})$. When the new data $m_{t}$ comes in, the algorithm performs the following steps:
#
# * **Predict**: transform yesterday's posterior over $s_{t-1}$ into today's prior over $s_t$ using the transition matrix $D$:
#
# $$\text{today's prior}=p(s_t|m_{1:t-1})= p(s_{t-1}|m_{1:t-1}) D$$
#
# * **Update**: Incorporate measurement $m_t$ to calculate the posterior $p(s_t|m_{0:t})$
#
# $$\text{posterior} \propto \text{prior}\cdot \text{likelihood}=p(m_t|s_t)p(s_t|m_{0:t-1})$$
#
# In this exercise, you will:
#
# * STEP 1: Complete the code in function `markov_forward` to calculate the predictive marginal distribution at next time step
#
# * STEP 2: Complete the code in function `one_step_update` to combine predictive probabilities and data likelihood into a new posterior
# * Hint: We have provided a function to calculate the likelihood of $m_t$ under the two possible states: `compute_likelihood(model,M_t)`.
#
# * STEP 3: Using code we provide, plot the posterior and compare with the true values
#
# The complete forward inference is implemented in `simulate_forward_inference` which just calls `one_step_update` recursively.
#
#
#
# + cellView="form"
# @markdown Execute to enable helper functions `compute_likelihood` and `simulate_forward_inference`
def compute_likelihood(model, M):
    """
    Calculate likelihood of seeing data `M` for all measurement models

    Args:
        model (GaussianHMM1D): HMM
        M (float or numpy vector)

    Returns:
        L (numpy vector or matrix): the likelihood
    """
    # One Gaussian measurement density per latent state.
    densities = [stats.norm(mean, np.sqrt(var)).pdf(M)
                 for mean, var in zip(model.means, model.vars)]
    L = np.stack(densities, axis=0)
    # A single scalar measurement yields exactly two likelihood values;
    # return those as a flat vector rather than a (2, 1) matrix.
    if L.size == 2:
        L = L.flatten()
    return L
def simulate_forward_inference(model, T, data=None):
    """
    Given HMM `model`, calculate posterior marginal predictions of x_t for T-1 time steps ahead based on
    evidence `data`. If `data` is not given, generate a sequence of measurements from the first component.

    Args:
        model (GaussianHMM instance): the HMM
        T (int): length of returned array
        data (numpy vector, optional): measurement sequence; when omitted,
            measurements are drawn from the first measurement component

    Returns:
        predictive_state1: predictive probabilities in first state w.r.t no evidence
        posterior_state1: posterior probabilities in first state w.r.t evidence
    """
    # Per-time-step prior p(s_t|m_{1:t-1}), likelihood p(m_t|s_t), and
    # posterior p(s_t|m_{1:t}).
    # predictive_probs = simulate_prediction_only(model, T)
    predictive_probs = np.zeros((T,2))
    likelihoods = np.zeros((T,2))
    posterior_probs = np.zeros((T, 2))
    # Use the provided measurements, or generate a trajectory conditioned on
    # the latent state always being the first component (index 0).
    if data is not None:
        M = data
    else:
        M = np.random.normal(model.means[0], np.sqrt(model.vars[0]), (T,))
    # Initial step: the prior is the start distribution; combine it with the
    # likelihood of the first measurement and normalize.
    predictive_probs[0,:] = model.startprob
    likelihoods[0,:] = compute_likelihood(model, M[[0]])
    posterior = predictive_probs[0,:] * likelihoods[0,:]
    posterior /= np.sum(posterior)
    posterior_probs[0,:] = posterior
    # Recursive predict/update steps for t = 1..T-1 via `one_step_update`.
    for t in range(1, T):
        prediction, likelihood, posterior = one_step_update(model, posterior_probs[t-1], M[[t]])
        # normalize and add to the list
        posterior /= np.sum(posterior)
        predictive_probs[t,:] = prediction
        likelihoods[t,:] = likelihood
        posterior_probs[t,:] = posterior
    return predictive_probs, likelihoods, posterior_probs

# Show the helper signatures/docstrings to the learner.
help(compute_likelihood)
help(simulate_forward_inference)
# +
def markov_forward(p0, D):
    """Calculate the forward predictive distribution in a discrete Markov chain

    Args:
        p0 (numpy vector): a discrete probability vector
        D (numpy matrix): the transition matrix, D[i,j] means the prob. to
            switch FROM i TO j

    Returns:
        p1 (numpy vector): the predictive probabilities in next time step
    """
    # NOTE: student exercise cell — remove the NotImplementedError once the
    # `...` placeholder below is filled in (solution follows later).
    ##############################################################################
    # Insert your code here to:
    # 1. Calculate the predicted probabilities at next time step using the
    #    probabilities at current time and the transition matrix
    raise NotImplementedError("`markov_forward` is incomplete")
    ##############################################################################
    # Calculate predictive probabilities (prior)
    p1 = ...
    return p1
def one_step_update(model, posterior_tm1, M_t):
    """Given a HMM model, calculate the one-time-step updates to the posterior.

    Args:
        model (GaussianHMM1D instance): the HMM
        posterior_tm1 (numpy vector): Posterior at `t-1`
        M_t (numpy array): measurement at `t`

    Returns:
        posterior_t (numpy array): Posterior at `t`
    """
    # NOTE: student exercise cell — remove the NotImplementedError once the
    # `...` placeholders below are filled in (solution follows later).
    ##############################################################################
    # Insert your code here to:
    # 1. Call function `markov_forward` to calculate the prior for next time
    #    step
    # 2. Calculate likelihood of seeing current data `M_t` under both states
    #    as a vector.
    # 3. Calculate the posterior which is proportional to
    #    likelihood x prediction elementwise,
    # 4. Don't forget to normalize
    raise NotImplementedError("`one_step_update` is incomplete")
    ##############################################################################
    # Calculate predictive probabilities (prior)
    prediction = markov_forward(...)
    # Get the likelihood
    likelihood = compute_likelihood(...)
    # Calculate posterior
    posterior_t = ...
    # Normalize
    posterior_t /= ...
    return prediction, likelihood, posterior_t
# Set random seed
np.random.seed(12)

# Set parameters
switch_prob = 0.4
noise_level = .4
t = 75  # time step highlighted in the plot

# Create and sample from model
model = create_HMM(switch_prob = switch_prob,
                   noise_level = noise_level,
                   startprob=[0.5, 0.5])
measurements, states = sample(model, nstep)

# Infer state sequence
predictive_probs, likelihoods, posterior_probs = simulate_forward_inference(model, nstep,
                                                                            measurements)
# Decision rule: report state 1 whenever posterior mass on state 0 is <= 0.5
states_inferred = np.asarray(posterior_probs[:,0] <= 0.5, dtype=int)

# Visualize
plot_forward_inference(
    model, states, measurements, states_inferred,
    predictive_probs, likelihoods, posterior_probs,t=t, flag_m = 0
)
# +
# to_remove solution
def markov_forward(p0, D):
    """Calculate the forward predictive distribution in a discrete Markov chain

    Args:
        p0 (numpy vector): a discrete probability vector
        D (numpy matrix): the transition matrix, D[i,j] means the prob. to
            switch FROM i TO j

    Returns:
        p1 (numpy vector): the predictive probabilities in next time step
    """
    # Today's prior is yesterday's posterior pushed through the transition
    # matrix (row-vector times matrix).
    return np.matmul(p0, D)
def one_step_update(model, posterior_tm1, M_t):
    """Given a HMM model, calculate the one-time-step updates to the posterior.

    Args:
        model (GaussianHMM1D instance): the HMM
        posterior_tm1 (numpy vector): Posterior at `t-1`
        M_t (numpy array): measurements at `t`

    Returns:
        posterior_t (numpy array): Posterior at `t`
    """
    # Predict: push yesterday's posterior through the transition matrix.
    prediction = markov_forward(posterior_tm1, model.transmat)
    # Update: weight today's prior by the likelihood of the new measurement.
    likelihood = compute_likelihood(model, M_t)
    unnormalized = prediction * likelihood
    # Normalize so the posterior sums to one.
    posterior_t = unnormalized / np.sum(unnormalized)
    return prediction, likelihood, posterior_t
# Set random seed
np.random.seed(12)

# Set parameters
switch_prob = 0.4
noise_level = .4
t = 75  # time step highlighted in the plot

# Create and sample from model
model = create_HMM(switch_prob = switch_prob,
                   noise_level = noise_level,
                   startprob=[0.5, 0.5])
measurements, states = sample(model, nstep)

# Infer state sequence
predictive_probs, likelihoods, posterior_probs = simulate_forward_inference(model, nstep,
                                                                            measurements)
# Decision rule: report state 1 whenever posterior mass on state 0 is <= 0.5
states_inferred = np.asarray(posterior_probs[:,0] <= 0.5, dtype=int)

# Visualize
with plt.xkcd():
    plot_forward_inference(
        model, states, measurements, states_inferred,
        predictive_probs, likelihoods, posterior_probs,t=t, flag_m = 0
    )
# -
# ## Interactive Demo 3.2: Forward inference in binary HMM
#
# Now visualize your inference algorithm. Play with the sliders and checkboxes to help you gain intuition.
#
# * Use the sliders `switch_prob` and `log10_noise_level` to change the switching probability and measurement noise level.
#
# * Use the slider `t` to view prediction (prior) probabilities, likelihood, and posteriors at different times.
#
# When does the inference make a mistake? For example, set `switch_prob=0.1`, `log10_noise_level=-0.2`, and take a look at the probabilities at time `t=2`.
# + cellView="form"
# @markdown Execute this cell to enable the demo

# Number of time steps in the simulated trajectory
nstep = 100

@widgets.interact
def plot_forward_inference_widget(
    switch_prob=widgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=0.05),
    log10_noise_level=widgets.FloatSlider(min=-1., max=1., step=.01, value=0.1),
    t=widgets.IntSlider(min=0, max=nstep-1, step=1, value=nstep//2),
    #flag_m=widgets.Checkbox(value=True, description='measurement distribution', disabled=False, indent=False),
    flag_d=widgets.Checkbox(value=True, description='measurements', disabled=False, indent=False),
    flag_pre=widgets.Checkbox(value=True, description='todays prior', disabled=False, indent=False),
    flag_like=widgets.Checkbox(value=True, description='likelihood', disabled=False, indent=False),
    flag_post=widgets.Checkbox(value=True, description='posterior', disabled=False, indent=False),
):
    # Fixed seed so slider changes re-run a comparable trajectory
    np.random.seed(102)
    # global model, measurements, states, states_inferred, predictive_probs, likelihoods, posterior_probs
    model = create_HMM(switch_prob=switch_prob,
                       noise_level=10.**log10_noise_level,
                       startprob=[0.5, 0.5])
    measurements, states = sample(model, nstep)
    # Infer state sequence
    predictive_probs, likelihoods, posterior_probs = simulate_forward_inference(model, nstep,
                                                                                measurements)
    states_inferred = np.asarray(posterior_probs[:,0] <= 0.5, dtype=int)
    fig = plot_forward_inference(
        model, states, measurements, states_inferred,
        predictive_probs, likelihoods, posterior_probs,t=t,
        flag_m=0,
        flag_d=flag_d,flag_pre=flag_pre,flag_like=flag_like,flag_post=flag_post
    )
    plt.show(fig)
# + cellView="form"
# @title Video 7: Section 3 Exercise Discussion
# Tabbed video player offering a YouTube and a Bilibili mirror.
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
    from IPython.display import IFrame
    class BiliVideo(IFrame):
        # IFrame subclass that embeds the Bilibili player for video `id`.
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id="BV1EM4y1T7cB", width=854, height=480, fs=1)
    print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))
    display(video)

out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id="CNrjxNedqV0", width=854, height=480, fs=1, rel=0)
    print('Video available at https://youtube.com/watch?v=' + video.id)
    display(video)

# Present both mirrors in tabs.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# -
# ---
# # Summary
#
# *Estimated timing of tutorial: 1 hour, 5 minutes*
#
# In this tutorial, you
#
# * Simulated the dynamics of the hidden state in a Hidden Markov model and visualized the measured data (Section 1)
# * Explored how uncertainty in a future hidden state changes based on the probabilities of switching between states (Section 2)
# * Estimated hidden states from the measurements using forward inference, connected this to Bayesian ideas, and explored the effects of noise and transition matrix probabilities on this process (Section 3)
| tutorials/W3D2_HiddenDynamics/W3D2_Tutorial2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.display import HTML
# Load the course stylesheet and apply it to this notebook.
with open ('../style.css', 'r') as file:
    css = file.read()
HTML(css)
# # Aufgaben-Blatt Nr. 1
# ## Aufgabe 1:
# Eine Zahl $m\in \mathbb{N}$ ist ein *echter Teiler* einer Zahl
# $n \in \mathbb{N}$ genau dann, wenn $m$ ein Teiler von $n$ ist und wenn außerdem $m < n$ gilt.
#
# Eine Zahl $n \in \mathbb{N}$ heißt *perfekt*, wenn $n$ gleich der Summe aller echten Teiler von
# $n$ ist. Zum Beispiel ist die Zahl $6$ perfekt, denn die Menge der echten Teiler
# von 6 ist $\{1,2,3\}$ und es gilt $1 + 2 + 3 = 6$.
# Ziel der Aufgabe ist die Berechnung der Menge aller perfekten Zahlen, die kleiner als $10\,000$ sind.
# Implementieren Sie eine Prozedur `echteTeiler`, so dass der Aufruf
# $\texttt{echteTeiler}(n)$ für eine natürliche Zahl $n$ die Menge aller echten Teiler von $n$ berechnet. Beispielsweise soll gelten:
# $$ \texttt{echteTeiler}(6) = \{1, 2, 3\} $$
# <hr style="height:5px;border-width:0;color:gray;background-color:green">
# <p style="background-color:yellow; border:3px; border-style:solid; border-color:#FF0000; padding: 1em;"><b style="color:blue;background-color:orange;font-weight:900">Hinweis</b>: Versuchen Sie, bei der Lösung dieser Aufgabe <u>ohne</u> <tt>for</tt>-Schleifen
# oder <tt>while</tt>-Schleifen auszukommen. Sie sollen statt dessen mit
# <b style="color:blue;background-color:yellow">Mengen</b>
# arbeiten. Dieser Hinweis gilt auch für die folgenden Aufgaben.
# </p>
# <hr style="height:5px;border-width:0;color:gray;background-color:green">
def echteTeiler(n):
    'Diese Funktion berechnet die Menge aller echten Teiler der Zahl n.'
    # A proper divisor m of n satisfies m | n and m < n.  Built as a set
    # comprehension, per the exercise's "use sets, no loops" instruction.
    return {m for m in range(1, n) if n % m == 0}

echteTeiler(6)
# Implementieren Sie eine Funktion `isPerfect`, so dass der Aufruf
# $\texttt{isPerfect}(n)$ für eine natürliche Zahl $n$ genau dann das Ergebnis
# `True` zurück gibt, wenn $n$ eine perfekte Zahl ist.
def isPerfect(n):
    'Returns True if n is perfect, else False.'
    # n is perfect iff it equals the sum of its proper divisors
    # (computed inline so this function is self-contained).
    return n == sum(m for m in range(1, n) if n % m == 0)

isPerfect(6)
# Berechnen Sie die Menge aller perfekten Zahlen, die kleiner als $10\,000$ sind.
def perfectNumbers(n):
    'This function computes the set of all perfect numbers less than n.'
    from math import isqrt

    def echteTeilerSumme(k):
        # Sum of proper divisors via divisor pairs (d, k // d) with
        # d <= sqrt(k); enumerating only up to sqrt keeps
        # perfectNumbers(10000) fast.
        kleine = {d for d in range(1, isqrt(k) + 1) if k % d == 0}
        return sum((kleine | {k // d for d in kleine}) - {k})

    # k is perfect iff it equals the sum of its proper divisors.
    return {k for k in range(2, n) if echteTeilerSumme(k) == k}
# Beim Ausführen der nächsten Zeile sollte die Zahl `6` berechnet werden.
perfectNumbers(10)

perfectNumbers(10000)
# ## Aufgabe 2:
# Ziel dieser Aufgabe ist die Implementierung einer Funktion `ggt`, die für zwei
# natürliche Zahlen $m$ und $n$ den größten gemeinsamen Teiler dieser Zahlen berechnet. Die Funktion `ggt` soll mit Hilfe mehrerer Hilfsfunktionen berechnet werden.
def teiler(n):
    'This function computes the set of all natural numbers that divide n.'
    # Unlike echteTeiler, n itself is included here (range goes up to n).
    return {m for m in range(1, n + 1) if n % m == 0}
# Implementieren Sie eine Funktion `gt`, so dass der Aufruf $\texttt{gt}(m,n)$
# für zwei natürliche Zahlen $m$ und $n$ die Menge aller *gemeinsamen Teiler*
# von $m$ und $n$ berechnet.
#
# **Hinweis**: Berechnen Sie zunächst die Menge der Teiler von $m$ und
# die Menge der Teiler von $n$. Überlegen Sie, wie die Mengenlehre Ihnen
# weiterhilft, wenn Sie diese beiden Mengen berechnet haben.
def gt(m, n):
    '''This function calculates the set of numbers that
    are divisors of both m and n.
    '''
    # Common divisors = intersection of the two divisor sets.
    teilerM = {k for k in range(1, m + 1) if m % k == 0}
    teilerN = {k for k in range(1, n + 1) if n % k == 0}
    return teilerM & teilerN
# Implementieren Sie nun eine Funktion `ggt`, so dass der Aufruf
# $\texttt{ggt}(m,n)$ den größten gemeinsamen Teiler
# der beiden Zahlen $m$ und $n$ berechnet.
def ggt(m, n):
    'This function returns the greatest common divisor of m and n.'
    # Largest k that divides both; self-contained (no helper needed).
    return max(k for k in range(1, min(m, n) + 1) if m % k == 0 and n % k == 0)
# ## Aufgabe 3:
# Ziel dieser Aufgabe ist die Implementierung einer Funktion `kgv`, die für zwei
# natürliche Zahlen $m$ und $n$ das kleinste gemeinsame Vielfache dieser Zahlen berechnet. Die Funktion `kgv` soll mit Hilfe geeigneter Hilfsfunktionen berechnet werden.
def kgv(m, n):
    'This function returns the smallest common multiple of m and n.'
    # Intersect the first n multiples of m with the first m multiples of n;
    # m*n belongs to both sets, so the intersection is never empty.
    return min({m * i for i in range(1, n + 1)} & {n * j for j in range(1, m + 1)})
# ## Aufgabe 4:
# Implementieren Sie eine Funktion `subsets`, so dass
# $\texttt{subsets}(M, k)$ für eine Menge $M$ und eine natürliche Zahl $k$
# die Menge aller der Teilmengen von $M$ berechnet, die genau $k$ Elemente haben.
#
# **Hinweis**: Versuchen Sie, die Funktion $\texttt{subsets}(M, k)$ rekursiv
# durch Rekursion nach $k$ zu implementieren.
# Geben Sie eine Implementierung der Funktion `power` an, bei der Sie die
# Funktion `subsets` verwenden. Für eine Menge $M$ soll die Funktion
# $\texttt{power}(M)$ die Potenz-Menge $2^M$ berechnen.
# ## Aufgabe 5:
# Ein Tupel der Form $\langle a, b, c \rangle$ wird als
# *geordnetes <a href="http://de.wikipedia.org/wiki/Pythagoreisches_Tripel">pythagoreisches Tripel</a>*
# bezeichnet, wenn sowohl
# $$ a^2 + b^2 = c^2 \quad \mbox{als auch} \quad a < b $$
# gilt. Beispielsweise ist $\langle 3,4,5 \rangle$ ein geordnetes pythagoreisches Tripel, denn einerseits ist
# $3^2 + 4^2 = 5^2$ und andererseits gilt $3 < 4$.
#
# Implementieren Sie eine Prozedur $\texttt{pythagoras}$, so dass der Aufruf
# $$\texttt{pythagoras}(n)$$
# für eine natürliche Zahl $n$ die Menge aller geordneten pythagoreischen Tripel $\langle a,b,c \rangle$
# berechnet, für die $c \leq n$ ist.
def pythagoras(n):
    '''Compute the set of ordered Pythagorean triples (a, b, c) with c <= n.

    A triple is ordered iff a*a + b*b == c*c and a < b (enforced by the
    comprehension ranges below).
    '''
    return {(a, b, c)
            for c in range(1, n + 1)
            for b in range(1, c)
            for a in range(1, b)
            if a * a + b * b == c * c}
# Ein pythagoreisches Tripel $\langle a,b,c \rangle$ ist ein
# <em>reduziertes</em> Tripel, wenn
# die Zahlen $a$, $b$ und $c$ keinen <em>nicht-trivialen</em> gemeinsamen Teiler haben. Ein <em>nicht-trivaler</em> gemeinsamer Teiler ist ein Teiler, der größer als $1$ ist.
#
# Implementieren Sie eine Funktion `isReduced`, die als Argumente drei natürliche Zahlen $a$, $b$ und $c$ erhält und die genau dann $\texttt{True}$ als Ergebnis zurück liefert,
# wenn das Tripel $\langle a, b, c\rangle$ reduziert ist.
def isReduced(a, b, c):
    '''Return True iff a, b and c share no common divisor greater than 1.'''
    # Any non-trivial common divisor is at most min(a, b, c).
    return not any(a % k == 0 and b % k == 0 and c % k == 0
                   for k in range(2, min(a, b, c) + 1))
# Implementieren Sie eine Prozedur `reducedPythagoras`, so dass der Aufruf
# $$\texttt{reducedPythagoras}(n)$$
# die Menge aller geordneten pythagoreischen Tripel $\langle a,b,c \rangle$ berechnet, die reduziert sind. Berechnen Sie mit dieser Prozedur alle reduzierten geordneten pythagoreischen
# Tripel $\langle a,b,c \rangle$, für die $c \leq 50$ ist.
def reducedPythagoras(n):
    '''Compute the set of reduced ordered Pythagorean triples (a, b, c), c <= n.

    Self-contained: the reducedness check is inlined so this function does not
    depend on the other exercise solutions being filled in.
    '''
    def reduziert(a, b, c):
        # No common divisor greater than 1.
        return not any(a % k == 0 and b % k == 0 and c % k == 0
                       for k in range(2, min(a, b, c) + 1))

    return {(a, b, c)
            for c in range(1, n + 1)
            for b in range(1, c)
            for a in range(1, b)
            if a * a + b * b == c * c and reduziert(a, b, c)}

reducedPythagoras(50)
# ## Aufgabe 6:
# Nehmen Sie an, ein Spieler hat im Poker
# (<a href="https://de.wikipedia.org/wiki/Texas_Hold’em">Texas Hold'em</a>)
# die beiden
# Karten $\langle 8, \texttt{'♠'}\rangle$ und $\langle 9, \texttt{'♠'}\rangle$ erhalten. Schreiben Sie ein
# Programm, das die folgenden Fragen beantwortet.
# <ol>
# <li> Wie groß ist die Wahrscheinlichkeit, dass im Flop wenigsten zwei weitere Karten
# der Farbe $\texttt{'♠'}$ liegen?
# </li>
# <li> Wie groß ist die Wahrscheinlichkeit, dass alle drei Karten im Flop
# die Farbe $\texttt{'♠'}$ haben?
# </li>
# </ol>
# ## Aufgabe 7:
# Ein <a href="https://de.wikipedia.org/wiki/Anagramm">Anagramm</a> eines gegebenen Wortes $v$ ist ein Wort $w$, das
# aus dem Wort $v$ durch Umstellung von Buchstaben entsteht. Beispielsweise ist das Wort
# $\texttt{"atlas"}$ ein
# Anagramm des Wortes "$\texttt{salat}$". Implementieren Sie eine Funktion $\texttt{anagram}(s)$, die für ein
# gegebenes Wort $s$ alle Wörter berechnet, die sich aus dem Wort $s$ durch Umstellung von Buchstaben
# ergeben. Die Menge dieser Wörter soll dann als Ergebnis zurück gegeben werden. Es ist nicht gefordert, dass
# die Anagramme sinnvolle Wörter der deutschen Sprache sind. Beispielsweise ist auch das Wort "$\texttt{talas}$"
# ein Anagramm des Wortes "$\texttt{salat}$".
#
def anagram(s):
    '''Return the set of all words obtainable by rearranging the letters of s.

    Recursive: pick each character in turn as the first letter and prepend it
    to every anagram of the remaining characters.
    '''
    if len(s) <= 1:
        return {s}
    return {s[i] + rest
            for i in range(len(s))
            for rest in anagram(s[:i] + s[i + 1:])}
# ## Aufgabe 8:
# Nehmen Sie an, dass Sie $n$ Würfel haben, deren Seiten mit den Zahlen 1 bis 6 bedruckt sind. Weiter ist eine
# feste Zahl $s$ vorgegeben. Implementieren Sie eine Funktion $\texttt{numberDiceRolls}$, so dass der Aufruf
# $$ \texttt{numberDiceRolls}(n, s) $$
# die Anzahl der Möglichkeiten berechnet, mit $n$ Würfeln in der Summe die Zahl $s$ zu würfeln. Beispielsweise
# soll $\texttt{numberDiceRolls}(3, 5)$ den Wert 6 liefern, denn es gibt 6 Möglichkeiten, um mit drei Würfeln in
# der Summe eine 5 zu würfeln:
# $$\langle1, 1, 3\rangle, \langle1, 2, 2\rangle, \langle1, 3, 1\rangle, \langle2, 1, 2\rangle, \langle2, 2,
# 1\rangle, \langle3, 1, 1\rangle$$
#
# **Hinweis**: Implementieren Sie die Funktion $\texttt{numberDiceRolls}(n, s)$ *rekursiv*.
def numberDiceRolls(n, s):
    '''Count the ways to roll a total of s with n six-sided dice (recursive).'''
    # Base case: zero dice reach sum 0 in exactly one way, any other sum never.
    if n == 0:
        return 1 if s == 0 else 0
    # Condition on the value k of the first die and recurse on the rest.
    return sum(numberDiceRolls(n - 1, s - k) for k in range(1, 7))
# ## Aufgabe 9: Palindrome
#
# Ein Wort $s$ ist ein [Palindrom](https://de.wikipedia.org/wiki/Palindrom)
# genau dann, wenn es rückwärts gelesen das selbe Wort ergibt. Es gilt dann also
# $$ \texttt{reverse}(s) = s. $$
# Die Funktion $\texttt{reverse}()$ dreht dabei die Reihenfolge der
# Buchstaben eines Wortes um. Es gilt beispielsweise
# $$ \texttt{reverse("abcd") = "dcba"}. $$
# Beispiele für Palindrome sind folgende Wörter:
# - `"reittier"`
# - `"abba"`
# - `"hannah"`
# - `"rentner"`
#
# Entwickeln Sie eine <b>rekursive</b> Funktion `isPalindrome` die als Argument einen String `s` erhält und die genau dann `True` zurück gibt, wenn der String `s` ein Palindrom ist.
def isPalindrome(s):
    'return True iff s is a palindrome'
    # Recursion: compare the outer characters, then shrink toward the middle.
    if len(s) <= 1:
        return True
    return s[0] == s[-1] and isPalindrome(s[1:-1])
# ## Aufgabe 10: Permutationen
#
# Ist $n \in \mathbb{N}$, so ist eine `Permutation` der Länge $n$ ein Tupel, das jede
# Zahl der Menge $\{1, \cdots, n\}$ genau einmal enthält. Beispielsweise ist
# das Tupel `(3,2,1)` eine Permutation der Länge $3$. Eine andere Permuation der Länge
# $3$ ist das Tupel `(2,1,3)`.
#
# Entwickeln Sie eine <b>rekursive</b> Funktion `allPermutations`, die als Argument eine
# natürliche Zahl $n$ erhält und als Ergebnis die Menge aller Permutationen der Länge
# $n$ berechnet.
def allPermutations(n):
    'This function returns the set of all permutations of length n.'
    # Recursion on n: the empty permutation for n = 0, otherwise insert n at
    # every possible position of each permutation of length n - 1.
    if n == 0:
        return {()}
    return {p[:i] + (n,) + p[i:]
            for p in allPermutations(n - 1)
            for i in range(n)}
# ## Aufgabe 11: Sortieren durch Einfügen
#
# Bei dem Algorithmus *Sortieren durch Einfügen* sortieren wir eine Liste,
# indem wir
# - das letzte Element der Liste entfernen,
# - die Liste der verbleibenden Elemente rekursiv sortieren und schließlich
# - das letzte Element so in die sortierte Liste einfügen, dass das Ergebnis
# immer noch sortiert ist.
#
# Implementieren Sie eine Funktion `sort`, die als Argument eine Liste `L` von
# Zahlen erhält und diese mittels des Algorithmus *Sortieren durch Einfügen*
# *rekursiv* sortiert.
#
# Verwenden Sie dabei eine Hilfsfunktion `insert` die zwei Argumente erhält:
# - `x` ist eine Zahl,
# - `L` ist eine Liste von Zahlen.
#
# Der Aufruf `insert(x, L)` gibt als Ergebnis eine sortierte Liste zurück, die dadurch entsteht, dass die Zahl `x` so in die `L` eingefügt wird, dass das Ergebnis sortiert ist. Auch die Funktion `insert` sollen Sie <b>rekursiv</b> implementieren.
def sort(L):
    '''This function sorts the elements of the list L and returns
    the resulting list (recursive insertion sort): remove the head,
    sort the rest recursively, then insert the head into the result.
    '''
    if (len(L) <= 1):
        return L
    return insert(L[0], sort(L[1:]))

def insert(x, L):
    '''This function inserts the number x into the sorted list L such that
    the resulting list is sorted, too. Returns a new list; L is unchanged.

    Bug fix: the original misplaced x whenever x was smaller than the first
    element of a list with more than one entry (e.g. insert(0, [1, 2])
    returned [1, 0, 2]); it also crashed with IndexError on an empty list.
    '''
    if not L:
        return [x]
    # x belongs in front of everything that is >= x.
    if x <= L[0]:
        return [x] + L
    return [L[0]] + insert(x, L[1:])
# Quick sanity checks for insert and sort.
insert(3, [1])
sort([2, 1, 4, 3, 5, 10, 0])
sort([1, 0])
# +
def insertion(x, L):
    """Return a new sorted list with x inserted into the sorted list L."""
    if not L:
        return [x]
    head, rest = L[0], L[1:]
    # x goes in front of the first element that is >= x.
    if x <= head:
        return [x] + L
    return [head] + insertion(x, rest)

def insertionSort(L):
    """Sort L recursively: sort the tail, then insert the head into it."""
    if not L:
        return []
    return insertion(L[0], insertionSort(L[1:]))
# -
# Quick sanity checks for insertion and insertionSort.
insertionSort([3, 2])
insertionSort([2, 1, 4, 3, 5, 10, 0])
insertion(1, [0])
# Slicing past the end of a one-element list yields an empty list.
u = [1]
u[1:]
| Python/Exercises/Blatt-01-Insertion-Sort-David.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Object Detection in Pytorch document
# +
import torch
import torchvision
import torchvision.transforms as T
import numpy as np
import matplotlib.pyplot as plt
import PIL.Image as Image
import os
# %matplotlib inline
# -
# ## Exploring the data paths
# +
# Dataset root directory
data_path = 'data/PennFudanPed'
os.listdir(data_path)
# -
# Image folder
os.listdir(os.path.join(data_path,'PNGImages'))[:5]
# Mask folder
os.listdir(os.path.join(data_path, 'PedMasks'))[:5]
# ## Dataset 생성
class PennFudanDataset(object):
    """Penn-Fudan pedestrian dataset for instance segmentation.

    Expects ``root`` to contain two folders:
    - ``PNGImages``: RGB photographs
    - ``PedMasks``: mask PNGs where pixel value k marks instance k (0 = background)

    ``__getitem__`` returns ``(image, target)`` where ``target`` is the dict
    of boxes/labels/masks/etc. expected by torchvision detection models.
    """

    def __init__(self, root, transforms):
        # Bug fix: the original ignored `root` and read the global
        # `data_path`, so the dataset could only ever point at one
        # hard-coded directory.
        self.root = root
        self.transforms = transforms
        # Sort both listings so image i pairs up with mask i.
        self.imgs = sorted(os.listdir(os.path.join(root, 'PNGImages')))
        self.masks = sorted(os.listdir(os.path.join(root, 'PedMasks')))

    def __getitem__(self, idx):
        img_path = os.path.join(self.root, 'PNGImages', self.imgs[idx])
        mask_path = os.path.join(self.root, 'PedMasks', self.masks[idx])
        img = Image.open(img_path).convert('RGB')
        # The mask is NOT converted to RGB: each pixel holds an instance id.
        mask = Image.open(mask_path)
        mask = np.array(mask)

        # Unique pixel values are the instance ids; drop 0 (background).
        obj_ids = np.unique(mask)
        obj_ids = obj_ids[1:]

        # Broadcast (H, W) against (num_objs, 1, 1) to get one boolean mask
        # per instance, shape (num_objs, H, W).
        masks = mask == obj_ids[:, None, None]

        # Compute a tight bounding box around each instance mask.
        num_objs = len(obj_ids)
        boxes = []
        for i in range(num_objs):
            # Bug fix: index the per-instance boolean masks (`masks[i]`),
            # not row i of the raw 2-D mask (`mask[i]`) as the original did.
            pos = np.where(masks[i])
            # np.where returns (row_idxs, col_idxs): rows are y, columns are x.
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            boxes.append([xmin, ymin, xmax, ymax])

        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # Bug fix: torchvision detection models require int64 class labels;
        # every instance here is class 1 ("person").
        labels = torch.ones((num_objs,), dtype=torch.int64)
        masks = torch.as_tensor(masks, dtype=torch.uint8)
        image_id = torch.tensor([idx])
        # Box areas: (xmax - xmin) * (ymax - ymin) for each box.
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        # Mark every instance as not-crowd (COCO annotation convention).
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

        target = {}
        target['boxes'] = boxes
        target['labels'] = labels
        target['masks'] = masks
        target['image_id'] = image_id
        target['area'] = area
        target['iscrowd'] = iscrowd

        if self.transforms is not None:
            # Tutorial-style transforms take and return the (img, target) pair.
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.imgs)
# ## Model 불러오기
# - faster-rcnn 이용
# -
# - COCO에서 pre-train된 모델 불러오기
# ### 1. Predictor 부분 FineTuning
# - 마지막 box를 예측하는 부분을 수정
# - Fasterrcnn -> fastrcnn으로 교체
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
# Load a Faster R-CNN with a ResNet-50 FPN backbone, pre-trained on COCO.
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True) # Feature Pyramid Network
num_classes = 2 # 1 : person, 0 : background
model
# Input width of the existing classification head.
model.roi_heads.box_predictor.cls_score.in_features
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor
FastRCNNPredictor(in_features, num_classes)
# Swap in a fresh box predictor sized for our 2 classes.
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# ### 2. Modifying the backbone
import torchvision
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
# Use MobileNetV2's feature extractor as the backbone; FasterRCNN needs to
# know its output channel count (1280 for MobileNetV2).
backbone = torchvision.models.mobilenet_v2(pretrained=True).features
backbone.out_channels = 1280
# Bug fix: sizes/aspect_ratios must be tuples of tuples — one inner tuple
# per feature map (this backbone exposes a single feature map). The
# original passed flat tuples, which does not match the AnchorGenerator
# contract in the torchvision finetuning tutorial.
anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
                                   aspect_ratios=((0.5, 1.0, 2.0),))
# Bug fix: MultiScaleRoIAlign's featmap_names takes strings ('0'), not
# integers, in current torchvision.
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
                                                output_size=7,
                                                sampling_ratio=2)
model
model = FasterRCNN(backbone,
                   num_classes=2,
                   rpn_anchor_generator=anchor_generator,
                   box_roi_pool=roi_pooler)
model
# ### 3. Mask을 위한 모델 추가
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor


def get_model_instance_segmentation(num_classes):
    """Load a COCO-pretrained Mask R-CNN and re-head it for num_classes."""
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    # Replace the box-classification head with one sized for our classes.
    box_in = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(box_in, num_classes)
    # Replace the mask-prediction head as well.
    mask_in = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(mask_in,
                                                       hidden_layer,
                                                       num_classes)
    return model
def get_transform(train):
    """Build the image transform pipeline; add a random flip when training."""
    steps = [T.ToTensor()]
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)
# ## Training and Evaluating
def main():
    """Fine-tune Mask R-CNN on the PennFudan dataset (tutorial driver).

    NOTE(review): this function is incomplete tutorial code — it depends on
    `utils.collate_fn` (and the tutorial's `engine` helpers), which are never
    imported in this file, and the epoch loop below only moves batches to the
    device without computing a loss or stepping the optimizer.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    num_classes = 2  # background + person

    # Two dataset objects over the same files so train/test could use
    # different transforms if desired.
    dataset = PennFudanDataset('data/PennFudanPed', get_transform(train=True))
    dataset_test = PennFudanDataset('data/PennFudanPed', get_transform(train=True))

    # Shuffle indices and hold out the last 50 samples for testing.
    indices = torch.randperm(len(dataset)).tolist()
    dataset = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])

    data_loader = torch.utils.data.DataLoader(dataset, batch_size=2,
                                              shuffle=True, num_workers=1,
                                              collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1,
                                                   shuffle=False, num_workers=1,
                                                   collate_fn=utils.collate_fn)

    model = get_model_instance_segmentation(num_classes)
    model.to(device)

    # Optimize only trainable parameters; decay the LR every 3 epochs.
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)

    EPOCH = 10
    for e in range(EPOCH):
        # Bug fix: the original looped over the undefined name `dataloader`
        # and unpacked batches as (images, labels); with the tutorial's
        # collate_fn each batch is a (images, targets) pair of tuples.
        for images, targets in data_loader:
            images = [img.to(device) for img in images]
            targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
            # TODO(review): compute losses and step the optimizer (see the
            # torchvision tutorial's engine.train_one_epoch).
# NOTE(review): this import comes from the torchvision detection tutorial's
# helper files (engine.py placed under data/); it is unused below.
from data.engine import train_one_epoch, evaluate
# NOTE(review): `i` is used here but only defined two lines further down —
# these notebook cells were evidently executed out of order; running this
# file top-to-bottom raises NameError here.
torch.utils.data.Subset(dataset, i[:-50])
dataset = PennFudanDataset('data/PennFudanPed', get_transform(train=True))
i = torch.randperm(len(dataset)).tolist()
# Inspect the first pedestrian mask file.
mask_list = sorted(os.listdir(os.path.join(data_path, 'PedMasks')))
mask_path = os.path.join(data_path, 'PedMasks', mask_list[0])
mask = Image.open(mask_path)
mask = np.array(mask)
mask.shape
# +
# Instance ids present in the mask; drop 0 (background).
object_id = np.unique(mask)
object_id = object_id[1:]
object_id
# -
# One boolean mask per instance: shape (num_objects, H, W).
masks = mask == object_id[:, None, None]
masks
# Pixel coordinates of the second instance (rows = y, columns = x).
pos = np.where(masks[1])
pos
# Tight bounding box around that instance.
xmin = np.min(pos[1])
ymin = np.min(pos[0])
xmax = np.max(pos[1])
ymax = np.max(pos[0])
xmin, ymin, xmax, ymax
# Bug fix: `boxes` must be initialised before appending; the original left
# this line commented out, so the append below raised a NameError at
# module scope.
boxes = []
boxes.append([xmin, ymin, xmax, ymax])
boxes
# torch.float is an alias for float32.
boxes = torch.as_tensor(boxes, dtype=torch.float)
boxes
object_num = len(object_id)
object_num
# Detection models expect int64 class labels; one "person" label per object.
labels = torch.ones((object_num,), dtype=torch.int64)
labels
masks = torch.as_tensor(masks, dtype=torch.uint8)
masks
image_id = torch.tensor([0])
image_id
# Box area: (ymax - ymin) * (xmax - xmin) per box.
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
area
# iscrowd = 0 for every instance (COCO convention).
torch.zeros((object_num,), dtype=torch.int64)
| 1. Beginner/Pytorch7_FineTuning(Object Detection & Segmentation in Pytorch Document).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.12 64-bit (''menv'': conda)'
# language: python
# name: python3812jvsc74a57bd08865e2b3663b4286108edb179ccfe953148c238acf0701d008d1617f19c984b8
# ---
# <font size="+5">#02 | Feature Importance with Shapley Values</font>
# - Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)
# - Let's keep in touch on [LinkedIn ↗](https://www.linkedin.com/in/jsulopz) 😄
# # Discipline to Search Solutions in Google
# > Apply the following steps when **looking for solutions in Google**:
# >
# > 1. **Necessity**: How to load an Excel in Python?
# > 2. **Search in Google**: by keywords
# > - `load excel python`
# > - ~~how to load excel in python~~
# > 3. **Solution**: What's the `function()` that loads an Excel in Python?
# >    - A function is to programming what the atom is to physics.
# > - Every time you want to do something in programming
# > - **You will need a `function()`** to make it
# >    - Therefore, you must **detect parentheses `()`**
# > - Out of all the words that you see in a website
# > - Because they indicate the presence of a `function()`.
# # Load the Data
# > - The goal of this dataset is
# > - To predict if **bank's customers** (rows) `default` next month
# > - Based on their **socio-demographical characteristics** (columns)
# +
import pandas as pd

pd.set_option("display.max_columns", None)

url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00350/default%20of%20credit%20card%20clients.xls'
# Keep only the socio-demographic columns plus the target.
df = pd.read_excel(io=url, header=1, index_col=0).loc[:,['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'default payment next month']]
# Bug fix: per the UCI codebook for this dataset, SEX is 1 = male,
# 2 = female; the original mapping was inverted.
df.SEX = df.SEX.replace({1: 'Male', 2: 'Female'})
# NOTE(review): the UCI codebook defines EDUCATION as 1 = graduate school,
# 2 = university, 3 = high school, 4 = others; the labels below deviate
# from that — confirm the intended coding before interpreting results.
df.EDUCATION = df.EDUCATION.replace({0: 'No studies',
                                     1: 'Primary',
                                     2: 'Secondary',
                                     3: 'Bachelor',
                                     4: 'Undergraduated',
                                     5: 'Masters',
                                     6: 'PhD'})
df.MARRIAGE = df.MARRIAGE.replace({0: 'Divorced',
                                   1: 'Yes',
                                   2: 'No',
                                   3: 'Widow'})
df.sample(10)
# -

# One-hot encode the categoricals (drop one level each to avoid collinearity).
df = pd.get_dummies(df, drop_first=True)

# Features are all columns except the last; the target is the default flag.
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
# ## Modelling
# ### Build a `DecisionTreeClassifier()` Model
# + [markdown] tags=[]
# ### Build a `RandomForestClassifier()` Model
# -
# ### Build a `LogisticRegression()` Model
# ## Make Predictions with Each Model
# ## Which is the Best Model?
# ## How to Interpret? Feature Importance
# ### Is it enough?
# ## Shapley Values Theory
# ### Microsoft Shapley Values Video
# %%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/ngOBhhINWb8" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# ### Alice, Bob & Celine Video
# %%HTML
<iframe width="560" height="315" src="https://www.youtube.com/embed/w9O0fkfMkx0" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# ### Kaggle Explanation on Feature Importance
# - https://www.kaggle.com/dansbecker/shap-values
# ## Shapley Values Implementation with Each Model
# + [markdown] tags=[]
# > - Make the Shapley values with just one prediction
# -
# ### `DecisionTreeClassifier()`
# ### `RandomForestClassifier()`
# ### `LogisticRegression()`
# ### Which Model should you take into account?
# ## Pick Up Best Model
# ### Shapley Values for All Predictions
# ### Ranking of Variables
# ## Final Reflections
| III Advanced Machine Learning/02_Feature Importance with Shapley Values/solutions/02session_shapley-values.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 3 - Structured Data
# ## Arrays/Lists
# Arrays are like a list, they contain a collection of many different things. One example is a grocery list
groceries = ["apple", "cat_food", "yogurt", "mango"]
# Of course, things in the list can be of different types, i.e.
some_list = [3, True, 'cat', 42.0, [2, 4]]
# As seen above, we can have lists inside of lists because a list is just a collection of objects bound to the variable (some_list). One of the things we can do with lists is get the element at some location in the list. It is important to note that computers start counting with 0, therefore,
print(some_list[0])
# As seen above, the 0th element is 3. We can say that at index 0 in some_list is the integer 3. You also now know how to call a specific element in a list - with square brackets and the index of the element, starting at 0. In addition, we can call items from the end of the list
print(some_list[-1])
# As you can see, the last element is the list [2, 4]. In addition, we can slice the list. This means we take a subset of the list from one index up to another.
print(some_list[2:5])
# We can see that it starts at the start index and goes up to, but **does not include**, the end index.
# Another thing about lists is that we can mutate them. This is quite easy because we can specify an index and assign something new to it
some_list[0] = 9
print(some_list)
# If we want to add an element to the end, we use the append method like this
# append adds an element to the end of the list in place.
some_list.append("happy")
print(some_list)
# There are several other important methods like pop, index, insert, remove, count, and sort and you can learn more about them [here](https://www.w3schools.com/python/python_ref_list.asp), but most of these methods are self explanatory.
# ## Tuple
# There is a special form of a list called a tuple. A tuple is an **immutable** list, meaning you cannot change the list (i.e. no adding, removing, or modifying). One of the only things you can do with them is concatenate two tuples together
# ## Sets
# Sets are like lists but there are two major differences: they are unordered, so you cannot access elements by index, and they **cannot have duplicates**. Note that while the set itself is mutable — you can add, remove, or update its elements — each element must be immutable (hashable). This makes sets a close resemblance to sets in pure math, and Python even has the methods to support it. From intersection, issubset, and many more found in [w3school](https://www.w3schools.com/python/python_sets.asp), sets can be quite important if used right.
# A set literal uses curly braces; any duplicates would be dropped automatically.
some_set = {3, True, '3'}
print(some_set)
# This is how you set up a set, but know that sets are unordered
# ## Dictionaries
# Dictionaries are one of the most important tools to programmers. Dictionaries have elements that demonstrate a key-value pair. This means a key will give you access to the value. You can think of the key as the index.
# +
# Keys ("cat", "dog", "fish") map to values; look up a value with d[key].
pets = {
    "cat": 1,
    "dog": 2,
    "fish": 5
}
print(pets)
print(pets['cat'])
# -
# Most of the methods to add and get the value are the same, but you can find out more in this [website](https://www.w3schools.com/python/python_dictionaries.asp)
# ## Extra
# Another thing about functions that are important after learning lists is having a variable number of arguments passed in. In which case, we use **\*args**, or if a dictionary (will learn about later) is passed in, we use **\*\*kwargs**. Otherwise, we can feed in separate items in and args with handle items with no keyword while kwargs handles those with keywords
# +
def print_list(*args):
    """Print all positional arguments as a single tuple."""
    print(args)

def print_list_2(*args, **kwargs):
    """Print positional arguments as a tuple, then keyword arguments as a dict."""
    print(args)
    print(kwargs)
evens = [2, 4, 6, 8, 10, 12]
# Positional arguments are collected into the args tuple.
print_list(2, 4, 6, 8, 10, 12)
# The * operator unpacks the list into separate positional arguments.
print_list(*evens)
# Keyword arguments (x, y, z) are collected into the kwargs dict.
print_list_2(2, 4, 6, 8, 10, x = 12, y = 14, z = 16)
# -
# Note the " **\*** " just makes the list or tuple separated items, therefore
# +
def print_list(*args):
    """Print every argument on one line, separated by spaces."""
    # Re-splatting args prints the items individually rather than as a tuple.
    print(*args)
# The * unpacks the list, so each even number is passed as its own argument.
evens = [2, 4, 6, 8, 10, 12]
print_list(*evens)
# -
# # Exercise 4
# Ask the user to create a dictionary of words with the word linked to the meaning of that word. This dictionary should be 2 words long and repeat the dictionary after user finishes.
# write your code below
# ## Chapter 3 Answer:
# +
# inputs
# NOTE(review): int() raises ValueError on non-numeric input — unhandled here.
first_int = int(input("First integer: "))
operator = input("Operator (+, -, *, /): ").strip()
second_int = int(input("Second integer: "))

# inits output as the error message, so if no branch below matches, the
# error text is printed
output = "The operator is not recognized"

# chain of if statements
# This is known as a switch statement in some other languages
if operator == '+':
    output = first_int + second_int
elif operator == '-':
    output = first_int - second_int
elif operator == '*':
    output = first_int * second_int
elif operator == '/':
    # NOTE(review): dividing by zero raises ZeroDivisionError; also note
    # that / yields a float, unlike the other branches.
    output = first_int / second_int

# prints output
print(f'{first_int} {operator} {second_int} = {output} ')
# -
# First we get the inputs from the users, next we set a base case for the output. If the user inputs the operator wrong, we can tell them. Next we do a chain of if statement chain to check which operator it is. Next we print the output
| Chapter 4 - Structured Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Detect and Mitigate Unfairness in Models
#
# Machine learning models can incorporate unintentional bias, which can lead to issues with *fairness*. For example, a model that predicts the likelihood of diabetes might work well for some age groups, but not for others - subjecting a subset of patients to unnecessary tests, or depriving them of tests that would confirm a diabetes diagnosis.
#
# In this notebook, you'll use the **Fairlearn** package to analyze a model and explore disparity in prediction performance for different subsets of patients based on age.
#
# ## Important - Considerations for fairness
#
# > This notebook is designed as a practical exercise to help you explore the Fairlearn package and its integration with Azure Machine Learning. However, there are a great number of considerations that an organization or data science team must discuss related to fairness before using the tools. Fairness is a complex *sociotechnical* challenge that goes beyond simply running a tool to analyze models.
# >
# > Microsoft Research has co-developed a [fairness checklist](https://www.microsoft.com/en-us/research/publication/co-designing-checklists-to-understand-organizational-challenges-and-opportunities-around-fairness-in-ai/) that provides a great starting point for the important discussions that need to take place before a single line of code is written.
#
# ## Install the required SDKs
#
# To use the Fairlearn package with Azure Machine Learning, you need to install the Azure Machine Learning and Fairlearn Python packages, so run the following cell to do that.
# !pip install --upgrade azureml-sdk azureml-widgets azureml-contrib-fairness
# !pip install --upgrade fairlearn==0.5.0
# ***Restart the kernel*** before running the subsequent cells.
#
# ## Train a model
#
# You'll start by training a classification model to predict the likelihood of diabetes. In addition to splitting the data into training and test sets of features and labels, you'll extract *sensitive* features that are used to define subpopulations of the data for which you want to compare fairness. In this case, you'll use the **Age** column to define two categories of patient: those over 50 years old, and those 50 or younger.
# +
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# load the diabetes dataset
print("Loading Data...")
data = pd.read_csv('data/diabetes.csv')

# Separate features and labels
features = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']
X, y = data[features].values, data['Diabetic'].values

# Get sensitive features (kept alongside X; used for fairness analysis only)
S = data[['Age']].astype(int)
# Change value to represent age groups
S['Age'] = np.where(S.Age > 50, 'Over 50', '50 or younger')

# Split data into training set and test set; stratify on the label so both
# splits keep the same class balance
X_train, X_test, y_train, y_test, S_train, S_test = train_test_split(X, y, S, test_size=0.20, random_state=0, stratify=y)

# Train a classification model
print("Training model...")
diabetes_model = DecisionTreeClassifier().fit(X_train, y_train)
print("Model trained.")
# -
# Now that you've trained a model, you can use the Fairlearn package to compare its behavior for different sensitive feature values. In this case, you'll:
#
# - Use the fairlearn **selection_rate** function to return the selection rate (percentage of positive predictions) for the overall population.
# - Use **scikit-learn** metric functions to calculate overall accuracy, recall, and precision metrics.
# - Use a **MetricFrame** to calculate selection rate, accuracy, recall, and precision for each age group in the **Age** sensitive feature. Note that a mix of **fairlearn** and **scikit-learn** metric functions are used to calculate the performance values.
# +
from fairlearn.metrics import selection_rate, MetricFrame
from sklearn.metrics import accuracy_score, recall_score, precision_score

# Get predictions for the withheld test data
y_hat = diabetes_model.predict(X_test)

# Get overall metrics
print("Overall Metrics:")
# Get selection rate (fraction of positive predictions) from fairlearn
overall_selection_rate = selection_rate(y_test, y_hat) # Get selection rate from fairlearn
print("\tSelection Rate:", overall_selection_rate)
# Get standard metrics from scikit-learn
overall_accuracy = accuracy_score(y_test, y_hat)
print("\tAccuracy:", overall_accuracy)
overall_recall = recall_score(y_test, y_hat)
print("\tRecall:", overall_recall)
overall_precision = precision_score(y_test, y_hat)
print("\tPrecision:", overall_precision)

# Get metrics by sensitive group from fairlearn
print('\nMetrics by Group:')
metrics = {'selection_rate': selection_rate,
           'accuracy': accuracy_score,
           'recall': recall_score,
           'precision': precision_score}

# MetricFrame disaggregates each metric by the Age sensitive feature
group_metrics = MetricFrame(metrics,
                            y_test, y_hat,
                            sensitive_features=S_test['Age'])

print(group_metrics.by_group)
# -
# From these metrics, you should be able to discern that a larger proportion of the older patients are predicted to be diabetic. *Accuracy* should be more or less equal for the two groups, but a closer inspection of *precision* and *recall* indicates some disparity in how well the model predicts for each age group.
#
# In this scenario, consider *recall*. This metric indicates the proportion of positive cases that were correctly identified by the model. In other words, of all the patients who are actually diabetic, how many did the model find? The model does a better job of this for patients in the older age group than for younger patients.
#
# It's often easier to compare metrics visually. To do this, you'll use the Fairlearn dashboard:
#
# 1. Run the cell below (*note that a warning about future changes may be displayed - ignore this for now*).
# 2. When the widget is displayed, use the **Get started** link to start configuring your visualization.
# 3. Select the sensitive features you want to compare (in this case, there's only one: **Age**).
# 4. Select the model performance metric you want to compare (in this case, it's a binary classification model so the options are *Accuracy*, *Balanced accuracy*, *Precision*, and *Recall*). Start with **Recall**.
# 5. View the dashboard visualization, which shows:
# - **Disparity in performance** - how the selected performance metric compares for the subpopulations, including *underprediction* (false negatives) and *overprediction* (false positives).
# - **Disparity in predictions** - A comparison of the number of positive cases per subpopulation.
# 6. Edit the configuration to compare the predictions based on different performance metrics.
# +
from fairlearn.widget import FairlearnDashboard

# View this model in Fairlearn's fairness dashboard (renders interactively
# in the notebook), and see the disparities which appear:
FairlearnDashboard(sensitive_features=S_test,
                   sensitive_feature_names=['Age'],
                   y_true=y_test,
                   y_pred={"diabetes_model": diabetes_model.predict(X_test)})
# -
# The results show a much higher selection rate for patients over 50 than for younger patients. However, in reality, age is a genuine factor in diabetes, so you would expect more positive cases among older patients.
#
# If we base model performance on *accuracy* (in other words, the percentage of predictions the model gets right), then it seems to work more or less equally for both subpopulations. However, based on the *precision* and *recall* metrics, the model tends to perform better for patients who are over 50 years old.
#
# Let's see what happens if we exclude the **Age** feature when training the model.
# +
# Separate features and labels, with the Age column removed from the features
ageless = features.copy()
ageless.remove('Age')
X2, y2 = data[ageless].values, data['Diabetic'].values

# Split data into training set and test set (same seed as before; S is still
# split so the dashboard can group results by age even though Age is unused)
X_train2, X_test2, y_train2, y_test2, S_train2, S_test2 = train_test_split(X2, y2, S, test_size=0.20, random_state=0, stratify=y2)

# Train a classification model
print("Training model...")
ageless_model = DecisionTreeClassifier().fit(X_train2, y_train2)
print("Model trained.")

# View this model in Fairlearn's fairness dashboard, and see the disparities which appear:
FairlearnDashboard(sensitive_features=S_test2,
                   sensitive_feature_names=['Age'],
                   y_true=y_test2,
                   y_pred={"ageless_diabetes_model": ageless_model.predict(X_test2)})
# -
# Explore the model in the dashboard.
#
# When you review *recall*, note that the disparity has reduced, but the overall recall has also reduced because the model now significantly underpredicts positive cases for older patients. Even though **Age** was not a feature used in training, the model still exhibits some disparity in how well it predicts for older and younger patients.
#
# In this scenario, simply removing the **Age** feature slightly reduces the disparity in *recall*, but increases the disparity in *precision* and *accuracy*. This underlines one of the key difficulties in applying fairness to machine learning models - you must be clear about what *fairness* means in a particular context, and optimize for that.
#
# ## Register the model and upload the dashboard data to your workspace
#
# You've trained the model and reviewed the dashboard locally in this notebook; but it might be useful to register the model in your Azure Machine Learning workspace and create an experiment to record the dashboard data so you can track and share your fairness analysis.
#
# Let's start by registering the original model (which included **Age** as a feature).
#
# > **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
# +
from azureml.core import Workspace, Experiment, Model
import joblib
import os

# Load the Azure ML workspace from the saved config file
ws = Workspace.from_config()
print('Ready to work with', ws.name)

# Save the trained model to disk before registering it
model_file = 'diabetes_model.pkl'
joblib.dump(value=diabetes_model, filename=model_file)

# Register the model in the workspace's model registry
print('Registering model...')
registered_model = Model.register(model_path=model_file,
                                  model_name='diabetes_classifier',
                                  workspace=ws)
model_id= registered_model.id

print('Model registered.', model_id)
# -
# Now you can use the FairLearn package to create binary classification group metric sets for one or more models, and use an Azure Machine Learning experiment to upload the metrics.
#
# > **Note**: This may take a while. When the experiment has completed, the dashboard data will be downloaded and displayed to verify that it was uploaded successfully.
# +
from fairlearn.metrics._group_metric_set import _create_group_metric_set
from azureml.contrib.fairness import upload_dashboard_dictionary, download_dashboard_by_upload_id

# Create a dictionary of model(s) you want to assess for fairness
sf = { 'Age': S_test.Age}
ys_pred = { model_id:diabetes_model.predict(X_test) }
dash_dict = _create_group_metric_set(y_true=y_test,
                                    predictions=ys_pred,
                                    sensitive_features=sf,
                                    prediction_type='binary_classification')

# Log the dashboard data in an experiment run so it can be viewed in studio
exp = Experiment(ws, 'mslearn-diabetes-fairness')
print(exp)

run = exp.start_logging()

# Upload the dashboard to Azure Machine Learning
try:
    dashboard_title = "Fairness insights of Diabetes Classifier"
    upload_id = upload_dashboard_dictionary(run,
                                            dash_dict,
                                            dashboard_name=dashboard_title)
    print("\nUploaded to id: {0}\n".format(upload_id))

    # To test the dashboard, you can download it
    downloaded_dict = download_dashboard_by_upload_id(run, upload_id)
    print(downloaded_dict)
finally:
    # Always close the run, even if the upload fails
    run.complete()
# -
# The preceding code downloaded the metrics generated in the experiment just to confirm it completed successfully. The real benefit of uploading the metrics to an experiment is that you can now view the FairLearn dashboard in Azure Machine Learning studio.
#
# Run the cell below to see the experiment details, and click the **View Run details** link in the widget to see the run in Azure Machine Learning studio. Then view the **Fairness** tab of the experiment run to view the dashboard, which behaves the same way as the widget you viewed previously in this notebook.
# +
from azureml.widgets import RunDetails

# Show the run's details widget; its link opens the run in AML studio.
RunDetails(run).show()
# -
# You can also find the fairness dashboard by selecting a model in the **Models** page of Azure Machine Learning studio and reviewing its **Fairness** tab. This enables your organization to maintain a log of fairness analysis for the models you train and register.
# ## Mitigate unfairness in the model
#
# Now that you've analyzed the model for fairness, you can use any of the *mitigation* techniques supported by the FairLearn package to find a model that balances predictive performance and fairness.
#
# In this exercise, you'll use the **GridSearch** feature, which trains multiple models in an attempt to minimize the disparity of predictive performance for the sensitive features in the dataset (in this case, the age groups). You'll optimize the models by applying the **EqualizedOdds** parity constraint, which tries to ensure that models that exhibit similar true and false positive rates for each sensitive feature grouping.
#
# > *This may take some time to run*
# +
from fairlearn.reductions import GridSearch, EqualizedOdds
import joblib
import os

print('Finding mitigated models...')

# Train multiple models: GridSearch fits one model per grid point, trading
# off predictive performance against the EqualizedOdds constraint
# (similar true/false positive rates across age groups)
sweep = GridSearch(DecisionTreeClassifier(),
                   constraints=EqualizedOdds(),
                   grid_size=20)

sweep.fit(X_train, y_train, sensitive_features=S_train.Age)
models = sweep.predictors_

# Save the models and get predictions from them (plus the original unmitigated one for comparison)
model_dir = 'mitigated_models'
os.makedirs(model_dir, exist_ok=True)
model_name = 'diabetes_unmitigated'
print(model_name)
joblib.dump(value=diabetes_model, filename=os.path.join(model_dir, '{0}.pkl'.format(model_name)))
predictions = {model_name: diabetes_model.predict(X_test)}
i = 0
for model in models:
    i += 1
    model_name = 'diabetes_mitigated_{0}'.format(i)
    print(model_name)
    joblib.dump(value=model, filename=os.path.join(model_dir, '{0}.pkl'.format(model_name)))
    predictions[model_name] = model.predict(X_test)
# -
# Now you can use the FairLearn dashboard to compare the mitigated models:
#
# Run the following cell and then use the wizard to visualize **Age** by **Recall**.
# Launch the Fairlearn dashboard widget to compare every model's test-set
# predictions, disaggregated by the sensitive Age feature.
FairlearnDashboard(sensitive_features=S_test,
                   sensitive_feature_names=['Age'],
                   y_true=y_test,
                   y_pred=predictions)
# The models are shown on a scatter plot. You can compare the models by measuring the disparity in predictions (in other words, the selection rate) or the disparity in the selected performance metric (in this case, *recall*). In this scenario, we expect disparity in selection rates (because we know that age *is* a factor in diabetes, with more positive cases in the older age group). What we're interested in is the disparity in predictive performance, so select the option to measure **Disparity in recall**.
#
# The chart shows clusters of models with the overall *recall* metric on the X axis, and the disparity in recall on the Y axis. Therefore, the ideal model (with high recall and low disparity) would be at the bottom right corner of the plot. You can choose the right balance of predictive performance and fairness for your particular needs, and select an appropriate model to see its details.
#
# An important point to reinforce is that applying fairness mitigation to a model is a trade-off between overall predictive performance and disparity across sensitive feature groups - generally you must sacrifice some overall predictive performance to ensure that the model predicts fairly for all segments of the population.
#
# > **Note**: Viewing the *precision* metric may result in a warning that precision is being set to 0.0 due to no predicted samples - you can ignore this.
#
# ## Upload the mitigation dashboard metrics to Azure Machine Learning
#
# As before, you might want to keep track of your mitigation experimentation. To do this, you can:
#
# 1. Register the models found by the GridSearch process.
# 2. Compute the performance and disparity metrics for the models.
# 3. Upload the metrics in an Azure Machine Learning experiment.
# +
# Register every saved model with the workspace and key its test-set
# predictions by the registered model's id.
registered_model_predictions = {}
for model_name, prediction_data in predictions.items():
    registered_model = Model.register(
        model_path=os.path.join(model_dir, model_name + ".pkl"),
        model_name=model_name,
        workspace=ws)
    registered_model_predictions[registered_model.id] = prediction_data

# Build the binary-classification fairness metric set, grouped by Age,
# for all of the registered models.
sf = {'Age': S_test.Age}
dash_dict = _create_group_metric_set(y_true=y_test,
                                     predictions=registered_model_predictions,
                                     sensitive_features=sf,
                                     prediction_type='binary_classification')

# Log the metrics under a fresh run of the fairness experiment.
exp = Experiment(ws, "mslearn-diabetes-fairness")
print(exp)

run = exp.start_logging()
RunDetails(run).show()

# Upload the dashboard; the run is always marked complete, even on failure.
try:
    dashboard_title = "Fairness Comparison of Diabetes Models"
    upload_id = upload_dashboard_dictionary(run,
                                            dash_dict,
                                            dashboard_name=dashboard_title)
    print("\nUploaded to id: {0}\n".format(upload_id))
finally:
    run.complete()
# -
# > **Note**: A warning that precision is being set to 0.0 due to no predicted samples may be displayed - you can ignore this.
#
#
# When the experiment has finished running, click the **View Run details** link in the widget to view the run in Azure Machine Learning studio, and view the FairLearn dashboard on the **fairness** tab.
| 15 - Detect Unfairness.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Taller S.O.S. (Software de Observaciones Sintéticas)
# ## Simulaciones numéricas como laboratorios virtuales: conectando teoría y observaciones
# ### Documentación próximamente disponible en [link](https://github.com/MarcialX/sos).
# ### Este código analiza los cubos de datos simulados de una nube filamentaria, y a partir de observaciones de $^{12}$CO, $^{13}$CO y C$^{18}$O se obtienen propiedades físicas de una Nube Molecular prototipo.
# ## Parte 1: Propiedades globales de la Nube Molecular:
#
# Las masas de las nubes moleculares se pueden calcular usando tres diferentes métodos:
#
# * Masa calculada mediante el Teorema Virial, $M_{\rm vir}$:
#
# $\Big[ \frac{M_{\rm vir}}{10^4 \, M_\odot} \Big] = 1.58 \times \Big[ \frac{R_e}{5 \, {\rm pc}} \Big] \times \Big[ \frac{\Delta v}{5 \, {\rm km \, s^{-1}}} \Big]^2$, donde $R_e$ es el radio de la nube y $\Delta v$ el ancho de línea.
#
# * Masa calculada con la densidad de columna, $N (^{13}{\rm CO})$, o método ETL, $M_{\rm ETL}$:
#
# $\Big[ \frac{M_{\rm ETL}}{10^4 \, M_\odot} \Big] = 3.25 \times \Big[ \frac{R_e}{5 \, {\rm pc}} \Big]^2 \times \Big[ \frac{N (^{13}{\rm CO})}{10^{17} \, {\rm cm}^{-2}} \Big]$.
#
# * Masa calculada mediante el factor de conversión ($XF = 2.8 \times 10^{20} {\rm \, cm^{-2} \, K^{-1} \, km^{-1} \, s }$), $M_{\rm XF}$:
#
# $\Big[ \frac{M_{\rm XF}}{10^4 \, M_\odot} \Big] = 3.26 \times \Big[ \frac{R_e}{5 \, {\rm pc}} \Big]^2 \times \Big[ \frac{W (^{13}{\rm CO})}{5 \, {\rm K \, km \, s^{-1}}} \Big]$,
#
# donde $W (^{13}{\rm CO})$ es la luminosidad de la línea $^{13}{\rm CO}$ que se relaciona con el factor X y la densidad de columna del hidrógeno molecular, $N({\rm H}_2)$, de la forma $XF \equiv N({\rm H}_2) / W (^{13}{\rm CO})$.
#
# Ver el artículo de [<NAME>-Romero y colaboradores (2017)](https://ui.adsabs.harvard.edu/abs/2017ApJ...839..113R/abstract) para una explicación más detallada.
#
# ***
#
#
# ### De ser necesario, nos cambiamos al directorio de trabajo:
# +
#import os
#os.chdir("/ruta/al/paquete/sos")
# -
# ### Importamos el paquete sos:
import sos
# ### Especificamos las rutas a los cubos de datos de una Nube Molecular (p.e., TaurusMC)
# +
#db = sos.mc_db_tools.mc_db(sos.DB_PATH)
#db.get_mc_params('TaurusMC')
# -
# ### De ser necesario, cargamos los cubos de datos (PPV) manualmente:
# +
#db.path13CO = './sos/data_cube/filament_13co_YZ_Jansky-per-px.fits'
#db.path12CO = './sos/data_cube/filament_12co_YZ_Jansky-per-px.fits'
#db.pathC18O = './sos/data_cube/filament_c18o_YZ_Jansky-per-px.fits'
# -
# ### Creamos ahora el objeto "mc", usando su identificador (p.ej., TaurusMC):
mc = sos.mc('TaurusMC')  # molecular-cloud object, looked up by its DB identifier
# ### Objetos disponibles en la base de datos de SOS:
db_objs = sos.mc_db(sos.DB_PATH)
db_objs.mc_db.keys()  # identifiers of all clouds available in the SOS database
# ### Creamos los mapas integrados en velocidad de las lineas moleculares disponibles ($^{12}$CO, $^{13}$CO y C$^{18}$O)
mc.get_map_vels()
# ### Creamos los mapas de momento cero M0
sos.msg('Molécula 13CO', 'ok')
# Each M0 call returns the zeroth-moment (velocity-integrated) map plus its
# FITS header; save=True also writes the map to disk.
m0_data_13co, m0_header_13co = mc.M0('13CO', save=True)
sos.msg('Molécula 12CO', 'ok')
m0_data_12co, m0_header_12co = mc.M0('12CO', save=True)
sos.msg('Molécula C18O', 'ok')
m0_data_c18o, m0_header_c18o = mc.M0('C18O', save=True)
# ## Nube molecular completa. Modo FULL
# ### Ajustamos las lineas de emision de las moleculas integradas por toda la longitud y latitud:
mc.line_fit('13CO', iter=True)  # iterative line fit over the whole cloud
mc.line_fit('12CO', iter=True)
# ### Calculamos los parametros fisicos de las nubes, como sus masas y densidades columnares. Las masas se calculan por los tres métodos mencionados arriba:
mc.get_gral_params()  # virial, LTE and X-factor masses (see formulas above)
# ### Desplegamos resultados:
mc.summary()
# ## Nube molecular segmentada. Modo segmentado o bin
# ### Dividimos los mapas de todas las moléculas: $^{13}$CO, $^{12}$CO y C$^{18}$O de la nube, en NxN bines:
# ### La primera segmentación requiere que 'rebin'=True
mc.binning_mol('13CO', 16, rebin=True)  # 16x16 grid; rebin=True only on first call
# ### El resto ya no lo requiere
mc.binning_mol('12CO', 16)
mc.binning_mol('C18O', 16)
# ### Ajustamos las lineas para cada bin
mc.line_fit_binning('13CO')
mc.line_fit_binning('12CO')
# ### Calculamos los parámetros físicos por cada bin
mc.get_bins_params()
# Inspect one bin's results ('B135'); bare expressions display in the notebook.
mc.mc_binned['B135']['mass_lte']
mc.mc_binned['B135'].keys()
# ### Veamos como lucen las líneas de alguna de las moléculas ($^{13}$CO) por cada bin:
mc_binned = mc.mc_binned
# NOTE: this rebinds the name `plt` to an SOS plotter object (it shadows the
# usual matplotlib.pyplot alias, if imported).
plt = sos.mc_plotter(mc_binned, m0_data_13co, m0_header_13co)
plt.plot_m0_line('13CO')
# ### Podemos crear un mapa de calor con las propiedades físicas en cada bin. Por ejemplo $M_{\rm LTE}$:
plt.map_param('mass_lte', cmap='Blues', log=False)
# ### Respaldemos los datos para la siguiente sección:
mc.backup('Parte_1')
# ### Para cargar el respaldo:
mc.load_bkp('./sos/bkp/Parte_1/')
| .ipynb_checkpoints/1-NubeMolecular_propiedadesGlobales-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="u3Zq5VrfiDqB"
# ##### Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + id="3jTEqPzFiHQ0"
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="x97n3SaNmNpB"
# # Fitting Generalized Linear Mixed-effects Models Using Variational Inference
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/probability/examples/Linear_Mixed_Effects_Model_Variational_Inference"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Model_Variational_Inference.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Model_Variational_Inference.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Model_Variational_Inference.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + id="yPby2hWGS651"
#@title Install { display-mode: "form" }
TF_Installation = 'System' #@param ['TF Nightly', 'TF Stable', 'System']

# NOTE: the '# !pip ...' lines below are jupytext-escaped shell commands —
# they run as '!pip ...' cells when this file is opened as a notebook.
if TF_Installation == 'TF Nightly':
    # !pip install -q --upgrade tf-nightly
    print('Installation of `tf-nightly` complete.')
elif TF_Installation == 'TF Stable':
    # !pip install -q --upgrade tensorflow
    print('Installation of `tensorflow` complete.')
elif TF_Installation == 'System':
    # Use whatever TensorFlow is already installed in the environment.
    pass
else:
    raise ValueError('Selection Error: Please select a valid '
                     'installation option.')
# + id="ZKFMx9zmTBbd"
#@title Install { display-mode: "form" }
TFP_Installation = "System" #@param ["Nightly", "Stable", "System"]

# NOTE: the '# !pip ...' lines below are jupytext-escaped shell commands —
# they run as '!pip ...' cells when this file is opened as a notebook.
if TFP_Installation == "Nightly":
    # !pip install -q tfp-nightly
    print("Installation of `tfp-nightly` complete.")
elif TFP_Installation == "Stable":
    # !pip install -q --upgrade tensorflow-probability
    print("Installation of `tensorflow-probability` complete.")
elif TFP_Installation == "System":
    # Use the TensorFlow Probability already installed in the environment.
    pass
else:
    raise ValueError("Selection Error: Please select a valid "
                     "installation option.")
# + [markdown] id="0GVst7yy6Aww"
# ## Abstract
#
# + [markdown] id="Lt4RS9whJhQh"
#
# In this colab we demonstrate how to fit a generalized linear mixed-effects model using variational inference in TensorFlow Probability.
#
# + [markdown] id="0-lfIBVAzi7D"
# ## Model Family
# + [markdown] id="ljSRsKrXwqb6"
# [Generalized linear mixed-effect models](https://en.wikipedia.org/wiki/Generalized_linear_mixed_model) (GLMM) are similar to [generalized linear models](https://en.wikipedia.org/wiki/Generalized_linear_model) (GLM) except that they incorporate a sample specific noise into the predicted linear response. This is useful in part because it allows rarely seen features to share information with more commonly seen features.
#
#
# + [markdown] id="H-B38entvltq"
# As a generative process, a Generalized Linear Mixed-effects Model (GLMM) is characterized by:
#
# $$
# \begin{align}
# \text{for } & r = 1\ldots R: \hspace{2.45cm}\text{# for each random-effect group}\\
# &\begin{aligned}
# \text{for } &c = 1\ldots |C_r|: \hspace{1.3cm}\text{# for each category ("level") of group $r$}\\
# &\begin{aligned}
# \beta_{rc}
# &\sim \text{MultivariateNormal}(\text{loc}=0_{D_r}, \text{scale}=\Sigma_r^{1/2})
# \end{aligned}
# \end{aligned}\\\\
# \text{for } & i = 1 \ldots N: \hspace{2.45cm}\text{# for each sample}\\
# &\begin{aligned}
# &\eta_i = \underbrace{\vphantom{\sum_{r=1}^R}x_i^\top\omega}_\text{fixed-effects} + \underbrace{\sum_{r=1}^R z_{r,i}^\top \beta_{r,C_r(i) }}_\text{random-effects} \\
# &Y_i|x_i,\omega,\{z_{r,i} , \beta_r\}_{r=1}^R \sim \text{Distribution}(\text{mean}= g^{-1}(\eta_i))
# \end{aligned}
# \end{align}
# $$
# + [markdown] id="3gZmFJXAHwfy"
# where:
#
# $$
# \begin{align}
# R &= \text{number of random-effect groups}\\
# |C_r| &= \text{number of categories for group $r$}\\
# N &= \text{number of training samples}\\
# x_i,\omega &\in \mathbb{R}^{D_0}\\
# D_0 &= \text{number of fixed-effects}\\
# C_r(i) &= \text{category (under group $r$) of the $i$th sample}\\
# z_{r,i} &\in \mathbb{R}^{D_r}\\
# D_r &= \text{number of random-effects associated with group $r$}\\
# \Sigma_{r} &\in \{S\in\mathbb{R}^{D_r \times D_r} : S \succ 0 \}\\
# \eta_i\mapsto g^{-1}(\eta_i) &= \mu_i, \text{inverse link function}\\
# \text{Distribution} &=\text{some distribution parameterizable solely by its mean}
# \end{align}
# $$
# + [markdown] id="5AYonR45P1Hr"
# In other words, this says that every category of each group is associated with a sample, $\beta_{rc}$, from a multivariate normal. Although the $\beta_{rc}$ draws are always independent, they are only identically distributed for a group $r$: notice there is exactly one $\Sigma_r$ for each $r\in\{1,\ldots,R\}$.
#
# When affinely combined with a sample's group's features ($z_{r,i}$), the result is sample-specific noise on the $i$-th predicted linear response (which is otherwise $x_i^\top\omega$).
# + [markdown] id="__dP1MdYKda0"
# When we estimate $\{\Sigma_r:r\in\{1,\ldots,R\}\}$ we're essentially estimating the amount of noise a random-effect group carries which would otherwise drown out the signal present in $x_i^\top\omega$.
# + [markdown] id="0EZXZzlYSbM7"
# There are a variety of options for the $\text{Distribution}$ and [inverse link function](https://en.wikipedia.org/wiki/Generalized_linear_model#Link_function), $g^{-1}$. Common choices are:
# - $Y_i\sim\text{Normal}(\text{mean}=\eta_i, \text{scale}=\sigma)$,
# - $Y_i\sim\text{Binomial}(\text{mean}=n_i \cdot \text{sigmoid}(\eta_i), \text{total_count}=n_i)$, and,
# - $Y_i\sim\text{Poisson}(\text{mean}=\exp(\eta_i))$.
#
# For more possibilities, see the [`tfp.glm`](https://github.com/tensorflow/probability/tree/main/tensorflow_probability/python/glm) module.
# + [markdown] id="XajrojApx5cR"
# ## Variational Inference
# + [markdown] id="fIQn1mlYAUzx"
# Unfortunately, finding the maximum likelihood estimates of the parameters $\beta,\{\Sigma_r\}_r^R$ entails a non-analytical integral. To circumvent this problem, we instead
# 1. Define a parameterized family of distributions (the "surrogate density"), denoted $q_{\lambda}$ in the appendix.
# 2. Find parameters $\lambda$ so that $q_{\lambda}$ is close to our true target density.
#
# The family of distributions will be independent Gaussians of the proper dimensions, and by "close to our target density", we will mean "minimizing the [Kullback-Leibler divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence)". See, for example [Section 2.2 of "Variational Inference: A Review for Statisticians"](https://arxiv.org/abs/1601.00670) for a well-written derivation and motivation. In particular, it shows that minimizing the K-L divergence is equivalent to minimizing the negative evidence lower bound (ELBO).
# + [markdown] id="Nu8B7ylx3UdL"
# ## Toy Problem
# + [markdown] id="vDmAfghTJcLo"
# [Gelman et al.'s (2007) "radon dataset"](http://www.stat.columbia.edu/~gelman/arm/) is a dataset sometimes used to demonstrate approaches for regression. (E.g., this closely related [PyMC3 blog post](http://twiecki.github.io/blog/2014/03/17/bayesian-glms-3/).) The radon dataset contains indoor measurements of Radon taken throughout the United States. [Radon](https://en.wikipedia.org/wiki/Radon) is a naturally occurring radioactive gas which is [toxic](http://www.radon.com/radon_facts/) in high concentrations.
#
# For our demonstration, let's suppose we're interested in validating the hypothesis that Radon levels are higher in households containing a basement. We also suspect Radon concentration is related to soil-type, i.e., geography matters.
#
# To frame this as an ML problem, we'll try to predict log-radon levels based on a linear function of the floor on which the reading was taken. We'll also use the county as a random-effect and in so doing account for variances due to geography. In other words, we'll use a [generalized linear mixed-effect model](https://en.wikipedia.org/wiki/Generalized_linear_mixed_model).
# + id="_zr34b0IBqgY"
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import os
from six.moves import urllib
import matplotlib.pyplot as plt; plt.style.use('ggplot')
import numpy as np
import pandas as pd
import seaborn as sns; sns.set_context('notebook')
import tensorflow_datasets as tfds
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
# + [markdown] id="WkKB-97-sipM"
# We will also do a quick check for availability of a GPU:
# + colab={"height": 35} id="gWCdTCR9snIQ" outputId="5ed94af3-242e-4b6d-c1c8-58784e2fb044"
# Report whether TensorFlow can see a GPU (gpu_device_name() returns '' when
# no GPU is available).
if tf.test.gpu_device_name() == '/device:GPU:0':
    print('Huzzah! Found GPU: {}'.format(tf.test.gpu_device_name()))
else:
    print("We'll just use the CPU for this run.")
# + [markdown] id="UzepxbtrpHpD"
# ### Obtain Dataset:
#
# We load the dataset from TensorFlow datasets and do some light preprocessing.
# + colab={"height": 196} id="uCadtPD6jXfh" outputId="e805d0b7-cd48-4d6e-af33-b0619805bcbf"
def load_and_preprocess_radon_dataset(state='MN'):
    """Fetch the radon data from TensorFlow Datasets and tidy it up.

    Keeps only readings from `state` (default Minnesota, as in "Bayesian Data
    Analysis", Gelman 2007) and derives:
      - `county`: title-cased name of the county the reading was taken in,
      - `county_code`: contiguous integer category code for the county,
      - `floor`: floor of the house (0 = basement, 1 = first floor),
      - `log_radon`: natural log of the radon activity, floored at 0.1.

    Returns a `pandas.DataFrame` with exactly those four columns.
    """
    raw = tfds.as_dataframe(tfds.load('radon', split='train'))
    # TFDS prefixes feature columns; drop the first 9 characters of those names.
    raw.rename(lambda name: name[9:] if name.startswith('feat') else name,
               axis=1, inplace=True)
    # State values are bytes in the raw frame, hence the encode() comparison.
    state_df = raw[raw.state == state.encode()].copy()
    # Clamp non-positive activity readings so the log below is defined.
    state_df['radon'] = state_df.activity.apply(lambda a: a if a > 0. else 0.1)
    # County names also arrive as bytes; decode and normalise their formatting.
    state_df['county'] = (state_df.county.apply(lambda b: b.decode())
                          .str.strip().str.title())
    # Categorical encoding gives codes running 0..max(category) contiguously.
    state_df['county'] = state_df.county.astype(pd.api.types.CategoricalDtype())
    state_df['county_code'] = state_df.county.cat.codes
    # Radon levels are positive but their logs are unconstrained.
    state_df['log_radon'] = state_df['radon'].apply(np.log)
    # Keep only the modelling columns and tidy the index.
    wanted = ['log_radon', 'floor', 'county', 'county_code']
    return state_df[wanted].reset_index(drop=True)

df = load_and_preprocess_radon_dataset()
df.head()
# + [markdown] id="_OxaVNnjYZyL"
# ### Specializing the GLMM Family
# + [markdown] id="niha5M54Yjf-"
# In this section, we specialize the GLMM family to the task of predicting radon levels. To do this, we first consider the fixed-effect special case of a GLMM:
# $$
# \mathbb{E}[\log(\text{radon}_j)] = c + \text{floor_effect}_j
# $$
#
# This model posits that the log radon in observation $j$ is (in expectation) governed by the floor the $j$th reading is taken on, plus some constant intercept. In pseudocode, we might write
#
# def estimate_log_radon(floor):
# return intercept + floor_effect[floor]
#
# there's a weight learned for every floor and a universal `intercept` term. Looking at the radon measurements from floor 0 and 1, it looks like this might be a good start:
# + colab={"height": 313} id="YwzykNvJgfJo" outputId="c5f3166d-0ca6-4e62-e3ef-bf10bcc07a86"
# Left: density of measured log-radon for each floor; right: how many
# readings were taken on each floor.
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 4))
df.groupby('floor')['log_radon'].plot(kind='density', ax=ax1);
ax1.set_xlabel('Measured log(radon)')
ax1.legend(title='Floor')
df['floor'].value_counts().plot(kind='bar', ax=ax2)
ax2.set_xlabel('Floor where radon was measured')
ax2.set_ylabel('Count')
fig.suptitle("Distribution of log radon and floors in the dataset");
# + [markdown] id="8MqU1SefgRy5"
# To make the model a little more sophisticated, including something about geography is probably even better: radon is part of the decay chain of uranium, which may be present in the ground, so geography seems key to account for.
#
# $$
# \mathbb{E}[\log(\text{radon}_j)] = c + \text{floor_effect}_j + \text{county_effect}_j
# $$
#
# Again, in pseudocode, we have
#
# def estimate_log_radon(floor, county):
# return intercept + floor_effect[floor] + county_effect[county]
#
# the same as before except with a county-specific weight.
# + [markdown] id="rcddRr2Ug1cH"
# Given a sufficiently large training set, this is a reasonable model. However, given our data from Minnesota, we see that there's a large number of counties with a small number of measurements. For example, 39 out of 85 counties have fewer than five observations.
#
# This motivates sharing statistical strength between all our observations, in a way that converges to the above model as the number of observations per county increases.
# + colab={"height": 438} id="15f4k6gQg40_" outputId="2d9e07c6-0d5d-48e3-f67c-2386eaa9cb86"
# Bar chart of reading counts per county, showing the long tail of counties
# with very few observations (motivation for partial pooling).
fig, ax = plt.subplots(figsize=(22, 5));
county_freq = df['county'].value_counts()
county_freq.plot(kind='bar', ax=ax)
ax.set_xlabel('County')
ax.set_ylabel('Number of readings');
# + [markdown] id="VxtwlODcdJZe"
# If we fit this model, the `county_effect` vector would likely end up memorizing the results for counties which had only a few training samples, perhaps overfitting and leading to poor generalization.
#
# GLMM's offer a happy middle to the above two GLMs. We might consider fitting
#
# $$
# \log(\text{radon}_j) \sim c + \text{floor_effect}_j + \mathcal{N}(\text{county_effect}_j, \text{county_scale})
# $$
#
# This model is the same as the first, but we have fixed our likelihood to be a normal distribution, and will share the variance across all counties through the (single) variable `county_scale`. In pseudocode,
#
# def estimate_log_radon(floor, county):
# county_mean = county_effect[county]
# random_effect = np.random.normal() * county_scale + county_mean
# return intercept + floor_effect[floor] + random_effect
#
# We will infer the joint distribution over `county_scale`, `county_mean`, and the `random_effect` using our observed data. The global `county_scale` allows us to share statistical strength across counties: those with many observations provide a hint at the variance of counties with few observations. Furthermore, as we gather more data, this model will converge to the model without a pooled scale variable - even with this dataset, we will come to similar conclusions about the most observed counties with either model.
# + [markdown] id="QjvAR2-ZYgxP"
# ## Experiment
# + [markdown] id="bioH0_7ZfC4Z"
# We'll now try to fit the above GLMM using variational inference in TensorFlow. First we'll split the data into features and labels.
# + id="AFFj4KrwfPMg"
# Design matrix: county code and floor as integer columns; target is the
# log-radon reading as a flat float32 vector.
features = df.loc[:, ['county_code', 'floor']].astype(int)
labels = df['log_radon'].astype(np.float32).to_numpy()
# + [markdown] id="ZWvgUUAbGgkc"
# ### Specify Model
# + id="ujtDCBCcCu1q"
def make_joint_distribution_coroutine(floor, county, n_counties, n_floors):
    """Build the GLMM joint distribution over latents and observations.

    Args:
      floor: per-sample floor indicator (fixed-effect covariate).
      county: per-sample county code, used to index the random-effect vector.
      n_counties: number of distinct counties (length of `county_prior`).
      n_floors: number of distinct floors (not used in the model body).

    Returns:
      A `tfd.JointDistributionCoroutineAutoBatched` whose components, in
      yield order, are: scale_prior, intercept, floor_weight, county_prior,
      and the 'likelihood' over observed log-radon readings.
    """
    def model():
        # Priors: positive half-normal for the shared county-effect scale;
        # standard normals for the global intercept and the floor weight.
        county_scale = yield tfd.HalfNormal(scale=1., name='scale_prior')
        intercept = yield tfd.Normal(loc=0., scale=1., name='intercept')
        floor_weight = yield tfd.Normal(loc=0., scale=1., name='floor_weight')
        # One random-effect draw per county, all sharing `county_scale`.
        county_prior = yield tfd.Normal(loc=tf.zeros(n_counties),
                                        scale=county_scale,
                                        name='county_prior')
        # Look up each sample's county effect and add the fixed effects.
        random_effect = tf.gather(county_prior, county, axis=-1)
        fixed_effect = intercept + floor_weight * floor
        linear_response = fixed_effect + random_effect
        # Unit-scale normal likelihood centred on the linear response.
        yield tfd.Normal(loc=linear_response, scale=1., name='likelihood')
    return tfd.JointDistributionCoroutineAutoBatched(model)

joint = make_joint_distribution_coroutine(
    features.floor.values, features.county_code.values, df.county.nunique(),
    df.floor.nunique())

# Define a closure over the joint distribution
# to condition on the observed labels.
def target_log_prob_fn(*args):
    # `args` are the latent values; the likelihood component is pinned to
    # the observed labels, giving the unnormalized posterior log-density.
    return joint.log_prob(*args, likelihood=labels)
# + [markdown] id="8cd1whNpMPwL"
# ### Specify surrogate posterior
# + [markdown] id="UZ5WAja5ejQg"
# We now put together a surrogate family $q_{\lambda}$, where the parameters $\lambda$ are trainable. In this case, our family is independent multivariate normal distributions, one for each parameter, and $\lambda = \{(\mu_j, \sigma_j)\}$, where $j$ indexes the four parameters.
#
# The method we use to fit the surrogate family uses `tf.Variables`. We also use `tfp.util.TransformedVariable` along with `Softplus` to constrain the (trainable) scale parameters to be positive. Additionally, we apply `Softplus` to the entire `scale_prior`, which is a positive parameter.
#
# We initialize these trainable variables with a bit of jitter to aid in optimization.
# + id="Ov8PwoebKn2T"
# Initialize locations and scales randomly with `tf.Variable`s and
# `tfp.util.TransformedVariable`s.
# Trainable location, drawn uniformly in [-2, 2) — the jitter aids optimization.
_init_loc = lambda shape=(): tf.Variable(
    tf.random.uniform(shape, minval=-2., maxval=2.))
# Trainable scale, kept positive by parameterizing through a Softplus bijector.
_init_scale = lambda shape=(): tfp.util.TransformedVariable(
    initial_value=tf.random.uniform(shape, minval=0.01, maxval=1.),
    bijector=tfb.Softplus())
n_counties = df.county.nunique()

# Mean-field surrogate: one independent Normal factor per latent, listed in
# the same order as the model's yields. The first factor is pushed through
# Softplus so its support (positive reals) matches the HalfNormal scale prior.
surrogate_posterior = tfd.JointDistributionSequentialAutoBatched([
    tfb.Softplus()(tfd.Normal(_init_loc(), _init_scale())),  # scale_prior
    tfd.Normal(_init_loc(), _init_scale()),  # intercept
    tfd.Normal(_init_loc(), _init_scale()),  # floor_weight
    tfd.Normal(_init_loc([n_counties]), _init_scale([n_counties]))])  # county_prior
# + [markdown] id="a8IlctaL_cvE"
# Note that this cell can be replaced with [`tfp.experimental.vi.build_factored_surrogate_posterior`](https://www.tensorflow.org/probability/api_docs/python/tfp/experimental/vi/build_factored_surrogate_posterior?version=nightly), as in:
#
# ```python
# surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(
# event_shape=joint.event_shape_tensor()[:-1],
# constraining_bijectors=[tfb.Softplus(), None, None, None])
# ```
# + [markdown] id="yhtN7BOoXRDb"
# ### Results
# + [markdown] id="QZU99LcO5pcb"
# Recall that our goal is to define a tractable parameterized family of distributions, and then select parameters so that we have a tractable distribution that is close to our target distribution.
#
# We have built the surrogate distribution above, and can use [`tfp.vi.fit_surrogate_posterior`](https://www.tensorflow.org/probability/api_docs/python/tfp/vi/fit_surrogate_posterior), which accepts an optimizer and a given number of steps to find the parameters for the surrogate model minimizing the negative ELBO (which corresponds to minimizing the Kullback-Leibler divergence between the surrogate and the target distribution).
#
# The return value is the negative ELBO at each step, and the distributions in `surrogate_posterior` will have been updated with the parameters found by the optimizer.
# + id="Ow-XvCiJczNr"
optimizer = tf.optimizers.Adam(learning_rate=1e-2)

# Minimize the negative ELBO for 3000 steps; `losses` records the objective
# value at every step (sample_size=2 Monte Carlo samples per gradient).
losses = tfp.vi.fit_surrogate_posterior(
    target_log_prob_fn,
    surrogate_posterior,
    optimizer=optimizer,
    num_steps=3000,
    seed=42,
    sample_size=2)

# Unpack the fitted per-latent surrogate distributions, in the same order as
# the surrogate was built (the second return value is a sample, unused here).
(scale_prior_,
 intercept_,
 floor_weight_,
 county_weights_), _ = surrogate_posterior.sample_distributions()
# + colab={"height": 71} id="sn43cNdHXe8J" outputId="78b6d9bc-94a0-4ea0-aaf8-b6843dda43a9"
# The scale prior's mean is approximated by sampling because the Softplus
# transform has no closed-form mean.
print(' intercept (mean): ', intercept_.mean())
print(' floor_weight (mean): ', floor_weight_.mean())
print(' scale_prior (approx. mean): ', tf.reduce_mean(scale_prior_.sample(10000)))
# + colab={"height": 245} id="4ItwhsHUm0hF" outputId="10bfc60e-3565-46c6-eca9-452823f573b4"
# Training curve: negative ELBO per iteration.
fig, ax = plt.subplots(figsize=(10, 3))
ax.plot(losses, 'k-')
ax.set(xlabel="Iteration",
       ylabel="Loss (ELBO)",
       title="Loss during training",
       ylim=0);
# + [markdown] id="ZkAWIRuUWPee"
# We can plot the estimated mean county effects, along with the uncertainty of that mean. We have ordered this by number of observations, with the largest on the left. Notice that the uncertainty is small for the counties with many observations, but is larger for the counties that have only one or two observations.
# + colab={"height": 435} id="F5AEDIXQHZMT" outputId="d250f948-f585-4314-b1df-0a653ee30f28"
# Counties ordered by number of observations, most-observed first.
county_counts = (df.groupby(by=['county', 'county_code'], observed=True)
                 .agg('size')
                 .sort_values(ascending=False)
                 .reset_index(name='count'))

# Posterior mean and std of each county's random effect under the surrogate.
means = county_weights_.mean()
stds = county_weights_.stddev()

fig, ax = plt.subplots(figsize=(20, 5))
for idx, row in county_counts.iterrows():
    mid = means[row.county_code]
    std = stds[row.county_code]
    # Vertical bar spans mean +/- 1 posterior std; the dot marks the mean.
    ax.vlines(idx, mid - std, mid + std, linewidth=3)
    ax.plot(idx, means[row.county_code], 'ko', mfc='w', mew=2, ms=7)
ax.set(
    xticks=np.arange(len(county_counts)),
    xlim=(-1, len(county_counts)),
    ylabel="County effect",
    title=r"Estimates of county effects on log radon levels. (mean $\pm$ 1 std. dev.)",
)
ax.set_xticklabels(county_counts.county, rotation=90);
# + [markdown] id="mB0pTr-XWztv"
# Indeed, we can see this more directly by plotting the log-number of observations against the estimated standard deviation, and see the relationship is approximately linear.
# + colab={"height": 476} id="3P1YgFBxQzyt" outputId="7fd7cd82-c403-45bb-8a9a-6ca4e890917f"
# Posterior uncertainty vs. log observation count per county — the relation
# is approximately linear, as discussed above.
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(np.log1p(county_counts['count']), stds.numpy()[county_counts.county_code], 'o')
ax.set(
    ylabel='Posterior std. deviation',
    xlabel='County log-count',
    title='Having more observations generally\nlowers estimation uncertainty'
);
# + [markdown] id="AxhvAIeFs2cL"
#
# ## Comparing to `lme4` in R
# + colab={"height": 17} id="hvRDd7T-s7qY" outputId="e2bcc435-8e9d-4fe4-844c-7bdfb01a005b"
# %%shell
exit # Trick to make this block not execute.
radon = read.csv('srrs2.dat', header = TRUE)
radon = radon[radon$state=='MN',]
radon$radon = ifelse(radon$activity==0., 0.1, radon$activity)
radon$log_radon = log(radon$radon)
# install.packages('lme4')
library(lme4)
fit <- lmer(log_radon ~ 1 + floor + (1 | county), data=radon)
fit
# Linear mixed model fit by REML ['lmerMod']
# Formula: log_radon ~ 1 + floor + (1 | county)
# Data: radon
# REML criterion at convergence: 2171.305
# Random effects:
# Groups Name Std.Dev.
# county (Intercept) 0.3282
# Residual 0.7556
# Number of obs: 919, groups: county, 85
# Fixed Effects:
# (Intercept) floor
# 1.462 -0.693
# + [markdown] id="n5IqiHERv91u"
# The following table summarizes the results.
# + colab={"height": 71} id="D0sUh3NNuqlw" outputId="3c817b06-2928-45b9-81bf-650c5d39c69d"
# Side-by-side comparison: lme4's REML estimates (hard-coded from the R
# output above) vs. our variational-inference estimates.
print(pd.DataFrame(data=dict(intercept=[1.462, tf.reduce_mean(intercept_.mean()).numpy()],
                             floor=[-0.693, tf.reduce_mean(floor_weight_.mean()).numpy()],
                             scale=[0.3282, tf.reduce_mean(scale_prior_.sample(10000)).numpy()]),
                   index=['lme4', 'vi']))
# + [markdown] id="nVjHJxVdwBXb"
# This table indicates the VI results are within ~10% of `lme4`'s. This is somewhat surprising since:
# - `lme4` is based on [Laplace's method](https://www.jstatsoft.org/article/view/v067i01/) (not VI),
# - no effort was made in this colab to actually converge,
# - minimal effort was made to tune hyperparameters,
# - no effort was taken to regularize or preprocess the data (e.g., center features, etc.).
# + [markdown] id="ApP0PtwYN_ah"
# ## Conclusion
# + [markdown] id="eIFHW00tOJwo"
# In this colab we described Generalized Linear Mixed-effects Models and showed how to use variational inference to fit them using TensorFlow Probability. Although the toy problem only had a few hundred training samples, the techniques used here are identical to what is needed at scale.
| tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Model_Variational_Inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('base')
# language: python
# name: python3
# ---
# # Prepare Datasets
# 1. https://github.com/2runo/Curse-detection-data
# 2. https://github.com/jason9693/APEACH (Benchmark set)
# 3. https://github.com/kocohub/korean-hate-speech (BEEP!)
# +
import os
from IPython.display import display
# %matplotlib inline
os.makedirs("raw", exist_ok=True)
os.makedirs("data/", exist_ok=True)
# +
from urllib import request
import pandas as pd
def download(url, save_path):
    """Download the resource at `url` and write it to `save_path`.

    Thin wrapper around `urllib.request.urlretrieve`; overwrites any
    existing file at `save_path`.
    """
    # NOTE(fix): removed the dead, commented-out `requests`-based variant that
    # duplicated this behavior.
    request.urlretrieve(url, save_path)
def download_curse_detection():
    """Fetch the Curse-detection dataset and load it into a DataFrame.

    The raw file is '|'-separated; the returned frame has `text` and
    `label` columns.
    """
    dataset_url = "https://raw.githubusercontent.com/2runo/Curse-detection-data/master/dataset.txt"
    local_path = "raw/curse.txt"
    download(dataset_url, local_path)
    return pd.read_csv(local_path, names=["text", "label"], sep="|")
# Download and inspect the curse-detection dataset.
curse = download_curse_detection()
curse
# +
from sklearn.model_selection import train_test_split
# Quick data-quality checks: duplicate rows and the raw label values.
display(curse.duplicated().sum())
print(curse.label.unique())
# Keep only rows whose label is a clean digit string (drops malformed rows),
# then cast the label column to int.
curse = curse[curse.label.str.isdigit()].astype({"label": int})
print(curse.label.unique())
# Stratified 80/20 train/dev split with a fixed seed for reproducibility.
train, dev = train_test_split(curse, test_size=0.2, shuffle=True, stratify=curse.label, random_state=42)
train.to_csv("data/curse_train.csv", index=False)
dev.to_csv("data/curse_dev.csv", index=False)
# -
# +
def download_korean_hate_speach():
    """Fetch the BEEP! korean-hate-speech labeled TSVs.

    Downloads the train and dev splits into raw/korean-hate-speech/ and
    returns them as (train_df, dev_df).
    """
    base = "https://raw.githubusercontent.com/kocohub/korean-hate-speech/master/labeled/"
    local_dir = "raw/korean-hate-speech"
    os.makedirs(local_dir, exist_ok=True)
    frames = []
    for split in ("train", "dev"):
        split_path = local_dir + "/" + split + ".tsv"
        download(base + split + ".tsv", split_path)
        frames.append(pd.read_csv(split_path, sep="\t"))
    return tuple(frames)
train, dev = download_korean_hate_speach()
display(train)
display(dev)
# +
print(train.comments.duplicated().sum())
print("bias values", train.bias.unique())
print("hate values", train.hate.unique())
display(train.hate.value_counts())
display(train.bias.value_counts())
display(train.contain_gender_bias.value_counts())
# +
from sklearn.preprocessing import LabelEncoder
def prepare_khs(df, bias_le, hate_le):
    """Return a copy of `df` with its `bias` and `hate` columns label-encoded.

    The input frame is left untouched; `bias_le` / `hate_le` are fitted
    encoders exposing `transform`.
    """
    encoded = df.copy()
    encoded['bias'] = bias_le.transform(encoded.bias)
    encoded['hate'] = hate_le.transform(encoded.hate)
    return encoded
# Fit label encoders on the training split only, then encode train and dev.
bias_le = LabelEncoder().fit(train.bias)
hate_le = LabelEncoder().fit(train.hate)
print('bias classes', bias_le.classes_)
print('hate classes', hate_le.classes_)
train_khs = prepare_khs(train, bias_le, hate_le)
display(train_khs)
train_khs.to_csv("data/khs_train.csv", index=False)
dev_khs = prepare_khs(dev, bias_le, hate_le)
display(dev_khs)
dev_khs.to_csv("data/khs_dev.csv", index=False)
# +
import torch
from torch.utils.data import Dataset, TensorDataset
import pandas as pd
from tokenizers import Tokenizer
from transformers import AutoTokenizer, BertTokenizer
from typing import List, Dict, Any
class DataFrameDataset(Dataset):
    """Torch map-style Dataset over a DataFrame of comments, tokenized eagerly.

    Every text in ``df.comments`` is tokenized once in the constructor
    (padded/truncated to 64 tokens, returned as torch tensors);
    ``__getitem__`` yields ``(input_ids, attention_mask, labels)`` for one
    row, where ``labels`` holds the integer values of ``label_columns``.
    """
    def __init__(self,
                 tokenizer: Tokenizer,
                 df: pd.DataFrame,
                 label_columns: List[str],
                 padding: str = "max_length") -> None:
        super().__init__()
        self.df = df
        # Tokenize the whole corpus in one batched call.
        inputs = tokenizer(df.comments.to_list(), padding=padding, max_length=64, truncation="only_first", return_tensors="pt")
        self.input_ids = inputs["input_ids"]
        print(list(inputs.keys()))  # debug: which encodings the tokenizer produced
        self.attention_masks = inputs["attention_mask"]
        self.label_columns = label_columns
    def __len__(self) -> int:
        # NOTE(fix): a map-style Dataset must define __len__ for DataLoader's
        # default sampler to work; the original class omitted it.
        return len(self.df)
    def __getitem__(self, index: Any) -> Dict:
        # NOTE(review): actually returns a 3-tuple, not a Dict as annotated —
        # the annotation looks stale; confirm intent before changing it.
        labels = self.df.iloc[index][self.label_columns]
        return self.input_ids[index], self.attention_masks[index], torch.from_numpy(labels.values.astype(int))
# -
tokenizer = BertTokenizer.from_pretrained("beomi/kcbert-base")
# NOTE(review): model_max_length is set to 32 here while DataFrameDataset
# tokenizes with max_length=64 — confirm which limit is intended.
tokenizer.model_max_length = 32
# Smoke test: encode the first row and decode it back.
# NOTE(review): prepare_khs produced an integer-encoded `hate` column, not
# one-hot `hate_*` columns — verify these label columns exist in train_khs.
ids, mask, labels = DataFrameDataset(tokenizer, train_khs, ["hate_hate", "hate_none", "hate_offensive"])[0]
print(ids, ids.shape, mask, labels)
print(train_khs.comments[0])
print(tokenizer.decode(ids))
| notebooks/prepare_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 2</font>
#
# ## Download: http://github.com/dsacademybr
# Python language version in use for this notebook.
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
# ## Dictionaries
# This is a list
estudantes_lst = ["Mateus", 24, "Fernanda", 22, "Tamires", 26, "Cristiano", 25]
estudantes_lst
# This is a dictionary (name -> age)
estudantes_dict = {"Mateus":24, "Fernanda":22, "Tamires":26, "Cristiano":25}
estudantes_dict
estudantes_dict["Mateus"]
# Adding a new key/value pair.
estudantes_dict["Pedro"] = 23
estudantes_dict["Pedro"]
estudantes_dict["Tamires"]
# clear() empties the dictionary in place.
estudantes_dict.clear()
estudantes_dict
# del removes the name binding entirely.
del estudantes_dict
# NOTE(review): this next reference raises NameError because the name was
# just deleted — presumably an intentional demonstration; confirm.
estudantes_dict
estudantes = {"Mateus":24, "Fernanda":22, "Tamires":26, "Cristiano":25}
estudantes
len(estudantes)
estudantes.keys()
estudantes.values()
estudantes.items()
estudantes2 = {"Maria":27, "Erika":28, "Milton":26}
estudantes2
# update() merges estudantes2 into estudantes.
estudantes.update(estudantes2)
estudantes
dic1 = {}
dic1
dic1["key_one"] = 2
print(dic1)
# Keys may be of any hashable type: str, int, float, ...
dic1[10] = 5
dic1
dic1[8.2] = "Python"
dic1
dic1["teste"] = 5
dic1
dict1 = {}
dict1
dict1["teste"] = 10
dict1["key"] = "teste"
# Note: a key and a value can be equal, but they represent different things.
dict1
dict2 = {}
dict2["key1"] = "Big Data"
dict2["key2"] = 10
dict2["key3"] = 5.6
dict2
a = dict2["key1"]
b = dict2["key2"]
c = dict2["key3"]
a, b, c
# Dictionary of lists
dict3 = {'key1':1230,'key2':[22,453,73.4],'key3':['leite','maça','batata']}
dict3
dict3['key2']
# Accessing a list item inside the dictionary
dict3['key3'][0].upper()
# Operating on a list item inside the dictionary
var1 = dict3['key2'][0] - 2
var1
# Two operations in one statement, updating an item inside the list in place
dict3['key2'][0] -= 2
dict3
# ### Creating nested dictionaries
# Creating nested dictionaries
dict_aninhado = {'key1':{'key2_aninhada':{'key3_aninhada':'Dict aninhado em Python'}}}
dict_aninhado
dict_aninhado['key1']['key2_aninhada']['key3_aninhada']
# # Fim
# ### Obrigado
#
# ### Visite o Blog da Data Science Academy - <a href="http://blog.dsacademy.com.br">Blog DSA</a>
| python-fundamentals-data-analysis-3.0/PythonFundamentos/Cap02/Notebooks/DSA-Python-Cap02-05-Dicionarios.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import timeit
import gym
import numpy as np
import cv2
import math
import random
from datetime import timedelta
from IPython import display
import matplotlib.pyplot as plt
from collections import deque
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# +
GAMMA = 0.99
LR = 1e-4
TARGET_NET_UPDATE_FREQ = 1_000
EXP_REPLAY_SIZE = 10_0000
BATCH_SIZE = 32
LEARN_START = 10_000
MAX_FRAMES= 1_000_000
epsilon_start = 1.0
epsilon_final = 0.01
epsilon_decay = 30_000
epsilon_by_frame = lambda frame_idx: epsilon_final + (epsilon_start - epsilon_final) * math.exp(-1. * frame_idx / epsilon_decay)
# +
class NoopResetEnv(gym.Wrapper):
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # When set, overrides the random draw with a fixed no-op count.
        self.override_num_noops = None
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            # Randomize the initial state by stepping 1..noop_max no-ops.
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                # Episode ended during the no-ops; restart and keep stepping.
                obs = self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame, max-pooled over the last two raw frames."""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Max over the two most recent raw frames (mitigates Atari sprite
        # flicker). Note that the observation on the done=True frame
        # doesn't matter.
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
    def reset(self, **kwargs):
        # NOTE(fix): the original defined `reset` twice in this class; the
        # first definition (which dropped **kwargs) was silently shadowed by
        # this later one. Only the effective definition is kept.
        return self.env.reset(**kwargs)
class FireResetEnv(gym.Wrapper):
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        # Action 1 must be FIRE and the env must have at least 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        # Press FIRE (action 1) then action 2 to actually start the game;
        # re-reset if either step happens to end the episode.
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        # Tracks whether the underlying env really finished (true game over).
        self.was_real_done = True
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env):
        """Warp frames to 84x84 grayscale as done in the Nature paper and later work."""
        gym.ObservationWrapper.__init__(self, env)
        self.width = 84
        self.height = 84
        self.observation_space = gym.spaces.Box(low=0, high=255,
            shape=(self.height, self.width, 1), dtype=np.uint8)
    def observation(self, frame):
        # RGB -> grayscale, then resize; INTER_AREA suits downscaling.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        # Restore a trailing channel axis: (84, 84) -> (84, 84, 1).
        return frame[:, :, None]
class ClipRewardEnv(gym.RewardWrapper):
    """Clip every reward to its sign, as in the DQN training setup."""
    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)
    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class LazyFrames(object):
    """Memory-saving view over a list of observation frames.

    Ensures common frames between stacked observations are stored only once,
    which matters for DQN-sized (1M transition) replay buffers. The frames
    are concatenated along the channel axis only on first access, and the
    result is cached. Convert to a numpy array before feeding a model.
    """
    def __init__(self, frames):
        self._frames = frames
        self._out = None
    def _force(self):
        # Materialize (and cache) the concatenated observation lazily;
        # the original frame list is released once the cache exists.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=2)
            self._frames = None
        return self._out
    def __array__(self, dtype=None):
        arr = self._force()
        return arr if dtype is None else arr.astype(dtype)
    def __len__(self):
        return len(self._force())
    def __getitem__(self, i):
        return self._force()[i]
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack k last frames.
        Returns lazy array, which is much more memory efficient.
        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        # Rolling window of the k most recent observations.
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # Observation space grows k-fold along the channel axis.
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=np.uint8)
    def reset(self):
        # Fill the window with k copies of the initial observation.
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        assert len(self.frames) == self.k
        # LazyFrames defers the channel-axis concatenation until needed.
        return LazyFrames(list(self.frames))
def make_atari(env_id):
    """Create a raw Atari env (must be a NoFrameskip variant) with noop
    reset and 4-frame max+skip applied."""
    env = gym.make(env_id)
    assert 'NoFrameskip' in env.spec.id
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.

    Applies (in order): episodic-life termination, FIRE-on-reset where the
    game needs it, 84x84 grayscale warping, optional scaling, sign reward
    clipping, and optional 4-frame stacking.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale:
        # NOTE(review): ScaledFloatFrame is not defined anywhere in this file;
        # scale=True would raise NameError — presumably the wrapper was meant
        # to be copied over from baselines.common.atari_wrappers. Confirm.
        env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
class ImageToPyTorch(gym.ObservationWrapper):
    """
    Image shape to num_channels x weight x height
    """
    def __init__(self, env):
        super(ImageToPyTorch, self).__init__(env)
        old_shape = self.observation_space.shape
        # NOTE(review): declared bounds are 0.0-1.0 with dtype uint8 while the
        # underlying frames hold 0-255 values — the space metadata looks
        # inconsistent with the actual observations; verify.
        self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(old_shape[-1], old_shape[0], old_shape[1]), dtype=np.uint8)
    def observation(self, observation):
        # Move channels first. NOTE(review): swapaxes(2, 0) yields (C, W, H);
        # this matches the declared (C, H, W) only because frames are square
        # (84x84) — confirm before using non-square observations.
        return np.swapaxes(observation, 2, 0)
def wrap_pytorch(env):
    # Convenience wrapper: channel-first observations for PyTorch conv layers.
    return ImageToPyTorch(env)
# -
class ExperienceReplayMemory:
    """Fixed-capacity FIFO buffer of transitions with uniform sampling."""
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
    def push(self, transition):
        """Append the newest transition, evicting the oldest once full."""
        self.memory.append(transition)
        while len(self.memory) > self.capacity:
            self.memory.pop(0)
    def sample(self, batch_size):
        """Draw `batch_size` transitions uniformly, without replacement."""
        return random.sample(self.memory, batch_size)
    def __len__(self):
        return len(self.memory)
class DQN(nn.Module):
    """Nature-DQN Q-network: three conv layers followed by two linear layers.

    Maps a (C, 84, 84) observation stack to one Q-value per action.
    """
    def __init__(self, input_shape, num_actions):
        super(DQN, self).__init__()
        self.input_shape = input_shape
        self.num_actions = num_actions
        # Conv stack per Mnih et al. (2015): 8x8/4 -> 4x4/2 -> 3x3/1.
        self.conv1 = nn.Conv2d(self.input_shape[0], 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(self.feature_size(), 512)
        self.fc2 = nn.Linear(512, self.num_actions)
    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = F.relu(self.conv3(out))
        # Flatten all conv features per batch element before the linear head.
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        return self.fc2(out)
    def feature_size(self):
        # Probe the conv stack with a dummy zero input to learn the
        # flattened feature count for fc1.
        probe = torch.zeros(1, *self.input_shape)
        feats = self.conv3(self.conv2(self.conv1(probe)))
        return feats.view(1, -1).size(1)
class Model(object):
    """DQN agent: online + target Q-networks, replay memory, epsilon-greedy policy."""
    def __init__(self, static_policy=False, env=None):
        super(Model, self).__init__()
        # Hyperparameters come from the module-level constants above.
        self.gamma=GAMMA
        self.lr = LR
        self.target_net_update_freq = TARGET_NET_UPDATE_FREQ
        self.experience_replay_size = EXP_REPLAY_SIZE
        self.batch_size = BATCH_SIZE
        self.learn_start = LEARN_START
        # static_policy=True: evaluation-only agent (acts greedily, never trains).
        self.static_policy=static_policy
        self.num_feats = env.observation_space.shape
        self.num_actions = env.action_space.n
        self.env = env
        self.declare_networks()
        # Start the target network in sync with the online network.
        self.target_model.load_state_dict(self.model.state_dict())
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        #move to correct device
        self.model = self.model.to(device)
        self.target_model.to(device)
        if self.static_policy:
            self.model.eval()
            self.target_model.eval()
        else:
            self.model.train()
            self.target_model.train()
        self.update_count = 0
        self.declare_memory()
    def declare_networks(self):
        # Online and target networks share the same architecture.
        self.model = DQN(self.num_feats, self.num_actions)
        self.target_model = DQN(self.num_feats, self.num_actions)
    def declare_memory(self):
        self.memory = ExperienceReplayMemory(self.experience_replay_size)
    def append_to_replay(self, s, a, r, s_):
        # s_ is None for terminal transitions (see the training loop below).
        self.memory.push((s, a, r, s_))
    def prep_minibatch(self):
        # random transition batch is taken from experience replay memory
        transitions = self.memory.sample(BATCH_SIZE)
        batch_state, batch_action, batch_reward, batch_next_state = zip(*transitions)
        shape = (-1,)+self.num_feats
        batch_state = torch.tensor(batch_state, device=device, dtype=torch.float).view(shape)
        batch_action = torch.tensor(batch_action, device=device, dtype=torch.long).squeeze().view(-1, 1)
        batch_reward = torch.tensor(batch_reward, device=device, dtype=torch.float).squeeze().view(-1, 1)
        # Mask marking transitions whose next state is non-terminal.
        # NOTE(review): uint8 masks for indexing are deprecated in newer
        # PyTorch (torch.bool is preferred) — confirm against the installed version.
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch_next_state)), device=device, dtype=torch.uint8)
        try: #sometimes all next states are false
            non_final_next_states = torch.tensor([s for s in batch_next_state if s is not None], device=device, dtype=torch.float).view(shape)
            empty_next_state_values = False
        except:
            # Every sampled transition was terminal — nothing to bootstrap from.
            non_final_next_states = None
            empty_next_state_values = True
        return batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values
    def compute_loss(self, batch_vars):
        # Huber loss between current Q(s,a) and the bootstrapped TD target.
        batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values = batch_vars
        #estimate
        current_q_values = self.model(batch_state).gather(1, batch_action)
        #target
        with torch.no_grad():
            # Terminal next states contribute zero future value.
            max_next_q_values = torch.zeros(self.batch_size, device=device, dtype=torch.float).unsqueeze(dim=1)
            if not empty_next_state_values:
                # Plain DQN target: argmax and value both from the target net.
                max_next_action = self.get_max_next_state_action(non_final_next_states)
                max_next_q_values[non_final_mask] = self.target_model(non_final_next_states).gather(1, max_next_action)
            expected_q_values = batch_reward + (self.gamma*max_next_q_values)
        diff = (expected_q_values - current_q_values)
        loss = self.huber(diff)
        loss = loss.mean()
        return loss
    def update(self, s, a, r, s_, frame=0):
        # Store the transition; after warm-up, do one gradient step.
        # Returns the scalar loss, or None when no training happened.
        if self.static_policy:
            return None
        self.append_to_replay(s, a, r, s_)
        # Skip learning until the replay buffer has warmed up.
        if frame < self.learn_start:
            return None
        batch_vars = self.prep_minibatch()
        loss = self.compute_loss(batch_vars)
        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        # Clip gradients to [-1, 1], as in the Nature DQN setup.
        for param in self.model.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()
        self.update_target_model()
        return loss.item()
    def get_action(self, s, eps=0.1):
        # Epsilon-greedy action selection (always greedy for a static policy).
        with torch.no_grad():
            if np.random.random() >= eps or self.static_policy:
                X = torch.tensor([s], device=device, dtype=torch.float)
                a = self.model(X).max(1)[1].view(1, 1)
                return a.item()
            else:
                return np.random.randint(0, self.num_actions)
    def update_target_model(self):
        # Hard-copy online weights into the target net every
        # target_net_update_freq updates.
        self.update_count+=1
        self.update_count = self.update_count % self.target_net_update_freq
        if self.update_count == 0:
            self.target_model.load_state_dict(self.model.state_dict())
    def get_max_next_state_action(self, next_states):
        # Argmax over the target network's Q-values for each next state.
        return self.target_model(next_states).max(dim=1)[1].view(-1, 1)
    def huber(self, x):
        # Elementwise Huber loss (delta=1): quadratic inside |x|<1, linear outside.
        cond = (x.abs() < 1.0).to(torch.float)
        return 0.5 * x.pow(2) * cond + (x.abs() - 0.5) * (1 - cond)
def plot(frame_idx, rewards, losses, elapsed_time):
    """Refresh the notebook output with reward and loss curves."""
    display.clear_output(True)
    plt.figure(figsize=(20,5))
    plt.subplot(131)
    # Title shows the rolling mean reward over the last 10 episodes.
    plt.title('frame %s. reward: %s. time: %s' % (frame_idx, np.mean(rewards[-10:]), elapsed_time))
    plt.plot(rewards)
    plt.subplot(132)
    plt.title('loss')
    plt.plot(losses)
    plt.show()
# +
start=timeit.default_timer()
env_id = "PongNoFrameskip-v4"
env = make_atari(env_id)
env = wrap_deepmind(env, frame_stack=True)
env = wrap_pytorch(env)
model = Model(env=env)
losses = []
all_rewards = []
episode_reward = 0
observation = env.reset()
# Main loop: epsilon-greedy acting + DQN updates for up to MAX_FRAMES frames.
for frame_idx in range(1, MAX_FRAMES + 1):
    epsilon = epsilon_by_frame(frame_idx)
    action = model.get_action(observation, epsilon)
    prev_observation=observation
    observation, reward, done, _ = env.step(action)
    # Store None as the next state on terminal transitions; the replay
    # machinery uses this as the non-final mask.
    observation = None if done else observation
    loss = model.update(prev_observation, action, reward, observation, frame_idx)
    episode_reward += reward
    if done:
        observation = env.reset()
        all_rewards.append(episode_reward)
        episode_reward = 0
        # Early stop once the 10-episode mean reward exceeds 19 (near Pong's max).
        if np.mean(all_rewards[-10:]) > 19:
            plot(frame_idx, all_rewards, losses, timedelta(seconds=int(timeit.default_timer()-start)))
            break
    if loss is not None:
        losses.append(loss)
    # Periodic progress plot.
    if frame_idx % 10000 == 0:
        plot(frame_idx, all_rewards, losses, timedelta(seconds=int(timeit.default_timer()-start)))
env.close()
# -
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (3.6)
# language: python
# name: py36
# ---
# # Tensorflow - Neural Networks
#
# We've discovered that the data may need more complex modeling, so we can test neural networks
import tensorflow as tf
import numpy as np
import pandas as pd
import sklearn as sk
# ## Model 1: Simple Linear Model
#
# This model will feature no hidden layers, and will simply use weights (multiplying inputs) and bias (adding to the result) in order to create a one-vs-all probability of each class. For model evaluation, we will use a train-test split.
#
# Note: We will treat each separate class of polviews as its own class.
# ### Data Prep
#
# With a few modifications, we can use the output of the data pipeline.
X = pd.read_csv('data/training/X.csv')
y = pd.read_csv('data/training/y.csv', names=['Polviews'])
from sklearn.preprocessing import OneHotEncoder
y = OneHotEncoder(categories='auto').fit_transform(y).todense()
# +
# Test/Train Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
# +
# Number of X columns
X_cols = X.shape[1]
# Number of y columns
y_cols = y.shape[1]
# List of the true value for each column
y_true = np.squeeze(np.asarray(y.argmax(axis=1)))
y_train_true = np.squeeze(np.asarray(y_train.argmax(axis=1)))
y_test_true = np.squeeze(np.asarray(y_test.argmax(axis=1)))
# -
# ### Placeholder Variables
#
# These variables are placeholders for our Tensorflow graph.
# Placeholder for the input features (batch x X_cols).
x = tf.placeholder(tf.float32, [None, X_cols])
# NOTE(review): this rebinds the numpy `y_true` computed in the data-prep
# cell to a placeholder of the same name — the earlier array survives only
# as y_train_true / y_test_true; confirm the shadowing is intended.
y_true = tf.placeholder(tf.float32, [None, y_cols])
y_true_cls = tf.placeholder(tf.int64, [None])
# Single linear layer: logits = xW + b; softmax gives per-class probabilities.
weights = tf.Variable(tf.zeros([X_cols, y_cols]))
biases = tf.Variable(tf.zeros([y_cols]))
logits = tf.matmul(x, weights) + biases
y_pred = tf.nn.softmax(logits)
y_pred_cls = tf.argmax(y_pred, axis=1)
# ### Cost function, Optimization, and Accuracy
# Cross-entropy averaged over the batch, minimized with plain gradient descent.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                           labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# ### Running + Helper funcions
session = tf.Session()
session.run(tf.global_variables_initializer())
batch_size = 100
def optimize(num_iterations):
    """Run `num_iterations` mini-batch gradient-descent steps.

    Uses the module-level `session`, `optimizer`, the `x`/`y_true`
    placeholders, and the X_train/y_train arrays.
    """
    for i in range(num_iterations):
        # Get a batch of training examples.
        # x_batch holds a batch of inputs and y_true_batch the matching labels.
        # NOTE(fix): the original assignment line was garbled
        # ("x_batchjn atch = ...") and the comma after batch_size=500 was
        # missing — both were SyntaxErrors.
        x_batch, y_true_batch = tf.train.shuffle_batch(
            [X_train, y_train],
            batch_size=500,
            num_threads=4,
            capacity=50000,
            min_after_dequeue=10000)
        # Put the batch into a dict with the proper names
        # for placeholder variables in the TensorFlow graph.
        # Note that the placeholder for y_true_cls is not set
        # because it is not used during training.
        feed_dict_train = {x: x_batch,
                           y_true: y_true_batch}
        # Run the optimizer using this batch of training data.
        # TensorFlow assigns the variables in feed_dict_train
        # to the placeholder variables and then runs the optimizer.
        session.run(optimizer, feed_dict=feed_dict_train)
image_batch, label_batch = tf.train.shuffle_batch(
[X_train, y_train],
batch_size=32,
num_threads=4,
capacity=50000,
min_after_dequeue=10000)
feed_dict_test = {x: X_test,
y_true: y_test,
y_true_cls: y_test_true}
def print_accuracy():
    """Evaluate and print test-set accuracy using the global session/graph."""
    # Use TensorFlow to compute the accuracy.
    acc = session.run(accuracy, feed_dict=feed_dict_test)
    # Print the accuracy.
    print("Accuracy on test-set: {0:.1%}".format(acc))
def print_confusion_matrix():
    """Print and plot the confusion matrix of the model on the test set."""
    # Local imports: the notebook's top cells never imported these names,
    # so the original function raised NameError on them.
    from sklearn.metrics import confusion_matrix
    import matplotlib.pyplot as plt
    # Get the true classifications for the test-set.
    # NOTE(fix): the original read `y_trueest`, an undefined name; the
    # intended variable is `y_test_true` computed during data prep.
    cls_true = y_test_true
    # Get the predicted classifications for the test-set.
    cls_pred = session.run(y_pred_cls, feed_dict=feed_dict_test)
    # Get the confusion matrix using sklearn.
    cm = confusion_matrix(y_true=cls_true,
                          y_pred=cls_pred)
    # Print the confusion matrix as text.
    print(cm)
    # Plot the confusion matrix as an image.
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    # Make various adjustments to the plot.
    plt.tight_layout()
    plt.colorbar()
    # NOTE(fix): `num_classes` was undefined; derive it from the matrix shape.
    num_classes = cm.shape[0]
    tick_marks = np.arange(num_classes)
    plt.xticks(tick_marks, range(num_classes))
    plt.yticks(tick_marks, range(num_classes))
    plt.xlabel('Predicted')
    plt.ylabel('True')
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
# No iterations
print_accuracy()
optimize(num_iterations=500)
session.close()
| Tensorflow (requires 3.6).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # ML Pipeline
# 按照如下的指导要求,搭建你的机器学习管道。
# ### 1. 导入与加载
# - 导入 Python 库
# - 使用 [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html) 从数据库中加载数据集
# - 定义特征变量X 和目标变量 Y
# import libraries
# load data from database
engine = create_engine('sqlite:///InsertDatabaseName.db')
df =
X =
Y =
# ### 2. 编写分词函数,开始处理文本
def tokenize(text):
    """Template stub: normalize and tokenize `text` (to be implemented)."""
    pass
# ### 3. 创建机器学习管道
# 这个机器学习管道应该接收 `message` 列作输入,输出分类结果,分类结果属于该数据集中的 36 个类。你会发现 [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) 在预测多目标变量时很有用。
pipeline =
# ### 4. 训练管道
# - 将数据分割成训练和测试集
# - 训练管道
# ### 5. 测试模型
# 报告数据集中每个输出类别的 f1 得分、准确度和召回率。你可以对列进行遍历,并对每个元素调用 sklearn 的 `classification_report`。
# ### 6. 优化模型
# 使用网格搜索来找到最优的参数组合。
# +
parameters =
cv =
# -
# ### 7. 测试模型
# 打印微调后的模型的精确度、准确率和召回率。
#
# 因为本项目主要关注代码质量、开发流程和管道技术,所有没有模型性能指标的最低要求。但是,微调模型提高精确度、准确率和召回率可以让你的项目脱颖而出——特别是让你的简历更出彩。
# ### 8. 继续优化模型,比如:
# * 尝试其他的机器学习算法
# * 尝试除 TF-IDF 外其他的特征
# ### 9. 导出模型为 pickle file
# ### 10. Use this notebook to complete `train.py`
# 使用资源 (Resources)文件里附带的模板文件编写脚本,运行上述步骤,创建一个数据库,并基于用户指定的新数据集输出一个模型。
| code/ML Pipeline Preparation-zh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DATA VISUALIZATIONS ON THE IRIS DATASET USING PYTHON
# +
# Importing all libararies
import pandas as pd # Data processing and CSV file I/O library
# Importing seaborn, a python graphing library
import warnings # To ignore warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", color_codes=True)
# -
# Load the iris flower dataset from CSV.
df=pd.read_csv('Iris.csv')
# Peek at the first rows.
df.head()
# Number of samples of each species.
df.groupby('Species').size()
# Histograms of every numeric feature (Id column dropped).
df.drop("Id", axis=1).hist(edgecolor='black', linewidth=1.2)
fig=plt.gcf()
fig.set_size_inches(12,6)
fig.show()
# Scatterplot on the iris features.
df.plot(kind="scatter", x="SepalLengthCm", y="SepalWidthCm")
# Seaborn jointplot shows bivariate scatterplots and univariate histograms
# in the same figure.
# NOTE(review): the `size` keyword used below was renamed `height` in
# seaborn >= 0.9 — confirm the installed version.
sns.jointplot(x="SepalLengthCm", y="SepalWidthCm", data=df, size=10)
# Seaborn's FacetGrid used to color the scatterplot by species;
# this differentiates each species in the scatter plot above.
sns.FacetGrid(df, hue="Species", size=5)\
   .map(plt.scatter, "SepalLengthCm", "SepalWidthCm")\
   .add_legend()
# Looking at an individual feature in Seaborn through a boxplot.
sns.boxplot(x="Species", y="PetalLengthCm", data=df)
# Add a layer of individual points on top of the boxplot via stripplot.
# jitter=True keeps the points from falling in a single vertical line per species.
ax= sns.boxplot(x="Species", y="PetalLengthCm", data=df)
ax= sns.stripplot(x="Species", y="PetalLengthCm", data=df, jitter=True, edgecolor="gray")
# A violin plot combines the benefits of the previous two plots: denser
# regions of the data are fatter, sparser regions thinner.
sns.violinplot(x="Species", y="PetalLengthCm", data=df, size=6)
# kdeplot is useful for univariate relations: it draws a kernel density
# estimate of the underlying feature.
sns.FacetGrid(df, hue="Species", size=6)\
   .map(sns.kdeplot, "PetalLengthCm")\
   .add_legend()
# pairplot shows the bivariate relation between each pair of features.
# Iris-setosa separates from the other two species across all feature
# combinations.
sns.pairplot(df.drop("Id", axis=1), hue="Species", size=3)
# Diagonal elements in a pairplot show histograms by default;
# diag_kind="kde" swaps them for kernel density estimates.
sns.pairplot(df.drop("Id", axis=1), hue="Species", size=3, diag_kind="kde")
# Some plots using pandas:
# boxplot on each figure split out by species.
df.drop("Id", axis=1).boxplot(by="Species", figsize=(12,6))
# Andrews curves use the attributes of each sample as coefficients of a
# Fourier series and plot the resulting curves, one per row.
# NOTE(fix): `pandas.tools.plotting` was removed from pandas (0.25+); the
# public location of these functions is `pandas.plotting`.
from pandas.plotting import andrews_curves
andrews_curves(df.drop("Id", axis=1), "Species")
# Parallel coordinates plot each feature on a separate column and then draw
# lines connecting the features for each sample.
from pandas.plotting import parallel_coordinates
parallel_coordinates(df.drop("Id", axis=1), "Species")
# Radviz puts each feature as a point on a 2D plane, and then simulates
# having each sample attached to those points through a spring weighted
# by the relative value for that feature.
from pandas.plotting import radviz
radviz(df.drop("Id", axis=1), "Species")
| Iris Species /Data_Visualizations_On_Iris_Dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # q1
# Sort a list of numeric strings; lexicographic order happens to match
# numeric order for these particular values.
num_list = ['12','32','43','35']
num_list.sort()
print (num_list)
print (num_list[0])
print (num_list[3])
# # q2
# Split the sentence into words and count them.
print ("this is my third python lab".split())
print (len("this is my third python lab".split()))
# # q3
# Deduplicate with a set; note that equal-hashing values collapse together.
my_list = ['a', '123', 123, 'b', 'B', 'False', False, 123, None, 'None']
print (set(my_list))
print (len(set(my_list)))
# # q4
my_dict = {"favorite count":1138,
           "lang":"ch",
           "coordinates":(-75.14310264, 40.05701649),
           "visited countries":["GR","HK","MY"]}
print (my_dict)
# # q 4.1 — update an existing value
my_dict['lang']='en'
print (my_dict)
# # q4.2 — length of a list stored in the dict
print (len(my_dict['visited countries']))
# # q4.3 — append to a list stored in the dict
my_dict["visited countries"].append('CH')
print (my_dict["visited countries"])
# # q4.4 — membership test
print ('US' in my_dict["visited countries"])
# # q4.5 — indexing into a tuple value
print(my_dict['coordinates'][0])
# # q4.6 — replace the tuple value entirely (tuples are immutable)
my_dict["coordinates"]=(-81,45)
print (my_dict["coordinates"])
| lab 03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Effects of moving the locus along the genome
# (c) 2019 <NAME>. This work is licensed under a [Creative Commons Attribution License CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/). All code contained herein is licensed under an [MIT license](https://opensource.org/licenses/MIT)
#
# ---
# +
import os
import pickle
import cloudpickle
import itertools
import glob
# Our numerical workhorses
import numpy as np
import scipy as sp
import pandas as pd
# Import libraries to parallelize processes
from joblib import Parallel, delayed
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
# Seaborn, useful for graphics
import seaborn as sns
# Import the project utils
import sys
sys.path.insert(0, '../../../')
import ccutils
# Magic function to make matplotlib inline; other style specs must come AFTER
# %matplotlib inline
# This enables SVG graphics inline
# %config InlineBackend.figure_format = 'retina'
tmpdir = '../../../tmp/'
figdir = '../../../fig/moment_dynamics_numeric/'
datadir = '../../../data/csv_maxEnt_dist/'
# -
# Set PBoC plotting format (project helper; styles matplotlib globally)
ccutils.viz.set_plotting_style()
# Increase dpi for sharper inline figures
mpl.rcParams['figure.dpi'] = 110
# ### $\LaTeX$ macros
# $\newcommand{kpon}{k^{(p)}_{\text{on}}}$
# $\newcommand{kpoff}{k^{(p)}_{\text{off}}}$
# $\newcommand{kron}{k^{(r)}_{\text{on}}}$
# $\newcommand{kroff}{k^{(r)}_{\text{off}}}$
# $\newcommand{rm}{r _m}$
# $\newcommand{gm}{\gamma _m}$
# $\newcommand{rp}{r _p}$
# $\newcommand{gp}{\gamma _p}$
# $\newcommand{\eR}{\Delta\varepsilon_r}$
# $\newcommand{\Nns}{N_{\text{NS}}}$
# $\newcommand{ee}[1]{\left\langle #1 \right\rangle}$
# $\newcommand{bb}[1]{\mathbf{#1}}$
# $\newcommand{foldchange}{\text{fold-change}}$
# $\newcommand{\ee}[1]{\left\langle #1 \right\rangle}$
# $\newcommand{\bb}[1]{\mathbf{#1}}$
# $\newcommand{\dt}[1]{{\partial{#1} \over \partial t}}$
# $\newcommand{\Km}{\bb{K}}$
# $\newcommand{\Rm}{\bb{R}_m}$
# $\newcommand{\Gm}{\bb{\Gamma}_m}$
# $\newcommand{\Rp}{\bb{R}_p}$
# $\newcommand{\Gp}{\bb{\Gamma}_p}$
# ## Problem setup
# Vinu and Brewster have some super interesting data where they moved the location of the O1+11-YFP construct to different locus other than the usual *galK*. What they have found so far is that when they perform the usual *lacI* titration on these cells the binding energy $\eR$ comes out to be different to what we fit on the *galK* locus.
#
# Now this is very intriguing since in principle Franz and Brewster used multiple loci and even promoters on plasmids for their 2014 cell paper and they still used the *galK* binding energy. Their current hypothesis is that if we were to cast this problem in the language of the chemical master equation it would be $\kron$ rather than $\kroff$ that changes. What this means is that the "accessibility" of the promoter varies as it is moved around the genome. I agree that this hypothesis is very reasonable since we know that the local environment of the genome changes due to things such as super coiling and other structural elements that affect the packing of the nucleoid.
# ### Proposed test
# My proposal for this data set was that in our recent submission we were able to use the chemical master equation to predict not only the mean gene expression but the noise (std / mean) and even the full distribution. The key to get the noise right was to include the effect of having varying copy number of the gene depending on its position along the genome. So in principle if it is true that only $\kron$ rather than $\kroff$ changes as the gene is moved around, we could potentially see these effects in the structure of the noise.
#
# To test such idea we will start very simple. The first thing to do is to compare from our reference locus (*galK*) if we were to change the binding energy $\eR$ by some amount let's say $\delta \varepsilon_r$, could we distinguish if this was due to $\kron$ or $\kroff$?
# For this we will be using functions that I wrote for the paper where the moment dynamics are integrated over time accounting for what fraction of the cell cycle is spent with one vs two copies.
#
# Let's begin by setting the parameters for my growth conditions (100 min doubling time in deep 96 well plates)
# +
# Define mRNA degradation rate (s^-1); lifetime ~3 min for lacZ mRNA
# http://bionumbers.hms.harvard.edu/bionumber.aspx?id=105717&ver=3&trm=lacZ%20mRNA%20lifetime&org=
gm = 1 / (3 * 60)
# Load the flat-chain with the MCMC samples of the promoter parameters
with open('../../../data/mcmc/lacUV5_constitutive_mRNA_double_expo.pkl',
          'rb') as file:
    unpickler = pickle.Unpickler(file)
    # Two sequential load() calls: first the chain, then its log-probabilities
    gauss_flatchain = unpickler.load()
    gauss_flatlnprobability = unpickler.load()
# Generate a Pandas Data Frame with the mcmc chain
index = ['kp_on', 'kp_off', 'rm']
# Generate a data frame out of the MCMC chains
df_mcmc = pd.DataFrame(gauss_flatchain, columns=index)
# Redefine the index with the new entries
index = df_mcmc.columns
# Maximum a posteriori (argmax of the log-probability) value of the parameters
max_idx = np.argmax(gauss_flatlnprobability, axis=0)
# NOTE(review): the rescaling by gm suggests the chain stores rates in units
# of the mRNA degradation rate — confirm against the MCMC generation script
kp_on, kp_off, rm = df_mcmc.iloc[max_idx, :] * gm
# Use the protein production rate defined to give 500 proteins per cell
rp = 0.0595
# -
# ### Test functions with unregulated two-state promoter
# Before jumping into the real matter let's test the functions. For this we will run the moment dynamics for the unregulated two-state promoter. We will import a `.pkl` file that contains the matrix defining the dynamics.
# #### Setting initial conditions
# Since for this model we are explicitly accounting for cell division I do not consider protein degradation as a Poisson process. What that means is that I set the degradation rate $\gp = 0$. This means that the only source of "degradation" is due to dilution as the cells divide.
#
# But in order to set some initial conditions for all moments I want to start at a better guess that setting every value of $\ee{m^x p^y}$ to zero. So what I decided to do was to run the dynamics for a long time with a non-zero protein degradation rate $\gp$. If these dynamics were ran without accounting for the protein dilution the proteins would grow without bounds since there are no degradation. That is why I have to set an artificial degradation rate **only to set initial conditions**. Once the "real dynamics" are calculated, we go back to having only dilution due to growth as the only source of protein degradation.
#
# Let's set these initial conditions then with the fake protein degradation.
# +
# Single promoter
# Artificial protein degradation used ONLY to generate sensible initial
# conditions; the real dynamics use dilution-by-division instead (see text)
gp_init = 1 / (60 * 60)
# Production rate chosen so the fake steady state gives 500 proteins
rp_init = 500 * gp_init
# Read protein unregulated matrix
with open('../two_state_protein_dynamics_matrix.pkl', 'rb') as file:
    # Load sympy object containing the matrix A that define the
    # moment dynamics
    A_mat_unreg_lam = cloudpickle.load(file)
    # Load the list of moments included in the matrix
    expo = cloudpickle.load(file)
# Substitute value of parameters on matrix
## Initial conditions
A_mat_unreg_s_init = A_mat_unreg_lam(kp_on, kp_off, rm, gm, rp_init, gp_init)
# Define time on which to perform integration (4000 min, in seconds)
t = np.linspace(0, 4000 * 60, 2000)
# Define initial conditions
# (two promoter states per moment, hence the factor of 2)
mom_init = np.zeros(len(expo) * 2)
# Set initial condition for zero moment
# Since this needs to add up to 1
mom_init[0] = 1
# Numerically integrate equations
mp_sol = sp.integrate.odeint(ccutils.model.dmomdt, mom_init, t,
                             args=(A_mat_unreg_s_init,))
# Keep the last time point (steady state) as the initial condition
mp_init = mp_sol[-1, :]
# NOTE(review): slices 2:4 and 14:16 presumably sum the first mRNA and
# protein moments over the two promoter states — confirm against `expo`
print('<m> = {:.1f}'.format(mp_init[2:4].sum()))
print('<p> = {:.1f}'.format(mp_init[14:16].sum()))
print('<p>/<m> = {:.1f}'.format(mp_init[14:16].sum() / mp_init[2:4].sum()))
# -
# Excellent. So we ran these dynamics for a very long time and they reached the expected steady state of having 500 proteins per mRNA on average.
#
# Now let's run the real dynamics accounting for the variability during the cell cycle.
# +
# Define doubling time (minutes)
doubling_time = 100
# Define fraction of cell cycle spent with one copy
t_single_frac = 0.6
# Define time for single-promoter state
t_single = 60 * t_single_frac * doubling_time # sec
t_double = 60 * (1 - t_single_frac) * doubling_time # sec
# Define number of cell cycles
n_cycles = 6
# Define list of parameters
# gp = 0: protein loss comes only from dilution at division (see text above)
par_single = [kp_on, kp_off, rm, gm, rp, 0]
# Two-promoter state doubles the transcription rate rm
par_double = [kp_on, kp_off, 2 * rm, gm, rp, 0]
# Integrate moment equations
df_p_unreg = ccutils.model.dmomdt_cycles(mp_init,
                                         t_single, t_double,
                                         A_mat_unreg_lam,
                                         par_single, par_double, expo,
                                         n_cycles, n_steps=10000)
# Extract index for mRNA and protein first moment
first_mom_names_m = [x for x in df_p_unreg.columns
                     if 'm1p0' in x]
first_mom_names_p = [x for x in df_p_unreg.columns
                     if 'm0p1' in x]
# Extract the last cycle information
df_m_unreg_first = df_p_unreg.loc[df_p_unreg.cycle == df_p_unreg.cycle.max(),
                                  first_mom_names_m]
df_p_unreg_first = df_p_unreg.loc[df_p_unreg.cycle == df_p_unreg.cycle.max(),
                                  first_mom_names_p]
# Define array for integration over the cell-cycle phase a in [0, 1]
a_array = np.linspace(0, 1, len(df_m_unreg_first))
# Compute probability based on this array
# p(a) = ln(2) 2^(1-a): presumably the age distribution of an exponentially
# growing population — TODO confirm against the paper
p_a_array = np.log(2) * 2**(1 - a_array)
# Perform numerical integration (Simpson's rule) to average over the cycle
m_mean_unreg = sp.integrate.simps(df_m_unreg_first.sum(axis=1) * p_a_array,
                                  a_array)
p_mean_unreg = sp.integrate.simps(df_p_unreg_first.sum(axis=1) * p_a_array,
                                  a_array)
print('unregulated promoter:')
print('<m> = {:.2f}'.format(m_mean_unreg))
print('<p> = {:.2f}'.format(p_mean_unreg))
print('<p>/<m> = {:.1f}'.format(p_mean_unreg / m_mean_unreg))
# -
# ### Plotting time dynamics
#
# Our protein production rate satisfies the expected condition. Now let's plot the mean mRNA and mean protein. For this we first need to extract all the first moments for each of the promoter states and add them together to get the global first moment.
# +
# Extract index for first moment
first_mom_names_m = [x for x in df_p_unreg.columns if 'm1p0' in x]
first_mom_names_p = [x for x in df_p_unreg.columns if 'm0p1' in x]
# Compute the mean mRNA copy number
# (sum the first moment over all promoter states at each time point)
m_mean = df_p_unreg.loc[:, first_mom_names_m].sum(axis=1)
p_mean = df_p_unreg.loc[:, first_mom_names_p].sum(axis=1)
# Initialize figure
fig, ax = plt.subplots(2, 1, figsize=(2.5, 2), sharex=True)
# Plot mean mRNA as solid line
ax[0].plot(df_p_unreg.time / 60, m_mean, label='', lw=1.25)
ax[1].plot(df_p_unreg.time / 60, p_mean, label='', lw=1.25)
# Group data frame by cell cycle
df_group = df_p_unreg.groupby('cycle')
# Loop through cycles
for i, (group, data) in enumerate(df_group):
    # Define the label only for the last cell cycle not to repeat in legend
    if group == df_p_unreg['cycle'].max():
        label_s = 'single promoter'
        label_d = 'two promoters'
    else:
        label_s = ''
        label_d = ''
    # Find index for one-promoter state
    idx = np.where(data.state == 'single')[0]
    # Shade the single-promoter portion of the cycle
    # (original comment said "two promoters" here — copy-paste slip)
    ax[0].axvspan(data.iloc[idx.min()]['time'] / 60,
                  data.iloc[idx.max()]['time'] / 60,
                  facecolor='#e3dcd1', label=label_s)
    ax[1].axvspan(data.iloc[idx.min()]['time'] / 60,
                  data.iloc[idx.max()]['time'] / 60,
                  facecolor='#e3dcd1', label='')
    # Find index for two-promoter state
    idx = np.where(data.state == 'double')[0]
    # Indicate states with two promoters
    ax[0].axvspan(data.iloc[idx.min()]['time'] / 60,
                  data.iloc[idx.max()]['time'] / 60,
                  facecolor='#ffedce', label=label_d)
    ax[1].axvspan(data.iloc[idx.min()]['time'] / 60,
                  data.iloc[idx.max()]['time'] / 60,
                  facecolor='#ffedce', label='')
## Indicate where the cell divisions happen
# First find where the cell cycle transition happen
trans_idx = np.array(np.diff(df_p_unreg.cycle) == 1)
# Add extra point to have same length
trans_idx = np.insert(trans_idx, 0, False)
# Get the time points at which this happens
time_div = df_p_unreg[trans_idx].time.values
# Plot with a triangle the cell division moment
ax[0].plot(time_div / 60, [np.max(m_mean) * 1.1] * len(time_div),
           lw=0, marker='v', color='k')
# Set limits
# mRNA
ax[0].set_xlim(df_p_unreg['time'].min() / 60, df_p_unreg['time'].max() / 60)
ax[0].set_ylim([7, 28])
#protein
ax[1].set_xlim(df_p_unreg['time'].min() / 60, df_p_unreg['time'].max() / 60)
# Label plot
ax[1].set_xlabel('time (min)')
ax[0].set_ylabel(r'$\left\langle \right.$mRNA$\left. \right\rangle$/cell')
ax[1].set_ylabel(r'$\left\langle \right.$protein$\left. \right\rangle$/cell')
# Align y axis labels
fig.align_ylabels()
# Set legend for both plots
ax[0].legend(loc='upper left', ncol=2, frameon=False,
             bbox_to_anchor=(-0.12, 0, 0, 1.3), fontsize=6.5)
plt.subplots_adjust(hspace=0.05)
# -
# Everything seems to be working as expected. Time to test the dynamics of regulated promoters.
# ### Regulated three-state promoter
# First thing we need is to determine the parameters for the regulated promoter. Specifically we will define $N_{NS}$ the number of non-specific binding sites, the MWC parameters $K_A$, $K_I$ and $\Delta\varepsilon_{AI}$, and finally the default diffusion limited on rate $k_o$. This last parameter is the one that will change if $\kron$ is what is changing since we define this on rate as
# $$
# \kron = k_o [R].
# $$
# So if the on rate $\kron$ changes while the repressor copy number remains constant, it means that the diffusion limited on rate $k_o$ should be the parameter changing.
#
# Let's define these parameters.
# +
# Define repressor specific parameters
# Diffusion limited rate (per second per nanomolar of repressor)
ko = 2.7E-3 # s**-1 nmol**-1
# Number of non-specific binding sites in thermodynamic model
Nns = 4.6E6
# Cell volume
Vcell = 2.15 # fL
# MWC induction parameters
# (ka/ki: inducer dissociation constants; epsilon: allosteric energy —
#  presumably active/inactive states, confirm against ccutils.model.p_act)
ka = 139 # µM
ki = 0.53 # µM
epsilon = 4.5 # kBT
# -
# We also need to import the `.pkl` object that contains the dynamics of the three-state promoter.
# Read the regulated (three-state) promoter dynamics matrix
# (original comment said "ununregulated" — typo; this is the regulated case)
with open('../three_state_protein_dynamics_matrix.pkl', 'rb') as file:
    # Lambdified matrix defining the moment dynamics
    A_mat_reg_lam = cloudpickle.load(file)
    # List of (mRNA, protein) moment exponents included in the matrix
    expo_reg = cloudpickle.load(file)
# ### Computing moments for multiple parameters
# We now have everything ready to compute the moments for different operators, repressor copy numbers and inducer concentrations. For this exercise we will first assume that everything is integrated into *galK* as usual. We will compute the moments for a series of energy offshifts ranging from -1.5 to 1.5 $k_BT$.
# First let's define all of the different parameters that we will change
# +
# Experimental inducer concentrations in µM: zero plus 15 log-spaced points
inducer = np.concatenate(([0.0], np.logspace(-1, np.log10(5000), 15)))  # µM
# Repressor copy numbers: zero (∆lacI) plus 15 log-spaced points
repressors = np.concatenate(([0.0], np.logspace(1, np.log10(2000), 15)))
# Operators with their corresponding binding energies (kBT)
operators = ['O1', 'O2']
energies = [-15.3, -13.9]
op_dict = {op: en for op, en in zip(operators, energies)}
# Binding-energy offshifts to scan (kBT)
energy_off = [-1.5, -1, -0.5, 0, 0.5, 1, 1.5]
# Cartesian product of all parameter combinations:
# (operator, repressor, inducer, offshift)
var = list(itertools.product(operators, repressors, inducer, energy_off))
# -
# Now let's run the computation in parallel for each individual set of parameters. We will assume that the energy offshift is due to $\kroff$ first.
# +
# Boolean indicating if computation should be performed.
# Set to True to regenerate the moments; False reuses the cached CSV below.
compute_constraints = False
if compute_constraints:
    # Column names: parameters first, then one column per moment <m^x p^y>
    names = ['operator', 'binding_energy', 'offshift', 'repressor',
             'inducer_uM']
    names = names + ['m' + str(m[0]) + 'p' + str(m[1]) for m in expo_reg]

    # Define function for parallel computation
    def constraints_parallel(param):
        """
        Compute the cycle-averaged distribution moments for one
        (operator, repressor, inducer, energy-offshift) combination,
        attributing the energy offshift to the repressor OFF rate
        (the offshift is added to the binding energy used for kr_off).

        Parameters
        ----------
        param : tuple
            (operator, repressor copy number, inducer concentration in µM,
            binding-energy offshift in kBT); one element of `var`.

        Returns
        -------
        pd.Series
            Parameter values followed by the cycle-averaged moments,
            indexed by `names`.
        """
        # Extract variables
        op = param[0]  # operator
        eRA = op_dict[op] + param[3]  # binding energy with offshift applied
        rep = param[1]  # repressors
        iptg = param[2]  # inducer
        print(op, eRA, rep, iptg)
        # Calculate the repressor on rate including the MWC model
        kr_on = ko * rep * ccutils.model.p_act(iptg, ka, ki, epsilon)
        # Compute the repressor off-rate based on the on-rate and
        # the (shifted) binding energy — so the offshift lands on kr_off
        kr_off = ccutils.model.kr_off_fun(eRA, ko, kp_on, kp_off,
                                          Nns, Vcell)
        # Generate matrices for dynamics
        # Single promoter
        par_reg_s = [kr_on, kr_off, kp_on, kp_off, rm, gm, rp, 0]
        # Two promoters (doubled transcription rate)
        par_reg_d = [kr_on, kr_off, kp_on, kp_off, 2 * rm, gm, rp, 0]
        # Initial conditions (fake degradation rates rp_init/gp_init;
        # see the two-state example above)
        A_reg_s_init = A_mat_reg_lam(kr_on, kr_off, kp_on, kp_off,
                                     rm, gm, rp_init, gp_init)
        # Define initial conditions
        # (three promoter states per moment, hence the factor of 3)
        mom_init = np.zeros(len(expo_reg) * 3)
        # Zeroth moment must sum to 1 (it is a probability)
        mom_init[0] = 1
        # Define time on which to perform integration
        t = np.linspace(0, 4000 * 60, 10000)
        # Numerically integrate equations to (quasi) steady state
        m_init = sp.integrate.odeint(ccutils.model.dmomdt,
                                     mom_init, t,
                                     args=(A_reg_s_init,))
        # Keep last time point as initial condition
        m_init = m_init[-1, :]
        # Integrate moment equations over the cell cycles
        df = ccutils.model.dmomdt_cycles(m_init,
                                         t_single, t_double,
                                         A_mat_reg_lam,
                                         par_reg_s, par_reg_d,
                                         expo_reg, n_cycles,
                                         states=['A', 'I', 'R'],
                                         n_steps=3000)
        # Keep only last cycle
        df = df[df['cycle'] == df['cycle'].max()]
        # Cell-cycle phase array used for the averaging
        a_array = np.linspace(0, 1, len(df))
        # Probability of each phase along the cell cycle
        p_a_array = np.log(2) * 2**(1 - a_array)
        # Average every moment over the cell cycle
        moms = list()
        for mom in expo_reg:
            # Generate string that finds the moment
            mom_name = 'm' + str(mom[0]) + 'p' + str(mom[1])
            # Columns belonging to this moment (one per promoter state)
            mom_bool = [x for x in df.columns if mom_name in x]
            # Sum over promoter states
            df_mom = df.loc[:, mom_bool].sum(axis=1)
            # Average moment and append it to list
            moms.append(sp.integrate.simps(df_mom * p_a_array,
                                           a_array))
        # Assemble one output row
        series = pd.Series([op, eRA, param[3], rep, iptg] + moms,
                           index=names)
        return series

    # Run function in parallel over every parameter combination
    constraint_series = Parallel(n_jobs=6)(delayed(constraints_parallel)(param)
                                           for param in var)
    # Assemble all result rows in one shot. The original accumulated rows
    # with repeated DataFrame.append calls, which is O(n^2) and was removed
    # in pandas 2.0; building the frame from the list of Series directly is
    # equivalent and future-proof.
    df_constraints = pd.DataFrame(constraint_series, columns=names)
    # Cache results to disk
    df_constraints.to_csv(tmpdir + 'moments_kroff_change.csv',
                          index=False)
# Load cached results (kroff-change scenario)
df_kroff = pd.read_csv(tmpdir + 'moments_kroff_change.csv')
df_kroff.head()
# -
# Excellent. Now we will repeat the exercise but this time following Brewster's hypothesis that it is $\kron$ what changes rather than $\kroff$. For this we need to recall that the fold-change in gene expression is given by
# $$
# \foldchange = \left( 1 + {\kron \over \kroff} \left( {\kpon \over \kpon + \kpoff} \right) \right)^{-1}.
# $$
#
# If we let $\kron \equiv k_o [R]$, where $k_o$ is a diffusion limited on rate and $[R]$ is the concentration of repressors, then it can be shown that for the thermodynamic picture and the kinetic picture to give the same answer it must be true that
# $$
# {k_o [R] \over \kroff}{\kpoff \over \kpoff + \kpon} =
# {R \over \Nns} e^{- \beta \eR}.
# $$
# Since Jones \& Brewster reported a value for $k_o$ and we knew the repressor copy number for our cells the repressor off rate $\kroff$ was constrained as
# $$
# \kroff \approx 0.8 \cdot k_o \cdot \Nns e^{\beta \eR}
# \cdot {\kpoff \over \kpoff + \kpon},
# $$
# where the factor of 0.8 has to do with the conversion between absolute number of protein and concentration for the volume of our *E. coli* cells at the particular growth conditions of 2.15 fL.
#
# If we now claim that it is $\kron$ that changes rather than $\kroff$, what I will do is determine $\kroff$ assuming the usual binding energy $\eR$, and the usual diffusion limited on rate $k_o$. After that I will modify the diffusion limited on rate as
# $$
# k_o' = k_o \cdot e^{-\beta \Delta\eR},
# $$
# where $\Delta\eR$ is the change in the binding energy as determined in the experiment.
#
# Let's go ahead and run this.
# +
# Boolean indicating if computation should be performed.
# Set to True to regenerate the moments; False reuses the cached CSV below.
compute_constraints = False
if compute_constraints:
    # Column names: parameters first, then one column per moment <m^x p^y>
    names = ['operator', 'binding_energy', 'offshift', 'repressor',
             'inducer_uM']
    names = names + ['m' + str(m[0]) + 'p' + str(m[1]) for m in expo_reg]

    # Define function for parallel computation
    def constraints_parallel(param):
        """
        Compute the cycle-averaged distribution moments for one
        (operator, repressor, inducer, energy-offshift) combination,
        attributing the energy offshift to the repressor ON rate by
        rescaling the diffusion-limited rate: k_o' = k_o exp(-beta d_eR).

        Parameters
        ----------
        param : tuple
            (operator, repressor copy number, inducer concentration in µM,
            binding-energy offshift in kBT); one element of `var`.

        Returns
        -------
        pd.Series
            Parameter values followed by the cycle-averaged moments,
            indexed by `names`.
        """
        # Extract variables
        op = param[0]  # operator
        eRA = op_dict[op]  # binding energy (NOT shifted: offshift -> kr_on)
        rep = param[1]  # repressors
        iptg = param[2]  # inducer
        print(op, eRA, rep, iptg)
        # Calculate the repressor on rate including the MWC model
        kr_on = ko * rep * ccutils.model.p_act(iptg, ka, ki, epsilon)
        # Compute the repressor off-rate based on the on-rate and
        # the (unshifted) binding energy
        kr_off = ccutils.model.kr_off_fun(eRA, ko, kp_on, kp_off,
                                          Nns, Vcell)
        # Apply the offshift to the on-rate via the diffusion-limited rate
        kr_on = kr_on * np.exp(-param[3])
        # Generate matrices for dynamics
        # Single promoter
        par_reg_s = [kr_on, kr_off, kp_on, kp_off, rm, gm, rp, 0]
        # Two promoters (doubled transcription rate)
        par_reg_d = [kr_on, kr_off, kp_on, kp_off, 2 * rm, gm, rp, 0]
        # Initial conditions (fake degradation rates rp_init/gp_init;
        # see the two-state example above)
        A_reg_s_init = A_mat_reg_lam(kr_on, kr_off, kp_on, kp_off,
                                     rm, gm, rp_init, gp_init)
        # Define initial conditions
        # (three promoter states per moment, hence the factor of 3)
        mom_init = np.zeros(len(expo_reg) * 3)
        # Zeroth moment must sum to 1 (it is a probability)
        mom_init[0] = 1
        # Define time on which to perform integration
        t = np.linspace(0, 4000 * 60, 10000)
        # Numerically integrate equations to (quasi) steady state
        m_init = sp.integrate.odeint(ccutils.model.dmomdt,
                                     mom_init, t,
                                     args=(A_reg_s_init,))
        # Keep last time point as initial condition
        m_init = m_init[-1, :]
        # Integrate moment equations over the cell cycles
        df = ccutils.model.dmomdt_cycles(m_init,
                                         t_single, t_double,
                                         A_mat_reg_lam,
                                         par_reg_s, par_reg_d,
                                         expo_reg, n_cycles,
                                         states=['A', 'I', 'R'],
                                         n_steps=3000)
        # Keep only last cycle
        df = df[df['cycle'] == df['cycle'].max()]
        # Cell-cycle phase array used for the averaging
        a_array = np.linspace(0, 1, len(df))
        # Probability of each phase along the cell cycle
        p_a_array = np.log(2) * 2**(1 - a_array)
        # Average every moment over the cell cycle
        moms = list()
        for mom in expo_reg:
            # Generate string that finds the moment
            mom_name = 'm' + str(mom[0]) + 'p' + str(mom[1])
            # Columns belonging to this moment (one per promoter state)
            mom_bool = [x for x in df.columns if mom_name in x]
            # Sum over promoter states
            df_mom = df.loc[:, mom_bool].sum(axis=1)
            # Average moment and append it to list
            moms.append(sp.integrate.simps(df_mom * p_a_array,
                                           a_array))
        # Assemble one output row
        series = pd.Series([op, eRA, param[3], rep, iptg] + moms,
                           index=names)
        return series

    # Run function in parallel over every parameter combination
    constraint_series = Parallel(n_jobs=6)(delayed(constraints_parallel)(param)
                                           for param in var)
    # Assemble all result rows in one shot. The original accumulated rows
    # with repeated DataFrame.append calls, which is O(n^2) and was removed
    # in pandas 2.0; building the frame from the list of Series directly is
    # equivalent and future-proof.
    df_constraints = pd.DataFrame(constraint_series, columns=names)
    # Cache results to disk
    df_constraints.to_csv(tmpdir + 'moments_kron_change.csv',
                          index=False)
# Load cached results (kron-change scenario)
df_kron = pd.read_csv(tmpdir + 'moments_kron_change.csv')
df_kron.head()
# -
# ### Comparing changes in $\kron$ vs $\kroff$ fold-change
# Having computed the distribution moments let's compare the changes in the moments. First let's compute the fold-change at the protein level defined as
# $$
# \foldchange = {\ee{p(R \neq 0)} \over \ee{p(R = 0)}}.
# $$
#
# We will first plot this for no inducer $c = 0$ as a function of the repressor copy number.
# +
# Extract data with c = 0 (no inducer)
df_kroff_c0 = df_kroff[df_kroff.inducer_uM == 0]
df_kron_c0 = df_kron[df_kron.inducer_uM == 0]
# List unique energy offshifts different from zero
offshift = df_kroff.offshift.unique()
offshift_unique = offshift[offshift != 0]
# Define color for operators
# Generate list of colors
col_list = ['Blues_r', 'Oranges_r']
# Initialize plot: one row per operator, one column per offshift
fig, ax = plt.subplots(2, 6, figsize=(9, 4),
                       sharex=True, sharey=True)
# kron change (solid lines)
# Group by operator
df_group = df_kron_c0.groupby('operator')
# Loop through operators
for i, (group, op_data) in enumerate(df_group):
    # Extract ∆lacI data (no repressors, no offshift) used as normalization
    delta_data = op_data[(op_data.repressor == 0) &
                         (op_data.offshift == 0)]
    # Set operator color
    color = sns.color_palette(col_list[i], n_colors=2)[0]
    # Loop through unique offshifts
    for j, o in enumerate(offshift_unique):
        # Extract data
        data = op_data[op_data.offshift == o]
        # Compute fold-change = <p>_regulated / <p>_unregulated
        fc = data.m0p1 / delta_data.m0p1.values
        # Plot fold-change in corresponding panel
        ax[i, j].plot(data.repressor, fc, color=color,
                      label='$k_{on}^{(r)}$')
# kroff change (dotted lines; original comment said "kron" — copy-paste slip)
# Group by operator
df_group = df_kroff_c0.groupby('operator')
# Loop through operators
for i, (group, op_data) in enumerate(df_group):
    # Extract ∆lacI data
    delta_data = op_data[(op_data.repressor == 0) &
                         (op_data.offshift == 0)]
    # Extract "wt" data (zero offshift) for the gray reference curve
    wt_data = op_data[(op_data.offshift == 0)]
    # Set operator color
    color = sns.color_palette(col_list[i], n_colors=2)[1]
    # Loop through unique offshifts
    for j, o in enumerate(offshift_unique):
        # Extract data
        data = op_data[op_data.offshift == o]
        # Compute fold-change
        fc = data.m0p1 / delta_data.m0p1.values
        # Compute reference fold-change
        ref_fc = wt_data.m0p1 / delta_data.m0p1.values
        # Plot fold-change in corresponding panel
        ax[i, j].plot(data.repressor, fc, color=color,
                      label='$k_{off}^{(r)}$', linestyle=':')
        # Plot reference fold-change
        ax[i, j].plot(wt_data.repressor, ref_fc, color='gray',
                      linestyle='--', label='{:s} ref'.format(group))
        # Adjust plot axis
        ax[i, j].set_xscale('log')
        ax[i, j].set_yscale('log')
        # Add x label to lower plots
        if i==1:
            ax[i, j].set_xlabel('repressor/cell')
        # Add y label to left plots
        if j==0:
            ax[i, j].set_ylabel('fold-change')
        # Add legend
        ax[i, j].legend(loc='upper right', fontsize=6)
        # Add offshift label at the top of the columns
        if i==0:
            label = r'$\Delta\Delta\epsilon_r$ = {:.1f} $k_BT$'.\
                    format(o)
            ax[i, j].set_title(label, bbox=dict(facecolor='#ffedce'))
# Adjust spacing
plt.subplots_adjust(hspace=0.02, wspace=0.02)
# -
# As we expect at the level of fold-change the difference between changes in $\kron$ and $\kroff$ cannot be resolved. This is because we are setting such changes in the parameters to give the same difference in the binding energy.
# ### Comparing changes in $\kron$ vs $\kroff$ noise
# Now here is the real test of my idea. We want to see if at the level of the noise (std/mean) we can distinguish differences between $\kron$ vs. $\kroff$. We will again compute these quantities and plot them as a function of the repressor copy number for zero inducer.
# +
# Extract data with c = 0 (no inducer)
df_kroff_c0 = df_kroff[df_kroff.inducer_uM == 0]
df_kron_c0 = df_kron[df_kron.inducer_uM == 0]
# List unique energy offshifts different from zero
offshift = df_kroff.offshift.unique()
offshift_unique = offshift[offshift != 0]
# Define color for operators
# Generate list of colors
col_list = ['Blues_r', 'Oranges_r']
# Initialize plot: one row per operator, one column per offshift
fig, ax = plt.subplots(2, 6, figsize=(9, 4),
                       sharex=True, sharey=True)
# kron change (solid lines)
# Group by operator
df_group = df_kron_c0.groupby('operator')
# Loop through operators
for i, (group, op_data) in enumerate(df_group):
    # Set operator color
    color = sns.color_palette(col_list[i], n_colors=2)[0]
    # Loop through unique offshifts
    for j, o in enumerate(offshift_unique):
        # Extract data
        data = op_data[op_data.offshift == o]
        # Compute noise = std/mean from the first two protein moments
        noise = np.sqrt(data.m0p2 - data.m0p1**2) / data.m0p1
        # Plot noise in corresponding panel
        ax[i, j].plot(data.repressor, noise, color=color,
                      label='$k_{on}^{(r)}$')
# kroff change (dotted lines; original comment said "kron" — copy-paste slip)
# Group by operator
df_group = df_kroff_c0.groupby('operator')
# Loop through operators
for i, (group, op_data) in enumerate(df_group):
    # Extract "wt" data (zero offshift) for the gray reference curve
    wt_data = op_data[(op_data.offshift == 0)]
    # Set operator color
    color = sns.color_palette(col_list[i], n_colors=2)[1]
    # Loop through unique offshifts
    for j, o in enumerate(offshift_unique):
        # Extract data
        data = op_data[op_data.offshift == o]
        # Compute noise
        noise = np.sqrt(data.m0p2 - data.m0p1**2) / data.m0p1
        # Compute reference noise
        ref_noise = np.sqrt(wt_data.m0p2 - wt_data.m0p1**2) / wt_data.m0p1
        # Plot noise in corresponding panel
        ax[i, j].plot(data.repressor, noise, color=color,
                      label='$k_{off}^{(r)}$', linestyle=':')
        # Plot reference noise
        ax[i, j].plot(wt_data.repressor, ref_noise, color='gray',
                      linestyle='--', label='{:s} ref'.format(group))
        # Adjust plot axis
        ax[i, j].set_xscale('log')
        # Add x label to lower plots
        if i==1:
            ax[i, j].set_xlabel('repressor/cell')
        # Add y label to left plots
        if j==0:
            ax[i, j].set_ylabel('noise')
        # Add legend
        ax[i, j].legend(loc='upper left', fontsize=6)
        # Add offshift label at the top of the columns
        if i==0:
            label = r'$\Delta\Delta\epsilon_r$ = {:.1f} $k_BT$'.\
                    format(o)
            ax[i, j].set_title(label, bbox=dict(facecolor='#ffedce'))
# Adjust spacing
plt.subplots_adjust(hspace=0.02, wspace=0.02)
# -
# Wow... This is shocking. I guess my hypothesis didn't work at all. There is literally no difference between changes in the on and off rates.
| src/theory/sandbox/gene_position.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Many-to-1 data merge
# In a many-to-one (or one-to-many) merge, one of the values will be duplicated and recycled in the output. That is, one of the keys in the merge is not unique.
#
# Here, the two DataFrames site and visited have been pre-loaded once again. Note that this time, visited has multiple entries for the site column. Confirm this by exploring it in the IPython Shell.
#
# The .merge() method call is the same as the 1-to-1 merge from the previous exercise, but the data and output will be different.
#
# ### Instructions
#
# - Merge the site and visited DataFrames on the 'name' column of site and 'site' column of visited, exactly as you did in the previous exercise.
# - Print the merged DataFrame and then hit 'Submit Answer' to see the different output produced by this merge!
import pandas as pd
# Load the site table and inspect its columns and contents
site=pd.read_csv('site.csv')
#site=pd.DataFrame(site1)
print(site.columns)
print(site)
print(site.name)
# Load the visited table; it has multiple rows per site (many-to-1 key)
visited1=pd.read_csv('visited.csv')
# NOTE(review): read_csv already returns a DataFrame, so this wrap is a no-op
visited=pd.DataFrame(visited1)
print(visited.columns)
print(visited)
print(visited.site)
# +
# Merge the DataFrames: m2o
# Each row of `site` is recycled for every matching row in `visited`
m2o = pd.merge(left=site, right= visited, left_on='name', right_on='site')
# Print m2o
print(m2o)
# -
A=pd.read_csv('groupby.csv')
A
# Aggregate per Name: keep first Sid/Revenue, concatenate Use_Case strings
df = A.groupby('Name').agg({'Sid':'first',
                            'Use_Case': ', '.join,
                            'Revenue':'first' }).reset_index()
print (df[['Name','Sid','Use_Case','Revenue']] )
# Equivalent result by grouping on all key columns and joining Use_Case
df = A.groupby(['Name','Sid','Revenue'])['Use_Case'].apply(', '.join).reset_index()
print(df[['Name','Sid','Use_Case','Revenue']])
| cleaning data in python/Combining data for analysis/06. Many-to-1 data merge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Blackman9t/Advanced-Data-Science/blob/master/dimensionality_reduction_pyspark.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="giKCSW3NB0uo" colab_type="text"
# ## Dimensionality Reduction (DR):
#
# It sometimes becomes hard to choose the correct dimensions for plotting. So we will learn how an algorithm can do this for us. DR is more than just getting rid of columns in a dataset. it's more on transforming everything to a new dataset but preserving some key properties.
#
# ### Principal Component Analysis (PCA):
#
# The idea is that we take an n-dimensional(n number of columns) dataset where every row can be seen as a point in an n-dimensional euclidean vector space and each column as one coordinate in that space.<br>Mathematicians call such a space $R^n$, because the values on each axes are real numbers and every vector representing a point in that space needs $n$-elements.<br>
#
# So PCA transforms a dataset by specifying the number of desired dimensions in a way that the new dataset represents the very same points with vectors of lower dimensions $k$<br>This process is also called projections.<br>
# PCA ensures that if two points in the original dataset are far apart from each other, they are also far away in the reduced dataset with an equal ratio of distance. The same holds for close points too.
#
# After this projection, the new dimensions returned are explaining the majority of variations in the dataset. in otherwords, the so-called Principal Components are chosen such that the information contents of each additional dimension is decreasing.
#
# This is a way of getting rid of highly-correlated dimensions in the original dataset because their additional information content is low. the principal component dimensions are all orthogonal to each other and made up of dimensions that are not highly correlated to each other.
#
# So this means that we can spot clusters, potential separations, planes and outliers also in the lower dimensional dataset, but with the added functionality that the lower dataset can be plotted, especially if we reduce the dimension to 3 (x,y,z).
#
# When applying PCA, we are losing some information, but PCA is very intelligent in deciding what parts of the info is less relevant. So that the pain of removing information is minimized.
#
# Ofcourse the lower we choose $k$, the higher the losses.
#
# Loss can be easily estimated by comparing the original dataset with the one that PCA was applied, and then the inverse function of PCA is applied again to reconstruct the original dataset.
#
# The function used for comparing those two datasets is SSE or Sum-0f-Squared-Errors. this way, we get an idea of the percentage of info we are losing in the PCA dataset.
#
# This means the amount of info lost can be easily quantified and kept track of as we apply PCA to our datasets.
#
#
# + [markdown] id="5cKG53U1KBhJ" colab_type="text"
# ## Exercise 3.2
# Welcome to the last exercise of this course. This is also the most advanced one because it somehow glues everything together you've learned.
#
# These are the steps you will do:
#
# Load a data frame from cloudant/ApacheCouchDB.<br>
# Perform feature transformation by calculating minimal and maximal values of different properties on time windows (we'll explain what a time windows is later in here).<br>
# Reduce these now twelve dimensions to three using the PCA (Principal Component Analysis) algorithm of SparkML (Spark Machine Learning) => We'll actually make use of SparkML a lot more in the next course.<br>
# Plot the dimensionality reduced data set.
# + [markdown] id="ac2iJOI-K1ND" colab_type="text"
# First, let's install Spark related dependencies.
# + id="0S1uOkbmKtMj" colab_type="code" outputId="6810356d-b7df-42f8-c09a-02d75f5f96cb" colab={"base_uri": "https://localhost:8080/", "height": 235}
# !apt-get install openjdk-8-jdk-headless -qq > /dev/null
# !wget -q http://apache.osuosl.org/spark/spark-2.4.5/spark-2.4.5-bin-hadoop2.7.tgz
# !tar xf spark-2.4.5-bin-hadoop2.7.tgz
# !pip install -q findspark
# !pip install pyspark
# Set up required environment variables so pyspark can locate Java and Spark
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.4.5-bin-hadoop2.7"
# + [markdown] id="aqBVMzwFLGbt" colab_type="text"
# Next, let's initialise a Spark Context(sc) if one does not already exist.
# + id="wRrES4egLO2J" colab_type="code" outputId="b9120b3e-ef9d-4765-a986-88cb4097d701" colab={"base_uri": "https://localhost:8080/", "height": 34}
from pyspark import SparkConf, SparkContext
try:
    # Local (single-machine) master; one context per JVM is allowed
    conf = SparkConf().setMaster("local").setAppName("My_App")
    sc = SparkContext(conf = conf)
    print('SparkContext Initialised Successfully!')
except Exception as e:
    # NOTE(review): broad catch keeps the notebook running when a context
    # already exists, but it will also mask genuine setup errors
    print(e)
# + id="Qp3bGlqoLoLM" colab_type="code" outputId="590fed1f-48be-4b41-8d07-878496c77d73" colab={"base_uri": "https://localhost:8080/", "height": 191}
# Let's see the Spark Context
sc
# + [markdown] id="ioyIX7BjLaVB" colab_type="text"
# Next, let's create our Spark session wherein we will perform parallelized activities through the Spark context.
# + id="bpoN76E_Ll0Y" colab_type="code" outputId="7fca0b77-015b-40f8-b3c1-1b378dfd3604" colab={"base_uri": "https://localhost:8080/", "height": 214}
from pyspark.sql import SparkSession
# getOrCreate() reuses the session from the existing context if present
spark = SparkSession.builder.appName('My App').getOrCreate()
spark
# + [markdown] id="S4QIhd-AMmY8" colab_type="text"
# Since JSON data can be semi-structured and contain additional metadata, it is possible that you might face issues with the DataFrame layout.<br>
# Please read the documentation of 'SparkSession.read()' to learn more about the possibilities to adjust the data loading.<br>
# PySpark documentation: http://spark.apache.org/docs/2.0.2/api/python/pyspark.sql.html#pyspark.sql.DataFrameReader.json
# + [markdown] id="5s0eFfPTM0-l" colab_type="text"
# Now let's import the dataset
# + id="reB5uEueNXXa" colab_type="code" outputId="68c2f3f2-b603-4580-f9a0-94071fc13789" colab={"base_uri": "https://localhost:8080/", "height": 353}
# !wget https://github.com/IBM/coursera/blob/master/coursera_ds/washing.parquet?raw=true
# !mv washing.parquet?raw=true washing.parquet
# + id="sTbhN-kkMm8-" colab_type="code" outputId="5de520d9-e273-493e-8412-c7c2a62aca08" colab={"base_uri": "https://localhost:8080/", "height": 454}
df = spark.read.parquet("washing.parquet")
df.createOrReplaceTempView('washing')
df.show()
# + id="k5ne0iuvycLo" colab_type="code" outputId="40bf1dca-bbc4-42c4-8855-6eb78b5b619f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Let's see the number of rows in washing
df.count()
# + [markdown] id="0C4zpvsQNxql" colab_type="text"
# ### Feature Transformation
# + [markdown] id="ZAT5BN_WN3hR" colab_type="text"
# This is the feature transformation part of this exercise. <br>Since our table is mixing schemas from different sensor data sources we are creating new features. In other words, we use existing columns to calculate new ones. <br>We only use min and max for now, but using more advanced aggregations as we've learned in week three may improve the results. We are calculating those aggregations over a sliding window "w". <br>This window is defined in the SQL statement and basically reads the table by a one by one stride in direction of increasing timestamp. <br>Whenever a row leaves the window a new one is included. Therefore this window is called a sliding window (in contrast to tumbling, time or count windows). <br>Generally speaking, a window defines a finite set of elements on an unbounded stream. This set can be based on time (as in our previous examples), element counts, a combination of counts and time, or some custom logic to assign elements to windows.<br>More on this can be found here: https://flink.apache.org/news/2015/12/04/Introducing-windows.html
# + id="6v4LHNokOYu2" colab_type="code" colab={}
# Sliding-window feature transformation: for each row, compute the min and max
# of every sensor reading over a window of 11 consecutive rows (current row plus
# the 10 following, ordered by timestamp), then drop rows where any aggregate is
# null (which happens when all values inside the window are null).
# Bug fix: the original filter checked min_hardness but omitted max_hardness,
# so rows with a null max_hardness could slip through into the feature matrix.
result = spark.sql("""
SELECT * from (
    SELECT
        min(temperature) over w as min_temperature,
        max(temperature) over w as max_temperature,
        min(voltage) over w as min_voltage,
        max(voltage) over w as max_voltage,
        min(flowrate) over w as min_flowrate,
        max(flowrate) over w as max_flowrate,
        min(frequency) over w as min_frequency,
        max(frequency) over w as max_frequency,
        min(hardness) over w as min_hardness,
        max(hardness) over w as max_hardness,
        min(speed) over w as min_speed,
        max(speed) over w as max_speed
    FROM washing
    WINDOW w AS (ORDER BY ts ROWS BETWEEN CURRENT ROW AND 10 FOLLOWING)
)
WHERE min_temperature is not null
AND max_temperature is not null
AND min_voltage is not null
AND max_voltage is not null
AND min_flowrate is not null
AND max_flowrate is not null
AND min_frequency is not null
AND max_frequency is not null
AND min_hardness is not null
AND max_hardness is not null
AND min_speed is not null
AND max_speed is not null
""")
# + id="1-68v7CazMBT" colab_type="code" outputId="acbeaddb-69a7-4b7b-ae8d-c5325e19dcba" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(result.count(),len(result.columns))
# + [markdown] id="b54lYKCazvfm" colab_type="text"
# Since this table contains null values also our window might contain them. In case for a certain feature all values in that window are null we obtain also null. As we can see here (in my dataset) this is the case for 9 rows.
# + id="hztRGAa5zwSV" colab_type="code" outputId="27ce419f-dceb-49a0-bb87-6ddbcec28e04" colab={"base_uri": "https://localhost:8080/", "height": 34}
df.count() - result.count()
# + [markdown] id="65BJHpnKz9Wx" colab_type="text"
# Now we import some classes from SparkML. PCA for the actual algorithm. Vectors for the data structure expected by PCA and VectorAssembler to transform data into these vector structures.
# + id="6lRFLGFHz-A8" colab_type="code" colab={}
from pyspark.ml.feature import PCA
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
# + [markdown] id="xGaByhRv0qlK" colab_type="text"
# Let's define a vector transformation helper class which takes all our input features (result.columns) and creates one additional column called "features" which contains all our input features as one single column wrapped in "DenseVector" objects.
# + id="MtaF9WMB0snp" colab_type="code" colab={}
assembler = VectorAssembler(inputCols=result.columns, outputCol="features")
# + [markdown] id="1GQFqQXZ1FNC" colab_type="text"
# Now we actually transform the data, note that this is highly optimized code and runs really fast in contrast if we had implemented it.
# + id="tSEZLlCa01y8" colab_type="code" colab={}
features = assembler.transform(result)
# + [markdown] id="6JvLbvjG1epy" colab_type="text"
# Let's have a look at how this new additional column "features" looks like:
# + id="T3Xkpt6G1Jwu" colab_type="code" outputId="93f32cf0-50b6-4407-fcbf-9479662d29fb" colab={"base_uri": "https://localhost:8080/", "height": 454}
features.show()
# + [markdown] id="qc2EmQxY2-9B" colab_type="text"
# Let's select only the features column of the Spark data frame
# + id="1fk5GM1Q1_OQ" colab_type="code" outputId="c6de3866-0928-4634-8f6b-06e7cd9513b0" colab={"base_uri": "https://localhost:8080/", "height": 286}
features.select('features').show(10)
# + [markdown] id="YrpChNOB3Jm7" colab_type="text"
# Let's read the features Dataframe as an rdd object
# + id="2QAI7hFu2tD_" colab_type="code" outputId="56600cec-7fab-4bd5-cf90-55b51e929d81" colab={"base_uri": "https://localhost:8080/", "height": 185}
features.rdd.map(lambda r: r.features).take(10)
# + [markdown] id="ln2NVoeS4CI6" colab_type="text"
# Since the source data set has been prepared as a list of DenseVectors we can now apply PCA. Note that the first line again only prepares the algorithm by finding the transformation matrices (fit method)
# + id="6HgJv38u4Dcg" colab_type="code" colab={}
pca = PCA(k=3, inputCol= 'features', outputCol= 'PCAFeatures')
model = pca.fit(features)
# + [markdown] id="C3-9G5kM4_US" colab_type="text"
# Now we can actually transform the data. Let's have a look at the first 20 rows
# + id="gApFm51g5AZt" colab_type="code" outputId="3506797e-b9ce-4f06-fa81-1d3ac573602d" colab={"base_uri": "https://localhost:8080/", "height": 454}
result_pca = model.transform(features).select('PCAFeatures')
result_pca.show() # This shows the truncated version, we can show the verbose version by
# + id="waLIunY45tcn" colab_type="code" outputId="8f79d395-f259-43af-bc48-987d6db12e04" colab={"base_uri": "https://localhost:8080/", "height": 454}
result_pca.show(truncate=False) # This will not truncate the output visual
# + [markdown] id="H5E9m1kb6MdQ" colab_type="text"
# So we obtained three completely new columns which we can plot now. Let's run a final check to see if the number of rows is the same.
# + id="2L9s9tX76H0M" colab_type="code" outputId="5e470384-deb3-4596-9b6b-0bfe9988654e" colab={"base_uri": "https://localhost:8080/", "height": 34}
result_pca.count()
# + [markdown] id="939hePaf6TtS" colab_type="text"
# Yay! Luckily, it's the same number of rows returned. Now we obtain a sample and read each of the three columns into a python list
# + [markdown] id="ErCoC2A76hA9" colab_type="text"
# ### Sampling The PCA data
# + id="8ebbO3tz6gF8" colab_type="code" outputId="6754ce4d-13a5-4ae1-e40a-29a88e1420e6" colab={"base_uri": "https://localhost:8080/", "height": 101}
# let's return about 80% of the data
rdd = result_pca.rdd.sample(False, 0.8)
# Let's see the first five rows
rdd.take(5)
# + [markdown] id="5xQc-r51_Cvs" colab_type="text"
# Using simple python map() to extract the first list
# + id="ZP6pbX5r8j1u" colab_type="code" outputId="51002ec2-ea15-4374-c8d4-590c3bbdfe2d" colab={"base_uri": "https://localhost:8080/", "height": 101}
x = list(map(lambda x: x[0][0], rdd.collect()))
x[:5]
# + [markdown] id="az0N8WI7_LJZ" colab_type="text"
# Using rdd.map() to extract the same first list
# + id="lFrRfnaN7J3W" colab_type="code" outputId="53438a1e-2660-4c3b-867d-4f532b4d5b4e" colab={"base_uri": "https://localhost:8080/", "height": 101}
x = rdd.map(lambda a : a.PCAFeatures).map(lambda a : a[0]).collect()
x[:5]
# + id="SxJgF6Jy_XDt" colab_type="code" outputId="b2e9f743-d4b4-46f3-d956-432f626daf6f" colab={"base_uri": "https://localhost:8080/", "height": 101}
y = rdd.map(lambda a : a.PCAFeatures).map(lambda a : a[1]).collect()
y[:5]
# + id="Z0lXGCTQ_oXd" colab_type="code" outputId="45a673ac-2a3f-45c9-fa6b-aa88ce353091" colab={"base_uri": "https://localhost:8080/", "height": 101}
z = rdd.map(lambda a : a.PCAFeatures).map(lambda a : a[2]).collect()
z[:5]
# + [markdown] id="6-9BkLOc_3NA" colab_type="text"
# Finally we plot the three lists and name each of them as dimension 1-3 in the plot
# + id="AW7d1575_38q" colab_type="code" outputId="cff10f0c-f523-410f-ca7d-d1e6e0db9953" colab={"base_uri": "https://localhost:8080/", "height": 248}
# 3-D scatter plot of the three PCA components sampled into the lists x, y, z
# (assumes x, y, z were populated by the earlier rdd.map(...).collect() cells).
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
# 111 = single subplot; projection='3d' enables the Axes3D scatter below.
ax = fig.add_subplot(111, projection='3d')
# One red marker per sampled row of the PCA-reduced data set.
ax.scatter(x,y,z, c='r', marker='o')
# Axis labels name the three principal components (dimensions 1-3).
ax.set_xlabel('dimension1')
ax.set_ylabel('dimension2')
ax.set_zlabel('dimension3')
plt.show()
# + [markdown] id="-5c9qn8YBMP3" colab_type="text"
# Congratulations, we are done! We can see two clusters in the data set. We can also see a third cluster which either can be outliers or a real cluster. In the next course we will actually learn how to compute clusters automatically. For now we know that the data indicates that there are two semi-stable states of the machine and sometime we see some anomalies since those data points don't fit into one of the two clusters.
# + id="Umg5zJEXBNVl" colab_type="code" colab={}
| dimensionality_reduction_pyspark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # OOPS from BOOK
class Const(object):
    """Overriding (data) descriptor that always yields a fixed value.

    When installed as a class attribute, reads through an instance return
    the wrapped constant, and assignment attempts are silently discarded
    (only a confirmation line is printed).
    """

    def __init__(self, value):
        # The constant payload returned on every attribute read.
        self.value = value

    def __set__(self, *ignored):
        # Discard any attempt at rebinding; the stored value never changes.
        print("Done")

    def __get__(self, *ignored):
        # Every read resolves to the wrapped constant.
        return self.value
car1=Const(5)
class X(object):
    # `c` is managed by the Const descriptor defined above: reads through an
    # instance always yield 23, and assignments (e.g. x.c = 42) are ignored.
    c=Const(23)
x=X()
print(x.c)
x.c=42
print(x.c)
class B(object):
    """Base class used to demonstrate attribute and method lookup order."""

    # Class-level attributes, shared by all instances unless shadowed.
    a = 23
    b = 45

    def f(self):
        # Prints a tag identifying which class supplied the implementation.
        print('method f in class B')

    def g(self):
        # Subclasses may override this; B's version prints its own tag.
        print('method g in class B')
class C(B):
    """Subclass of B that shadows one attribute and overrides one method."""

    # Shadows B.b (45) when looked up through C or its instances.
    b = 67
    c = 89
    d = 123

    def g(self):
        # Overrides B.g; lookups on C instances resolve here.
        print('method g in class C')

    def h(self):
        # New method not present on the base class B.
        print('method h in class C')
x = C()
x.d = 77
x.e = 88
print(x.b)
print(x.__dict__)
print(x.__dict__['d'])
print(x.a)
x.h()
| OOPS from BOOK.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torch
# language: python
# name: torch
# ---
# +
import numpy as np
import pandas as pd
# viz
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(18.7,6.27)})
# notebook settings
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('retina')
# -
# ## 11 Classes
# +
# 2 embedding dims, 11 classes
exp1 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.23_11_19594-20/model_meta_data.pkl"))
exp2 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.23_22:56_11_4500-50/model_meta_data.pkl"))
exp3 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.24_10:57_11_300-60/model_meta_data.pkl"))
full = pd.concat([exp1, exp2, exp3])
# 2 embedding dims, 26 classes
exp_2_26 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.24_14:59_26_2000-100/model_meta_data.pkl"))
# 8 embedding dims, 26 classes
exp_8_26 = pd.DataFrame(pd.read_pickle("/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.26_11:16_26_8_2000-100/model_meta_data.pkl"))
# -
uexp_2_11 = full[full['n_features']<=2000].groupby(['n_features']).mean()
uexp_2_26 = exp_2_26.groupby(['n_features']).mean()
uexp_8_26 = exp_8_26.groupby(['n_features']).mean()
for data,exp in zip([uexp_2_11, uexp_2_26, uexp_8_26], ['2 dim, 11 classes', '2 dim, 26 class', '8 dim, 26 class']):
plt.plot(data.index, data['ANMI'], label=exp)
plt.legend()
true = pd.DataFrame(pd.read_pickle('/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.27_17:43_11_2_2000-100/model_meta_data.pkl'))
rand = pd.DataFrame(pd.read_pickle('/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.27_18:57_shuffle_11_2_2000-100/model_meta_data.pkl'))
utrue = true.groupby(['n_features']).mean()
urand = rand.groupby(['n_features']).mean()
for data,exp in zip([utrue, urand], ['2 dim, 11 classes - TRUE', '2 dim, 11 classes - SHUFF']):
plt.plot(data.index, data['ANMI'], label=exp)
_ = plt.ylim((0., 1.))
_ = plt.yticks(np.arange(0., 1.01, 0.05))
plt.legend()
def percentage_increase(values):
    """Return the element-to-element percentage change of `values`.

    Args:
        values: 1-D sequence of numbers (list, tuple, or ndarray). Values
            are assumed non-zero, since each step divides by the previous
            element.

    Returns:
        np.ndarray of length ``len(values) - 1`` where entry ``i`` is the
        percent change from ``values[i]`` to ``values[i+1]``, rounded to
        3 decimal places. Inputs with fewer than 2 elements yield an
        empty array.
    """
    values = np.asarray(values, dtype=float)
    # Vectorized form of the pairwise loop: (next - prev) / prev * 100.
    pct = np.diff(values) / values[:-1] * 100
    return np.around(pct, 3)
bar = percentage_increase(utrue['ANMI'])
plt.plot(utrue.index[:-1], bar)
true = pd.DataFrame(pd.read_pickle('/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.29_12:51_true_11_2_150-50/model_meta_data.pkl'))
rand = pd.DataFrame(pd.read_pickle('/srv/nas/mk2/projects/pan-cancer/experiments/feature_sel/2020.03.29_12:51_shuffle_11_2_150-50/model_meta_data.pkl'))
utrue = true.groupby(['n_features']).mean()
urand = rand.groupby(['n_features']).mean()
for data,exp in zip([utrue, urand], ['2 dim, 11 classes - TRUE', '2 dim, 11 classes - SHUFF']):
plt.plot(data.index, data['ANMI'], label=exp)
_ = plt.ylim((0., 1.))
_ = plt.yticks(np.arange(0., 1.01, 0.05))
plt.legend()
urand
| notebook/2020.03.29_analyze_feat_sel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:riboraptor]
# language: python
# name: conda-env-riboraptor-py
# ---
# %pylab inline
# # Idea
#
# What is a summary statistic for a Ribo-seq sample?
#
# - metagene profile
# - profile of 64 codons and their relative normalized codon counts
#
# If we were to take the latter and do clustering what sort of results will emerge?
#
import pandas as pd
df = pd.read_excel('~/re-ribo-datasets.xlsx')
| notebooks/ribotricer-codon-summaries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## MNIST CNN
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from fastai.vision import *
path = untar_data(URLs.MNIST)
path; path.ls()
training_folder = path/'training'; training_folder.ls()
il = ImageList.from_folder(path, convert_mode='L')
il.items[0]
defaults.cmap='binary'
il
il[0].show()
il[1].show()
sd = il.split_by_folder(train='training', valid='testing'); sd
ll = sd.label_from_folder(); ll
# ### Explore some images
def print_some_image(index=0):
    """Show training sample `index` from the global labelled list `ll`.

    Prints the label and image tensor shape, then renders the image.
    """
    image, label = ll.train[index]
    print(label, image.shape)
    image.show()
print_some_image()
tfms = ([*rand_pad(padding=3, size=28, mode='zeros')], [])
ll = ll.transform(tfms)
print_some_image()
bs = 128
# not using imagenet_stats because not using pretrained model
data = ll.databunch(bs=bs).normalize()
data
x,y = data.train_ds[0]
x.show()
print(y)
x,y = data.valid_ds[0]
x.show()
print(y)
def _plot(i,j,ax): data.train_ds[0][0].show(ax, cmap='gray')
plot_multi(_plot, 3, 3, figsize=(8,8))
xb,yb = data.one_batch()
xb.shape,yb.shape
data.show_batch(rows=3, figsize=(5,5))
# ### Basic CNN with batchnorm
def conv(ni, nf):
    """Build a 3x3, stride-2, padding-1 convolution from ni to nf channels.

    Stride 2 halves the spatial resolution at each application.
    """
    layer = nn.Conv2d(ni, nf, kernel_size=3, stride=2, padding=1)
    return layer
model = nn.Sequential(
conv(1, 8), # 14
nn.BatchNorm2d(8),
nn.ReLU()
)
xb,yb = data.one_batch()
model(xb).shape
model = nn.Sequential(
conv(1, 8), # 14
nn.BatchNorm2d(8),
nn.ReLU(),
conv(8, 16), # 14
nn.BatchNorm2d(16),
nn.ReLU()
)
model(xb).shape
model = nn.Sequential(
conv(1, 8), # 14
nn.BatchNorm2d(8),
nn.ReLU(),
conv(8, 16), # 7
nn.BatchNorm2d(16),
nn.ReLU(),
conv(16, 32), # 4
nn.BatchNorm2d(32),
nn.ReLU()
)
model(xb).shape
model = nn.Sequential(
conv(1, 8), # 14
nn.BatchNorm2d(8),
nn.ReLU(),
conv(8, 16), # 7
nn.BatchNorm2d(16),
nn.ReLU(),
conv(16, 32), # 4
nn.BatchNorm2d(32),
nn.ReLU(),
conv(32, 16), # 2
nn.BatchNorm2d(16),
nn.ReLU()
)
model(xb).shape
model = nn.Sequential(
conv(1, 8), # 14
nn.BatchNorm2d(8),
nn.ReLU(),
conv(8, 16), # 7
nn.BatchNorm2d(16),
nn.ReLU(),
conv(16, 32), # 4
nn.BatchNorm2d(32),
nn.ReLU(),
conv(32, 16), # 2
nn.BatchNorm2d(16),
nn.ReLU(),
conv(16, 10), # 1
nn.BatchNorm2d(10),
Flatten() # Remove 1, 1 dimension
)
model(xb).shape
model(xb)[0]
learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)
learn.summary()
learn.lr_find(end_lr=100)
learn.recorder.plot()
learn.fit_one_cycle(3, max_lr=0.1)
learn.recorder.plot_lr()
# ### Refactor
def conv2(ni,nf):
    """Conv + BatchNorm + ReLU block (fastai `conv_layer`) with stride 2.

    Maps `ni` input channels to `nf` output channels while halving the
    spatial resolution.
    """
    return conv_layer(ni,nf,stride=2)
model = nn.Sequential(
conv2(1, 8), # 14
conv2(8, 16), # 7
conv2(16, 32), # 4
conv2(32, 16), # 2
conv2(16, 10), # 1
Flatten() # remove (1,1) grid
)
learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)
learn.summary()
learn.lr_find(end_lr=100)
learn.recorder.plot()
learn.fit_one_cycle(3, max_lr=0.1)
# ### Resnet like
class ResBlock(nn.Module):
    """Residual block: two conv layers wrapped in an identity skip connection."""

    def __init__(self, nf):
        super().__init__()
        # Both convolutions keep the channel count at nf so the skip
        # connection's shapes line up for the elementwise addition.
        self.conv1 = conv_layer(nf, nf)
        self.conv2 = conv_layer(nf, nf)

    def forward(self, x):
        # y = x + F(x): the stacked convs learn the residual F only.
        out = self.conv1(x)
        out = self.conv2(out)
        return x + out
??res_block
model = nn.Sequential(
conv2(1, 8),
ResBlock(8),
conv2(8, 16),
ResBlock(16),
conv2(16, 32),
ResBlock(32),
conv2(32, 16),
ResBlock(16),
conv2(16, 10),
Flatten()
)
learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)
learn.summary()
learn.lr_find(end_lr=100)
learn.recorder.plot()
learn.fit_one_cycle(3, max_lr=0.1)
model = nn.Sequential(
conv2(1, 8),
res_block(8),
conv2(8, 16),
res_block(16),
conv2(16, 32),
res_block(32),
conv2(32, 16),
res_block(16),
conv2(16, 10),
Flatten()
)
learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)
learn.summary()
learn.lr_find(end_lr=100)
learn.recorder.plot()
learn.fit_one_cycle(3, max_lr=0.1)
# ### Dense Net
model = nn.Sequential(
conv2(1, 8),
res_block(8, dense=True),
conv2(16, 16),
res_block(16, dense=True),
conv2(32, 32),
res_block(32, dense=True),
conv2(64, 16),
res_block(16, dense=True),
conv2(32, 10),
Flatten()
)
learn = Learner(data, model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)
learn.summary()
learn.lr_find(end_lr=100)
learn.recorder.plot()
learn.fit_one_cycle(3, max_lr=0.1)
# ### Inference
images, y = data.one_batch()
images[0].shape
learn.predict(images[0])
images[0].squeeze().shape
plt.imshow(images[0].squeeze())
| nbs_gil/lesson7-resnet-mnist-experiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={}
# # LaTeX Exercise 1
# + [markdown] nbgrader={}
# The images of the equations on this page were taken from the Wikipedia pages referenced for each equation.
# + [markdown] nbgrader={}
# ## Imports
# + nbgrader={}
from IPython.display import Image
# + [markdown] nbgrader={}
# ## Typesetting equations
# + [markdown] nbgrader={}
# In the following cell, use Markdown and LaTeX to typeset the equation for the probability density of the normal distribution $f(x, \mu, \sigma)$, which can be found [here](http://en.wikipedia.org/wiki/Normal_distribution). Following the main equation, write a sentence that defines all of the variable in the equation.
# + nbgrader={}
Image(filename='normaldist.png')
# + [markdown] deletable=false nbgrader={"checksum": "19b733e9b9c40a9d640d0ff730227a31", "grade": true, "grade_id": "latexex01a", "points": 2, "solution": true}
# YOUR ANSWER HERE
# + [markdown] nbgrader={}
# In the following cell, use Markdown and LaTeX to typeset the equation for the time-dependent Schrodinger equation for non-relativistic particles shown [here](http://en.wikipedia.org/wiki/Schr%C3%B6dinger_equation#Time-dependent_equation) (use the version that includes the Laplacian and potential energy). Following the main equation, write a sentence that defines all of the variable in the equation.
# + nbgrader={}
Image(filename='tdseqn.png')
# + [markdown] deletable=false nbgrader={"checksum": "4d858b55aeb9117b8cfa6f706ab5b617", "grade": true, "grade_id": "latexex01b", "points": 4, "solution": true}
# YOUR ANSWER HERE
# + [markdown] nbgrader={}
# In the following cell, use Markdown and LaTeX to typeset the equation for the Laplacian squared ($\Delta=\nabla^2$) acting on a scalar field $f(r,\theta,\phi)$ in spherical polar coordinates found [here](http://en.wikipedia.org/wiki/Laplace_operator#Two_dimensions). Following the main equation, write a sentence that defines all of the variable in the equation.
# + nbgrader={}
Image(filename='delsquared.png')
# + [markdown] deletable=false nbgrader={"checksum": "625624933082a6695c8fd5512a808b77", "grade": true, "grade_id": "latexex01c", "points": 4, "solution": true}
# YOUR ANSWER HERE
| days/assignment06/LaTeXEx01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Метрики precision и recall
# Данные содержат признаки пользователей и целевую переменную affair - была ли связь на стороне
import pandas as pd
data = pd.read_csv('affair_data.csv')
data.head()
data.info()
# +
from sklearn.linear_model import LogisticRegression
# импортируем метод для автоматической разбивки на обучающую и тестовую выборки
# раньше использовали from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
# -
# Формируем набор признаков и целевую переменную
data.columns
X = data[['rate_marriage', 'age', 'yrs_married', 'children', 'religious', 'educ', 'occupation', 'occupation_husb']]
Y = data['affair']
model = LogisticRegression()
# Разбиваем данные на обучающую и тестовую выборки в соотношении 70 / 30
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Обучаем модель
model = LogisticRegression()
model.fit(X_train, Y_train)
# Получаем прогнозные значения модели (переменная predictions)
predictions = model.predict_proba(X_test)
predictions
for line in zip( predictions[:, 1], Y_test ):
print( line )
model.score(X_test, Y_test)
# ### Упражнение
#
# Постройте набор моделей для значений random_state от 0 до 9. Получите model.score для каждого варианта.
# +
# будет ли меняться качество с разным random_state
for i in range(10):
m = LogisticRegression(random_state=i)
m.fit(X_train, Y_train)
# print(m.coef_)
print(m.score(X_test, Y_test))
# +
# будет ли меняться качество с одинаковым random_state
for i in range(10):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, random_state = i)
m = LogisticRegression(random_state=0)
m.fit(X_train, Y_train)
# print(m.coef_)
print(m.score(X_test, Y_test)) # качество зависит от разбивки
# -
# ### Оценка качества модели
import warnings
warnings.simplefilter('ignore')
# +
###
# a = [1,2,3]
# b = [4,5,6]
# for pair in zip(a,b):
# print(pair)
###
# -
# Считаем accuracy последней модели
# +
tp = 0 # True positive
fp = 0 # False positive
fn = 0 # False negative
tn = 0 # True negative
predictions = model.predict_proba( X_test )
for predicted_prob, actual in zip( predictions[:, 1], Y_test ):
if predicted_prob >= 0.5:
predicted = 1
else:
predicted = 0
if predicted == 1:
if actual == 1:
tp += 1
else:
fp += 1
else:
if actual == 1:
fn += 1
else:
tn += 1
# +
# какая точность получилась?
accuracy = (tp + tn) / (tp + fp + fn + tn)
accuracy
# -
precision = tp / (tp + fp)
precision
# +
recall = tp / (tp + fn)
recall
# получается что модель практически не ставит 1
# +
##
# -
print(Y_test.sum())
# +
# вероятности сравниваем с 0.5 (получаем булевый вектор позитивный или негативный класс)
# и если мы его сложим то увидим сколько модель поставила 1)
(predictions[:, 1] >= 0.5).sum()
# при достаточно большом accuracy (70%) наша модель практически не предсказывает первый класс таргета т.е.
# предсказательной способности нет. Ловит всего 7 из 587 ;(
# -
# ### Используем готовые библиотеки
# метод для построения графика precision-recall
from sklearn.metrics import precision_recall_curve
pr, re, thres = precision_recall_curve( Y_test, predictions[:, 1] )
thres.shape
# посмотрим что получилось
for line in zip( pr, re, thres ):
print( line )
# %pylab inline
# plt.plot( re, pr )
plt.plot( re, pr )
from sklearn.metrics import average_precision_score
# площадь под кривой
average_precision_score( Y_test, predictions[:, 1] )
# ### ROC
# посчитаем ROC-кривую (Receiver Operating Characteristic)
from sklearn.metrics import roc_curve
# получаем значения false и true positive rate для различных значений порога
fpr, tpr, thres = roc_curve( Y_test, predictions[:, 1] )
for line in zip( fpr, tpr, thres ):
print( line )
plt.plot( fpr, tpr )
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
# **Построим еще одну модель и сравним их между собой**
Xshort = X.loc[ :, ['age', 'children'] ]
Xshort.head()
model2 = LogisticRegression()
X_train_short, X_test_short, Y_train, Y_test = train_test_split( Xshort, Y, test_size = 0.3, random_state = 0 )
model2.fit( X_train_short, Y_train )
predictions2 = model2.predict_proba( X_test_short )
fpr2, tpr2, thres2 = roc_curve( Y_test, predictions2[:, 1] )
# +
plt.plot( fpr, tpr, label = 'All columns' )
plt.plot( fpr2, tpr2, label = 'Age & children' )
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend( loc = 0 )
plt.show()
# -
# **Посчитаем площадь под ROC-кривой для двух моделей**
from sklearn.metrics import roc_auc_score
roc_auc_score( Y_test, predictions[:, 1] )
roc_auc_score( Y_test, predictions2[:, 1] )
# Можно посчитать с помощью более общего метода auc
#
# Про разницу между ними https://stackoverflow.com/questions/31159157/different-result-with-roc-auc-score-and-auc
from sklearn.metrics import auc
auc( fpr, tpr )
auc( fpr2, tpr2 )
# ## Пример влияния L1 и L2-регуляризации
model1 = LogisticRegression( penalty = 'l1', C = 0.01 ).fit( X_train, Y_train )
predictions = model1.predict_proba( X_test )
model2 = LogisticRegression( penalty = 'l2', C = 0.01 ).fit( X_train, Y_train )
predictions2 = model2.predict_proba( X_test )
model3 = LogisticRegression( penalty = 'l1', C = 0.001 ).fit( X_train, Y_train )
predictions3 = model3.predict_proba( X_test )
fpr, tpr, thres = roc_curve( Y_test, predictions[:, 1] )
fpr2, tpr2, thres2 = roc_curve( Y_test, predictions2[:, 1] )
fpr3, tpr3, thres3 = roc_curve( Y_test, predictions3[:, 1] )
# +
plt.plot( fpr, tpr, label = 'l1' )
plt.plot( fpr2, tpr2, label = 'l2' )
plt.plot( fpr3, tpr3, label = 'C=1' )
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend( loc = 0 )
plt.show()
# -
# **Построим еще одну модель, используя другие признаки**
Xshort = pd.get_dummies( data.loc[ :, ['age', 'children'] ], columns = ['age', 'children'] )
Xshort.head()
Y = data['affair']
X_train, X_test, Y_train, Y_test = train_test_split( Xshort, Y, test_size = 0.3, random_state = 0 )
model1 = LogisticRegression( penalty = 'l1', C = 0.01 ).fit( X_train, Y_train )
model2 = LogisticRegression( penalty = 'l2', C = 0.01 ).fit( X_train, Y_train )
model3 = LogisticRegression( penalty = 'l2', C = 1 ).fit( X_train, Y_train )
predictions = model1.predict_proba( X_test )
predictions2 = model2.predict_proba( X_test )
predictions3 = model3.predict_proba( X_test )
fpr, tpr, thres = roc_curve( Y_test, predictions[:, 1] )
fpr2, tpr2, thres2 = roc_curve( Y_test, predictions2[:, 1] )
fpr3, tpr3, thres3 = roc_curve( Y_test, predictions3[:, 1] )
# +
plt.plot( fpr, tpr, label = 'l1' )
plt.plot( fpr2, tpr2, label = 'l2' )
plt.plot( fpr3, tpr3, label = 'C=1' )
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend( loc = 0 )
plt.show()
# -
| Lectures notebooks/(Lectures notebooks) netology Machine learning/8. Model accuracy assessment, retraining, regularization/Logres_affair.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Audio classification, with visualization and sonification of the results
#
# This notebook is designed as the audio-equivalent of the [MNIST image classification exercise](MNIST.ipynb). Instead of MNIST data, we use a subset of [this drum sounds dataset](http://deepyeti.ucsd.edu/cdonahue/wavegan/data/drums.tar.gz) provided as part of the course repository.
#
# As you will see, audio can be processed very similarly to images, but with two modifications:
#
# * Tensors are indexed as ```[index in batch,sample,channel]``` instead of ```[index in batch,x,y,channel]```
# * Conv1D instead of Conv2D
#
# **After you've read, run, and understood the code, try to modify it as follows to test your learning:**
# * Hard: based on the [adversarial training example](AdversarialMNIST.ipynb), try optimizing a sound that maximizes the activation output of some neuron. This might yield interesting sound textures.
#
# Currently, we have no easy modifications to suggest. Feel free to invent your own!
# ## Loading and playing the data
#
# First, let's load the dataset using a helper provided for the course and try playing a loaded sound in IPython.
# + jupyter={"outputs_hidden": false}
#The pylab inline below is something you may need to make images and plots visible in Jupyter, depending on your Anaconda setup
# %pylab inline
import numpy as np
import matplotlib.pyplot as pp
import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1" #disable Tensorflow GPU usage, a simple example like this runs faster on CPU
import tensorflow as tf
from tensorflow import keras
from helpers.audio_loader import load_audio
import IPython
#Print the working directory so the relative dataset path below can be verified.
print(os.getcwd())
#Load the audio dataset. The course repository includes a small subset of this drum sound set:
#http://deepyeti.ucsd.edu/cdonahue/wavegan/data/drums.tar.gz
#Per the intro above, the returned tensors are indexed as [index in batch, sample, channel].
nClasses=3
(x_train, y_train), (x_test, y_test) = load_audio("drums_subset_preprocessed", num_classes=nClasses, inputpath="../Datasets/")
#Play a sound.
#IPython.display.Audio can play audio stored as Numpy arrays if you specify the sampling rate.
#The method expects 1D vectors, which is why we need to index as [sample number,:,channel],
#where the : denotes that we play the whole sound and not just a part of it.
#NOTE(review): rate=16000 assumes the dataset was preprocessed to 16 kHz -- confirm with the loader.
IPython.display.Audio(x_train[0,:,0],rate=16000,autoplay=True)
# + jupyter={"outputs_hidden": false}
#Let's import the layer types we need
from tensorflow.keras.layers import Dense #fully connected layer
from tensorflow.keras.layers import Flatten #converts images to vectors of numbers
#As before, we use a simple sequential, i.e., multilayer architecture
model = keras.models.Sequential()
#Flatten converts a batch of multidimensional data into a batch of 1D data.
#This is what the fully connected layers expect.
#For example, the rows of an image are simply stacked after each other.
#If the data was not images, we would not need this.
model.add(Flatten())
#The audio classification is so much harder that we need to have at least a
#few layers. With just the final 3-neuron layer, we would not learn anything.
model.add(Dense(64,activation="relu"))
model.add(Dense(64,activation="relu"))
#The output layer is fully connected, with one neuron per class (nClasses = 3 here).
#For classification, one should use the softmax activation.
#This means that each output neuron can be thought as the probability of a class.
model.add(Dense(nClasses, activation='softmax'))
#Compile the model. Note that now y_test is one-hot vectors instead of indices.
#Thus, categorical_crossentropy loss instead of sparse_categorical_crossentropy.
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
#Train the network
model.fit(x_train, y_train,
          batch_size=32,
          epochs=10,
          verbose=1,
          validation_data=(x_test, y_test))
# + jupyter={"outputs_hidden": false}
# Sonify the incoming weights of the first Dense layer's neurons.
# Index 1 is used because index 0 is the Flatten layer, which has no weights.
first_dense_weights = model.layers[1].get_weights()[0]
n_neurons_to_play = 10
for neuron_index in range(n_neurons_to_play):
    # The weight matrix is 2D: first dimension indexes over data variables,
    # second over neurons, so one neuron's weights form a column.
    neuron_sound = np.reshape(first_dense_weights[:, neuron_index],
                              first_dense_weights.shape[0])
    # To show several audio playback widgets from one cell, each Audio object
    # must be wrapped in IPython.display.display().
    IPython.display.display(IPython.display.Audio(neuron_sound, rate=16000))
# -
# ## A convolutional neural network for audio
# The classification accuracy of the fully connected network is very poor. Let's try the same with a convolutional neural network.
# + jupyter={"outputs_hidden": false}
#Let's import the layer types we need
#import tensorflow.keras.layers.Layer
from tensorflow.keras.layers import Dense #fully connected layer
from tensorflow.keras.layers import Conv1D #convolutional layer with 1D filters
from tensorflow.keras.layers import Flatten #converts images to plain vectors of numbers
from tensorflow.keras.layers import Dropout #this mitigates overfitting
#As before, we use a simple sequential, i.e., multilayer architecture
model = keras.models.Sequential()
#First convolutional layer. Here, the kernels are 1D tensors, and we can use
#9 wide ones instead of the 5x5 and 3x3 we used in image classification.
#Larger kernels are typically a bit better but use much more computing resources.
#With 1D convolutions, the cost grows only linearly with the kernel width.
kernelSize=9
model.add(Conv1D(16, kernel_size=kernelSize, strides=2,activation='relu',
                 input_shape=(x_train.shape[1],x_train.shape[2],)))
#Now, let's add more convolutional layers until the temporal dimension of the output is small enough.
#Each added layer uses strides=2, which roughly halves the temporal dimension.
while model.layers[-1].output.shape[1]>kernelSize*2:
    model.add(Conv1D(32, kernel_size=kernelSize, activation='relu', strides=2))
#Fully connected part
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(nClasses, activation='softmax'))
#Compile the model.
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
#Train the network
model.fit(x_train, y_train,
          batch_size=32,
          epochs=5,
          verbose=1,
          validation_data=(x_test, y_test))
# -
# Let's test the classifier with a sound
# + jupyter={"outputs_hidden": false}
# Pick one test sound and classify it.
sample_index = 82
# Slice sample_index:sample_index+1 so that a batch of one sound is passed to the network.
probs = model.predict(x_test[sample_index:sample_index + 1])
print("Predicted class probabilities: ",probs)
# np.argmax gives the index of the winning class; np.max gives its probability.
class_names = ["Kick","Ride","Snare"]
winner = np.argmax(probs)
print("The most probable class is {}, with probability {}".format(class_names[winner],np.max(probs)))
# Leave the Audio widget as the cell's last expression so Jupyter displays it.
IPython.display.Audio(x_test[sample_index,:,0],rate=16000,autoplay=True)
# -
# Now, let's try to see what the network has learned. With image classification, we could show the convolution filters as images. Here, we can't simply play them as audio, as they are only 5 values, whereas a second of audio is 16000 values in our dataset. Thus, we simply plot them as curves
# + jupyter={"outputs_hidden": false}
#Define a visualization helper
def visualizeLayerWeights(model,layerIndex,nFiltersToVisualize=16):
    """Plot the 1D convolution kernels of one layer as curves.

    Arguments:
    model -- a trained Keras model containing Conv1D layers
    layerIndex -- index of the layer whose kernels are plotted
    nFiltersToVisualize -- show at most this many filters (default 16,
                           matching the original hard-coded behavior)
    """
    #Get the neuron weights, i.e., convolution kernels or filters
    kernel=model.layers[layerIndex].get_weights()[0]
    #Check the shape
    print("Visualizing layer {} kernel, shape {}".format(layerIndex,kernel.shape))
    #Clamp to the number of filters the layer actually has; the original
    #hard-coded 16 and would raise an IndexError on layers with fewer filters.
    nFiltersToVisualize=min(nFiltersToVisualize,kernel.shape[-1])
    #No explicit figure number: the original reused figure 1, so repeated
    #calls drew into the same figure. figsize is set explicitly because
    #otherwise the plots would be too small.
    pp.figure(figsize=[nFiltersToVisualize*2,2])
    for i in range(nFiltersToVisualize):
        pp.subplot(1,nFiltersToVisualize,1+i)
        #Kernel tensor is indexed [kernel position, input channel, filter]
        pp.plot(kernel[:,0,i])
    pp.show()
#visualize first layer
visualizeLayerWeights(model,0)
# -
| Code/Jupyter/AudioMNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Stack overflow developer survey 2019 -- analysis
#
# ### Business Understanding:
#
# Stack overflow survey is one of the largest developer surveys. Almost every developer has used Stack overflow at least once while coding. Below are a few questions that I'd like to answer using the survey data.
#
# 1. Does open source development help you increase your income?
# 2. Does your undergrad major affect your income?
# 3. What are the languages of choice for 2020?
# 4. Does Location, Open source contribution and dependents influence income?
# 5. Does years of coding experience influence salary?
# 6. What are the desired platforms for 2020?
# 7. What are the desired databases for 2020?
# 8. Which was the most used editor of 2019?
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import r2_score, mean_squared_error, accuracy_score
from sklearn.preprocessing import StandardScaler
import seaborn as sns
# %matplotlib inline
# -
import warnings
warnings.filterwarnings('ignore')
# ### Data Understanding
#
# Let us look at the data and try to assert whether the business questions listed can be answered or not.
# Load the public Stack Overflow 2019 survey responses.
survey_data = pd.read_csv('survey_results_public.csv')
survey_data.head()
survey_data.columns
# Looking at the columns. We can conclude that the business questions listed can be answered.
# ### Question 1: Does open source development help you increase your income?
ques1_data = survey_data[['OpenSourcer', 'ConvertedComp']]
# dropping all rows without a ConvertedComp as this is the column that I am trying to predict
ques1_data = ques1_data.dropna(subset=['ConvertedComp'])
ques1_data.head()
ques1_data['OpenSourcer'].value_counts()
# Mean compensation per open-source contribution level, highest first.
ques1_data.groupby('OpenSourcer', as_index=False)['ConvertedComp'].mean().sort_values(by='ConvertedComp', ascending=False)
# ### Question 1 answer:
#
# As you can see from the above data, yes, open-source development tends to increase a developer's income. People contributing to open source once a month or more have relatively higher incomes than people who don't.
# ### Question 2: Does your undergrad major affect your income?
# Keep only the two relevant columns and drop rows missing either value;
# imputing UndergradMajor or ConvertedComp would dilute the results.
major_income = survey_data[['UndergradMajor', 'ConvertedComp']].dropna()
major_income.head()
# Mean compensation per undergrad major, highest first.
major_income.groupby('UndergradMajor', as_index=False)['ConvertedComp'].mean().sort_values(by='ConvertedComp', ascending=False)
# ### Question 2 answer
#
# People with an undergrad major in fine arts or performing arts have a higher mean income. The lowest is for web development or web design. In between is the income of people with a mathematics or statistics major.
# ### Question 3: What are the languages of choice for 2020?
# Dropping all rows where LanguageDesireNextYear is null as it cannot be imputed
ques3_data = survey_data['LanguageDesireNextYear'].dropna()
ques3_data.head()
# Tally how often each language is named; each answer is a ';'-separated list.
languages = {}
for row in ques3_data:
    for language in row.split(";"):
        # dict.get replaces the original bare `except:`, which silently
        # swallowed every exception type (including KeyboardInterrupt).
        languages[language] = languages.get(language, 0) + 1
languages
sorted(languages)
# Sort by count (ties broken by name), most desired language first.
sorted_languages = sorted(languages.items(), key = lambda kv:(kv[1], kv[0]))
sorted_languages.reverse()
sorted_languages
# ### Question 3 answer:
#
# #### Top 5 languages:
#
# JavaScript
#
# Python
#
# HTML/CSS
#
# SQL
#
# TypeScript
#
# #### Bottom 5 languages:
#
# F#
#
# Objective-C
#
# Clojure
#
# Erlang
#
# VBA
# ### Question 4: Does Location, Open source contribution and dependents influence income?
# Inspect column dtypes to see which survey fields are numeric vs. categorical
# before building the feature matrix below.
survey_data.columns[survey_data.dtypes == 'float']
survey_data.columns[survey_data.dtypes == 'int']
survey_data.columns[survey_data.dtypes == object]
# +
def cleanData(dataFrame):
    """Clean Data
    - One of the preprocessing steps to get the data ready for training the model

    Keyword arguments -
    dataFrame: A pandas DataFrame that contains both numerical and categorical values

    Returns -
    X: numeric feature DataFrame (ordinal encodings + one-hot dummies)
    y: the 'ConvertedComp' target column
    """
    y = dataFrame['ConvertedComp']
    # .copy() so the replace/fillna below never write into a view of the
    # caller's frame (avoids SettingWithCopyWarning / accidental mutation).
    X = dataFrame[['Country', 'YearsCode', 'OpenSourcer', 'SOHowMuchTime', 'Dependents']].copy()
    # 'YearsCode' contains non-numeric answers; coerce those to NaN, then to 0.
    X['YearsCode'] = pd.to_numeric(X['YearsCode'], errors='coerce')
    X['YearsCode'] = X['YearsCode'].fillna(0)
    # Encode open-source contribution frequency as an ordinal 0..3 scale.
    X['OpenSourcer'] = X['OpenSourcer'].replace({
        'Never': 0,
        'Less than once per year': 1,
        'Less than once a month but more than once per year': 2,
        'Once a month or more often': 3
    })
    X['OpenSourcer'] = pd.to_numeric(X['OpenSourcer'], downcast='integer')
    # Encode time saved by Stack Overflow as an ordinal 0..3 scale.
    X['SOHowMuchTime'] = X['SOHowMuchTime'].replace({
        '0-10 minutes': 0,
        '11-30 minutes': 1,
        '31-60 minutes': 2,
        '60+ minutes': 3
    })
    X['SOHowMuchTime'] = pd.to_numeric(X['SOHowMuchTime'], downcast='integer')
    X['SOHowMuchTime'] = X['SOHowMuchTime'].fillna(0)
    X['Dependents'] = X['Dependents'].replace({
        'Yes': 1,
        'No': 0
    })
    X['Dependents'] = pd.to_numeric(X['Dependents'], downcast='integer')
    columns = X.columns
    # Impute any remaining numeric NaNs with the column mean.
    num_vars = X.select_dtypes(include=['int', 'float']).columns
    for col in num_vars:
        X[col] = X[col].fillna(X[col].mean())
    # One-hot encode the remaining categorical columns; drop_first avoids
    # perfect collinearity between the dummies.
    cat_vars = X.select_dtypes(include=['object']).columns
    for var in cat_vars:
        # dropping the actual column and creating categorical columns with prefix name of original column
        X = pd.concat([X.drop([var], axis=1),
                       pd.get_dummies(X[var], prefix=columns[columns.get_loc(var)], drop_first=True)],
                      axis=1)
    return X, y
# Our predictable value is ConvertedComp so imputing it will dilute the results
X, y = cleanData(survey_data.dropna(subset=['ConvertedComp']))
# Keep the unscaled frame so the feature names survive for the coefficient table below.
final_features = X
# Standardize features and target separately (zero mean, unit variance).
scaleX = StandardScaler()
scaleY = StandardScaler()
X = scaleX.fit_transform(X)
# reshape(-1,1) because StandardScaler expects a 2D array, not a 1D Series.
y = scaleY.fit_transform(np.array(y).reshape(-1,1))
# +
# Hold out a third of the data for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33, random_state=42)
ridge = Ridge()
# Candidate regularisation strengths, from essentially unregularised to heavily penalised.
parameters = {'alpha': [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20, 50, 100]}
# 5-fold cross-validated grid search, scored by negative mean squared error.
ridge_regressor = GridSearchCV(ridge, parameters, scoring='neg_mean_squared_error', cv=5)
ridge_regressor.fit(X_train, y_train)
# -
print(ridge_regressor.best_params_)
print("neg_mse: "+str(ridge_regressor.best_score_))
final_features.columns
# Pair each feature name with its fitted ridge coefficient
# (coef_ is a (1, n_features) row vector, hence the transpose).
coefficients = pd.concat([pd.DataFrame({'Feature': final_features.columns}),
                          pd.DataFrame(np.transpose(ridge_regressor.best_estimator_.coef_))], axis = 1)
coefficients.rename(columns={0:'Coeficient'}, inplace=True)
# Show every row so all feature coefficients are visible in the notebook output.
pd.set_option('display.max_rows', None)
coefficients.sort_values(by=['Coeficient'], ascending=False)
# ### Question 4 answer:
# Yes, as you can see, the Working in:
# the United States, United Kingdom, Australia, Ireland or Canada leads to more income, while working in Iran, Pakistan, the Russian Federation, Brazil or India leads to a lower developer income. Also, contributing to open source and having dependents leads to better income.
# ### Question 5: Does years of coding experience influence salary?
#
# ### Question 5 answer:
# Yes, as observed from above, years of coding does influence the salary of a developer.
# ### Question 6: What are the desired platforms for 2020?
# Dropping PlatformDesireNextYear as imputing it is not possible
platform_data = survey_data['PlatformDesireNextYear'].dropna().values
platform_data
# Tally each desired platform; each answer is a ';'-separated list.
platforms = {}
for platform in platform_data:
    for i in platform.split(";"):
        # dict.get replaces the original bare `except:`, which masked
        # every exception type, not just the expected KeyError.
        platforms[i] = platforms.get(i, 0) + 1
platforms
# Sort by count (ties broken by name), most desired platform first.
sorted_platforms = sorted(platforms.items(), key = lambda kv:(kv[1], kv[0]))
sorted_platforms.reverse()
sorted_platforms
# ### Question 6 answer:
# The top 5 desired platforms of 2020 are Linux, Docker, Windows, AWS and Android
# ### Question 7: What are the desired databases for 2020?
# Dropping DatabaseDesireNextYear as imputing it is not possible
database_data = survey_data['DatabaseDesireNextYear'].dropna().values
database_data
# Tally each desired database; each answer is a ';'-separated list.
databases = {}
for database in database_data:
    for i in database.split(";"):
        # dict.get replaces the original bare `except:`, which masked
        # every exception type, not just the expected KeyError.
        databases[i] = databases.get(i, 0) + 1
databases
# Sort by count (ties broken by name), most desired database first.
sorted_databases = sorted(databases.items(), key = lambda kv:(kv[1], kv[0]))
sorted_databases.reverse()
sorted_databases
# ### Question 7 answer:
# Top 5 desired databases of 2020 are PostgreSQL, MySQL, MongoDB, Redis and SQLite
# ### Question 8: Which is the most used editor in 2019?
# Dropping all rows where the DevEnviron column has null values, as it cannot be imputed.
environment_data = survey_data['DevEnviron'].dropna().values
environment_data
# Tally each editor/environment; each answer is a ';'-separated list.
environments = {}
for environment in environment_data:
    for i in environment.split(";"):
        # dict.get replaces the original bare `except:`, which masked
        # every exception type, not just the expected KeyError.
        environments[i] = environments.get(i, 0) + 1
environments
# Sort by count (ties broken by name), most used editor first.
sorted_environments = sorted(environments.items(), key = lambda kv:(kv[1], kv[0]))
sorted_environments.reverse()
sorted_environments
# ### Question 8 answer:
# The top 5 most used editors of 2019 are Visual Studio Code, Visual Studio, Notepad++, IntelliJ and Vim
| Stack_overflow_dev_survey_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
# Assignment copies the reference, not the value: both names are bound to
# the same int object.
my_var = 10
other_var = my_var
# Both prints show the same id, confirming the two names share one object.
print(id(my_var))
print(id(other_var))
# + pycharm={"name": "#%%\n"}
import sys
a = [1, 2, 3]
print(id(a))
# NOTE: sys.getrefcount reports one more than the "real" count, because the
# argument passed to getrefcount itself temporarily holds a reference.
sys.getrefcount(a)
# + pycharm={"name": "#%%\n"}
import ctypes
def get_reference_count(address: int):
    """Return the reference count of the object living at `address`.

    Reads CPython's ob_refcnt field directly from memory via ctypes.
    CPython-specific; only valid while the object at `address` is alive.
    """
    counter = ctypes.c_long.from_address(address)
    return counter.value
# Display the reference count of the list `a` created in the previous cell.
print(get_reference_count(id(a)))
| python-deep-dive/Python 3: Deep Dive (Part 1 - Functional)/reference_counting/refrence_counting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Smart signatures
# #### 06.1 Writing Smart Contracts
# ##### <NAME> (<EMAIL>)
# 2022-01-12
#
# * Install PyTEAL
# * Learn the PyTEAL logic
# * Write and deploy smart Signatures
# ### Install PyTEAL
# * If you have trouble updating PyTEAL, have a look at notebook 02.x_WSC about updating/upgrading.
# !pip install pyteal
# + [markdown] tags=[]
# ## Setup
# See notebook 04.1, the lines below will always automatically load functions in `algo_util.py`, the five accounts and the Purestake credentials
# +
# Loading shared code and credentials
import sys, os
# sharedCode lives two directory levels up from this notebook.
codepath = '..'+os.path.sep+'..'+os.path.sep+'sharedCode'
sys.path.append(codepath)
from algo_util import *
cred = load_credentials()
# Shortcuts to directly access the five accounts loaded from the credentials
MyAlgo = cred['MyAlgo']
Alice = cred['Alice']
Bob = cred['Bob']
Charlie = cred['Charlie']
Dina = cred['Dina']
# +
from algosdk import account, mnemonic
from algosdk.v2client import algod
from algosdk.future import transaction
from algosdk.future.transaction import PaymentTxn
from algosdk.future.transaction import AssetConfigTxn, AssetTransferTxn, AssetFreezeTxn
from algosdk.future.transaction import LogicSig, LogicSigTransaction
import algosdk.error
import json
import base64
import hashlib
# -
from pyteal import *
# Initialize the algod client (Testnet or Mainnet)
# Credentials come from the Purestake setup loaded above; 'algod_test' selects Testnet.
algod_client = algod.AlgodClient(algod_token='', algod_address=cred['algod_test'], headers=cred['purestake_token'])
# Print the public addresses of the three accounts used in this notebook.
print(Alice['public'])
print(Bob['public'])
print(Charlie['public'])
# #### Quick check of asset holdings, otherwise go to ...
# - https://bank.testnet.algorand.network
# - https://testnet.algoexplorer.io/dispenser
# Show Alice's current asset holdings (helper from algo_util).
asset_holdings_df(algod_client,Alice['public'])
# #### Check Purestake API
# A successful status call confirms that the API credentials work.
last_block = algod_client.status()["last-round"]
print(f"Last committed block is: {last_block}")
# ## Smart Signatures
# * A Smart Signature is a function that has two possible results `True` or `False`
# * A Smart Signature can only evaluate properties of a transaction that is proposed to it.
# * A Smart Signature cannot read or write from/to the blockchain
# ## The Dispenser
# * The simplest smart signature is always `TRUE`
# * It accepts **all** transactions that are proposed
# #### Step 1: The programmer writes down the conditions as a PyTeal program
# * This is the logic of the smart signature.
# * Usually written inside `( ... )`
# * This one is always `True`
# +
import random
from random import randrange
# A random 32-bit constant; the condition below is always True regardless of
# its value. NOTE(review): presumably the random constant makes each compiled
# program (and hence its address) unique per run -- confirm.
a = Int( randrange(2**32-1) )
# The smart signature's logic: trivially True, so it accepts every transaction.
dispenser_pyteal = (
    a == a
)
# -
# #### Step 2: Compile PyTeal -> Teal
# * Necessary intermediate step
# * No need to print and inspect the TEAL program
# Compile the PyTeal expression down to TEAL source code.
dispenser_teal = compileTeal(dispenser_pyteal,
                             Mode.Signature, # <----- Here we say it is a Smart Signature (and not a Smart Contract)
                             version=3)
print(dispenser_teal)
# #### Step 3: Compile Teal -> Bytecode for AVM
# AVM = Algorand Virtual Machine
#
# `algod_client.compile` creates a dict with two entries:
# * `hash` contains the address $\longleftarrow$ corresponds to the `[public]`
# * `result` contains the compiled code $\longleftarrow$ corresponds to the `[private]`
# As described above, compile() returns a dict: 'hash' is the signature's
# address and 'result' is the compiled bytecode.
Dispenser = algod_client.compile(dispenser_teal)
Dispenser
# Look on Algoexplorer at the address of the smart signature.
# (There is not yet something to see)
print('http://testnet.algoexplorer.io/address/'+Dispenser['hash'])
# #### Step 4: Alice funds and deploys the smart signature
# * Only *at this step* we decide who is funding the smart signature.
# * This means we can write the Smart Signature without knowing who will fund it.
# * It is even possible that multiple people fund a Smart Signature.
# * Notice that the recipient is the **Dispenser**
# +
# Step 1: prepare transaction
sp = algod_client.suggested_params()
# Amount is in micro-Algos: 2.1 Algos = 2,100,000 micro-Algos.
amt = int(2.1*1e6)
txn = transaction.PaymentTxn(sender=Alice['public'], sp=sp, receiver=Dispenser['hash'], amt=amt)
# Step 2+3: sign and send
stxn = txn.sign(Alice['private'])
txid = algod_client.send_transaction(stxn)
# Step 4: wait for confirmation
txinfo = wait_for_confirmation(algod_client, txid)
# -
# Look at Algoexplorer. (The smart signature is funded.)
print('http://testnet.algoexplorer.io/address/'+Dispenser['hash'])
# #### Step 5: Alice informs Bob
# * Bob can only interact with the Smart signature, if he has the following information:
# Bob needs both the compiled program and its address to interact with the
# smart signature in step 6.
print("Alice communicates to Bob the following")
print("Compiled smart signature:", Dispenser['result'])
print("Address of smart signature: ", Dispenser['hash'])
# #### Step 6: Bob proposes a transaction to the smart signature
# * Using the information obtained in step 5
# * He proposes a payment from the dispenser to himself
# * The payment transaction is signed by the smart signature, **if the conditions are fulfilled** (easy in this case)
# +
# Step 1: prepare TX
sp = algod_client.suggested_params()
# Withdrawal amount in micro-Algos: 0.2 Algos = 200,000 micro-Algos.
withdrawal_amt = int(0.2*1e6)
txn = PaymentTxn(sender=Dispenser['hash'], sp=sp, receiver=Bob['public'], amt=withdrawal_amt)
# Step 2: sign TX <---- This step is different!
# The sender is the smart signature, so instead of a private-key signature we
# attach the compiled program itself as a LogicSig.
encodedProg = Dispenser['result'].encode()
program = base64.decodebytes(encodedProg)      # base64 -> raw AVM bytecode
lsig = LogicSig(program)
stxn = LogicSigTransaction(txn, lsig)
# Step 3: send
txid = algod_client.send_transaction(stxn)
# Step 4: wait for confirmation
txinfo = wait_for_confirmation(algod_client, txid)
# -
# Look again at Algoexplorer.
# - The smart signature has fewer ALGOs.
# - Bob has more ALGOs.
print('http://testnet.algoexplorer.io/address/'+Dispenser['hash'])
print('http://testnet.algoexplorer.io/address/'+Bob['public'])
# ##### Check holdings
# ### Exercise
# * Run step 6 again and check again holdings
# ### Exercise
# * Charlie wants to get **all** the money in the Smart Signature.
# * How much can he withdraw?
# * Do not forget the ...
# Hint: this is the amount of micro-Algos currently in the Smart Signature
algod_client.account_info(Dispenser['hash'])['amount']
# +
# Python code goes here
| ClassMaterial/06 - Smart Signatures/06 code/06.1a_WSC_SmartSignatures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcahiers-de-programmes&branch=master&subPath=Tutoriels/markdown.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/cahiers-de-programmes/master/bouton-callysto.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# + [markdown] lang="fr"
# # Composition avec Markdown
#
# Ce cahier illustre l'utilisation de Markdown pour saisir des documents dans Jupyter. Après avoir parcouru ce cahier, vous pourrez modifier le texte dans n'importe quel cahier. De plus, vous pourrez aider vos étudiants à éditer le texte de leurs propres cahiers.
#
# ## Les éléments _les plus_ fondamentaux
#
# Notez que vous pouvez afficher le code source derrière n'importe quelle cellule en double-cliquant dessus. Pour rendre ou réexécuter la cellule, utilisez l'une des options suivantes:
# 1. Sélectionnez `Cell` $\rightarrow$ `Run Cells`
# 2. Appuyez sur `Shift-Enter`
# 3. Cliquez sur le bouton `Run` ci-dessus
# + [markdown] lang="fr"
# ## Création et suppression de cellules Markdown
# La première étape à considérer consiste à créer une nouvelle cellule pour la composition. Pour ce faire, pendant que cette cellule est sélectionnée, dans le menu en haut, cliquez sur le signe plus (+) situé à côté du bouton Enregistrer.
# + [markdown] lang="fr"
# Une fois que vous avez créé la nouvelle cellule, vous obtiendrez une cellule vide de `Code`. Pour changer cette cellule en une cellule que vous pouvez utiliser pour la composition, sélectionnez la cellule et, dans le menu déroulant en haut, change `Code` $\rightarrow$ `Markdown`. Cela change ce type de cellule en une cellule Markdown, et elle peut maintenant être utilisée pour la composition!
#
#
# L'étape suivante consistera à apprendre à supprimer des cellules. Sélectionnez la cellule que vous avez convertie en une cellule Markdown. Pour supprimer cette cellule de votre cahier Jupyter, cliquez sur `Edit` $\rightarrow$ `Delete Cells`. Si vous avez accidentellement supprimé une cellule incorrecte, vous pouvez toujours la ramener en cliquant sur `Edit` $\rightarrow$ `Undo Delete Cells`.
# + [markdown] lang="fr"
# ## Créer des titres
#
# Créer un gros titre en gras dans un cahier Jupyter avec Markdown est très simple! Tout ce que vous avez à faire est, sur une nouvelle ligne, tapez le signe dièse puis un espace. Après l'espace, ajoutez votre titre. C'est si simple. En ajoutant des dièses supplémentaires, vous créerez différents niveaux de titre. La cellule ci-dessous est un texte non formaté démontrant cela. Pour afficher la cellule rendue, dans le menu en haut, cliquez sur le menu déroulant `Raw NBConvert` et sélectionnez `Markdown`.
# + active=""
# # I am a level one title
# ## I am a level two title
# ### I am a level three title
# #### I am a level four title
# ##### I am a level five title
# ###### I am a level six title
# ####### I would be a level seven title if such a thing existed
#
# If # isn't the first character, that line does not get rendered as a title.
# + [markdown] lang="fr"
# Pour voir comment ces titres apparaissent dans le cahier Jupyter, sélectionnez la cellule ci-dessus et, dans le menu en haut, sélectionnez `Raw NBConvert` $\rightarrow$ `Markdown`, puis appuyez sur `Ctrl + Enter` pour afficher la cellule. N'hésitez pas à créer une nouvelle cellule et à jouer avec celles-ci ou à modifier celle ci-dessus.
# + [markdown] lang="fr"
# ## Créer des listes détaillées
# Créer des listes en Markdown dans un cahier Jupyter est également simple! Pour ce faire, vous devez simplement commencer une nouvelle ligne et commencer à compter. La syntaxe de Markdown est indiquée dans le code ci-dessous.
# + [markdown] lang="fr"
# #### Double-cliquer sur moi
#
# 1. Ceci est le premier élément d'une liste
# 1. Ceci est le deuxième
# 50. Notez que les chiffres que nous utilisons n'ont pas d'importance, une fois que la cellule est rendue
# 1. En appuyant sur la touche `Tab` on peut ajouter des sous-items
# 45. Les chiffres n'ont pas d'importance ici, non plus
# - On peut ajouter plusieurs sous-listes
# - Mais à un moment donné, cela commence à devenir redondant
# - Nous pouvons aussi remonter quelques niveaux d'imbrication en alignant le texte avec la ligne plus haute
# 2346. En revenant à l'emplacement original, nous quittons nos listes imbriquées
# 23236235672456. Les chiffres n'ont vraiment pas d'importance.
# *En fait, vous pouvez abandonner les chiffres à tout moment.
# 1. Ou recommencez à compter
#
#
# - Nous n'avons pas non plus besoin de chiffres si nous ne voulons pas!
# * Nous avons beaucoup d'options
# -Notez comment si nous ne mettons pas d'espace entre le tiret et le premier terme, cela ne devient pas un nouvel élément dans la liste!
# + [markdown] lang="fr"
# ---
# ### Exercice 1
# 1. Dans la cellule ci-dessus, essayez de corriger la dernière ligne afin que le dernier élément de la liste soit correctement mis en forme une fois rendu.
# 2. Créez une nouvelle cellule ci-dessous, ajoutez un titre et une liste ordonnée de vos cinq fruits préférés.
# ---
# + [markdown] lang="fr"
# ## Liens
# Vous pouvez ajouter des hyperliens aux pages Web où vous voulez, directement dans le texte. La syntaxe est la suivante
# ```markdown
# [texte que vous souhaitez afficher](lien vers un fichier ou une page URL)
# ```
# Changez la cellule ci-dessous à Markdown, l'exécuter et voir où le lien vous mène!
# + lang="fr" active=""
# [ceci est le texte qui devient l'hyperlien](https://media.giphy.com/media/Vs44a88Dvbnkk/giphy.gif)
# + [markdown] lang="fr"
# ## Images / gifs
# Les images et les gifs peuvent être intégrés dans une cellule Markdown de manière presque identique à celle d’un hyperlien. Pour afficher l'image, ajoutez un point d'exclamation à la commande, comme illustré ci-dessous.
# ```markdown
# 
# ```
# afficherait une image / gif. Il peut s'agir d'un lien URL vers une image ou d'un lien enregistré sur le concentrateur / localement. Modifiez la cellule ci-dessous à Markdown et l'exécuter pour la voir en action, ou même intégrer vos propres images dans ce cahier.
# + lang="fr" active=""
# 
# + [markdown] lang="fr"
# ---
# ### Exercice 2
# 1. Dans une nouvelle cellule ci-dessous, incorporez une image d'un chat.
# 2. Dans la même cellule, ajoutez un lien hypertexte sous l'image qui est liée à l'image originale.
# 3. Modifiez le texte affiché dans le lien hypertexte pour dire "Kitty Kitty".
# ---
# + [markdown] lang="fr"
# # Citations bloc
# Les citations bloc sont un moyen d'indenter du texte / des images avec une barre grise afin de les distinguer du texte principal. Cela se fait tout simplement en ajoutant un `>` au début d'une ligne. Changez la cellule ci-dessous à Markdown pour voir quelques exemples
# + lang="fr" active=""
# > Ceci est un exemple de citation bloc. Vous pouvez taper aussi longtemps que vous le souhaitez et tout le texte résultant sera à droite de la barre grise verticale.
#
# Vous pouvez également utiliser des citations bloc pour formater facilement des images avec des légendes!
#
# >
# >
# > Maintenant, nous pouvons inclure une légende pour les images. C'est bien parce que l'image se distingue maintenant du texte principal. Cela facilite l’inclusion d’images sans interrompre le récit que vous avez créé. Remarquez comment j'ai inclus un `>` vide sous l'image. C'est pourquoi l'espacement de ce texte se présente bien en dessous. Essayez de le retirer et voyez ce qui se passe.
# + [markdown] lang="fr"
# ## Les tableaux
#
# On peut également créer des tableaux de données dans Markdown. Cependant, le faire manuellement n'est souvent pas le plus pratique. Pour les grands tableaux, nous vous recommandons d'utiliser des outils en ligne (tels que [ici](https://jakebathman.github.io/Markdown-Table-Generator/)) où vous pouvez copier-coller des tableaux Excel ou des fichiers CSV. Cela produira alors une syntaxe pour rendre correctement ces tableaux. Pour les petits tableaux, il est assez facile de taper les vôtres. Voir la cellule ci-dessous.
# + active=""
# | Ceci est le premier nom de colonne | Deuxième colonne | Troisième |
# |--- | --- | ------------- |
# |Notez que rien n'est aligné lorsque nous le tapons | Mais ça a l'air bien quand rendu | C'est vraiment pratique!
# |les lignes courtes sont bien aussi | nombres? | 343453 |
# |symboles | `oui` | $\int e^{-x^2} dx$ |
# |notez également que le nombre de tirets dans la deuxième ligne n'a pas d'importance | il faut juste qu'il y en ait au moins trois | les autres ne sont vraiment que pour vous, de donner au pré-rendu une belle apparence |
# + [markdown] lang="fr"
# La syntaxe de base de la table est la suivante:
# ```markdown
# |initialiser|vos|noms de|colonnes|
# |-----------|---|-------|---------| <-cette ligne met en place les en-têtes, nécessite au moins trois `---` chaque ligne
# | données |données|données|données| <- n'importe quel type de données en texte. N'hésitez pas à mélanger aussi!
# | nombres | texte | texte | données <- vous n'avez pas besoin d'inclure le "pipe" le plus à droite si vous ne voulez pas
# ```
#
# La syntaxe peut être considérée comme la manière d’essayer d’écrire directement un tableau à l’aide de symboles sur votre clavier. Jouez avec la table dans la cellule ci-dessus ou créez une nouvelle cellule et essayez de créer le vôtre! Vous pourriez en mettre un dans cette cellule et mettre un tableau ici. Double-cliquez n'importe où sur cette cellule pour ouvrir le mode d'édition.
# + [markdown] lang="fr"
# ---
# ### Exercices 3
# 1. En utilisant des exemplaires de cahiers, les enseignants peuvent aimer ou ne pas aimer les différentes sections de l’échantillon fourni. Heureusement, il est facile de faire ces changements mineurs dans les cahiers. Dans les deux cellules ci-dessous:
# * supprimez la deuxième cellule car vous n'en aurez pas besoin dans votre leçon
# * supprimez le dernier paragraphe de la première cellule
# * changez la date dans le premier paragraphe
# 2. Dans la première cellule ci-dessous, prenez la liste ordonnée de la citation bloc.
#
# ---
# + [markdown] lang="fr"
# > ## Propriétés des nombres aléatoires
# >
# > 22 mai 2018
# > Supposons que vous ayez une suite de nombres aléatoires $N$ $\{R\}$ avec le contenu $\{r_1, r_2, ... , r_N\}$ où chaque élément $r_i$ est un nombre aléatoire. Quelle sorte de propriétés cette séquence de nombres devrait-elle avoir? S'il s'agit vraiment d'une séquence de nombres aléatoires, il _doit_ satisfaire les propriétés suivantes, que nous expliquerons plus en détail:
# >
# > 1. Tirer n'importe quel $r_i$ est également probable et indépendant.
# > 2. La séquence de nombres aléatoires est uniformément distribuée.
# >
# > "Tirer une valeur" dans cette portée signifie que nous sélectionnons un nombre de notre séquence de nombres aléatoires, mais ne le retirons pas de la séquence (la séquence reste inchangée, nous "observons" simplement le nombre aléatoire).
# Regardons ces deux propriétés plus en détail.
# + [markdown] lang="fr"
# > ### Toutes les valeurs sont également probables et indépendantes
# >
# > Cela signifie que si vous deviez sélectionner (mais pas retirer) un nombre de votre séquence de nombres aléatoires $\{r_1, r_2, ... , r_N\}$ au hasard, la probabilité de tirer l'un de ces nombres est
# \begin{equation}
# p(r_i) = \frac{1}{N}
# \end{equation}
# où $p(r_i)$ est la probabilité de sélectionner un nombre $r_i$. Cette probabilité est identique pour tous les nombres de votre ensemble. Plus explicitement:
# \begin{equation}
# p(r_1) = p(r_2) = ... = p(r_N) = \frac{1}{N}
# \end{equation}
# >
# > La propriété d'indépendance signifie que si vous tirer un nombre de l'ensemble, cela n'affecte pas la probabilité de tirer d'autres nombres, ni même lui-même ultérieurement. C'est parce que la séquence reste inchangée après avoir tiré (observez) un nombre.
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| Tutoriels/markdown.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import necessary libraries
from requests import get
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import pandas as pd
from time import sleep
from random import randint
from time import time
from IPython.core.display import clear_output
from warnings import warn
import datetime
import re
data_raw = pd.read_csv('./data/restaurant_ratings.csv')
data_raw
# remove unwanted columns
data = data_raw.drop(['Unnamed: 0',
'Rating',
'Number of Ratings',
'Affordability'],
axis=1)
data.head(5)
# add https:// in front of the review links
data['Reviews Link'] = 'https://www.' + data['Reviews Link']
data.head(5)
# check if length is correct
len(list(data["Reviews Link"]))
# store links as a list
LIST_OF_LINKS = list(data["Reviews Link"])
# show first 5 links
LIST_OF_LINKS[:5]
# ## Actual Scraping
# get the start value for every new page in yelp
start_of_new_page = [str(i) for i in range(0,381,20)]
# debug
print(start_of_new_page, end=' ')
print(f'\nNumber of pages scraped: {len(start_of_new_page)}')
def scraper(list_of_links, num_req, start, end):
    """Scrape Yelp review pages for the links in list_of_links[start:end+1].

    For each restaurant link, iterates over the paginated review offsets in
    the module-level `start_of_new_page`, retrying each page up to 5 times,
    and accumulates reviewer names, ratings and comment texts.

    Args:
        list_of_links: list of review-page URLs.
        num_req: maximum number of page requests per link.
        start: index of the first link to scrape (inclusive).
        end: index of the last link to scrape (inclusive).

    Returns:
        Tuple of five parallel lists:
        (cust_names, cust_ratings, cust_comments, res_names, res_types).

    NOTE(review): relies on the module-level globals `data` (restaurant
    metadata DataFrame) and `start_of_new_page`; the hard-coded Yelp CSS
    class names are brittle and will break whenever Yelp changes its markup.
    """
    # redeclaring lists to store data in multiple values
    cust_names = []
    cust_ratings = []
    cust_comments = []
    res_names = []
    res_types = []
    # counter
    count = 0
    # flag variable to check the scrape
    # if unsuccessful scrape, try again
    unsuccessful = True
    # preparing the monitoring of the loop
    start_time = time()
    ### -----
    # for every comment in the interval
    for link_raw in list_of_links[start:end+1]:
        loop_time = time()
        count+=1 # increment count to determine which link it is being scraped
        requests=1 # reset requests count for different webpage
        # print(f'----- LINK {count} -----')
        for pageStart in start_of_new_page:
            # Break the loop if the number of requests is greater than expected
            if requests > num_req:
                #warn('Number of requests was greater than expected.')
                break
            unsuccessful = True
            fail_count = 0
            repeat = 0
            while unsuccessful:
                # make a get request
                #response = get(f'https://www.yelp.com/biz/jumbo-seafood-singapore-4?start={pageStart}')
                #response = get(f'https://www.tripadvisor.com.sg/Restaurant_Review-g294265-d7348336-Reviews-or{pageStart}-Sunday_Folks-Singapore.html')
                #link_array = link_raw.split('Reviews-')
                link = link_raw + '&start=' + str(pageStart)
                # print(link)
                response = get(link)
                # pause the loop (1-4 s random delay to avoid rate limiting)
                sleep(randint(1,4))
                # monitor the requests
                elapsed_time = time() - start_time
                print(f'LINK {count+start} REQUEST {requests}; Frequency: {requests/elapsed_time} requests/s')
                # Parse the content of the request with BeautifulSoup
                page_html = BeautifulSoup(response.text, 'html.parser')
                # get the comment container for all 20 comments in a page
                comment_containers = page_html.find_all('div', class_='review__373c0__13kpL border-color--default__373c0__3-ifU')
                if len(comment_containers) != 0:
                    print(f"REQUEST {requests}: SUCCESS --> Failed Count: {fail_count}")
                    clear_output(wait = True)
                    unsuccessful = False
                else:
                    fail_count+=1
                    repeat+=1
                    #print(f"Request {requests}: unsuccessful scrape") # debug
                    # give up on this page after 5 consecutive empty parses
                    if repeat >= 5:
                        print("Repeated 5 times --> cannot scrape")
                        break
            requests += 1
            # for every comments in 10
            for com in comment_containers:
                # in case the scrape fail for that particular entry due to html tag issue
                try:
                    # append the restaurant name and type
                    res_names.append(data["Restaurant"][count+start-1])
                    res_types.append(data["Restaurant Type"][count+start-1])
                    # scrape the customer name
                    cust_name = com.div.find('a', class_='link__373c0__1G70M link-color--inherit__373c0__3dzpk link-size--inherit__373c0__1VFlE').text
                    cust_names.append(cust_name)
                    # scrape the customer ratings
                    cust_rating = com.find('div', class_='arrange__373c0__2C9bH gutter-1__373c0__2l5bx vertical-align-middle__373c0__1SDTo border-color--default__373c0__3-ifU').span.div['aria-label']
                    cust_ratings.append(cust_rating)
                    cust_comment_raw = com.find_all('div', class_='margin-b2__373c0__abANL border-color--default__373c0__3-ifU')
                    # some reviews have an extra leading container; take the right one
                    if len(cust_comment_raw) != 1:
                        temp = cust_comment_raw[1].text
                    else:
                        temp = cust_comment_raw[0].text
                    # strip non-breaking spaces from the comment text
                    cust_comment = temp.replace(u'\xa0', u'')
                    cust_comments.append(cust_comment)
                except:
                    # NOTE(review): bare except silently drops the entry on any
                    # markup mismatch — intentional best-effort scraping.
                    print(f'error in request {requests}')
                    continue
            # Throw a warning for non-200 status codes
            if response.status_code != 200:
                warn('Request: {}; Status code: {}'.format(requests, response.status_code))
        # check time needed to exceute one link
        print(f'Time taken for scraping link {count+start}: {time() - loop_time} seconds')
    print('DONE')
    return cust_names, cust_ratings, cust_comments, res_names, res_types
# scraper call function
def scraper_call(list_of_links, num_req, start, end):
    """Run the scraper over one slice of links and append the results to CSV.

    The column header is written only for the first slice (start == 0), so
    successive calls extend ./data/yelp-comments.csv without duplicate headers.
    """
    names, ratings, comments, restaurants, types = scraper(
        list_of_links=list_of_links, num_req=num_req, start=start, end=end)
    print(f'Number of entries: {len(names)}')
    review = pd.DataFrame({
        'Restaurant Name': restaurants,
        'Restaurant Type': types,
        'Reviewer\'s Name': names,
        'Rating': ratings,
        'Comment': comments,
    })
    print(review.info())
    # Append to the running CSV; emit the header once, on the first slice only.
    review.to_csv('./data/yelp-comments.csv', mode='a', index=False,
                  header=(start == 0))
# determine number of requests (each request is 10 entries)
REQUESTS = 10
start_list = [s for s in range(0,221,50)]
end_list = [e for e in range(49,221,50)]
# test values
print(start_list)
print(end_list)
# ## CALLING OF FUNCTIONS
# link 0 - 49
scraper_call(list_of_links=LIST_OF_LINKS, num_req=REQUESTS, start=start_list[0], end=end_list[0])
# link 50 - 99
scraper_call(list_of_links=LIST_OF_LINKS, num_req=REQUESTS, start=start_list[1], end=end_list[1])
# link 100 - 149
scraper_call(list_of_links=LIST_OF_LINKS, num_req=REQUESTS, start=start_list[2], end=end_list[2])
# link 150 - 199
scraper_call(list_of_links=LIST_OF_LINKS, num_req=REQUESTS, start=start_list[3], end=end_list[3])
# link 200 - 220
scraper_call(list_of_links=LIST_OF_LINKS, num_req=REQUESTS, start=start_list[4], end=219)
| OLD notebooks/archive/OLD Files/yelp-scraper-comments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from os import getcwd, scandir, rmdir, rename
from shutil import move
from pprint import pprint
root = getcwd()
def get_folders(path):
    """Return the paths of non-hidden subfolders directly inside `path`.

    Args:
        path: A directory path (str) or a list of directory paths.

    Returns:
        list: Child folder paths, or False if `path` is neither str nor list.
    """
    # Normalize to a list of parent directories instead of duplicating
    # the scan loop for the str and list cases.
    if isinstance(path, str):
        parents = [path]
    elif isinstance(path, list):
        parents = path
    else:
        # Preserve the original sentinel for unsupported argument types.
        return False
    folders = []
    for parent in parents:
        with scandir(parent) as contents:
            for child in contents:
                # Skip hidden entries (dot-prefixed) and non-directories.
                if not child.name.startswith('.') and child.is_dir():
                    folders.append(child.path)
    return folders
def get_files(path):
    """Return the paths of all files directly inside `path`.

    Args:
        path: A directory path (str) or a list of directory paths.

    Returns:
        list: Child file paths, or False if `path` is neither str nor list.
    """
    # Normalize to a list of parent directories instead of duplicating
    # the scan loop for the str and list cases.
    if isinstance(path, str):
        parents = [path]
    elif isinstance(path, list):
        parents = path
    else:
        # Preserve the original sentinel for unsupported argument types.
        return False
    files = []
    for parent in parents:
        with scandir(parent) as contents:
            for child in contents:
                if child.is_file():
                    files.append(child.path)
    return files
def new_path(curpath):
    """Return `curpath` with its immediate parent directory removed.

    e.g. 'a/b/c/file.txt' -> 'a/b/file.txt'
    """
    segments = curpath.split('/')
    segments.pop(-2)  # drop the entry's immediate parent folder
    return '/'.join(segments)
def perform_cleanup(children, grandchildren, orphans):
    """Move grandchild files up one directory level.

    Args:
        children: dict with 'folders'/'files' lists (currently unused here —
            presumably kept for interface symmetry; TODO confirm).
        grandchildren: dict with 'folders'/'files' lists; its files are moved.
        orphans: dict with 'folders'/'files' one level below the grandchildren.
    """
    # NOTE(review): `or` triggers the move when EITHER orphan list is empty —
    # confirm whether the intent was `and` (i.e. no deeper content at all).
    if not orphans['folders'] or not orphans['files']:
        for file in grandchildren['files']:
            # new_path() drops the file's parent folder from its path.
            move(file, new_path(file))
parents = {'folders': get_folders(root), 'files': get_files(root)}
children = {'folders': get_folders(parents['folders']), 'files': get_files(parents['folders'])}
grandchildren = {'folders': get_folders(children['folders']), 'files': get_files(children['folders'])}
orphans = {'folders': get_folders(grandchildren['folders']), 'files': get_files(grandchildren['folders'])}
# for file in grandchildren['files']:
# new = file.split('/')
# del new[-2]
# new = '/'.join(new)
# move(file, new)
# for folder in children['folders']:
# rmdir(folder)
# -
| .ipynb_checkpoints/NewCode-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Indention
# ### 有空行無法執行 空白相當於分號
print('sdsad')
# print('sadasd')
print('sadasd')
# # comment
#我是註解
"""
我是註解
"""
# # type
i=5
print(type(i))
i=5.5
print(type(i))
i= True
print(type(i))
i= 'something'
print(type(i))
i = None
print(type(i))
# # Assignment
x=1
y=2
z=x+y
print(z)
# # Mathematical Operator
# +
x=3;y=4
z=x+y;print(z)
z=x-y;print(z)
z=x*y;print(z)
z=x/y;print(z)
z=x//y;print(z) #取整餘數
z=x%y;print(z)
z=x**y;print(z) #取次方
#print("****","##") output:*** ##
print("x + y =",x+y)
# -
# # Comparison Operator
x=5;y=8
print("x < y =>",x<y)
print("x <= y =>",x<=y)
print("x > y =>",x>y)
print("x >= y =>",x>=y)
print("x == y =>",x==y)
print("x != y =>",x!=y)
# # boolen
# +
a = True
b = False
print('a and b', a and b)
print('a or b',a or b)
print('not a',not a)
print('not b',not b)
# -
# # 流程控制
# +
#if elif else
a=3;b=2
if a < b:
print("a<b")
elif a == b:
print("a==b")
else:
print("a>b")
if (a<b) or (a > b):
print('!=')
else:
print('=')
# +
#for
i=5
data = [0]*i
for data_point in data:
data_point = i
print(data_point)
i-=1
for x in range(0,5):
data[x] = x+1
s=0
for data_point in data:
s += data_point
print('sum=',s)
print('avg=',s/len(data))
# -
#while
first = 0
second = 1
n = 10
while n>2:
third = first + second
first = second
second = third
n-=1
print(third)
print(first+second)
#break continue
numbers = [12,2,4,6,3,5,8,-1,2,5,7,9]
#for n in numbers:
#print(n)
#印偶數 遇到-1中斷
for n in numbers:
if n % 2 == 0:
print(n)
elif n == -1:
break
else:
continue
print('one time')
# # Error Handling
# +
try:
a=1;b=0
c = a/b
except Exception as e:
#print(e)
pass
#raise("asaddsaad")
try:
raise Exception("asasas")
except Exception as e:
print(e)
# -
| python_exercise/syntax_exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Корректность проверена на Python 3.6:**
# + pandas 0.23.4
# + numpy 1.15.4
# + matplotlib 3.0.2
# + sklearn 0.20.2
# # Sklearn
# # Визуализация данных
# +
from sklearn import datasets
import numpy as np
# -
# %pylab inline
# ### Загрузка выборки
digits = datasets.load_digits()
print(digits.DESCR)
print('target:', digits.target[0])
print('features: \n', digits.data[0])
print('number of features:', len(digits.data[0]))
# ## Визуализация объектов выборки
#не будет работать: Invalid dimensions for image data
pylab.imshow(digits.data[0])
digits.data[0].shape
print(digits.data[0].reshape(8,8))
digits.data[0].reshape(8,8).shape
pylab.imshow(digits.data[0].reshape(8,8))
print(digits.keys())
print(digits.images[0])
pylab.imshow(digits.images[0])
# +
pyplot.figure(figsize(8, 8))
pyplot.subplot(2, 2, 1)
pylab.imshow(digits.images[0])
pyplot.subplot(2, 2, 2)
pylab.imshow(digits.images[0], cmap = 'hot')
pyplot.subplot(2, 2, 3)
pylab.imshow(digits.images[0], cmap = 'gray')
pyplot.subplot(2, 2, 4)
pylab.imshow(digits.images[0], cmap = 'gray', interpolation = 'nearest')
# +
pyplot.figure(figsize(20, 8))
for plot_number, plot in enumerate(digits.images[:10]):
pyplot.subplot(2, 5, plot_number + 1)
pylab.imshow(plot, cmap = 'gray')
pylab.title('digit: ' + str(digits.target[plot_number]))
# -
# ## Уменьшение размерности
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from collections import Counter
# -
data = digits.data[:1000]
labels = digits.target[:1000]
print(Counter(labels))
pylab.figure(figsize = (10, 6))
pylab.bar(Counter(labels).keys(), Counter(labels).values())
classifier = KNeighborsClassifier()
classifier.fit(data, labels)
print(classification_report(classifier.predict(data), labels))
# ### Random projection
from sklearn import random_projection
projection = random_projection.SparseRandomProjection(n_components = 2, random_state = 0)
data_2d_rp = projection.fit_transform(data)
pylab.figure(figsize=(10, 6))
pylab.scatter(data_2d_rp[:, 0], data_2d_rp[:, 1], c = labels)
classifier.fit(data_2d_rp, labels)
print(classification_report(classifier.predict(data_2d_rp), labels))
# ### PCA
from sklearn.decomposition import PCA
pca = PCA(n_components = 2, random_state = 0)
data_2d_pca = pca.fit_transform(data)
pylab.figure(figsize = (10, 6))
pylab.scatter(data_2d_pca[:, 0], data_2d_pca[:, 1], c = labels)
classifier.fit(data_2d_pca, labels)
print(classification_report(classifier.predict(data_2d_pca), labels))
# ### MDS
from sklearn import manifold
mds = manifold.MDS(n_components = 2, n_init = 1, max_iter = 100)
data_2d_mds = mds.fit_transform(data)
pylab.figure(figsize=(10, 6))
pylab.scatter(data_2d_mds[:, 0], data_2d_mds[:, 1], c = labels)
classifier.fit(data_2d_mds, labels)
print(classification_report(classifier.predict(data_2d_mds), labels))
# ### t- SNE
tsne = manifold.TSNE(n_components = 2, init = 'pca', random_state = 0)
data_2d_tsne = tsne.fit_transform(data)
pylab.figure(figsize = (10, 6))
pylab.scatter(data_2d_tsne[:, 0], data_2d_tsne[:, 1], c = labels)
classifier.fit(data_2d_tsne, labels)
print(classification_report(classifier.predict(data_2d_tsne), labels))
| Yandex data science/3/Week 3/.ipynb_checkpoints/sklearn.data_visualization-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # List and its functions
list = ["Manoj","Kumar",1002,"EIE","NECN"]
list
# +
# Adds an item
list.append("HI")
# -
list
# +
# Extends the list by appending all the items
list.extend(["BRUCE","BANNER"])
# -
list
# +
# Inserts an item at a given position.
list.insert(0,"I'm")
# -
list
# +
# Removes the first item from the list
list.remove("BANNER")
# -
list
# +
# Returns the number of items in the list.
len(list)
# +
# Removes all items from the list
list.clear()
# -
list
# +
# New list
mylist = ["falcon","bucky",786,"rogers"]
# -
mylist
# +
# New updated list
mylist = ["bee", "moth", "ant"]
# +
# Returns the largest item in an list
print(max(mylist))
# -
list1 = ["ant", "moth", "wasp"]
# +
# Returns the smallest item in an list
print(min(list1))
# -
# # The End
| list and its functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
import numpy as np
import cv2
import random
import glob
import torch
import time
import pickle
import os
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import detection_utils as utils
from detectron2.data import samplers
from torch.utils.data import Dataset, DataLoader
# -
class PredictDataset(Dataset):
    """Dataset yielding consecutive image pairs as model-input dicts.

    Files matching `glob_string` are sorted and consumed two at a time,
    so the dataset length is half the number of matched files.
    """

    def __init__(self, glob_string):
        self.image_files = sorted(glob.glob(glob_string))

    def __len__(self):
        # Two files are consumed per item.
        return len(self.image_files) // 2

    def _as_model_input(self, path):
        # Load one image and wrap it in the dict format the model expects:
        # a contiguous CHW float32 tensor plus original size and source path.
        raw = cv2.imread(path)
        h, w = raw.shape[:2]
        tensor = torch.as_tensor(raw.astype("float32").transpose(2, 0, 1)).contiguous()
        return {"image": tensor, "height": h, "width": w, "file_name": path}

    def __getitem__(self, idx):
        first = self.image_files[idx * 2]
        second = self.image_files[idx * 2 + 1]
        return [self._as_model_input(first), self._as_model_input(second)]
def cv2_imshow(im):
    """Display `im` in a blocking OpenCV window titled 'file'."""
    cv2.imshow('file', im)
    cv2.waitKey(0)  # block until any key is pressed
    cv2.destroyAllWindows()
# +
from detectron2.data.datasets import register_coco_instances
# CAN BE ANY BABOON .json ANNOTATION FILE
register_coco_instances("train", {},
"/home/golden/kenya_drones/night_baboons/annotations/original-annotations/train.json",
"/home/golden/kenya_drones/night_baboons/annotations/original-annotations/images")
train_metadata = MetadataCatalog.get("train")
# +
cfg = get_cfg()
# WHERE DETECTRON2 GETS INSTALLED
cfg.merge_from_file(
"/home/golden/detectron2-master/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
)
baboon_weights = "/home/golden/Dropbox/detection-projects/carter-baboons/output/full-aug_maxiter-2000_lr-0.019_detectPerIm-200_minsize-0_batchsize-8/model_final.pth"
cfg.MODEL.WEIGHTS = (baboon_weights)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.4
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (100)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
cfg.TEST.DETECTIONS_PER_IMAGE = 100
# +
model = build_model(cfg)
_ = model.eval()
checkpointer = DetectionCheckpointer(model)
_ = checkpointer.load(cfg.MODEL.WEIGHTS)
# +
# FOLDER THAT HAS THE IMAGES YOU WANT TO PROCESS
images_folder = "/home/golden/kenya_drones/night_baboons/frames/1_20190801"
dataset = PredictDataset(os.path.join(images_folder, "*.jpg"))
save_root = os.path.join('.', os.path.basename(images_folder))
os.makedirs(save_root, exist_ok=True)
# +
# def build_detection_test_loader(cfg, dataset_name, mapper=None):
# """
# Similar to `build_detection_train_loader`.
# But this function uses the given `dataset_name` argument (instead of the names in cfg),
# and uses batch size 1.
# Args:
# cfg: a detectron2 CfgNode
# dataset_name (str): a name of the dataset that's available in the DatasetCatalog
# mapper (callable): a callable which takes a sample (dict) from dataset
# and returns the format to be consumed by the model.
# By default it will be `DatasetMapper(cfg, False)`.
# Returns:
# DataLoader: a torch DataLoader, that loads the given detection
# dataset, with test-time transformation and batching.
# """
# dataset_dicts = get_detection_dataset_dicts(
# [dataset_name],
# filter_empty=False,
# proposal_files=[
# cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]
# ]
# if cfg.MODEL.LOAD_PROPOSALS
# else None,
# )
# dataset = DatasetFromList(dataset_dicts)
# if mapper is None:
# mapper = DatasetMapper(cfg, False)
# dataset = MapDataset(dataset, mapper)
# sampler = samplers.InferenceSampler(len(dataset))
# # Always use 1 image per worker during inference since this is the
# # standard when reporting inference time in papers.
# batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 2, drop_last=False)
# data_loader = torch.utils.data.DataLoader(
# dataset,
# num_workers=cfg.DATALOADER.NUM_WORKERS,
# batch_sampler=batch_sampler,
# collate_fn=trivial_batch_collator,
# )
# return data_loader
# def trivial_batch_collator(batch):
# """
# A batch collator that does nothing.
# """
# return batch
# -
data_loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=7)
cuda0 = torch.device('cuda:0')
# ### This is where the actual image inference happens
# +
import pickle
import os
max_batches = 10000
t = time.time()
with torch.no_grad():
for batch_num, image_batch in enumerate(data_loader):
if batch_num >= max_batches:
break
if batch_num % 250 == 0:
print('{} images processed'.format(batch_num * 2))
for i in range(len(image_batch)):
image_batch[i]['image'] = np.squeeze(image_batch[i]['image'])
image_batch[i]['image'] = image_batch[i]['image'].to(cuda0)
image_batch[i]['width'] = image_batch[i]['width'].to(cuda0).item()
image_batch[i]['height'] = image_batch[i]['height'].to(cuda0).item()
# print(image_batch['image'].shape)
# print(image_batch)
predictions = model(image_batch)
for preds, im_dict in zip(predictions, image_batch):
name = os.path.splitext(os.path.basename(im_dict['file_name'][0]))[0]
file = os.path.join(save_root, '{}-predictions.pkl'.format(name))
preds_instance = preds["instances"].to("cpu")
with open(file, 'wb') as out:
pickle.dump(preds_instance, out)
out.close()
print(time.time() - t)
# -
# ### Below is some stuff to dig into the output
type(predictions[0]['instances'])
import os
name = image_batch[0]['file_name'][0].split('/')[-2]
name
# +
# file = os.path.join(save_root, '{}-predictions.pkl'.format(name))
# preds = predictions[0]["instances"].to("cpu")
# with open(file, 'wb') as out:
# pickle.dump(preds, out)
# out.close()
# +
files = sorted(glob.glob(os.path.join(save_root, '*-predictions.pkl')))
all_detections = []
raw_instances = []
for file in files[:]:
with open(file, 'rb') as readfile:
detections=pickle.load(readfile)
detection_dict = detections.get_fields()
detection_dict['pred_boxes'] = detection_dict['pred_boxes'].tensor.numpy()
detection_dict['scores'] = detection_dict['scores'].numpy()
detection_dict['pred_classes'] = detection_dict['pred_classes'].numpy()
detection_dict['image_name'] = os.path.basename(file).split('-')[0]
all_detections.append(detection_dict)
raw_instances.append(detections)
np_detections_file = os.path.join(save_root, '{}_detections.npy'.format(name))
np.save(np_detections_file, all_detections)
# -
raw_instances[0]
# +
import numpy as np
import glob
files = [np_detections_file]
for file in files[0:1]:
detections = np.load(file, allow_pickle=True)
print(detections[100]['scores'].shape)
print(detections[100]['image_name'])
# +
# image_files = glob.glob(images_folder + '/*.jpg')
# -
# +
# detections
# -
files[0]
import matplotlib.pyplot as plt
im_ind = 0
images_folder
files = sorted(glob.glob(images_folder + '/*.jpg'))
im = plt.imread(files[0])
print(im.shape)
# +
import matplotlib.pyplot as plt
make_video = True
draw_plots = False
max_frames = 5000
fps = 30
output_file = '/home/golden/Dropbox/locusts/test_video_full.mp4'
if make_video:
frames = 0
out = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, (3840, 2160))
print('here')
# for im_ind in np.linspace(0, len(raw_instances)-1, 20, dtype=int):
for im_ind in range(len(raw_instances)):
# for im_ind in range(60):
if im_ind >= max_frames:
break
if im_ind % 500 == 0:
print(im_ind)
# observation_name = raw_instances[im_ind].image_name.split('_')[0] + '_' + raw_instances[im_ind].image_name.split('_')[1]
# image_raw = plt.imread(os.path.join(os.path.dirname(images_folder), observation_name, raw_instances[im_ind].image_name + '.jpg'))
image_raw = plt.imread(files[im_ind])
v = Visualizer(image_raw,
metadata=train_metadata,
scale=1.0,
)
v = v.draw_instance_predictions(raw_instances[im_ind])
if make_video:
out.write(v.get_image()[...,::-1])
frames += 1
if draw_plots:
plt.figure(figsize=(20,20))
plt.imshow(v.get_image())
if make_video:
out.release()
print('num frames {}'.format(frames))
# -
os.path.exists(output_file)
v.get_image()[...,::-1].shape
plt.imshow(v.get_image()[...,::-1])
v.get_image().shape
test = np.load(os.path.join(save_root, '{}-predictions.npy'.format(name)), allow_pickle=True)
files = sorted(glob.glob(os.path.join(save_root, '*-predictions.pkl')))
readfile = files[0]
with open(file, 'rb') as readfile:
detections=pickle.load(readfile)
# +
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
import os
cfg = get_cfg()
cfg.merge_from_file(
"/home/golden/detectron2-master/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
)
cfg.DATASETS.TRAIN = ("salmon-train",)
cfg.DATASETS.TEST = ("salmon-val",)
cfg.DATALOADER.NUM_WORKERS = 6
cfg.DATALOADER.ASPECT_RATIO_GROUPING = False
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
)
cfg.SOLVER.IMS_PER_BATCH = 8
cfg.SOLVER.BASE_LR = 0.019
cfg.SOLVER.MAX_ITER = (2000)
cfg.SOLVER.WARMUP_ITERS = 100
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (256)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
cfg.TEST.EVAL_PERIOD = 100
cfg.TEST.DETECTIONS_PER_IMAGE = 200
cfg.INPUT.MIN_SIZE_TEST = (0)
cfg.INPUT.MAX_SIZE_TEST = (4000)
# +
# Check validation
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.DATASETS.TEST = ("salmon-val", )
predictor = DefaultPredictor(cfg)
for d in val_dicts:
im = cv2.imread(d["file_name"])
outputs = predictor(im)
v = Visualizer(im[:, :, ::-1],
metadata=train_metadata,
scale=1.0,
)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2_imshow(v.get_image()[:, :, ::-1])
| CODE/CNN_scripts/process-video.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Spherical Feature Extraction using s2cnn
# +
import sys
sys.path.append('C:/Users/ustundag/GitHub/2D-3D-Semantics/s2cnn_TORCH/')
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# %matplotlib inline
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data_utils
import torchvision.transforms.functional as tfun
from torch.autograd import Variable
from s2cnn import SO3Convolution
from s2cnn import S2Convolution
from s2cnn import so3_integrate
from s2cnn import so3_near_identity_grid
from s2cnn import s2_near_identity_grid
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# -
class S2ConvNet_deep(nn.Module):
    """Deep spherical CNN: one S2 convolution followed by six SO(3)
    convolutions with ReLU activations, progressively widening channels
    (3 -> 8 -> 16 -> 16 -> 24 -> 24 -> 32 -> 64) while halving the
    bandwidth at several stages (b -> b/2 -> b/4 -> b/8).

    Used here as a feature extractor: integration/linear head is disabled
    in forward(), so the raw SO(3) feature map is returned.
    """
    def __init__(self, bandwidth = 30):
        super(S2ConvNet_deep, self).__init__()

        # Sampling grids of increasing angular extent (max_beta) for the
        # successive convolution layers.
        grid_s2    =  s2_near_identity_grid(n_alpha=6, max_beta=np.pi/16, n_beta=1)
        grid_so3_1 = so3_near_identity_grid(n_alpha=6, max_beta=np.pi/16, n_beta=1, max_gamma=2*np.pi, n_gamma=6)
        grid_so3_2 = so3_near_identity_grid(n_alpha=6, max_beta=np.pi/ 8, n_beta=1, max_gamma=2*np.pi, n_gamma=6)
        grid_so3_3 = so3_near_identity_grid(n_alpha=6, max_beta=np.pi/ 4, n_beta=1, max_gamma=2*np.pi, n_gamma=6)
        grid_so3_4 = so3_near_identity_grid(n_alpha=6, max_beta=np.pi/ 2, n_beta=1, max_gamma=2*np.pi, n_gamma=6)
        # NOTE(review): grid_so3_5 is defined but never used below; grids 2
        # and 3 are each reused by two consecutive layers — confirm intended.
        grid_so3_5 = so3_near_identity_grid(n_alpha=6, max_beta=0.2, n_beta=1)

        self.convolutional = nn.Sequential(
            S2Convolution(
                nfeature_in  = 3,
                nfeature_out = 8,
                b_in  = bandwidth,
                b_out = bandwidth,
                grid=grid_s2),
            nn.ReLU(inplace=False),
            SO3Convolution(
                nfeature_in  = 8,
                nfeature_out = 16,
                b_in  = bandwidth,
                b_out = bandwidth//2,
                grid=grid_so3_1),
            nn.ReLU(inplace=False),
            SO3Convolution(
                nfeature_in  = 16,
                nfeature_out = 16,
                b_in  = bandwidth//2,
                b_out = bandwidth//2,
                grid=grid_so3_2),
            nn.ReLU(inplace=False),
            SO3Convolution(
                nfeature_in  = 16,
                nfeature_out = 24,
                b_in  = bandwidth//2,
                b_out = bandwidth//4,
                grid=grid_so3_2),
            nn.ReLU(inplace=False),
            SO3Convolution(
                nfeature_in  = 24,
                nfeature_out = 24,
                b_in  = bandwidth//4,
                b_out = bandwidth//4,
                grid=grid_so3_3),
            nn.ReLU(inplace=False),
            SO3Convolution(
                nfeature_in  = 24,
                nfeature_out = 32,
                b_in  = bandwidth//4,
                b_out = bandwidth//8,
                grid=grid_so3_3),
            nn.ReLU(inplace=False),
            SO3Convolution(
                nfeature_in  = 32,
                nfeature_out = 64,
                b_in  = bandwidth//8,
                b_out = bandwidth//8,
                grid=grid_so3_4),
            nn.ReLU(inplace=False)
            )

    def forward(self, x):
        # Feature extraction only — integration and linear head disabled.
        x = self.convolutional(x)
        #x = so3_integrate(x)
        #x = self.linear(x)
        return x
s2cnn = S2ConvNet_deep(bandwidth=64)
s2cnn.to(DEVICE)
"""
path = 'C:/Users/ustundag/Desktop/test_pano_rgb.png'
img = Image.open(path)
img = img.resize((128,128))
data = np.asarray(img, dtype=np.float32)
data = tfun.to_tensor(data)
data = data.unsqueeze_(0)
data = data[:,:3,:,:]
print(data.shape)
plt.imshow(img)
plt.show()
"""
"""
images = data.to(DEVICE)
outputs = s2cnn(images)
print('outputs.shape: ', outputs.shape)
"""
"""
x = outputs.detach().cpu().numpy()
a = x[0, 0, :, :, 10]
print(a.shape)
plt.imshow(a, cmap='gray')
plt.show()
"""
# ### Extract and save features of 7 specific objects using semantics as masks
# +
import assets.utils as u
VALID_OBJECTS = ('board','bookcase','chair','door','sofa','table','window')
import glob
from scipy.io import savemat, loadmat
from IPython.display import display, clear_output
import torchvision.transforms.functional as tfun
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def get_label(pix):
    """Map a semantic-image pixel value to its object class name.

    The semantic label list is loaded once and cached on the function
    object, instead of re-reading the JSON file on every call — this
    function is invoked per pixel in save_features_and_labels(), so the
    original per-call load was a large I/O bottleneck.

    Args:
        pix: pixel value encoding an instance index (decoded by u.get_index).

    Returns:
        str: the instance class name, or '<UNK>' if the decoded index is
        out of bounds for the labels file.
    """
    labels = getattr(get_label, '_labels_cache', None)
    if labels is None:
        labels = u.load_labels('C:/Users/ustundag/Github/2D-3D-Semantics/assets/semantic_labels.json')
        get_label._labels_cache = labels
    i = u.get_index(pix)
    if i < len(labels):
        # parse_label yields a dict of instance attributes; we want the class.
        return u.parse_label(labels[i])["instance_class"]
    return '<UNK>'  # index out of bounds in the labels file
def image2tensor(path, dim):
    """Load an image, resize it to (dim, dim), and return a (1, 3, dim, dim) tensor.

    `dim` must be double the spherical CNN bandwidth.
    """
    resized = Image.open(path).resize((dim, dim))
    pixels = np.asarray(resized, dtype=np.float32)
    tensor = tfun.to_tensor(pixels)[:3, :, :]  # keep only the first 3 channels
    return tensor.unsqueeze_(0)  # add a leading batch dimension
def save_features_and_labels(file):
    """Extract S2CNN features for pixels of valid objects and save to a .mat file.

    For every panorama RGB image in the hard-coded area_3 folder:
    run the spherical CNN, flatten the feature map, look up the matching
    semantic image, and for each pixel whose label is in VALID_OBJECTS
    collect the corresponding feature column and the label index.

    Args:
        file: output path for savemat with keys 'features' and 'labels'.

    NOTE(review): calls get_label() once per pixel (twice per valid pixel) —
    very slow; the commented-out unique_pixels line hints at an intended
    optimization. The "/ 85" progress string assumes 85 input images.
    """
    paths = glob.glob("C:\\Users\\ustundag\\GitHub\\2D-3D-Semantics\\area_3\\pano\\rgb\\*.png")
    features = []
    labels = []
    s2cnn = S2ConvNet_deep(bandwidth=64)
    s2cnn.to(DEVICE)
    i = 1
    for path in paths:
        clear_output(wait=True)
        tensor = image2tensor(path, dim=128) # 'dim' must be double of bandwidth
        images = tensor.to(DEVICE)
        fmap = s2cnn(images) # torch.Size([1, 64, 16, 16, 16])
        fmap = fmap.detach().cpu().numpy()
        # keep batch 0 and the first gamma slice only
        fmap = fmap[0, :, :, :, 0] # torch.Size([64, 16, 16])
        # flatten spatial dims: one feature column per spatial location
        fmap = fmap.reshape(fmap.shape[0], fmap.shape[1]*fmap.shape[2])
        # Replace 2 occurrences to find counterpart of RGB image as Semantic
        sem_file = path.replace("rgb", "semantic", 2)
        # resize semantic image to match the 16x16 feature-map resolution
        sem_img = np.asarray(Image.open(sem_file).resize((16,16)))
        print("sem_img.shape: ", sem_img.shape)
        sem_pixels = sem_img.reshape(sem_img.shape[0]*sem_img.shape[1], sem_img.shape[2])
        #unique_pixels = np.unique(sem_pixels, axis=0)
        # index of the first matching pixel row paired with its class name,
        # restricted to the 7 valid object classes
        valid_indexes = [[np.argwhere((sem_pixels == p).all(axis=1))[0,0], get_label(p)]
                         for p in sem_pixels
                         if get_label(p) in VALID_OBJECTS]
        # first value = feature index, second value = label
        for idx in valid_indexes:
            features.append(fmap[:, idx[0]])
            labels.append(VALID_OBJECTS.index(idx[1]))
        display(str(i) + " / 85")
        i += 1
    savemat(file,{'features': np.asarray(features),
                  'labels' : np.asarray(labels)})
# -
file = 'area_3_data_pano_s2cnn_dims_128_128_16_16.mat'
save_features_and_labels(file)
data = loadmat("C:\\Users\\ustundag\\GitHub\\2D-3D-Semantics\\"+file)
features = data["features"]
labels = data["labels"]
features.shape
labels.shape
| test_Spherical_Feature_Extraction_using_S2CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Spatial queries: Point in Polygon & Intersect
#
# Finding out if a certain point is located inside or outside of an area, or finding out if a line intersects with another line or polygon are fundamental geospatial operations that are often used e.g. to select data based on location. Such spatial queries are one of the typical first steps of the workflow when doing spatial analysis. Performing a spatial join (will be introduced later) between two spatial datasets is one of the most typical applications where Point in Polygon (PIP) query is used.
#
# ## How to check if point is inside a polygon?
#
# Computationally, detecting if a point is inside a Polygon is most commonly done using a specific formula called [Ray Casting algorithm](https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm). Luckily, we do not need to create such a function ourselves for conducting the Point in Polygon (PIP) query. Instead, we can take advantage of [Shapely's binary predicates](http://toblerity.org/shapely/manual.html#binary-predicates) that can evaluate the topolocical relationships between geographical objects, such as the PIP as we're interested here.
#
# There are basically two ways of conducting PIP in Shapely:
#
# 1. using a function called [.within()](http://toblerity.org/shapely/manual.html#object.within) that checks if a point is within a polygon
# 2. using a function called [.contains()](http://toblerity.org/shapely/manual.html#object.contains) that checks if a polygon contains a point
#
# Notice: even though we are talking here about **Point** in Polygon operation, it is also possible to check if a LineString or Polygon is inside another Polygon.
#
# - Let's first create a Polygon using a list of coordinate-tuples and a couple of Point objects
# +
from shapely.geometry import Point, Polygon
# Create Point objects (x, y given as longitude, latitude pairs)
p1 = Point(24.952242, 60.1696017)
p2 = Point(24.976567, 60.1612500)
# Create a Polygon from a list of (x, y) coordinate tuples
coords = [(24.950899, 60.169158), (24.953492, 60.169158), (24.953510, 60.170104), (24.950958, 60.169990)]
poly = Polygon(coords)
# Let's check what we have (prints the WKT representation of each geometry)
print(p1)
print(p2)
print(poly)
# -
# - Let's check if those points are `within` the polygon
# +
# Check if p1 is within the polygon using the within function
p1_within = p1.within(poly)
# Check if p2 is within the polygon
p2_within = p2.within(poly)
# Print the results (True when the point falls inside the polygon)
print("Is p1 within the polygon?: ", p1_within)
print("Is p2 within the polygon?: ", p2_within)
# -
# Okay, so we can see that the first point seems to be inside the polygon and the other one doesn't.
#
# - In fact, the first point is close to the center of the polygon as we can see:
# Compare p1's coordinates with the polygon's centroid — they are close
print(p1)
print(poly.centroid)
# - It is also possible to do PIP other way around, i.e. to check if polygon contains a point:
# +
# Does polygon contain point 1 (the inverse formulation of within())
print("Does polygon contain p1?: ", poly.contains(p1))
# What about the other point?
print("Does polygon contain p2?: ", poly.contains(p2))
# -
# Thus, both ways have the same results.
#
# Which one should you use then? Well, it depends:
#
# - if you have many points and just one polygon and you try to find out which one of them is inside the polygon:
#
# - you need to iterate over the points and check one at a time if it is **within()** the polygon specified
#
# - if you have many polygons and just one point and you want to find out which polygon contains the point
#
# - you need to iterate over the polygons until you find a polygon that **contains()** the point specified (assuming there are no overlapping polygons)
# ## Intersect
#
# Another typical geospatial operation is to see if a geometry [intersect](http://toblerity.org/shapely/manual.html#object.intersects) or [touches](http://toblerity.org/shapely/manual.html#object.touches) another one. The difference between these two is that:
#
# - if objects intersect, the boundary and interior of an object needs to intersect in any way with those of the other.
# - If an object touches the other one, it is only necessary to have (at least) a single point of their boundaries in common but their interiors should NOT intersect.
#
# Let's try these out.
#
# - Let's create two LineStrings
# +
from shapely.geometry import LineString, MultiLineString
# Create two lines that share the endpoint (1, 1)
line_a = LineString([(0, 0), (1, 1)])
line_b = LineString([(1, 1), (0, 2)])
# -
# - Let's see if they intersect
line_a.intersects(line_b)  # True here — the lines share the point (1, 1)
# - Do they also touch each other?
line_a.touches(line_b)  # True here — they meet only at the boundary point (1, 1)
# Indeed, they do and we can see this by plotting the features together
# Create a MultiLineString to visualize both lines together
multi_line = MultiLineString([line_a, line_b])
multi_line  # notebook displays the combined geometry
# Thus, the line_b continues from the same node ( (1,1) ) where line_a ends.
#
# - However, if the lines overlap fully, they don't touch, as we can see:
# +
# Check if line_a touches itself — False, because fully overlapping
# geometries have intersecting interiors, which touches() excludes
print("Touches?: ", line_a.touches(line_a))
# However, it does intersect
print("Intersects?: ", line_a.intersects(line_a))
| source/codes/Lesson3-point-in-polygon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tuple
# A tuple is a collection which is ordered and unchangeable. In Python tuples are written with round brackets.
# e.g - Create a Tuple:
# Build a three-item tuple with round brackets and show it
thistuple = ("apple", "banana", "cherry")
print(thistuple)
# # Access Tuple Items
# You can access tuple items by referring to the index number, inside square brackets:
# e.g - Print the second item in the tuple:
thistuple = ("apple", "banana", "cherry")
print(thistuple[1])  # index 1 is the second item (indexing starts at 0)
# # Negative Indexing
# Negative indexing means beginning from the end, -1 refers to the last item, -2 refers to the second last item etc.
# e.g - Print the last item of the tuple:
thistuple = ("apple", "banana", "cherry")
print(thistuple[-1])  # -1 indexes the last item
# # Range of Indexes
# You can specify a range of indexes by specifying where to start and where to end the range.
# When specifying a range, the return value will be a new tuple with the specified items.
# e.g - Return the third, fourth, and fifth item:
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[2:5])  # slice yields indices 2, 3, 4 — the stop index 5 is excluded
# + active=""
# *Note: The search will start at index 2 (included) and end at index 5 (not included).
# + active=""
# *Remember that the first item has index 0.
# -
# # Range of Negative Indexes
# Specify negative indexes if you want to start the search from the end of the tuple:
# e.g - This example returns the items from index -4 (included) to index -1 (excluded)
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[-4:-1])  # from index -4 (included) up to -1 (excluded)
# # Change Tuple Values
# Once a tuple is created, you cannot change its values. Tuples are unchangeable, or immutable as it also is called.
# But there is a workaround. You can convert the tuple into a list, change the list, and convert the list back into a tuple.
# e.g - Convert the tuple into a list to be able to change it:
# +
# Tuples are immutable, so round-trip through a mutable list to "edit" one
x = ("apple", "banana", "cherry")
y = list(x)      # tuple -> list (a mutable copy)
y[1] = "kiwi"    # change the second item
x = tuple(y)     # list -> new tuple, rebound to x
print(x)
# -
# # Loop Through a Tuple
# You can loop through the tuple items by using a for loop.
# e.g - Iterate through the items and print the values:
# Tuples are iterable: print each item in order
thistuple = ("apple", "banana", "cherry")
for x in thistuple:
  print(x)
# # Check if Item Exists
# To determine if a specified item is present in a tuple use the in keyword:
# e.g - Check if "apple" is present in the tuple:
thistuple = ("apple", "banana", "cherry")
# Membership test with the `in` keyword
if "apple" in thistuple:
  print("Yes, 'apple' is in the fruits tuple")
# # Tuple Length
# To determine how many items a tuple has, use the len() function:
# e.g - Print the number of items in the tuple:
thistuple = ("apple", "banana", "cherry")
print(len(thistuple))  # len() returns the number of items
# # Add Items
# Once a tuple is created, you cannot add items to it. Tuples are unchangeable.
# + active=""
# e.g - You cannot add items to a tuple:
# -
thistuple = ("apple", "banana", "cherry")
# Tuples do not support item assignment, so the next line is expected
# to raise a TypeError — this cell deliberately demonstrates the failure.
thistuple[3] = "orange" # This will raise an error
print(thistuple)
# # Create Tuple With One Item
# To create a tuple with only one item, you have to add a comma after the item, otherwise Python will not recognize it as a tuple.
# e.g - One item tuple, remember the comma:
# +
thistuple = ("apple",)  # the trailing comma is what makes this a one-item tuple
print(type(thistuple))
# NOT a tuple: without the comma the parentheses are just grouping, leaving a str
thistuple = ("apple")
print(type(thistuple))
# -
# # Remove Items
# + active=""
# *Note: You cannot remove items in a tuple.
# -
# Tuples are unchangeable, so you cannot remove items from it, but you can delete the tuple completely:
# e.g - The del keyword can delete the tuple completely:
thistuple = ("apple", "banana", "cherry")
del thistuple  # del unbinds the name entirely
# Deliberate demonstration: this raises NameError since the name is gone
print(thistuple) #this will raise an error because the tuple no longer exists
# # Join Two Tuples
# +
# The + operator concatenates two tuples into a new one
tuple1 = ("a", "b" , "c")
tuple2 = (1, 2, 3)
tuple3 = tuple1 + tuple2
print(tuple3)
# -
# # The tuple() Constructor
# It is also possible to use the tuple() constructor to make a tuple.
# The tuple() constructor accepts any iterable; here the inner () is a tuple literal
thistuple = tuple(("apple", "banana", "cherry"))
# note the double round-brackets
print(thistuple)
# # Tuple Methods
# Python has two built-in methods that you can use on tuples.
# + active=""
# count() Returns the number of times a specified value occurs in a tuple
# + active=""
# index() Searches the tuple for a specified value and returns the position of where it was found
| Python Tuples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "Project Euler Problems"
# > https://projecteuler.net/ is a website with a bunch of problems to solve that require critical thinking and programming skills. For this blog post, I will solve three of the problems from this website.
# ## Problem 9: Special Pythagorean Triplet
#
# https://projecteuler.net/problem=9
# This problem had been solved by 360,681 people at the time of selection.
#
# The goal of this code is to find the pythagorean triplet $a^2 + b^2 = c^2$ where $a < b < c$ such that $a + b + c = 1000$.
#
# This solution runs two for loops to try different values of a and b and generates c such that $c = \sqrt{a^2 + b^2}$. To enforce the inequality, the if statement then checks if the values follow the pattern $ a < b < c $. If the inequality is true, the next if statement checks if the sum of a, b, and c is equal to 1000. If so, it prints the values of each variable and the product of all three.
# +
def special_triplet():
    """ Find the pythagorean triplet a < b < c such that
    a, b, and c sum to 1000.
    Print the values and the product a*b*c.

    Returns:
        int: The product a*b*c of the (unique) triplet, or None if
        no such triplet exists.
    """
    # Fix: derive c exactly as 1000 - a - b and compare squares with
    # integer arithmetic. The original computed c = sqrt(a^2 + b^2) as a
    # float and tested `a + b + c == 1000` with float equality, which is
    # fragile; it also scanned all 10^6 (a, b) pairs with no early exit.
    for a in range(1, 1000):
        # Starting b at a + 1 enforces a < b without a separate check.
        for b in range(a + 1, 1000):
            c = 1000 - a - b
            if b < c and a * a + b * b == c * c:
                print("a = ", a, "b = ", b, "c = ", c, "abc = ", a*b*c)
                return a * b * c
    return None

special_triplet()
# -
# ## Problem 42: Coded Triangle Numbers
#
# https://projecteuler.net/problem=42
#
# This problem had 74,916 solutions at the time of selection.
#
# The goal of this code is to count the number of words in the given file that are triangle words. A triangle word is a word where the number corresponding to each letter in the word (A = 1, B = 2, C = 3, ..., Z = 26) sum to a value, x, that satisfies the following condition: $x = \frac{1}{2}n(n+1)$ where n is a natural number.
#
# This is the process followed by the code:
#
# 1. Read the file 'p042_words.txt' into the function triangle_nums().
#
# 2. Because the given data file had capitalized words in quotation marks separated by commas, all quotation marks are then removed with the replace function and words are split by commas with the split function.
#
# 3. Within the for loop, the ord() command converts letters to numbers and subtracts 64 to reset capitalized letters to 1, 2, 3, etc.. The value of each letter in this word is added to the list nums.
#
# 4. To check if the sum, s = sum(nums), is a triangle number, I checked if $\frac{2s}{j(j+1)} = 1$ for $j \in N$, $j \leq s$. If the number associated with this word is a triangle number, making this word a triangle word, add one to the count.
#
# 5. The for loop will then repeat steps 3 and 4 for each word. Once all words are done, the function triangle_nums() will return the number of triangle words.
#
# 6. Finally, a print statement prints a sentence stating the number of triangle words.
#
#
# %%file p042_words.txt
"A","ABILITY","ABLE","ABOUT","ABOVE","ABSENCE","ABSOLUTELY","ACADEMIC","ACCEPT","ACCESS","ACCIDENT","ACCOMPANY","ACCORDING","ACCOUNT","ACHIEVE","ACHIEVEMENT","ACID","ACQUIRE","ACROSS","ACT","ACTION","ACTIVE","ACTIVITY","ACTUAL","ACTUALLY","ADD","ADDITION","ADDITIONAL","ADDRESS","ADMINISTRATION","ADMIT","ADOPT","ADULT","ADVANCE","ADVANTAGE","ADVICE","ADVISE","AFFAIR","AFFECT","AFFORD","AFRAID","AFTER","AFTERNOON","AFTERWARDS","AGAIN","AGAINST","AGE","AGENCY","AGENT","AGO","AGREE","AGREEMENT","AHEAD","AID","AIM","AIR","AIRCRAFT","ALL","ALLOW","ALMOST","ALONE","ALONG","ALREADY","ALRIGHT","ALSO","ALTERNATIVE","ALTHOUGH","ALWAYS","AMONG","AMONGST","AMOUNT","AN","ANALYSIS","ANCIENT","AND","ANIMAL","ANNOUNCE","ANNUAL","ANOTHER","ANSWER","ANY","ANYBODY","ANYONE","ANYTHING","ANYWAY","APART","APPARENT","APPARENTLY","APPEAL","APPEAR","APPEARANCE","APPLICATION","APPLY","APPOINT","APPOINTMENT","APPROACH","APPROPRIATE","APPROVE","AREA","ARGUE","ARGUMENT","ARISE","ARM","ARMY","AROUND","ARRANGE","ARRANGEMENT","ARRIVE","ART","ARTICLE","ARTIST","AS","ASK","ASPECT","ASSEMBLY","ASSESS","ASSESSMENT","ASSET","ASSOCIATE","ASSOCIATION","ASSUME","ASSUMPTION","AT","ATMOSPHERE","ATTACH","ATTACK","ATTEMPT","ATTEND","ATTENTION","ATTITUDE","ATTRACT","ATTRACTIVE","AUDIENCE","AUTHOR","AUTHORITY","AVAILABLE","AVERAGE","AVOID","AWARD","AWARE","AWAY","AYE","BABY","BACK","BACKGROUND","BAD","BAG","BALANCE","BALL","BAND","BANK","BAR","BASE","BASIC","BASIS","BATTLE","BE","BEAR","BEAT","BEAUTIFUL","BECAUSE","BECOME","BED","BEDROOM","BEFORE","BEGIN","BEGINNING","BEHAVIOUR","BEHIND","BELIEF","BELIEVE","BELONG","BELOW","BENEATH","BENEFIT","BESIDE","BEST","BETTER","BETWEEN","BEYOND","BIG","BILL","BIND","BIRD","BIRTH","BIT","BLACK","BLOCK","BLOOD","BLOODY","BLOW","BLUE","BOARD","BOAT","BODY","BONE","BOOK","BORDER","BOTH","BOTTLE","BOTTOM","BOX","BOY","BRAIN","BRANCH","BREAK","BREATH","BRIDGE","BRIEF","BRIGHT","BRING","BROAD","BROTHER","BUDGET","BUILD","BUILDING","BURN","BUS","BUSINESS","BUSY","BUT","BUY","BY",
"CABINET","CALL","CAMPAIGN","CAN","CANDIDATE","CAPABLE","CAPACITY","CAPITAL","CAR","CARD","CARE","CAREER","CAREFUL","CAREFULLY","CARRY","CASE","CASH","CAT","CATCH","CATEGORY","CAUSE","CELL","CENTRAL","CENTRE","CENTURY","CERTAIN","CERTAINLY","CHAIN","CHAIR","CHAIRMAN","CHALLENGE","CHANCE","CHANGE","CHANNEL","CHAPTER","CHARACTER","CHARACTERISTIC","CHARGE","CHEAP","CHECK","CHEMICAL","CHIEF","CHILD","CHOICE","CHOOSE","CHURCH","CIRCLE","CIRCUMSTANCE","CITIZEN","CITY","CIVIL","CLAIM","CLASS","CLEAN","CLEAR","CLEARLY","CLIENT","CLIMB","CLOSE","CLOSELY","CLOTHES","CLUB","COAL","CODE","COFFEE","COLD","COLLEAGUE","COLLECT","COLLECTION","COLLEGE","COLOUR","COMBINATION","COMBINE","COME","COMMENT","COMMERCIAL","COMMISSION","COMMIT","COMMITMENT","COMMITTEE","COMMON","COMMUNICATION","COMMUNITY","COMPANY","COMPARE","COMPARISON","COMPETITION","COMPLETE","COMPLETELY","COMPLEX","COMPONENT","COMPUTER","CONCENTRATE","CONCENTRATION","CONCEPT","CONCERN","CONCERNED","CONCLUDE","CONCLUSION","CONDITION","CONDUCT","CONFERENCE","CONFIDENCE","CONFIRM","CONFLICT","CONGRESS","CONNECT","CONNECTION","CONSEQUENCE","CONSERVATIVE","CONSIDER","CONSIDERABLE","CONSIDERATION","CONSIST","CONSTANT","CONSTRUCTION","CONSUMER","CONTACT","CONTAIN","CONTENT","CONTEXT","CONTINUE","CONTRACT","CONTRAST","CONTRIBUTE","CONTRIBUTION","CONTROL","CONVENTION","CONVERSATION","COPY","CORNER","CORPORATE","CORRECT","COS","COST","COULD","COUNCIL","COUNT","COUNTRY","COUNTY","COUPLE","COURSE","COURT","COVER","CREATE","CREATION","CREDIT","CRIME","CRIMINAL","CRISIS","CRITERION","CRITICAL","CRITICISM","CROSS","CROWD","CRY","CULTURAL","CULTURE","CUP","CURRENT","CURRENTLY","CURRICULUM","CUSTOMER","CUT","DAMAGE","DANGER","DANGEROUS","DARK","DATA","DATE","DAUGHTER","DAY","DEAD","DEAL","DEATH","DEBATE","DEBT","DECADE","DECIDE","DECISION","DECLARE","DEEP","DEFENCE","DEFENDANT","DEFINE","DEFINITION","DEGREE","DELIVER","DEMAND","DEMOCRATIC","DEMONSTRATE","DENY","DEPARTMENT","DEPEND","DEPUTY","DERIVE","DESCRIBE","DESCRIPTION","DESIGN","DES
IRE","DESK","DESPITE","DESTROY","DETAIL","DETAILED","DETERMINE","DEVELOP","DEVELOPMENT","DEVICE","DIE","DIFFERENCE","DIFFERENT","DIFFICULT","DIFFICULTY","DINNER","DIRECT","DIRECTION","DIRECTLY","DIRECTOR","DISAPPEAR","DISCIPLINE","DISCOVER","DISCUSS","DISCUSSION","DISEASE","DISPLAY","DISTANCE","DISTINCTION","DISTRIBUTION","DISTRICT","DIVIDE","DIVISION","DO","DOCTOR","DOCUMENT","DOG","DOMESTIC","DOOR","DOUBLE","DOUBT","DOWN","DRAW","DRAWING","DREAM","DRESS","DRINK","DRIVE","DRIVER","DROP","DRUG","DRY","DUE","DURING","DUTY","EACH","EAR","EARLY","EARN","EARTH","EASILY","EAST","EASY","EAT","ECONOMIC","ECONOMY","EDGE","EDITOR","EDUCATION","EDUCATIONAL","EFFECT","EFFECTIVE","EFFECTIVELY","EFFORT","EGG","EITHER","ELDERLY","ELECTION","ELEMENT","ELSE","ELSEWHERE","EMERGE","EMPHASIS","EMPLOY","EMPLOYEE","EMPLOYER","EMPLOYMENT","EMPTY","ENABLE","ENCOURAGE","END","ENEMY","ENERGY","ENGINE","ENGINEERING","ENJOY","ENOUGH","ENSURE","ENTER","ENTERPRISE","ENTIRE","ENTIRELY","ENTITLE","ENTRY","ENVIRONMENT","ENVIRONMENTAL","EQUAL","EQUALLY","EQUIPMENT","ERROR","ESCAPE","ESPECIALLY","ESSENTIAL","ESTABLISH","ESTABLISHMENT","ESTATE","ESTIMATE","EVEN","EVENING","EVENT","EVENTUALLY","EVER","EVERY","EVERYBODY","EVERYONE","EVERYTHING","EVIDENCE","EXACTLY","EXAMINATION","EXAMINE","EXAMPLE","EXCELLENT","EXCEPT","EXCHANGE","EXECUTIVE","EXERCISE","EXHIBITION","EXIST","EXISTENCE","EXISTING","EXPECT","EXPECTATION","EXPENDITURE","EXPENSE","EXPENSIVE","EXPERIENCE","EXPERIMENT","EXPERT","EXPLAIN","EXPLANATION","EXPLORE","EXPRESS","EXPRESSION","EXTEND","EXTENT","EXTERNAL","EXTRA","EXTREMELY","EYE","FACE","FACILITY","FACT","FACTOR","FACTORY","FAIL","FAILURE","FAIR","FAIRLY","FAITH","FALL","FAMILIAR","FAMILY","FAMOUS","FAR","FARM","FARMER","FASHION","FAST","FATHER","FAVOUR","FEAR","FEATURE","FEE","FEEL","FEELING","FEMALE","FEW","FIELD","FIGHT","FIGURE","FILE","FILL","FILM","FINAL","FINALLY","FINANCE","FINANCIAL","FIND","FINDING","FINE","FINGER","FINISH","FIRE","FIRM","FIRST","FISH","FIT","FIX","FLAT","FL
IGHT","FLOOR","FLOW","FLOWER","FLY","FOCUS","FOLLOW","FOLLOWING","FOOD","FOOT","FOOTBALL","FOR","FORCE","FOREIGN","FOREST","FORGET","FORM","FORMAL","FORMER","FORWARD","FOUNDATION","FREE","FREEDOM","FREQUENTLY","FRESH","FRIEND","FROM","FRONT","FRUIT","FUEL","FULL","FULLY","FUNCTION","FUND","FUNNY","FURTHER","FUTURE","GAIN","GAME","GARDEN","GAS","GATE","GATHER","GENERAL","GENERALLY","GENERATE","GENERATION","GENTLEMAN","GET","GIRL","GIVE","GLASS","GO","GOAL","GOD","GOLD","GOOD","GOVERNMENT","GRANT","GREAT","GREEN","GREY","GROUND","GROUP","GROW","GROWING","GROWTH","GUEST","GUIDE","GUN","HAIR","HALF","HALL","HAND","HANDLE","HANG","HAPPEN","HAPPY","HARD","HARDLY","HATE","HAVE","HE","HEAD","HEALTH","HEAR","HEART","HEAT","HEAVY","HELL","HELP","HENCE","HER","HERE","HERSELF","HIDE","HIGH","HIGHLY","HILL","HIM","HIMSELF","HIS","HISTORICAL","HISTORY","HIT","HOLD","HOLE","HOLIDAY","HOME","HOPE","HORSE","HOSPITAL","HOT","HOTEL","HOUR","HOUSE","HOUSEHOLD","HOUSING","HOW","HOWEVER","HUGE","HUMAN","HURT","HUSBAND","I","IDEA","IDENTIFY","IF","IGNORE","ILLUSTRATE","IMAGE","IMAGINE","IMMEDIATE","IMMEDIATELY","IMPACT","IMPLICATION","IMPLY","IMPORTANCE","IMPORTANT","IMPOSE","IMPOSSIBLE","IMPRESSION","IMPROVE","IMPROVEMENT","IN","INCIDENT","INCLUDE","INCLUDING","INCOME","INCREASE","INCREASED","INCREASINGLY","INDEED","INDEPENDENT","INDEX","INDICATE","INDIVIDUAL","INDUSTRIAL","INDUSTRY","INFLUENCE","INFORM","INFORMATION","INITIAL","INITIATIVE","INJURY","INSIDE","INSIST","INSTANCE","INSTEAD","INSTITUTE","INSTITUTION","INSTRUCTION","INSTRUMENT","INSURANCE","INTEND","INTENTION","INTEREST","INTERESTED","INTERESTING","INTERNAL","INTERNATIONAL","INTERPRETATION","INTERVIEW","INTO","INTRODUCE","INTRODUCTION","INVESTIGATE","INVESTIGATION","INVESTMENT","INVITE","INVOLVE","IRON","IS","ISLAND","ISSUE","IT","ITEM","ITS","ITSELF","JOB","JOIN","JOINT","JOURNEY","JUDGE","JUMP","JUST","JUSTICE","KEEP","KEY","KID","KILL","KIND","KING","KITCHEN","KNEE","KNOW","KNOWLEDGE","LABOUR","LACK","LADY","LAND","LANGUAG
E","LARGE","LARGELY","LAST","LATE","LATER","LATTER","LAUGH","LAUNCH","LAW","LAWYER","LAY","LEAD","LEADER","LEADERSHIP","LEADING","LEAF","LEAGUE","LEAN","LEARN","LEAST","LEAVE","LEFT","LEG","LEGAL","LEGISLATION","LENGTH","LESS","LET","LETTER","LEVEL","LIABILITY","LIBERAL","LIBRARY","LIE","LIFE","LIFT","LIGHT","LIKE","LIKELY","LIMIT","LIMITED","LINE","LINK","LIP","LIST","LISTEN","LITERATURE","LITTLE","LIVE","LIVING","LOAN","LOCAL","LOCATION","LONG","LOOK","LORD","LOSE","LOSS","LOT","LOVE","LOVELY","LOW","LUNCH","MACHINE","MAGAZINE","MAIN","MAINLY","MAINTAIN","MAJOR","MAJORITY","MAKE","MALE","MAN","MANAGE","MANAGEMENT","MANAGER","MANNER","MANY","MAP","MARK","MARKET","MARRIAGE","MARRIED","MARRY","MASS","MASTER","MATCH","MATERIAL","MATTER","MAY","MAYBE","ME","MEAL","MEAN","MEANING","MEANS","MEANWHILE","MEASURE","MECHANISM","MEDIA","MEDICAL","MEET","MEETING","MEMBER","MEMBERSHIP","MEMORY","MENTAL","MENTION","MERELY","MESSAGE","METAL","METHOD","MIDDLE","MIGHT","MILE","MILITARY","MILK","MIND","MINE","MINISTER","MINISTRY","MINUTE","MISS","MISTAKE","MODEL","MODERN","MODULE","MOMENT","MONEY","MONTH","MORE","MORNING","MOST","MOTHER","MOTION","MOTOR","MOUNTAIN","MOUTH","MOVE","MOVEMENT","MUCH","MURDER","MUSEUM","MUSIC","MUST","MY","MYSELF","NAME","NARROW","NATION","NATIONAL","NATURAL","NATURE","NEAR","NEARLY","NECESSARILY","NECESSARY","NECK","NEED","NEGOTIATION","NEIGHBOUR","NEITHER","NETWORK","NEVER","NEVERTHELESS","NEW","NEWS","NEWSPAPER","NEXT","NICE","NIGHT","NO","NOBODY","NOD","NOISE","NONE","NOR","NORMAL","NORMALLY","NORTH","NORTHERN","NOSE","NOT","NOTE","NOTHING","NOTICE","NOTION","NOW","NUCLEAR","NUMBER","NURSE","OBJECT","OBJECTIVE","OBSERVATION","OBSERVE","OBTAIN","OBVIOUS","OBVIOUSLY","OCCASION","OCCUR","ODD","OF","OFF","OFFENCE","OFFER","OFFICE","OFFICER","OFFICIAL","OFTEN","OIL","OKAY","OLD","ON","ONCE","ONE","ONLY","ONTO","OPEN","OPERATE","OPERATION","OPINION","OPPORTUNITY","OPPOSITION","OPTION","OR","ORDER","ORDINARY","ORGANISATION","ORGANISE","ORGANIZATION","ORIGI
N","ORIGINAL","OTHER","OTHERWISE","OUGHT","OUR","OURSELVES","OUT","OUTCOME","OUTPUT","OUTSIDE","OVER","OVERALL","OWN","OWNER","PACKAGE","PAGE","PAIN","PAINT","PAINTING","PAIR","PANEL","PAPER","PARENT","PARK","PARLIAMENT","PART","PARTICULAR","PARTICULARLY","PARTLY","PARTNER","PARTY","PASS","PASSAGE","PAST","PATH","PATIENT","PATTERN","PAY","PAYMENT","PEACE","PENSION","PEOPLE","PER","PERCENT","PERFECT","PERFORM","PERFORMANCE","PERHAPS","PERIOD","PERMANENT","PERSON","PERSONAL","PERSUADE","PHASE","PHONE","PHOTOGRAPH","PHYSICAL","PICK","PICTURE","PIECE","PLACE","PLAN","PLANNING","PLANT","PLASTIC","PLATE","PLAY","PLAYER","PLEASE","PLEASURE","PLENTY","PLUS","POCKET","POINT","POLICE","POLICY","POLITICAL","POLITICS","POOL","POOR","POPULAR","POPULATION","POSITION","POSITIVE","POSSIBILITY","POSSIBLE","POSSIBLY","POST","POTENTIAL","POUND","POWER","POWERFUL","PRACTICAL","PRACTICE","PREFER","PREPARE","PRESENCE","PRESENT","PRESIDENT","PRESS","PRESSURE","PRETTY","PREVENT","PREVIOUS","PREVIOUSLY","PRICE","PRIMARY","PRIME","PRINCIPLE","PRIORITY","PRISON","PRISONER","PRIVATE","PROBABLY","PROBLEM","PROCEDURE","PROCESS","PRODUCE","PRODUCT","PRODUCTION","PROFESSIONAL","PROFIT","PROGRAM","PROGRAMME","PROGRESS","PROJECT","PROMISE","PROMOTE","PROPER","PROPERLY","PROPERTY","PROPORTION","PROPOSE","PROPOSAL","PROSPECT","PROTECT","PROTECTION","PROVE","PROVIDE","PROVIDED","PROVISION","PUB","PUBLIC","PUBLICATION","PUBLISH","PULL","PUPIL","PURPOSE","PUSH","PUT","QUALITY","QUARTER","QUESTION","QUICK","QUICKLY","QUIET","QUITE","RACE","RADIO","RAILWAY","RAIN","RAISE","RANGE","RAPIDLY","RARE","RATE","RATHER","REACH","REACTION","READ","READER","READING","READY","REAL","REALISE","REALITY","REALIZE","REALLY","REASON","REASONABLE","RECALL","RECEIVE","RECENT","RECENTLY","RECOGNISE","RECOGNITION","RECOGNIZE","RECOMMEND","RECORD","RECOVER","RED","REDUCE","REDUCTION","REFER","REFERENCE","REFLECT","REFORM","REFUSE","REGARD","REGION","REGIONAL","REGULAR","REGULATION","REJECT","RELATE","RELATION","RELATIONSHIP","
RELATIVE","RELATIVELY","RELEASE","RELEVANT","RELIEF","RELIGION","RELIGIOUS","RELY","REMAIN","REMEMBER","REMIND","REMOVE","REPEAT","REPLACE","REPLY","REPORT","REPRESENT","REPRESENTATION","REPRESENTATIVE","REQUEST","REQUIRE","REQUIREMENT","RESEARCH","RESOURCE","RESPECT","RESPOND","RESPONSE","RESPONSIBILITY","RESPONSIBLE","REST","RESTAURANT","RESULT","RETAIN","RETURN","REVEAL","REVENUE","REVIEW","REVOLUTION","RICH","RIDE","RIGHT","RING","RISE","RISK","RIVER","ROAD","ROCK","ROLE","ROLL","ROOF","ROOM","ROUND","ROUTE","ROW","ROYAL","RULE","RUN","RURAL","SAFE","SAFETY","SALE","SAME","SAMPLE","SATISFY","SAVE","SAY","SCALE","SCENE","SCHEME","SCHOOL","SCIENCE","SCIENTIFIC","SCIENTIST","SCORE","SCREEN","SEA","SEARCH","SEASON","SEAT","SECOND","SECONDARY","SECRETARY","SECTION","SECTOR","SECURE","SECURITY","SEE","SEEK","SEEM","SELECT","SELECTION","SELL","SEND","SENIOR","SENSE","SENTENCE","SEPARATE","SEQUENCE","SERIES","SERIOUS","SERIOUSLY","SERVANT","SERVE","SERVICE","SESSION","SET","SETTLE","SETTLEMENT","SEVERAL","SEVERE","SEX","SEXUAL","SHAKE","SHALL","SHAPE","SHARE","SHE","SHEET","SHIP","SHOE","SHOOT","SHOP","SHORT","SHOT","SHOULD","SHOULDER","SHOUT","SHOW","SHUT","SIDE","SIGHT","SIGN","SIGNAL","SIGNIFICANCE","SIGNIFICANT","SILENCE","SIMILAR","SIMPLE","SIMPLY","SINCE","SING","SINGLE","SIR","SISTER","SIT","SITE","SITUATION","SIZE","SKILL","SKIN","SKY","SLEEP","SLIGHTLY","SLIP","SLOW","SLOWLY","SMALL","SMILE","SO","SOCIAL","SOCIETY","SOFT","SOFTWARE","SOIL","SOLDIER","SOLICITOR","SOLUTION","SOME","SOMEBODY","SOMEONE","SOMETHING","SOMETIMES","SOMEWHAT","SOMEWHERE","SON","SONG","SOON","SORRY","SORT","SOUND","SOURCE","SOUTH","SOUTHERN","SPACE","SPEAK","SPEAKER","SPECIAL","SPECIES","SPECIFIC","SPEECH","SPEED","SPEND","SPIRIT","SPORT","SPOT","SPREAD","SPRING","STAFF","STAGE","STAND","STANDARD","STAR","START","STATE","STATEMENT","STATION","STATUS","STAY","STEAL","STEP","STICK","STILL","STOCK","STONE","STOP","STORE","STORY","STRAIGHT","STRANGE","STRATEGY","STREET","STRENGTH","STRIKE","
STRONG","STRONGLY","STRUCTURE","STUDENT","STUDIO","STUDY","STUFF","STYLE","SUBJECT","SUBSTANTIAL","SUCCEED","SUCCESS","SUCCESSFUL","SUCH","SUDDENLY","SUFFER","SUFFICIENT","SUGGEST","SUGGESTION","SUITABLE","SUM","SUMMER","SUN","SUPPLY","SUPPORT","SUPPOSE","SURE","SURELY","SURFACE","SURPRISE","SURROUND","SURVEY","SURVIVE","SWITCH","SYSTEM","TABLE","TAKE","TALK","TALL","TAPE","TARGET","TASK","TAX","TEA","TEACH","TEACHER","TEACHING","TEAM","TEAR","TECHNICAL","TECHNIQUE","TECHNOLOGY","TELEPHONE","TELEVISION","TELL","TEMPERATURE","TEND","TERM","TERMS","TERRIBLE","TEST","TEXT","THAN","THANK","THANKS","THAT","THE","THEATRE","THEIR","THEM","THEME","THEMSELVES","THEN","THEORY","THERE","THEREFORE","THESE","THEY","THIN","THING","THINK","THIS","THOSE","THOUGH","THOUGHT","THREAT","THREATEN","THROUGH","THROUGHOUT","THROW","THUS","TICKET","TIME","TINY","TITLE","TO","TODAY","TOGETHER","TOMORROW","TONE","TONIGHT","TOO","TOOL","TOOTH","TOP","TOTAL","TOTALLY","TOUCH","TOUR","TOWARDS","TOWN","TRACK","TRADE","TRADITION","TRADITIONAL","TRAFFIC","TRAIN","TRAINING","TRANSFER","TRANSPORT","TRAVEL","TREAT","TREATMENT","TREATY","TREE","TREND","TRIAL","TRIP","TROOP","TROUBLE","TRUE","TRUST","TRUTH","TRY","TURN","TWICE","TYPE","TYPICAL","UNABLE","UNDER","UNDERSTAND","UNDERSTANDING","UNDERTAKE","UNEMPLOYMENT","UNFORTUNATELY","UNION","UNIT","UNITED","UNIVERSITY","UNLESS","UNLIKELY","UNTIL","UP","UPON","UPPER","URBAN","US","USE","USED","USEFUL","USER","USUAL","USUALLY","VALUE","VARIATION","VARIETY","VARIOUS","VARY","VAST","VEHICLE","VERSION","VERY","VIA","VICTIM","VICTORY","VIDEO","VIEW","VILLAGE","VIOLENCE","VISION","VISIT","VISITOR","VITAL","VOICE","VOLUME","VOTE","WAGE","WAIT","WALK","WALL","WANT","WAR","WARM","WARN","WASH","WATCH","WATER","WAVE","WAY","WE","WEAK","WEAPON","WEAR","WEATHER","WEEK","WEEKEND","WEIGHT","WELCOME","WELFARE","WELL","WEST","WESTERN","WHAT","WHATEVER","WHEN","WHERE","WHEREAS","WHETHER","WHICH","WHILE","WHILST","WHITE","WHO","WHOLE","WHOM","WHOSE","WHY","WIDE","WIDELY","W
IFE","WILD","WILL","WIN","WIND","WINDOW","WINE","WING","WINNER","WINTER","WISH","WITH","WITHDRAW","WITHIN","WITHOUT","WOMAN","WONDER","WONDERFUL","WOOD","WORD","WORK","WORKER","WORKING","WORKS","WORLD","WORRY","WORTH","WOULD","WRITE","WRITER","WRITING","WRONG","YARD","YEAH","YEAR","YES","YESTERDAY","YET","YOU","YOUNG","YOUR","YOURSELF","YOUTH"
# +
import math

def triangle_nums(file):
    """ Calculate the sum of the numerical values of each word,
    and determine if the sum is a triangle number (making the
    word a triangle word). Count how many triangle words are in
    the file.

    Args:
        file (str): Path to a file of comma-separated, double-quoted,
            upper-case words.

    Returns:
        int: The number of triangle words in the file.
    """
    count = 0
    with open(file) as f:
        for line in f:
            # Strip the quoting and split on commas to recover the words.
            for word in line.replace('"', '').split(','):
                # A = 1, B = 2, ..., Z = 26 (ord('A') == 65).
                total = sum(ord(letter) - 64 for letter in word)
                # total is triangular iff 8*total + 1 is a perfect square
                # (solve total = n*(n+1)/2 for n). This is O(1) per word
                # instead of the original O(total) scan, avoids float
                # division, and fixes an off-by-one: the original looped
                # j in range(1, total), never testing j = total, so a
                # word summing to 1 (i.e. "A", the triangle number T_1)
                # was silently missed.
                if total > 0:
                    disc = 8 * total + 1
                    root = math.isqrt(disc)
                    if root * root == disc:
                        count += 1
    return count
# Count the triangle words in the Project Euler data file written above and report.
tri_words = triangle_nums("p042_words.txt")
print("There are", tri_words, "triangle words in the given file.")
# -
#hide
# %%file roman.txt
MMMMDCLXXII
MMDCCCLXXXIII
MMMDLXVIIII
MMMMDXCV
DCCCLXXII
MMCCCVI
MMMCDLXXXVII
MMMMCCXXI
MMMCCXX
MMMMDCCCLXXIII
MMMCCXXXVII
MMCCCLXXXXIX
MDCCCXXIIII
MMCXCVI
CCXCVIII
MMMCCCXXXII
MDCCXXX
MMMDCCCL
MMMMCCLXXXVI
MMDCCCXCVI
MMMDCII
MMMCCXII
MMMMDCCCCI
MMDCCCXCII
MDCXX
CMLXXXVII
MMMXXI
MMMMCCCXIV
MLXXII
MCCLXXVIIII
MMMMCCXXXXI
MMDCCCLXXII
MMMMXXXI
MMMDCCLXXX
MMDCCCLXXIX
MMMMLXXXV
MCXXI
MDCCCXXXVII
MMCCCLXVII
MCDXXXV
CCXXXIII
CMXX
MMMCLXIV
MCCCLXXXVI
DCCCXCVIII
MMMDCCCCXXXIV
CDXVIIII
MMCCXXXV
MDCCCXXXII
MMMMD
MMDCCLXIX
MMMMCCCLXXXXVI
MMDCCXLII
MMMDCCCVIIII
DCCLXXXIIII
MDCCCCXXXII
MMCXXVII
DCCCXXX
CCLXIX
MMMXI
MMMMCMLXXXXVIII
MMMMDLXXXVII
MMMMDCCCLX
MMCCLIV
CMIX
MMDCCCLXXXIIII
CLXXXII
MMCCCCXXXXV
MMMMDLXXXVIIII
MMMDCCCXXI
MMDCCCCLXXVI
MCCCCLXX
MMCDLVIIII
MMMDCCCLIX
MMMMCCCCXIX
MMMDCCCLXXV
XXXI
CDLXXXIII
MMMCXV
MMDCCLXIII
MMDXXX
MMMMCCCLVII
MMMDCI
MMMMCDLXXXIIII
MMMMCCCXVI
CCCLXXXVIII
MMMMCML
MMMMXXIV
MMMCCCCXXX
DCCX
MMMCCLX
MMDXXXIII
CCCLXIII
MMDCCXIII
MMMCCCXLIV
CLXXXXI
CXVI
MMMMCXXXIII
CLXX
DCCCXVIII
MLXVII
DLXXXX
MMDXXI
MMMMDLXXXXVIII
MXXII
LXI
DCCCCXLIII
MMMMDV
MMMMXXXIV
MDCCCLVIII
MMMCCLXXII
MMMMDCCXXXVI
MMMMLXXXIX
MDCCCLXXXI
MMMMDCCCXV
MMMMCCCCXI
MMMMCCCLIII
MDCCCLXXI
MMCCCCXI
MLXV
MMCDLXII
MMMMDXXXXII
MMMMDCCCXL
MMMMCMLVI
CCLXXXIV
MMMDCCLXXXVI
MMCLII
MMMCCCCXV
MMLXXXIII
MMMV
MMMV
DCCLXII
MMDCCCCXVI
MMDCXLVIII
CCLIIII
CCCXXV
MMDCCLXXXVIIII
MMMMDCLXXVIII
MMMMDCCCXCI
MMMMCCCXX
MMCCXLV
MMMDCCCLXIX
MMCCLXIIII
MMMDCCCXLIX
MMMMCCCLXIX
CMLXXXXI
MCMLXXXIX
MMCDLXI
MMDCLXXVIII
MMMMDCCLXI
MCDXXV
DL
CCCLXXII
MXVIIII
MCCCCLXVIII
CIII
MMMDCCLXXIIII
MMMDVIII
MMMMCCCLXXXXVII
MMDXXVII
MMDCCLXXXXV
MMMMCXLVI
MMMDCCLXXXII
MMMDXXXVI
MCXXII
CLI
DCLXXXIX
MMMCLI
MDCLXIII
MMMMDCCXCVII
MMCCCLXXXV
MMMDCXXVIII
MMMCDLX
MMMCMLII
MMMIV
MMMMDCCCLVIII
MMMDLXXXVIII
MCXXIV
MMMMLXXVI
CLXXIX
MMMCCCCXXVIIII
DCCLXXXV
MMMDCCCVI
LI
CLXXXVI
MMMMCCCLXXVI
MCCCLXVI
CCXXXIX
MMDXXXXI
MMDCCCXLI
DCCCLXXXVIII
MMMMDCCCIV
MDCCCCXV
MMCMVI
MMMMCMLXXXXV
MMDCCLVI
MMMMCCXLVIII
DCCCCIIII
MMCCCCIII
MMMDCCLXXXVIIII
MDCCCLXXXXV
DVII
MMMV
DCXXV
MMDCCCXCV
DCVIII
MMCDLXVI
MCXXVIII
MDCCXCVIII
MMDCLX
MMMDCCLXIV
MMCDLXXVII
MMDLXXXIIII
MMMMCCCXXII
MMMDCCCXLIIII
DCCCCLXVII
MMMCLXXXXIII
MCCXV
MMMMDCXI
MMMMDCLXXXXV
MMMCCCLII
MMCMIX
MMDCCXXV
MMDLXXXVI
MMMMDCXXVIIII
DCCCCXXXVIIII
MMCCXXXIIII
MMDCCLXXVIII
MDCCLXVIIII
MMCCLXXXV
MMMMDCCCLXXXVIII
MMCMXCI
MDXLII
MMMMDCCXIV
MMMMLI
DXXXXIII
MMDCCXI
MMMMCCLXXXIII
MMMDCCCLXXIII
MDCLVII
MMCD
MCCCXXVII
MMMMDCCIIII
MMMDCCXLVI
MMMCLXXXVII
MMMCCVIIII
MCCCCLXXIX
DL
DCCCLXXVI
MMDXCI
MMMMDCCCCXXXVI
MMCII
MMMDCCCXXXXV
MMMCDXLV
MMDCXXXXIV
MMD
MDCCCLXXXX
MMDCXLIII
MMCCXXXII
MMDCXXXXVIIII
DCCCLXXI
MDXCVIIII
MMMMCCLXXVIII
MDCLVIIII
MMMCCCLXXXIX
MDCLXXXV
MDLVIII
MMMMCCVII
MMMMDCXIV
MMMCCCLXIIII
MMIIII
MMMMCCCLXXIII
CCIII
MMMCCLV
MMMDXIII
MMMCCCXC
MMMDCCCXXI
MMMMCCCCXXXII
CCCLVI
MMMCCCLXXXVI
MXVIIII
MMMCCCCXIIII
CLXVII
MMMCCLXX
CCCCLXIV
MMXXXXII
MMMMCCLXXXX
MXL
CCXVI
CCCCLVIIII
MMCCCII
MCCCLVIII
MMMMCCCX
MCDLXXXXIV
MDCCCXIII
MMDCCCXL
MMMMCCCXXIII
DXXXIV
CVI
MMMMDCLXXX
DCCCVII
MMCMLXIIII
MMMDCCCXXXIII
DCCC
MDIII
MMCCCLXVI
MMMCCCCLXXI
MMDCCCCXVIII
CCXXXVII
CCCXXV
MDCCCXII
MMMCMV
MMMMCMXV
MMMMDCXCI
DXXI
MMCCXLVIIII
MMMMCMLII
MDLXXX
MMDCLXVI
CXXI
MMMDCCCLIIII
MMMCXXI
MCCIII
MMDCXXXXI
CCXCII
MMMMDXXXV
MMMCCCLXV
MMMMDLXV
MMMCCCCXXXII
MMMCCCVIII
DCCCCLXXXXII
MMCLXIV
MMMMCXI
MLXXXXVII
MMMCDXXXVIII
MDXXII
MLV
MMMMDLXVI
MMMCXII
XXXIII
MMMMDCCCXXVI
MMMLXVIIII
MMMLX
MMMCDLXVII
MDCCCLVII
MMCXXXVII
MDCCCCXXX
MMDCCCLXIII
MMMMDCXLIX
MMMMCMXLVIII
DCCCLXXVIIII
MDCCCLIII
MMMCMLXI
MMMMCCLXI
MMDCCCLIII
MMMDCCCVI
MMDXXXXIX
MMCLXXXXV
MMDXXX
MMMXIII
DCLXXIX
DCCLXII
MMMMDCCLXVIII
MDCCXXXXIII
CCXXXII
MMMMDCXXV
MMMCCCXXVIII
MDCVIII
MMMCLXXXXIIII
CLXXXI
MDCCCCXXXIII
MMMMDCXXX
MMMDCXXIV
MMMCCXXXVII
MCCCXXXXIIII
CXVIII
MMDCCCCIV
MMMMCDLXXV
MMMDLXIV
MDXCIII
MCCLXXXI
MMMDCCCXXIV
MCXLIII
MMMDCCCI
MCCLXXX
CCXV
MMDCCLXXI
MMDLXXXIII
MMMMDCXVII
MMMCMLXV
MCLXVIII
MMMMCCLXXVI
MMMDCCLXVIIII
MMMMDCCCIX
DLXXXXIX
DCCCXXII
MMMMIII
MMMMCCCLXXVI
DCCCXCIII
DXXXI
MXXXIIII
CCXII
MMMDCCLXXXIIII
MMMCXX
MMMCMXXVII
DCCCXXXX
MMCDXXXVIIII
MMMMDCCXVIII
LV
MMMDCCCCVI
MCCCII
MMCMLXVIIII
MDCCXI
MMMMDLXVII
MMCCCCLXI
MMDCCV
MMMCCCXXXIIII
MMMMDI
MMMDCCCXCV
MMDCCLXXXXI
MMMDXXVI
MMMDCCCLVI
MMDCXXX
MCCCVII
MMMMCCCLXII
MMMMXXV
MMCMXXV
MMLVI
MMDXXX
MMMMCVII
MDC
MCCIII
MMMMDCC
MMCCLXXV
MMDCCCXXXXVI
MMMMCCCLXV
CDXIIII
MLXIIII
CCV
MMMCMXXXI
CCCCLXVI
MDXXXII
MMMMCCCLVIII
MMV
MMMCLII
MCMLI
MMDCCXX
MMMMCCCCXXXVI
MCCLXXXI
MMMCMVI
DCCXXX
MMMMCCCLXV
DCCCXI
MMMMDCCCXIV
CCCXXI
MMDLXXV
CCCCLXXXX
MCCCLXXXXII
MMDCIX
DCCXLIIII
DXIV
MMMMCLII
CDLXI
MMMCXXVII
MMMMDCCCCLXIII
MMMDCLIIII
MCCCCXXXXII
MMCCCLX
CCCCLIII
MDCCLXXVI
MCMXXIII
MMMMDLXXVIII
MMDCCCCLX
MMMCCCLXXXX
MMMCDXXVI
MMMDLVIII
CCCLXI
MMMMDCXXII
MMDCCCXXI
MMDCCXIII
MMMMCLXXXVI
MDCCCCXXVI
MDV
MMDCCCCLXXVI
MMMMCCXXXVII
MMMDCCLXXVIIII
MMMCCCCLXVII
DCCXLI
MMCLXXXVIII
MCCXXXVI
MMDCXLVIII
MMMMCXXXII
MMMMDCCLXVI
MMMMCMLI
MMMMCLXV
MMMMDCCCXCIV
MCCLXXVII
LXXVIIII
DCCLII
MMMCCCXCVI
MMMCLV
MMDCCCXXXXVIII
DCCCXV
MXC
MMDCCLXXXXVII
MMMMCML
MMDCCCLXXVIII
DXXI
MCCCXLI
DCLXXXXI
MMCCCLXXXXVIII
MDCCCCLXXVIII
MMMMDXXV
MMMDCXXXVI
MMMCMXCVII
MMXVIIII
MMMDCCLXXIV
MMMCXXV
DXXXVIII
MMMMCLXVI
MDXII
MMCCCLXX
CCLXXI
DXIV
MMMCLIII
DLII
MMMCCCXLIX
MMCCCCXXVI
MMDCXLIII
MXXXXII
CCCLXXXV
MDCLXXVI
MDCXII
MMMCCCLXXXIII
MMDCCCCLXXXII
MMMMCCCLXXXV
MMDCXXI
DCCCXXX
MMMDCCCCLII
MMMDCCXXII
MMMMCDXCVIII
MMMCCLXVIIII
MMXXV
MMMMCDXIX
MMMMCCCX
MMMCCCCLXVI
MMMMDCLXXVIIII
MMMMDCXXXXIV
MMMCMXII
MMMMXXXIII
MMMMDLXXXII
DCCCLIV
MDXVIIII
MMMCLXXXXV
CCCCXX
MMDIX
MMCMLXXXVIII
DCCXLIII
DCCLX
D
MCCCVII
MMMMCCCLXXXIII
MDCCCLXXIIII
MMMDCCCCLXXXVII
MMMMCCCVII
MMMDCCLXXXXVI
CDXXXIV
MCCLXVIII
MMMMDLX
MMMMDXII
MMMMCCCCLIIII
MCMLXXXXIII
MMMMDCCCIII
MMDCLXXXIII
MDCCCXXXXIV
XXXXVII
MMMDCCCXXXII
MMMDCCCXLII
MCXXXV
MDCXXVIIII
MMMCXXXXIIII
MMMMCDXVII
MMMDXXIII
MMMMCCCCLXI
DCLXXXXVIIII
LXXXXI
CXXXIII
MCDX
MCCLVII
MDCXXXXII
MMMCXXIV
MMMMLXXXX
MMDCCCCXLV
MLXXX
MMDCCCCLX
MCDLIII
MMMCCCLXVII
MMMMCCCLXXIV
MMMDCVIII
DCCCCXXIII
MMXCI
MMDCCIV
MMMMDCCCXXXIV
CCCLXXI
MCCLXXXII
MCMIII
CCXXXI
DCCXXXVIII
MMMMDCCXLVIIII
MMMMCMXXXV
DCCCLXXV
DCCXCI
MMMMDVII
MMMMDCCCLXVIIII
CCCXCV
MMMMDCCXX
MCCCCII
MMMCCCXC
MMMCCCII
MMDCCLXXVII
MMDCLIIII
CCXLIII
MMMDCXVIII
MMMCCCIX
MCXV
MMCCXXV
MLXXIIII
MDCCXXVI
MMMCCCXX
MMDLXX
MMCCCCVI
MMDCCXX
MMMMDCCCCXCV
MDCCCXXXII
MMMMDCCCCXXXX
XCIV
MMCCCCLX
MMXVII
MLXXI
MMMDXXVIII
MDCCCCII
MMMCMLVII
MMCLXXXXVIII
MDCCCCLV
MCCCCLXXIIII
MCCCLII
MCDXLVI
MMMMDXVIII
DCCLXXXIX
MMMDCCLXIV
MDCCCCXLIII
CLXXXXV
MMMMCCXXXVI
MMMDCCCXXI
MMMMCDLXXVII
MCDLIII
MMCCXLVI
DCCCLV
MCDLXX
DCLXXVIII
MMDCXXXIX
MMMMDCLX
MMDCCLI
MMCXXXV
MMMCCXII
MMMMCMLXII
MMMMCCV
MCCCCLXIX
MMMMCCIII
CLXVII
MCCCLXXXXIIII
MMMMDCVIII
MMDCCCLXI
MMLXXIX
CMLXIX
MMDCCCXLVIIII
DCLXII
MMMCCCXLVII
MDCCCXXXV
MMMMDCCXCVI
DCXXX
XXVI
MMLXIX
MMCXI
DCXXXVII
MMMMCCCXXXXVIII
MMMMDCLXI
MMMMDCLXXIIII
MMMMVIII
MMMMDCCCLXII
MDCXCI
MMCCCXXIIII
CCCCXXXXV
MMDCCCXXI
MCVI
MMDCCLXVIII
MMMMCXL
MLXVIII
CMXXVII
CCCLV
MDCCLXXXIX
MMMCCCCLXV
MMDCCLXII
MDLXVI
MMMCCCXVIII
MMMMCCLXXXI
MMCXXVII
MMDCCCLXVIII
MMMCXCII
MMMMDCLVIII
MMMMDCCCXXXXII
MMDCCCCLXXXXVI
MDCCXL
MDCCLVII
MMMMDCCCLXXXVI
DCCXXXIII
MMMMDCCCCLXXXV
MMCCXXXXVIII
MMMCCLXXVIII
MMMDCLXXVIII
DCCCI
MMMMLXXXXVIIII
MMMCCCCLXXII
MMCLXXXVII
CCLXVI
MCDXLIII
MMCXXVIII
MDXIV
CCCXCVIII
CLXXVIII
MMCXXXXVIIII
MMMDCLXXXIV
CMLVIII
MCDLIX
MMMMDCCCXXXII
MMMMDCXXXIIII
MDCXXI
MMMDCXLV
MCLXXVIII
MCDXXII
IV
MCDLXXXXIII
MMMMDCCLXV
CCLI
MMMMDCCCXXXVIII
DCLXII
MCCCLXVII
MMMMDCCCXXXVI
MMDCCXLI
MLXI
MMMCDLXVIII
MCCCCXCIII
XXXIII
MMMDCLXIII
MMMMDCL
DCCCXXXXIIII
MMDLVII
DXXXVII
MCCCCXXIIII
MCVII
MMMMDCCXL
MMMMCXXXXIIII
MCCCCXXIV
MMCLXVIII
MMXCIII
MDCCLXXX
MCCCLIIII
MMDCLXXI
MXI
MCMLIV
MMMCCIIII
DCCLXXXVIIII
MDCLIV
MMMDCXIX
CMLXXXI
DCCLXXXVII
XXV
MMMXXXVI
MDVIIII
CLXIII
MMMCDLVIIII
MMCCCCVII
MMMLXX
MXXXXII
MMMMCCCLXVIII
MMDCCCXXVIII
MMMMDCXXXXI
MMMMDCCCXXXXV
MMMXV
MMMMCCXVIIII
MMDCCXIIII
MMMXXVII
MDCCLVIIII
MMCXXIIII
MCCCLXXIV
DCLVIII
MMMLVII
MMMCXLV
MMXCVII
MMMCCCLXXXVII
MMMMCCXXII
DXII
MMMDLV
MCCCLXXVIII
MMMCLIIII
MMMMCLXXXX
MMMCLXXXIIII
MDCXXIII
MMMMCCXVI
MMMMDLXXXIII
MMMDXXXXIII
MMMMCCCCLV
MMMDLXXXI
MMMCCLXXVI
MMMMXX
MMMMDLVI
MCCCCLXXX
MMMXXII
MMXXII
MMDCCCCXXXI
MMMDXXV
MMMDCLXXXVIIII
MMMDLXXXXVII
MDLXIIII
CMXC
MMMXXXVIII
MDLXXXVIII
MCCCLXXVI
MMCDLIX
MMDCCCXVIII
MDCCCXXXXVI
MMMMCMIV
MMMMDCIIII
MMCCXXXV
XXXXVI
MMMMCCXVII
MMCCXXIV
MCMLVIIII
MLXXXIX
MMMMLXXXIX
CLXXXXIX
MMMDCCCCLVIII
MMMMCCLXXIII
MCCCC
DCCCLIX
MMMCCCLXXXII
MMMCCLXVIIII
MCLXXXV
CDLXXXVII
DCVI
MMX
MMCCXIII
MMMMDCXX
MMMMXXVIII
DCCCLXII
MMMMCCCXLIII
MMMMCLXV
DXCI
MMMMCLXXX
MMMDCCXXXXI
MMMMXXXXVI
DCLX
MMMCCCXI
MCCLXXX
MMCDLXXII
DCCLXXI
MMMCCCXXXVI
MCCCCLXXXVIIII
CDLVIII
DCCLVI
MMMMDCXXXVIII
MMCCCLXXXIII
MMMMDCCLXXV
MMMXXXVI
CCCLXXXXIX
CV
CCCCXIII
CCCCXVI
MDCCCLXXXIIII
MMDCCLXXXII
MMMMCCCCLXXXI
MXXV
MMCCCLXXVIIII
MMMCCXII
MMMMCCXXXIII
MMCCCLXXXVI
MMMDCCCLVIIII
MCCXXXVII
MDCLXXV
XXXV
MMDLI
MMMCCXXX
MMMMCXXXXV
CCCCLIX
MMMMDCCCLXXIII
MMCCCXVII
DCCCXVI
MMMCCCXXXXV
MDCCCCXCV
CLXXXI
MMMMDCCLXX
MMMDCCCIII
MMCLXXVII
MMMDCCXXIX
MMDCCCXCIIII
MMMCDXXIIII
MMMMXXVIII
MMMMDCCCCLXVIII
MDCCCXX
MMMMCDXXI
MMMMDLXXXIX
CCXVI
MDVIII
MMCCLXXI
MMMDCCCLXXI
MMMCCCLXXVI
MMCCLXI
MMMMDCCCXXXIV
DLXXXVI
MMMMDXXXII
MMMXXIIII
MMMMCDIV
MMMMCCCXLVIII
MMMMCXXXVIII
MMMCCCLXVI
MDCCXVIII
MMCXX
CCCLIX
MMMMDCCLXXII
MDCCCLXXV
MMMMDCCCXXIV
DCCCXXXXVIII
MMMDCCCCXXXVIIII
MMMMCCXXXV
MDCLXXXIII
MMCCLXXXIV
MCLXXXXIIII
DXXXXIII
MCCCXXXXVIII
MMCLXXIX
MMMMCCLXIV
MXXII
MMMCXIX
MDCXXXVII
MMDCCVI
MCLXXXXVIII
MMMCXVI
MCCCLX
MMMCDX
CCLXVIIII
MMMCCLX
MCXXVIII
LXXXII
MCCCCLXXXI
MMMI
MMMCCCLXIV
MMMCCCXXVIIII
CXXXVIII
MMCCCXX
MMMCCXXVIIII
MCCLXVI
MMMCCCCXXXXVI
MMDCCXCIX
MCMLXXI
MMCCLXVIII
CDLXXXXIII
MMMMDCCXXII
MMMMDCCLXXXVII
MMMDCCLIV
MMCCLXIII
MDXXXVII
DCCXXXIIII
MCII
MMMDCCCLXXI
MMMLXXIII
MDCCCLIII
MMXXXVIII
MDCCXVIIII
MDCCCCXXXVII
MMCCCXVI
MCMXXII
MMMCCCLVIII
MMMMDCCCXX
MCXXIII
MMMDLXI
MMMMDXXII
MDCCCX
MMDXCVIIII
MMMDCCCCVIII
MMMMDCCCCXXXXVI
MMDCCCXXXV
MMCXCIV
MCMLXXXXIII
MMMCCCLXXVI
MMMMDCLXXXV
CMLXIX
DCXCII
MMXXVIII
MMMMCCCXXX
XXXXVIIII
# ## Problem 89: Roman Numerals
#
# https://projecteuler.net/problem=89
#
# This problem had 21,086 solutions at the time of selection. The problem statement says to find the number of characters saved by writing each of these in their minimal form. An explanation of roman numerals can be found here: https://projecteuler.net/about=roman_numerals
#
#
# This is the process followed by the code:
#
# 1. Read the file 'roman.txt' into the function roman_numerals().
#
# 2. Set unlet, the variable tracking the number of unnecessary letters, to 0.
#
# 3. Open the file, and read through line by line.
#
# 4. Set the number of I characters and the current number of unnecessary characters to 0 (in the form of a list, [0, 0]).
#
# 5. For the first line, run the function count_fives to count the number of I's. Count how many V's will replace the I's. Add four times the number of V's to count. Return the number of V's and count.
#
# 6. Still for the first line, run the function count_ones to count the number of V's, including the V's added in step 5. Count how many X's will replace the V's. Return the number of X's and count.
#
# 7. Repeat step 5 with X's and L's.
#
# 8. Repeat step 6 with L's and C's.
#
# 9. Repeat step 5 with C's and D's.
#
# 10. Add the number of unnecessary letters in the first line to unlet.
#
# 11. Repeat steps 4-10 for every line of the file.
#
# 12. Print a statement with the number of unnecessary letters, unlet, in the entire file.
#
#
#
# +
def roman_numerals(file):
    """If the roman numerals input were reduced to the most efficient form,
    count the number of unnecessary characters in the file.

    Each line of `file` holds one valid (but possibly non-minimal) roman
    numeral.  The saving for a line is the difference between its length and
    the length of the minimal representation of the same value.  Unlike the
    previous run-length counting approach, this also captures subtractive
    forms (e.g. VIIII -> IX, CCCC -> CD, DCCCC -> CM), which the old code
    missed entirely.

    Prints the total number of characters saved; returns None.
    """
    # Value of each single roman character.
    char_values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    # Symbols usable in minimal (subtractive) notation, largest value first.
    minimal_symbols = [
        ("M", 1000), ("CM", 900), ("D", 500), ("CD", 400),
        ("C", 100), ("XC", 90), ("L", 50), ("XL", 40),
        ("X", 10), ("IX", 9), ("V", 5), ("IV", 4), ("I", 1),
    ]

    def parse_roman(numeral):
        """Return the integer value of a valid roman numeral string."""
        total = 0
        for i, ch in enumerate(numeral):
            value = char_values[ch]
            # A smaller value written before a larger one is subtractive (IX, CD, ...).
            if i + 1 < len(numeral) and char_values[numeral[i + 1]] > value:
                total -= value
            else:
                total += value
        return total

    def minimal_roman(n):
        """Return the minimal roman representation of a positive integer."""
        parts = []
        for symbol, value in minimal_symbols:
            count, n = divmod(n, value)
            parts.append(symbol * count)
        return "".join(parts)

    unlet = 0  # running total of characters saved over the whole file
    with open(file) as f:
        for line in f:
            numeral = line.strip()
            if not numeral:
                continue
            unlet += len(numeral) - len(minimal_roman(parse_roman(numeral)))
    print(unlet, "unnecessary characters were identified.")
# Run the solver on the Project Euler input file (expected in the working directory).
roman_numerals("roman.txt")
# -
| _notebooks/2021-08-31-hw1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
import matplotlib.pyplot as plt
from numpy import *
from qutip import *
from IPython.display import Image
from numpy.linalg import *
import multiprocessing as mp
import scipy.constants as sc
import time
import datetime
import os
#############################################
name = "Two-cavity-modes-coupled-to-a-mechanical-oscillator"
author = "<NAME>"
time_index = datetime.datetime.now().strftime("Date[%Y-%m-%d]_Time[%Hh%Mm%Ss]")
save_path = "Data/" + time_index + "/"
try:
os.mkdir(save_path)
except OSError:
print ("Creation of the directory %s failed" % save_path)
#############################################
# definitions
# negativity
def negativity(sigma):
    """Entanglement measure from the partial transpose of `sigma`.

    Takes the partial transpose with respect to the second subsystem,
    then sums |e - |e|| over its eigenvalues: positive eigenvalues
    contribute 0, each negative eigenvalue e contributes 2|e|.
    """
    transposed = partial_transpose(sigma, (0, 1))
    eigenvalues = transposed.eigenenergies()
    total = 0
    for eigenvalue in eigenvalues[: sigma.shape[0]]:
        total += abs(eigenvalue - abs(eigenvalue))
    return total
# print simulation parameters
def parameters():
    """Print all simulation parameters to stdout.

    Reads the module-level parameter globals (frequencies, couplings,
    displacements, drives, initial-state settings, dissipation rates and
    time grids) and pretty-prints them.  Returns None.
    NOTE(review): "Frequecies" typo below is in a runtime string shared
    with save_parameters(); left untouched to keep output identical.
    """
    print("## Simulation parameters ##")
    print("# Frequecies:")
    print("wa = ", wa)
    print("wb = ", wb)
    print("wr = ", wr)
    print("ga = ", ga)
    print("gb = ", gb)
    print("# Displacements:")
    print("chi_a = ", chi_a)
    print("chi_b = ", chi_b)
    print("chi_ab = ", chi_ab)
    print("chi_ba = ", chi_ba)
    # Same quantity printed twice: raw repr, then fixed-precision format.
    print("disp_a = ", na*chi_a + nb*chi_ab,", disp_b = ", na*chi_ba + nb*chi_b)
    print("disp_a = %.5f, disp_b = %.5f"% (na*chi_a + nb*chi_ab, na*chi_ba + nb*chi_b))
    print("# External fields:")
    if E_a > 0:
        print("Cavity A drive:")
        print("E_a = ", E_a)
    if E_b > 0:
        print("Cavity B drive:")
        print("E_b = ", E_b)
    else:
        # "NONE" only when neither cavity is driven.
        if E_a == 0:
            print("NONE")
    print("# States:")
    print("dim_Fock = ", N)
    # Negative na/nb is the sentinel for "use a coherent state instead of Fock".
    if na >= 0:
        print("Cavity A is initially in a Fock state: na = ", na)
    else:
        print("Cavity A is initially in a Coherent state:")
        print("alpha_a = ", alpha_a,", na_avg = ", na_avg)
    if nb >= 0:
        print("Cavity B is initially in a Fock state: nb = ", nb)
    else:
        print("Cavity B is initially in a Coherent state:")
        print("alpha_b = ", alpha_b,", nb_avg = ", nb_avg)
    if nr >= 0:
        print("MR is initially in a Fock state: nr = ", nr)
    print("# Dissipation parameters:")
    print("kappa_a = ", kappa_a)
    print("kappa_b = ", kappa_b)
    print("gamma = ", gamma)
    print("n_th_a = ", n_th_a)
    print("n_th_b = ", n_th_b)
    print("n_th_r = ", n_th_r)
    print("# timelist in M.E. simulation:")
    print("t0 = ", t0,", tf = ", tf,", nt = ", nt)
    print("# timelist in Spectrum simulation:")
    print("t0_2 = ", t0_2,", tf_2 = ", tf_2,", nt_2 = ", nt_2)
    return
# save simulation parameters
def save_parameters():
    """Write the current simulation parameters to a text file under `save_path`.

    Mirrors `parameters()` but writes to
    "<name>-parameters-<time_index>.txt" instead of stdout.  Reads the
    module-level parameter globals; returns None.
    """
    filename = name + "-" + "parameters" + "-" + time_index + ".txt"
    # Use a context manager so the file is closed even if a write fails
    # (the original open()/close() pair leaked the handle on error).
    with open(save_path + filename, "w") as file:
        file.write("Simulation:" + name + "\n")
        file.write("Author:" + author + "\n\n")
        file.write("## Simulation parameters ##\n")
        file.write("# Frequecies:\n")  # (sic) typo kept to match parameters() output
        file.write("wa = %.2f\n" % wa)
        file.write("wb = %.2f\n" % wb)
        file.write("wr = %.2f\n" % wr)
        file.write("ga = %.3f\n" % ga)
        file.write("gb = %.3f\n" % gb)
        file.write("# Displacements:\n")
        file.write("chi_a = %.5f\n" % chi_a)
        file.write("chi_b = %.5f\n" % chi_b)
        file.write("chi_ab = %.5f\n" % chi_ab)
        file.write("chi_ba = %.5f\n" % chi_ba)
        file.write("disp_a = %.5f, disp_b = %.5f\n" % (na*chi_a + nb*chi_ab, na*chi_ba + nb*chi_b))
        file.write("# External fields:\n")
        if E_a > 0:
            file.write("Cavity A drive:\n")
            file.write("E_a = %.4f, we_a = %.2f\n" % (E_a,we_a))
        if E_b > 0:
            file.write("Cavity B drive:\n")
            file.write("E_b = %.4f, we_b = %.2f\n" % (E_b,we_b))
        else:
            # "NONE" only when neither cavity is driven.
            if E_a == 0:
                file.write("NONE\n")
        file.write("# States:\n")
        file.write("dim_Fock = %d\n" % N)
        # Negative na/nb is the sentinel for "coherent state instead of Fock".
        if na >= 0:
            file.write("Cavity A is initially in a Fock state: na = %d\n" % na)
        else:
            file.write("Cavity A is initially in a Coherent state:\n")
            file.write("alpha_a = %.2f, na_avg = %.2f\n" % (alpha_a, na_avg))
        if nb >= 0:
            file.write("Cavity B is initially in a Fock state: nb = %d\n" % nb)
        else:
            file.write("Cavity B is initially in a Coherent state:\n")
            file.write("alpha_b = %.2f, nb_avg = %.2f\n" % (alpha_b, nb_avg))
        if nr >= 0:
            file.write("MR is initially in a Fock state: nr = %d\n" % nr)
        file.write("# Dissipation parameters:\n")
        file.write("kappa_a = %.4f\n" % kappa_a)
        file.write("kappa_b = %.4f\n" % kappa_b)
        file.write("gamma = %.4f\n" % gamma)
        file.write("n_th_a = %.3f\n" % n_th_a)
        file.write("n_th_b = %.3f\n" % n_th_b)
        file.write("n_th_r = %.3f\n" % n_th_r)
        file.write("# timelist for qutip.mesolve():\n")
        file.write("t0 = %.1f, tf = %.1f, nt = %d\n" % (t0,tf,nt))
        file.write("# timelist for qutip.correlation_2op_2t() simulation:\n")
        file.write("t0_2 = %.1f, tf_2 = %.1f, nt_2 = %d\n" % (t0_2,tf_2,nt_2))
        file.write("# wlist for qutip.spectrum() simulation:\n")
        file.write("w0 = %.1f, wf = %.1f, nw = %d" % (w0,wf,nw))
    return
#############################################
# parameters
# frequency related
wa = 1.0 #* 2 * pi # cavity a frequency
wb = 1.0 #* 2 * pi # cavity b frequency
wr = 0.5 #* 2 * pi # mechanical oscilattor frequency
Da = wa - wr
Db = wb - wr
ga = 0.01 # coupling strength cavity a
gb = 0.1 # coupling strength cavity b
kappa_a = 1e-06 # cavity a dissipation rate
kappa_b = 1e-06 # cavity b dissipation rate
gamma = 0 # mechanic damping rate
E_a = 0 # External field coupling (cav A)
we_a = 0.0 # External field frequency (cav A)
E_b = 0 # External field coupling (cav B)
we_b = 0.0 # External field frequency (cav B)
chi_a = ga**2/(wa*wr)
chi_b = gb**2/(wb*wr)
chi_ab= ga*gb/(wa*wr)
chi_ba= ga*gb/(wb*wr)
# number related
na = 0
nb = 0
nr = 0
alpha_a = 0
alpha_b = 0
na_avg = alpha_a**2
nb_avg = alpha_b**2
N = 7 # Fock space dimension
n_th_a = 0.2 # avg number of thermal bath excitation coupled to a
n_th_b = 0.2 # avg number of thermal bath excitation coupled to b
n_th_r = 0 # avg number of thermal bath excitation coupled to r
# lists
t0 = 0
tf = 250
nt = 1000
dt = (tf-t0)/nt
tlist = linspace(t0,tf,nt)
t0_2 = 0
tf_2 = 1000
nt_2 = 5000
w0 = 0
wf = 2
nw = 8000
save_parameters()
#############################################
# initial state
if na >= 0:
state_a_t0 = fock(N, na)
else:
state_a_t0 = coherent(N, alpha_a)
if nb >= 0:
state_b_t0 = fock(N, nb)
else:
state_b_t0 = coherent(N, alpha_b)
if nr >= 0:
state_r_t0 = fock(N,nr)
state_t0 = tensor(state_a_t0,state_b_t0) #,state_r_t0)
#############################################
# operators
# annihilation
a = tensor(destroy(N),qeye(N)) #,qeye(N))
b = tensor(qeye(N),destroy(N)) #,qeye(N))
#r = tensor(qeye(N),qeye(N)),destroy(N))
# number
Na = a.dag() * a
Nb = b.dag() * b
#Nr = r.dag() * r
# quadrature X
Xa = a.dag() + a
Xb = b.dag() + b
#Xr = r.dag() + r
# operator alpha
alpha = (ga/wr)*Na + (gb/wr)*Nb
#############################################
# Hamiltonian
Hvec = []
#Hvec.append(wa*Na) # Field mode a free energy
#Hvec.append(wb*Nb) # Field mode b free energy
#Hvec.append(wr*Nr) # Mechanical oscilattor energy
#Hvec.append(-ga*Na*Xr) # interaction cavity-a with mechanical osc. (1st order)
#Hvec.append(-gb*Nb*Xr) # interaction cavity-a with mechanical osc. (1st order)
Hvec.append(wa*Na*(1-(ga/wa)*alpha)) # interaction cavity-a with mechanical osc. (1st order)
Hvec.append(wb*Nb*(1-(gb/wb)*alpha)) # interaction cavity-b with mechanical osc. (1st order)
Hvec.append(E_a*Xa)
Hvec.append(-we_a*Na)
Hvec.append(E_b*Xb)
Hvec.append(-we_b*Nb)
H = 0
for i in range(len(Hvec)):
H = H + Hvec[i]
#############################################
# collapse operators
# Lindblad jump operators for the master equation; each is only added when
# its rate is strictly positive, so c_ops stays minimal.
c_ops = []
# Relaxations, temperature = 0 or >0
# cavity-a relaxation
rate = kappa_a * (1 + n_th_a)
if rate > 0.0:
    c_ops.append(sqrt(rate) * a)
# cavity-b relaxation
rate = kappa_b * (1 + n_th_b)
if rate > 0.0:
    c_ops.append(sqrt(rate) * b)
# mechanical oscillator relaxation
# FIX(review): the mechanical-mode operator `r` is commented out where the
# operators are built, so `sqrt(rate) * r` raised NameError whenever
# gamma > 0.  Disabled (like the rest of the r-mode code) until the
# mechanical mode is restored in the Hilbert space.
rate = gamma * (1 + n_th_r)
#if rate > 0.0:
#    c_ops.append(sqrt(rate) * r)
# Excitations, only temperature > 0
# cavity-a excitation
rate = kappa_a * n_th_a
if rate > 0.0:
    c_ops.append(sqrt(rate) * a.dag())
# cavity-b excitation
rate = kappa_b * n_th_b
if rate > 0.0:
    c_ops.append(sqrt(rate) * b.dag())
# mechanical oscillator excitation (disabled for the same reason as above)
rate = gamma * n_th_r
#if rate > 0.0:
#    c_ops.append(sqrt(rate) * r.dag())
#############################################
# Master equation solve function
Op_list = []
#Op_list = [Na,Nb,Xa,Xb]
comp = mesolve(H, state_t0, tlist, c_ops, Op_list)
#############################################
# Entanglement measures for N = 2
# This branch only runs for a tiny Fock cutoff (N < 3); with N = 7 above it
# is skipped entirely.
if N < 3:
    negativity_ab = []
    negativity_ar = []
    negativity_br = []
    concurrence_ab = []
    concurrence_ar = []
    concurrence_br = []
    # NOTE(review): ptrace indices [0,2] and [1,2] assume a 3-mode state,
    # but state_t0 is built from two modes only (the r mode is commented
    # out) -- confirm before enabling this branch.
    for i in range(nt):
        rho_t = comp.states[i]
        rho_ab_t = ptrace(rho_t, [0,1])
        rho_ar_t = ptrace(rho_t, [0,2])
        rho_br_t = ptrace(rho_t, [1,2])
        negativity_ab.append(negativity(rho_ab_t))
        negativity_ar.append(negativity(rho_ar_t))
        negativity_br.append(negativity(rho_br_t))
        concurrence_ab.append(concurrence(rho_ab_t))
        concurrence_ar.append(concurrence(rho_ar_t))
        concurrence_br.append(concurrence(rho_br_t))
    ###### 1st plot ######
    fig, axes = plt.subplots(1, 1, figsize=(10,6))
    axes.plot(tlist, (real(negativity_ab)),'b',label= "a-b", lw=2.0)
    axes.plot(tlist, (real(negativity_ar)),'r' ,label= "a-r", lw=2.0)
    # FIX(review): the "b-r" curve previously re-plotted negativity_ar.
    axes.plot(tlist, (real(negativity_br)),'g' ,label= "b-r", lw=2.0)
    axes.legend(loc=0)
    axes.set_ylim(0, 1.01)
    axes.set_xlabel(r'$g t$',rotation=0,fontsize= 22.0)
    axes.set_ylabel('Negatividade $\mathcal{N}$ ',rotation=90,fontsize= 22.0)
    plt.show()
    ###### 2nd plot ######
    fig, axes = plt.subplots(1, 1, figsize=(10,6))
    axes.plot(tlist, (real(concurrence_ab)),'b',label= "a-b", lw=2.0)
    axes.plot(tlist, (real(concurrence_ar)),'r' ,label= "a-r", lw=2.0)
    axes.plot(tlist, (real(concurrence_br)),'g' ,label= "b-r", lw=2.0)
    axes.legend(loc=0)
    axes.set_ylim(0, 1.01)
    axes.set_xlabel(r'$g t$',rotation=0,fontsize= 22.0)
    axes.set_ylabel('Concurrence $\mathcal{C}$ ',rotation=90,fontsize= 22.0)
    plt.show()
#else:
#    print("Error: Invalid system dimensions. The negativity and concurrence cannot be calculated.")
#############################################
# Expected values
Na_m = expect(Na, comp.states)
Nb_m = expect(Nb, comp.states)
#Nr_m = expect(Nr, comp.states)
Xa_m = expect(Xa, comp.states)
Xb_m = expect(Xb, comp.states)
#Xr_m = expect(Xr, comp.states)
Na_m_sqrd = expect(Na*Na, comp.states)
Nb_m_sqrd = expect(Nb*Nb, comp.states)
parameters()
###### 1st plot ######
fig, axes = plt.subplots(1, 1, figsize=(10,6))
axes.plot(tlist, (real(Na_m)),'b',label= "$N_a$", lw=3.0)
axes.plot(tlist, (real(Nb_m)),'r--' ,label= "$N_b$", lw=3.0)
#axes.plot(tlist, (real(Nr_m)),'g' ,label= "$N_r$", lw=3.0)
axes.legend(loc=0)
axes.set_xlabel(r'$g t$',rotation=0,fontsize= 22.0)
axes.set_ylabel('Avg photon number',rotation=90,fontsize= 22.0)
filename = "NumbN-"
save(save_path + filename + time_index,(tlist,Na_m, Nb_m))
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
###### 2nd plot ######
fig, axes = plt.subplots(1, 1, figsize=(10,6))
axes.plot(tlist, (real(Na_m_sqrd)),'b',label= "$N_a^2$", lw=3.0)
axes.plot(tlist, (real(Nb_m_sqrd)),'r--' ,label= "$N_b^2$", lw=3.0)
axes.legend(loc=0)
axes.set_xlabel(r'$g t$',rotation=0,fontsize= 22.0)
axes.set_ylabel('Avg photon number squared',rotation=90,fontsize= 22.0)
filename = "NumbN_squared-"
save(save_path + filename + time_index,(tlist,Na_m_sqrd, Nb_m_sqrd))
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
###### 3rd plot ######
fig, axes = plt.subplots(1, 1, figsize=(10,6))
axes.plot(tlist, (real(Xa_m)),'b',label= "$X_a$", lw=2.0)
axes.plot(tlist, (real(Xb_m)),'r' ,label= "$X_b$", lw=2.0)
#axes.plot(tlist, (real(Xr_m)),'g' ,label= "$X_r$", lw=2.0)
axes.legend(loc=0)
#axes.set_ylim(-4, 4)
axes.set_xlabel(r'$g t$',rotation=0,fontsize= 22.0)
axes.set_ylabel('Avg quadrature X',rotation=90,fontsize= 22.0)
filename = "quadX-"
save(save_path + filename + time_index,(tlist,Xa_m, Xb_m))
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
#############################################
# + deletable=true editable=true
# Correlation
tlist2 = linspace(t0_2,tf_2,nt_2)
corr_a = correlation_2op_2t(H, state_t0, None, tlist2, [], a.dag(), a)
corr_b = correlation_2op_2t(H, state_t0, None, tlist2, [], b.dag(), b)
#corr_r = correlation_2op_2t(H, state_t0, None, tlist2, [], r.dag(), r)
wlist1, Sa = spectrum_correlation_fft(tlist2, corr_a)
wlist2, Sb = spectrum_correlation_fft(tlist2, corr_b)
#wlist2, Sr = spectrum_correlation_fft(tlist2, corr_r)
parameters()
##########
fig, axes = plt.subplots(1, 1, figsize=(10,6))
axes.plot(tlist2, (real(corr_a)),'b', lw=2.0)
#axes.legend(loc=0)
axes.set_xlabel(r'$g t$',rotation=0,fontsize= 22.0)
axes.set_ylabel('Correlation',rotation=90,fontsize= 22.0)
axes.set_xlim(0,100)
filename = "CorrelationA-"
save(save_path + filename + time_index,(tlist2,corr_a))
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
##########
fig, axes = plt.subplots(1, 1, figsize=(10,6))
axes.plot(tlist2, (real(corr_b)),'r', lw=2.0)
#axes.legend(loc=0)
axes.set_xlabel(r'$g t$',rotation=0,fontsize= 22.0)
axes.set_ylabel('Correlation',rotation=90,fontsize= 22.0)
axes.set_xlim(0,100)
filename = "CorrelationB-"
save(save_path + filename + time_index,(tlist2,corr_b))
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
##########
#fig, axes = plt.subplots(1, 1, figsize=(10,6))
#axes.plot(tlist2, (real(corr_r)),'g', lw=2.0)
#axes.legend(loc=0)
#axes.set_xlabel(r'$g t$',rotation=0,fontsize= 22.0)
#axes.set_ylabel('Correlation',rotation=90,fontsize= 22.0)
#axes.set_xlim(0,100)
#plt.show()
##########
fig, axes = plt.subplots(1, 1, figsize=(14,4))
axes.plot(wlist1, (real(Sa)),'b',label='Cavity A', lw=2.0)
axes.plot(wlist2, (real(Sb)),'r',label='Cavity B', lw=2.0)
axes.legend(loc=0)
axes.set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
#axes.set_ylabel('Correlation',rotation=90,fontsize= 22.0)
axes.set_xlim(-10, 11)
filename = "Spectrum-Corr-FFT-"
save(save_path + filename + time_index,(tlist2,Sa,Sb))
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
# + deletable=true editable=true
fig, axes = plt.subplots(3,1, figsize=(14,9))
axes[0].plot(wlist1, (real(Sa)),'b',label='Cavity A', lw=2.0)
axes[0].plot(wlist2, (real(Sb)),'r',label='Cavity B', lw=2.0)
axes[0].legend(loc=0)
axes[0].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[0].set_xlim(-6, 6)
axes[1].plot(wlist1, (real(Sa)),'b',label='Cavity A', lw=2.0)
axes[1].plot(wlist2, (real(Sb)),'r',label='Cavity B', lw=2.0)
axes[1].legend(loc=0)
axes[1].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[1].set_xlim(1.2, 2.51)
start, end = axes[1].get_xlim()
axes[1].xaxis.set_ticks(arange(start, end, 0.10))
axes[2].plot(wlist1, (real(Sa)),'b',label='Cavity A', lw=2.0)
axes[2].plot(wlist2, (real(Sb)),'r',label='Cavity B', lw=2.0)
axes[2].legend(loc=0)
axes[2].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[2].set_xlim(1.9, 2.1)
start, end = axes[2].get_xlim()
axes[2].xaxis.set_ticks(arange(start, end, chi_b/2))
filename = "Spectrum-Corr-FFT-2-"
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
# + deletable=true editable=true
fig, axes = plt.subplots(3,1, figsize=(14,9))
axes[0].plot(wlist1, (real(Sa)),'b',label='Cavity A', lw=2.0)
axes[0].plot(wlist2, (real(Sb)),'r',label='Cavity B', lw=2.0)
axes[0].legend(loc=0)
axes[0].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[0].set_xlim(-6, 6)
axes[1].plot(wlist1, (real(Sa)),'b',label='Cavity A', lw=2.0)
axes[1].plot(wlist2, (real(Sb)),'r',label='Cavity B', lw=2.0)
axes[1].legend(loc=0)
axes[1].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[1].set_xlim(0.2, 0.8)
start, end = axes[1].get_xlim()
axes[1].xaxis.set_ticks(arange(start, end, 0.10))
axes[2].plot(wlist1, (real(Sa)),'b',label='Cavity A', lw=2.0)
axes[2].plot(wlist2, (real(Sb)),'r',label='Cavity B', lw=2.0)
axes[2].legend(loc=0)
axes[2].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[2].set_xlim(2.2,2.5)
start, end = axes[2].get_xlim()
axes[2].xaxis.set_ticks(arange(start, end, 0.1))
filename = "Spectrum-Corr-FFT-3-"
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
# + deletable=true editable=true
wlist = linspace(w0,wf,nw)
Sa_2 = spectrum(H, wlist, c_ops, a.dag(), a)
Sb_2 = spectrum(H, wlist, c_ops, b.dag(), b)
# + deletable=true editable=true
fig, axes = plt.subplots(3,1, figsize=(14,9))
axes[0].plot(wlist, (real(Sa_2)),'b',label='Cavity A', lw=2.0)
axes[0].plot(wlist, (real(Sb_2)),'r',label='Cavity B', lw=2.0)
axes[0].legend(loc=0)
axes[0].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[0].set_xlim(w0, wf)
axes[1].plot(wlist, (real(Sa_2)),'b',label='Cavity A', lw=2.0)
axes[1].plot(wlist, (real(Sb_2)),'r',label='Cavity B', lw=2.0)
axes[1].legend(loc=0)
axes[1].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[1].set_xlim(0.5, 1.05)
start, end = axes[1].get_xlim()
axes[1].xaxis.set_ticks(arange(start, end, ((end-start)/16)))
axes[2].plot(wlist, (real(Sa_2)),'b',label='Cavity A', lw=2.0)
axes[2].plot(wlist, (real(Sb_2)),'r',label='Cavity B', lw=2.0)
axes[2].legend(loc=0)
axes[2].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[2].set_xlim(0.9,1.01)
#axes[2].set_ylim(0,15)
start, end = axes[2].get_xlim()
axes[2].xaxis.set_ticks(arange(start, end, (end-start)/16))
filename = "Spectrum()-"
save(save_path + filename + time_index,(wlist,Sa_2,Sb_2))
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
# + deletable=true editable=true
fig, axes = plt.subplots(3,1, figsize=(14,9))
axes[0].plot(wlist, (real(Sa_2)),'b',label='Cavity A', lw=2.0)
axes[0].legend(loc=0)
axes[0].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[0].set_xlim(0.9, 1.1)
axes[1].plot(wlist, (real(Sa_2)),'b',label='Cavity A', lw=2.0)
axes[1].legend(loc=0)
axes[1].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[1].set_xlim(0.97, 1.02)
start, end = axes[1].get_xlim()
axes[1].xaxis.set_ticks(arange(start, end, (end-start)/10))
axes[2].plot(wlist, (real(Sa_2)),'b',label='Cavity A', lw=2.0)
axes[2].legend(loc=0)
axes[2].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[2].set_xlim(0.99,1.005)
#axes[2].set_ylim(0,15)
start, end = axes[2].get_xlim()
axes[2].xaxis.set_ticks(arange(start, end, (end-start)/16))
filename = "Spectrum()-A-"
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
####
fig, axes = plt.subplots(1,1, figsize=(14,9))
axes.plot(wlist, (real(Sa_2)),'b',label='Cavity B', lw=2.0)
axes.legend(loc=0)
axes.set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes.set_xlim(w0, wf)
start, end = axes.get_xlim()
axes.xaxis.set_ticks(arange(start, end, 0.1))
filename = "Spectrum()-A-2-"
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
####
fig, axes = plt.subplots(1,1, figsize=(14,9))
axes.plot(wlist, (real(Sa_2)),'b',label='Cavity B', lw=2.0)
axes.legend(loc=0)
axes.set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes.set_xlim(0.95, 1.05)
start, end = axes.get_xlim()
axes.xaxis.set_ticks(arange(start, end, 0.005))
filename = "Spectrum()-A-3-"
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
# + deletable=true editable=true
fig, axes = plt.subplots(3,1, figsize=(14,9))
axes[0].plot(wlist, (real(Sb_2)),'r',label='Cavity B', lw=2.0)
axes[0].legend(loc=0)
axes[0].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[0].set_xlim(0.5, 1.1)
axes[1].plot(wlist, (real(Sb_2)),'r',label='Cavity B', lw=2.0)
axes[1].legend(loc=0)
axes[1].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[1].set_xlim(0.85, 1.)
start, end = axes[1].get_xlim()
axes[1].xaxis.set_ticks(arange(start, end, 0.10))
axes[2].plot(wlist, (real(Sb_2)),'r',label='Cavity B', lw=2.0)
axes[2].legend(loc=0)
axes[2].set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes[2].set_xlim(0.92,1.0)
#axes[2].set_ylim(0,1000)
start, end = axes[2].get_xlim()
axes[2].xaxis.set_ticks(arange(start, end, 0.01))
filename = "Spectrum()-B-"
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
#############
fig, axes = plt.subplots(1,1, figsize=(14,9))
axes.plot(wlist, (real(Sb_2)),'r',label='Cavity B', lw=2.0)
axes.legend(loc=0)
axes.set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes.set_xlim(0.5, 1.1)
start, end = axes.get_xlim()
axes.xaxis.set_ticks(arange(start, end, 0.05))
filename = "Spectrum()-B-2-"
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
#############
fig, axes = plt.subplots(1,1, figsize=(14,9))
axes.plot(wlist, (real(Sb_2)),'r',label='Cavity B', lw=2.0)
axes.legend(loc=0)
axes.set_xlabel(r'$\omega/\omega_a$',rotation=0,fontsize= 22.0)
axes.set_xlim(0.95, 1.)
start, end = axes.get_xlim()
axes.xaxis.set_ticks(arange(start, end, 0.005))
filename = "Spectrum()-B-3-"
plt.savefig(save_path + filename + time_index + ".pdf", bbox_inches='tight');
plt.savefig(save_path + filename + time_index + ".png", bbox_inches='tight');
plt.show()
# + deletable=true editable=true
parameters()
# + deletable=true editable=true
# ls -la Data
# + deletable=true editable=true
from qutip.ipynbtools import version_table
version_table()
# -
| Two-cavity-modes-coupled-to-a-mechanical-oscillator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tabula
import pandas as pd
import re
import math
import numpy as np
import time
pdf_path=r"https://chfs.ky.gov/agencies/dph/covid19/COVID19DailyReport.pdf"
# # Get Data on the PDF
# +
# Extract the report date from the first page of the daily-report PDF.
# [top,left,bottom,width]
box = [2.3,1.5,3.3,18]
# Conversion factor applied to the box coordinates; 28.28 is close to
# 72/2.54, so presumably cm -> PDF points -- TODO confirm.
fc = 28.28
for i in range(0, len(box)):
    box[i] *= fc
# Read the banner region of page 1; the table's first column header
# contains the date text.
deaths = tabula.read_pdf(pdf_path,pages=1,area=[box],multiple_tables=False)
today=deaths[0].columns[0]
# Date string assumed to start at character 26 of the header -- TODO
# confirm against the current report layout.
today=today[26:]
today=pd.to_datetime(today,format='%m/%d/%Y')
today=today.date()
print(today)
# -
# # Get Total Deaths and Total Cases
# +
# [top,left,bottom,width]
box1 = [3.5,1.5,9,11]
fc = 28.28
for i in range(0, len(box1)):
box1[i] *= fc
total = tabula.read_pdf(pdf_path,pages=1,area=[box1],multiple_tables=False)
total[0].iloc[5]=total[0].columns
total[0].columns = [today,"Totals"]
total[0].set_index(today,inplace=True)
total[0].columns.name = total[0].index.name
total[0].index.name = None
totals=total[0]
totals['Totals'] = totals['Totals'].str.replace(',', '').astype(int)
totals
# -
# # Race of Cases Where Race is Known
# +
# Race breakdown of cases, cropped from page 2 of the report.
# [top,left,bottom,width]
box2 = [11,1.6,16.5,11.5]
fc = 28.28
for i in range(0, len(box2)):
    box2[i] *= fc
data = tabula.read_pdf(pdf_path,pages=2,area=[box2],multiple_tables=False)
race_cases=data[0]
#find the percentage of total known
# The table header contains "(NN.N%" -- the share of cases with known race.
start = '('
end = '%'
s = data[0].columns[0]
perc=float((s.split(start))[1].split(end)[0])/100
race_cases.rename(columns={race_cases.columns[0]:'Race'}, inplace=True)
race_cases.set_index("Race",inplace=True)
race_cases.columns=["Percent"]
race_cases.columns
race_cases['Percent'] = race_cases['Percent'].str.replace('%', '').astype(float)
# Estimated counts: per-race percent of the known-race share of total cases.
race_cases['Cases'] = np.floor(race_cases.Percent * totals.loc['Total Cases*','Totals']*perc/100)
race_cases.columns.name = race_cases.index.name
race_cases.index.name = None
# The remainder (total minus all estimated counts) is attributed to Unknown.
race_cases.loc["Unknown","Cases"]= totals.loc['Total Cases*','Totals']-race_cases['Cases'].sum()
race_cases
# -
# -
# # Ethnicity of Cases Where Ethnicity is Known
# +
# Ethnicity breakdown of cases, cropped from page 2 (same recipe as the
# race-of-cases cell: parse the "known" share, scale percents to counts).
# [top,left,bottom,width]
box3 = [11,12,16,20]
fc = 28.28
for i in range(0, len(box3)):
    box3[i] *= fc
data_eth= tabula.read_pdf(pdf_path,pages=2,area=[box3],multiple_tables=False)
ethnicity_cases=data_eth[0]
#find the percentage of total known
start = '('
end = '%'
s = data_eth[0].columns[0]
perc=float((s.split(start))[1].split(end)[0])/100
ethnicity_cases.rename(columns={ethnicity_cases.columns[0]:'Ethnicity'}, inplace=True)
ethnicity_cases
ethnicity_cases.set_index("Ethnicity",inplace=True)
ethnicity_cases.columns=["Percent"]
ethnicity_cases['Percent'] = ethnicity_cases['Percent'].str.replace('%', '').astype(float)
# Estimated counts: percent of the known-ethnicity share of total cases.
ethnicity_cases['Cases'] = np.floor(ethnicity_cases.Percent * totals.loc['Total Cases*','Totals']*perc/100)
ethnicity_cases.columns.name = ethnicity_cases.index.name
ethnicity_cases.index.name = None
# Remainder of the total goes to Unknown.
ethnicity_cases.loc["Unknown","Cases"]= totals.loc['Total Cases*','Totals']-ethnicity_cases['Cases'].sum()
ethnicity_cases
# -
# -
# # Race of Deaths Where Race is Known
# +
# Race breakdown of deaths, cropped from page 2 (same recipe, scaled
# against 'Total Deaths*' instead of cases).
# [top,left,bottom,width]
box4 = [16.5,1.6,23,11]
fc = 28.28
for i in range(0, len(box4)):
    box4[i] *= fc
deaths = tabula.read_pdf(pdf_path,pages=2,area=[box4],multiple_tables=False)
deaths_race=deaths[0]
#find the percentage of total known
start = '('
end = '%'
s = deaths[0].columns[0]
perc=float((s.split(start))[1].split(end)[0])/100
deaths_race.rename(columns={deaths_race.columns[0]:'Race'}, inplace=True)
deaths_race.set_index("Race",inplace=True)
deaths_race.columns=["Percent"]
deaths_race['Percent'] = deaths_race['Percent'].str.replace('%', '').astype(float)
# Estimated counts: percent of the known-race share of total deaths.
deaths_race['Deaths'] = np.floor(deaths_race.Percent * totals.loc['Total Deaths*','Totals']*perc/100)
deaths_race.columns.name = deaths_race.index.name
deaths_race.index.name = None
# Remainder of the total goes to Unknown.
deaths_race.loc["Unknown","Deaths"]= totals.loc['Total Deaths*','Totals']-deaths_race['Deaths'].sum()
deaths_race
# -
# -
# # Ethnicity of Deaths Where Ethnicity is Known
# +
# Ethnicity breakdown of deaths, cropped from page 2.
from tabula import read_pdf  # NOTE(review): unused re-import (tabula.read_pdf is called below)
# [top,left,bottom,width]
box5 = [16,12,20,20]
fc = 28.28
for i in range(0, len(box5)):
    box5[i] *= fc
deaths_e = tabula.read_pdf(pdf_path,pages=2,area=[box5],multiple_tables=False)
deaths_ethnicity=deaths_e[0]
#find the percentage of total known
start = '('
end = '%'
s = deaths_e[0].columns[0]
perc=float((s.split(start))[1].split(end)[0])/100
deaths_ethnicity.rename(columns={deaths_ethnicity.columns[0]:"Ethnicity"}, inplace=True)
deaths_ethnicity.set_index("Ethnicity",inplace=True)
deaths_ethnicity.columns=["Percent"]
deaths_ethnicity['Percent'] = deaths_ethnicity['Percent'].str.replace('%', '').astype(float)
# Estimated counts: percent of the known-ethnicity share of total deaths.
deaths_ethnicity['Deaths'] = np.floor(deaths_ethnicity.Percent * totals.loc['Total Deaths*','Totals']*perc/100)
deaths_ethnicity.columns.name = deaths_ethnicity.index.name
deaths_ethnicity.index.name = None
# Remainder of the total goes to Unknown.
deaths_ethnicity.loc["Unknown","Deaths"]= totals.loc['Total Deaths*','Totals']-deaths_ethnicity['Deaths'].sum()
deaths_ethnicity
# -
# -
# # Converting New Scraped Covid Data into Same Format as CRDT Table
# +
# Column layout of the CRDT (COVID Racial Data Tracker) historical table.
column_names = ["Date","State","Cases_Total","Cases_White","Cases_Black","Cases_LatinX","Cases_Asian",
                "Cases_AIAN","Cases_NHPI","Cases_Multiracial","Cases_Other","Cases_Unknown",
                "Cases_Ethnicity_Hispanic","Cases_Ethnicity_NonHispanic","Cases_Ethnicity_Unknown",
                "Deaths_Total","Deaths_White","Deaths_Black","Deaths_LatinX","Deaths_Asian",
                "Deaths_AIAN","Deaths_NHPI","Deaths_Multiracial","Deaths_Other","Deaths_Unknown",
                "Deaths_Ethnicity_Hispanic","Deaths_Ethnicity_NonHispanic","Deaths_Ethnicity_Unknown"]
#new_data is the dataframe for the new data which you just scraped converted into data format of Georgia_Historical
# One row for today's report.  Columns never assigned below (e.g.
# Cases_LatinX, Cases_Multiracial, Cases_Other, Deaths_LatinX, Deaths_Other)
# remain NaN -- presumably absent from this state's report; verify.
new_data = pd.DataFrame(columns = column_names)
new_data["Date"]=[today]
new_data["State"]=["KY"]
new_data["Cases_Total"]=[totals.loc["Total Cases*","Totals"]]
new_data["Cases_White"]=[race_cases.loc["White","Cases"]]
new_data["Cases_Black"]=[race_cases.loc["Black","Cases"]]
new_data["Cases_Asian"]=[race_cases.loc["Asian","Cases"]]
new_data["Cases_AIAN"]=[race_cases.loc["American Indian or Alaska Native","Cases"]]
new_data["Cases_NHPI"]=[race_cases.loc["Native Hawaiian or Other Pacific","Cases"]]
new_data["Cases_Unknown"]=[race_cases.loc["Unknown","Cases"]]
new_data["Cases_Ethnicity_Hispanic"]=[ethnicity_cases.loc["Hispanic","Cases"]]
new_data["Cases_Ethnicity_NonHispanic"]=[ethnicity_cases.loc["Non-Hispanic","Cases"]]
new_data["Cases_Ethnicity_Unknown"]=[ethnicity_cases.loc["Unknown","Cases"]]
new_data["Deaths_Total"]=[totals.loc["Total Deaths*","Totals"]]
new_data["Deaths_White"]=[deaths_race.loc["White","Deaths"]]
new_data["Deaths_Black"]=[deaths_race.loc["Black","Deaths"]]
new_data["Deaths_Asian"]=[deaths_race.loc["Asian","Deaths"]]
new_data["Deaths_AIAN"]=[deaths_race.loc["American Indian or Alaska Native","Deaths"]]
# NHPI deaths row kept disabled -- presumably missing from today's table.
#new_data["Deaths_NHPI"]=[deaths_race.loc["Native Hawaiian or Other Pacific","Deaths"]]
new_data["Deaths_Multiracial"]=[deaths_race.loc["Multiracial","Deaths"]]
new_data["Deaths_Unknown"]=[deaths_race.loc["Unknown","Deaths"]]
new_data["Deaths_Ethnicity_Hispanic"]=[deaths_ethnicity.loc["Hispanic","Deaths"]]
new_data["Deaths_Ethnicity_NonHispanic"]=[deaths_ethnicity.loc["Non-Hispanic","Deaths"]]
new_data["Deaths_Ethnicity_Unknown"]=[deaths_ethnicity.loc["Unknown","Deaths"]]
new_data
# -
# -
# # Data Validation
# +
# Data validation: each breakdown's Percent column should sum to ~100%.
# Fix: the original cell assigned the ethnicity-cases sum to the
# "deaths by race" row and the deaths-by-race sum to the "cases by
# ethnicity" row; each sum below now matches the table in its label.
# Sum of case percentages by race
race_v_cases = race_cases['Percent'].sum()
# Sum of death percentages by race
race_v_deaths = deaths_race['Percent'].sum()
# Sum of case percentages by ethnicity
ethnicity_v_cases = ethnicity_cases['Percent'].sum()
# Sum of death percentages by ethnicity
ethnicity_v_deaths = deaths_ethnicity['Percent'].sum()
data_validation = pd.DataFrame(
    {"Difference in sum of cases by race v. Total confirmed cases": [race_v_cases],
     "Difference in sum of deaths by race v. Total confirmed deaths": [race_v_deaths],
     "Difference in sum of cases by ethnicity v. Total confirmed cases": [ethnicity_v_cases],
     "Difference in sum of deaths by ethnicity v. Total confirmed deaths": [ethnicity_v_deaths]},
    index=['Data Validation']).T
# Highlight in red any sum outside (99.9, 100.0] -- i.e. not ~100%.
data_validation.style.applymap(lambda x: 'background-color : red' if x > 100.0 or x <= 99.9 else '')
# -
# -
# # Create a CSV for today's scraper data
# Make the CSV holding just this scraper run's output.
# Daily snapshot named by report date (Windows-specific absolute path).
new_data.to_csv(r"C:\Users\wjg39\Documents\Desktop\CRDT\KY\\"+str(new_data["Date"].loc[0])+"_race-ethnicity_Kentucky.csv", index = False)
# # Add today's scraper data to historical data
# Append today's row to the historical file: mode='a' with header=False
# keeps the existing single header row intact.
new_data.to_csv(r'C:\Users\wjg39\Documents\Desktop\CRDT\KY\historical_race-ethnicity_Kentucky.csv', mode='a', header=False,index=False)
| Scraper - XLSX:Google Sheets:CSV/Kentucky_PDF_Scraper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# + [markdown] deletable=false editable=false jbook={"class": "TextCell", "current_ui_view": "student", "readonly": true, "section_id": "section_0", "sources": {"student": "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"></ul></div>"}} slideshow={"slide_type": "slide"} toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# + deletable=false jbook={"class": "CodeCell", "current_ui_view": "student", "section_id": "section_0", "sources": {"student": "include(\"../src/free_whiten.jl\")\nusing Base.Test"}} slideshow={"slide_type": "slide"}
include("../src/free_whiten.jl")
using Test
# -
# Centering should remove the trace (Hermitian) or the row means (rectangular).
@testset "Test mat_center function $mat_type" for mat_type in ["her", "rec"]
    for _ in 1:10
        centered = mat_center(randn(3, 3); mat_type = mat_type)
        if mat_type == "her"
            # Trace of a trace-centered matrix is numerically zero.
            @test abs(tr(centered)) <= 3 * eps(Float64)
        else
            # Row means of a row-centered matrix are numerically zero.
            @test sum(abs.(mean(centered, dims = 2))) <= 3 * eps(Float64)
        end
    end
end
# + deletable=false jbook={"class": "CodeCell", "current_ui_view": "student", "section_id": "section_0", "sources": {"student": "# test the mat_center function\n# Hermitian case\n# the trace of output of mat_center with hermitian option should be zero\nfor idx = 1: 10\n @show @test abs(trace(mat_center(randn(3,3), \"her\"))) <= 3*eps(Float64)\nend"}} slideshow={"slide_type": "slide"}
# test the mat_center function
# Hermitian case
# the trace of output of mat_center with hermitian option should be zero
# (mat_center comes from ../src/free_whiten.jl; 3*eps(Float64) absorbs the
# round-off from centering a 3x3 Gaussian draw)
@testset "Test mat_center for Hermitian matrix" begin
    for idx = 1: 10
        @test abs(tr(mat_center(randn(3,3); mat_type = "her"))) <= 3*eps(Float64);
    end
end
# + deletable=false jbook={"class": "CodeCell", "current_ui_view": "student", "section_id": "section_0", "sources": {"student": "# rectangular case\nfor idx = 1: 10\n @show @test sum(abs.(mean(mat_center(randn(3,3), \"rec\"), 2))) <= 3*eps(Float64)\nend"}} slideshow={"slide_type": "slide"}
# Rectangular case: after centering, every row of the output should average
# to zero (up to floating-point round-off).
@testset "Test mat_center for rectangular matrix" begin
    for _ in 1:10
        row_means = mean(mat_center(randn(3, 3); mat_type = "rec"), dims = 2)
        @test sum(abs.(row_means)) <= 3 * eps(Float64)
    end
end
# + deletable=false jbook={"class": "CodeCell", "current_ui_view": "student", "section_id": "section_0", "sources": {"student": "# test free_whiten function\n# The covariance of the whitened data should be identity\n# Hermitian case\nfor idx = 1: 10\n G = randn(10, 10)\n Z = [G + G', G*G']\n Y = free_whiten(Z, \"her\")[1]\n @show @test maximum(abs.( trace.(Y*Y')./10 - eye(2))) < 10*eps(Float64)\nend"}} slideshow={"slide_type": "slide"}
# test free_whiten function
# The covariance of the whitened data should be identity
# Hermitian case
# NOTE(review): Z is a Vector of matrices, so Y*Y' is a 2x2 matrix of matrix
# products; tr.() takes pairwise traces, which divided by 10 should match I.
@testset "Test free_whiten for Hermitian matrix" begin
    for idx = 1: 10
        G = randn(10, 10)
        Z = [G + G', G*G']
        Y = free_whiten(Z; mat_type = "her")[1]
        @test maximum(abs.( tr.(Y*Y')./10 - I)) < 10*eps(Float64)
    end
end
# + deletable=false jbook={"class": "CodeCell", "current_ui_view": "student", "section_id": "section_0", "sources": {"student": "# rectangular case\nfor idx = 1: 10\n Z = [randn(10, 20) for i = 1: 3]\n Y = free_whiten(Z, \"rec\")[1]\n @show @test maximum(abs.( trace.(Y*Y')./10 - eye(3))) < 10*eps(Float64)\nend"}} slideshow={"slide_type": "slide"}
# rectangular case
# Whitened rectangular data should also have identity covariance:
# normalized pairwise traces of the 3 whitened matrices compared against I.
@testset "Test free_whiten for rectangular matrix" begin
    for idx = 1: 10
        Z = [randn(10, 20) for i = 1: 3]
        Y = free_whiten(Z; mat_type = "rec")[1]
        @test maximum(abs.( tr.(Y*Y')./10 - I)) < 10*eps(Float64)
    end
end
# + deletable=false jbook={"class": "CodeCell", "current_ui_view": "student", "section_id": "section_0", "sources": {"student": ""}} slideshow={"slide_type": "slide"}
| test/test_ipynb/test_whiten.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os, os.path
# Run from the repository root (this notebook lives one level down).
os.chdir('../')
import pandas as pd
import numpy as np
import re
from collections import Counter
from keras.models import Sequential
from keras.layers import Dense
# ### Read the Data
# Preprocessed sentence pairs; cells below read columns 'text1', 'text2'
# and 'label' (binary) from this frame.
data = pd.read_csv('data/processed_data.csv')
data.head()
# ### Vectorization
# Build the candidate bigram vocabulary from both text columns.
text1 = list(data['text1'])
text2 = list(data['text2'])
labels = list(data['label'])
assert len(text1) == len(text2)
texts = text1 + text2
# Character bigrams of every word-like token across all texts.
tokens = [tok[pos:pos + 2]
          for doc in texts
          for tok in re.findall(r'\w+', doc)
          for pos in range(len(tok) - 1)]
counter_2 = Counter(tokens)
# Keep the 30 most frequent bigrams.
most_common_2 = [gram for gram, _ in counter_2.most_common(30)]
# +
# Character trigram vocabulary.  Note: unlike the bigram cell, this one
# scans only `text1` (kept as-is to preserve behavior).
tokens_3 = []
texts = data['text1']
for doc in texts:
    for tok in re.findall(r'\w+', doc):
        tokens_3.extend(tok[pos:pos + 3] for pos in range(len(tok) - 2))
counter_3 = Counter(tokens_3)
most_common_3 = [gram for gram, _ in counter_3.most_common(30)]
# -
# -
# Peek at the most frequent bigram (notebook echo).
word = most_common_2[0]
word
# Binary presence features: the first half of the columns marks keyword
# occurrence in text1, the second half marks occurrence in text2.
keywords = most_common_2 + most_common_3
n_keys = len(keywords)
word_vector = np.zeros((len(text1), 2 * n_keys))
for col, word in enumerate(keywords):
    for row in range(len(word_vector)):
        if word in text1[row]:
            word_vector[row, col] = 1
        if word in text2[row]:
            word_vector[row, col + n_keys] = 1
# ## Training
data = pd.DataFrame(word_vector)
input_dim = word_vector.shape[1]
model = Sequential()
model.add(Dense(64, input_dim=input_dim, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(data, labels, epochs=150, batch_size=10)
scores = model.evaluate(data, labels)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
| exploratory data analysis/.ipynb_checkpoints/benchmark with key_extraction jieba-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# %matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
# # Limits, Legends, and Layouts
#
# In this section, we'll focus on what happens around the edges of the axes: Ticks, ticklabels, limits, layouts, and legends.
# # Limits and autoscaling
#
# By default, matplotlib will attempt to determine limits for you that encompasses all the data you have plotted. This is the "autoscale" feature. For line and image plots, the limits are not padded, while plots such as scatter plots and bar plots are given some padding.
# +
# Same data, two artists: line plot (left) gets unpadded autoscale limits,
# scatter (right) gets padded limits.
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=plt.figaspect(0.5))
ax1.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax2.scatter([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
plt.show()
# -
# -
# ### `ax.margins(...)`
#
# If you'd like to add a bit of "padding" to a plot, `ax.margins(<some_small_fraction>)` is a very handy way to do so. Instead of choosing "even-ish" numbers as min/max ranges for each axis, `margins` will make matplotlib calculate the min/max of each axis by taking the range of the data and adding on a fractional amount of padding.
#
# As an example: (Note that the ranges for the scatter example actually shrink slightly in this case)
# +
# Same data as above, but with explicit fractional margins.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=plt.figaspect(0.5))
ax1.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax2.scatter([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax1.margins(x=0.0, y=0.1) # 10% padding in the y-direction only
ax2.margins(0.05) # 5% padding in all directions
plt.show()
# -
# -
# ### `ax.axis(...)`
#
# The `ax.axis(...)` method is a convenient way of controlling the axes limits and enabling/disabling autoscaling.
#
# If you ever need to get all of the current plot limits, calling `ax.axis()` with no arguments will return the xmin/max/etc:
#
# xmin, xmax, ymin, ymax = ax.axis()
#
# If you'd like to manually set all of the x/y limits at once, you can use `ax.axis` for this, as well (note that we're calling it with a single argument that's a sequence, not 4 individual arguments):
#
# ax.axis([xmin, xmax, ymin, ymax])
#
# However, you'll probably use `axis` mostly with either the `"tight"` or `"equal"` options. There are other options as well; see the documentation for full details. In a nutshell, though:
#
# * *tight*: Set axes limits to the exact range of the data
# * *equal*: Set axes scales such that one cm/inch in the y-direction is the same as one cm/inch in the x-direction. (In matplotlib terms, this sets the aspect ratio of the plot to 1. That _doesn't_ mean that the axes "box" will be square, though!)
#
# And as an example:
# +
# Compare the autoscaling modes on identical data.
fig, axes = plt.subplots(nrows=3)
for ax in axes:
    ax.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
axes[0].set_title('Normal Autoscaling', y=0.7, x=0.8)
axes[1].set_title('ax.axis("tight")', y=0.7, x=0.8)
axes[1].axis('tight')  # limits hug the data exactly
axes[2].set_title('ax.axis("equal")', y=0.7, x=0.8)
axes[2].axis('equal')  # one x unit spans the same length as one y unit
plt.show()
# -
# -
# ### Manually setting only one limit
#
# Another trick with limits is to specify only half of a limit. When done **after** a plot is made, this has the effect of allowing the user to anchor a limit while letting matplotlib autoscale the rest of it.
# Good -- setting limits after plotting is done
# Only one side of each limit is pinned; matplotlib autoscales the other.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=plt.figaspect(0.5))
ax1.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax2.scatter([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax1.set_ylim(bottom=-10)
ax2.set_xlim(right=25)
plt.show()
# Bad -- Setting limits before plotting is done
# The subsequent plot/scatter calls re-trigger autoscaling, overriding these.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=plt.figaspect(0.5))
ax1.set_ylim(bottom=-10)
ax2.set_xlim(right=25)
ax1.plot([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
ax2.scatter([-10, -5, 0, 5, 10, 15], [-1.2, 2, 3.5, -0.3, -4, 1])
plt.show()
# # Legends
#
# As you've seen in some of the examples so far, the X and Y axis can also be labeled, as well as the subplot itself via the title.
#
# However, another thing you can label is the line/point/bar/etc that you plot. You can provide a label to your plot, which allows your legend to automatically build itself.
# Label each line so ax.legend() can build the legend automatically.
fig, ax = plt.subplots()
ax.plot([1, 2, 3, 4], [10, 20, 25, 30], label='Philadelphia')
ax.plot([1, 2, 3, 4], [30, 23, 13, 4], label='Boston')
ax.set(ylabel='Temperature (deg C)', xlabel='Time', title='A tale of two cities')
ax.legend(loc="upper left")
plt.show()
# Legends will go in the upper right corner by default (you can control this with the `loc` kwarg), but if you'd prefer matplotlib to choose a location to avoid overlapping plot elements as much as possible, you can pass in:
#
# ax.legend(loc="best")
#
# Also, if you happen to be plotting something that you do not want to appear in the legend, just set the label to "\_nolegend\_".
# The "_nolegend_" label keeps the line markers out of the legend; only the
# bars ("Foobar") appear there.
fig, ax = plt.subplots(1, 1)
ax.bar([1, 2, 3, 4], [10, 20, 25, 30], label="Foobar", align='center', color='lightblue')
ax.plot([1, 2, 3, 4], [10, 20, 25, 30], label="_nolegend_", marker='o', color='darkred')
ax.legend(loc='best')
plt.show()
# # Exercise 4.1
#
# Once again, let's use a bit of what we've learned. Try to reproduce the following figure:
#
# <img src="images/exercise_4-1.png">
#
# Hint: You'll need to combine `ax.axis(...)` and `ax.margins(...)`. Here's the data and some code to get you started:
# +
# # %load exercises/4.1-legends_and_scaling.py
import numpy as np
import matplotlib.pyplot as plt
# Try to reproduce the figure shown in images/exercise_4-1.png
# Here's the data and colors used.
# A unit circle and a radius-2 circle, each sampled at 150 points.
t = np.linspace(0, 2 * np.pi, 150)
x1, y1 = np.cos(t), np.sin(t)
x2, y2 = 2 * x1, 2 * y1
colors = ['darkred', 'darkgreen']
# Try to plot the two circles, scale the axes as shown and add a legend
# Hint: it's easiest to combine `ax.axis(...)` and `ax.margins(...)` to scale
# the axes
# -
# # Dealing with the boundaries: Layout, ticks, spines, etc
#
# One key thing we haven't talked about yet is all of the annotation on the outside of the axes, the borders of the axes, and how to adjust the amount of space around the axes. We won't go over every detail, but this next section should give you a reasonable working knowledge of how to configure what happens around the edges of your axes.
#
# ## Ticks, Tick Lines, Tick Labels and Tickers
# This is a constant source of confusion:
#
# * A Tick is the *location* of a Tick Label.
# * A Tick Line is the line that denotes the location of the tick.
# * A Tick Label is the text that is displayed at that tick.
# * A [`Ticker`](http://matplotlib.org/api/ticker_api.html#module-matplotlib.ticker) automatically determines the ticks for an Axis and formats the tick labels.
#
# [`tick_params()`](http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.tick_params) is often used to help configure your tickers.
# +
# Manual tick placement and styling.
fig, ax = plt.subplots()
ax.plot([1, 2, 3, 4], [10, 20, 25, 30])
# Manually set ticks and tick labels *on the x-axis* (note ax.xaxis.set, not ax.set!)
ax.xaxis.set(ticks=range(1, 5), ticklabels=[3, 100, -12, "foo"])
# Make the y-ticks a bit longer and go both in and out...
ax.tick_params(axis='y', direction='inout', length=10)
plt.show()
# -
# -
# A commonly-asked question is "How do I plot non-numerical categories?"
#
# The easiest way to do this is to "fake" the x-values and then change the tick labels to reflect the category.
#
# For example:
# +
# Categorical bar chart: draw against fake numeric positions, then relabel
# the ticks with the category names.
data = [('apples', 2), ('oranges', 3), ('peaches', 1)]
names, counts = zip(*data)
fig, ax = plt.subplots()
positions = np.arange(len(names))
ax.bar(positions, counts, align='center', color='gray')
ax.set(xticks=positions, xticklabels=names)
plt.show()
# -
# -
# ## Subplot Spacing
# The spacing between the subplots can be adjusted using [`fig.subplots_adjust()`](http://matplotlib.org/api/pyplot_api.html?#matplotlib.pyplot.subplots_adjust). Play around with the example below to see how the different arguments affect the spacing.
# Tune inter-subplot spacing (wspace/hspace) and figure margins (left/right/
# top/bottom, as figure fractions).
fig, axes = plt.subplots(2, 2, figsize=(9, 9))
fig.subplots_adjust(wspace=0.5, hspace=0.3,
                    left=0.125, right=0.9,
                    top=0.9, bottom=0.1)
plt.show()
# A common "gotcha" is that the labels are not automatically adjusted to avoid overlapping those of another subplot. Matplotlib does not currently have any sort of robust layout engine, as it is a design decision to minimize the amount of "magic" that matplotlib performs. LaTeX users would be quite familiar with the amount of frustration that can occur with placement of figures in their documents.
#
# That said, there have been some efforts to develop tools that users can use to help address the most common complaints. The "[Tight Layout](http://matplotlib.org/users/tight_layout_guide.html)" feature, when invoked, will attempt to resize margins and subplots so that nothing overlaps.
#
# If you have multiple subplots, and want to avoid overlapping titles/axis labels/etc, `fig.tight_layout` is a great way to do so:
# +
def example_plot(ax):
    """Draw a short demo line on *ax* with oversized labels, used below to
    show how fig.tight_layout resolves overlapping text."""
    ax.plot([1, 2])
    ax.set_title('Title', fontsize=24)
    ax.set_xlabel('x-label', fontsize=16)
    ax.set_ylabel('y-label', fontsize=8)
# Four crowded subplots; toggle tight_layout below to see the difference.
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
# Try enabling fig.tight_layout to compare...
#fig.tight_layout()
plt.show()
# -
# ## GridSpec
# Under the hood, matplotlib utilizes [`GridSpec`](http://matplotlib.org/api/gridspec_api.html) to lay out the subplots. While `plt.subplots()` is fine for simple cases, sometimes you will need more advanced subplot layouts. In such cases, you should use GridSpec directly. GridSpec is outside the scope of this course, but it is handy to know that it exists. [Here](http://matplotlib.org/users/gridspec.html) is a guide on how to use it.
# ## Sharing axes
# There will be times when you want to have the x axis and/or the y axis of your subplots to be "shared". Sharing an axis means that the axis in one or more subplots will be tied together such that any change in one of the axis changes all of the other shared axes. This works very nicely with autoscaling arbitrary datasets that may have overlapping domains. Furthermore, when interacting with the plots (panning and zooming), all of the shared axes will pan and zoom automatically.
# sharex/sharey tie both subplots' limits together (pan/zoom in lockstep).
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
ax1.plot([1, 2, 3, 4], [1, 2, 3, 4])
ax2.plot([3, 4, 5, 6], [6, 5, 4, 3])
plt.show()
# ## "Twinning" axes
# Sometimes one may want to overlay two plots on the same axes, but the scales may be entirely different. You can simply treat them as separate plots, but then twin them.
# twinx() overlays a second y-axis (right side) on the same x-axis.
fig, ax1 = plt.subplots(1, 1)
ax1.plot([1, 2, 3, 4], [1, 2, 3, 4])
ax2 = ax1.twinx()
ax2.scatter([1, 2, 3, 4], [60, 50, 40, 30])
ax1.set(xlabel='X', ylabel='First scale')
ax2.set(ylabel='Other scale')
plt.show()
# # Axis Spines
# Spines are the axis lines for a plot. Each plot can have four spines: "top", "bottom", "left" and "right". By default, they are set so that they frame the plot, but they can be individually positioned and configured via the [`set_position()`](http://matplotlib.org/api/spines_api.html#matplotlib.spines.Spine.set_position) method of the spine. Here are some different configurations.
# +
# Spine configuration: hide the top/right spines, then position the two
# remaining ones (only the "axes" variant is active below).
fig, ax = plt.subplots()
ax.plot([-2, 2, 3, 4], [-10, 20, 25, 5])
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')  # no ticklines at the top
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')  # no ticklines on the right
# "outward"
# Move the two remaining spines "out" away from the plot by 10 points
# ax.spines['bottom'].set_position(('outward', 10))
# ax.spines['left'].set_position(('outward', 10))
# "data"
# Have the spines stay intersected at (0,0)
# ax.spines['bottom'].set_position(('data', 0))
# ax.spines['left'].set_position(('data', 0))
# "axes"
# Have the two remaining spines placed at a fraction of the axes
ax.spines['bottom'].set_position(('axes', 0.5))
ax.spines['left'].set_position(('axes', 0.5))
plt.show()
# -
# -
# # Exercise 4.2
#
# This one is a bit trickier. Once again, try to reproduce the figure below:
#
# <img src="images/exercise_4-2.png">
#
#
# A few key hints: The two subplots have no vertical space between them (this means that the `hspace` is `0`). Note that the bottom spine is at 0 in data coordinates and the tick lines are missing from the right and top sides.
#
# Because you're going to be doing a lot of the same things to both subplots, to avoid repetitive code you might consider writing a function that takes an `Axes` object and makes the spine changes, etc. to it.
#
#
# +
# # %load exercises/4.2-spines_ticks_and_subplot_spacing.py
import matplotlib.pyplot as plt
import numpy as np
# Try to reproduce the figure shown in images/exercise_4.2.png
# This one is a bit trickier!
# Here's the data...
# Solution to exercise 4.2: two stacked bar charts sharing styling.
data = [('dogs', 4, 4), ('frogs', -3, 1), ('cats', 1, 5), ('goldfish', -2, 2)]
animals, friendliness, popularity = zip(*data)
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1)
fig.subplots_adjust(hspace=0)  # no vertical gap between the subplots
x = np.arange(len(animals))  # fake numeric positions for the categories
def _formatAxes( ax ):
    """Apply the shared spine/tick styling to one subplot."""
    ax.set(xticks=x, xticklabels=animals)
    ax.xaxis.set_ticks_position('bottom')      # no tick lines on top
    ax.spines['bottom'].set_position('zero')   # x-axis sits at y=0
    ax.yaxis.set_ticks_position('left')        # no tick lines on the right
    ax.axhline(y=0, color='black')
ax1.bar(x, friendliness, align='center', color='gray')
ax2.bar(x, popularity, align='center', color='gray')
# Pad the y-limits by 8%.  Use list comprehensions instead of the original
# Py2-style map(): a map object only works here because set_ylim happens to
# unpack the iterable, which is fragile.
ax1.set( ylabel='Friendliness', ylim=[1.08 * v for v in (min(friendliness), max(friendliness))] )
ax2.set( ylabel='Popularity', ylim=[1.08 * v for v in (0, max(popularity))] )
for ax in ax1, ax2:
    _formatAxes(ax)
plt.show()
# -
| course/matplotlib/4. Layout.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 교재 Part 6
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import folium
from config.settings import DATA_DIR # 그대로 사용해도 됨
# Load the data files used throughout Part 6.
df = pd.read_excel('data/city_pop.xlsx')  # migration population by city/province
elec = pd.read_excel('data/elec_energy.xlsx')  # North/South Korea electricity usage
auto = pd.read_csv('data/auto-mpg.csv', header=None)  # car information (no header row in file)
auto.columns = ['mpg', 'cylinders', 'displacement', 'hp', 'weight', 'acceleration', 'model_year', 'origin', 'name']
ttn = sns.load_dataset('titanic')
col = pd.read_excel('data/col_location.xlsx', index_col=0)  # Seoul university locations
ggd = pd.read_excel('data/gg_pop.xlsx')  # Gyeonggi-do population
stock = pd.read_csv('data/stock.csv')
stock_d = pd.read_excel('data/stock-data.xlsx')
stock_p = pd.read_excel(DATA_DIR[0] + '/stock-price.xlsx')
stock_v = pd.read_excel(DATA_DIR[0] + '/stock-valuation.xlsx')
class Util:
    """Namespace of small helper functions used by the P6 demo cells.

    None of the helpers use instance state, so they are declared as
    static methods and called as ``Util.add_10(n)`` etc.
    """

    @staticmethod
    def add_10(n):
        """Return n + 10 (elementwise demo for Series.apply)."""
        return n + 10

    @staticmethod
    def add_both(a, b):
        """Return a + b (two-argument demo for apply(..., b=...))."""
        return a + b

    @staticmethod
    def missing_values(series):
        """Return a boolean mask of null entries in *series*."""
        return series.isnull()

    @staticmethod
    def min_max(series):
        """Return the range (max - min) of *series*."""
        return series.max() - series.min()

    @staticmethod
    def kpl_cyl(mpg, cyl):
        """Convert miles/gallon to km/liter (1 mi ~ 1.6 km, 1 gal ~ 3.7 L)
        and add the cylinder count (two-column apply demo)."""
        return mpg * 1.6/3.7 + cyl

    @staticmethod
    def info(df):
        """Print df.info() (returns None); used with DataFrame.pipe."""
        return df.info()

    @staticmethod
    def f_sum(df, axis=0):
        """Sum *df* along *axis*.  When called via pipe(Util.f_sum, axis=1)
        the keyword lands on this wrapper, which forwards it positionally;
        the result is the same because ``axis`` is df.sum's first parameter."""
        return df.sum(axis)

    @staticmethod
    def zscore(x):
        """Standardize *x*: (x - mean) / std (pandas uses sample std, ddof=1)."""
        return (x - x.mean()) / x.std()
# ## Apply - Series에 elementwise / Dataframe에 serieswise로 함수 적용
# +
class P6:
    def p218(self):
        """Demonstrate Series.apply and DataFrame.apply (textbook p.218)."""
        df = ttn.loc[:, ['age', 'fare']]
        df['ten'] = 10
        print(df)
        # apply on a Series calls the function elementwise
        sr1 = df['age'].apply(Util.add_10)
        #print(sr1)
        sr2 = df['age'].apply(Util.add_both, b=10)
        #print(sr2)
        sr3 = df['age'].apply(lambda x: Util.add_10(x)) # x = each element of df['age']
        #print(sr3) # sr1, sr2, sr3 all give the same result
        # apply also works on a DataFrame (applied Series-wise)
        x = ttn.loc[:, ['age', 'fare']]
        df0 = x.apply(Util.add_10, axis=0) # axis=0: column-wise
        #print(df0)
        df1 = x.apply(Util.add_10, axis=1) # axis=1: row-wise
        #print(df1) # same result as df0
        miss = x.apply(Util.missing_values, axis=1) # same result for axis=0 and axis=1
        #print(miss)
        # When do axis=0 and axis=1 actually differ?  For reductions:
        diff0 = x.apply(Util.min_max, axis=0)
        #print(diff0) # max - min of each column
        diff1 = x.apply(Util.min_max, axis=1)
        #print(diff1) # max - min of each row
        add10 = df.apply(lambda x: Util.add_both(x['age'], x['ten']), axis=1) # x = each row of df; 'age' indexes that row Series
        #print(add10)

if __name__ == '__main__':
    P6().p218()
# +
class P6:
    def test(self):
        """Add a kpl column computed from mpg and cylinders via apply."""
        display(auto.head())
        # Apply kpl_cyl to the mpg column to add a kpl column.
        #auto['kpl'] = auto['mpg'].apply(Util.kpl_cyl, cyl=auto['cylinders']) # apply on a Series is elementwise, so cyl must not be a Series
        auto['kpl'] = auto[['mpg']].apply(Util.kpl_cyl, cyl=auto['cylinders']) # apply on a DataFrame so the function receives whole Series
        #display(auto.head())
        # Instructor's approach: row-wise lambda
        auto['kpl_t'] = auto.apply(lambda x: Util.kpl_cyl(x['mpg'], x['cylinders']) , axis=1)
        #display(auto.head())

if __name__ == '__main__':
    P6().test()
# -
# ## Applymap - Dataframe에 elementwise로 함수 적용
# +
class P6:
    def p221(self):
        """Demonstrate DataFrame.applymap (elementwise; textbook p.221)."""
        df = ttn.loc[:, ['age', 'fare']]
        # applymap applies the function to every element of the DataFrame
        df1 = df.applymap(Util.add_10)
        print(df1)
        # min_max expects a Series, so elementwise application raises
        # (kept intentionally as a demonstration of the limitation).
        df2 = df.applymap(Util.min_max) # not possible
        print(df2)

if __name__ == '__main__':
    P6().p221()
# -
# ## Pipe - Series/Dataframe 자체에 함수 적용
# +
class P6:
    def p226(self):
        """Demonstrate Series/DataFrame.pipe chaining (textbook p.226)."""
        # pipe on a Series
        #print(ttn['age'].pipe(max))
        # pipe on a DataFrame
        #ttn.pipe(Util.info)
        # pipe shines when chaining several functions in sequence
        result = ttn.pipe(Util.missing_values).pipe(Util.f_sum, axis=1).pipe(Util.f_sum)
        print(type(result)) # each pipe result can itself be piped again
        print(result)
        #ttn.loc[:, ['age', 'fare']].pipe(Util.total) # NOTE(review): Util.total is not defined -- keep commented out

if __name__ == '__main__':
    P6().p226()
# -
# ## 컬럼 순서 바꾸기
# Index 순서를 정렬하고 싶으면, df.sort_index()를 사용할 수 있다.
# +
class P6:
    def p229(self):
        """Reorder DataFrame columns (textbook p.229).  Use df.sort_index()
        instead when the row index needs sorting."""
        # 1. Sort columns alphabetically
        df = ttn.loc[:, 'survived':'age']
        #print(df)
        #print(df.columns.values) # array of column labels
        sorted_col = sorted(df.columns) # sorted(): returns a list
        #print(sorted_col) # alphabetically ordered
        sorted_df = df[sorted_col] # select columns in the sorted order
        #print(sorted_df)
        # 2. Reverse the column order
        rev_col = reversed(df.columns)
        print(rev_col) # reversed object: convert to a list to inspect
        rev_col = list(rev_col)
        print(rev_col)
        rev_df = df[rev_col]
        print(rev_df)

if __name__ == '__main__':
    P6().p229()
# -
# ## 열 분리
# +
class P6:
    def p232(self):
        """Split a date column ('연월일' = year-month-day) into parts
        (textbook p.232), first via str.split, then via dt accessors."""
        print(stock_d.head())
        #print(stock_d['연월일'].dtypes) # datetime64
        stock_d['연월일'] = stock_d['연월일'].astype('str')
        #print(stock_d.head())
        date = stock_d['연월일'].str.split('-')
        #print(date.head())
        #print(type(date)) # Series (of lists)
        # Split into year / month / day strings
        stock_d['연'] = date.str.get(0) # .str.get reaches into each element's list
        stock_d['월'] = date.str.get(1)
        stock_d['일'] = date.str.get(2)
        #display(stock_d.head())
        # Convert back to Timestamp and split via the dt accessor
        stock_d['연월일'] = pd.to_datetime(stock_d['연월일'])
        stock_d['Year'] = stock_d['연월일'].dt.year
        stock_d['Month'] = stock_d['연월일'].dt.month
        stock_d['Day'] = stock_d['연월일'].dt.day
        #display(stock_d)

if __name__ == '__main__':
    P6().p232()
# -
# >Difference: Using `split` returns month and day as '06' and '01', while using `datetime` returns as '6' and '1'.
# ## Filtering - 조건식 mask 이용
# +
class P6:
    """Demonstrates boolean-mask filtering and Series.isin."""

    def p234(self):
        # Boolean masks combine with & (and), | (or); each comparison must be parenthesized.
        teens = (ttn['age'] >= 10) & (ttn['age'] < 20)
        women_ten_plus = (ttn['age'] >= 10) & (ttn['sex'] == 'female')
        young_or_old = (ttn['age'] < 10) | (ttn['age'] >= 60)
        first_class = ttn['pclass'] == 1
        second_class = ttn['pclass'] == 2
        # Raise the column display limit so every column would be visible.
        pd.set_option('display.max_columns', 20)
        # isin builds a mask from membership in the given list.
        embarked_mask = ttn['embark_town'].isin(['Southampton', 'Queenstown'])
        result = ttn[embarked_mask]
if __name__ == '__main__':
    P6().p234()
# + [markdown] heading_collapsed=true
# ## Dataframe 합치기 - Concat, Join, Merge
# + hidden=true
# Two overlapping sample frames for the concat/merge/join demos:
# rows 2-3 and columns a-c are shared between them.
df1 = pd.DataFrame({'a': ['a0', 'a1', 'a2', 'a3'],
                    'b': ['b0', 'b1', 'b2', 'b3'],
                    'c': ['c0', 'c1', 'c2', 'c3']
                    }, index=[0, 1, 2, 3])
df2 = pd.DataFrame({'a': ['a2', 'a3', 'a4', 'a5'],
                    'b': ['b2', 'b3', 'b4', 'b5'],
                    'c': ['c2', 'c3', 'c4', 'c5'],
                    'd': ['d2', 'd3', 'd4', 'd5']
                    }, index=[2, 3, 4, 5])
display(df1)
display(df2)
# + hidden=true
# Sample Series with differing indexes for the DataFrame/Series concat demos.
sr1 = pd.Series(['e0', 'e1', 'e2', 'e3'], name='e') # default index [0, 1, 2, 3]
sr2 = pd.Series(['f0', 'f1', 'f2'], name='f', index=[3, 4, 5])
sr3 = pd.Series(['go', 'g1', 'g2', 'g3'], name='g')
print(sr1)
print(sr2)
print(sr3)
# + [markdown] hidden=true
# ### Concat - pd.concat()
# + hidden=true
class P6:
    def p239(self):
        """Demonstrate pd.concat on DataFrames and on DataFrame/Series mixes."""
        # Concat two DataFrames; ignore_index=True replaces the labels on the
        # concatenation axis with a fresh integer index, join='inner' keeps
        # only the shared columns.
        r1 = pd.concat([df1, df2], axis=0, ignore_index=True, join='inner')
        #display(r1)
        # Concat a DataFrame with a Series column-wise (aligned on the index).
        r2 = pd.concat([df1, sr1], axis=1)
        display(r2)
        r3 = pd.concat([df1, sr2], axis=1, join='inner')
        #display(r3)
        r4 = pd.concat([df2, sr2], axis=1)
        #display(r4)
        r5 = pd.concat([df2, sr3], axis=1)
        #display(r5)
if __name__ == '__main__':
    P6().p239()
# + [markdown] hidden=true
# ### Merge - pd.merge()
# + hidden=true
class P6:
    def p245(self):
        """Demonstrate pd.merge with default, left_on/right_on, and index joins."""
        display(stock_p.head(), stock_v.head())
        df1 = pd.merge(stock_p, stock_v, how='inner') # joins on shared columns (default `on`)
        display(df1)
        df2 = pd.merge(stock_p, stock_v, left_on='stock_name', right_on='name', how='right') # different key names per side
        #display(df2)
        df3 = pd.merge(stock_p, stock_v, left_index=True, right_index=True) # join on the row index instead of columns
        #display(df3)
        # Merge only the stocks priced under 50,000.
        p5 = stock_p[stock_p['price'] < 50000]
        #display(p5)
        df4 = pd.merge(p5, stock_v, on='id', how='inner')
        #display(df4)
if __name__ == '__main__':
    P6().p245()
# + hidden=true
pd.merge(df1, df2, how='inner') # inner의 경우 index가 유지되지 않음
# + [markdown] hidden=true
# ### Join - df1.join(df2)
# + hidden=true
class P6:
    def p252(self):
        """Demonstrate DataFrame.join (index-on-index by default, column-on-index via on=)."""
        # on=: pick a column (or index level) of the caller; it is matched
        # against the other frame's index. The default is index-on-index.
        st_p = stock_p.set_index('id')
        st_v = stock_v.set_index('id')
        #display(st_p, st_v)
        df1 = st_p.join(st_v, how='inner')
        #display(df1.head())
        # Match stock_p's 'id' column to st_v's index - the result always keeps stock_p's index.
        df2 = stock_p.join(st_v, on='id', how='outer')
        #display(df2)
        # Match stock_v's 'id' column to st_p's index.
        df3 = stock_v.join(st_p, on='id', how='outer')
        #display(df3)
if __name__ == '__main__':
    P6().p252()
# + [markdown] hidden=true
# > Only `concat` and `join` can combine three or more dataframes at once. However, `join`ing dataframes with overlapping column names is awkward, because `join` can only disambiguate two overlapping columns (via one left suffix and one right suffix).
# -
# ## Group 연산
# +
class P6:
    def p254(self):
        """Demonstrate groupby basics: iteration, get_group, and aggregation."""
        df = ttn[['age', 'sex', 'class', 'fare', 'survived']]
        display(df.head())
        # groupby returns a lazy DataFrameGroupBy object.
        group = df.groupby(['class'])
        #print(group) # DataFrameGroupBy object
        #for data in group: # iterating yields (group name, group sub-frame) tuples
        #    display(data)
        #    print(type(data))
        # Inspect a single group by its key.
        sec1 = group.get_group('Second')
        #print(sec1)
        group2 = df.groupby(['class', 'sex'])
        #for data in group2:
        #    print(data)
        th_female = group2.get_group(('Third', 'female'))
        #print(th_female)
        r1 = df.groupby(['class']).mean()
        #print(r1)
        # The aggregated result is a DataFrame, so .loc works on it.
        r2 = df.groupby(['class', 'sex']).mean()
        #print(r2)
        #print(r2.loc['First']) # indexing
        #print(r2.loc[('First', 'female')]) # multi-indexing
if __name__ == '__main__':
    P6().p254()
# +
class P6:
    def p261(self):
        """Demonstrate groupby column access plus the agg/transform/filter/apply methods."""
        df = ttn[['age', 'sex', 'class', 'fare', 'survived']]
        s = df.groupby(['class']).std()
        #print(s)
        # Access one column of each group.
        s2 = df.groupby(['class'])['fare'] # SeriesGroupBy object
        #print(s2)
        #for s in s2:
        #    print(s)
        s3 = df.groupby(['class']).fare # SeriesGroupBy
        #print(s3)
        s2 = df.groupby(['class'])['fare'].std()
        #print(s2)
        s3 = df.groupby(['class']).fare.std()
        #print(s3) # same result as s2
        s4 = df.groupby(['class'])[['fare']] # list selector keeps a DataFrameGroupBy
        #print(s4)
        #for s in s4:
        #    print(s) # prints all columns, not just fare, so not very useful
        # agg: apply a function over each group --------------------------------------------------
        r1 = df.groupby(['class']).agg(Util.min_max) # applied series-wise, per column
        #print(r1)
        r2 = df.groupby(['class']).agg(['min', 'max']) # multiple aggregation
        #print(r2)
        r3 = df.groupby(['class']).agg({'fare':['min', 'max'], 'age':'mean'})
        #print(r3)
        r31 = df.groupby(['class']).agg(lambda x: x.fare.max() - x.fare.min()) # NOTE(review): x['fare'] reportedly errored here - reason unconfirmed
        print(r31) # the fare max()-min() value is propagated to every column
        #r32 = df.groupby(['class']).agg(lambda x: x.info()) # x: per-class sub-frame
        #print(r32)
        # transform: result keeps the original frame's index/shape -------------------------------
        r4 = df.groupby(['class']).transform(Util.zscore) # object columns (sex, class) are excluded from the computation
        #print(r4) # per-group series-wise mean()/std() -> z-score, then recombined
        r5 = df.groupby(['class'])['age'].transform(Util.zscore)
        #print(r5)
        r6 = df.groupby(['class']).transform(lambda x: x) # the grouping column is always dropped from the result
        #print(r6) # 'class' column excluded
        #print(df)
        #r7 = df.groupby(['class']).transform(Util.info) # the error message shows it is applied series-wise
        #print(r7)
        # filter: keep or drop whole groups ------------------------------------------------------
        r8 = df.groupby(['class']).filter(lambda x: len(x) >= 200) # x: per-class DataFrame
        #print(r8) # only First and Third pass; original index and grouping column are kept
        r9 = df.groupby(['class']).filter(lambda x: x['age'].mean() > 30)
        #print(r9) # only First class returned
        # apply: general-purpose fallback --------------------------------------------------------
        # Slower than the other methods, so try those first. The result can be a
        # DataFrame or a Series depending on what the function returns.
        r10 = df.groupby(['class']).apply(Util.zscore) # compare with transform: the object 'sex' column is included as NaN
        #print(r10)
        r11 = df.groupby(['class'])['age'].apply(Util.zscore)
        #print(r11)
        r12 = df.groupby(['class']).apply(lambda x: x.fare.max() - x.fare.min()) # x: per-class DataFrame
        print(r12) # compare with agg: no propagation, a Series is returned
        r13 = df.groupby(['class']).apply(lambda x: x) # the grouping column is included
        #print(r13)
        #r14 = df.groupby(['class']).apply(lambda x: x.info())
        #print(r14)
        r15 = df.groupby(['class']).apply(lambda x: x['age'].mean() > 30) # compare with filter: only True/False per group
        #print(r15)
if __name__ == '__main__':
    P6().p261()
# -
# >Dataframe에 series를 +, - 연산하는 것은 row-wise로 계산된다(Day20_num_pan > pan11.py 참조)
# ## Multi-index
# +
class P6:
    """Demonstrates multi-index access on a grouped result, including the xs indexer."""

    def p271(self):
        columns = ttn[['age', 'sex', 'class', 'fare', 'survived']]
        by_class_sex = columns.groupby(['class', 'sex'])
        means = by_class_sex.mean()
        # The result is a DataFrame indexed by the (class, sex) multi-index.
        print(means)
        # xs also works on a Series that carries a multi-index.
        print(means['age'].xs('female', level=1, drop_level=False))
if __name__ == '__main__':
    P6().p271()
# -
# ## Pivot
# ### pd.pivot_table - df에서 바로 pivot table을 만들 때
# +
class P6:
    def p273(self):
        """Demonstrate pd.pivot_table, xs-based slicing of the result, and a heatmap."""
        # Widen the display limits so the pivot output is fully visible.
        pd.set_option('display.max_columns', 10)
        pd.set_option('display.max_rows', 20)
        df = ttn[['age', 'sex', 'class', 'fare', 'survived']]
        pv1 = pd.pivot_table(df, index='class', columns='sex', values='age', aggfunc='mean')
        #print(pv1)
        # With a list aggfunc the columns become a multi-index, function level on top.
        pv2 = pd.pivot_table(df, index='class', columns='sex', values='survived', aggfunc=['mean', 'sum'])
        #print(pv2)
        #print(pv2.columns)
        # Column multi-index level order: function > values > column.
        pv3 = pd.pivot_table(df, index=['class', 'sex'], columns='survived', values=['age', 'fare'], aggfunc=['mean', 'max'])
        print(pv3)
        #print(pv3.columns)
        #print(pv3.xs('First', level='class'))
        #print(pv3.xs(('First', 'female')))
        #print(pv3.xs('male', level='sex'))
        # When a Series could be returned, passing level= behaves like drop_level=False
        # --> setting drop_level=True does not override it!
        #print(pv3.xs(('Second', 'male')))
        #print(pv3.xs(('Second', 'male'), level=[0, 1])) # type: DataFrame
        #print(pv3.xs(('male', 'Second'), level=['sex', 'class'], drop_level=True)) # same result as above
        # Extracting columns (axis=1)
        #print(pv3.xs('mean', axis=1, drop_level=False))
        #print(pv3.xs(('mean', 'age'), level=[0, 1], axis=1))
        #print(pv3.xs(('mean', 'age', 0), axis=1)) # type: Series
        #print(pv3.xs(('mean', 'age', 0), level=[0, 1, 2], axis=1)) # with level specified -> type: DataFrame
        #print(pv3.xs(1, level='survived', axis=1, drop_level=False))
        #print(pv3.xs('fare', level=1, axis=1))
        #print(pv3.xs(('max', 0), level=[0, 2], axis=1))
        # Draw a heatmap of the pivot ---------------------------------------------------------------------------------------
        plt.pcolor(pv3, cmap='PuBuGn')
        plt.colorbar()
        plt.show()
if __name__ == '__main__':
    P6().p273()
# -
# ### pd.pivot - DataFrameGroupBy object에서 pivot을 만들 때
# See IBM3 Exploratory Data Analysis
# * **Groupby**로 먼저 필요한 feature값에 대해 function aggregation을 해두어야 함
# * 이후 **df.pivot**으로 reshape한 뒤 heatmap 등에 활용
# +
class P6:
    def extra(self):
        """Aggregate with groupby, reshape with df.pivot, and render the result as a heatmap."""
        df = ttn[['age', 'sex', 'class', 'fare', 'survived']]
        # pivot's index/columns arguments must be real columns, hence as_index=False.
        gdf = df.groupby(['class', 'sex', 'survived'], as_index=False).mean()
        #print(gdf)
        pv = gdf.pivot(index=['class', 'sex'], columns='survived')
        print(pv)
        # Reading a single level of a multi-index
        #print(pv.index, pv.columns) # both index and columns are multi-indexes
        #print(pv.index.levels)
        #print(pv.index.levels[0]) # 'class' -> type: CategoricalIndex, dtype: category
        #print(pv.columns.levels)
        # Heatmap - pseudocolor plot (bivariate density plot) ------------------------------------------------------------------
        #plt.pcolor(pv, cmap='YlOrRd')
        #plt.colorbar()
        #plt.show()
        fig, ax = plt.subplots(figsize=(8, 5))
        im = ax.pcolor(pv, cmap='YlOrRd')
        x_label = pv.columns
        y_label = pv.index
        # Shift ticks by 0.5 so labels sit in the center of each heatmap cell.
        ax.set_xticks(np.arange(pv.shape[1]) + 0.5)
        ax.set_yticks(np.arange(pv.shape[0]) + 0.5)
        ax.set_xticklabels(x_label)
        ax.set_yticklabels(y_label)
        ax.set_xlabel('0: dead, 1: survived')
        ax.set_ylabel('(class, sex)')
        fig.colorbar(im)
        plt.show()
if __name__ == '__main__':
    P6().extra()
| part06_notebook_ver.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.models import load_model
import tensorflow as tf
from PIL import Image
import os
class DSS:
    """Console decision-support tool: classifies chest X-ray images with a saved
    Keras model and keeps a patient-history table persisted to the 'DB' CSV file."""

    def __init__(self, model_path='best_head.h5'):
        self.model = load_model(model_path)
        # Maps the model's argmax class index to a human-readable label.
        self.pred_dict = {0: 'COVID-19', 1: 'normal', 2: 'pneumonia'}
        try:
            self.hist_data = pd.read_csv('DB')
        except FileNotFoundError:
            # First run: no saved database yet, start with an empty history table.
            # (Catching only FileNotFoundError instead of a bare `except:` so
            # genuine read errors - e.g. a corrupt CSV - are not silently hidden.)
            self.hist_data = pd.DataFrame(columns=['Patient_name',
                                                  'Patient_id',
                                                  'Date_of_photo',
                                                  'Shot',
                                                  'Predicted_class',
                                                  'class_probability',
                                                  'actual_results'])

    def __get_image(self):
        """Prompt for an image name, load it from images/, and return a
        (1, 299, 299, 3) float32 array scaled to [0, 1]."""
        name = input('Please insert name of the image: ')
        path = 'images/' + name + '.jpg'
        np_image = Image.open(path)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        np_image = np_image.resize((299, 299), Image.LANCZOS)
        np_image = np.array(np_image).astype('float32')/255
        np_image = np.expand_dims(np_image, axis=0)  # add the batch dimension
        return np_image

    def __predict_class(self, image):
        """Run the model on one image; return (class label, its probability)."""
        prediction = self.model.predict(tf.convert_to_tensor(image))
        predicted_class = np.argmax(prediction)
        class_probability = prediction[0, predicted_class]
        predicted_class = self.pred_dict[predicted_class]
        return predicted_class, class_probability

    def __new_patient(self):
        """Collect patient details, predict on a new X-ray, and append a history row."""
        patient_name = input('Please input patient name: ')
        patient_id = input('Please input patient id: ')
        date = input('Please input date of X-Ray shot: ')
        img = self.__get_image()
        predicted_class, class_probability = self.__predict_class(img)
        df = pd.DataFrame({'Patient_name':[patient_name],
                           'Patient_id':[patient_id],
                           'Date_of_photo':[date],
                           'Shot':[img],
                           'Predicted_class':[predicted_class],
                           'class_probability':[class_probability]})
        # DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
        self.hist_data = pd.concat([self.hist_data, df], ignore_index=True)
        print('Prediction is: ' + str(predicted_class)
              +'\nProbability of the prediction: ' + str(class_probability))

    def __edit_patient_info(self):
        """Record the actual diagnosis for an existing history row (last column)."""
        row_id = input('Please chose row where you want to update the actual results: ')
        act_res = input('Please insert actual results: ')
        self.hist_data.iloc[int(row_id), -1] = act_res
        print('Updated!')

    def __finish(self):
        """Persist the history table to the 'DB' CSV file."""
        self.hist_data.to_csv('DB', index = False)
        print('DataBase is saved!')

    def actions(self):
        """Interactive menu loop: 1 = new prediction, 2 = record results, 3 = save & quit."""
        stop = False
        while(not stop):
            act = input('Please, input what you want to do: \n 1 -- predict for new patient, \
 \n 2 -- insert actual results for previous patients,\n 3 -- finish work\n')
            if act == '1':
                self.__new_patient()
            elif act == '2':
                self.__edit_patient_info()
            elif act == '3':
                self.__finish()
                stop = True
            else:
                print('Sorry, wrong input!')
# Run the interactive session, then reload and show the persisted database.
dss = DSS()
dss.actions()
df = pd.read_csv('DB')
df
| DSS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table> <tr>
# <td style="background-color:#ffffff;">
# <a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="25%" align="left"> </a></td>
# <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
# prepared by <a href="http://abu.lu.lv" target="_blank"><NAME></a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
# </td>
# </tr></table>
# <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# <h2> <font color="blue"> Solutions for </font>Quantum State</h2>
# <a id="task1"></a>
# <h3> Task 1 </h3>
#
# Let $a$ and $b$ be real numbers.
#
# If the following vectors are valid quantum states, then what can be the values of $a$ and $b$?
#
# $$
# \ket{v} = \myrvector{a \\ -0.1 \\ -0.3 \\ 0.4 \\ 0.5}
# ~~~~~ \mbox{and} ~~~~~
# \ket{u} = \myrvector{ \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{b}} \\ -\frac{1}{\sqrt{3}} }.
# $$
# <h3> Solution </h3>
# +
# vector |v>: find amplitude 'a' so that the squared amplitudes sum to 1
print("vector |v>")
values = [-0.1, -0.3, 0.4, 0.5]
total = sum(v ** 2 for v in values)  # summation of squares
print("total is ", total)
print("the missing part is", 1 - total)
# 'a' is plus or minus the square root of the missing probability mass
print("so, the value of 'a' can be", (1 - total) ** 0.5, "or", -(1 - total) ** 0.5)
print()
print("vector |u>")
values = [1 / (2 ** 0.5), -1 / (3 ** 0.5)]
total = sum(v ** 2 for v in values)  # summation of squares
print("total is ", total)
print("the missing part is", 1 - total)
# the missing square is 1/b (the square of 1/sqrt(b)),
# so b is the reciprocal of the missing part
print("so, the value of 'b' should be", 1 / (1 - total))
# -
# <a id="task2"></a>
# <h3> Task 2</h3>
# Remember Hadamard operator:
#
# $$
# H = \hadamard.
# $$
#
# Randomly create a 2-dimensional quantum state, and test whether Hadamard operator preserves its length or not.
#
# <b>Write a function</b> that returns a randomly created 2-dimensional quantum state.
#
# <i>Hint:
# <ul>
# <li> Pick two random values between -100 and 100 for the amplitudes of state 0 and state 1 </li>
# <li> Find an appropriate normalization factor to divide each amplitude such that the length of quantum state should be 1 </li>
# </ul>
# </i>
#
# <b>Write a function</b> that determines whether a given vector is a valid quantum state or not.
#
# (Due to precision problem, the summation of squares may not be exactly 1 but very close to 1, e.g., 0.9999999999999998.)
#
# <b>Repeat 10 times:</b>
# <ul>
# <li> Randomly pick a quantum state </li>
# <li> Check whether the picked quantum state is valid </li>
# <li> Multiply Hadamard operator with the randomly created quantum state </li>
# <li> Check whether the quantum state in result is valid </li>
# </ul>
# <h3> Solution </h3>
# randomly creating a 2-dimensional quantum state
from random import randrange
def random_quantum_state():
    """Return a random 2-entry quantum state: a real unit-length vector."""
    amp0, amp1 = randrange(-100, 101), randrange(-100, 101)
    norm_sq = amp0 ** 2 + amp1 ** 2
    # The zero vector cannot be normalized; redraw until the pair is nonzero.
    while norm_sq == 0:
        amp0, amp1 = randrange(-100, 101), randrange(-100, 101)
        norm_sq = amp0 ** 2 + amp1 ** 2
    scale = norm_sq ** 0.5
    return [amp0 / scale, amp1 / scale]
# testing whether a given quantum state is valid
def is_quantum_state(quantum_state):
    """Return True when the squared amplitudes sum to (approximately) 1."""
    length_square = sum(amp ** 2 for amp in quantum_state)
    print("summation of entry squares is",length_square)
    # Floating-point rounding can leave the sum a hair away from exactly 1,
    # so compare against 1 with a small tolerance instead of equality.
    return (length_square - 1) ** 2 < 0.00000001
# defining a function for Hadamard multiplication
def hadamard(quantum_state):
    """Multiply the 2x2 Hadamard matrix with a 2-dimensional state vector."""
    h = 1 / (2 ** 0.5)  # 1/sqrt(2), the common Hadamard coefficient
    return [h * quantum_state[0] + h * quantum_state[1],
            h * quantum_state[0] - h * quantum_state[1]]
# we are ready: verify that Hadamard preserves validity on 10 random states
for trial in range(10):
    state = random_quantum_state()
    print(state,"this is randomly picked quantum state")
    print("Is it valid?",is_quantum_state(state))
    transformed = hadamard(state)
    print(transformed,"this is new quantum state")
    print("Is it valid?",is_quantum_state(transformed))
    print()  # blank separator line between trials
| bronze/B28_Quantum_State_Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:test2]
# language: python
# name: conda-env-test2-py
# ---
# libraries
import os
import sys
import json
import numpy as np
import pandas as pd
sys.path.append(os.path.abspath('../../'))
sys.path.append(os.path.abspath('../../extra_files'))
import helper as hp
from imageio import imwrite, imread
from skimage.transform import resize
# File paths: dataset root and the directory holding preprocessed labels.
data_path = '/home/aldo/Documents/data-cic/'
preprocess_path = data_path + 'preprocess_data'
# ## Training SSD300 trained with mobilenet backbone trained
# +
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger, EarlyStopping, ReduceLROnPlateau
from keras import backend as K
from keras.models import load_model
from math import ceil
import numpy as np
from matplotlib import pyplot as plt
from light_models.keras_ssd300_mobilenetv2 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from extra_files.f1_callback import F1_callback as f1_call
# -
# ### Parameters (original SSD300 architecture)
# +
## Parameters needed for ssd_300() and SSDInputEncoder()
img_height = 300 # Height of the model input images
img_width = 300 # Width of the model input images
img_channels = 3 # Number of color channels of the model input images
mean_color = [1., 1., 1.] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.
divide_by_stddev = [127.5, 127.5, 127.5]
swap_channels = False # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.
n_classes = 1 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO
scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
scales = scales_pascal
#scales = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]
aspect_ratios = [[1.0, 2.0, 0.5],
                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                 [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                 [1.0, 2.0, 0.5],
                 [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters
two_boxes_for_ar1 = True # Generate two anchor boxes for aspect ratio 1 (one per scale pair)
steps = [16, 30, 60, 100, 150, 300] # The space between two adjacent anchor box center points for each predictor layer.
offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation
normalize_coords = True # Use relative (fraction-of-image) rather than absolute coordinates
# -
# ## Create new model with SSD weights
# +
# 1: Build the Keras model.
K.clear_session() # Clear previous models from memory.
model = ssd_300(image_size=(img_height, img_width, img_channels),
                n_classes=n_classes,
                mode='training',
                alpha=1.0,
                scales=scales,
                aspect_ratios_per_layer=aspect_ratios,
                two_boxes_for_ar1=two_boxes_for_ar1,
                steps=steps,
                offsets=offsets,
                clip_boxes=clip_boxes,
                variances=variances,
                normalize_coords=normalize_coords,
                subtract_mean=mean_color,
                divide_by_stddev=divide_by_stddev,
                swap_channels=swap_channels)
# 2: Load some weights into the model.
# TODO: Set the path to the weights you want to load.
# (No weights are loaded here - this cell trains from scratch.)
# 3: Instantiate an optimizer and the SSD loss function and compile the model.
# If you want to follow the original Caffe implementation, use the preset SGD
# optimizer, otherwise I'd recommend the commented-out Adam optimizer.
adam = Adam(lr=0.001)
#sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
# -
model.summary()
# ## Data generator for the training
# +
# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.
# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.
train_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)
# 2: Parse the image and label lists for the training and validation datasets.
# TODO: Set the paths to your dataset here.
# Images
images_dir = data_path + 'images_reduced'
# Ground truth CSVs (train uses the combined train+val file here).
#train_labels_filename = preprocess_path + '/cic_train_reduced.csv'
train_labels_filename = preprocess_path + '/cic_train_val_reduced.csv'
val_labels_filename = preprocess_path + '/cic_val_reduced.csv'
train_dataset.parse_csv(images_dir=images_dir,
                        labels_filename=train_labels_filename,
                        input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.
                        include_classes='all')
val_dataset.parse_csv(images_dir=images_dir,
                      labels_filename=val_labels_filename,
                      input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],
                      include_classes='all')
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
# +
# 3: Set the batch size.
batch_size = 32 # Change the batch size if you like, or if you run into GPU memory issues.
# 4: Set the image transformations for pre-processing and data augmentation options.
# For the training generator:
ssd_data_augmentation = SSDDataAugmentation(img_height=img_height,
                                            img_width=img_width,
                                            background=mean_color)
# For the validation generator: no augmentation, just channel fix-up and resize.
convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=img_height, width=img_width)
# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('conv13_mbox_conf').output_shape[1:3],
                   model.get_layer('conv17_mbox_conf').output_shape[1:3],
                   model.get_layer('conv18_2_mbox_conf').output_shape[1:3],
                   model.get_layer('conv19_2_mbox_conf').output_shape[1:3],
                   model.get_layer('conv20_2_mbox_conf').output_shape[1:3],
                   model.get_layer('conv21_2_mbox_conf').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
                                    img_width=img_width,
                                    n_classes=n_classes,
                                    predictor_sizes=predictor_sizes,
                                    scales=scales,
                                    aspect_ratios_per_layer=aspect_ratios,
                                    two_boxes_for_ar1=two_boxes_for_ar1,
                                    steps=steps,
                                    offsets=offsets,
                                    clip_boxes=clip_boxes,
                                    variances=variances,
                                    matching_type='multi',
                                    pos_iou_threshold=0.5,
                                    neg_iou_limit=0.5,
                                    normalize_coords=normalize_coords)
# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
train_generator = train_dataset.generate(batch_size=batch_size,
                                         shuffle=True,
                                         transformations=[ssd_data_augmentation],
                                         label_encoder=ssd_input_encoder,
                                         returns={'processed_images',
                                                  'encoded_labels'},
                                         keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
                                     shuffle=False,
                                     transformations=[convert_to_3_channels,
                                                      resize],
                                     label_encoder=ssd_input_encoder,
                                     returns={'processed_images',
                                              'encoded_labels'},
                                     keep_images_without_gt=False)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
# -
# ## Remaining training parameters
# Define a learning rate schedule.
def lr_schedule(epoch):
    """Piecewise-constant learning rate: 1e-3 for the first 400 epochs, 1e-4 after."""
    return 0.001 if epoch < 400 else 0.0001
# +
# Learning-rate sweep: rebuild, compile, and train a fresh model per learning rate,
# logging loss history and F1 scores to per-LR CSV files.
lr_list = [0.01, 0.001, 0.0001]
for lr in lr_list:
    # 1: Build the Keras model.
    K.clear_session() # Clear previous models from memory.
    print('TRAINING MODEL WITH LEARNING RATE:', lr)
    model = ssd_300(image_size=(img_height, img_width, img_channels),
                    n_classes=n_classes,
                    mode='training',
                    alpha=1.0,
                    scales=scales,
                    aspect_ratios_per_layer=aspect_ratios,
                    two_boxes_for_ar1=two_boxes_for_ar1,
                    steps=steps,
                    offsets=offsets,
                    clip_boxes=clip_boxes,
                    variances=variances,
                    normalize_coords=normalize_coords,
                    subtract_mean=mean_color,
                    divide_by_stddev=divide_by_stddev,
                    swap_channels=swap_channels)
    adam = Adam(lr=lr)
    ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
    model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
    # Define model callbacks.
    main_path = '/home/aldo/Downloads/'
    # TODO: Set the filepath under which you want to save the model.
    csv_logger = CSVLogger(filename=main_path + 'model_' + str(lr) + '.csv',
                           separator=',',
                           append=True)
    # NOTE(review): positional f1_call arguments (thresholds, eval size, etc.)
    # are assumed from the callback's signature - confirm against f1_callback.py.
    f1_callback = f1_call(0.20,
                          0.45,
                          200,
                          normalize_coords,
                          img_height,
                          img_width,
                          (1, 2006, 14),
                          main_path + 'f1_' + str(lr) + '.csv',
                          main_path + 'model.h5',
                          label_csv='/home/aldo/Documents/data-cic/preprocess_data/PASCAL_val.csv',
                          path_img='/home/aldo/Documents/data-cic/PASCAL',
                          verborse=True)
    callbacks = [csv_logger,
                 f1_callback]
    initial_epoch = 0
    final_epoch = 100
    steps_per_epoch = 200
    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=final_epoch,
                                  callbacks=callbacks,
                                  validation_data=val_generator,
                                  validation_steps=ceil(val_dataset_size/batch_size),
                                  initial_epoch=initial_epoch)
# +
import pandas as pd
import matplotlib.pyplot as plt
# Compare validation-loss curves of the three LR runs (epochs 10+ to skip warm-up noise).
mob_01 = pd.read_csv(main_path + 'model_0.01.csv')
mob_001 = pd.read_csv(main_path + 'model_0.001.csv')
mob_0001 = pd.read_csv(main_path + 'model_0.0001.csv')
fig, ax = plt.subplots()
ax.plot(mob_01['epoch'][10:], mob_01['val_loss'][10:], color='r')   # lr=0.01
ax.plot(mob_001['epoch'][10:], mob_001['val_loss'][10:], color='b') # lr=0.001
ax.plot(mob_0001['epoch'][10:], mob_0001['val_loss'][10:], color='g') # lr=0.0001
# +
mob_01_f1 = pd.read_csv(main_path + 'f1_0.01.csv')
mob_001_f1 = pd.read_csv(main_path + 'f1_0.001.csv')
mob_0001_f1 = pd.read_csv(main_path + 'f1_0.0001.csv')
fig, ax = plt.subplots()
ax.plot(mob_01_f1['epoch'], mob_01_f1['f1 score'], color='r')
ax.plot(mob_001_f1['epoch'], mob_001_f1['f1 score'], color='b')
ax.plot(mob_0001_f1['epoch'], mob_0001_f1['f1 score'], color='g')
# +
alphas = [1.0]
for alpha in alphas:
# 1: Build the Keras model.
K.clear_session() # Clear previous models from memory.
print('TRAINING MODEL WITH ALPHA:', alpha)
model = ssd_300(image_size=(img_height, img_width, img_channels),
n_classes=n_classes,
mode='training',
alpha=alpha,
scales=scales,
aspect_ratios_per_layer=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
normalize_coords=normalize_coords,
subtract_mean=mean_color,
divide_by_stddev=divide_by_stddev,
swap_channels=swap_channels)
adam = Adam(lr=0.001)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.load_weights('/home/aldo/Documents/weights/light_models/PASCAL/mobilenet_v2_alpha_1.0.h5')
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
# Define model callbacks.
main_path = '/home/aldo/Documents/'
# TODO: Set the filepath under which you want to save the model.
csv_logger = CSVLogger(filename=main_path + 'data-cic/history/light_models/CIC/mobilenet_v2_alpha_' + str(alpha) + '.csv',
separator=',',
append=True)
learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule, verbose=1)
callbacks = [csv_logger,
learning_rate_scheduler]
initial_epoch = 0
final_epoch = 500
steps_per_epoch = 15
history = model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=final_epoch,
callbacks=callbacks,
initial_epoch=initial_epoch)
model.save(main_path + 'weights/light_models/CIC/mobilenet_v2_alpha_' + str(alpha) + '.h5')
| light_networks/CIC/ssd300_training_mobilenetv2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## <NAME>
# ## Research Project 2
# The goal of this project is to make a predictive model that can accurately predict Cetane values, particularly after reducing the number of features using Principal Component Analysis
# Import Required Packages
import numpy as np
import inspect
import pandas as pd
import seaborn as sns
import math
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn import svm, metrics, linear_model
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import learning_curve, GridSearchCV
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from scipy import stats
from scipy.stats import norm, skew
from sklearn import preprocessing
from os import mkdir
from os.path import isdir, join
from ecnet import Server
from ecnet.models.mlp import MultilayerPerceptron
from ecnet.utils.logging import logger
# We create a server object from ECNet to get our random splits for training, validation and testing sets
sv = Server()
sv.load_data('cn_database_v2.0.csv',
random=True,
split=[0.7, 0.2, 0.1])
# Split our data into separate sets for training, validation and testing
# +
train_set = []
valid_set = []
test_set = []
for data in sv._df.learn_set:
train_set.append(data.id)
for data in sv._df.valid_set:
valid_set.append(data.id)
for data in sv._df.test_set:
test_set.append(data.id)
#Preview our data sets
print(len(train_set))
print(len(valid_set))
print(len(test_set))
# -
# Import our data separately
data = pd.read_csv('cn_database_v2.0.csv')
#Lets turn the dataset into a dataframe
data.head(5)
# NOTE(review): `data.iloc[0, 300]` selects a single scalar cell, and scalars
# have no `.reset_index`, so this line raises AttributeError as written.
# Possibly `data.iloc[0, :300]` (first 300 columns of row 0) was intended —
# confirm against the dataset before relying on `header`.
header = data.iloc[0, 300].reset_index(drop=True)
header
# Lets create separate dataframes for each of our sets
# +
datadf = data.iloc[:, ].reset_index(drop=True)
traindf = pd.DataFrame()
testdf = pd.DataFrame()
validdf = pd.DataFrame()
for molecule in train_set:
tempdf = datadf.loc[datadf['DATAID'] == molecule, :].reset_index(drop=True)
traindf = pd.concat([traindf,tempdf])
for molecule in test_set:
tempdf = datadf.loc[datadf['DATAID'] == molecule, :].reset_index(drop=True)
testdf = pd.concat([testdf,tempdf])
for molecule in valid_set:
tempdf = datadf.loc[datadf['DATAID'] == molecule, :].reset_index(drop=True)
validdf = pd.concat([validdf,tempdf])
# +
X_train = traindf.iloc[:, 12:].reset_index(drop=True)
y_train = traindf.iloc[:, :12].reset_index(drop=True)
X_valid = validdf.iloc[:, 12:].reset_index(drop=True)
y_valid = validdf.iloc[:, :12].reset_index(drop=True)
X_test = testdf.iloc[:, 12:].reset_index(drop=True)
y_test = testdf.iloc[:, :12].reset_index(drop=True)
# -
trainx = X_train.iloc[:, :].reset_index(drop=True)
testx = X_test.iloc[:, :].reset_index(drop=True)
validx = X_valid.iloc[:, :].reset_index(drop=True)
validy = y_valid.iloc[:, 11].reset_index(drop=True)
trainy = y_train.iloc[:, 11].reset_index(drop=True)
testy = y_test.iloc[:, 11].reset_index(drop=True)
y_train
train = pd.concat([trainy, trainx], axis=1, join='inner').reset_index(drop=True)
test = pd.concat([testy, testx], axis=1, join='inner').reset_index(drop=True)
valid = pd.concat([validy, validx], axis=1, join='inner').reset_index(drop=True)
train
full_data = pd.concat((train, valid, test)).reset_index(drop=True).astype(float)
y_train
# Cetane is the variable we need to predict, so lets do some analysis on that variable first
# +
sns.distplot(full_data['TARGET'] , fit=norm)
(mu, sigma) = norm.fit(full_data['TARGET'])
print( '\n mean = {:.2f} and st. dev = {:.2f}\n'.format(mu, sigma))
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('Cetane distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(full_data['TARGET'], plot=plt)
plt.show()
# -
# As we can see from the histogram, our data is NOT normally distributed. Our data seems to be slightly skewed left. It could be good to transform our target variable to have a more normal distribution for more accurate predictions. For now though, we will comment out the transformation
# +
#We use the numpy fuction log1p which applies log(1+x) to all elements of the column
#full_data['TARGET'] = np.log1p(full_data['TARGET'])
#Check the new distribution
sns.distplot(full_data['TARGET'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(full_data['TARGET'])
print( '\n mean = {:.2f} and st. dev = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('Cetane distribution')
#Get also the QQ-plot
fig = plt.figure()
res = stats.probplot(full_data['TARGET'], plot=plt)
plt.show()
# -
# Now lets try to focus on some feature engineering
#Here we concat all of our data into one dataframe for some easier preprocessing
ntrain = train.shape[0]
nvalid = valid.shape[0]
ntest = test.shape[0]
y_train_new = train.TARGET.values
y_test_new = test.TARGET.values
y_valid_new = valid.TARGET.values
targets = full_data['TARGET']
#all_data = pd.concat((train, valid, test)).reset_index(drop=True)
full_data.drop(['TARGET'], axis=1, inplace=True)
#all_data.head()
# After having all of our data into one dataframe, we check if there is any missing data.
data_na = (full_data.isnull().sum() / len(data)) * 100
data_na = data_na.drop(data_na[data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :data_na})
missing_data.head()
# Here we can see there is no missing data, now lets check the skew of our input data
# Normality is often assumed with statistical techniques; if you’re using regression algorithms such as linear regression or neural networks, you are likely to see large improvements if you transform variables with skewed distributions. So lets try to transform our input data
numeric_feats = full_data.dtypes[full_data.dtypes != 'object'].index
# Check the skew of all numerical features
skewed_feats = full_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
skewness.head(5)
# We preview our data before our transformation
full_data.head()
# We now perform a boxcox transformation on all the data that is slightly skewed
# +
#Here we perform a boxcox transformation on all the data that is skewed.
skewness = skewness[abs(skewness) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
full_data[feat] = boxcox1p(full_data[feat], lam)
#all_data[skewed_features] = np.log1p(all_data[skewed_features])
# -
# We preview it after our transformation
full_data.head()
# As we can see, our box cox transformation caused some input features to be NaN values, so we must impute some missing data
#Wherever there is a NaN, lets replace it with the mean of the column it is in.
impute1 = full_data.fillna(full_data.mean())
impute1.head()
# As we can see, there are some columns that are filled entirely of NaN values. So we will just drop those columns
null_columns=impute1.columns[impute1.isnull().any()]
print(impute1[impute1.isnull().any(axis=1)][null_columns].head())
filled_data = impute1.dropna(axis = 1, how = 'all')
numeric_feats = filled_data.dtypes[filled_data.dtypes != 'object'].index
# Check the skew of all numerical features
skewed_feats = filled_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
skewness.head()
# As we can see, there are still some columns whose data is significantly skewed. If we look into these features, we might be able to find out why this is happening
filled_data['INPUT.1324']
filled_data['INPUT.1333']
# It seems as if the data that is skewed heavily is due to the features being filled of mostly zeros. I do not know the best course of action to take with this data so we will keep it for now.
#
# Note:
#
# This might not be the best course of action to take for accuracy
# Splitting our training and testing sets again
# +
train = filled_data[:ntrain]
valid = filled_data[ntrain:(ntrain+nvalid)]
test = filled_data[(ntrain+nvalid):]
#train = full_data[:ntrain]
#valid = full_data[ntrain:(ntrain+nvalid)]
#test = full_data[(ntrain+nvalid):]
train.head()
# +
###############################################################################
# Compute a PCA: unsupervised feature extraction / dimensionality reduction
#n-components, how many features we want our data to keep
n_components = 300
pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True).fit(train)
# -
# We will now transform our data. As a result we will have a new dataframe with however many columns we specified before.
# NOTE: These will NOT be original columns from our original dataframe
# Rather, they will be a transformation of our data into a new dataframe with less features
#
# The explained variance tells you how much information (variance) can be attributed to each of the principal components.
# This is important as while you can convert 4 dimensional space to 2 dimensional space, you lose some of the variance (information) when you do this.
# +
X_train_pca = pca.transform(train)
X_test_pca = pca.transform(test)
X_valid_pca = pca.transform(valid)
total = 0
for x in pca.explained_variance_ratio_:
total = total + x
print(total)
# -
X_train_pca
# Here we see many things:
#
# 300 components contain roughly 99.99999% of our information
#
# 250 components contain roughly 99.998% of our information
#
# 200 components contain roughly 99.99% of our information
#
# 175 components contain roughly 99.98% of our information
#
# 150 components contain roughly 99.96% of our information
#
# 125 components contain roughly 99.9% of our information
#
# 100 components contain roughly 99.8% of our information
#
# 75 components contain roughly 99.75% of our information
#
# 50 components contain roughly 99.5% of our information
#
# 45 components contain roughly 99.3% of our information
#
# 30 components contain roughly 99% of our information
#
# 25 components contain roughly 98% of our information
#
# 20 components contain roughly 98% of our information
#
# 15 components contain roughly 97.5% of our information
#
# 10 components contain roughly 96% of our information
# +
#We choose 300 features
#learning, validation, testing
#L, V, T
ytraindf = pd.DataFrame(data = y_train).reset_index(drop=True)
traindf = pd.DataFrame(data = X_train_pca)
traindf = pd.concat([ytraindf, traindf], axis = 1)
traindf['ASSIGNMENT'] = 'L'
#targetdf = pd.DataFrame(data = targets,
# columns = ['TARGET']).reset_index(drop=True)
ytestdf = pd.DataFrame(data = y_test).reset_index(drop=True)
testdf = pd.DataFrame(data = X_test_pca)
testdf = pd.concat([ytestdf, testdf], axis = 1)
testdf['ASSIGNMENT'] = 'T'
yvaliddf = pd.DataFrame(data = y_valid).reset_index(drop=True)
validdf = pd.DataFrame(data = X_valid_pca)
validdf = pd.concat([yvaliddf, validdf], axis = 1)
validdf['ASSIGNMENT'] = 'V'
# BUG FIX / compat: DataFrame.append was deprecated in pandas 1.4 and removed
# in 2.0; pd.concat with the frames in the same order is the supported
# equivalent (train, then valid, then test).
finaldf = pd.concat([traindf, validdf, testdf])
#finaldf['TARGET'] = targetdf
finaldf.sort_values(by=['DATAID'], inplace=True)
finaldf
# -
finaldf
# +
#We now need to get our final df to have train/test splits that are np arrays
X_traindf = finaldf.loc[finaldf['ASSIGNMENT'] == 'L', :].reset_index(drop=True)
X_train = X_traindf.iloc[:, 12:].to_numpy()
y_train = X_traindf.iloc[:, 11].to_numpy()
X_validdf = finaldf.loc[finaldf['ASSIGNMENT'] == 'V', :].reset_index(drop=True)
X_valid = X_validdf.iloc[:, 12:].to_numpy()
y_valid = X_validdf.iloc[:, 11].to_numpy()
X_testdf = finaldf.loc[finaldf['ASSIGNMENT'] == 'T', :].reset_index(drop=True)
X_test = X_testdf.iloc[:, 12:].to_numpy()
y_test = X_testdf.iloc[:, 11].to_numpy()
# -
finaldf.to_csv(r'300pcav2.0.csv', index = False, header=True)
# We create an MLP with the input layer, then hidden layers with 150, and 75 neurons.
# +
from ecnet.models.mlp import MultilayerPerceptron as mp
model = mp(filename = 'model1.h5')
model.add_layer(num_neurons = 150, activation = 'relu', input_dim = 300)
model.add_layer(num_neurons = 75, activation = 'relu')
model.add_layer(num_neurons = 1, activation = 'linear')
#We fit the model
model.fit(X_train, y_train, X_valid, y_valid, 3000, 0.001, 0.9, 0.999, 0.0000001, 0.0, 0, 32)
# -
# Get our median absolute error
from sklearn.metrics import median_absolute_error
results = model.use(X_test)
median_absolute_error(y_true = y_test, y_pred = results)
y_test
results
| Dimensionality/PCA_2.0.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .rs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Rust
// language: rust
// name: rust
// ---
// # Rust Introduction
// ## Input, Output, and Data Types
use std::io;
println!("hello World")
// #### To take console input, use the match keyword
// Read one line from stdin and echo it back. `match` handles the
// Ok/Err result of `read_line` explicitly: Ok carries the byte count
// (ignored here), Err carries the I/O error.
fn main() {
    let mut lin = String::new();
    match io::stdin().read_line(&mut lin) {
        Ok(_) => {
            // `lin` still contains the trailing newline from read_line
            println!("you said: {}", lin);
        }
        Err(e) => {
            println!("failed: {}", e);
        }
    }
}
let mut string = String::from(" hello ");
println!("{}", string);
}
| Chapter_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import NearestNeighbors
from scipy import spatial
import operator
# Compat fix: `error_bad_lines` was deprecated in pandas 1.3 and removed in
# 2.0; `on_bad_lines="skip"` is the documented replacement and likewise
# silently drops malformed rows.
anime = pd.read_csv("my-data/anime.csv", on_bad_lines="skip")
# # We have missing data, so we need to clean.
# # from analyzing the data, if the type is a movie and the number of episodes is unknown, then we can put 1.
# # For OVA(Original Video Animation), these are generally one/two episode long animes. I’ve decided to fill the unknown numbers of episodes with 1 again.
#
# # For all the other animes with unknown number of episodes, I’ve filled the known values with the median
# +
# Fill unknown episode counts: OVAs and movies default to 1 episode.
anime.loc[(anime["type"]=="OVA") & (anime["episodes"]=="Unknown"),"episodes"] = "1"
# BUG FIX: the original line omitted the "episodes" column selector, so the
# assignment overwrote EVERY column of the matching rows with the string "1"
# (destroying name, genre, rating, ...). Select only the episodes column.
anime.loc[(anime["type"] == "Movie") & (anime["episodes"] == "Unknown"), "episodes"] = "1"
# Remaining unknowns become NaN, then are imputed with the median.
anime["episodes"] = anime["episodes"].map(lambda x:np.nan if x=="Unknown" else x)
anime["episodes"].fillna(anime["episodes"].median(),inplace = True)
# Ratings: coerce to float and impute missing values with the median.
anime["rating"] = anime["rating"].astype(float)
anime["rating"].fillna(anime["rating"].median(),inplace = True)
# -
anime_features = pd.concat([anime["genre"].str.get_dummies(sep=","),
pd.get_dummies(anime[["type"]]),
anime[["rating"]],anime["episodes"]],axis=1)
# you can see the features by using anime_features.columns
#I used MinMaxScaler from scikit-learn as it scales the values from 0–1.
min_max_scaler = MinMaxScaler()
anime_features = min_max_scaler.fit_transform(anime_features)
np.round(anime_features,2)
# number 2 in round means two decimal points
# # The scaling function (MinMaxScaler) returns a numpy array containing the features. Then we fit the KNN model from scikit learn to the data and calculate the nearest neighbors for each distances. In this case I’ve used the unsupervised NearestNeighbors method for implementing neighbor searches.
# +
nbrs = NearestNeighbors(n_neighbors=20, algorithm='ball_tree').fit(anime_features)
distances, indices = nbrs.kneighbors(anime_features)
# +
# Returns the index of the anime if (given the full name)
def get_index_from_name(name):
    """Return the index label of the first anime whose name matches exactly."""
    matches = anime.index[anime["name"] == name]
    return matches.tolist()[0]
all_anime_names = list(anime.name.values)
# +
# Prints the top K similar animes after querying
def print_similar_animes(query=None):
    """Print the names of the K nearest-neighbour animes for `query`.

    Looks up the row index of `query` in the global `anime` frame, then walks
    the precomputed `indices` from NearestNeighbors.kneighbors.
    """
    if query:
        found_id = get_index_from_name(query)
        # skip indices[found_id][0]: the closest neighbour is the query itself
        for idx in indices[found_id][1:]:
            # BUG FIX: `.ix` was deprecated in pandas 0.20 and removed in 1.0.
            # kneighbors returns positional indices, so `.iloc` is correct.
            print(anime.iloc[idx]["name"])
print("Start of KNN Recommendation")
pred=print_similar_animes(query="Naruto")
# -
# # loading another dataset
r_cols = ['user_id', 'item_id', 'rating']
ratings = pd.read_csv('my-data/u.data', sep='\t', names=r_cols, usecols=range(3))
ratings.head()
# # Now, we'll group everything by movie ID(item_id), and compute the total number of ratings (each movie's popularity) and the average rating for every movie.
# # The raw number of ratings isn't very useful for computing distances between movies, so we'll create a new DataFrame that contains the normalized number of ratings. So, a value of 0 means nobody rated it, and a value of 1 will mean it's the most popular movie there is.
# +
movieProperties = ratings.groupby('item_id').agg({'rating': [np.size, np.mean]})
print(movieProperties.head())
movieNumRatings = pd.DataFrame(movieProperties['rating']['size'])
movieNormalizedNumRatings = movieNumRatings.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))
movieNormalizedNumRatings.head()
# -
# # Now, let's get the genre information from the u.item file. The way this works is there are 19 fields, each corresponding to a specific genre - a value of '0' means it is not in that genre, and '1' means it is in that genre. A movie may have more than one genre associated with it.
#
# # Then, we'll put together everything into one big Python dictionary called movieDict. Each entry will contain the movie name, list of genre values, the normalized popularity score, and the average rating for each movie.
# Build movieDict: movieID -> (name, genre flags, normalized popularity,
# mean rating), parsed from the pipe-delimited u.item file.
movieDict = {}
with open('my-data/u.item') as f:
    for line in f:
        fields = line.rstrip('\n').split('|')
        movieID = int(fields[0])
        name = fields[1]
        genres = fields[5:25]
        # BUG FIX: in Python 3, map() returns a lazy one-shot iterator; storing
        # it in movieDict means the genres are consumed the first time
        # ComputeDistance reads them and appear empty ever after. Materialize
        # the list up front. (Also removed the unused `temp` variable.)
        genres = list(map(int, genres))
        movieDict[movieID] = (name, genres, movieNormalizedNumRatings.loc[movieID].get('size'), movieProperties.loc[movieID].rating.get('mean'))
# For example, here's the record we end up with for movie ID 1, (Toy Story)
movieDict[1]
# you can change the number of movieDict[num]
# # Now, let's create a function that computes the (distance) between two movies based on how similar their genres are, and how similar their popularity is.
def ComputeDistance(a, b):
    """Distance between two movie records.

    Each record is (name, genre flags, normalized popularity, mean rating);
    the distance is the cosine distance of the genre vectors plus the
    absolute gap in normalized popularity.
    """
    genre_gap = spatial.distance.cosine(a[1], b[1])
    popularity_gap = abs(a[2] - b[2])
    return genre_gap + popularity_gap
# For example,here we compute the distance between two movies (movie id 2 and movie id 4)
print(ComputeDistance(movieDict[1], movieDict[4]))
# you can compute any other movies by changing the movieDict[number]
# BUG FIX: Python 2 print statements are a SyntaxError under Python 3.
print(movieDict[1])
print(movieDict[4])
# # Now, let's compute the distance between some given test movie (Toy Story, in this example) and all of the movies in our data set. then sort those by distance, and print out the K nearest neighbors.
# +
def getNeighbors(movieID, K):
    """Return the IDs of the K movies closest to `movieID`.

    Distances come from ComputeDistance over the global movieDict; the
    query movie itself is excluded from the candidates.
    """
    candidates = []
    for other_id in movieDict:
        if other_id == movieID:
            continue
        dist = ComputeDistance(movieDict[movieID], movieDict[other_id])
        candidates.append((other_id, dist))
    candidates.sort(key=operator.itemgetter(1))
    return [movie_id for movie_id, _ in candidates[:K]]
K = 10
avgRating = 0
neighbors = getNeighbors(1, K)
for neighbor in neighbors:
    print(movieDict[neighbor][0])
    # we can print the average rating also by using the print below
    #print(movieDict[neighbor][0] + " " + str(movieDict[neighbor][3]))
    # BUG FIX: the original never accumulated into avgRating, so the division
    # below always produced 0.0. Sum each neighbour's mean rating (index 3).
    avgRating += movieDict[neighbor][3]
avgRating /= float(K)
# -
# ##
| KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''automate'': conda)'
# name: python3
# ---
# # Cleaning and Tidying Data in Pandas
#
# ## Tips and Tricks
# +
import pandas as pd
import numpy as np
df = pd.read_csv('C:\\Users\\r0dne\\Downloads\\CSVs\\us2021census.csv')
# -
df
df.dtypes
type(df)
# Read and count rows and columns
df.shape
df.head()
# Find out what the info in the arrays
df.values
# Find out what type the header columns
df.columns
# Find out type and info
df.info()
# +
# Using the loc and iloc to find multiple columns and / or rows. Or subset of rows or columns
df.loc[df['Type'] == 'City', ['Counties', 'Population']]
# -
# What to find a single column
df['Type']
type_df = df['Type']
type_df.head()
# +
# Multiple columns use two sets of square brackets [[]]
subset = df[['City', 'State', 'Population']]
# -
subset
# Check what version you are using
pd.__version__
# Finding the most populated counties in California
df.loc[df['State'] == 'CA', ['Counties', 'City', 'Population']]
# Finding the most populated counties in Texas
df.loc[df['State'] == 'TX', ['Counties', 'City', 'Population']]
| projects/data_analysis/pandas_data_census2021.ipynb |
-- ---
-- jupyter:
-- jupytext:
-- text_representation:
-- extension: .hs
-- format_name: light
-- format_version: '1.5'
-- jupytext_version: 1.14.4
-- kernelspec:
-- display_name: Haskell
-- language: haskell
-- name: haskell
-- ---
-- +
{-# LANGUAGE LambdaCase #-}
import qualified Data.Graph as G
import qualified Data.Array as A
import qualified Prelude as P
import Prelude hiding (lookup, read, replicate)
import Control.Monad.ST
import Data.STRef
import Control.Monad (forM_)
import Data.Vector.Mutable (STVector, read, replicate, write)
-- | Run the action only when the monadic condition yields 'True'.
whenM :: Monad m => m Bool -> m () -> m ()
whenM condM block = do
    cond <- condM
    if cond then block else return ()
-- | Tarjan's strongly-connected-components algorithm over an implication
-- graph, specialised for the 2-SAT check below. Returns 'Nothing' when
-- 'addSCC' finds a literal together with its negation in one component
-- (a contradiction), otherwise 'Just' the list of SCCs.
tarjan :: Int -> G.Graph -> Maybe [[Int]]
tarjan n graph = runST $ do
    index <- newSTRef 0                 -- next DFS index to hand out
    stack <- newSTRef []                -- explicit Tarjan stack
    stackSet <- replicate size False    -- O(1) "is on stack" flags
    indices <- replicate size Nothing   -- DFS index per vertex (Nothing = unvisited)
    lowlinks <- replicate size Nothing  -- lowlink per vertex
    output <- newSTRef (Just [])        -- accumulated SCCs
    -- visit every vertex not yet reached by an earlier DFS
    forM_ (G.vertices graph) $ \v ->
        whenM ((==) Nothing <$> read indices v) $
            strongConnect n v graph index stack stackSet indices lowlinks output
    readSTRef output
  where
    size = snd (A.bounds graph) + 1
-- | One recursive DFS visit of Tarjan's algorithm for vertex @v@:
-- assign its DFS index and lowlink, explore successors, and emit an SCC
-- when @v@ turns out to be a root (lowlink v == index v).
strongConnect
    :: Int                        -- number of 2-SAT variables, threaded to addSCC
    -> Int                        -- vertex being visited
    -> G.Graph
    -> STRef s Int                -- next DFS index
    -> STRef s [Int]              -- explicit DFS stack
    -> STVector s Bool            -- stack-membership flags
    -> STVector s (Maybe Int)     -- DFS indices
    -> STVector s (Maybe Int)     -- lowlink values
    -> STRef s (Maybe [[Int]])    -- accumulated SCCs (Nothing = contradiction)
    -> ST s ()
strongConnect n v graph index stack stackSet indices lowlinks output = do
    i <- readSTRef index
    write indices v (Just i)
    write lowlinks v (Just i)
    modifySTRef' index (+1)
    push stack stackSet v
    forM_ (graph A.! v) $ \w -> read indices w >>= \case
        -- unvisited successor: recurse, then fold its lowlink into ours
        Nothing -> do
            strongConnect n w graph index stack stackSet indices lowlinks output
            write lowlinks v =<< (min <$> read lowlinks v <*> read lowlinks w)
        -- already-visited successor still on the stack: back edge, use its index
        Just{} -> whenM (read stackSet w) $
            write lowlinks v =<< (min <$> read lowlinks v <*> read indices w)
    -- v is an SCC root when its lowlink never dropped below its own index
    whenM ((==) <$> read lowlinks v <*> read indices v) $ do
        scc <- addSCC n v [] stack stackSet
        modifySTRef' output $ \sccs -> (:) <$> scc <*> sccs
-- | Pop vertices off the stack down to root @v@, accumulating the SCC.
-- Returns 'Nothing' as soon as a popped literal's negation (@other n w@)
-- is already in the component — i.e. the 2-SAT instance is unsatisfiable.
addSCC :: Int -> Int -> [Int] -> STRef s [Int] -> STVector s Bool -> ST s (Maybe [Int])
addSCC n v scc stack stackSet = pop stack stackSet >>= \w -> if ((other n w) `elem` scc) then return Nothing else
    let scc' = w:scc
    in if w == v then return (Just scc') else addSCC n v scc' stack stackSet
-- | Push a vertex onto the Tarjan stack and set its membership flag
-- (the flag vector gives strongConnect an O(1) "on stack" test).
push :: STRef s [Int] -> STVector s Bool -> Int -> ST s ()
push stack stackSet e = do
    modifySTRef' stack (e:)
    write stackSet e True
-- | Pop the top vertex off the Tarjan stack, clear its membership flag,
-- and return it. Partial: calls 'head' on the stack, so the caller must
-- guarantee the stack is non-empty (true inside strongConnect).
pop :: STRef s [Int] -> STVector s Bool -> ST s Int
pop stack stackSet = do
    e <- head <$> readSTRef stack
    modifySTRef' stack tail
    write stackSet e False
    return e
-- Literal encoding helpers: clause literals are shifted by n ('normalise')
-- so they index into [0 .. 2n]; 'other' maps a literal to its negation in
-- that encoding.
denormalise = subtract
normalise = (+)
other n v = 2*n - v
-- Implication-graph edges for a clause (u OR v): not-u implies v, and
-- not-v implies u.
clauses n [u,v] = [(other n u, v), (other n v, u)]
-- | Read a 2-SAT instance from @name@ (first line: the variable count;
-- each following line: one two-literal clause) and decide satisfiability
-- by running 'tarjan' on the implication graph — satisfiable iff no SCC
-- contains a variable and its negation.
checkSat :: String -> IO Bool
checkSat name = do
    p <- map (map P.read . words) . lines <$> readFile name
    let pNo = head $ head p
        pn = map (map (normalise pNo)) $ tail p
        pGraph = G.buildG (0,2*pNo) $ concatMap (clauses pNo) pn
    return $ (Nothing /=) $ tarjan pNo pGraph
-- -
checkSat "input/2sat1.txt"
checkSat "input/2sat2.txt"
checkSat "input/2sat3.txt"
checkSat "input/2sat4.txt"
checkSat "input/2sat5.txt"
checkSat "input/2sat6.txt"
| refactoring-tarjan/2SAT-WhenM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from nilearn import plotting
from dypac_utils import match_components
root_data = '/data/cisl/pbellec/cneuromod_embeddings/xp_202012/'
# # Load parcel for one subject
R = match_components(sub1='sub-03', sub2='sub-03', root_data=root_data, fwhm=5, cluster=300, state=900)
match_val = np.max(R, axis=1)
plt.hist(match_val, bins=100)
np.median(match_val)
# # Load parcels for two subjects
R = match_components(sub1='sub-03', sub2='sub-04', root_data=root_data, fwhm=5, cluster=300, state=900)
match_val = np.max(R, axis=1)
plt.hist(match_val, bins=100)
np.median(match_val)
| notebooks/inter_subject_parcel_similarity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mateustsleao/cautious-giggle/blob/main/vale_20220524.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="yS7BMCO5qvcL"
# # 00 - Imports
# + id="TLKJAp2FOBBr" colab={"base_uri": "https://localhost:8080/"} outputId="007bf554-4ceb-4f05-f1ad-8ade19b6ed01"
## Acess repository
# !git clone https://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@github.com/mateustsleao/vale-demurrage.git
# + id="TIMDryISO8dc"
import pandas as pd
import numpy as np
import math
## import seaborn as sns
## from matplotlib import pyplot as plt
from pathlib import Path
## import os
import glob
# + id="N1tFmpINDY3Z"
# + [markdown] id="oCcbQdnmDZ9f"
# # 02 - Get intervals
# + [markdown] id="0U2Z7VNRwqlV"
# Business Rules
#
# - Compare the respective month among the years.
#
#
# - Vessel number:
# - All vessels_imo's per month dataframe.
# - ETA:
#     - Compare the realized ETA with the calculated one.
#     - calc median, variance and standard deviation.
#     - get the min and max value.
# - ETB:
#     - Compare the realized ETB with the calculated one.
#     - calc median, variance and standard deviation.
#     - get the min and max value.
# - Quantity (Supply):
#     - Aggregate per port, month and product.
#     - Compare the realized value with the calculated one.
#     - calc median, variance and standard deviation.
#     - get the min and max value.
# - Demurrage:
#     - get from (?)
# + id="l7vKB6e4EzHv"
# Folder with the raw monthly "cost analysis" Excel reports.
path_vale_folder = Path( 'vale-demurrage', 'raw_data')
# + id="UZME--wvQrSI"
## when we'll have the historical data, we'll get the report by month grouped by year.
file_list = glob.glob(str(path_vale_folder) + "/cost_analysis_report*.xlsx")
excel_list = []
for file in file_list:
    # NOTE(review): split("/") assumes POSIX paths; Path(file).stem would be
    # portable to Windows as well.
    file_name = file.split("/")[-1].split(".")[0]
    df = pd.read_excel(file)
    # NOTE(review): DataFrame.name is an ad-hoc attribute; pandas does not
    # preserve it through copies/operations -- it is only read back in the
    # loops below, where the month tag is parsed out of it.
    df.name = file_name
    excel_list.append(df)
# + [markdown] id="6PPlDNUfDwJ1"
# ## 2.1 Vessel Number
# + colab={"base_uri": "https://localhost:8080/"} id="8SXHE5sGKg92" outputId="a4cbacec-631f-429d-d06f-8e705a903881"
# Count SPOT-contract vessels departing from "Ponta da Madeira" per report.
vessel_numbers_values = []
vessel_numbers_keys = []
for excel_file in excel_list:
    # filtering by port ("Ponta da Madeira") and contract_type ("SPOT")
    df = excel_file
    # The month tag is encoded in the file name (second-to-last "_" field).
    key = str(excel_file.name.split("_")[-2])
    df = df.loc[df["Origin Port"] == "Ponta da Madeira"]
    df = df.loc[df["Contract Type"] == "SPOT"]
    # get vessel numbers
    vessel_numbers_values.append(len(df["Vessel IMO"]))
    vessel_numbers_keys.append(key)
# Two-row frame (month, value), then transposed to one row per report.
df_vessel = pd.DataFrame([ vessel_numbers_keys, vessel_numbers_values], index = ["month", "value"] )
df_vessel = df_vessel.sort_index(axis=1).T
print(df_vessel)
# Statistical values
# Per-month aggregates across the reports sharing the same month tag.
vessel_median = df_vessel.groupby('month').median()
vessel_var = df_vessel.groupby('month').var()
vessel_min = df_vessel.groupby('month').min()
vessel_max = df_vessel.groupby('month').max()
vessel_mean = df_vessel.groupby('month').mean()
vessel_std = df_vessel.groupby('month').std()
# Rename the single "value" column after each aggregate's name.
vessel_median = vessel_median.rename(columns = {'value': 'median'})
vessel_var = vessel_var.rename(columns = {'value': 'var'})
vessel_min = vessel_min.rename(columns = {'value': 'min'})
vessel_max = vessel_max.rename(columns = {'value': 'max'})
vessel_mean = vessel_mean.rename(columns = {'value': 'mean'})
vessel_std = vessel_std.rename(columns = {'value': 'std'})
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="dmvA8fZWtQXr" outputId="0b683b13-e350-4193-8205-55a4789ec83f"
vessel_median
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="Mr7iJheutQaj" outputId="bb93ce60-1760-4d19-d23c-011b58a68f76"
vessel_var
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="Jzh0d7t3tQfc" outputId="c6704e2b-99ab-4ab6-fcf0-5414a99d07c1"
vessel_min
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="KMPprBhDtQiC" outputId="22822710-b020-4a43-b1bf-8d76f522d33d"
vessel_max
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="W_w5Oou-tavw" outputId="d460c466-1b64-4087-a918-c04116ae0cda"
vessel_mean
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="FkVFTmljtayY" outputId="c83e237d-fb3b-49df-83aa-84dc953fdb60"
vessel_std
# + [markdown] id="OZRYvmQeEOfD"
# ## 2.2 ETA
# + colab={"base_uri": "https://localhost:8080/"} id="tYKF-WksERpw" outputId="f77c786b-6752-40be-c0e9-e758a816e289"
# get planned_eta from SOAN: the calculated origin ETA per shipment.
planned_eta_values = []
for excel_file in excel_list:
    # filtering by port ("Ponta da Madeira") and contract_type ("SPOT")
    df_plan_eta = excel_file
    key = str(excel_file.name.split("_")[-2])
    df_plan_eta = df_plan_eta.loc[df_plan_eta["Origin Port"] == "Ponta da Madeira"]
    df_plan_eta = df_plan_eta.loc[df_plan_eta["Contract Type"] == "SPOT"]
    # Normalize "Shipment No." to the "XXX/YYY" form used by the realized
    # data, so both sources share a join key.
    df_plan_eta['Shipment Number'] = df_plan_eta['Shipment No.'].apply(lambda x : x.replace(" ", "/"))
    # np.dtype('U') is the unicode-string dtype: force a string-typed key.
    df_plan_eta['Shipment Number'] = df_plan_eta['Shipment Number'].astype(np.dtype('U'))
    df_plan_eta['calculed_origin_eta'] = df_plan_eta["Origin ETA"]
    df_plan_eta = df_plan_eta[['Shipment Number', 'calculed_origin_eta']]
    planned_eta_values.append(df_plan_eta)
planned_eta_values[0].shape
# + colab={"base_uri": "https://localhost:8080/"} id="heMOhr8D2wa3" outputId="7c050f33-4074-4bcd-e500-cefcb747c11b"
# get executed_eta: realized origin ETA from the Operations Desk export.
path_sc = Path('vale-demurrage', 'Realizado - Operations Desk','Shipment Cargoes.xlsx')
df_sc_raw = pd.read_excel(path_sc)
df_rls_sc = df_sc_raw
df_rls_sc = df_rls_sc.loc[df_rls_sc['Origin Port'] == 'Ponta da Madeira']
df_rls_sc['executed_origin_eta'] = df_rls_sc["Origin ETA"]
# Same string dtype as the planned data's join key.
df_rls_sc['Shipment Number'] = df_rls_sc['Shipment Number'].astype(np.dtype('U'))
df_rls_sc = df_rls_sc[["Shipment Number", "executed_origin_eta"]]
df_rls_sc.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="RT7KO0xR2zRZ" outputId="543874d6-db30-467c-8f18-bab470ef39fa"
# dif between planed_eta and executed
# NOTE(review): pd.concat with ignore_index stacks the planned and realized
# rows on top of each other; it does NOT align them by 'Shipment Number'.
# A merge on 'Shipment Number' is presumably what was intended -- confirm
# before using df_difs_eta to compute differences downstream.
df_difs_eta = []
for planned_eta_value in planned_eta_values:
    df_dif_eta = pd.concat([planned_eta_value, df_rls_sc], ignore_index=True)
    df_difs_eta.append(df_dif_eta)
df_difs_eta[0]
# + id="eHXZP6lm21Tm"
# statistical_values
# + [markdown] id="MDNN8QcNESRq"
# ## 2.3 ETB
# + colab={"base_uri": "https://localhost:8080/"} id="J9uUPOzyETdW" outputId="7bc4c960-b0f6-4fbe-d798-514cbcbd3acf"
# get planned_etb from SOAN (original comment said "eta": copy/paste slip).
planned_etb_values = []
for excel_file in excel_list:
    # filtering by port ("Ponta da Madeira") and contract_type ("SPOT")
    df_plan_etb = excel_file
    key = str(excel_file.name.split("_")[-2])
    df_plan_etb = df_plan_etb.loc[df_plan_etb["Origin Port"] == "Ponta da Madeira"]
    df_plan_etb = df_plan_etb.loc[df_plan_etb["Contract Type"] == "SPOT"]
    # Normalize "Shipment No." to the "XXX/YYY" form shared with realized data.
    df_plan_etb['Shipment Number'] = df_plan_etb['Shipment No.'].apply(lambda x : x.replace(" ", "/"))
    df_plan_etb['Shipment Number'] = df_plan_etb['Shipment Number'].astype(np.dtype('U'))
    df_plan_etb['calculed_origin_etb'] = df_plan_etb["Origin ETB"]
    df_plan_etb = df_plan_etb[['Shipment Number', 'calculed_origin_etb']]
    planned_etb_values.append(df_plan_etb)
planned_etb_values[0].shape
# + colab={"base_uri": "https://localhost:8080/"} id="FLvHjZjAsFyf" outputId="938f7b31-3547-4c25-b611-df941ca1e346"
# get executed_etb: realized origin ETB from the Operations Desk export
# (original comment said "eta": copy/paste slip).
path_sc = Path('vale-demurrage', 'Realizado - Operations Desk','Shipment Cargoes.xlsx')
df_sc_raw = pd.read_excel(path_sc)
df_rls_sc = df_sc_raw
df_rls_sc = df_rls_sc.loc[df_rls_sc['Origin Port'] == 'Ponta da Madeira']
df_rls_sc['executed_origin_etb'] = df_rls_sc["Origin ETB"]
df_rls_sc['Shipment Number'] = df_rls_sc['Shipment Number'].astype(np.dtype('U'))
df_rls_sc = df_rls_sc[["Shipment Number", "executed_origin_etb"]]
df_rls_sc.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="DSyj4oPJsJO_" outputId="a4b51df8-da0d-475a-a679-e606f7ba7008"
# dif between planed_eta and executed
# NOTE(review): as in the ETA section, pd.concat stacks rows rather than
# aligning planned vs realized by 'Shipment Number'; a merge on that key is
# presumably intended -- confirm before differencing.
df_difs_etb = []
for planned_etb_value in planned_etb_values:
    df_dif_etb = pd.concat([planned_etb_value, df_rls_sc], ignore_index=True)
    df_difs_etb.append(df_dif_etb)
df_difs_etb[0]
# + [markdown] id="yjSiT6KWEUAT"
# ## 2.4 Quantity
# + id="IKzRnDMUEXFd"
# calculed - Input: Amount – coluna H
# realized - Shipment cargoes: Quantity – coluna AD
# no primary_key between then.
# + [markdown] id="qYmsqFVsEYPK"
# ## 2.5 Demurrage
# + id="aVdAVbSIEaIB"
# calculed - Cost analysis - Column AA (Demurrage ($/t))
# realized - demurrage-brasil - Column (?)
# + [markdown] id="WQoEZRgFEjer"
# # 3.0 Function Objective
# + id="ZqaqhJieEowa"
# Set use case
# Ex: Use case 1 - min and max absolute
# Ex: Use case 2 - mean and std - min and max
# Apply pay-off matrix
# set indicators
# choose the robust scenario
# + [markdown] id="qfIgRvxdjlEx"
# # Inputs Soan
#
# > Bloco com recuo
| vale_20220524.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# language: python
# name: python37664bitbaseconda3fbb94aa868f4ead9c1226a8f3e8a66e
# ---
# # Fast*er* training with Kili Technology : active-learning
# It is no secret that machine learning models, especially deep learning models, need lots of training data. In the real world, unsupervised data is plenty while supervised data is rare and costly to obtain. Thus, you may be interested in using **[active learning](https://en.wikipedia.org/wiki/Active_learning_(machine_learning))**. It is the task of choosing the data samples to annotate to minimize the number of annotations required to achieve some performance. Since most real world applications are budget-constrained, active learning (AL) can help decrease the annotation cost.
#
# 
#
# At kili-technology, we develop an annotation platform, to quickly get a production ready dataset covering almost all use cases. One of the features of our [python api](https://github.com/kili-technology/kili-playground) is the ability to change the priority of samples to annotate. Coupled with our query mechanism, which allows to import labels into your scripts, it allows the use of active learning algorithms. We show you how to do that [here](https://github.com/kili-technology/kili-playground/tree/master/recipes/active-learning).
#
# In this blog post, we will provide an overview of the current state (05/2020) of active learning :
#
# - what is it ?
# - what are typical algorithms ?
# - how to apply it to your use cases ?
#
# This blog post is the first of a series on active-learning.
# ## 1. Introduction
# The most common case, where active learning will be most useful, is the situation where you have lots of unsupervised training data quickly available. In this situation, you can iteratively select a subset of labels to annotate. You need three components :
#
# - A machine learning model that learns with supervised data $(X_l, y_l)$
# - An algorithm, which given a trained model and unlabeled data $X_u$, sends a *query* : a subsample $X_s$ of this unlabeled data which will be the most beneficial to the training of a new model : $X_s \subseteq X_u$
# - An *oracle*, which is most probably you, the human annotator, that returns the labels $y_s$.
#
# 
#
# Then, the training process is as follows (see above schema).
#
# 1. You begin by training a model on preliminary labeled data.
# 2. You run the active learning algorithm on the remaining unsupervised data. It queries samples to annotate.
# 3. You annotate the queried data.
# 4. You retrain your model with this newly acquired data.
# 5. Repeat steps 2-4 until your model has good enough performance.
#
#
#
# In the following, we will talk about the **active learning algorithms** allowing this training process. Two references for the following work can be found here :
#
# - [Settles, Burr (2010). "Active Learning Literature Survey"](http://burrsettles.com/pub/settles.activelearning.pdf) : reference review for active learning.
# - To get up to date with the state of the art : [Papers with code - active learning](https://paperswithcode.com/task/active-learning)
# ## 2. Common algorithms
# ### 2.1 Uncertainty based methods
# The first class of active learning algorithms is based on **uncertainty methods**. They require that the model **$m$ is a probabilistic learner**, that is it outputs prediction probabilities : $m(x) = (\mathbb{P}(y_i|x))_{1 \leq i \leq n}$ if there are $n$ classes. Those are often the most used types of active learning algorithms, because they are easily usable with neural networks whose last layer has a softmax activation, representing class-probabilities. There are three main algorithms :
#
# - The simplest idea is to query assets for which we are the *least confident* about. Given a sample $x$, the model predicts $\hat{y}$ with probability $\underset{y}{\operatorname{argmax}} \mathbb{P}(y|x)$. We then return the sample(s) for which we are the least sure (you can control the amount of assets queried).
# $$
# X_s = \underset{x}{\operatorname{argmax}} 1 - \mathbb{P}(\hat{y}|x)
# $$
# - Doing this discards all information about other class probabilities. One way to account for this is to use the margin between the predicted class $\hat{y}_1$ and the second top prediction $\hat{y}_2$. This is called *margin sampling*
# $$
# X_s = \underset{x}{\operatorname{argmin}} \mathbb{P}(\hat{y}_1|x) - \mathbb{P}(\hat{y}_2|x)
# $$
# - Finally, to use in the optimal way all labels, it is often best to use *entropy* : it returns the assets for which the distribution of the predictions has the largest entropy. Its main difference with the two previous algorithms is that it won't return assets for which one label is very unlikely.
# $$
# X_s = \underset{x}{\operatorname{argmax}} -\sum_y \mathbb{P}(y|x)\log\mathbb{P}(y|x)
# $$
#
# To conclude, if you are interested in reducing the loss, use entropy-based uncertainty sampling, if you are interested in reducing the classification error, use margin sampling.
# ### 2.2 Committee-based methods
# A second class of algorithms is **committee-based**. They require :
#
# - An ensemble of trained models $(m_1, ..., m_K)$
# - A way to compute disagreement between models $x \mapsto D(m_1, ..., m_K)(x)$
#
# If your machine learning models are neural network, you can either :
#
# - Train $K$ models with different initializations. Thanks to the probabilistic nature of training neural networks, it will create an ensemble of different models.
# - Train a Bayesian neural network
# - Use dropout to sample neural networks with different weights.
#
# To compute disagreement, you can use *vote-entropy* :
# $$
# X_s = \underset{x}{\operatorname{argmax}} -\sum_y \frac{N_y(x)}{K}\log\frac{N_y(x)}{K}
# $$
# where $N_y(x)$ is the number of models that predicted class $y$ for the sample $x$.
# You can also use the Kullback-Leibler divergence :
# $$
# X_s = \underset{x}{\operatorname{argmax}} \frac{1}{K} \sum_{k=1}^{K} D(m_k||m)(x)
# $$
# where $m(x)$ is the mean model prediction : $m(x) = \frac{1}{K} \sum_{k=1}^{K} m_k(x)$ and the KL divergence is computed as such : $D(m_k||m)(x) = \sum_y \mathbb{P}_k(y|x) \log \frac{\mathbb{P}_k(y|x)}{\mathbb{P}(y|x)}$. You don't need dozens of models, in most cases having between $3$ and $5$ models is enough.
# ### 2.3 Global methods
# A third class of algorithms requires that the model *is trained with a gradient descent*. Since currently neural networks are trained this way, those methods are applicable for deep learning. Those methods are less prone to fail against outliers and have proven experimentally to be effective, however they can be computationally expensive.
# #### Expected model change
# The first method computes a quantity called the **expected model change**, which, for a sample $x$, quantifies by how much the model would change if we added this sample to the training set. The question the algorithm answers is : which new labeled sample would minimize the prediction error if we performed one optimization step with it ? A common way to compute this quantity is :
#
# 1. First, get predictions $\mathbb{P}(y|x)$.
# 2. Then, for a class $c$ (which simulates a label we don't have access to), compute the gradient loss $\nabla l(\delta_c, \mathbb{P}(y|x))$ of the model, where $\delta_c$ is the dirac distribution with all mass probability on class $c$.
# 3. Finally, return the sum over $c$ of those class-gradients, weighted by their probability $\mathbb{P}(c|x)$
#
#
# This approximates the loss of the gradient of some sample $x$ :
# $$
# X_s = \underset{x}{\operatorname{argmax}} \sum_c \mathbb{P}(c|x) \| \nabla l(\delta_c, \mathbb{P}(y|x)) \|
# $$
# #### Expected error reduction
# The question the algorithm answers is : how much is the error prediction for all samples reduced if we re-train with that supplementary label ? We want to maximize the increase in certainty accross all labeled samples.
#
# In that case :
# $$
# X_s = \underset{x}{\operatorname{argmin}} \sum_c \mathbb{P}(c|x) \sum_j E(x_j)
# $$
# where $E(x_j)$ is the error on sample $x_j$ if we trained with $x$ labeled. Of course we don't have access to the label of $x$ and don't want to train for all samples, so an approximation of $E(x_j)$ can be :
#
# $$
# E(x_j) \approx \sum_{\tilde{c}} \mathbb{P}(\tilde{c}|x_j) \nabla l(\delta_{\tilde{c}}, \mathbb{P}(\tilde{c}|x_j)) \cdot \nabla l(\delta_{c}, \mathbb{P}(c|x))
# $$
# #### Density-weighted methods
# This method is a mix of both local and global based methods : it uses the result of a local method (like uncertainty sampling), and combines it with the representativeness of the considered data point. The idea is that it is no use knowing the label of a sample if it is an outlier. In that case :
#
# $$
# X_s = \underset{x}{\operatorname{argmax}} (\text{information brought by x}) \times \sum_{x_j} \text{similarity}(x, x_j)
# $$
# Other techniques exist, let's name a few :
#
# - **Variance reduction** : reduce the variance term of the predictions. See Section 3.5 of [Settles, Burr (2010). "Active Learning Literature Survey"](http://burrsettles.com/pub/settles.activelearning.pdf)
# - **Batch-mode active learning** : This family of methods tries to directly identify subsets of interesting assets to query instead of ranking each asset individually then returning the top $K$ assets.
# Recent papers :
#
# - [Active Learning for Convolutional Neural Networks: A Core-Set Approach](https://arxiv.org/pdf/1708.00489v4.pdf), ICLR 2018. Core-Set : **select samples that maximally cover the space of samples**, where the distance is defined as the l2 norm of activations. This is a kind of batch density weighted method, in the sense that we want queried samples to be as diverse as the whole dataset, and as informative as possible. **Very effective for deep learning compared to other methods**.
# - [Active Learning with Partial Feedback](https://arxiv.org/pdf/1802.07427v4.pdf), ICLR 2019. **Uses structure in the labels**. If you are doing classification of animals, it may be interesting to first classify an animal as a dog then to give its breed. You can already use the answer of the question (known as a **partial label**) to improve the model's accuracy. Another take from their paper : you should question the hypothesis that all assets are equally hard to label. The real rank should be : informativeness of the asset * cost to annotate the asset.
# - [Combining MixMatch and Active Learning for Better Accuracy with Fewer Labels](https://arxiv.org/pdf/1912.00594v2.pdf), ICLR 2020. They add simple active learning algorithms to **MixMatch**, a recent state-of-the-art **semi-supervised** learning algorithm. Performance increase : 1% absolute, ~15% relative. A take from the article : the higher the desired expected accuracy of the model, the more valuable it is getting labeled data compared to unlabeled data. This ratio decreases the more labeled data we amass.
# - [Deep Active Learning with a Neural Architecture Search](http://papers.nips.cc/paper/8831-deep-active-learning-with-a-neural-architecture-search.pdf), NeurIPS 2019. Alternatively do NAS based on the current labeled set and (simple) active learning, to challenge the idea that the initial architecture of the neural network is nearly optimal.
# - [Deep Active Learning with Adaptive Acquisition](https://arxiv.org/pdf/1906.11471v1.pdf), IJCAI 2019. There are no algorithm performing better than others, it depends too much on the dataset. They propose that the active learning algorithm be a learner that "adapts" to the samples it queried, thanks to a new Reinforcement Active Learning framework. To do so, they use a bayesian neural network (BNN), to have access to posterior probabilities of predictions as the base learner, and another BNN as the learner which selects samples to query.
# - [Deep Active Learning for Biased Datasets via Fisher Kernel Self-Supervision](https://arxiv.org/pdf/2003.00393v1.pdf). The authors consider the case where the unsupervised training data is biased w.r.t. the test data. This bias is often caused by a class imbalance. They use a fisher kernel to compare samples, and construct a validation dataset from the training data that has the same distribution as the test data.
# ## 3. Applying algorithms to specific tasks
# At the moment, we only mentioned cases where the model is trained on a single-class classification task. However, there are many more cases where active learning can be useful :
#
# - For **regression**, you can
# - change your model so that it predicts a mean and a variance
# - use commitee-based methods to compute a mean and a variance of predictions.
# - For **object detection** :
#     - [Active Learning for Deep Object Detection](https://arxiv.org/pdf/1809.09875.pdf). Compute the margin sampling for all detected objects in an asset, take the weighted (by class imbalance) average of all objects per image.
# - [ViewAL: Active Learning With Viewpoint Entropy for Semantic Segmentation](https://arxiv.org/pdf/1911.11789v2.pdf), CVPR 2020. Only works for multi-view datasets (the same object is present in multiple images, and we have 3D information).
# - For **semantic segmentation** :
# - [Deeply Supervised Active Learning for Finger Bones Segmentation](https://arxiv.org/pdf/2005.03225.pdf). Using a U-Net architecture, you can define predictions at different stages of the decoder, and compare those predictions to compute agreement. Then : less agreement = more informative sample.
# - For **Named Entity Recognition** (NER) :
# - [Practical, Efficient, and Customizable Active Learning for Named Entity Recognition in the Digital Humanities](https://www.aclweb.org/anthology/N19-1231.pdf), NAACL 2019. 20-60% increase speed compared to random.
# - [A study of active learning methods for named entity recognition in clinical text](https://www.sciencedirect.com/science/article/pii/S1532046415002038)
# - [Deep Active Learning for Named Entity Recognition](https://arxiv.org/pdf/1707.05928.pdf)
# - For **OCR** :
# - [Improving OCR Accuracy on Early Printed Books by combining Pretraining, Voting, and Active Learning](https://arxiv.org/pdf/1802.10038v2.pdf). Query-by-commitee active learning, using 5 models. They compute uncertainty with a normalized Levenshtein distance ratio.
# - For **NLP** :
# - [Deep Bayesian Active Learning for Natural Language Processing:
# Results of a Large-Scale Empirical Study](https://arxiv.org/pdf/1808.05697.pdf) Three tasks are studied : sentiment classification, NER and semantic role labeling. Their conclusion : if you do NLP, you should use [BALD](https://arxiv.org/pdf/1112.5745.pdf) : Bayesian active learning by disagreement. To compute it for deep learning, you can use [Monte-Carlo dropout](https://arxiv.org/pdf/1506.02142.pdf) or [Bayes-by-backprop](https://arxiv.org/pdf/1505.05424.pdf).
| recipes/active-learning/article_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.append("..")
import numpy as np
import json
import seaborn as sns
sns.set(style="darkgrid")
import matplotlib.pyplot as plt
from project import data_preprocessing
from project.model import models
import seaborn as sns
sns.set(style="darkgrid")
from sklearn.cluster import KMeans
import pandas as pd
from pandas import DataFrame
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
import seaborn as sns
# %matplotlib inline
# +
from keras.models import load_model
from keras.models import model_from_json
# Rebuild the three cross-validated models from their JSON architecture
# files, then load the matching trained weights from the .h5 files.
with open('../project/model/saved_model/autoencoder_CV.json','r') as f:
    autoencoder_json = json.load(f)
autoencoder = model_from_json(autoencoder_json)
autoencoder.load_weights('../project/model/saved_model/autoencoder_CV.h5')
with open('../project/model/saved_model/encoder_CV.json','r') as f:
    encoder_json = json.load(f)
encoder = model_from_json(encoder_json)
encoder.load_weights('../project/model/saved_model/encoder_CV.h5')
with open('../project/model/saved_model/decoder_CV.json','r') as f:
    decoder_json = json.load(f)
decoder = model_from_json(decoder_json)
decoder.load_weights('../project/model/saved_model/decoder_CV.h5')
# -
# #### Loading the data
file_path = '../output/universe_repo_cleaned.json'
with open(file_path) as json_file:
    data = json.load(json_file)
# +
# data maps index name -> list; element [1] holds the dividend curve.
# NOTE(review): schema inferred from usage here and in the plot cell below
# (element [2] is used as a date) -- confirm against the file producer.
universe_indices = list(data.keys())
universe_dividends = []
for index in universe_indices:
    universe_dividends.append(data[index][1])
universe_dividends = np.asarray(universe_dividends)
# MinMaxScaler scales per column, so the transposes make each curve a
# column: every curve is min-max normalized independently.
scaler = MinMaxScaler()
df_train = pd.DataFrame(universe_dividends).T
scaler.fit(df_train)
df_train = pd.DataFrame(scaler.transform(df_train)).T
universe_dividends_normalized = np.asarray(df_train)
# -
universe_dividends_normalized
# +
# Reconstruct every curve with the autoencoder and score it by MSE.
predicted_universe_CV_normalized = autoencoder.predict(universe_dividends_normalized)
# Undo the min-max scaling (transpose back and forth, mirroring the fit).
predicted_universe_CV = np.asarray(pd.DataFrame(scaler.inverse_transform(pd.DataFrame(predicted_universe_CV_normalized).T)).T)
MSE_CV = []
for i in range(len(universe_dividends)):
    try:
        MSE_CV.append(mean_squared_error(universe_dividends_normalized[i] ,predicted_universe_CV_normalized[i]))
    except:
        # NOTE(review): bare except silently maps any failure to an MSE of
        # 0.0, i.e. a "perfect" reconstruction -- presumably this guards
        # against NaNs in a curve; confirm, and narrow the exception type.
        MSE_CV.append(0.)
# NOTE(review): distplot is deprecated in recent seaborn (use histplot).
sns.distplot(MSE_CV, bins=25)
plt.xlim(0, np.max(MSE_CV)*2)
# +
from sklearn.cluster import KMeans
# 2-means on the scalar MSEs: one cluster of "normal" errors, one of outliers.
df = pd.DataFrame(MSE_CV)
kmeans = KMeans(n_clusters=2).fit(df)
centroids = kmeans.cluster_centers_
# Cluster labels are arbitrary: take the smaller cluster as the outliers.
outliers_indices = np.where(kmeans.labels_==0)[0]
if(len(outliers_indices) >= (len(kmeans.labels_)-len(outliers_indices))):
    outliers_indices = np.where(kmeans.labels_==1)[0]
print("Pourcentage d'outliers : " + str(np.around(len(outliers_indices)/len(kmeans.labels_)*100,2)) + "%")
print("Indices Of Outliers : " + str(outliers_indices))
# Bar colors: red-ish for outlier curves, purple for normal ones.
color = []
for i in range(len(kmeans.labels_)):
    if(i in outliers_indices):
        color.append('#B05E77')
    else:
        color.append('#7470AB')
mses = np.round_(MSE_CV,decimals=3)
x = np.arange(0,len(mses))
plt.bar(x, height=mses,color=color)
# Threshold ("seuil") = smallest MSE within the outlier cluster.
MSE_outliers = mses[outliers_indices]
seuil = np.min(MSE_outliers)
plt.axhline(seuil)
plt.ylabel('MSE')
print("Seuil d'erreur : " + str(seuil))
# -
sns.distplot(MSE_CV, bins=25)
plt.axvline(0.085)
plt.xlim(0, np.max(MSE_CV)*2)
# +
# Overlay one real curve and its reconstruction for visual inspection.
curve_number = 2
plt.plot(universe_dividends[curve_number], label="real")
plt.plot(predicted_universe_CV[curve_number], label="predicted")
plt.legend()
# Title shows the index name and its date field (element [2] of the record).
plt.title('Index : {}, Date: {}'.format(list(data.keys())[curve_number], data[list(data.keys())[curve_number]][2]))
# -
| adam_api_repo_curve_anomaly_detection/notebooks/Testing autoencoder on Universe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Coding Exercises
# ### Binary Gap
#
# A function that, given a positive integer N, returns the length of the longest sequence of consecutive zeros surrounded by ones at both ends of the binary representation of N. The function should return 0 if N doesn't contain a binary gap.
def solution(N):
    """
    Return the length of the longest "binary gap" of N: the longest run of
    consecutive zeros surrounded by ones in N's binary representation.

    Returns 0 when N has no binary gap (e.g. N is a power of two).
    """
    # format(N, 'b') gives the binary digits without the '0b' prefix.
    bits = format(N, 'b')
    # Trailing zeros are not enclosed by ones, so strip them first.
    # (A binary representation never has leading zeros.)
    gaps = bits.strip('0').split('1')
    # Each element is a (possibly empty) run of zeros between ones.
    # split() always yields at least one element, so max() is safe.
    return max(len(gap) for gap in gaps)
# ### Distinct
#
# Write a function that returns the number of distinct values in an array A. Assume that each element of the array is an integer within the range [−1,000,000:1,000,000].
def solution(A):
    """
    Return the number of distinct values in the array A.

    Elements are assumed to be integers within [-1,000,000, 1,000,000],
    though any hashable elements work.

    The original hand-rolled dict counting (build a frequency map, then
    count its keys) is exactly a set-cardinality computation, so use the
    idiomatic O(n) form directly.
    """
    return len(set(A))
| python-coding-exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# ## BDQN (Bayesian-Deep Q-Network)
# ### A Thompson Sampling of deep explorative RL
# This tutorial walks through the implementation of Bayesian deep Q networks (BDQNs),
# an RL method which applies the function approximation capabilities of deep neural networks
# to problems in reinforcement learning.
# The model in this tutorial follows the work described in the paper
# [Efficient Exploration through Bayesian Deep Q-Networks](Under Review), written by <NAME>, <NAME> and, <NAME>.
#
# To keep this tutorial runnable
# by as many people as possible,
# on as many machines as possible,
# and with as few headaches as possible,
# we have so far avoided dependencies on external libraries
# (besides mxnet, numpy and matplotlib).
# However, in this case, we'll need to import the [OpenAI Gym](https://gym.openai.com/docs).
# That's because in reinforcement learning,
# instead of drawing examples from a data structure,
# our data comes from interactions with an environment.
# In this chapter, our environments will be classic Atari video games.
#
# ## Preliminaries
# The following code clones and installs the OpenAI gym.
# `git clone https://github.com/openai/gym ; cd gym ; pip install -e .[all]`
# Most users of an AWS EC2 instance need an additional command to be run before installation:
#
# `apt-get install -y python-numpy python-dev cmake zlib1g-dev libjpeg-dev xvfb libav-tools xorg-dev python-opengl libboost-all-dev libsdl2-dev swig
# `
#
# Full documentation for the gym can be found on [at this website](https://gym.openai.com/).
# If you want to see reasonable results before the sun sets on your AI career,
# we suggest running these experiments on a server equipped with GPUs.
# +
#import sys
# #!conda install --yes libgcc
# -
# !pip install mxnet
# +
from __future__ import print_function
import mxnet as mx
from mxnet import nd, autograd
from mxnet import gluon
import os
import random
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
import gym
import math
from collections import namedtuple
import time
import pickle
import logging, logging.handlers
# %matplotlib inline
import matplotlib.ticker as mtick
# Create a directory to store models and scores.
# Fix: os.system('mkdir data') is shell-dependent and prints an error to the
# console when the directory already exists; os.makedirs with exist_ok=True is
# portable and idempotent.
os.makedirs('data', exist_ok=True)
# -
# ### Summary of the algorithm
# The BDQN, in the sense of implementation, is same as DDQN, [Deep Reinforcement Learning with Double Q-learning](https://arxiv.org/abs/1509.06461), written by <NAME>, except in the last layer, where instead of using linear regression as in DDQN, BDQN uses Bayesian Linear Regression (BLR) and for exploration, instead of using naive $\varepsilon$-greedy strategy as in DDQN, BDQN uses Thompson sampling and avoid any naive exploration.
#
# As it is mentioned before, BDQN has the same architecture as DDQN has except, in BDQN we remove the last layer of DDQN. We call the output of the network as a representation $\phi(\cdot)$, and instead assign BLR layer on the top of the representation. The input to the network is state of the environment, `x` and the output is $\phi(x)$, the feature representation. The input to BLR block is the feature representation.
#
# ||
# |:---------------:|
# |BDQN Algorithm|
#
# #### BDQN Architecture
# The input to the network part of BDQN is 4 × 84 × 84 tensor with a
# rescaled, mean-scale version of the last four observations. The first convolution layer has 32 filters of size 8 with a stride of 4. The second convolution layer has 64 filters of size 4 with stride 2. The last convolution layer has 64 filters of size 3 followed by a fully connected layers with size 512. We add a BLR layer on top of this.
#
# ||
# |:---------------:|
# |BDQN Architecture|
#
# #### BLR, a closed form way of computing posterior.
# In both DDQN (linear regression) and BDQN (Bayesian linear regression) the common assumptions are as follows:
#
# * The layer before the last layer provides features $\phi(\cdot)$, suitable for linear models.
# * The generative model for state-action value, Q(x,a) is drawn from the following generative model:
# $$y\sim w_a\phi(x)+\epsilon$$
# Where $y$ is a sample of Q(x,a) and for simplicity we assume $\epsilon$ is a mean-zero Gaussian noise with variance $\sigma_n^2$.
#
# The question in linear regression problem is given a bunch of (x,a,y), what $w_a$ can be in term of minimizing least square error and the task is to find a $w_a$ which matches $x,a$ to $y$. In Bayesian machinery, we assume $w_a$ is drawn from a prior distribution, e.g. mean-zero Gaussian distribution with variance $\sigma^2$. Given data, the question in BLR is what is the posterior distribution of $w_a$ which matches $x,a$ to $y$. The interesting property of BLR is that given data, the distribution of $w_a$, therefore Q(a,x), can be computed in closed form and due to the conjugacy, the distribution over of samples of Q(a,x) has closed form.
#
# Given this nice property, at each time step, we can compute the posterior distribution of Q-function. As Thompson Sampling based strategies suggest, we draw a Q-function out of the posterior distribution and act optimally with respect to that for that time step.
# #### Target Network
#
# In both DDQN and BDQN we assume $\phi_\theta(\cdot)$ is parametrized by parameters $\theta$. Furthermore, the observation $y$ for time step $t$, after seeing the consecutive sequence $x_t,a_t,r_t,x_{t+1}$, is
# $$y_t := r_t + \lambda Q^{target}(x_{t+1},argmax_{a'}Q(x_{t+1},a'))$$
# where $Q^{target}$ has the same structure as $Q$, but with parameters $\theta^{target}$ and $w_a^{target},~\forall{a\in\mathcal{A}}$, where the target parameters get updated once in a while.
#
# #### Posterior update
#
# Given a dataset $\mathcal{D}=\lbrace x_i,a_i,y_i\rbrace_{i=1}^D$, we construct $|\mathcal{A}|$ disjoint datasets for each action, $\mathcal{D}=\cup_{a\in\mathcal{A}} \mathcal{D}_a$ where $\mathcal{D}_a$ is a set of tuples $x_i,a_i,y_i$ with the action $a_i = a$ and size $D_a$. Given the data set $\mathcal{D}_a$, we are interested in $\mathbb{P}(w_a|\mathcal{D}_a)$ and $\mathbb{P}(Q(x,a)|\mathcal{D}_a),\forall x\in\mathcal{X}$. Let us construct a matrix $\Phi_a\in\Re^{d\times D_a}$, a concatenation of feature column vectors $\lbrace\phi(x_i)\rbrace_{i=1}^{D_a}$, and $\textbf{y}_a\in\Re^{D_a}$, a concatenation of target values in set $\mathcal{D}_a$. Therefore the posterior distribution is as follows:
#
# $$w_a\sim \mathcal{N}\left(\frac{1}{\sigma^2_\epsilon}\Xi_a\Phi_a\textbf{y}_a ,\Xi_a\right),~~\textit{where}~~\Xi_a = \left(\frac{1}{\sigma_\epsilon^2}\Phi_a\Phi_a^\top+\frac{1}{\sigma^2}I \right)^{-1}$$
# and since the prior and likelihood are conjugate of each other we have
# $$
# Q(x,a)|\mathcal{D}_a\sim \mathcal{N}\left(\frac{1}{\sigma^2_\epsilon}\phi(x)^\top\Xi_a\Phi_a\textbf{y}_a ,\phi(x)^\top\Xi_a\phi(x)\right)$$
# where $I\in\Re^{d\times d}$ is the identity matrix.
#
#
#
#
#
#
#
#
# #### Collect samples
# At the beginning of each episode (one round of the game),
# reset the environment to its initial state using `env.reset()`.
# At each time step $t$, the environment is at `current_state`.
# Given the seen data up to time $t$, the agent updates the posterior distribution of Q-functions and deploys Thompson Sampling in order to choose the action $a_{TS}$.
#
# Pass the action through `env.step(action)` to receive next frame, reward and whether the game terminates.
# Append this frame to the end of the `current_state` and construct `next_state` while removing $frame(t-12)$.
# Store the tuple $( `current_state` $, action, reward, `next_ state` )$ in the replay buffer.
#
# #### Update Network
# * Draw batches of tuples from the replay buffer: $(\phi,r,a,\phi')$.
# * Update $\theta$ using the following loss:
# $$\Large(\small Q(W,a,\theta)-r-Q(W,argmax_{a'}Q(\phi',a',\theta),\theta^{target})\Large)^2$$
# where $W$ is the set of $w_a,~a\in\forall \mathcal{A}$
# * Update the $\theta$
# * Update the $\theta^{target}$ once in a while
#
#
#
#
#
#
#
# ## Set the hyper-parameters
# +
# #!conda install --yes numpy
# +
# #!yes | conda install libgcc
# #!conda install --yes libgcc
# -
import sys
sys.executable
# !pip install atari-py
# +
class Options:
    """Container for every BDQN hyper-parameter and runtime setting.

    Instantiated once as the module-level global `opt` and read throughout
    the notebook (network construction, training loop, BLR updates).
    """
    def __init__(self):
        #Architecture
        self.batch_size = 32 # The size of the batch to learn the Q-function
        self.image_size = 84 # Resize the raw input frame to a square frame of size 84 by 84
        #Tricks
        self.replay_buffer_size = 100000 # The size of replay buffer; set it to size of your memory (.5M for 50G available memory)
        self.learning_frequency = 4 # With Freq of 1/4 step update the Q-network
        self.skip_frame = 4 # Skip 4-1 raw frames between steps
        self.internal_skip_frame = 4 # Skip 4-1 raw frames between skipped frames
        self.frame_len = 4 # Each state is formed as a concatenation of 4 step frames [f(t-12),f(t-8),f(t-4),f(t)]
        self.Target_update = 10000 # Update the target network each 10000 steps
        self.epsilon_min = 0.1 # Minimum level of stochasticity of policy (epsilon)-greedy
        self.annealing_end = 1000000. # The number of steps it takes to linearly anneal the epsilon to its min value
        self.gamma = 0.99 # The discount factor
        self.replay_start_size = 50000 # Learning (backpropagation) starts after this many frames are collected
        #optimization
        self.max_episode = 200000000 # max number of episodes
        self.lr = 0.0025 # RMSprop learning rate
        self.gamma1 = 0.95 # RMSprop gamma1
        self.gamma2 = 0.95 # RMSprop gamma2
        self.rms_eps = 0.01 # RMSprop epsilon bias
        #self.ctx = mx.gpu() # Enables gpu if available, if not, set it to mx.cpu()
        self.ctx = mx.cpu() # Compute context; switch to mx.gpu() when a GPU is available
        self.lastlayer = 512 # Dimensionality of feature space
        self.f_sampling = 1000 # frequency of sampling E_W_ (Thompson Sampling)
        self.alpha = .01 # forgetting factor, 1 -> forget the past entirely
        self.alpha_target = 1 # forgetting factor used for the target posterior update
        self.f_bayes_update = 1000 # frequency of updating E_W and Cov
        self.target_batch_size = 5000 # number of replay samples used per posterior (target) update
        self.BayesBatch = 10000 # size of batch for updating E_W and Cov
        self.target_W_update = 10 # update the target posterior once per this many target-network updates
        self.lambda_W = 0.1 # update on W = lambda W_new + (1-lambda) W
        self.sigma = 0.001 # W prior variance
        self.sigma_n = 1 # noise variance
# Instantiate hyper-parameters, the Gym environment, the RNG seed, and a
# rotating file logger that records training progress under ./data/.
opt = Options()
env_name = 'AsterixNoFrameskip-v4' # Set the desired environment
env = gym.make(env_name)
num_action = env.action_space.n # Extract the number of available actions from the environment
manualSeed = 1 # random.randint(1, 10000) # Set the desired seed to reproduce the results
mx.random.seed(manualSeed)
attrs = vars(opt)  # dict of all hyper-parameters, logged below for the record
# set the logger
logger = logging.getLogger()
file_name = './data/results_BDQN_%s_lr_%f.log' %(env_name,opt.lr)
fh = logging.handlers.RotatingFileHandler(file_name)
# NOTE(review): setting the level on the handler alone has no effect because the
# root logger's own level defaults to WARNING, so DEBUG/INFO records are dropped
# before they ever reach this handler -- which is why logging.error() is used
# everywhere below. Adding logger.setLevel(logging.DEBUG) would fix this.
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
ff =(', '.join("%s: %s" % item for item in attrs.items()))
logging.error(str(ff))  # record the full hyper-parameter set at startup
# -
# ### Define the feature representation model
# The network is constructed as three CNN layers and a fully connected added on the top. Furthermore, the optimizer is assigned to the parameters.
# +
def DQN_gen():
    """Build the convolutional feature network phi(.) used by BDQN.

    Three conv layers (each followed by batch-norm and ReLU), a flatten, and a
    dense projection to an `opt.lastlayer`-dimensional feature vector.  The
    Bayesian linear-regression head lives outside this network.  Parameters are
    initialized with N(0, 0.02) on `opt.ctx`.
    """
    net = gluon.nn.Sequential()
    with net.name_scope():
        # conv 1: 32 filters, 8x8 kernel, stride 4
        net.add(gluon.nn.Conv2D(channels=32, kernel_size=8, strides=4, padding=0))
        net.add(gluon.nn.BatchNorm(axis=1, momentum=0.1, center=True))
        net.add(gluon.nn.Activation('relu'))
        # conv 2: 64 filters, 4x4 kernel, stride 2
        net.add(gluon.nn.Conv2D(channels=64, kernel_size=4, strides=2))
        net.add(gluon.nn.BatchNorm(axis=1, momentum=0.1, center=True))
        net.add(gluon.nn.Activation('relu'))
        # conv 3: 64 filters, 3x3 kernel, stride 1
        net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, strides=1))
        net.add(gluon.nn.BatchNorm(axis=1, momentum=0.1, center=True))
        net.add(gluon.nn.Activation('relu'))
        net.add(gluon.nn.Flatten())
        # fully connected feature layer
        net.add(gluon.nn.Dense(opt.lastlayer, activation='relu'))
    net.collect_params().initialize(mx.init.Normal(0.02), ctx=opt.ctx)
    return net
dqn_ = DQN_gen()         # online network (trained every learning step)
target_dqn_ = DQN_gen()  # target network (same architecture, refreshed periodically)
# Centered RMSProp optimizer over the online network's parameters.
DQN_trainer = gluon.Trainer(dqn_.collect_params(),'RMSProp', \
    {'learning_rate': opt.lr ,'gamma1':opt.gamma1,'gamma2': opt.gamma2,'epsilon': opt.rms_eps,'centered' : True})
dqn_.collect_params().zero_grad()
# -
# ### Replay buffer
# Replay buffer store the tuple of : `state`, action , `next_state`, reward , done.
# A single experience tuple stored in the replay buffer.
Transition = namedtuple('Transition',('state', 'action', 'next_state', 'reward','done'))

class Replay_Buffer():
    """Fixed-capacity circular buffer of `Transition` records.

    Once full, each new transition overwrites the oldest slot.
    """
    def __init__(self, replay_buffer_size):
        self.replay_buffer_size = replay_buffer_size  # maximum number of stored transitions
        self.memory = []    # backing list; grows until capacity, then is overwritten in place
        self.position = 0   # index of the slot the next push will write

    def push(self, *args):
        """Store one transition (state, action, next_state, reward, done)."""
        entry = Transition(*args)
        if len(self.memory) == self.replay_buffer_size:
            self.memory[self.position] = entry  # full: overwrite the oldest slot
        else:
            self.memory.append(entry)           # still growing toward capacity
        self.position = (self.position + 1) % self.replay_buffer_size

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)
# ### Initialized BLR matrices
# +
# Preallocated single-sample buffers used by BayesReg.
bat_state = nd.empty((1,opt.frame_len,opt.image_size,opt.image_size), opt.ctx)
bat_state_next = nd.empty((1,opt.frame_len,opt.image_size,opt.image_size), opt.ctx)
bat_reward = nd.empty((1), opt.ctx)
bat_action = nd.empty((1), opt.ctx)
bat_done = nd.empty((1), opt.ctx)
# Identity matrix in feature space, used as the Gaussian-prior precision term.
eye = nd.zeros((opt.lastlayer,opt.lastlayer), opt.ctx)
for i in range(opt.lastlayer):
    eye[i,i] = 1
# Per-action BLR weights: posterior mean, target copy, and the Thompson sample.
E_W = nd.normal(loc=0, scale=.01, shape=(num_action,opt.lastlayer),ctx = opt.ctx)
E_W_target = nd.normal(loc=0, scale=.01, shape=(num_action,opt.lastlayer),ctx = opt.ctx)
E_W_ = nd.normal(loc=0, scale=.01, shape=(num_action,opt.lastlayer),ctx = opt.ctx)
Cov_W = nd.normal(loc=0, scale= 1, shape=(num_action,opt.lastlayer,opt.lastlayer),ctx = opt.ctx)+eye
# BUGFIX: the original `Cov_W_decom = Cov_W` / `Cov_W_target = Cov_W` bound the
# SAME NDArray object three times, so every later in-place write to Cov_W_decom
# (the Cholesky refresh) silently clobbered Cov_W and Cov_W_target as well.
# Independent copies keep the posterior, its Cholesky factor, and the target
# snapshot separate.
Cov_W_decom = Cov_W.copy()
for i in range(num_action):
    Cov_W[i] = eye  # start from the identity covariance (prior)
    Cov_W_decom[i] = nd.array(np.linalg.cholesky(((Cov_W[i]+nd.transpose(Cov_W[i]))/2.).asnumpy()),ctx = opt.ctx)
Cov_W_target = Cov_W.copy()
# Sufficient statistics of the Bayesian linear regression, one slice per action.
phiphiT = nd.zeros((num_action,opt.lastlayer,opt.lastlayer), opt.ctx)
phiY = nd.zeros((num_action,opt.lastlayer), opt.ctx)
# -
# ### BLR posterior update
# +
sigma = opt.sigma      # prior variance of the BLR weights
sigma_n = opt.sigma_n  # observation-noise variance
def BayesReg(phiphiT,phiY,alpha,batch_size):
    """Recompute the Bayesian linear-regression posterior over last-layer weights.

    Accumulates the sufficient statistics `phiphiT` (sum of phi.phi^T) and
    `phiY` (sum of phi * TD-target) from `batch_size` transitions sampled one
    at a time from the global replay buffer, then solves the closed-form
    posterior per action.

    Args:
        phiphiT: (num_action, d, d) accumulated feature outer products.
        phiY: (num_action, d) accumulated feature-weighted targets.
        alpha: forgetting factor; the old statistics are scaled by (1-alpha).
        batch_size: number of replay samples to absorb.

    Returns:
        The updated (phiphiT, phiY, E_W, Cov_W). Note that E_W and Cov_W are
        module-level globals mutated in place; also reads the globals
        replay_memory, dqn_, target_dqn_, E_W_target, eye and opt.
    """
    phiphiT *= (1-alpha) # Forgetting parameter alpha controls how much of the past moments are kept; alpha=1 discards them entirely
    phiY *= (1-alpha)
    for j in range(batch_size):
        transitions = replay_memory.sample(1) # sample a minibatch of size one from replay buffer
        bat_state[0] = transitions[0].state.as_in_context(opt.ctx).astype('float32')/255.
        bat_state_next[0] = transitions[0].next_state.as_in_context(opt.ctx).astype('float32')/255.
        # NOTE(review): the next three assignments create plain local scalars;
        # the preallocated nd.empty((1)) buffers of the same names are unused here.
        bat_reward = transitions[0].reward
        bat_action = transitions[0].action
        bat_done = transitions[0].done
        phiphiT[int(bat_action)] += nd.dot(dqn_(bat_state).T,dqn_(bat_state))
        # TD target: r + gamma * max_a' w_target . phi_target(s'), zero bootstrap when done
        phiY[int(bat_action)] += (dqn_(bat_state)[0].T*(bat_reward +(1.-bat_done) * opt.gamma * nd.max(nd.dot(E_W_target,target_dqn_(bat_state_next)[0].T))))
    for i in range(num_action):
        # posterior precision: phiphiT/sigma_n + I/sigma, inverted on the CPU via numpy
        inv = np.linalg.inv((phiphiT[i]/sigma_n + 1/sigma*eye).asnumpy())
        E_W[i] = nd.array(np.dot(inv,phiY[i].asnumpy())/sigma_n, ctx = opt.ctx)
        # NOTE(review): the write-up's posterior covariance is the inverse itself
        # (Xi_a); the extra `sigma` scaling here should be confirmed against the
        # intended formula.
        Cov_W[i] = sigma * inv
    return phiphiT,phiY,E_W,Cov_W
# Thompson sampling: draw one weight vector per action from the posterior.
def sample_W(E_W, U):
    """Sample W[a] ~ N(E_W[a], U[a] U[a]^T) for every action `a`.

    `U` holds the per-action Cholesky factors of the posterior covariances.
    The draws are written into (and returned via) the global `E_W_`.
    """
    for a in range(num_action):
        z = nd.normal(loc=0, scale=1, shape=(opt.lastlayer, 1), ctx=opt.ctx)
        E_W_[a] = E_W[a] + nd.dot(U[a], z)[:, 0]
    return E_W_
# -
# ### Preprocess frames
# * Take a frame, average over the `RGB` filter and append it to the `state` to construct `next_state`
# * Clip the reward
# * Render the frames
# +
def preprocess(raw_frame, currentState = None, initial_state = False):
    """Convert a raw RGB frame into one grayscale channel of the stacked state.

    The frame is averaged over its color channels, resized to
    `opt.image_size` square, moved to channel-first layout and scaled to
    [0, 1].  With `initial_state=True` the single frame is replicated
    `opt.frame_len` times to seed a fresh state; otherwise the oldest channel
    of `currentState` is dropped and the new frame appended.
    """
    frame = nd.array(raw_frame, mx.cpu())
    # collapse RGB -> grayscale while keeping a trailing channel axis
    frame = nd.reshape(nd.mean(frame, axis = 2), shape = (frame.shape[0], frame.shape[1], 1))
    frame = mx.image.imresize(frame, opt.image_size, opt.image_size)
    # HWC -> CHW, then normalize pixel values into [0, 1]
    frame = nd.transpose(frame, (2, 0, 1)).astype('float32') / 255.
    if initial_state:
        # a new episode: stack frame_len copies of the first frame
        state = frame
        for _ in range(opt.frame_len - 1):
            state = nd.concat(state, frame, dim = 0)
    else:
        # slide the window: drop the oldest channel, append the newest
        state = mx.nd.concat(currentState[1:, :, :], frame, dim = 0)
    return state
def rew_clipper(rew):
    """Clip a raw environment reward to its sign: 1.0, -1.0 or 0.0.

    Fix: the zero branch previously returned the int ``0`` while the other
    branches returned floats; all branches now return a float so accumulators
    built from this function keep a consistent type.
    """
    if rew > 0.:
        return 1.
    elif rew < 0.:
        return -1.
    else:
        return 0.
def renderimage(next_frame):
    """Display `next_frame` inline when the global `render_image` flag is True.

    Clears the previous notebook output and sleeps briefly so successive
    frames play back as a slow animation; a no-op when rendering is disabled.
    """
    if render_image:
        plt.imshow(next_frame);
        plt.show()
        display.clear_output(wait=True)
        time.sleep(.1)
l2loss = gluon.loss.L2Loss(batch_axis=0)  # squared-error loss used for the TD update
# -
# ### Initialize arrays
frame_counter = 0. # Counts the number of steps so far
annealing_count = 0. # Counts the number of annealing steps
epis_count = 0. # Counts the number of episodes so far
replay_memory = Replay_Buffer(opt.replay_buffer_size) # Initialize the replay buffer
tot_clipped_reward = []    # per-episode clipped return history
tot_reward = []            # per-episode raw return history
frame_count_record = []    # frame counter recorded at the end of each episode
moving_average_clipped = 0.  # 100-episode moving average of the clipped return
moving_average = 0.          # 100-episode moving average of the raw return
flag = 0  # set to 1 when a NaN is detected in the Q values (aborts training)
c_t = 0   # target-network updates since the last posterior (BLR) update
# ### Train the model
# +
render_image = False # Whether to render Frames and show the game
# Preallocated minibatch buffers, reused on every learning step to avoid
# reallocating NDArrays inside the training loop.
batch_state = nd.empty((opt.batch_size,opt.frame_len,opt.image_size,opt.image_size), opt.ctx)
batch_state_next = nd.empty((opt.batch_size,opt.frame_len,opt.image_size,opt.image_size), opt.ctx)
batch_reward = nd.empty((opt.batch_size),opt.ctx)
batch_action = nd.empty((opt.batch_size),opt.ctx)
batch_done = nd.empty((opt.batch_size),opt.ctx)
# Main interaction/learning loop: play episodes, store transitions in the
# replay buffer, train the feature network, and periodically refresh the
# target network and the BLR posterior. (Indentation reconstructed.)
while epis_count < opt.max_episode:
    cum_clipped_reward = 0
    cum_reward = 0
    next_frame = env.reset()
    state = preprocess(next_frame, initial_state = True)
    t = 0.
    done = False
    while not done:
        mx.nd.waitall()
        previous_state = state
        # show the frame
        renderimage(next_frame)
        sample = random.random()  # NOTE(review): unused -- leftover from an epsilon-greedy scheme
        if frame_counter > opt.replay_start_size:
            annealing_count += 1
        if frame_counter == opt.replay_start_size:
            logging.error('annealing and laerning are started ')
        # greedy action under the currently sampled weights E_W_ (Thompson sampling)
        data = nd.array(state.reshape([1,opt.frame_len,opt.image_size,opt.image_size]),opt.ctx)
        a = nd.dot(E_W_,dqn_(data)[0].T)
        action = np.argmax(a.asnumpy()).astype(np.uint8)
        # Skip frame: repeat the chosen action across several raw frames
        rew = 0
        for skip in range(opt.skip_frame-1):
            next_frame, reward, done,_ = env.step(action)
            renderimage(next_frame)
            cum_clipped_reward += rew_clipper(reward)
            rew += reward
            for internal_skip in range(opt.internal_skip_frame-1):
                _ , reward, done,_ = env.step(action)
                cum_clipped_reward += rew_clipper(reward)
                rew += reward
        next_frame_new, reward, done, _ = env.step(action)
        renderimage(next_frame)
        cum_clipped_reward += rew_clipper(reward)
        rew += reward
        cum_reward += rew
        # Reward clipping
        reward = rew_clipper(rew)
        # pixel-wise max over the last two raw frames suppresses Atari sprite flicker
        next_frame = np.maximum(next_frame_new,next_frame)
        state = preprocess(next_frame, state)
        # store frames as uint8 to keep the replay buffer's memory footprint small
        replay_memory.push((previous_state*255.).astype('uint8')\
            ,action,(state*255.).astype('uint8'),reward,done)
        # Thompson Sampling: redraw the acting weights E_W_
        # NOTE(review): `% opt.f_sampling` is truthy on every step EXCEPT exact
        # multiples of f_sampling, so W is resampled almost every step; this
        # looks inverted (`== 0` was probably intended). Confirm the intended
        # sampling frequency.
        if frame_counter % opt.f_sampling:
            E_W_ = sample_W(E_W,Cov_W_decom)
        # Train
        if frame_counter > opt.replay_start_size:
            if frame_counter % opt.learning_frequency == 0:
                batch = replay_memory.sample(opt.batch_size)
                #update network: copy the sampled batch into the preallocated buffers
                for j in range(opt.batch_size):
                    batch_state[j] = batch[j].state.as_in_context(opt.ctx).astype('float32')/255.
                    batch_state_next[j] = batch[j].next_state.as_in_context(opt.ctx).astype('float32')/255.
                    batch_reward[j] = batch[j].reward
                    batch_action[j] = batch[j].action
                    batch_done[j] = batch[j].done
                with autograd.record():
                    # double-DQN target: argmax under the online net, value under the target net
                    argmax_Q = nd.argmax(nd.dot(dqn_(batch_state_next),E_W_.T),axis = 1).astype('int32')
                    Q_sp_ = nd.dot(target_dqn_(batch_state_next),E_W_target.T)
                    Q_sp = nd.pick(Q_sp_,argmax_Q,1) * (1 - batch_done)
                    Q_s_array = nd.dot(dqn_(batch_state),E_W.T)
                    # x != x is true only for NaN: abort training on divergence
                    if (Q_s_array[0,0] != Q_s_array[0,0]).asscalar():
                        flag = 1
                        print('break')
                        break
                    Q_s = nd.pick(Q_s_array,batch_action,1)
                    loss = nd.mean(l2loss(Q_s , (batch_reward + opt.gamma *Q_sp)))
                loss.backward()
                DQN_trainer.step(opt.batch_size)
        t += 1
        frame_counter += 1
        # Save the model, update Target model and update posterior
        if frame_counter > opt.replay_start_size:
            if frame_counter % opt.Target_update == 0 :
                check_point = frame_counter / (opt.Target_update *100)
                fdqn = './data/target_%s_%d' % (env_name,int(check_point))
                dqn_.save_params(fdqn)
                target_dqn_.load_params(fdqn, opt.ctx)
                c_t += 1
                if c_t == opt.target_W_update:
                    phiphiT,phiY,E_W,Cov_W = BayesReg(phiphiT,phiY,opt.alpha_target,opt.target_batch_size)
                    # NOTE(review): these bind the SAME NDArray objects, not copies;
                    # BayesReg mutates E_W/Cov_W in place, so the "target" posterior
                    # silently tracks the online one between updates -- confirm
                    # whether a .copy() snapshot was intended.
                    E_W_target = E_W
                    Cov_W_target = Cov_W
                    fnam = './data/clippted_rew_BDQN_%s_tarUpd_%d_lr_%f' %(env_name,opt.target_W_update,opt.lr)
                    np.save(fnam,tot_clipped_reward)
                    fnam = './data/tot_rew_BDQN_%s_tarUpd_%d_lr_%f' %(env_name,opt.target_W_update,opt.lr)
                    np.save(fnam,tot_reward)
                    fnam = './data/frame_count_BDQN_%s_tarUpd_%d_lr_%f' %(env_name,opt.target_W_update,opt.lr)
                    np.save(fnam,frame_count_record)
                    fnam = './data/E_W_target_BDQN_%s_tarUpd_%d_lr_%f_%d' %(env_name,opt.target_W_update,opt.lr,int(check_point))
                    np.save(fnam,E_W_target.asnumpy())
                    fnam = './data/Cov_W_target_BDQN_%s_tarUpd_%d_lr_%f_%d' %(env_name,opt.target_W_update,opt.lr,int(check_point))
                    np.save(fnam,Cov_W_target.asnumpy())
                    c_t = 0
                    # refresh the Cholesky factors used by sample_W (symmetrize first)
                    for ii in range(num_action):
                        Cov_W_decom[ii] = nd.array(np.linalg.cholesky(((Cov_W[ii]+nd.transpose(Cov_W[ii]))/2.).asnumpy()),ctx = opt.ctx)
                # grow the posterior-update batch with the buffer, capped at 100k
                if len(replay_memory.memory) < 100000:
                    opt.target_batch_size = len(replay_memory.memory)
                else:
                    opt.target_batch_size = 100000
        if done:
            if epis_count % 100. == 0. :
                logging.error('BDQN:env:%s,epis[%d],durat[%d],fnum=%d, cum_cl_rew = %d, cum_rew = %d,tot_cl = %d , tot = %d'\
                    %(env_name, epis_count,t+1,frame_counter,cum_clipped_reward,cum_reward,moving_average_clipped,moving_average))
    # end-of-episode bookkeeping and 100-episode moving averages
    epis_count += 1
    tot_clipped_reward = np.append(tot_clipped_reward, cum_clipped_reward)
    tot_reward = np.append(tot_reward, cum_reward)
    frame_count_record = np.append(frame_count_record,frame_counter)
    if epis_count > 100.:
        moving_average_clipped = np.mean(tot_clipped_reward[int(epis_count)-1-100:int(epis_count)-1])
        moving_average = np.mean(tot_reward[int(epis_count)-1-100:int(epis_count)-1])
    if flag:
        print('break')
        break
# -
# ### Plot the overall performace
# +
# Plot the (moving-average) reward per episode against environment steps.
tot_c = tot_clipped_reward
tot = tot_reward
fram = frame_count_record
epis_count = len(fram)
bandwidth = 1 # Moving average bandwidth
fonts = 12 # BUGFIX: `fonts` was never defined in this notebook, so every `fontsize=fonts` call below raised NameError
total_clipped = np.zeros(int(epis_count)-bandwidth)
total_rew = np.zeros(int(epis_count)-bandwidth)
f_num = fram[0:epis_count-bandwidth]
for i in range(int(epis_count)-bandwidth):
    total_clipped[i] = np.sum(tot_c[i:i+bandwidth])/bandwidth
    total_rew[i] = np.sum(tot[i:i+bandwidth])/bandwidth
t = np.arange(int(epis_count)-bandwidth)
belplt = plt.plot(f_num,total_rew[0:int(epis_count)-bandwidth],"b", label = "BDQN")
plt.ticklabel_format(axis='both', style='sci', scilimits=(-2,2),fontsize=fonts, family = 'serif')
plt.legend(fontsize=fonts)
print('Running after %d number of episodes' %epis_count)
plt.xlabel("Number of steps",fontsize=fonts, family = 'serif')
plt.ylabel("Average Reward per episode",fontsize=fonts, family = 'serif')
plt.title("%s" %(env_name),fontsize=fonts, family = 'serif')
plt.show()
# -
# ### Accumulated average reward compared to DDQN
# |||
# |:---------------:|:---------------:|
#
# |||
# |:---------------:|:---------------:|
#
# |||
# |:---------------:|:---------------:|
#
# #### Limits of Open AI Gym
# As seen for the Atlantis game, the reward suddenly saturates. We investigated this effect and realized that the agent had reached the internal score limit of the OpenAI Gym environment; after removing this limit the agent reached a score of 62M after seeing 15M samples, which is 100X larger than DDQN.
#
# 
#
# As we can see, the score drops after reaching 62M. This was expected since the replay buffer is short and the agent fills it with samples from the latest part of the game. But we can see that, thanks to Thompson Sampling, the agent reaches a score of 30M immediately after that, where it keeps alternating.
#
# This article is under review at ICLR:
#
#     @article{
#       anonymous2018efficient,
#       title={Efficient Exploration through Bayesian Deep Q-Networks},
#       author={Anonymous},
#       journal={International Conference on Learning Representations},
#       year={2018},
#       url={https://openreview.net/forum?id=Bk6qQGWRb}
#     }
# source notebook: mltrain-nips-2017/kamyar_aziadensesheli/BDQN.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # In-Class Coding Lab: Strings
#
# The goals of this lab are to help you to understand:
#
# - String slicing for substrings
# - How to use Python's built-in String functions in the standard library.
# - Tokenizing and Parsing Data
# - How to create user-defined functions to parse and tokenize strings
#
#
# # Strings
#
# ## Strings are immutable sequences
#
# Python strings are immutable sequences. This means we cannot change them "in part" and there is implicit ordering.
#
# The characters in a string are zero-based. Meaning the index of the first character is 0.
#
# We can leverage this in a variety of ways.
#
# For example:
# + code_cell_type="run_code"
# Demo: strings are zero-indexed sequences we can measure, index and iterate.
x = input("Enter something: ")
print ("You typed:", x)
print ("We can extract parts of the string:")
print ("number of characters:", len(x) )
print ("First character is:", x[0])
print ("Last character is:", x[-1])  # negative indices count back from the end
print ("They are sequences, so we can iterate over them:")
print ("Printing one character at a time: ")
for ch in x:
    print(ch) # print a character at a time!
# -
# ## Slices as substrings
#
# Python lists and sequences use **slice notation** which is a clever way to get a substring from a given string.
#
# Slice notation requires two values: A start index and the end index. The substring returned starts at the start index, and *ends at the position before the end index*. It ends at the position *before* so that when you slice a string into parts you know where you've "left off".
#
# For example:
# + code_cell_type="run_code"
state = "Mississippi"
print (state[0:4]) # Miss  (indices 0..3; the end index is excluded)
print (state[4:len(state)]) # issippi  (index 4 through the end)
# -
# In this next example, play around with the variable `split` adjusting it to how you want the string to be split up. Re run the cell several times with different values to get a feel for what happens.
# + code_cell_type="run_code"
state = "Mississippi"
split = 4 # TODO: play around with this number
left = state[0:split]            # characters before the split point
right = state[split:len(state)]  # characters from the split point onward
print(left, right)
# -
# ### Slicing from the beginning or to the end
#
# If you omit the begin or end slice, Python will slice from the beginnning of the string or all the way to the end. So if you say `x[:5]` its the same as `x[0:5]`
#
# For example:
# + code_cell_type="run_code"
state = "Ohio"
print(state[0:2], state[:2]) # same! omitting the start index defaults to 0
print(state[2:len(state)], state[2:]) # same: omitting the end index defaults to len(state)
# -
# ### 1.1 You Code
#
# Split the string `"New Hampshire"` into two sub-strings one containing `"New"` the other containing `"Hampshire"` (without the space).
# + code_cell_type="write_code" label="1.1" solution=["text = \"New Hampshire\"\n", "first = text[:3]\n", "last = text[4:]\n", "print(first)\n", "print(last) \n"]
## TODO: Write code here
# -
# ## Python's built in String Functions
#
# Python includes several handy built-in string functions (also known as *methods* in object-oriented parlance). To get a list of available functions, use the `dir()` function on any string variable, or on the type `str` itself.
#
# + code_cell_type="run_code"
dir(str)  # list every attribute and method the built-in str type provides
# -
# Let's suppose you want to learn how to use the `count` function. There are 2 ways you can do this.
#
# 1. search the web for `python 3 str count` or
# 1. bring up internal help `help(str.count)`
#
# Both have their advantages and disadvantages. I would start with the second one, and only fall back to a web search when you can't figure it out from the Python documentation.
#
# Here's the documentation for `count`
# + code_cell_type="run_code"
help(str.count)  # show the built-in documentation for str.count
# -
# You'll notice in the help output it says S.count(). This indicates the function is a method function, which means you invoke it like this: `variable.count()`.
#
# ### 1.2 You Code
#
# Try to use the count() function method to count the number of `'i'`'s in the string `'Mississippi`:
# + code_cell_type="debug_code" label="1.2" solution=["state = 'Mississippi'\n", "count = state.count('i')\n", "print(count) \n"]
#TODO: use state.count
state = 'Mississippi'  # count the occurrences of 'i' in this string
# -
# ### TANGENT: The Subtle difference between function and method.
#
# You'll notice sometimes we call our function alone, other times it's attached to a variable, as was the case in previous example. When we say `state.count('i')` the period (`.`) between the variable and function indicates this function is a *method function*. The key difference between a the two is a method is attached to a variable. To call a method function you must say `variable.function()` whereas when you call a function its just `function()`. The variable associated with the method call is usually part of the function's context.
#
# Here's an example:
# + code_cell_type="run_code"
name = "Larry"
print( len(name) ) # a function call len(name) stands on its own. Gets length of 'Larry'
print( name.__len__() ) # a method call name.__len__() does the same thing for its variable 'Larry'
# -
# ### 1.3 You Code
#
# Try to figure out which built in string method to use to accomplish this task.
#
# Write some code to find the text `'is'` in some text. The program should output the first position of `'is'` in the text.
#
# Examples:
#
# ```
# When: text = 'Mississippi' then position = 1
# When: text = "This is great" then position = 2
# When: text = "Burger" then position = -1
# ```
#
# Again: DO NOT WRITE your own function, use `dir(str)` then the `help()` function to figure out which built-in string method should be used to accomplish this task.
# + code_cell_type="run_code"
# workspace for using dir() and help()
# + code_cell_type="debug_code" label="1.3" solution=["text = input(\"Enter some text: \")\n", "pos = text.find(\"is\")\n", "print(pos)\n"]
# TODO: Write your code here (hint: explore dir(str) / help() for a search method)
text = input("Enter some text: ")
# -
# ### 1.4 You Code:
#
# **Is that a URL?**
#
# Let's write a user-defined function called `isurl()` which when input any `text` string will return `True` when the `text` is a URL. [https://developer.mozilla.org/en-US/docs/Learn/Common_questions/What_is_a_URL](https://developer.mozilla.org/en-US/docs/Learn/Common_questions/What_is_a_URL).
#
# Here is the strategy for the function:
#
# - use build in string method `startswith()`
# - when `text` starts with `http://` or `https://` then the `text` is a url.
#
#
# The function stub and tests have been written for you. All you need to do is implement the function body.
#
# + code_cell_type="debug_code" label="1.4" solution=["def isurl(text):\n", " # TODO implement function body here\n", " if text.startswith(\"http://\"):\n", " result= True\n", " elif text.startswith(\"https://\"):\n", " result= True\n", " else:\n", " result= False\n", " return result\n", "## tests\n", "print(\"when URL=http://www.syr.edu EXPECT=True ACTUAL=\", isurl(\"http://www.syr.edu\"))\n", "print(\"when URL=https://www.syr.edu EXPECT=True ACTUAL=\", isurl(\"http://www.syr.edu\"))\n", "print(\"when URL=www.syr.edu EXPECT=False ACTUAL=\", isurl(\"www.syr.edu\"))\n"]
def isurl(text):
    """Return True when `text` looks like a URL.

    A string counts as a URL when it begins with the scheme prefix
    'http://' or 'https://' (str.startswith accepts a tuple of prefixes).

    Fix: the stub had no statements in its body, so running the cell raised
    an IndentationError before the tests could even execute; the second test
    also passed an http:// URL while its label said https://.
    """
    return text.startswith(("http://", "https://"))
## tests
print("when URL=http://www.syr.edu EXPECT=True ACTUAL=", isurl("http://www.syr.edu"))
print("when URL=https://www.syr.edu EXPECT=True ACTUAL=", isurl("https://www.syr.edu"))
print("when URL=www.syr.edu EXPECT=False ACTUAL=", isurl("www.syr.edu"))
# -
# # Splitting text strings into Enumerable Tokens
#
#
# The `split()` string method allows us to take a string and split it into smaller strings. Each smaller string is now enumerable which means we can `for` loop over the collection.
#
# The code sample below splits `text` into `words`. The default behavior of `split()` is to tokenize the string on whitespace.
#
# We can then iterate over the tokens (in this case `words`) using a `for` loop.
# + code_cell_type="run_code"
text = "this is a test"
words = text.split()  # default split() tokenizes on any run of whitespace
for word in words:
    print(word)
# -
# This next sample demonstrates you can `split()` on any token, not just whitespace. Here we have `text` with each grade separated by a comma `,` we `split(',')` to create a list of grades which we then check to see which grades are in the "A" range (A, A+, A-) by checking to see if the `grade.startswith("A")`
# + code_cell_type="run_code"
# Split a comma-separated grade list and count the grades in the "A" range
# (A, A+, A-), printing each grade as we go.
acount = 0
text = "A,B,A+,C-,D,A-,B+,C"
for grade in text.split(','):
    print(grade)
    if grade.startswith("A"):
        acount += 1
print("Grades in A range: ", acount)
# -
# ## Putting it all together: Extracting all URL's from text.
#
# Let's combine the `.split()` method with our own `isurl()` function to write a program which will extract all URLs from the input text. You will be given the algorithm and be expected to write the code.
#
# INPUT: text
# OUTPUT: print each URL in the text:
#
#
# ALGORITHM / STRATEGY:
#
# input the text
# split the text into tokens on space (let's call these words)
# for each word in the words
# if the word is a url
# print word
#
#
# Example run:
#
# Enter text: Twitter https://twitter.com and Facebook https://facebook.com are social media sites. But the SU website
# https://syr.edu is not.
# EXTRACTED URLS:
# - https://twitter.com
# - https://facebook.com
# - https://syr.edu
#
#
# ### 1.5 You Code
# + code_cell_type="write_code" label="1.5" solution=["text = input(\"Enter text: \")\n", "words = text.split()\n", "print(\"EXTRACTED URLS:\")\n", "for word in words:\n", " if isurl(word):\n", " print(f\"- {word}\") \n"]
# TODO Write code here.
# -
# # Metacognition
#
# + [markdown] label="comfort_cell"
#
# ### Rate your comfort level with this week's material so far.
#
# **1** ==> I don't understand this at all yet and need extra help. If you choose this please try to articulate that which you do not understand to the best of your ability in the questions and comments section below.
# **2** ==> I can do this with help or guidance from other people or resources. If you choose this level, please indicate HOW this person helped you in the questions and comments section below.
# **3** ==> I can do this on my own without any help.
# **4** ==> I can do this on my own and can explain/teach how to do it to others.
#
# `--== Double-Click Here then Enter a Number 1 through 4 Below This Line ==--`
#
#
# + [markdown] label="questions_cell"
# ### Questions And Comments
#
# Record any questions or comments you have about this lab that you would like to discuss in your recitation. It is expected you will have questions if you did not complete the code sections correctly. Learning how to articulate what you do not understand is an important skill of critical thinking. Write them down here so that you remember to ask them in your recitation. We expect you will take responsibility for your learning and ask questions in class.
#
# `--== Double-click Here then Enter Your Questions Below this Line ==--`
#
# -
# run this code to turn in your work!
from coursetools.submission import Submission
Submission().submit()  # course-provided tool; presumably uploads this notebook for grading
| lessons/06-Strings/LAB-Strings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
from nltk.stem import WordNetLemmatizer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
import random
from wordcloud import WordCloud
from html.parser import HTMLParser
import bs4 as bs
import urllib.request
import re
# -
# Fetch the press-release HTML (network I/O; requires internet access).
r=requests.get('https://www.globenewswire.com/news-release/2020/05/07/2029282/0/en/Stericycle-Inc-Reports-Results-For-the-First-Quarter-2020.html')
r.encoding = 'utf-8'
html = r.text
# Printing the first 500 characters in html
print(html[:500])
# Creating a BeautifulSoup object from the HTML
soup = BeautifulSoup(html)
text = soup.get_text()
#total length
len(text)
text
# +
#We see text length is 59000
#so we crop accordingly taking important parts with most relevant text
# -
# NOTE(review): these offsets were tuned by hand for this one page; they
# will silently grab the wrong span if the page layout ever changes.
text=text[21400:55650]
text
# Drop bracketed citation markers like [12], then collapse whitespace runs.
clean_text = re.sub(r'\[[0-9]*\]', ' ', text)
clean_text = re.sub(r'\s+', ' ', clean_text)
clean_text[500:900]
len(text)
# +
#final text
text
# -
# # Text Summarization
#
# +
# Split the cleaned text into sentences (uses nltk's punkt tokenizer).
sentence_list = nltk.sent_tokenize(clean_text)
# +
stopwords = nltk.corpus.stopwords.words('english')
# Count how often each non-stopword token occurs in the cleaned text.
word_frequencies = {}
for word in nltk.word_tokenize(clean_text):
    if word not in stopwords:
        if word not in word_frequencies.keys():
            word_frequencies[word] = 1
        else:
            word_frequencies[word] += 1
# +
# Normalize counts to [0, 1] by dividing by the most frequent word's count.
# (Variable name carries a typo -- "frequncy" -- kept as-is.)
maximum_frequncy = max(word_frequencies.values())
for word in word_frequencies.keys():
    word_frequencies[word] = (word_frequencies[word]/maximum_frequncy)
# -
# Score each sentence as the sum of its words' normalized frequencies.
# Sentences with 30+ space-separated tokens are skipped entirely, so the
# summary favors short, keyword-dense sentences.
sentence_scores = {}
for sent in sentence_list:
    for word in nltk.word_tokenize(sent.lower()):
        if word in word_frequencies.keys():
            if len(sent.split(' ')) < 30:
                if sent not in sentence_scores.keys():
                    sentence_scores[sent] = word_frequencies[word]
                else:
                    sentence_scores[sent] += word_frequencies[word]
sentence_scores
# # 10 Key Ideas
# +
# Take the 10 highest-scoring sentences as the summary.
import heapq
summary_sentences = heapq.nlargest(10, sentence_scores, key=sentence_scores.get)
summary = ' '.join(summary_sentences)
print(summary)
# -
# # 15 Key Ideas
# +
# Same idea with the top 15 sentences (the re-import of heapq is harmless).
import heapq
summary_sentences_2 = heapq.nlargest(15, sentence_scores, key=sentence_scores.get)
summary_2 = ' '.join(summary_sentences_2)
print(summary_2)
# -
| Stericycle/SRCL Key Aspect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Given a string of words, reverse all the words.
#Ex: Given: 'This is the best', Return: 'best the is This'
#Note: Remove all leading and trailing whitespace.
# +
def rev_word1(s):
    """Return the words of `s` in reverse order, joined by single spaces.

    str.split() with no arguments splits on any whitespace run and drops
    leading/trailing whitespace.
    """
    words = s.split()
    words.reverse()
    return " ".join(words)
#Or
def rev_word2(s):
    """Reverse the word order of `s`; whitespace is normalized to single spaces."""
    pieces = s.split()
    last = len(pieces) - 1
    return " ".join(pieces[last - i] for i in range(len(pieces)))
# -
rev_word1('Hi John, are you ready to go?')
rev_word2('Hi John, are you ready to go?')
def rev_word3(s):
    """
    Reverse the order of the words in `s` without using str.split.

    A word is a maximal run of non-space characters (only ' ' acts as a
    separator). Leading, trailing and repeated spaces are discarded, and
    the reversed words are joined by single spaces.
    """
    words = []
    current = []
    for ch in s:
        if ch == ' ':
            # A space closes the word in progress, if any.
            if current:
                words.append(''.join(current))
                current = []
        else:
            current.append(ch)
    # Flush a trailing word not followed by a space.
    if current:
        words.append(''.join(current))
    words.reverse()
    return " ".join(words)
rev_word3('Hi John, are you ready to go?')
| Python for Algorithms, Data Structures, & Interviews/Array Sequences/Sentence Reversal - Interview Problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import copy
import itertools as it
import numpy as np
import yaml
import os
def expand_config(dict_config):
    """Expand a dict of option lists into the list of all combinations.

    Given {'a': [1, 2], 'b': [3]} returns
    [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}] -- the Cartesian product of the
    value lists, each combination rebuilt as a dict under the original keys.
    """
    keys, value_lists = zip(*dict_config.items())
    expanded = []
    for combo in it.product(*value_lists):
        expanded.append(dict(zip(keys, combo)))
    return expanded
# -
# # Test for all main scripts
def generate_str_config(config):
    """Build the list of Hydra command-line overrides for one GLUE-style run.

    '=' signs are escaped as backslash-equals because the strings are later
    embedded into a bash command line.
    """
    ue_method = config['ue']
    overrides = ['ue\={}'.format(ue_method)]
    # Committee-based UE methods need an explicit committee size.
    if ue_method in ('mc-dpp', 'mc'):
        overrides.append('ue.committee_size\=2')
    overrides.append('do_ue_estimate\=True')
    # Selective training is enabled for every regularization except 'raw'.
    selective = 'True' if config['reg_type'] != 'raw' else 'False'
    overrides += [
        'ue.use_selective\={}'.format(selective),
        'ue.reg_type\={}'.format(config['reg_type']),
        '++ue.use_spectralnorm\={}'.format(config['use_spectralnorm']),
        'data.validation_subsample\={}'.format(config['validation_subsample']),
        'data.subsample_perc\={}'.format(config['subsample_perc']),
        'training\=electra_base',
        'training.per_device_eval_batch_size\=32',
        "model.model_name_or_path\='{}'".format(config['model_name_or_path']),
    ]
    return overrides
def generate_bash(config, cuda_devices, generate_func, filename):
    """Write a bash script running one training command per valid config combo.

    Args:
        config: dict mapping option names to lists of candidate values;
            expanded with `expand_config` into every combination.
        cuda_devices: string placed in CUDA_VISIBLE_DEVICES for each command.
        generate_func: callable mapping one config dict to a list of CLI
            override strings.
        filename: output path of the script, relative to ../../scripts/.
            (Bug fix: this argument was previously ignored and the path
            contained a corrupted hard-coded literal.)
    """
    full_config = '# exit when any command fails\n'
    full_config += 'set -e\n'
    full_config += 'cd ../src\n'
    n_tests = 0
    for conf in expand_config(config):
        script = conf['script']
        task_name = conf['task']
        new_task = f'CUDA_VISIBLE_DEVICES={cuda_devices} HYDRA_CONFIG_PATH=../configs/{task_name}.yaml python {script} '
        args = ' '.join(generate_func(conf))
        # Short tags used to build a unique output directory per combination.
        reg = 'reg' if conf['reg_type'] == 'reg-curr' else conf['reg_type']
        use_spectralnorm = 'sn' if conf['use_spectralnorm'] == 'True' else 'no_sn'
        ue = conf['ue']
        # SNGP is only evaluated raw, without spectral norm, on electra.
        if ue == 'sngp':
            if reg != 'raw':
                continue
            if use_spectralnorm != 'no_sn':
                continue
            if conf['model_name_or_path'] != 'google/electra-base-discriminator':
                continue
        # MC-dropout variants are never combined with spectral normalization.
        if ue == 'mc' or ue == 'mc-dpp':
            if use_spectralnorm != 'no_sn':
                continue
        new_task += f"{args}"
        if 'deberta' in conf['model_name_or_path']:
            model = 'deberta'
        elif 'electra' in conf['model_name_or_path']:
            model = 'electra'
        elif 'roberta' in conf['model_name_or_path']:
            model = 'roberta'
        elif 'distilbert' in conf['model_name_or_path']:
            model = 'distilbert'
        else:
            # Previously an unrecognized model family left `model` unbound
            # and raised NameError below; fall back to a generic tag.
            model = 'other'
        new_task += f' output_dir=../workdir/run_tests/{model}_{reg}_{use_spectralnorm}/{task_name}/{ue}'
        full_config += '\n' + new_task if len(full_config) else new_task
        n_tests += 1
    print(f'n_tests: {n_tests}')
    # Bug fix: honor the `filename` argument instead of a corrupted literal,
    # and make sure the target directory exists.
    out_path = f'../../scripts/{filename}'
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    with open(out_path, 'w') as rsh:
        rsh.write(full_config)
# +
# Grid of test configurations for the classification (GLUE-style) tasks.
# Each key's list is expanded by expand_config into all combinations;
# invalid combinations are filtered out inside generate_bash.
train_configs = {
    'script': ['run_glue.py'],
    'validation_subsample': [0.0],
    'subsample_perc': [0.02],
    'task': ['cola', 'sst5'],
    'ue': ['decomposing_md', 'nuq', 'mahalanobis', 'mc', 'mc-dpp', 'sngp'],
    'reg_type': ['metric', 'reg-curr', 'raw'],
    'use_spectralnorm': ['False', 'True'],
    'model_name_or_path': ['microsoft/deberta-base', 'roberta-base',
                           'google/electra-base-discriminator', 'distilbert-base-cased']
}
cuda_devices = '0'
generate_bash(train_configs, cuda_devices, generate_str_config, 'miscl_scripts/run_tests.sh')
# -
def generate_str_config(config):
    """Build the list of Hydra overrides for an NER (CoNLL) run.

    This variant has no committee-size or eval-batch-size override, and the
    validation subsample maps to `data.subsample_perc_val`.
    """
    selective = 'False' if config['reg_type'] == 'raw' else 'True'
    return [
        'ue\={}'.format(config['ue']),
        'do_ue_estimate\=True',
        'ue.use_selective\={}'.format(selective),
        'ue.reg_type\={}'.format(config['reg_type']),
        '++ue.use_spectralnorm\={}'.format(config['use_spectralnorm']),
        'data.subsample_perc_val\={}'.format(config['validation_subsample']),
        'data.subsample_perc\={}'.format(config['subsample_perc']),
        'training\=electra_base',
        "model.model_name_or_path\='{}'".format(config['model_name_or_path']),
    ]
# +
# Grid of test configurations for the NER (CoNLL-2003) task.
train_configs = {
    'script': ['run_conll2003.py'],
    'validation_subsample': [0.01],
    'subsample_perc': [0.01],
    'task': ['conll2003'],
    'ue': ['decomposing_md', 'nuq', 'mahalanobis', 'mc', 'mc-dpp', 'sngp'],
    'reg_type': ['metric', 'reg-curr', 'raw'],
    'use_spectralnorm': ['False', 'True'],
    'model_name_or_path': ['microsoft/deberta-base', 'google/electra-base-discriminator', 'distilbert-base-cased']
}
cuda_devices = '2'
generate_bash(train_configs, cuda_devices, generate_str_config, 'ner_scripts/run_tests.sh')
# +
# Top-level driver: launch both generated suites in the background,
# logging each to test.out in its scripts subdirectory.
full_test = 'nohup bash miscl_scripts/run_tests.sh > miscl_scripts/test.out &\n'
full_test += 'nohup bash ner_scripts/run_tests.sh > ner_scripts/test.out &'
with open (f'../../scripts/run_tests.sh', 'w') as rsh:
    rsh.write(full_test)
# -
| src/exps_notebooks/generate_series_of_tests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Proactive Retention
# This notebook illustrates how to use the TED_CartesianExplainer class. The TED_CartesianExplainer is an implementation of the algorithm in the AIES'19 [paper](https://dl.acm.org/citation.cfm?id=3314273) by Hind et al. It is most suited for use cases where matching explanations to the mental model of the explanation consumer is the highest priority; i.e., where the explanations are similar to what would be produced by a domain expert.
#
# To achieve this goal, the TED (Teaching Explanations for Decisions) framework requires that the training data is augmented so that each instance contains an explanation (E). The goal is to teach the framework what are appropriate explanations in the same manner the training dataset teaches what are appropriate labels (Y). Thus, the training dataset contains the usual features (X) and labels (Y), augmented with an explanation (E) for each instance. For example, consider a loan application use case, where the features are the loan application answers, and the label is the decision to approve or reject the loan. The explanation would be the reason for the approve/reject decision.
#
# The format of the explanation is flexible and determined by the use case. It can be a number, text, an image, an audio, a video, etc. The TED framework simply requires that it can be mapped to a unique integer [0, N] and that any two explanations that are semantically the same should be mapped to the same integer. In many domains there are a list of reasons for making a decision, such as denying a loan, and these reasons would form the finite explanation space.
#
# Given this setup, the TED framework will train a classifier on this training set of instances of (X, Y, E); i.e, features, labels, and explanations. When the classifier is given a new feature vector, it will produce a label (Y) and explanation (E).
#
# There are many approaches to implementing this functionality. In this notebook we illustrate the simplest implementation, TED_CartesianExplainer, which simply takes the Cartesian product of the label and explanation and creates a new label (YE) and uses this to train a (multiclass) classifier. (See the TED_CartesianExplainer for more details.) There are other possibilities, such as Codella et al.'s [paper](https://arxiv.org/abs/1906.02299) at the HILL 2019 workshop. However, we expect the interface to these implementations to be the same, and thus the user of the TED framework, illustrated by this notebook, would not have to change their code.
#
# This simple cartesian product approach is quite general in that it can use any classifier (passed as a parameter), as long as it complies with the fit/predict paradigm.
#
# This implementation assumes the initial problem is a binary classification problem with labels 0 and 1, and the explanations form a dense integer space from [0, NumExplanations -1]. The mapping of explanations to integers is performed by the user of the explanation as we will illustrate below. This allows flexibility to the user if, for example, they want to change explanations from text to a video.
#
# Before we show how to use TED_Cartesian, we will describe our use case and associated dataset. Then we will walk through the code, following these steps.
#
# [Step 1. Import relevant packages](#c1)<br>
# [Step 2. Open datafile and create train/test splits](#c2)<br>
# [Step 3. Create a fit/predict classifier and TED classifer](#c3)<br>
# [Step 4. Train the TED classifier](#c4)<br>
# [Step 5. Ask classifer for a few predictions and explanations](#c5)<br>
# [Step 6. Create a more relevant human interface](#c6)<br>
# [Step 7. Compute overall accuracy metrics using the test dataset](#c7)<br>
# [Step 8. Conclusions](#c8)<br>
#
# # The use case
# The use case we will consider in this notebook is predicting which employees should be targeted for retention actions at a fictitious company, based on various features of the employee. The features we will consider are
# - Position, [1, 2, 3, 4]; higher is better
# - Organization, [1, 2, 3]; organization 1 has more retention challenges
# - Potential, an integer mapped to Yes (-10), No (-11)
# - Rating, an integer mapped to High (-3), Med (-2), and Low (-1)
# - Rating slope (average rating over last 2 years), an integer mapped to High (-3), Med (-2), and Low (-1)
# - Salary competitiveness, an integer mapped to High (-3), Med (-2), and Low (-1)
# - Tenure, # of months at company, an integer in [0..360]
# - Position tenure, # of months at current position, an integer in [0..360]
#
# These features generate a feature space of over 80,000,000 possibilities.
# # The dataset
# Given these features, we synthetically generate a dataset using the following distribution functions:
# - Position: 1 (45%), 2 (30%), 3 (20%), 4 (5%)
# - Organization: 1 (40%), 2 (30%), 3 (30%)
# - Potential: Yes (50%), No (50%)
# - Rating: High (15%), Med (80%), and Low (5%)
# - Rating slope: High (15%), Med (80%), and Low (5%)
# - Salary competitiveness: High (10%), Med (70%), and Low (20%)
# - Tenure: [0..24] (30%), [25..60] (30%), [61..360] (40%); values are evenly distributed within each range
# - Position tenure: [0..12] (70%), [13..24] (20%), [25..360] (10%); values are evenly distributed within each range
#
# These are the target distributions. The actual distributions in the dataset vary slightly because they are selected randomly from these distributions.
#
# The values for each feature are generated independently; i.e., it is equally likely that a person in position 1 and a person in position 4 will be in the same organization. The only constraint among features is that the Position tenure cannot be greater than the Tenure (with the company); i.e., one cannot be in a position for longer than they have been with the company.
#
# The [dataset](https://github.com/IBM/AIX360/blob/master/aix360/data/ted_data/Retention.csv) and [code](https://github.com/IBM/AIX360/blob/master/aix360/data/ted_data/GenerateData.py) to generate is available as part of AI Fairness 360.
#
#
# ### Assigning labels
# To determine if a given employee, as represented by these features, should be targeted with a retention action, we would ideally ask a human resource specialist with deep knowledge of the circumstances for employee retention. Under the TED framework, we would ask this expert for both a prediction of whether the employee was at risk to leave AND for a reason why the HR expert felt that way.
#
# We simulate this process by creating 25 rules, based on the above features, for why a retention action is needed to reduce the chances of an employee choosing to leave our fictitious company. These rules are motivated by common scenarios, such as not getting a promotion in a while, not being paid competitively, receiving a disappointing evaluation, being a new employee in certain organizations with inherently high attrition, not having a salary that is consistent with positive evaluations, mid-career crisis, etc. We vary the application of these rules depending on various positions and organizations. For example, in our fictitious company organization #1 has much higher attrition because their skills are more transferable outside the company.
#
# Each of these 25 rules would result in the label "Yes"; i.e., the employee is a risk to leave the company. Because the rules capture the reason for the "Yes", we use the rule number as the explanation (E), which is required by the TED framework.
#
# If none of the rules are satisfied, it means the employee is not a candidate for a retention action; i.e., a "No" label is assigned. Although we could also construct explanations for these cases (see AIES'19 paper for such examples), we choose not to in this use case because there are many such cases where explanations for users will only be required in the "Bad" case. For example, if a person is denied credit, rejected for a job, or is diagnosed with a disease, they will want to know why. However, when they are approved for credit, get the job, or are told they do not have a disease, they are usually not interested in, or told, the reasons for the decision.
#
# We make no claim that all predictions are in this category or that other personas (the data scientist, regulator, or loan agent) might want to know why for both kinds of decisions. In fact, the TED framework can provide explanation for each decision outcome. We are just not addressing these general situations in this notebook.
#
# ### Dataset characteristics
# With the above distribution, we generate 10,000 fictitious employees (X) and apply the 26 (25 Yes + 1 No) rules to produce Yes/No labels (Y), using these rules as explanations (E). After applying these rules, the resulting dataset has the following characteristics:
# - Yes (33.8%)
# - No (66.2%)
#
# Of the 33.8% of "Yes" labels, each of 25 explanations (rules) were used with frequencies ranging from 20 (rule 16 & 18, counting from 0) to 410 (rule 13). (When multiple rules applied to a feature vector (3.5% of the dataset, or 10.24% of Yes instances in the dataset), we chose the more specific rule, i.e., the one that only matched specified values for a feature, as opposed to matching all values for that feature.)
#
# We now are ready to discuss the code that uses the TED_CartesianExplainer class to produce explanations.
# <a name="c1"></a>
# # Step 1: Import relevant packages
# The code below sets up the imports. We will use the svm classifier, the train_test_split routine to partition
# our dataset, and the TED_CartesianExplainer for explanations and the TEDDataset for the training and test data.
# +
from sklearn import svm # this can be any classifier that follows the fit/predict paradigm
from sklearn.model_selection import train_test_split
from aix360.algorithms.ted.TED_Cartesian import TED_CartesianExplainer
from aix360.datasets.ted_dataset import TEDDataset
# -
# <a name="c2"></a>
# # Step 2: Open datafile and create train/test splits
# Below we create a new TEDDataset object based on the "Retention.csv" file. The load_file method decomposes the dataset into its X, Y, and E components. (See [TEDDataset class](https://github.com/IBM/AIX360/blob/master/aix360/datasets/ted_dataset.py) for the expected format.) We then partition these instances into train and test sets, using the sklearn routine train_test_split, with 80% going to train and 20% going to test.
# +
# Decompose the dataset into X, Y, E
# load_file returns features (X), labels (Y) and explanation ids (E)
# as parallel arrays, one row per instance.
X, Y, E = TEDDataset().load_file('Retention.csv')
print("X's shape:", X.shape)
print("Y's shape:", Y.shape)
print("E's shape:", E.shape)
print()
# set up train/test split
# 80/20 split; the fixed random_state makes the split reproducible.
X_train, X_test, Y_train, Y_test, E_train, E_test = train_test_split(X, Y, E, test_size=0.20, random_state=0)
print("X_train shape:", X_train.shape, ", X_test shape:", X_test.shape)
print("Y_train shape:", Y_train.shape, ", Y_test shape:", Y_test.shape)
print("E_train shape:", E_train.shape, ", E_test shape:", E_test.shape)
# -
# <a name="c3"></a>
# # Step 3: Create a fit/predict classifier and TED classifer
# We can now create a fit/predict classifier and the TED_CartesianExplainer instance, passing in the classifier.
# The commented out code shows some other example classifiers that can be used. (You will need to add the appropriate import statements.) There are many more classifiers that can be used.
# +
# Create classifier and pass to TED_CartesianExplainer
# Any estimator following sklearn's fit/predict interface can be used here.
estimator = svm.SVC(kernel='linear')
# estimator = DecisionTreeClassifier()
# estimator = RandomForestClassifier()
# estimator = AdaBoostClassifier()
ted = TED_CartesianExplainer(estimator)
# -
# <a name="c4"></a>
# # Step 4: Train the TED classifier
# Next, we fit the TED-enhanced classifier, passing in the 3 training components: features (X), labels (Y), and explanations (E).
# +
print("Training the classifier")
# Fit on features, labels AND explanations (TED's cartesian label space).
ted.fit(X_train, Y_train, E_train) # train classifier
# -
# <a name="c5"></a>
# # Step 5: Ask classifer for a few predictions and explanations
# The trained TED classifier is now ready for predictions with explanations. We construct some raw feature vectors, created from the original dataset, and ask for a label (Y) prediction and its explanation (E).
# +
import numpy as np
# Create an instance level example
# Feature order: position, org, potential, rating, rating slope,
# salary competitiveness, tenure, position tenure (see the markdown above).
X1 = [[1, 2, -11, -3, -2, -2, 22, 22]]
# correct answers: Y:-10; E:13
Y1, E1 = ted.predict_explain(X1)
print("Predicting for feature vector:")
print(" ", X1[0])
print("\t\t Predicted \tCorrect")
print("Label(Y)\t\t " + np.array2string(Y1[0]) + "\t\t -10")
print("Explanation (E) \t " + np.array2string(E1[0]) + "\t\t 13")
print()
# A second example expected to be a "No" (label -11, explanation 25).
X2 = [[3, 1, -11, -2, -2, -2, 296, 0]]
## correct answers: Y:-11, E:25
Y2, E2 = ted.predict_explain(X2)
print("Predicting for feature vector:")
print(" ", X2[0])
print("\t\t Predicted \tCorrect")
print("Label(Y)\t\t " + np.array2string(Y2[0]) + "\t\t -11")
print("Explanation (E) \t " + np.array2string(E2[0]) + "\t\t 25")
# -
# <a name="c6"></a>
# # Step 6: Create a more relevant human interface
# Although we just showed how TED_CaresianExplainer can produce the correct explanation for a feature vector, simply producing "3" as an explanation is not sufficient in most uses. This section shows one way to implement the mapping of real explanations to the explanation IDs that TED requires. This is inspired by the [FICO reason codes](https://www.fico.com/en/latest-thinking/product-sheet/us-fico-score-reason-codes), which are explanations for a FICO credit score.
#
# In this case the explanations are text, but the same idea can be used to map explanation IDs to other formats, such as a file name containing an audio or video explanation.
# +
# NOTE(review): Label_Strings is never used below -- labelToString performs
# the label-to-text mapping instead; confirm before removing.
Label_Strings =["IS", "Approved for"]
def labelToString(label):
    """Map a TED label code to display text: -10 -> 'IS' (retention risk),
    anything else -> 'IS NOT'."""
    return "IS" if label == -10 else "IS NOT"
# Human-readable explanation for each TED explanation id (index == id);
# the final entry is the "no retention-risk rule matched" case.
Explanation_Strings = [
    "Seeking Higher Salary in Org 1",
    "Promotion Lag, Org 1, Position 1",
    "Promotion Lag, Org 1, Position 2",
    "Promotion Lag, Org 1, Position 3",
    "Promotion Lag, Org 2, Position 1",
    "Promotion Lag, Org 2, Position 2",
    "Promotion Lag, Org 2, Position 3",
    "Promotion Lag, Org 3, Position 1",
    "Promotion Lag, Org 3, Position 2",
    "Promotion Lag, Org 3, Position 3",
    "New employee, Org 1, Position 1",
    "New employee, Org 1, Position 2",
    "New employee, Org 1, Position 3",
    "New employee, Org 2, Position 1",
    "New employee, Org 2, Position 2",
    "Disappointing evaluation, Org 1",
    "Disappointing evaluation, Org 2",
    "Compensation does not match evaluations, Med rating",
    "Compensation does not match evaluations, High rating",
    "Compensation does not match evaluations, Org 1, Med rating",
    "Compensation does not match evaluations, Org 2, Med rating",
    "Compensation does not match evaluations, Org 1, High rating",
    "Compensation does not match evaluations, Org 2, High rating",
    "Mid-career crisis, Org 1",
    "Mid-career crisis, Org 2",
    "Did not match any retention risk rules"]
# Render the two predictions from the previous cell in plain English.
print("Employee #1 " + labelToString(Y1[0]) + " a retention risk with explanation: " + Explanation_Strings[E1[0]])
print()
print("Employee #2 " + labelToString(Y2[0]) + " a retention risk with explanation: " + Explanation_Strings[E2[0]])
# <a name="c7"></a>
# # Step 7: Compute overall accuracy metrics using the test dataset
# As we have a test part of the dataset, we can use it to see how well TED_Cartesian does in predicting all test labels (Y) and explanations (E). We use the handy "score" method of TED_Cartesian to do this computation.
# We also report the accuracy of predicting the combined YE labels, which could be of interest to researchers who want to better understand the inner workings of TED_Cartesian.
# +
# score returns accuracies for the combined YE label, Y alone, and E alone.
YE_accuracy, Y_accuracy, E_accuracy = ted.score(X_test, Y_test, E_test) # evaluate the classifier
print("Evaluating accuracy of TED-enhanced classifier on test data")
print(' Accuracy of predicting Y labels: %.2f%%' % (100*Y_accuracy))
print(' Accuracy of predicting explanations: %.2f%%' % (100*E_accuracy))
print(' Accuracy of predicting Y + explanations: %.2f%%' % (100*YE_accuracy))
# -
# <a name="c8"></a>
# # Conclusions
# This notebook has illustrated how easy it is to use the TED_CartesianExplainer if you have a training dataset that contains explanations. The framework is general in that it can use any classification technique that follows the fit/predict paradigm, so that if you already have a favorite algorithm, you can use it with the TED framework.
#
# The main advantage of this algorithm is that the quality of the explanations produced are exactly the same quality as those that the algorithm is trained on. Thus, if you teach (train) the system well with good training data and good explanations, you will get good explanations out in a language you should understand.
#
# The downside of this approach is that someone needs to create explanations. This should be straightforward when a domain expert is creating the initial training data: if they decide a loan should be rejected, they should know why, and if they do not, it may not be a good decision.
#
# However, this may be more of a challenge when a training dataset already exists without explanations and now someone needs to create the explanations. The original person who did the labeling of decisions may no longer be available, so the explanations for the decisions may not be known. In this case, we argue, the system is in a dangerous state. Training data exists that no one understands why it is labeled in a certain way. Asking the model to explain one of its predictions when no person can explain an instance in the training data does not seem consistent.
#
# Dealing with this situation is one of the open research problems that comes from the TED approach.
| examples/tutorials/retention.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Synthetic dataset
# In this file we are going to generate synthetic noisy data. The result of this section will be used for training a TD3 model to eliminate noise and reconstruct phylogenetic tree.
#
# It is mentioned that the generated data contains both the SNV matrix and the CNV profile per sample, besides the ground-truth tree.
# ____
# ## Setup Docker
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "Afshin"
# !apt update
# !apt --assume-yes install python-pydot python-pydot-ng graphviz
# !pip install --upgrade pip
# !pip install -r ../requirements.txt
# ## Import required packages
import scipy as sp
import numpy as np
from scipy import stats
import networkx as nx
from matplotlib import pyplot as plt
import matplotlib as mpl
import random
import matplotlib.image as mpimg
import graphviz
import imageio, json
from IPython.display import Image
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
import matplotlib.patches as mpatches
# ## Configuration
# Global matplotlib font settings applied to every figure in this notebook.
font = {
    'weight' : 'normal',
    'size' : 16,
}
mpl.rc('font', **font)
# ## Define functions
# +
def plot_mat(M, row='', col='', title='', save_name=None):
    """Render a 2-D matrix as a heatmap with labelled row/column ticks.

    Args:
        M: 2-D array-like to display.
        row: label prefix for each row tick (e.g. 'cell').
        col: label prefix for each column tick (e.g. 'mut').
        title: figure title.
        save_name: if truthy, the figure is also saved to this path.
    """
    rows, cols = M.shape[:2]
    plt.imshow(M, cmap='GnBu', interpolation="nearest")
    # Bug fix: tick ranges must come from M itself; the original referenced
    # a global `E`, which fails whenever no such global exists.
    plt.yticks(range(rows), ['%s %d' % (row, i) for i in range(rows)])
    plt.xticks(range(cols), ['%s %d' % (col, i) for i in range(cols)])
    plt.xticks(rotation=60)
    plt.xlabel('{}-{} Matrix'.format(row.capitalize(), col.capitalize()))
    plt.title(title)
    if save_name:
        plt.savefig(save_name)
    # Bug fix: `plt.imshow()` with no arguments is a TypeError; the intent
    # was to display the completed figure.
    plt.show()
    plt.close()
def plot_tree(tree, save_name=None):
    # Placeholder: tree plotting is not implemented here; see the Tree
    # class methods plot_tree_mut / plot_tree_full for working versions.
    pass
# -
# ## Define classes
# ### Define the Tree class
class Tree(object):
    def __init__(self, T, E, CP, **params):
        """Wrap a ground-truth tree T with its error-free genotype matrix E
        (N cells x M mutations) and copy-number profile CP, then generate
        the noisy observation matrices via generate_data.

        `params` must contain the noise settings read by __set_params:
        'alpha' (false positives), 'beta' (false negatives), 'MR' (missing).
        """
        self.__T = T
        self.__E = E
        self.__CP = CP
        self.__N = E.shape[0]
        self.__M = E.shape[1]
        # Record matrix dimensions with the noise parameters so they show
        # up in the serialized parameter strings (file names / titles).
        params['N'] = self.__N
        params['M'] = self.__M
        # Scale factor keeps plots a readable size for any matrix shape.
        self.__plot_scale = 30./max(self.__M, self.__N)
        self.__set_params(params)
        self.generate_data(**params)
def generate_data(self, **params):
self.__new_param = self.__params
for k,v in params.items():
self.__new_param[k]=v
if not json.dumps(self.__params) == json.dumps(self.__new_param):
print('Prev params:')
print('\t'.join(json.dumps(self.__params, indent=True).splitlines()))
self.__set_params(self.__new_param)
print('New params:')
print('\t'.join(json.dumps(params, indent=True).splitlines()))
## ========================================================
## ~~~~~~~~~~~~~~~~~~~~~~~~ E to D ~~~~~~~~~~~~~~~~~~~~~~~~
## ========================================================
D = self.__E.copy()
nz_idxs = np.nonzero(self.__E)
z_idxs = np.nonzero(self.__E-1)
z_rnds = np.random.rand(len( z_idxs[0]))
nz_rnds = np.random.rand(len(nz_idxs[0]))
z_rnds = [1 if i < self.__alpha else 0 for i in z_rnds]
nz_rnds = [0 if i < self.__beta else 1 for i in nz_rnds]
D[nz_idxs] = nz_rnds
D[ z_idxs] = z_rnds
self.__D = D
## ========================================================
## ~~~~~~~~~~~~~~~~~~ add missing data ~~~~~~~~~~~~~~~~~~~~
## ========================================================
Dm = self.__D.copy()
idxs = np.nonzero(self.__D+1)
rnds = np.random.rand(self.__N, self.__M)
for n in range(self.__N):
for m in range(self.__M):
if rnds[n, m] < self.__MR:
Dm[n, m] = 3
self.__Dm = Dm
    def __set_params(self, params):
        """Cache the noise parameters and pre-render the parameter strings
        used in file names (__str_params) and plot titles (__latex_params)."""
        self.__alpha = params['alpha']  # false-positive rate (0 -> 1 flips)
        self.__beta = params['beta']  # false-negative rate (1 -> 0 flips)
        self.__MR = params['MR'] # missing rate
        self.__params = params
        self.__str_params ='_'.join(['{}={}'.format(k,v) for k,v in params.items()])
        self.__latex_params='\ '.join(['{}={}'.format(k if len(k)<3 else '\%s'%k,v) for k,v in params.items()])
def save_data(save_dir):
if not save_dir[-1]=='/':
save_dir += '/'
p = 'Parameters: {}\n'.format(self.__str_params)
np.savetxt('{}E.csv'.format(save_dir), E, fmt='%.0f', delimiter=',', header=p)
np.savetxt('{}D.csv'.format(save_dir), D, fmt='%.0f', delimiter=',', header=p)
np.savetxt('{}DmE.csv'.format(save_dir), D-E, fmt='%.0f', delimiter=',', header=p)
np.savetxt('{}Dm.csv'.format(save_dir), Dm, fmt='%.0f', delimiter=',', header=p)
    def get_E(self,):
        # Error-free genotype matrix (ground truth).
        return self.__E
    def get_D(self,):
        # Noisy genotype matrix.
        return self.__D
    def get_Dm(self,):
        # Noisy matrix with missing entries marked as 3.
        return self.__Dm
    def get_T(self,):
        # Ground-truth tree (networkx graph).
        return self.__T
    def get_params(self,):
        # Full parameter dict (noise settings plus N and M).
        return self.__params
    def get_alpha(self,):
        # False-positive rate.
        return self.__alpha
    def get_beta(self,):
        # False-negative rate.
        return self.__beta
    def save_tree(self, save_path):
        """Pickle the ground-truth tree. `save_path` may be a directory
        (trailing '/': a parameterized file name is generated inside it)
        or a complete file path."""
        file_path = '{}Tree_{}.gpickle'.format(save_path, self.__str_params) if save_path[-1] == '/' else save_path
        nx.write_gpickle(self.__T, file_path)
    def plot_tree_mut(self, save_path):
        """Render the tree to a PNG via pydot/graphviz and display it inline.

        `save_path` may be a directory (trailing '/') or a full file path.
        Requires graphviz to be installed; `display` assumes an IPython
        environment.
        """
        mut_T = self.__T.copy()
        # mut_T.remove_nodes_from([i for i,n in enumerate(self.__T.nodes()) if 'cell' in str(n)])
        pdot = nx.drawing.nx_pydot.to_pydot(mut_T)
        file_path = '{}treeM_{}.png'.format(save_path, self.__str_params) if save_path[-1] == '/' else save_path
        pdot.write_png(file_path)
        display(Image(filename=file_path))
    def plot_tree_full(self, save_path, title=None):
        """Render the full tree (mutation and cell nodes) to a PNG and
        display it inline; cell nodes are restyled as orange egg shapes.

        `save_path` may be a directory (trailing '/') or a full file path.
        """
        pdot = nx.drawing.nx_pydot.to_pydot(self.__T)
        for i, node in enumerate(pdot.get_nodes()):
            node_name = str(node)[:-1]
            if 'cell' in node_name:
                # Relabel cell nodes as 's<idx>' and restyle them.
                node.set_label('s%s'%node_name.split()[-1][:-1])
                node.set_shape('egg')
                node.set_fillcolor('#db8625')
                node.set_color('red')
        file_path = '{}treeF_{}.png'.format(save_path, self.__str_params) if save_path[-1] == '/' else save_path
        pdot.write_png(file_path)
        if title: print(title)
        display(Image(filename=file_path))
def plot_E(self, save_path=None, nofig=False, figsize=None):
    """Heat-map of the error-free genes-cells matrix E.

    save_path: directory (trailing '/') or full file path; when given the
        figure is saved, closed, and the rendered PNG returned as an array.
    nofig: draw into the caller's current figure (no new figure, no show) --
        used by plot_all_mat to place this plot into a subplot.
    figsize: optional override of the (M, N)-scaled default size.
    """
    if not nofig:
        plt.figure(figsize=figsize if figsize else (self.__M*self.__plot_scale,self.__N*self.__plot_scale))
    plt.imshow(self.__E, cmap='GnBu', interpolation="nearest")
    plt.yticks(range(self.__E.shape[0]), ['cell %d'%i for i in range(self.__N)])
    plt.xticks(range(self.__E.shape[1]), [ 'mut %d'%i for i in range(self.__M)])
    plt.xticks(rotation=60)
    plt.xlabel('Genes-Cells Matrix E (Error-less)')
    plt.title(r'Parameters: ${}$'.format(self.__latex_params))
    if save_path is not None:
        file_path = '{}E_{}.png'.format(save_path, self.__str_params) if save_path[-1] == '/' else save_path
        plt.savefig(file_path)
        plt.close()
        return imageio.imread(file_path)
    if not nofig:
        plt.show()
        plt.close()
def plot_D(self, save_path=None, nofig=False, figsize=None):
    """Heat-map of the noisy genes-cells matrix D (the input data).

    Same save_path/nofig/figsize semantics as plot_E.
    """
    if not nofig:
        plt.figure(figsize=figsize if figsize else (self.__M*self.__plot_scale,self.__N*self.__plot_scale))
    plt.imshow(self.__D, cmap='GnBu', interpolation="nearest")
    plt.yticks(range(self.__D.shape[0]), ['cell %d'%i for i in range(self.__N)])
    plt.xticks(range(self.__D.shape[1]), [ 'mut %d'%i for i in range(self.__M)])
    plt.xticks(rotation=60)
    plt.xlabel('Noisy Genes-Cells Matrix D (input Data)')
    plt.title(r'Parameters: ${}$'.format(self.__latex_params))
    if save_path is not None:
        file_path = '{}D_{}.png'.format(save_path, self.__str_params) if save_path[-1] == '/' else save_path
        plt.savefig(file_path)
        plt.close()
        return imageio.imread(file_path)
    if not nofig:
        plt.show()
        plt.close()
def plot_DmE(self, save_path=None, nofig=False, figsize=None):
    """Colour-coded difference D-E: 0 = correct, +1 = false positive, -1 = false negative.

    Same save_path/nofig/figsize semantics as plot_E.
    """
    if not nofig:
        plt.figure(figsize=figsize if figsize else (self.__M*self.__plot_scale,self.__N*self.__plot_scale))
    ## first you need to define your color map and value name as a dict
    t = 1 ## alpha value
    cmap = {0:[1,1,0.95,t], 1:[0.5,0.5,0.8,t], -1:[0.8,0.5,0.5,t]}
    labels = {0:'true', 1:'false positive', -1:'false negetive'}
    arrayShow = np.array([[cmap[i] for i in j] for j in self.__D-self.__E])
    ## create patches as legend
    patches =[mpatches.Patch(color=cmap[i],label=labels[i]) for i in cmap]
    plt.imshow(arrayShow, interpolation="nearest")
    plt.legend(handles=patches, loc=2, borderaxespad=-6)
    plt.yticks(range(self.__E.shape[0]), ['cell %d'%i for i in range(self.__N)])
    plt.xticks(range(self.__E.shape[1]), [ 'mut %d'%i for i in range(self.__M)])
    plt.xticks(rotation=60)
    plt.xlabel('D-E')
    plt.title(r'Parameters: ${}$'.format(self.__latex_params))
    if save_path is not None:
        file_path = '{}DmE_{}.png'.format(save_path, self.__str_params) if save_path[-1] == '/' else save_path
        plt.savefig(file_path)
        plt.close()
        return imageio.imread(file_path)
    if not nofig:
        plt.show()
        plt.close()
def plot_Dm(self, save_path=None, nofig=False, figsize=None):
    """Heat-map of Dm: the noisy matrix with missing entries (value 3 = missed).

    Same save_path/nofig/figsize semantics as plot_E.
    """
    if not nofig:
        plt.figure(figsize=figsize if figsize else (self.__M*self.__plot_scale,self.__N*self.__plot_scale))
    ## first you need to define your color map and value name as a dict
    t = 1 ## alpha value
    cmap = {0:[1,1,0.95,t], 1:[0.2,0.2,0.4,t], 3:[0.8,0.5,0.5,t]}
    labels = {0:'0', 1:'1', 3:'missed'}
    arrayShow = np.array([[cmap[i] for i in j] for j in self.__Dm])
    ## create patches as legend
    patches =[mpatches.Patch(color=cmap[i],label=labels[i]) for i in cmap]
    plt.imshow(arrayShow, interpolation="nearest")
    plt.legend(handles=patches, loc=2, borderaxespad=-6)
    plt.yticks(range(self.__D.shape[0]), ['cell %d'%i for i in range(self.__N)])
    plt.xticks(range(self.__D.shape[1]), [ 'mut %d'%i for i in range(self.__M)])
    plt.xticks(rotation=60)
    plt.xlabel('Noisy Genes-Cells Matrix with Missed Data ($D_m$)')
    plt.title(r'Parameters: ${}$'.format(self.__latex_params))
    if save_path is not None:
        file_path = '{}Dm_{}.png'.format(save_path, self.__str_params) if save_path[-1] == '/' else save_path
        plt.savefig(file_path)
        plt.close()
        return imageio.imread(file_path)
    if not nofig:
        plt.show()
        plt.close()
def plot_all_mat(self, figsize=None):
    """2x2 overview figure: E, D, D-E and Dm side by side."""
    figsize = figsize if figsize else (self.__M*self.__plot_scale,self.__N*self.__plot_scale)
    plt.figure(figsize=figsize)
    plt.subplot(2, 2, 1)
    plt.title('E')
    self.plot_E(figsize=np.asarray(figsize)/2, nofig=True)
    plt.subplot(2, 2, 2)
    plt.title('D')
    self.plot_D(figsize=np.asarray(figsize)/2, nofig=True)
    plt.subplot(2, 2, 3)
    plt.title('D-E')
    self.plot_DmE(figsize=np.asarray(figsize)/2, nofig=True)
    plt.subplot(2, 2, 4)
    plt.title('D with missed data')
    self.plot_Dm(figsize=np.asarray(figsize)/2, nofig=True)
    plt.show()
class TreeGenerator():
    '''
    Generates a random mutation tree with attached cells and the
    corresponding error-free genotype matrix.

    Inputs: (M, N, ZETA, Gamma, alpha, beta, MR, save_dir)
    --------------------------------------
    M     : num of genes (mutations)
    --------------------------------------
    N     : num of samples (cells); clamped to at least M so every
            mutation node can host a cell
    --------------------------------------
    ZETA  : homogeneity of the tree (damping of merged weights)
    --------------------------------------
    Gamma : fraction of mutations merged into multi-mutation nodes
    --------------------------------------
    alpha : ~ P(D=1|E=0), stored for downstream noise generation
    --------------------------------------
    beta  : ~ P(D=0|E=1), stored for downstream noise generation
    --------------------------------------
    MR    : missing-data ratio, stored for downstream use
    --------------------------------------
    Output of generate(): a `Tree` object wrapping
    ------------------------------------------------
    E : mutation-cell matrix without errors
    ------------------------------------------------
    T : the generated tree (with cells attached)
    ------------------------------------------------
    '''
    def __init__(self,
                 M, N,
                 ZETA=1, Gamma=0.15, alpha=0.1, beta=0.08, MR=0.05,
                 save_dir=None):
        self.M = M
        # Cells are attached one-per-mutation-node first, so we need N >= M.
        self.N = max(M, N)
        self.ZETA = ZETA
        self.Gamma = Gamma
        self.alpha = alpha
        self.beta = beta
        self.MR = MR
        self.save_dir = save_dir

    def generate(self):
        '''Build a random tree, attach cells, derive E, and return a Tree object.'''
        ## ========================================================
        ## ~~~~~~~~~~~~~~~~ generate a random tree ~~~~~~~~~~~~~~~~
        ## ========================================================
        self.Tree = dict()
        self.cnt = 2
        xk = np.arange(self.M + 1)
        name_k = [str(i) for i in xk]
        # np.longdouble is the portable spelling; np.float128 does not exist
        # on every platform (e.g. Windows).
        wk = np.ones(self.M + 1, dtype=np.longdouble)
        while True:
            xk, wk, name_k, u, v = self.do_next(xk, wk, name_k)
            self.cnt += 1
            if len(xk) < 2:
                break
        T = nx.DiGraph(self.Tree)
        # Drop the original singleton leaves; the internal merge nodes remain.
        T_leaves = [x for x in T.nodes() if T.out_degree(x) == 0 and T.in_degree(x) == 1]
        T.remove_nodes_from(T_leaves)
        # Relabel internal nodes with a random permutation of mutations 0..M-1.
        perm = np.arange(self.M)
        np.random.shuffle(perm)
        mapping = dict((node, label) for node, label in zip(T.nodes(), perm))
        T = nx.relabel_nodes(T, mapping)
        raw_T = T.copy()
        root = [n for n, d in raw_T.in_degree() if d == 0][0]
        ## ========================================================
        ## ~~~~~~~~~~~~~~~ merge some of mutations ~~~~~~~~~~~~~~~~
        ## ========================================================
        n_merges = int(np.floor(self.Gamma * self.M))
        for _ in range(n_merges):
            # Pick a random non-root edge (p -> c) and collapse c into p.
            # random.sample needs a sequence, hence list(...): edge views
            # are not subscriptable on modern networkx/Python.
            while True:
                p, c = random.sample(list(T.edges()), 1)[0]
                if p != root:
                    break
            for child in list(T.successors(c)):
                T.add_edge(p, child)
            T.remove_node(c)
            T = nx.relabel_nodes(T, {p: '{} . {}'.format(p, c)})
        ## ========================================================
        ## ~~~~~~~~~~~~~~~~~ add cells to the tree ~~~~~~~~~~~~~~~~
        ## ========================================================
        mutation_nodes = list(T.nodes())
        cells = np.array(['cell %d' % i for i in range(self.N)])
        np.random.shuffle(cells)
        # First guarantee every mutation node hosts at least one cell ...
        for node in mutation_nodes:
            T.add_edge(node, cells[0])
            cells = cells[1:]
        # ... then scatter the remaining cells over random mutation nodes.
        for cell in cells:
            node = random.sample(mutation_nodes, 1)[0]
            T.add_edge(node, cell)
        ## ========================================================
        ## ~~~~~~~~~~~~~~~~~~~~~~ Tree to E ~~~~~~~~~~~~~~~~~~~~~~~
        ## ========================================================
        E = np.zeros([self.N, self.M])
        # The root mutation is present in every cell.
        # (bugfix: original wrote E[int(root), :] = 1, which marked one *cell*
        # as carrying every mutation instead)
        E[:, int(root)] = 1
        for n in range(self.N):
            try:
                path = list(nx.all_simple_paths(T, root, 'cell %d' % n))[0]
            except Exception:
                # Every cell must be reachable from the root; dump the tree
                # for post-mortem inspection and abort.
                print('root:', root)
                pdot = nx.drawing.nx_pydot.to_pydot(T)
                pdot.write_png('problem_tree.png')
                exit()
            for g in path[:-1]:
                try:
                    E[n, int(g)] = 1
                except ValueError:
                    # Merged nodes carry labels like '3 . 7'.
                    for sub in g.split(' . '):
                        E[n, int(sub)] = 1
        ## ========================================================
        ## ~~~~~~~~~~~~ perform acceptable losses (CP) ~~~~~~~~~~~~
        ## ========================================================
        # NOTE(review): the copy-number-profile generation sketched in the
        # original source was unfinished -- it contained a bare `if` (a
        # syntax error), referenced an undefined self.__T, and was followed
        # by an unreachable return using undefined D/Dm. It has been removed
        # until implemented; CP is returned as None below.
        ## ========================================================
        ## ~~~~~~~~~~~~~~~~ return generated data ~~~~~~~~~~~~~~~~~
        ## ========================================================
        tree_obj = Tree(
            T=T.copy(), E=E, CP=None,
            zeta=self.ZETA,
            gamma=self.Gamma,
            alpha=self.alpha,
            beta=self.beta,
            MR=self.MR,
        )
        return tree_obj

    def do_next(self, xk, wk, name_k):
        '''Draw two distinct weighted nodes, merge them into a new parent node.

        Returns the updated (xk, wk, name_k) state plus the merged pair (u, v).
        '''
        u, v = self.__weighted_drand(xk, wk, size=2)
        idx_u = np.where(xk == u)[0]
        idx_v = np.where(xk == v)[0]
        w_u = wk[idx_u]
        w_v = wk[idx_v]
        # ZETA dampens the combined weight; it controls tree homogeneity.
        w_uv = (w_u + w_v) / (self.ZETA ** 0.25)
        nu = name_k[int(idx_u)]
        nv = name_k[int(idx_v)]
        nuv = '{}.{}'.format(nu, nv)
        self.Tree[nuv] = [nu, nv]
        xk = np.delete(xk, [idx_u, idx_v])
        name_k = np.delete(name_k, [idx_u, idx_v])
        wk = np.delete(wk, [idx_u, idx_v])
        xk = np.append(xk, self.M + self.cnt)
        name_k = np.append(name_k, nuv)
        wk = np.append(wk, w_uv)
        return (xk, wk, name_k, u, v)

    @staticmethod
    def __rand_pmf(xk, pk, size=1):
        '''Draw `size` *distinct* values from the pmf (xk, pk) by rejection.'''
        custm = stats.rv_discrete(name='custm', values=(xk, pk))
        while True:
            rs = custm.rvs(size=size)
            if len(set(rs)) == len(rs):
                return rs

    def __weighted_drand(self, xk, wk, size=1):
        '''Draw `size` distinct values from xk with probabilities proportional to wk.'''
        pk = wk / np.sum(wk, dtype=np.longdouble)
        return self.__rand_pmf(xk, pk, size)
# ## Test generator

# +
# Small smoke test: 10 mutations, 13 cells.
N, M = 13, 10

generator = TreeGenerator(
    M,
    N,
    ZETA=1,
    Gamma=0.15,
    alpha=0.2,
    beta=0.08,
    MR=0.05,
    save_dir=False,  # NOTE(review): the parameter default is None; False only works because it is falsy -- confirm intent
)
# -

tree_obj = generator.generate()

# Visual sanity checks: matrices side by side, then the annotated tree.
tree_obj.plot_all_mat()

tree_obj.plot_tree_full('../tmp/', title="Ground-truth tree with attached samples")

E = tree_obj.get_E()
E

# to generate `CP` (Copy-number Profile)
T = tree_obj.get_T()
| new_src/data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Line Detection - Using Hough Lines
#
# **cv2.HoughLines(binarized/thresholded image, 𝜌 accuracy, 𝜃 accuracy, threshold)**
# - Threshold here is the minimum vote for it to be considered a line
#
# http://cmp.felk.cvut.cz/~matas/papers/matas-bmvc98.pdf
#
# https://www.geeksforgeeks.org/line-detection-python-opencv-houghline-method/
#
# https://medium.com/@mrhwick/simple-lane-detection-with-opencv-bfeb6ae54ec0
#
# +
import cv2
import numpy as np

image = cv2.imread('../images/soduku.jpg')

# Grayscale and Canny Edges extracted
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 200)

# Run HoughLines with a rho accuracy of 15 pixels and a theta accuracy of
# np.pi / 180 (1 degree); 300 is the minimum number of accumulator votes.
# NOTE(review): the original wrapped this in `for i in range(1, 100)` without
# using `i`, re-running the identical detection 99 times and drawing only
# `lines[0]` (a single line) each time. One call, iterating every detected
# line, is what was intended.
lines = cv2.HoughLines(edges, 15, np.pi / 180, 300)

# Convert each (rho, theta) pair into two far-apart end points for cv2.line.
# HoughLines returns None when nothing is detected, and an (N, 1, 2) array
# otherwise, hence the guard and the per-row unpacking.
if lines is not None:
    for detected in lines:
        rho, theta = detected[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 2)

cv2.imshow('Hough Lines', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
# ### Probabilistic Hough Lines
#
# **cv2.HoughLinesP(binarized image, 𝜌 accuracy, 𝜃 accuracy, threshold, minimum line length, max line gap)**
#
#
#
# +
import cv2
import numpy as np

# Grayscale and Canny Edges extracted
image = cv2.imread('../images/soduku.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 170, apertureSize = 3)

# Probabilistic Hough transform: minimum vote threshold of 200, minimum line
# length of 5 px, maximum gap of 10 px between segments of the same line.
# NOTE(review): the original passed 5 and 10 positionally, where they land in
# the `lines` / `minLineLength` slots of cv2.HoughLinesP -- keyword arguments
# are required for minLineLength/maxLineGap. It also swept rho over
# range(1, 100) against its own comment; a rho accuracy of 1 pixel is used here.
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 200, minLineLength=5, maxLineGap=10)

# HoughLinesP returns None when nothing is found, else an (N, 1, 4) array of
# segment end points.
if lines is not None:
    for segment in lines:
        x1, y1, x2, y2 = segment[0]
        cv2.line(image, (x1, y1), (x2, y2),(0, 255, 0), 3)

cv2.imshow('Probabilistic Hough Lines', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
# -
# # Circle Detection - Hough Circles
#
# cv2.HoughCircles(image, method, dp, MinDist, param1, param2, minRadius, MaxRadius)
#
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html
#
# - Method - currently only cv2.HOUGH_GRADIENT available
# - dp - Inverse ratio of accumulator resolution
# - MinDist - the minimum distance between the center of detected circles
# - param1 - Gradient value used in the edge detection
# - param2 - Accumulator threshold for the HOUGH_GRADIENT method (lower allows more circles to be detected (false positives))
# - minRadius - limits the smallest circle to this size (via radius)
# - MaxRadius - similarly sets the limit for the largest circles
#
#
# +
import cv2
import numpy as np

# Load in grayscale and median-blur to suppress spurious circle detections.
source = cv2.imread('../images/bottlecaps.jpg',0)
source = cv2.medianBlur(source,5)
canvas = cv2.cvtColor(source,cv2.COLOR_GRAY2BGR)

# Accumulator resolution ratio 1, min centre distance 20 px, radii in [2, 50].
detected = cv2.HoughCircles(source, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=2, maxRadius=50)

detected = np.uint16(np.around(detected))
for cx, cy, radius in detected[0,:]:
    # draw the outer circle, then mark its centre
    cv2.circle(canvas,(cx,cy),radius,(0,255,0),2)
    cv2.circle(canvas,(cx,cy),2,(0,0,255),3)

cv2.imshow('detected circles',canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
| 06 - countours extended/01 - Line and circle Detection .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center><h1><b><span style="color:blue">Particles & decays</span></b></h1></center>
#
# <br>
# **Particles and decays are central "concepts" in Particle Physics.** Could one really do without dedicated packages in the ecosystem? ...
# Of course not!
#
# ### **Quick intro to the following packages**
# - [Particle](https://github.com/scikit-hep/particle) - *Particle Data Group* particle data, Monte Carlo identification codes, and more.
# - [DecayLanguage](https://github.com/scikit-hep/decaylanguage) - Decay files (notably for EvtGen), universal description of decay chains.
#
# <center><img src="images/logo_particle.png" alt="Particle package logo" style="width: 150px;"/></center>
#
# <center><h2><b><span style="color:green">PDG particle data, MC identification codes</span></b></h2></center>
# **The [Particle Data Group](https://pdg.lbl.gov/) (PDG) is an "international collaboration that provides a comprehensive summary of Particle Physics and related areas of Cosmology: the Review of Particle Physics."**
# <center><img src="images/intro_PDG.jpg" style="width:55%"/></center>
# The **Review of Particle Physics** is a document whose importance is impossible to oversell.
# Some interesting facts about it and its more recent edition [https://pdg.lbl.gov/#about]:
#
# <center><img src="images/intro_PDG_about.png" style="width:55%"/></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ### Package motivation - particle data
#
# - The PDG provides a series of downloadable <span style="color:green">*Computer Readable Files* and in particular a table of particle masses, widths, etc. and PDG Monte Carlo particle ID numbers</span> (PDG IDs).
# The most recent file is [here](http://pdg.lbl.gov/2021/html/computer_read.html).
# - It <span style="color:green">also provided an experimental file with extended information</span>
# (spin, quark content, P and C parities, etc.) until 2008 only, see [here](http://pdg.lbl.gov/2008/html/computer_read.html) (not widely known!).
#
# - But <span style="color:green"><i>anyone</i> wanting to use these data</span>, the only readily available,
# <span style="color:green">has to parse the file programmatically</span>.
# - Why not make a Python package to deal with all these data, for everyone?
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Package motivation - MC identification codes
#
# - The <span style="color:green">C++ HepPID and HepPDT libraries provide functions for processing particle ID codes</span>
# in the standard particle (aka PDG) numbering scheme.
# - Different event generators may have their separate set of particle IDs: Geant3, etc.
# - Again, why not make a package providing all functionality/conversions, Python-ically, for everyone?
# -
# ### **Pythonic interface to**
# - Particle Data Group (PDG) particle data table.
# - Particle MC identification codes, with inter-MC converters.
# - With various extra goodies.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Package, in short
#
# - <span style="color:green">Particle</span> - loads extended <b>PDG data tables</b> and implements search and manipulations / display.
# - <span style="color:green">PDGID</span> - find out as much as possible from the PDG ID number. <b>No table lookup</b>.
# - <span style="color:green">Converters for MC IDs</span> used in [Pythia](https://pythia.org/) and Geant3.
# - Flexible / advanced usage programmatically.
# - Basic usage via the command line.
# + [markdown] slideshow={"slide_type": "slide"}
# ### **1. `PDGID` class and MC ID classes**
#
#
# - Classes `PDGID`, `PythiaID`, `Geant3ID`.
# - Converters in module `particle.converters`: `Geant2PDGIDBiMap`, etc.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### PDG IDs module overview
#
# - <span style="color:green">Process and query PDG IDs</span>, and more – no look-up table needed.
# - Current version of package reflects the latest version of the
# <span style="color:green">HepPID & HepPDT utility functions</span> defined in the C++ HepPID and HepPDT versions 3.04.01
# - It contains more functionality than that available in the C++ code … and minor fixes too.
# - Definition of a <span style="color:green">PDGID class, PDG ID literals</span>,
# and set of standalone HepPID <span style="color:green">functions to query PDG IDs</span>
# (is_meson, has_bottom, j_spin, charge, etc.).
# - All PDGID class functions are available standalone.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### PDGID class
# - Wrapper class `PDGID` for PDG IDs.
# - Behaves like an int, with extra goodies.
# - Large spectrum of properties and methods, with a Pythonic interface, and yet more!
# + slideshow={"slide_type": "subslide"}
from particle import PDGID
# -

# Wrap a PDG Monte Carlo particle ID; behaves like an int with extra goodies.
pid = PDGID(211)
pid

# An out-of-scheme number still constructs; its properties flag it as invalid.
PDGID(99999999)

# + slideshow={"slide_type": "subslide"}
from particle.pdgid import is_meson

# The class property and the standalone HepPID-style function agree.
pid.is_meson, is_meson(pid)

# + [markdown] slideshow={"slide_type": "subslide"}
# To print all `PDGID` properties:
# -

print(pid.info())

# + [markdown] slideshow={"slide_type": "subslide"}
# #### MC ID classes and converters
#
# - <span style="color:green">Classes for MC IDs</span> used in Pythia and Geant3: `PythiaID` and `Geant3ID`.
# - <span style="color:green">ID converters</span> in module `particle.converters`: `Geant2PDGIDBiMap`, etc.

# +
from particle import PythiaID, Geant3ID

pyid = PythiaID(10221)
pyid.to_pdgid()

# + [markdown] slideshow={"slide_type": "subslide"}
# Conversions are directly available via mapping classes.
#
# E.g., bi-directional map Pythia ID - PDG ID:

# +
from particle.converters import Pythia2PDGIDBiMap
Pythia2PDGIDBiMap[PDGID(9010221)]
# -

Pythia2PDGIDBiMap[PythiaID(10221)]
# + [markdown] slideshow={"slide_type": "slide"}
# ### **2. `Particle` class**
#
# There are various ways to create a particle. The often used method is via its PDG ID.
# -
from particle import Particle

# Look a particle up by its PDG ID.
Particle.from_pdgid(211)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Searching
#
# <span style="color:green">Simple and natural API</span> to deal with the PDG particle data table,<br>with <span style="color:green">powerful 1-line search and look-up utilities!</span>
#
# - `Particle.find(…)` – search a single match (exception raised if multiple particles match the search specifications).
# - `Particle.findall(…)` – search a list of candidates.
#
# - Search methods that can query any particle property!

# + slideshow={"slide_type": "subslide"}
Particle.find('J/psi')

# + [markdown] slideshow={"slide_type": "subslide"}
# You can specify search terms as keywords - _any particle property_:

# + [markdown] slideshow={"slide_type": "subslide"}
# You can directly check the numeric charge:
# -

Particle.findall('pi', charge=-1)

# + [markdown] slideshow={"slide_type": "subslide"}
# Or use a **lambda function** for the ultimate in generality! For example, to find all the neutral particles with a bottom quark between 5.2 and 5.3 GeV:
# -

from hepunits import GeV, s # Units are good. Use them.

Particle.findall(lambda p:
                 p.pdgid.has_bottom
                 and p.charge==0
                 and 5.2*GeV < p.mass < 5.3*GeV
                 )

# + [markdown] slideshow={"slide_type": "subslide"}
# Another lambda function example: You can use the width or the lifetime:

# + slideshow={"slide_type": "-"}
Particle.findall(lambda p: p.lifetime > 1000*s)

# + [markdown] slideshow={"slide_type": "subslide"}
# If you want infinite lifetime, you could just use the keyword search instead:
# -

Particle.findall(lifetime=float('inf'))

# + [markdown] slideshow={"slide_type": "subslide"}
# Trivially find all pseudoscalar charm mesons:

# +
from particle import SpinType

Particle.findall(lambda p: p.pdgid.is_meson and p.pdgid.has_charm and p.spin_type==SpinType.PseudoScalar)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Display
#
# Nice display in Jupyter notebooks, as well as `str` and `repr` support:
# -
p = Particle.from_pdgid(-415)
p

# + slideshow={"slide_type": "subslide"}
print(p)
# -

print(repr(p))

# + [markdown] slideshow={"slide_type": "subslide"}
# Full descriptions:
# -

print(p.describe())

# + [markdown] slideshow={"slide_type": "subslide"}
# You may find LaTeX or HTML to be more useful in your program; both are supported:
# -

print(p.latex_name, '\n', p.html_name)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Particle properties
#
# You can do things to particles, like **invert** them:
# -

# Unary ~ "inverts" the particle -- presumably its charge conjugate.
~p

# + [markdown] slideshow={"slide_type": "subslide"}
# There are a plethora of properties you can access:
# -

p.spin_type

# + [markdown] slideshow={"slide_type": "subslide"}
# You can quickly access the PDGID of a particle:
# -

p.pdgid

# + [markdown] slideshow={"slide_type": "slide"}
# ### **3. Literals**
#
# They provide a <span style="color:green">handy way to manipulate things with human-readable names!</span>
#
# `Particle` defines <span style="color:green">literals for all particles</span>, with easily recognisable (programmatic friendly) names.
# - Literals are dynamically generated on import for both `PDGID` and `Particle` classes.

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Particle literals
# -

from particle import literals as lp
lp.phi_1020

# + [markdown] slideshow={"slide_type": "subslide"}
# #### PDGID literals
# -

from particle.pdgid import literals as lid
lid.phi_1020
# + [markdown] slideshow={"slide_type": "slide"}
# ### **4. Data files, stored in `particle/data/`**
#
# - <b>PDG particle data files</b>
# - Original PDG data files, which are in a fixed-width format - simply for bookkeeping and reference.
# - Code rather uses “digested forms” of these, produced within `Particle`, stored as CSV, for optimised querying.
# - Latest PDG data (2020) used by default.
# - Advanced usage: user can load older PDG tables, load a “user table” with new particles, append to default table.
#
# - <b>Other data files</b>
# - CSV file for mapping of PDG IDs to particle LaTeX names.
# + [markdown] slideshow={"slide_type": "subslide"}
# **Dump table contents**
#
# The package provides the 2 methods `Particle.to_list(...)` and `Particle.to_dict(...)`, which make it easy to dump (selected) particle properties in an easy way. No need to dig into the package installation directory to inspect the particle data table ;-).
#
# Tabular output can be formatted with the powerful package `tabulate`, for example (other similar libraries exist).
# +
from tabulate import tabulate

# Select charged leptons, keep only a few fields, then pretty-print the rows.
query_as_list = Particle.to_list(filter_fn=lambda p: p.pdgid.is_lepton and p.charge!=0, exclusive_fields=['pdgid', 'name', 'mass', 'charge'])
print(tabulate(query_as_list, headers="firstrow"))
# -
# Fancy creating tables of particle properties in, say, HTML or reStructuredText format, as below? Then check out the *exercises/particle.ipynb* notebook in the exercises.
# <table>
# <tr style="background: white;">
# <td align="center"><img src="images/Scikit-HEP_gallery_Particle.jpg" width="80%"></td>
# <td align="center"><img src="images/Scikit-HEP_gallery_Particle_ex-table-rst.png" width="80%"></td>
# </tr>
# </table>
# + [markdown] slideshow={"slide_type": "slide"}
# ### **5. Advanced usage**
#
# You can:
#
# * Extend or replace the default particle data table in `Particle`.
# * Adjust properties for a particle.
# * Make custom particles.
# -
# <br>
# <center>
# <img src="images/logo_decaylanguage.png" style="width: 200px;"/>
# <h2><b><span style="color:green">Decay files, universal description of decay chains</span></b></h2>
# </center>
#
# `DecayLanguage` is designed for the manipulation of decay structures in Python. The current package has:
#
# - Decay file parsers:
# - Read *.dec DecFiles*, such as [EvtGen](https://evtgen.hepforge.org/)<sup>(*)</sup> decay files typically used in Flavour Physics experiments.
# - Manipulate and visualise them in Python.
# - Amplitude Analysis decay language:
# - Input based on AmpGen generator, output format for GooFit C++ program.
#
# > <sup>(*)</sup> *"EvtGen is a Monte Carlo event generator that simulates the decays of heavy flavour particles, primarily B and D mesons. It contains a range of decay models for intermediate and final states containing scalar, vector and tensor mesons or resonances, as well as leptons, photons and baryons. Decay amplitudes are used to generate each branch of a given full decay tree, taking into account angular and time-dependent correlations which allows for the simulation of CP-violating processes..."*
# + [markdown] slideshow={"slide_type": "slide"}
# ### Package motivation
#
# - Ability to describe decay-tree-like structures.
# - Provide a translation of decay amplitude models from AmpGen to GooFit.
# - Idea is to generalise this to other decay descriptions.
# - Any experiment uses event generators which, among many things, need to describe particle decay chains.
# - Programs such as EvtGen rely on so-called .dec decay files.
# - Many experiments need decay data files.
# - Why not make a Python package to deal with decay files, for everyone?
# + [markdown] slideshow={"slide_type": "slide"}
# ### Package, in short
#
# - Tools to parse decay files and programmatically manipulate them, query, display information.
# - Descriptions and parsing built atop the [Lark parser](https://github.com/lark-parser/lark/).
# - Tools to translate decay amplitude models from AmpGen to GooFit, and manipulate them.
# + [markdown] slideshow={"slide_type": "slide"}
# ### **1. Decay files**
#
# #### “Master file” DECAY.DEC
#
# <span style="color:green">Gigantic file defining decay modes for all relevant particles, including decay model specifications.</span>
# The LHCb experiment uses one. Belle II as well, and others.
#
# #### User .dec files
# - Needed to produce specific MC samples.
# - Typically contain a single decay chain (except if defining inclusive samples).
# + [markdown] slideshow={"slide_type": "subslide"}
# **Example user decay file:**
#
# <small>
# <pre>
# # Decay file for [B_c+ -> (B_s0 -> K+ K-) pi+]cc
#
# Alias B_c+sig B_c+
# Alias B_c-sig B_c-
# ChargeConj B_c+sig B_c-sig
# Alias MyB_s0 B_s0
# Alias Myanti-B_s0 anti-B_s0
# ChargeConj MyB_s0 Myanti-B_s0
#
# Decay B_c+sig
# 1.000 MyB_s0 pi+ PHOTOS PHSP;
# Enddecay
# CDecay B_c-sig
#
# Decay MyB_s0
# 1.000 K+ K- SSD_CP 20.e12 0.1 1.0 0.04 9.6 -0.8 8.4 -0.6;
# Enddecay
# CDecay Myanti-B_s0
# </pre>
# </small>
# + [markdown] slideshow={"slide_type": "slide"}
# ### **2. Decay file parsing**
#
# - Parsing should be simple
# - Parsing should be (reasonably) fast!
#
# After parsing, many queries are possible!
# + slideshow={"slide_type": "subslide"}
from decaylanguage import DecFileParser
# -

# #### The LHCb "master decay file"
#
# It's a big file! ~ 500 particle decays defined, thousands of decay modes, over 11k lines in total.

dfp = DecFileParser('data/DECAY_LHCB.DEC')

# %%time
dfp.parse()

dfp

# + [markdown] slideshow={"slide_type": "subslide"}
# Let's parse and play with a small decay file:
# -

# Show the raw decay-file contents before parsing.
with open('data/Dst.dec') as f:
    print(f.read())

# + slideshow={"slide_type": "subslide"}
dfp_Dst = DecFileParser('data/Dst.dec')
dfp_Dst
# -

dfp_Dst.parse()
dfp_Dst

# + [markdown] slideshow={"slide_type": "subslide"}
# It can be handy to **parse from a multi-line string** rather than a file:
# -

# Same decay description as the user .dec example shown earlier, inlined.
s = """
# Decay file for [B_c+ -> (B_s0 -> K+ K-) pi+]cc
Alias B_c+sig B_c+
Alias B_c-sig B_c-
ChargeConj B_c+sig B_c-sig
Alias MyB_s0 B_s0
Alias Myanti-B_s0 anti-B_s0
ChargeConj MyB_s0 Myanti-B_s0
Decay B_c+sig
1.000 MyB_s0 pi+ PHOTOS PHSP;
Enddecay
CDecay B_c-sig
Decay MyB_s0
1.000 K+ K- SSD_CP 20.e12 0.1 1.0 0.04 9.6 -0.8 8.4 -0.6;
Enddecay
CDecay Myanti-B_s0
"""

# + slideshow={"slide_type": "subslide"}
dfp = DecFileParser.from_string(s)
dfp.parse()
dfp
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Decay file information
#
# Several methods are available, e.g.:
# -
# List every mother particle with decays defined in the file.
dfp_Dst.list_decay_mother_names()

dfp_Dst.print_decay_modes('D*+')

# + [markdown] slideshow={"slide_type": "subslide"}
# #### Info such as particle aliases
# -

dfp.dict_aliases()

dfp.dict_charge_conjugates()
# + [markdown] slideshow={"slide_type": "slide"}
# ### **3. Display of decay chains**
#
# The parser can provide a simple `dict` representation of any decay chain found in the input decay file(s). Being generic and simple, that is what is used as input information for the viewer class (see below).
# -
# A decay chain as a plain dict representation.
dc = dfp_Dst.build_decay_chains('D+')
dc

# + slideshow={"slide_type": "subslide"}
from decaylanguage import DecayChainViewer

# + slideshow={"slide_type": "-"}
DecayChainViewer(dc)

# + slideshow={"slide_type": "subslide"}
# stable_particles: do not expand the decays of the listed particles.
dc = dfp_Dst.build_decay_chains('D*+', stable_particles=['D+', 'D0', 'pi0'])
DecayChainViewer(dc)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### **Charge conjugation**
# -

dc_cc = dfp_Dst.build_decay_chains('D*-', stable_particles=['D-', 'anti-D0', 'pi0'])
DecayChainViewer(dc_cc)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### **Parsing several files**
#
# Typically useful when the user decay file needs information from the master decay file.
# -
s = u"""
Alias MyXic+ Xi_c+
Alias MyantiXic- anti-Xi_c-
ChargeConj MyXic+ MyantiXic-
Decay Xi_cc+sig
1.000 MyXic+ pi- pi+ PHSP;
Enddecay
CDecay anti-Xi_cc-sig
Decay MyXic+
1.000 p+ K- pi+ PHSP;
Enddecay
CDecay MyantiXic-
End
"""
# + slideshow={"slide_type": "subslide"}
dfp = DecFileParser.from_string(s)
dfp.parse()
dfp
# -
# Note the subtlety: 3, not 4, decays are found! This is because the file contains no statement
# `ChargeConj anti-Xi_cc-sig Xi_cc+sig`, hence the parser cannot know to which particle (matching `Decay` statement) the charge-conjugate decay of `anti-Xi_cc-sig` relates to (code does not rely on position of statements to guess ;-)).
# + slideshow={"slide_type": "subslide"}
# Xi_cc+sig has a matching 'Decay' statement, so its chain can be built.
d = dfp.build_decay_chains('Xi_cc+sig')
DecayChainViewer(d)
# + [markdown] slideshow={"slide_type": "subslide"}
# As said in the warning, the information provided is not enough for the anti-Xi_cc-sig to make sense:
# +
from decaylanguage.dec.dec import DecayNotFound
# Without the ChargeConj statement the conjugate decay cannot be resolved,
# so the lookup raises DecayNotFound.
try:
    d = dfp.build_decay_chains('anti-Xi_cc-sig')
except DecayNotFound:
    print("Decays of particle 'anti-Xi_cc-sig' not found in .dec file!")
# + [markdown] tags=[]
# But the missing information is easily provided by **parsing two files simultaneously ...!** (Any number of files is allowed.)
# + slideshow={"slide_type": "subslide"} tags=[]
import os
from tempfile import NamedTemporaryFile

# Write the inline decay-file snippet to a real file on disk:
# DecFileParser takes file paths, so the string cannot be passed directly here.
# delete=False is required so the file survives the 'with' block and can be
# re-opened by the parser (also avoids Windows file-locking issues).
with NamedTemporaryFile(delete=False) as tf:
    tf.write(s.encode('utf-8'))

try:
    # Parse the user snippet together with the master LHCb decay file, which
    # supplies the charge-conjugation information the snippet alone is missing.
    dfp = DecFileParser(tf.name, 'data/DECAY_LHCB.DEC')
    dfp.parse()
finally:
    # Clean up: with delete=False nobody else removes the temporary file.
    os.remove(tf.name)
# + slideshow={"slide_type": "subslide"}
# With both files parsed, the Xi_cc+sig chain builds as before ...
dc = dfp.build_decay_chains('Xi_cc+sig')
DecayChainViewer(dc)
# + slideshow={"slide_type": "subslide"}
# ... and the charge-conjugate chain now resolves as well.
dc_cc = dfp.build_decay_chains('anti-Xi_cc-sig')
DecayChainViewer(dc_cc)
# -
# Want to save a graph? Try for example
# ```python
# dcv = DecayChainViewer(...)
# dcv.graph.save(...)
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ### **4. Representation of decay chains**
#
# <span style="color:green">The universal (and digital) representation of decay chains is of interest well outside the context of decay file parsing!</span>
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Building blocks
#
# - A <span style="color:green">daughters list</span> - list of final-state particles.
# - A <span style="color:green">decay mode</span> - typically a branching fraction and a list of final-state particles (may also contain _any_ metadata such as decay model and optional decay-model parameters, as defined for example in .dec decay files).
# - A <span style="color:green">decay chain</span> - can be seen as a mother particle and a list of decay modes.
# + slideshow={"slide_type": "subslide"}
from decaylanguage.decay.decay import DaughtersDict, DecayMode, DecayChain
# -
# **Daughters list** (actually a ``Counter`` dictionary, internally):
# + slideshow={"slide_type": "subslide"}
# Constructor from a dictionary of particle-name -> multiplicity
dd = DaughtersDict({'K+': 1, 'K-': 2, 'pi+': 1, 'pi0': 1})
# Constructor from a list of particle names (duplicates allowed, counted up)
dd = DaughtersDict(['K+', 'K-', 'K-', 'pi+', 'pi0'])
# Constructor from a string representing the final state
# NOTE: each assignment rebinds dd; only this last final state is displayed below.
dd = DaughtersDict('K+ K- pi0')
dd
# -
# #### Decay Modes
# + slideshow={"slide_type": "subslide"}
# A 'default', and hence empty, decay mode
dm = DecayMode()
# Decay mode with minimal input information: a branching fraction and daughters
dd = DaughtersDict('K+ K-')
dm = DecayMode(0.5, dd)
# Decay mode with decay model information and arbitrary user metadata
# (extra keyword arguments such as study/year are simply stored as metadata)
dm = DecayMode(0.2551, # branching fraction
'pi- pi0 nu_tau', # final-state particles
model='TAUHADNU', # decay model
model_params=[-0.108, 0.775, 0.149, 1.364, 0.400], # decay-model parameters
study='toy', year=2019 # user metadata
)
dm
# + slideshow={"slide_type": "subslide"}
# Human-readable description of the decay mode defined just above.
print(dm.describe())
# + [markdown] slideshow={"slide_type": "subslide"}
# Various manipulations are available:
# +
# Build a decay mode from PDG particle IDs instead of particle names.
dm = DecayMode.from_pdgids(0.5, [321, -321])
print(dm)
# Obtain the charge-conjugate decay mode (presumably returns a new DecayMode
# rather than mutating dm in place -- TODO confirm against the API docs).
dm = DecayMode(1.0, 'K+ K+ pi-')
dm.charge_conjugate()
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Decay chains
# +
# Sub-decays: D0 -> K_S0 pi0, K_S0 -> pi+ pi-, pi0 -> gamma gamma
dm1 = DecayMode(0.0124, 'K_S0 pi0', model='PHSP')
dm2 = DecayMode(0.692, 'pi+ pi-')
dm3 = DecayMode(0.98823, 'gamma gamma')
# A decay chain is a mother particle plus a decay mode for each decaying particle.
dc = DecayChain('D0', {'D0':dm1, 'K_S0':dm2, 'pi0':dm3})
dc
# -
dc.decays
# + [markdown] slideshow={"slide_type": "subslide"}
# Flatten the decay chain, i.e. replace all intermediate, decaying particles, with their final states:
# - The BF is now the *visible BF*
# + slideshow={"slide_type": "-"}
dc.flatten()
# -
# Of course you can still just as easily visualise decays defined via this `DecayChain` class:
# + slideshow={"slide_type": "subslide"}
DecayChainViewer(dc.to_dict())
| scikit-hep/03_Scikit-HEP_particles-decays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests, urllib.parse

# Currency-conversion demo: query the exchangerate.host API for USD -> EUR/GBP/INR
# conversions of a user-supplied amount, returned as XML, and print the results.
serviceurl = 'https://api.exchangerate.host/latest?base=USD&symbols=EUR,GBP,INR&format=xml&amount='

# input() already returns a str; URL-encode it so unexpected characters in the
# user's entry cannot break the query string.
value = input('Enter the amount to convert: ')
url = serviceurl + urllib.parse.quote(value)

# Fetch the XML payload; the timeout prevents hanging forever on a dead connection.
uh = requests.get(url, timeout=10)
data = uh.text
print('Retrieved', len(data), 'characters')
print(data)

import xml.etree.ElementTree as ET

tree3 = ET.fromstring(data)

# Exploratory dump: the text content of every element in the response.
for elem in tree3.iter():
    print(elem.text)

# Extract <code>/<rate> pairs and report the converted amounts.
for elem in tree3.iter():
    curr = elem.find('code')
    amount = elem.find('rate')
    # Use 'is not None': ElementTree documents that Elements without children
    # test false-y, so identity comparison is the reliable "found" check.
    if curr is not None and amount is not None:
        print(value + " USD = " + amount.text + " " + curr.text)
| Chapter07/Exercise87-92/Exercise_92_update.ipynb |