text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import math
from functools import reduce
# import pytorch_lightning as pl
import torch
from torch import nn
import torch.nn.functional as F
import random
import numpy as np
# helpers
def prob_mask_like(t, prob):
    """Return a boolean mask shaped like `t`, each position True with probability `prob`."""
    noise = torch.zeros_like(t).float().uniform_(0, 1)
    return noise < prob
def mask_with_tokens(t, token_ids):
    """Mark every position of `t` whose value appears in `token_ids`.

    Returns a boolean tensor of the same shape as `t`.
    """
    matched = torch.full_like(t, False, dtype=torch.bool)
    for token_id in token_ids:
        matched = matched | (t == token_id)
    return matched
def get_mask_subset_with_prob(mask, prob):
    """Randomly select roughly `prob` of the allowed (True) positions of `mask`.

    Each row gets at most ceil(prob * seq_len) selections, trimmed per-row to
    ceil(num_allowed * prob) via the cumulative-sum quota trick.
    """
    batch, seq_len = mask.shape
    device = mask.device
    max_masked = math.ceil(prob * seq_len)
    allowed_per_row = mask.sum(dim=-1, keepdim=True)
    # Positions past each row's own quota get their sampled index zeroed out.
    over_quota = (mask.cumsum(dim=-1) > (allowed_per_row * prob).ceil())[:, :max_masked]
    # Disallowed positions get -1e9 so topk never picks them (when avoidable).
    scores = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
    _, picked = scores.topk(max_masked, dim=-1)
    # Shift by one so index 0 can serve as a "discard" slot.
    picked = (picked + 1).masked_fill_(over_quota, 0)
    padded = torch.zeros((batch, seq_len + 1), device=device)
    padded.scatter_(-1, picked, 1)
    return padded[:, 1:].bool()
def get_mask_subset_with_prob_tri(mask, prob, subset_with_prob=True):
    """Triangular dynamic masking.

    Randomly keeps either the strict upper triangle (offset +2) or the strict
    lower triangle (offset -2) of `mask`, optionally OR-ed with a random
    probabilistic subset from `get_mask_subset_with_prob`.

    Args:
        mask: (batch, seq_len) boolean/0-1 tensor of maskable positions.
        prob: base masking probability; the probabilistic subset uses 2*prob.
        subset_with_prob: when True, merge in the probabilistic subset.

    Returns:
        Boolean tensor with the same shape as `mask`.
    """
    # Randomly choose head (upper) or tail (lower) triangle.
    if random.choice([0, 1]) == 0:
        tri = mask.triu(diagonal=2)  # offset keeps a margin off the diagonal
    else:
        tri = mask.tril(diagonal=-2)
    tri = tri.bool()
    if subset_with_prob:
        # Union of the triangular selection and the probabilistic one
        # (the original expressed this as torch.where(mask1==True, mask1, mask2)).
        return tri | get_mask_subset_with_prob(mask, prob * 2)
    return tri
def get_mask_subset_with_prob_diagonal(mask, prob, subset_with_prob=True):
    """Band-diagonal dynamic masking: a random-width band along the main
    diagonal, producing contiguous masked spans (for span prediction),
    optionally OR-ed with a probabilistic subset.

    Bug fix: the original drew `random.randint(5, max_d)` where `max_d`
    could be < 5 (it was min(24, randint(1, seq_len//2))), raising
    ValueError; the width is now drawn once from an always-valid range.

    Args:
        mask: (batch, seq_len) boolean/0-1 tensor of maskable positions.
        prob: base masking probability; the probabilistic subset uses 2*prob.
        subset_with_prob: when True, merge in the probabilistic subset.

    Returns:
        Boolean tensor with the same shape as `mask`.
    """
    seq_len = mask.shape[1]
    # Widest allowed band: half the sequence, capped at 24, at least 1.
    hi = min(24, max(1, seq_len // 2))
    width = random.randint(min(5, hi), hi)
    # Band between the main diagonal (triu) and the `width` super-diagonal
    # (tril); equivalent to the original torch.where(y==0, y, x).bool().
    band = mask.triu().bool() & mask.tril(diagonal=width).bool()
    if subset_with_prob:
        # Merge with probability-based mask (union of the two selections).
        return band | get_mask_subset_with_prob(mask, prob * 2)
    return band
# main class
# main class
class autoMask(nn.Module):
    """Dynamic masking module for masked-language-model training.

    On each forward pass one of three masking schemes is drawn according to
    `probabilitis`: plain probabilistic masking, random upper/lower-triangle
    masking, or band-diagonal (contiguous-span) masking.

    Example:
        >>> from transformers import BertTokenizer
        >>> tokenizer = BertTokenizer.from_pretrained("uer/chinese_roberta_L-2_H-128")
        >>> tomask = autoMask(
        >>>     mask_token_id = tokenizer.mask_token_id,  # token id reserved for masking
        >>>     pad_token_id = tokenizer.pad_token_id,    # token id for padding
        >>>     mask_prob = 0.05,     # masking probability for masked language modeling
        >>>     replace_prob = 0.90,  # ~10% of chosen tokens stay unmasked but remain in the loss
        >>>     mask_ignore_token_ids = [tokenizer.cls_token_id, tokenizer.eos_token_id]
        >>> )

    Default `pad_token_id`/`mask_token_id` follow
    https://huggingface.co/uer/chinese_roberta_L-2_H-128/blob/main/vocab.txt
    """

    def __init__(
            self,
            mask_prob=0.15,
            replace_prob=0.9,
            num_tokens=None,
            random_token_prob=0.,
            mask_token_id=103,
            pad_token_id=-100,
            mask_ignore_token_ids=(),
            probabilitis=(0.9, 0.05, 0.05)):
        """See class docstring. `probabilitis` [sic — name kept for API
        compatibility] are the selection weights of the three masking schemes
        and must sum to 1. List defaults were replaced with tuples to avoid
        the mutable-default-argument pitfall (read-only use, so compatible).
        """
        super().__init__()
        # MLM-related probabilities.
        self.mask_prob = mask_prob
        self.replace_prob = replace_prob
        self.num_tokens = num_tokens
        self.random_token_prob = random_token_prob
        # Token ids; padding can never be masked, so fold it into the ignore set.
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.mask_ignore_token_ids = set([*mask_ignore_token_ids, pad_token_id])
        self.probabilitis = probabilitis

    def forward(self, input, indices=False, **kwargs):
        """Mask `input` and build MLM labels.

        Args:
            input: integer token-id tensor, shape (batch, seq_len).
            indices: when True, also return the indices of masked positions.

        Returns:
            (masked_input, labels) or (masked_input, labels, mask_indices).
        """
        # Do not mask [pad] tokens or any of the designated excluded tokens
        # ([cls], [sep]); they are also excluded from random replacement below.
        no_mask = mask_with_tokens(input, self.mask_ignore_token_ids)
        # Draw one of the three masking schemes according to `probabilitis`.
        scheme = np.random.choice(len(self.probabilitis), 1, p=self.probabilitis)[0]
        if scheme == 0:
            # Default: probabilistic masking.
            mask = get_mask_subset_with_prob(~no_mask, self.mask_prob)
        elif scheme == 1:
            # Random upper/lower triangle.
            mask = get_mask_subset_with_prob_tri(~no_mask, self.mask_prob)
        else:
            # Band diagonal, i.e. contiguous masked spans.
            mask = get_mask_subset_with_prob_diagonal(~no_mask, self.mask_prob)
        # Indices of masked positions (tuple form, usable for advanced indexing).
        mask_indices = torch.nonzero(mask, as_tuple=True)
        masked_input = input.clone().detach()
        # Optionally replace some tokens with random vocabulary tokens.
        if self.random_token_prob > 0:
            assert self.num_tokens is not None, 'num_tokens keyword must be supplied when instantiating MLM if using random token replacement'
            random_replace = prob_mask_like(input, self.random_token_prob)
            random_tokens = torch.randint(0, self.num_tokens, input.shape, device=input.device)
            random_no_mask = mask_with_tokens(random_tokens, self.mask_ignore_token_ids)
            random_replace &= ~random_no_mask
            random_indices = torch.nonzero(random_replace, as_tuple=True)
            masked_input[random_indices] = random_tokens[random_indices]
        # Replace chosen positions with [MASK] with probability `replace_prob`
        # (the rest stay unchanged but are still predicted in the loss).
        replace_prob = prob_mask_like(input, self.replace_prob)
        masked_input = masked_input.masked_fill(mask & replace_prob, self.mask_token_id)
        # Unmasked positions are excluded from the loss via `pad_token_id`.
        labels = input.masked_fill(~mask, self.pad_token_id)
        if indices:
            return masked_input, labels, mask_indices
        return masked_input, labels
|
{"hexsha": "ca25758e3390c28b097f4dda2fc61b1736a16636", "size": 7328, "ext": "py", "lang": "Python", "max_stars_repo_path": "tkitAutoMask/mask.py", "max_stars_repo_name": "napoler/tkit-automask", "max_stars_repo_head_hexsha": "ee6a85af36c971c54793d14c4e7e07c87a24bb58", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tkitAutoMask/mask.py", "max_issues_repo_name": "napoler/tkit-automask", "max_issues_repo_head_hexsha": "ee6a85af36c971c54793d14c4e7e07c87a24bb58", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tkitAutoMask/mask.py", "max_forks_repo_name": "napoler/tkit-automask", "max_forks_repo_head_hexsha": "ee6a85af36c971c54793d14c4e7e07c87a24bb58", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5660377358, "max_line_length": 157, "alphanum_fraction": 0.6543395197, "include": true, "reason": "import numpy", "num_tokens": 2095}
|
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
theory WPBang
imports
WP
Eisbach_Tools.ProvePart
NonDetMonadVCG
begin
(* Forward rule used below: splits a conjunction and weakens each half
   independently. *)
lemma conj_meta_forward:
  "P \<and> Q \<Longrightarrow> (P \<Longrightarrow> P') \<Longrightarrow> (Q \<Longrightarrow> Q') \<Longrightarrow> P' \<and> Q'"
  by simp

text \<open>Applying safe WP rules.\<close>

ML \<open>
structure WP_Safe = struct

(* Succeeds (passes the goal state through) only when the goal mentions one of
   the frees Ps; otherwise yields the empty result sequence.
   NOTE(review): not referenced elsewhere in this theory — presumably kept as
   a utility for callers. *)
fun check_has_frees_tac Ps (_ : int) thm = let
    val fs = Term.add_frees (Thm.prop_of thm) [] |> filter (member (=) Ps)
  in if null fs then Seq.empty else Seq.single thm end

(* Lift the supplied "safe" wp rules through conjunction lifting and
   post-condition strengthening, resolve against the goal, then attempt to
   discharge the generated side conditions by simp / classical safe steps,
   finishing with Partial_Prove. *)
fun wp_bang wp_safe_rules ctxt = let
    val wp_safe_rules_conj = ((wp_safe_rules RL @{thms hoare_vcg_conj_lift hoare_vcg_R_conj})
        RL @{thms hoare_strengthen_post hoare_post_imp_R})
      |> map (rotate_prems 1)
  in
    resolve_tac ctxt wp_safe_rules_conj
    THEN' Split_To_Conj.get_split_tac "P" ctxt
      (fn Ps => fn i => eresolve0_tac [@{thm conj_meta_forward}] i
          THEN (REPEAT_ALL_NEW ((CHANGED o asm_full_simp_tac ctxt)
                  ORELSE' Classical.safe_steps_tac ctxt)
              THEN_ALL_NEW Partial_Prove.cleanup_tac ctxt Ps) i
          THEN (Partial_Prove.finish_tac ctxt Ps THEN' (assume_tac ctxt)) i
      )
  end

(* Method argument parser: takes a list of theorems, runs wp_bang on goal 1. *)
val wpe_args =
  Attrib.thms >> curry (fn (rules, ctxt) =>
    Method.SIMPLE_METHOD (
      wp_bang rules ctxt 1
    )
  );

end
\<close>

method_setup wpe = \<open>WP_Safe.wpe_args\<close>
  "applies 'safe' wp rules to eliminate postcondition components"

text \<open>Testing.\<close>

(* Sanity check: wpe with rule x eliminates the Q components of the
   postcondition, leaving the rest for wp / wp y. *)
lemma
  assumes x: "\<lbrace> P \<rbrace> f \<lbrace> \<lambda>rv. Q \<rbrace>"
  and y: "\<lbrace> P \<rbrace> f \<lbrace> \<lambda>rv. R \<rbrace>"
  shows
  "\<lbrace> P \<rbrace> f \<lbrace> \<lambda>rv s. \<forall>x y. (fst rv = Some x \<longrightarrow> Q s)
        \<and> (snd rv = Some y \<longrightarrow> Q s )
        \<and> R s \<rbrace>"
  apply (rule hoare_pre)
   apply (simp add: all_conj_distrib)
   apply (rule hoare_vcg_conj_lift)
    apply (wpe x)
    apply wp
   apply (wpe x)
   apply (wp y)
  apply simp
  done
end
|
{"author": "seL4", "repo": "l4v", "sha": "9ba34e269008732d4f89fb7a7e32337ffdd09ff9", "save_path": "github-repos/isabelle/seL4-l4v", "path": "github-repos/isabelle/seL4-l4v/l4v-9ba34e269008732d4f89fb7a7e32337ffdd09ff9/lib/Monads/wp/WPBang.thy"}
|
[STATEMENT]
lemma cong_diff_trans[trans]:
"[a = b - x] (mod n) \<Longrightarrow> [x = y] (mod n) \<Longrightarrow> [a = b - y] (mod n)"
"[a = x - b] (mod n) \<Longrightarrow> [x = y] (mod n) \<Longrightarrow> [a = y - b] (mod n)"
"[b - x = a] (mod n) \<Longrightarrow> [x = y] (mod n) \<Longrightarrow> [b - y = a] (mod n)"
"[x - b = a] (mod n) \<Longrightarrow> [x = y] (mod n) \<Longrightarrow> [y - b = a] (mod n)"
for a :: "'a :: {unique_euclidean_semiring, euclidean_ring_cancel}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lbrakk>[a = b - x] (mod n); [x = y] (mod n)\<rbrakk> \<Longrightarrow> [a = b - y] (mod n)) &&& (\<lbrakk>[a = x - b] (mod n); [x = y] (mod n)\<rbrakk> \<Longrightarrow> [a = y - b] (mod n))) &&& (\<lbrakk>[b - x = a] (mod n); [x = y] (mod n)\<rbrakk> \<Longrightarrow> [b - y = a] (mod n)) &&& (\<lbrakk>[x - b = a] (mod n); [x = y] (mod n)\<rbrakk> \<Longrightarrow> [y - b = a] (mod n))
[PROOF STEP]
unfolding cong_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lbrakk>a mod n = (b - x) mod n; x mod n = y mod n\<rbrakk> \<Longrightarrow> a mod n = (b - y) mod n) &&& (\<lbrakk>a mod n = (x - b) mod n; x mod n = y mod n\<rbrakk> \<Longrightarrow> a mod n = (y - b) mod n)) &&& (\<lbrakk>(b - x) mod n = a mod n; x mod n = y mod n\<rbrakk> \<Longrightarrow> (b - y) mod n = a mod n) &&& (\<lbrakk>(x - b) mod n = a mod n; x mod n = y mod n\<rbrakk> \<Longrightarrow> (y - b) mod n = a mod n)
[PROOF STEP]
by (metis mod_diff_eq)+
|
{"llama_tokens": 642, "file": "Probabilistic_Prime_Tests_Algebraic_Auxiliaries", "length": 2}
|
[STATEMENT]
lemma D_append[iff]: "\<And>A. \<D>s (es @ es') A = (\<D>s es A \<and> \<D>s es' (A \<squnion> \<A>s es))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>A. \<D>s (es @ es') A = (\<D>s es A \<and> \<D>s es' (A \<squnion> \<A>s es))
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>A. \<D>s (es @ es') A = (\<D>s es A \<and> \<D>s es' (A \<squnion> \<A>s es))
[PROOF STEP]
by (induct es type:list) (auto simp:hyperUn_assoc)
|
{"llama_tokens": 232, "file": "JinjaThreads_J_DefAss", "length": 2}
|
import numpy as np
class Agent():
    """Tabular Q-learning agent with epsilon-greedy exploration.

    The Q-table is a dict keyed by (state, action) pairs, initialized to 0.

    Fix: epsilon decay is now clamped with max(), so epsilon can no longer
    dip below `eps_min` for one step (the original multiplied first and only
    clamped on the *next* call).
    """

    def __init__(self, lr, gamma, n_actions, n_states, eps_start, eps_end,
                 eps_dec):
        """
        Args:
            lr: learning rate for the TD update.
            gamma: discount factor.
            n_actions: number of discrete actions.
            n_states: number of discrete states.
            eps_start: initial exploration rate.
            eps_end: minimum exploration rate.
            eps_dec: multiplicative decay applied after each learn() call.
        """
        self.lr = lr
        self.gamma = gamma
        self.n_actions = n_actions
        self.n_states = n_states
        self.epsilon = eps_start
        self.eps_min = eps_end
        self.eps_dec = eps_dec
        self.Q = {}
        self.init_Q()

    def init_Q(self):
        """Initialize the Q-table to 0.0 for every (state, action) pair."""
        for state in range(self.n_states):
            for action in range(self.n_actions):
                self.Q[(state, action)] = 0.0

    def choose_action(self, state):
        """Epsilon-greedy action selection for `state`."""
        if np.random.random() < self.epsilon:
            # Explore: uniform random action.
            return np.random.choice(self.n_actions)
        # Exploit: greedy action w.r.t. current Q estimates.
        q_values = np.array([self.Q[(state, a)] for a in range(self.n_actions)])
        return np.argmax(q_values)

    def decrement_epsilon(self):
        """Decay epsilon multiplicatively, clamped at eps_min."""
        self.epsilon = max(self.epsilon * self.eps_dec, self.eps_min)

    def learn(self, state, action, reward, state_):
        """One Q-learning update for the transition (state, action) -> state_."""
        q_next = np.array([self.Q[(state_, a)] for a in range(self.n_actions)])
        a_max = np.argmax(q_next)
        # TD target uses the greedy value of the successor state.
        td_target = reward + self.gamma * self.Q[(state_, a_max)]
        self.Q[(state, action)] += self.lr * (td_target - self.Q[(state, action)])
        self.decrement_epsilon()
|
{"hexsha": "43c7ef39f7952c52056013747aa798cf891602d3", "size": 1456, "ext": "py", "lang": "Python", "max_stars_repo_path": "q_learning/q_learning_agent.py", "max_stars_repo_name": "Srikanth-Kb/Deep-Q-Learning-Paper-To-Code", "max_stars_repo_head_hexsha": "0351272399847e23aa1509c04781507e5a34d3dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 240, "max_stars_repo_stars_event_min_datetime": "2019-11-20T23:53:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T15:13:12.000Z", "max_issues_repo_path": "q_learning/q_learning_agent.py", "max_issues_repo_name": "Srikanth-Kb/Deep-Q-Learning-Paper-To-Code", "max_issues_repo_head_hexsha": "0351272399847e23aa1509c04781507e5a34d3dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2019-12-01T01:00:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T18:23:34.000Z", "max_forks_repo_path": "q_learning/q_learning_agent.py", "max_forks_repo_name": "Srikanth-Kb/Deep-Q-Learning-Paper-To-Code", "max_forks_repo_head_hexsha": "0351272399847e23aa1509c04781507e5a34d3dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 112, "max_forks_repo_forks_event_min_datetime": "2019-11-25T02:43:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T10:29:01.000Z", "avg_line_length": 33.0909090909, "max_line_length": 80, "alphanum_fraction": 0.5377747253, "include": true, "reason": "import numpy", "num_tokens": 317}
|
# Source: http://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline
# http://people.wku.edu/qi.li/teaching/446/cg14_curve_surface.pdf
import numpy as np
from utils import distance
def CatmullRomSpline(P0, P1, P2, P3, nPoints=100):
    """
    Compute nPoints points on the centripetal Catmull-Rom segment between P1
    and P2; P0 and P3 act as outer control points. Points are (x, y) pairs.

    Fix: the knots are now computed by the standard recurrence
    (t0 = 0, t_{i+1} = tj(t_i, P_i, P_{i+1})) from the Wikipedia source cited
    at the top of this file. The original seeded t0 = P0[0] and t3 = P3[0]
    (x-coordinates), which breaks the parameterization and can divide by zero.
    """
    # Convert to numpy so the pyramid below works with array arithmetic.
    P0, P1, P2, P3 = map(np.array, [P0, P1, P2, P3])
    alpha = 0.5  # 0.5 = centripetal parameterization

    def tj(ti, Pi, Pj):
        xi, yi = Pi
        xj, yj = Pj
        return (((xj - xi) ** 2 + (yj - yi) ** 2) ** 0.5) ** alpha + ti

    t0 = 0
    t1 = tj(t0, P0, P1)
    t2 = tj(t1, P1, P2)
    t3 = tj(t2, P2, P3)
    # Only calculate points between P1 and P2; reshape to (nPoints, 1) so each
    # t value broadcasts against the 2-vectors P0..P3.
    t = np.linspace(t1, t2, nPoints).reshape(nPoints, 1)
    # Barry-Goldman pyramidal evaluation.
    A1 = (t1 - t) / (t1 - t0) * P0 + (t - t0) / (t1 - t0) * P1
    A2 = (t2 - t) / (t2 - t1) * P1 + (t - t1) / (t2 - t1) * P2
    A3 = (t3 - t) / (t3 - t2) * P2 + (t - t2) / (t3 - t2) * P3
    B1 = (t2 - t) / (t2 - t0) * A1 + (t - t0) / (t2 - t0) * A2
    B2 = (t3 - t) / (t3 - t1) * A2 + (t - t1) / (t3 - t1) * A3
    C = (t2 - t) / (t2 - t1) * B1 + (t - t1) / (t2 - t1) * B2
    return C
def CatmullRomLoop(loop, pointsPerUnitDist=1.):
    """
    Catmull-Rom interpolation of a closed polyline `loop`
    (with loop[0] == loop[-1]). Returns a flat list of coordinate tuples.

    Fix: Python 2 `xrange` replaced with `range` (NameError on Python 3).

    Raises:
        ValueError: if the loop has fewer than 4 points.
    """
    if len(loop) < 4:
        raise ValueError("Loop must have at least 4 points in it")
    # Wrap around: borrow a neighbour from each end as extra control points.
    loop = [loop[-2]] + loop + [loop[1]]
    segments = []
    # One spline segment per consecutive quadruple of control points.
    for i in range(len(loop) - 3):
        numPoints = int(distance(loop[i + 1], loop[i + 2]) * pointsPerUnitDist)
        segments.append(
            CatmullRomSpline(loop[i], loop[i + 1], loop[i + 2], loop[i + 3],
                             nPoints=numPoints))
    return [tuple(coords) for seg in segments for coords in seg]
|
{"hexsha": "a3a62cced88f20b7a24320490c467244f76df642", "size": 1950, "ext": "py", "lang": "Python", "max_stars_repo_path": "catmullrom.py", "max_stars_repo_name": "andrewyang96/RacetrackGenerator", "max_stars_repo_head_hexsha": "9febabb7fb782951ab6a01b5330171f6e4c8cacf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "catmullrom.py", "max_issues_repo_name": "andrewyang96/RacetrackGenerator", "max_issues_repo_head_hexsha": "9febabb7fb782951ab6a01b5330171f6e4c8cacf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "catmullrom.py", "max_forks_repo_name": "andrewyang96/RacetrackGenerator", "max_forks_repo_head_hexsha": "9febabb7fb782951ab6a01b5330171f6e4c8cacf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6206896552, "max_line_length": 97, "alphanum_fraction": 0.5897435897, "include": true, "reason": "import numpy", "num_tokens": 738}
|
""" check-controllability-and-observability.py
Example to check the controllability and the observability of a state space system.
RMM, 6 Sep 2010
"""
from __future__ import print_function
from scipy import * # Load the scipy functions
from control.matlab import * # Load the controls systems library
# Parameters defining the system
m = 250.0  # system mass
k = 40.0  # spring constant
b = 60.0  # damping constant
# System matrices
# NOTE(review): `matrix` comes from the `scipy` star import and `ss`, `ctrb`,
# `obsv` from `control.matlab` (star imports above) — verify; numpy.matrix is
# deprecated upstream.
A = matrix([[1, -1, 1.],
            [1, -k / m, -b / m],
            [1, 1, 1]])
B = matrix([[0],
            [1 / m],
            [1]])
C = matrix([[1., 0, 1.]])
# State-space system with direct feedthrough D = 0.
sys = ss(A, B, C, 0)
# Check controllability
Wc = ctrb(A, B)  # controllability matrix of (A, B)
print("Wc = ", Wc)
# Check Observability
Wo = obsv(A, C)  # observability matrix of (A, C)
print("Wo = ", Wo)
|
{"hexsha": "d20416f1f136c7ac312a3514acf1ca24915cae89", "size": 748, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/check-controllability-and-observability.py", "max_stars_repo_name": "joaoantoniocardoso/python-control", "max_stars_repo_head_hexsha": "1ab67560db5319843a2c43a20944da061011399d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-29T18:28:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-29T18:28:03.000Z", "max_issues_repo_path": "examples/check-controllability-and-observability.py", "max_issues_repo_name": "joaoantoniocardoso/python-control", "max_issues_repo_head_hexsha": "1ab67560db5319843a2c43a20944da061011399d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/check-controllability-and-observability.py", "max_forks_repo_name": "joaoantoniocardoso/python-control", "max_forks_repo_head_hexsha": "1ab67560db5319843a2c43a20944da061011399d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-13T22:30:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-13T02:35:39.000Z", "avg_line_length": 19.6842105263, "max_line_length": 83, "alphanum_fraction": 0.6069518717, "include": true, "reason": "from scipy", "num_tokens": 237}
|
import numpy as np
import torch
from ..utils import common_functions as c_f
def split_half(x, dim):
    """Split `x` along `dim` into chunks of size `dim_size // 2`."""
    half = x.shape[dim] // 2
    return torch.split(x, half, dim=dim)
def num_elements_minus_diag(x):
    """Number of off-diagonal entries of a square matrix: n^2 - n."""
    n = x.shape[0]
    return n * n - n
def get_kernel_scales(low=-8, high=8, num_kernels=33, base=2.0):
    """Logarithmically spaced kernel scales base**low ... base**high, as a tensor."""
    scales = np.logspace(low, high, num=num_kernels, base=base)
    return torch.from_numpy(scales)
def _mmd_dist_mats(x, y, dist_func):
xx = dist_func(x, x)
yy = dist_func(y, y)
zz = dist_func(x, y)
with torch.no_grad():
# https://arxiv.org/pdf/1409.6041.pdf
# https://arxiv.org/pdf/1707.07269.pdf
scale = -1.0 / torch.median(xx)
return xx, yy, zz, scale
def get_mmd_dist_mats(x, y, dist_func):
    """Distance matrices for MMD; supports joint (list/tuple) inputs per level."""
    if not c_f.is_list_or_tuple(x):
        return _mmd_dist_mats(x, y, dist_func)
    xx, yy, zz, scale = [], [], [], []
    for idx in range(len(x)):
        level_mats = _mmd_dist_mats(x[idx], y[idx], dist_func)
        for acc, mat in zip((xx, yy, zz, scale), level_mats):
            acc.append(mat)
    return xx, yy, zz, scale
def get_default_kernel_weights(scale):
    """Uniform kernel weights; scalar 1 when `scale` is not a multi-element tensor."""
    is_multi_kernel = torch.is_tensor(scale) and torch.numel(scale) > 1
    if not is_multi_kernel:
        return 1
    return torch.ones_like(scale) / len(scale)
def _mmd_quadratic(x, scale, weights):
return torch.sum(torch.exp(x.unsqueeze(2) * scale) * weights, dim=2)
def get_mmd_quadratic(xx, yy, zz, scale, weights=None):
    """Quadratic-time multi-kernel MMD^2 from precomputed distance matrices.

    xx/yy/zz are within-source, within-target, and cross distance matrices —
    or lists of them, one per representation level, in the joint-MMD case.
    Returns a scalar tensor.
    """
    # https://jmlr.csail.mit.edu/papers/volume13/gretton12a/gretton12a.pdf
    # https://arxiv.org/pdf/1502.02791.pdf
    is_joint_mmd = c_f.is_list_or_tuple(xx)
    if is_joint_mmd:
        # Joint MMD: kernel values multiply across levels before summing.
        xx_prod, yy_prod, zz_prod = 1, 1, 1
        for i in range(len(xx)):
            curr_weights = c_f.default(weights, get_default_kernel_weights(scale[i]))
            xx_prod *= _mmd_quadratic(xx[i], scale[i], curr_weights)
            yy_prod *= _mmd_quadratic(yy[i], scale[i], curr_weights)
            zz_prod *= _mmd_quadratic(zz[i], scale[i], curr_weights)
        # Zero the diagonals so self-similarity does not bias the estimate.
        xx_prod.fill_diagonal_(0)
        yy_prod.fill_diagonal_(0)
        xx, yy, zz = xx_prod, yy_prod, zz_prod
    else:
        weights = c_f.default(weights, get_default_kernel_weights(scale))
        xx = _mmd_quadratic(xx, scale, weights).fill_diagonal_(0)
        yy = _mmd_quadratic(yy, scale, weights).fill_diagonal_(0)
        zz = _mmd_quadratic(zz, scale, weights)
    # Normalize within-set sums by the n*(n-1) off-diagonal count; the cross
    # term uses a plain mean.
    xx_scaler = 1.0 / num_elements_minus_diag(xx)
    yy_scaler = 1.0 / num_elements_minus_diag(yy)
    return xx_scaler * torch.sum(xx) + yy_scaler * torch.sum(yy) - 2 * torch.mean(zz)
def _mmd_linear(x, i, j, scale, weights):
return torch.sum(torch.exp(x[i, j] * scale) * weights, dim=0)
def _mmd_linear_helper(xx, yy, zz, scale, weights):
    """Per-kernel sums over consecutive even/odd sample pairs (linear-time MMD)."""
    batch = xx.shape[0]
    half_range = torch.arange(0, batch // 2, device=xx.device)
    even = half_range * 2
    odd = even + 1
    # Shape scale to (num_kernels, 1) so it broadcasts over the pair axis.
    if scale.ndim == 0:
        scale = scale.unsqueeze(0)
    scale = scale.unsqueeze(1)
    weights = c_f.default(weights, get_default_kernel_weights(scale))
    return (
        _mmd_linear(xx, even, odd, scale, weights),
        _mmd_linear(yy, even, odd, scale, weights),
        _mmd_linear(zz, even, odd, scale, weights),
        _mmd_linear(zz, odd, even, scale, weights),
    )
def get_mmd_linear(xx, yy, zz, scale, weights=None):
    """Linear-time MMD estimate over consecutive sample pairs.

    xx/yy/zz are within-source, within-target, and cross distance matrices
    (lists of them in the joint-MMD case). Returns a scalar tensor.
    """
    # https://jmlr.csail.mit.edu/papers/volume13/gretton12a/gretton12a.pdf
    # https://arxiv.org/pdf/1502.02791.pdf
    is_joint_mmd = c_f.is_list_or_tuple(xx)
    B = xx[0].shape[0] if is_joint_mmd else xx.shape[0]
    if is_joint_mmd:
        # Joint MMD: multiply the per-level kernel terms, then sum.
        product_list = [1, 1, 1, 1]
        for i in range(len(xx)):
            curr_kernels = _mmd_linear_helper(xx[i], yy[i], zz[i], scale[i], weights)
            product_list = [a * b for a, b in zip(product_list, curr_kernels)]
        loss1, loss2, loss3, loss4 = [torch.sum(a) for a in product_list]
    else:
        loss1, loss2, loss3, loss4 = [
            torch.sum(a) for a in _mmd_linear_helper(xx, yy, zz, scale, weights)
        ]
    # h-statistic: k(x,x') + k(y,y') - k(x,y') - k(x',y), averaged over pairs.
    loss = loss1 + loss2 - loss3 - loss4
    return torch.sum(loss) / float(B // 2)
|
{"hexsha": "2628d9ac29094d6fb3c2daaf5f59ca63fe47b0a5", "size": 4108, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pytorch_adapt/layers/utils.py", "max_stars_repo_name": "MarkusSagen/pytorch-adapt", "max_stars_repo_head_hexsha": "947b9f1b748d2078cecbf4a00c34f73108d9ecde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-15T19:36:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T19:36:01.000Z", "max_issues_repo_path": "src/pytorch_adapt/layers/utils.py", "max_issues_repo_name": "MarkusSagen/pytorch-adapt", "max_issues_repo_head_hexsha": "947b9f1b748d2078cecbf4a00c34f73108d9ecde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pytorch_adapt/layers/utils.py", "max_forks_repo_name": "MarkusSagen/pytorch-adapt", "max_forks_repo_head_hexsha": "947b9f1b748d2078cecbf4a00c34f73108d9ecde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3464566929, "max_line_length": 85, "alphanum_fraction": 0.6307205453, "include": true, "reason": "import numpy", "num_tokens": 1266}
|
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# Page header.
st.write("""
# Favorite Object detection CNN
"""
)
st.write("This is a simple web app to classify images in 8 categories")
st.set_option('deprecation.showfileUploaderEncoding', False)
file = st.file_uploader("Please upload an image file", type=["jpg", "png"])
# Pretrained Keras classifier, loaded once at script start.
model = load_model('./models/best_model.h5')
if file is None:
    st.text("Please upload an image file")
else:
    # Show the upload, then round-trip it through disk so Keras' loader can
    # resize it to the 256x256 RGB input shape requested below.
    img = Image.open(file)
    st.image(img, use_column_width=True)
    img.save("./data/temp.png")
    img = image.load_img('./data/temp.png', grayscale=False, target_size=(256, 256),
                         color_mode='rgb', interpolation='bilinear')
    img_array = image.img_to_array(img)
    img_array = np.array([img_array])  # add a batch dimension
    prediction = model.predict(img_array)
    # st.text("Probability (0: Airplane, 1: Car, 2: Cat, 3: Dog, 4: Flower, 5: Fruit, 6: Motorbike, 7: Person")
    # One row of per-class scores, labelled by category.
    df = pd.DataFrame(prediction,
                      columns=['Airplane', 'Car', 'Cat', 'Dog', 'Flower', 'Fruit', 'Motorbike', 'Person'])
    st.dataframe(df)
|
{"hexsha": "0214240417ece8c13b568e43533fae0ef4efeb8e", "size": 1242, "ext": "py", "lang": "Python", "max_stars_repo_path": "streamlit_app.py", "max_stars_repo_name": "rubenwo/ml3", "max_stars_repo_head_hexsha": "aa1de2ad27c4906a6158ee82e11ba7bb10da9ce7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "streamlit_app.py", "max_issues_repo_name": "rubenwo/ml3", "max_issues_repo_head_hexsha": "aa1de2ad27c4906a6158ee82e11ba7bb10da9ce7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "streamlit_app.py", "max_forks_repo_name": "rubenwo/ml3", "max_forks_repo_head_hexsha": "aa1de2ad27c4906a6158ee82e11ba7bb10da9ce7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6842105263, "max_line_length": 111, "alphanum_fraction": 0.6578099839, "include": true, "reason": "import numpy", "num_tokens": 306}
|
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <iostream>
#include <sstream>
using namespace boost::archive;
std::stringstream ss;
// Base class: an animal with a leg count, serializable via
// Boost.Serialization's intrusive member-function approach.
class animal
{
public:
    animal() = default;
    animal(int legs) : legs_{legs} {}
    int legs() const { return legs_; }

private:
    // Grant Boost access to the private serialize() member.
    friend class boost::serialization::access;

    // Archives the leg count; `version` is unused here (single version).
    template <typename Archive>
    void serialize(Archive &ar, const unsigned int version) { ar & legs_; }

    int legs_;
};
// Derived class: adds a flight flag. Its serialize() explicitly archives the
// base subobject via base_object so animal::legs_ round-trips as well.
class bird : public animal
{
public:
    bird() = default;
    bird(int legs, bool can_fly) :
        animal{legs}, can_fly_{can_fly} {}
    bool can_fly() const { return can_fly_; }

private:
    friend class boost::serialization::access;

    template <typename Archive>
    void serialize(Archive &ar, const unsigned int version)
    {
        // Serialize the base class first, then the derived state.
        ar & boost::serialization::base_object<animal>(*this);
        ar & can_fly_;
    }

    bool can_fly_;
};
void save()
{
text_oarchive oa{ss};
bird penguin{2, false};
oa << penguin;
}
void load()
{
text_iarchive ia{ss};
bird penguin;
ia >> penguin;
std::cout << penguin.legs() << '\n';
std::cout << std::boolalpha << penguin.can_fly() << '\n';
}
// Round-trip demo: serialize a bird, then read it back and print it.
int main()
{
    save();
    load();
}
|
{"hexsha": "738a7692b198122c3c07d2011ccbd5febf13d787", "size": 1193, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Example/serialization_11/main.cpp", "max_stars_repo_name": "KwangjoJeong/Boost", "max_stars_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Example/serialization_11/main.cpp", "max_issues_repo_name": "KwangjoJeong/Boost", "max_issues_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Example/serialization_11/main.cpp", "max_forks_repo_name": "KwangjoJeong/Boost", "max_forks_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.0757575758, "max_line_length": 73, "alphanum_fraction": 0.665549036, "num_tokens": 324}
|
using MechanicalSketch
import MechanicalSketch: foil_spline_local
import MechanicalSketch: text, circle, Turtle, Pencolor, Penwidth, Forward, Turn
import MechanicalSketch: HueShift, O, sethue, finish, EM, WI, background, empty_figure
let
    # Render everything into a fixed file next to this test script.
    empty_figure(filename = joinpath(@__DIR__, "test_1.png"));
    background("midnightblue")
    posx = -WI / 2
    sethue("green")
    # The unicode ∈ may not be supported by the default font and show as a square instead.
    stri = "Da jeg var på vei til kirken ∈ dag morges så kom jeg forbi en liten sjømann. En frisk og hyggelig liten sjømann som hilste meg."
    text(stri, posx, 0)
    sethue("yellow")
    # Second text line, one EM below the first.
    text("1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890",
        posx, EM)
    circle(O, 20, :stroke)
    # Turtle-graphics spiral: the step length grows and the hue shifts on
    # every iteration.
    t = Turtle()
    Pencolor(t, "cyan")
    Penwidth(t, 1.5)
    n = 5
    for i in 1:400
        Forward(t, n)
        Turn(t, 89.5)
        HueShift(t)
        n += 0.75
    end
    finish()
end
|
{"hexsha": "02f1685ea67d8290c9d3740913d695faf8457beb", "size": 1006, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_1.jl", "max_stars_repo_name": "hustf/MechanicalSketch.jl", "max_stars_repo_head_hexsha": "162102d6ccbb5a25911b0a36074295832c7d858e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-01T17:54:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-01T17:54:59.000Z", "max_issues_repo_path": "test/test_1.jl", "max_issues_repo_name": "hustf/MechanicalSketch.jl", "max_issues_repo_head_hexsha": "162102d6ccbb5a25911b0a36074295832c7d858e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-05T13:00:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-21T20:00:25.000Z", "max_forks_repo_path": "test/test_1.jl", "max_forks_repo_name": "hustf/MechanicalSketch.jl", "max_forks_repo_head_hexsha": "162102d6ccbb5a25911b0a36074295832c7d858e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4516129032, "max_line_length": 140, "alphanum_fraction": 0.6809145129, "num_tokens": 311}
|
import importlib
import numpy as np
import torch
import torch.nn as nn
from torchvision import models
class CILRSModel(nn.Module):
    """
    Conditional Imitation Learning ResNet Speed (CILRS) driving model.

    ``encode`` turns one or more input images into a summed visual embedding
    via a ResNet backbone (classification head removed); ``forward``
    optionally fuses a measured speed into that embedding and selects the
    control branch matching the active navigational command, producing a
    3-dim sigmoid-bounded control vector (and optionally a speed prediction).
    """

    def __init__(
        self,
        backbone='resnet18',
        pretrained=True,
        normalize=True,
        num_branch=6,
        speed_dim=1,
        embedding_dim=512,
        hidden_size=256,
        input_speed=True,
        predict_speed=True
    ):
        """
        :param backbone: backbone name, one of 'resnet18'/'resnet34'/'resnet50'
        :param pretrained: load ImageNet-pretrained weights for the backbone
        :param normalize: normalize input images with ImageNet statistics
        :param num_branch: number of command-conditioned control branches
        :param speed_dim: dimensionality of the speed measurement
        :param embedding_dim: embedding size fed to the heads
            (NOTE(review): assumed to match the backbone feature size — 512
            for resnet18/34 but 2048 for resnet50; confirm before using
            resnet50 with the default value)
        :param hidden_size: hidden layer width of the MLP heads
        :param input_speed: if True, add a projected speed to the embedding
        :param predict_speed: if True, also regress speed from the embedding
        """
        super().__init__()
        self._normalize = normalize
        assert backbone in ['resnet18', 'resnet34', 'resnet50'], backbone
        backbone_cls = {
            'resnet18': models.resnet18,
            'resnet34': models.resnet34,
            'resnet50': models.resnet50,
        }[backbone]
        self._backbone = backbone_cls(pretrained=pretrained)
        # Strip the classification head so the backbone outputs raw features.
        self._backbone.fc = nn.Sequential()
        self._num_branch = num_branch
        self._input_speed = input_speed
        self.predict_speed = predict_speed
        # Project input speed measurement to feature size
        if input_speed:
            self._speed_in = nn.Sequential(
                nn.Linear(speed_dim, hidden_size),
                nn.ReLU(True),
                nn.Linear(hidden_size, embedding_dim),
            )
        # Project feature to speed prediction
        if predict_speed:
            self._speed_out = nn.Sequential(
                nn.Linear(embedding_dim, hidden_size),
                nn.ReLU(True),
                nn.Linear(hidden_size, hidden_size),
                nn.ReLU(True),
                nn.Linear(hidden_size, speed_dim),
            )
        # One control head per navigational command; the final Sigmoid bounds
        # each of the 3 control outputs to [0, 1].
        fc_branch_list = []
        for i in range(num_branch):
            fc_branch_list.append(
                nn.Sequential(
                    nn.Linear(embedding_dim, hidden_size),
                    nn.ReLU(True),
                    nn.Linear(hidden_size, hidden_size),
                    nn.ReLU(True),
                    nn.Linear(hidden_size, 3),
                    nn.Sigmoid(),
                )
            )
        self._branches = nn.ModuleList(fc_branch_list)

    def _normalize_imagenet(self, x):
        """
        Normalize input images according to ImageNet standards.

        :Arguments:
            x (tensor): input images, channels-first (RGB order assumed)
        """
        x = x.clone()
        x[:, 0] = (x[:, 0] - 0.485) / 0.229
        x[:, 1] = (x[:, 1] - 0.456) / 0.224
        x[:, 2] = (x[:, 2] - 0.406) / 0.225
        return x

    def encode(self, input_images):
        """Sum the backbone embeddings of all images in ``input_images``."""
        embedding = 0
        for x in input_images:
            if self._normalize:
                x = self._normalize_imagenet(x)
            embedding += self._backbone(x)
        return embedding

    def forward(self, embedding, speed, command):
        """
        Predict controls (and optionally speed) from a visual embedding.

        :param embedding: visual embedding produced by ``encode``
        :param speed: measured speed, shape (B,) or (B, speed_dim)
        :param command: navigational command indices (1-based; command 0 is
            ignored), shape (B,) or (B, 1)
        :return: control prediction, plus speed prediction if enabled
        """
        if len(command.shape) == 1:
            command = command.unsqueeze(1)
        if self._input_speed:
            if len(speed.shape) == 1:
                speed = speed.unsqueeze(1)
            # Out-of-place add: the original '+=' mutated the caller's
            # embedding tensor in place (and raises on leaf tensors that
            # require grad).
            embedding = embedding + self._speed_in(speed)
        control_pred = 0.
        for i, branch in enumerate(self._branches):
            # Choose control for branch of only active command
            # We check for (command - 1) since navigational command 0 is ignored
            control_pred += branch(embedding) * (i == (command - 1))
        if self.predict_speed:
            speed_pred = self._speed_out(embedding)
            return control_pred, speed_pred
        return control_pred
|
{"hexsha": "a7b6a248c08212d00b95df80a54c2e4f8a3ed3b2", "size": 3405, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/models/cilrs_model.py", "max_stars_repo_name": "L-Net-1992/DI-drive", "max_stars_repo_head_hexsha": "cc7f47bedbf60922acbcf3a5f77fc8e274df62cf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 219, "max_stars_repo_stars_event_min_datetime": "2021-07-07T21:55:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:56:43.000Z", "max_issues_repo_path": "core/models/cilrs_model.py", "max_issues_repo_name": "L-Net-1992/DI-drive", "max_issues_repo_head_hexsha": "cc7f47bedbf60922acbcf3a5f77fc8e274df62cf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-08-11T05:26:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T22:21:24.000Z", "max_forks_repo_path": "core/models/cilrs_model.py", "max_forks_repo_name": "L-Net-1992/DI-drive", "max_forks_repo_head_hexsha": "cc7f47bedbf60922acbcf3a5f77fc8e274df62cf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2021-07-08T03:17:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T03:51:43.000Z", "avg_line_length": 30.9545454545, "max_line_length": 80, "alphanum_fraction": 0.5439060206, "include": true, "reason": "import numpy", "num_tokens": 749}
|
import numpy as np
import cv2 as cv
import glob
import math
import random
from matplotlib import pyplot as plt
# from scipy.optimize import leastsq
from skspatial.objects import Plane, Points
from skspatial.plotting import plot_3d
#-------------------------#
# HOUGH LINES BUNDLER #
#-------------------------#
class HoughBundler:
    """Merge nearby, similarly-angled Hough line segments into single lines.

    Based on: https://stackoverflow.com/questions/45531074/how-to-merge-lines-after-houghlinesp
    """

    def get_slope(self, line):
        # Angle in degrees via atan2 of the absolute coordinate deltas
        # (measured from the vertical axis); https://en.wikipedia.org/wiki/Atan2
        dx = abs(line[0] - line[2])
        dy = abs(line[1] - line[3])
        return math.degrees(math.atan2(dx, dy))

    def is_different_line(self, line_new, groups, min_distance_to_merge, min_angle_to_merge):
        """Return True when line_new matches no existing group; otherwise
        append it to the first matching group (side effect) and return False."""
        for group in groups:
            for member in group:
                if self.get_distance_between_lines(member, line_new) >= min_distance_to_merge:
                    continue
                if abs(self.get_slope(line_new) - self.get_slope(member)) < min_angle_to_merge:
                    group.append(line_new)
                    return False
        return True

    def point_to_line_dist(self, point, line):
        """Distance from a point to a finite segment (x1, y1, x2, y2)."""
        # http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba
        px, py = point
        x1, y1, x2, y2 = line

        def seg_len(ax, ay, bx, by):
            # Euclidean length between two points.
            return math.sqrt(math.pow(bx - ax, 2) + math.pow(by - ay, 2))

        length = seg_len(x1, y1, x2, y2)
        if length < 0.00000001:
            # Degenerate (zero-length) segment: sentinel distance.
            return 9999

        proj = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1))) / (length * length)
        if (proj < 0.00001) or (proj > 1):
            # Foot of the perpendicular is outside the segment: use the
            # nearer endpoint instead.
            d_start = seg_len(px, py, x1, y1)
            d_end = seg_len(px, py, x2, y2)
            return d_start if d_start <= d_end else d_end
        # Perpendicular foot lies on the segment.
        foot_x = x1 + proj * (x2 - x1)
        foot_y = y1 + proj * (y2 - y1)
        return seg_len(px, py, foot_x, foot_y)

    def get_distance_between_lines(self, line1, line2):
        """Minimum endpoint-to-segment distance between two segments."""
        return min(
            self.point_to_line_dist(line1[:2], line2),
            self.point_to_line_dist(line1[2:], line2),
            self.point_to_line_dist(line2[:2], line1),
            self.point_to_line_dist(line2[2:], line1),
        )

    def merge_lines_pipeline(self, lines):
        """Cluster segments into groups of close, similar-angle lines."""
        # Tunable merge thresholds.
        min_distance_to_merge = 30
        min_angle_to_merge = 30
        # Seed with the first segment; each non-matching one opens a group.
        groups = [[lines[0]]]
        for candidate in lines[1:]:
            if self.is_different_line(candidate, groups, min_distance_to_merge, min_angle_to_merge):
                groups.append([candidate])
        return groups

    def merge_lines_segments(self, lines):
        """Collapse one cluster to [first_point, last_point] along its axis."""
        orientation = self.get_slope(lines[0])
        # Single-segment cluster: just split it into its two endpoints.
        if len(lines) == 1:
            return [lines[0][:2], lines[0][2:]]
        # Flatten [[x1,y1,x2,y2], ...] into [[x1,y1], [x2,y2], ...]
        endpoints = []
        for seg in lines:
            endpoints.append(seg[:2])
            endpoints.append(seg[2:])
        # Near-vertical clusters sort by y, others by x (get_slope measures
        # the angle from the vertical axis).
        coord = 1 if 45 < orientation < 135 else 0
        endpoints.sort(key=lambda p: p[coord])
        # First and last point of the sorted cluster: [[x,y],[x,y]]
        return [endpoints[0], endpoints[-1]]

    def process_lines(self, lines):
        """Split cv.HoughLinesP output by orientation, cluster, and merge."""
        horizontal = []
        vertical = []
        for seg in (entry[0] for entry in lines):
            if 45 < self.get_slope(seg) < 135:
                vertical.append(seg)
            else:
                horizontal.append(seg)
        vertical.sort(key=lambda seg: seg[1])
        horizontal.sort(key=lambda seg: seg[0])
        merged_all = []
        # For each orientation bucket, keep one merged line per cluster.
        for bucket in (horizontal, vertical):
            if len(bucket) > 0:
                for cluster in self.merge_lines_pipeline(bucket):
                    merged_all.append(self.merge_lines_segments(cluster))
        return merged_all
#-------------------------#
# EXTRINSIC #
#-------------------------#
# Inner-corner count of the calibration chessboard (columns, rows).
pattern = ((9,6))
# Termination criteria for the sub-pixel corner refinement below.
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Object points: the chessboard corner grid on the plane z = 0, scaled by
# 2.5 world units per square.
objp = np.zeros((pattern[0]*pattern[1],3), np.float32)
objp[:,:2] = np.mgrid[0:pattern[0],0:pattern[1]].T.reshape(-1,2) * 2.5
# 3D axis endpoints, used later to draw the pose axes on the image
# (negative z so that axis points out of the board toward the camera).
axis = np.float32([[2.5,0,0], [0,6.5,0], [0,0,-2.5]]).reshape(-1,3)
# load camera intrinsic parameters (saved by a prior intrinsic calibration run)
with np.load('intrinsicParams.npz') as intrinsicParams:
    mtx, dist = [intrinsicParams[i] for i in ('camera_matrix', 'dist_coeffs')]
# find chessboard pose using solvePnP
extrinsicCalibImage = cv.imread('images/small_res/chess3.jpg')
grayextrinsicCalibImage = cv.cvtColor(extrinsicCalibImage, cv.COLOR_BGR2GRAY)
ret, corners = cv.findChessboardCorners(grayextrinsicCalibImage, pattern, None)
if ret != True:
    print("Failed to calibrate extrinsic parameters")
    exit()
# Refine corner locations to sub-pixel accuracy, then solve for the camera
# pose (rotation rvecs / translation tvecs) relative to the board.
corners2 = cv.cornerSubPix(grayextrinsicCalibImage, corners, (11,11), (-1,-1), criteria)
ret, rvecs, tvecs = cv.solvePnP(objp, corners2, mtx, dist)
# load xiaomi box img (the scene whose lines we want to reconstruct)
img = cv.imread('images/small_res/test36.jpg')
# undistort
# h, w = img.shape[:2]
# newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
# img = cv.undistort(img, mtx, dist, None, newcameramtx)
# crop the image
# x, y, w, h = roi
# img = img[y:y+h, x:x+w]
# subtract images
# img_bg = cv.imread('images/small_res/test37.jpg')
# sub = cv.subtract(img_bg,img)
#cv.imshow('Subtraction',cv.resize(sub, (800, 800)))
#cv.waitKey()
# detect edges: grayscale -> edge-preserving bilateral filter -> box blur -> Canny
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
filtered = cv.bilateralFilter(gray, 30, 50, 50)
blur = cv.blur(filtered, (3,3))
edges = cv.Canny(blur, 20, 30)
# line enhancement: morphological closing bridges small gaps in the edges
kernel = np.ones((3,3), np.uint8)
closing = cv.morphologyEx(edges, cv.MORPH_CLOSE, kernel, iterations=15)
### Hough detection ###
rho = 1  # distance resolution in pixels of the Hough grid
theta = np.pi / 180  # angular resolution in radians of the Hough grid
threshold = 100  # minimum number of votes (intersections in Hough grid cell)
min_line_length = 150  # minimum number of pixels making up a line
max_line_gap = 100  # maximum gap in pixels between connectable line segments
# "lines" is an array containing endpoints of detected line segments
lines = cv.HoughLinesP(closing, rho, theta, threshold, np.array([]),
                    min_line_length, max_line_gap)
# filter unwanted lines: keep only near-horizontal lines in the upper part
# of the image (y1 <= 500 and small vertical span).
new_lines = []
for line in lines:
    for x1,y1,x2,y2 in line:
        if not y1 > 500 and abs(y1-y2) < 200:
            new_lines.append(line)
lines = new_lines
# bundle repeated and/or staggered lines
line_bundler = HoughBundler()
bundled_lines = line_bundler.process_lines(lines)
# draw resulting lines on a black canvas, one random BGR color per line
imgBundled = np.copy(img) * 0
for line in bundled_lines:
    x1, y1, x2, y2 = line[0][0], line[0][1], line[1][0], line[1][1]
    cv.line(imgBundled, (x1, y1), (x2, y2), (random.random() * 255,random.random() * 255,random.random() * 255),2)
### TEMPORARY ###
def draw(img, corners, imgpts):
    """Draw the projected 3D pose axes onto img, anchored at the first
    detected chessboard corner (BGR colors: blue, green, red)."""
    origin = tuple(corners[0].ravel())
    bgr_colors = ((255, 0, 0), (0, 255, 0), (0, 0, 255))
    for axis_pt, color in zip(imgpts, bgr_colors):
        img = cv.line(img, origin, tuple(axis_pt.ravel()), color, 5)
    return img
# Project the 3D axis endpoints into the image and draw the pose axes.
imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
img = draw(img,corners2,imgpts)
### /TEMPORARY ###
### Display all results ###
#cv.imshow('', np.hstack([cv.resize(edges, (1000, 1000)), cv.resize(closing, (1000, 1000))]))
# Show the annotated source image next to the bundled-lines rendering.
cv.imshow('Edge detection', np.hstack([cv.resize(img, (800, 800)), cv.resize(imgBundled, (800, 800))]))
cv.waitKey()
#-------------------------#
#      3D CALCULATION     #
#-------------------------#
# calculate projection matrix (projMtx) -> 3D to 2D
# projMtx = K @ [R|t]: intrinsics times the rigid extrinsic transform.
rmtx, _ = cv.Rodrigues(rvecs)
rotTransMtx = np.zeros((3,4))
rotTransMtx[:,:-1] = rmtx
rotTransMtx[:,-1:] = tvecs
projMtx = np.dot(mtx, rotTransMtx)
# 3D -> 2D
def world_to_image(x, y, z):
    """Project a world point (x, y, z) through projMtx to homogeneous image
    coordinates, normalized so the third component equals 1."""
    homogeneous = np.dot(projMtx, [[x], [y], [z], [1]])
    return homogeneous / homogeneous[2]
# 2D -> 3D
def image_to_world(matrix, i, j):
    """Back-project pixel (i, j) to a 3D point using the 4x4 augmented
    projection matrix (its last row encodes the plane constraint).

    Returns a flat [x, y, z] list of scalars.
    """
    solution = np.linalg.solve(matrix, [[i], [j], [1], [0]])
    # Normalize the homogeneous solution by its fourth component.
    [[x], [y], [z], [_]] = solution / solution[3]
    return [x, y, z]
###############
# TODO: assumming the hough lines returns 3 lines:
# left (on the floor)
# middle (on the object)
# right (on the floor)
def get_pnts_from_line(line):
    """Sample one (i, j) pixel pair per integer x along the segment,
    using the line equation j = slope*i + intercept.

    NOTE(review): assumes x1 < x2 and a non-vertical segment — a vertical
    line (x1 == x2) would divide by zero; confirm against callers.
    """
    [[x1, y1, x2, y2]] = line
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - x1 * slope
    return [[i, i * slope + intercept] for i in range(x1, x2 + 1)]
# calculate inverse matrix (inverseMtx) -> 2D to 3D
# Assumed height of the object the middle line crosses (world units).
OBJ_HEIGHT = 4
# initialize list for least squares: 3D points sampled on the shadow line
obj_points_shadow = []
# we know lines on the left and right are on the plane z = 0
# (the last row of the augmented matrix constrains the back-projection
# to that plane; see image_to_world).
inverseMtxFloor = np.zeros((4,4))
inverseMtxFloor[:-1,:] = projMtx
inverseMtxFloor[-1:,:] = [[0, 0, 1, 0]]
# left line
# points_line_0 = get_pnts_from_line(lines[0])
# for i, j in points_line_0:
# obj_points_shadow.append(image_to_world(inverseMtxFloor, i, j))
[[i1, j1, i2, j2]] = lines[0]
obj_points_shadow.append(image_to_world(inverseMtxFloor, i1, j1))
obj_points_shadow.append(image_to_world(inverseMtxFloor, i2, j2))
# right line
# points_line_2 = get_pnts_from_line(lines[2])
# for i, j in points_line_2:
# obj_points_shadow.append(image_to_world(inverseMtxFloor, i, j))
[[i1, j1, i2, j2]] = lines[2]
obj_points_shadow.append(image_to_world(inverseMtxFloor, i1, j1))
obj_points_shadow.append(image_to_world(inverseMtxFloor, i2, j2))
# we know the center line is on the plane z = OBJ_HEIGHT
# (NOTE(review): the constraint row is [0, 0, 1, OBJ_HEIGHT], which with
# image_to_world yields z = -OBJ_HEIGHT after normalization — presumably
# intentional given the camera's axis orientation; confirm.)
inverseMtxObj = np.zeros((4,4))
inverseMtxObj[:-1,:] = projMtx
inverseMtxObj[-1:,:] = [[0, 0, 1, OBJ_HEIGHT]]
# points_line_1 = get_pnts_from_line(lines[1])
# for i, j in points_line_1:
# obj_points_shadow.append(image_to_world(inverseMtxObj, i, j))
[[i1, j1, i2, j2]] = lines[1]
obj_points_shadow.append(image_to_world(inverseMtxObj, i1, j1))
obj_points_shadow.append(image_to_world(inverseMtxObj, i2, j2))
############################
# least_squares method (start)
############################
# initial guess
# initial_plane = [0, 1, 0, -6.5]
# # with the points, use least_squares to calculate plane equation
# def f_min(X, p):
# plane_xyz = p[0:3]
# distance = (plane_xyz * X).sum(axis=1) + p[3]
# return distance / np.linalg.norm(plane_xyz)
# def residuals(params, signal, X):
# return f_min(X, params)
# final_shadow_plane = leastsq(residuals, initial_plane, args=(None, np.array(obj_points_shadow)))[0]
############################
# least_squares method (end)
############################
############################
# ransac method (start)
############################
# Fit the best plane through the sampled shadow points (skspatial).
plane_points = Points(obj_points_shadow)
plane = Plane.best_fit(plane_points)
# plot_3d(
# plane_points.plotter(c='k', s=50, depthshade=False),
# plane.plotter(alpha=0.2, lims_x=(-5, 5), lims_y=(-5, 5)),
# )
# cv.waitKey()
# exit(0)
# Cartesian plane coefficients (A, B, C, D) of A*x + B*y + C*z + D = 0.
final_shadow_plane = plane.cartesian()
print(f'A: {final_shadow_plane[0]}')
print(f'B: {final_shadow_plane[1]}')
print(f'C: {final_shadow_plane[2]}')
print(f'D: {final_shadow_plane[3]}')
############################
# ransac method (end)
############################
# calculate final matrix for 2D -> 3D conversion, now with the shadow plane's equations
inverseMtxFinal = np.zeros((4,4))
inverseMtxFinal[:-1,:] = projMtx
inverseMtxFinal[-1:,:] = [final_shadow_plane]
# Sanity check: back-project the end point of the middle line with both
# the known-height plane and the fitted shadow plane, and compare.
[[_, _, IMAGE_I, IMAGE_J]] = lines[1]
# IMAGE_I = 773
# IMAGE_J = 376
[ex_x, ex_y, ex_z] = image_to_world(inverseMtxObj, IMAGE_I, IMAGE_J)
[x, y, z] = image_to_world(inverseMtxFinal, IMAGE_I, IMAGE_J)
print(f'Image: ({IMAGE_I}, {IMAGE_J})')
print(f'Expected: ({ex_x}, {ex_y}, {ex_z})')
print(f'Calculated: ({x}, {y}, {z})')
#-------------------------#
#       PLOT RESULTS      #
#-------------------------#
# Scatter plot: reconstructed height of every sampled pixel on each line.
# EDU
pixel_hor = []
pnt_height = []
for line in lines:
    [[x1, y1, x2, y2]] = line
    slope = (y2 - y1) / (x2 - x1)
    b = y1 - x1 * slope
    for i in range(x1, x2 + 1):
        j = i * slope + b
        # image_to_world returns a flat [x, y, z] list of scalars; the
        # original unpacked it as four nested singletons
        # ([[_], [_], [h], [_]]), which raises ValueError at runtime.
        [_, _, h] = image_to_world(inverseMtxFinal, i, j)
        pixel_hor.append(i)
        # Negate so that "up" heights plot as positive values.
        pnt_height.append(-h)
# HENRIQUE (alternative sampling strategy, kept for reference)
# pixel_hor = []
# pnt_height = []
# for i in range (0, 6000, 50):
# height = None
# for line in lines:
# points = line[0]
# if points[0] < i and points[2] > i:
# slope = (points[3]-points[1])/(points[2]-points[0])
# b = points[3]-slope*points[2]
# j = slope*i+b
# [[_], [_], [height], [_]] = image_to_world(i, j)
# break
# if height != None:
# pixel_hor.append(i)
# pnt_height.append(height)
plt.scatter(pixel_hor, pnt_height)
plt.show()
cv.waitKey()
|
{"hexsha": "8ac342bb29cd3544b763f7b79e06e6e8cab74f60", "size": 13867, "ext": "py", "lang": "Python", "max_stars_repo_path": "proj1/extrinsic.py", "max_stars_repo_name": "EduRibeiro00/feup-vcom", "max_stars_repo_head_hexsha": "d856cd2c260c72df7c5b85191f54e241b8efdc7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-09T17:18:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-09T17:18:34.000Z", "max_issues_repo_path": "proj1/extrinsic.py", "max_issues_repo_name": "EduRibeiro00/feup-vcom", "max_issues_repo_head_hexsha": "d856cd2c260c72df7c5b85191f54e241b8efdc7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "proj1/extrinsic.py", "max_forks_repo_name": "EduRibeiro00/feup-vcom", "max_forks_repo_head_hexsha": "d856cd2c260c72df7c5b85191f54e241b8efdc7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1740139211, "max_line_length": 114, "alphanum_fraction": 0.6046729646, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4063}
|
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
# Directory holding saved training artifacts.
directory = '../../model'
# Pickled list of per-epoch losses to visualize.
reward_his_path1 = os.path.join(directory, 'history_loss-400.pkl')
#reward_his_path2 = os.path.join(directory, 'plot_wgan_gp.pkl')
#reward_his_path3 = os.path.join(directory, 'plot_wgan.pkl')
def plot():
    """Plot the pickled loss history stored at ``reward_his_path1``.

    Loads the per-epoch loss list and renders it as a red line chart of
    loss versus epoch index.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original pickle.load(open(...)) leaked the handle.
    with open(reward_his_path1, 'rb') as f:
        reward_his1 = pickle.load(f)
    #reward_his2 = pickle.load(open(reward_his_path2, 'rb'))
    #reward_his3 = pickle.load(open(reward_his_path3, 'rb'))
    plt.plot(np.arange(len(reward_his1)), reward_his1, 'r')
    #plt.plot(np.arange(len(reward_his2)), reward_his2, 'b')
    #plt.plot(np.arange(len(reward_his3)), reward_his3, 'g')
    plt.ylabel('Loss')
    plt.xlabel('Number of Epochs')
    plt.show()
plot()
|
{"hexsha": "47e330987570429be93d3bd73daa326a3269eb67", "size": 735, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/util/plot.py", "max_stars_repo_name": "samirsahoo007/Conditional-SeqGAN-Tensorflow", "max_stars_repo_head_hexsha": "3610e606e845ebf40ac8a832aa5d5ca16fbf9013", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2018-11-30T11:02:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T03:06:04.000Z", "max_issues_repo_path": "src/util/plot.py", "max_issues_repo_name": "samirsahoo007/Conditional-SeqGAN-Tensorflow", "max_issues_repo_head_hexsha": "3610e606e845ebf40ac8a832aa5d5ca16fbf9013", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-12-12T08:05:23.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-10T12:42:45.000Z", "max_forks_repo_path": "src/util/plot.py", "max_forks_repo_name": "samirsahoo007/Conditional-SeqGAN-Tensorflow", "max_forks_repo_head_hexsha": "3610e606e845ebf40ac8a832aa5d5ca16fbf9013", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2018-11-29T03:16:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-24T05:32:48.000Z", "avg_line_length": 23.7096774194, "max_line_length": 67, "alphanum_fraction": 0.7238095238, "include": true, "reason": "import numpy", "num_tokens": 219}
|
theory Prelude_ListNoNumbers__E5
imports "$HETS_ISABELLE_LIB/MainHC"
uses "$HETS_ISABELLE_LIB/prelude"
begin
setup "Header.initialize
[\"Comp1\", \"IdDef\", \"FlipDef\", \"FstDef\", \"SndDef\",
\"CurryDef\", \"UncurryDef\", \"NotFalse\", \"NotTrue\",
\"AndFalse\", \"AndTrue\", \"AndSym\", \"OrDef\", \"OtherwiseDef\",
\"NotFalse1\", \"NotTrue1\", \"notNot1\", \"notNot2\",
\"EqualTDef\", \"EqualSymDef\", \"EqualReflex\", \"EqualTransT\",
\"DiffDef\", \"DiffSymDef\", \"DiffTDef\", \"DiffFDef\", \"TE1\",
\"TE2\", \"TE3\", \"TE4\", \"IBE1\", \"IBE2\", \"IBE3\", \"IBE4\",
\"IBE5\", \"IBE6\", \"IBE7\", \"IBE8\", \"IUE1\", \"IUE2\",
\"IOE01\", \"IOE02\", \"IOE03\", \"IOE04\", \"IOE05\", \"IOE06\",
\"IOE07\", \"IOE08\", \"IOE09\", \"LeIrreflexivity\",
\"LeTAsymmetry\", \"LeTTransitive\", \"LeTTotal\", \"GeDef\",
\"GeIrreflexivity\", \"GeTAsymmetry\", \"GeTTransitive\",
\"GeTTotal\", \"LeqDef\", \"LeqReflexivity\", \"LeqTTransitive\",
\"LeqTTotal\", \"GeqDef\", \"GeqReflexivity\", \"GeqTTransitive\",
\"GeqTTotal\", \"EqTSOrdRel\", \"EqFSOrdRel\", \"EqTOrdRel\",
\"EqFOrdRel\", \"EqTOrdTSubstE\", \"EqTOrdFSubstE\",
\"EqTOrdTSubstD\", \"EqTOrdFSubstD\", \"LeTGeFEqFRel\",
\"LeFGeTEqTRel\", \"LeTGeTRel\", \"LeFGeFRel\", \"LeqTGetTRel\",
\"LeqFGetFRel\", \"GeTLeTRel\", \"GeFLeFRel\", \"GeqTLeqTRel\",
\"GeqFLeqFRel\", \"LeqTGeFRel\", \"LeqFGeTRel\", \"GeTLeFEqFRel\",
\"GeFLeTEqTRel\", \"GeqTLeFRel\", \"GeqFLeTRel\",
\"LeqTLeTEqTRel\", \"LeqFLeFEqFRel\", \"GeqTGeTEqTRel\",
\"GeqFGeFEqFRel\", \"LeTGeqFRel\", \"GeTLeqFRel\", \"LeLeqDiff\",
\"CmpLTDef\", \"CmpEQDef\", \"CmpGTDef\", \"MaxYDef\", \"MaxXDef\",
\"MinXDef\", \"MinYDef\", \"MaxSym\", \"MinSym\", \"TO1\", \"TO3\",
\"TO4\", \"TO5\", \"IOO13\", \"IOO14\", \"IOO15\", \"IOO16\",
\"IOO17\", \"IOO18\", \"IOO19\", \"IOO20\", \"IOO21\", \"IOO22\",
\"IOO23\", \"IOO24\", \"IOO25\", \"IOO26\", \"IOO27\", \"IOO28\",
\"IOO29\", \"IOO30\", \"IOO31\", \"IOO32\", \"IOO33\", \"IBO5\",
\"IBO6\", \"IBO7\", \"IBO8\", \"IBO9\", \"IBO10\", \"IBO11\",
\"IBO12\", \"IUO01\", \"IUO02\", \"IUO03\", \"IUO04\", \"IUO05\",
\"IUO06\", \"IUO07\", \"NotDefHead\", \"HeadDef\", \"NotDefTail\",
\"TailDef\", \"FoldrNil\", \"FoldrCons\", \"FoldlNil\",
\"FoldlCons\", \"MapNil\", \"MapCons\", \"XPlusXPlusNil\",
\"XPlusXPlusCons\", \"FilterNil\", \"FilterConsT\",
\"FilterConsF\", \"ZipNil\", \"ZipConsNil\", \"ZipConsCons\",
\"UnzipNil\", \"UnzipCons\", \"ILE01\", \"ILE02\", \"ILO01\",
\"ILO02\", \"ILO03\", \"ILO04\", \"ILO05\", \"ILO06\", \"ILO07\",
\"ILO08\", \"ILO09\", \"ILO10\", \"ILO11\", \"ILO12\", \"ILO13\",
\"ILO14\", \"ILO15\", \"ILO16\", \"ILO17\", \"ILO18\", \"ILO19\",
\"ILO20\", \"ILO21\", \"ILO22\", \"FoldlDecomp\", \"MapDecomp\",
\"MapFunctor\", \"FilterProm\", \"InitNil\", \"InitConsNil\",
\"InitConsCons\", \"LastNil\", \"LastConsNil\", \"LastConsCons\",
\"NullNil\", \"NullCons\", \"ReverseNil\", \"ReverseCons\",
\"Foldr1Nil\", \"Foldr1ConsNil\", \"Foldr1ConsCons\",
\"Foldl1Nil\", \"Foldl1ConsNil\", \"Foldl1ConsCons\", \"ScanlNil\",
\"ScanlCons\", \"Scanl1Nil\", \"Scanl1Cons\", \"ScanrNil\",
\"ScanrCons\", \"Scanr1Nil\", \"Scanr1ConsNil\",
\"Scanr1ConsCons\", \"ScanlProperty\", \"ScanrProperty\"]"
typedecl Unit
datatype Bool = X_False ("False''") | X_True ("True''")
datatype Ordering = EQ | GT | LT
datatype 'a List = X_Cons 'a "'a List" | X_Nil ("Nil''")
consts
X__XAmpXAmp__X :: "Bool => Bool => Bool" ("(_/ &&/ _)" [54,54] 52)
X__XEqXEq__X :: "'a => 'a => Bool" ("(_/ ==''/ _)" [54,54] 52)
X__XGtXEq__X :: "'a => 'a => Bool" ("(_/ >=''/ _)" [54,54] 52)
X__XGt__X :: "'a => 'a => Bool" ("(_/ >''/ _)" [54,54] 52)
X__XLtXEq__X :: "'a => 'a => Bool" ("(_/ <=''/ _)" [54,54] 52)
X__XLt__X :: "'a => 'a => Bool" ("(_/ <''/ _)" [54,54] 52)
X__XPlusXPlus__X :: "'a List => 'a List => 'a List" ("(_/ ++''/ _)" [54,54] 52)
X__XSlashXEq__X :: "'a => 'a => Bool" ("(_/ '/=/ _)" [54,54] 52)
X__XVBarXVBar__X :: "Bool => Bool => Bool" ("(_/ ||/ _)" [54,54] 52)
X__o__X :: "('b => 'c) * ('a => 'b) => 'a => 'c"
X_curry :: "('a * 'b => 'c) => 'a => 'b => 'c"
X_filter :: "('a => Bool) => 'a List => 'a List"
X_flip :: "('a => 'b => 'c) => 'b => 'a => 'c"
X_foldl :: "('a => 'b => 'a) => 'a => 'b List => 'a partial"
X_foldr :: "('a => 'b => 'b) => 'b => 'a List => 'b partial"
X_fst :: "'a => 'b => 'a" ("fst''/'(_,/ _')" [3,3] 999)
X_head :: "'a List => 'a partial" ("head/'(_')" [3] 999)
X_id :: "'a => 'a" ("id''/'(_')" [3] 999)
X_init :: "'a List => 'a List partial" ("init/'(_')" [3] 999)
X_last :: "'a List => 'a partial" ("last''/'(_')" [3] 999)
X_map :: "('a => 'b) => 'a List => 'b List"
X_max :: "'a => 'a => 'a"
X_min :: "'a => 'a => 'a"
X_null :: "'a List => Bool" ("null''/'(_')" [3] 999)
X_reverse :: "'a List => 'a List" ("reverse/'(_')" [3] 999)
X_snd :: "'a => 'b => 'b" ("snd''/'(_,/ _')" [3,3] 999)
X_tail :: "'a List => 'a List partial" ("tail/'(_')" [3] 999)
X_unzip :: "('a * 'b) List => 'a List * 'b List" ("unzip/'(_')" [3] 999)
X_zip :: "'a List => 'b List => ('a * 'b) List"
compare :: "'a => 'a => Ordering"
foldl1 :: "('a => 'a => 'a) => 'a List => 'a partial"
foldr1 :: "('a => 'a => 'a) => 'a List => 'a partial"
notH__X :: "Bool => Bool" ("(notH/ _)" [56] 56)
otherwiseH :: "Bool"
scanl :: "('a => 'b => 'a) => 'a => 'b List => 'a List"
scanl1 :: "('a => 'a => 'a) => 'a List => 'a List"
scanr :: "('a => 'b => 'b) => 'b => 'a List => 'b List"
scanr1 :: "('a => 'a => 'a) => 'a List => 'a List"
uncurry :: "('a => 'b => 'c) => 'a * 'b => 'c"
axioms
Comp1 [rule_format] :
"ALL (f :: 'b => 'c).
ALL (g :: 'a => 'b). ALL (y :: 'a). X__o__X (f, g) y = f (g y)"
IdDef [rule_format] : "ALL (x :: 'a). id'(x) = x"
FlipDef [rule_format] :
"ALL (f :: 'a => 'b => 'c).
ALL (x :: 'a). ALL (y :: 'b). X_flip f y x = f x y"
FstDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'b). fst'(x, y) = x"
SndDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'b). snd'(x, y) = y"
CurryDef [rule_format] :
"ALL (g :: 'a * 'b => 'c).
ALL (x :: 'a). ALL (y :: 'b). X_curry g x y = g (x, y)"
UncurryDef [rule_format] :
"ALL (f :: 'a => 'b => 'c).
ALL (x :: 'a). ALL (y :: 'b). uncurry f (x, y) = f x y"
NotFalse [rule_format] : "notH False' = True'"
NotTrue [rule_format] : "notH True' = False'"
AndFalse [rule_format] : "ALL (x :: Bool). False' && x = False'"
AndTrue [rule_format] : "ALL (x :: Bool). True' && x = x"
AndSym [rule_format] :
"ALL (x :: Bool). ALL (y :: Bool). x && y = y && x"
OrDef [rule_format] :
"ALL (x :: Bool).
ALL (y :: Bool). x || y = notH (notH x && notH y)"
OtherwiseDef [rule_format] : "otherwiseH = True'"
NotFalse1 [rule_format] :
"ALL (x :: Bool). notH x = True' = (x = False')"
NotTrue1 [rule_format] :
"ALL (x :: Bool). notH x = False' = (x = True')"
notNot1 [rule_format] :
"ALL (x :: Bool). (~ x = True') = (notH x = True')"
notNot2 [rule_format] :
"ALL (x :: Bool). (~ x = False') = (notH x = False')"
EqualTDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x = y --> x ==' y = True'"
EqualSymDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x ==' y = y ==' x"
EqualReflex [rule_format] : "ALL (x :: 'a). x ==' x = True'"
EqualTransT [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a).
x ==' y = True' & y ==' z = True' --> x ==' z = True'"
DiffDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x /= y = notH (x ==' y)"
DiffSymDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x /= y = y /= x"
DiffTDef [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). x /= y = True' = (notH (x ==' y) = True')"
DiffFDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x /= y = False' = (x ==' y = True')"
TE1 [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x ==' y = False' --> ~ x = y"
TE2 [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). notH (x ==' y) = True' = (x ==' y = False')"
TE3 [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). notH (x ==' y) = False' = (x ==' y = True')"
TE4 [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). (~ x ==' y = True') = (x ==' y = False')"
IBE1 [rule_format] : "True' ==' True' = True'"
IBE2 [rule_format] : "False' ==' False' = True'"
IBE3 [rule_format] : "False' ==' True' = False'"
IBE4 [rule_format] : "True' ==' False' = False'"
IBE5 [rule_format] : "True' /= False' = True'"
IBE6 [rule_format] : "False' /= True' = True'"
IBE7 [rule_format] : "notH (True' ==' False') = True'"
IBE8 [rule_format] : "notH notH (True' ==' False') = False'"
IUE1 [rule_format] : "() ==' () = True'"
IUE2 [rule_format] : "() /= () = False'"
IOE01 [rule_format] : "LT ==' LT = True'"
IOE02 [rule_format] : "EQ ==' EQ = True'"
IOE03 [rule_format] : "GT ==' GT = True'"
IOE04 [rule_format] : "LT ==' EQ = False'"
IOE05 [rule_format] : "LT ==' GT = False'"
IOE06 [rule_format] : "EQ ==' GT = False'"
IOE07 [rule_format] : "LT /= EQ = True'"
IOE08 [rule_format] : "LT /= GT = True'"
IOE09 [rule_format] : "EQ /= GT = True'"
LeIrreflexivity [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x ==' y = True' --> x <' y = False'"
LeTAsymmetry [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <' y = True' --> y <' x = False'"
LeTTransitive [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a). x <' y = True' & y <' z = True' --> x <' z = True'"
LeTTotal [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). (x <' y = True' | y <' x = True') | x ==' y = True'"
GeDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >' y = y <' x"
GeIrreflexivity [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x ==' y = True' --> x >' y = False'"
GeTAsymmetry [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >' y = True' --> y >' x = False'"
GeTTransitive [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a). (x >' y) && (y >' z) = True' --> x >' z = True'"
GeTTotal [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). ((x >' y) || (y >' x)) || (x ==' y) = True'"
LeqDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <=' y = (x <' y) || (x ==' y)"
LeqReflexivity [rule_format] : "ALL (x :: 'a). x <=' x = True'"
LeqTTransitive [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a). (x <=' y) && (y <=' z) = True' --> x <=' z = True'"
LeqTTotal [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). (x <=' y) && (y <=' x) = x ==' y"
GeqDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >=' y = (x >' y) || (x ==' y)"
GeqReflexivity [rule_format] : "ALL (x :: 'a). x >=' x = True'"
GeqTTransitive [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a). (x >=' y) && (y >=' z) = True' --> x >=' z = True'"
GeqTTotal [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). (x >=' y) && (y >=' x) = x ==' y"
EqTSOrdRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x ==' y = True' = (x <' y = False' & x >' y = False')"
EqFSOrdRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x ==' y = False' = (x <' y = True' | x >' y = True')"
EqTOrdRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x ==' y = True' = (x <=' y = True' & x >=' y = True')"
EqFOrdRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x ==' y = False' = (x <=' y = True' | x >=' y = True')"
EqTOrdTSubstE [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a). x ==' y = True' & y <' z = True' --> x <' z = True'"
EqTOrdFSubstE [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a).
x ==' y = True' & y <' z = False' --> x <' z = False'"
EqTOrdTSubstD [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a). x ==' y = True' & z <' y = True' --> z <' x = True'"
EqTOrdFSubstD [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a).
x ==' y = True' & z <' y = False' --> z <' x = False'"
LeTGeFEqFRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x <' y = True' = (x >' y = False' & x ==' y = False')"
LeFGeTEqTRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x <' y = False' = (x >' y = True' | x ==' y = True')"
LeTGeTRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <' y = True' = (y >' x = True')"
LeFGeFRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <' y = False' = (y >' x = False')"
LeqTGetTRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <=' y = True' = (y >=' x = True')"
LeqFGetFRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). x <=' y = False' = (y >=' x = False')"
GeTLeTRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >' y = True' = (y <' x = True')"
GeFLeFRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >' y = False' = (y <' x = False')"
GeqTLeqTRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >=' y = True' = (y <=' x = True')"
GeqFLeqFRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). x >=' y = False' = (y <=' x = False')"
LeqTGeFRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <=' y = True' = (x >' y = False')"
LeqFGeTRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <=' y = False' = (x >' y = True')"
GeTLeFEqFRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x >' y = True' = (x <' y = False' & x ==' y = False')"
GeFLeTEqTRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x >' y = False' = (x <' y = True' | x ==' y = True')"
GeqTLeFRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >=' y = True' = (x <' y = False')"
GeqFLeTRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >=' y = False' = (x <' y = True')"
LeqTLeTEqTRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x <=' y = True' = (x <' y = True' | x ==' y = True')"
LeqFLeFEqFRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x <=' y = False' = (x <' y = False' & x ==' y = False')"
GeqTGeTEqTRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x >=' y = True' = (x >' y = True' | x ==' y = True')"
GeqFGeFEqFRel [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
x >=' y = False' = (x >' y = False' & x ==' y = False')"
LeTGeqFRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <' y = True' = (x >=' y = False')"
GeTLeqFRel [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x >' y = True' = (x <=' y = False')"
LeLeqDiff [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). x <' y = (x <=' y) && (x /= y)"
CmpLTDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). compare x y ==' LT = x <' y"
CmpEQDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). compare x y ==' EQ = x ==' y"
CmpGTDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). compare x y ==' GT = x >' y"
MaxYDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). X_max x y ==' y = x <=' y"
MaxXDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). X_max x y ==' x = y <=' x"
MinXDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). X_min x y ==' x = x <=' y"
MinYDef [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). X_min x y ==' y = y <=' x"
MaxSym [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). X_max x y ==' y = X_max y x ==' y"
MinSym [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). X_min x y ==' y = X_min y x ==' y"
TO1 [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a).
(x ==' y = True' | x <' y = True') = (x <=' y = True')"
TO3 [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). notH notH (x <' y) = True' | notH (x <' y) = True'"
TO4 [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'a). x <' y = True' --> notH (x ==' y) = True'"
TO5 [rule_format] :
"ALL (w :: 'a).
ALL (x :: 'a).
ALL (y :: 'a).
ALL (z :: 'a).
(x <' y = True' & y <' z = True') & z <' w = True' -->
x <' w = True'"
IOO13 [rule_format] : "LT <' EQ = True'"
IOO14 [rule_format] : "EQ <' GT = True'"
IOO15 [rule_format] : "LT <' GT = True'"
IOO16 [rule_format] : "LT <=' EQ = True'"
IOO17 [rule_format] : "EQ <=' GT = True'"
IOO18 [rule_format] : "LT <=' GT = True'"
IOO19 [rule_format] : "EQ >=' LT = True'"
IOO20 [rule_format] : "GT >=' EQ = True'"
IOO21 [rule_format] : "GT >=' LT = True'"
IOO22 [rule_format] : "EQ >' LT = True'"
IOO23 [rule_format] : "GT >' EQ = True'"
IOO24 [rule_format] : "GT >' LT = True'"
IOO25 [rule_format] : "X_max LT EQ ==' EQ = True'"
IOO26 [rule_format] : "X_max EQ GT ==' GT = True'"
IOO27 [rule_format] : "X_max LT GT ==' GT = True'"
IOO28 [rule_format] : "X_min LT EQ ==' LT = True'"
IOO29 [rule_format] : "X_min EQ GT ==' EQ = True'"
IOO30 [rule_format] : "X_min LT GT ==' LT = True'"
IOO31 [rule_format] : "compare LT LT ==' EQ = True'"
IOO32 [rule_format] : "compare EQ EQ ==' EQ = True'"
IOO33 [rule_format] : "compare GT GT ==' EQ = True'"
IBO5 [rule_format] : "False' <' True' = True'"
IBO6 [rule_format] : "False' >=' True' = False'"
IBO7 [rule_format] : "True' >=' False' = True'"
IBO8 [rule_format] : "True' <' False' = False'"
IBO9 [rule_format] : "X_max False' True' ==' True' = True'"
IBO10 [rule_format] : "X_min False' True' ==' False' = True'"
IBO11 [rule_format] : "compare True' True' ==' EQ = True'"
IBO12 [rule_format] : "compare False' False' ==' EQ = True'"
IUO01 [rule_format] : "() <=' () = True'"
IUO02 [rule_format] : "() <' () = False'"
IUO03 [rule_format] : "() >=' () = True'"
IUO04 [rule_format] : "() >' () = False'"
IUO05 [rule_format] : "X_max () () ==' () = True'"
IUO06 [rule_format] : "X_min () () ==' () = True'"
IUO07 [rule_format] : "compare () () ==' EQ = True'"
NotDefHead [rule_format] : "~ defOp (head(Nil'))"
HeadDef [rule_format] :
"ALL (x :: 'a).
ALL (xs :: 'a List). head(X_Cons x xs) = makePartial x"
NotDefTail [rule_format] : "~ defOp (tail(Nil'))"
TailDef [rule_format] :
"ALL (x :: 'a).
ALL (xs :: 'a List). tail(X_Cons x xs) = makePartial xs"
FoldrNil [rule_format] :
"ALL (f :: 'a => 'b => 'b).
ALL (s :: 'b). X_foldr f s Nil' = makePartial s"
FoldrCons [rule_format] :
"ALL (f :: 'a => 'b => 'b).
ALL (s :: 'b).
ALL (x :: 'a).
ALL (xs :: 'a List).
X_foldr f s (X_Cons x xs) =
restrictOp (makePartial (f x (makeTotal (X_foldr f s xs))))
(defOp (X_foldr f s xs))"
FoldlNil [rule_format] :
"ALL (g :: 'a => 'b => 'a).
ALL (t :: 'a). X_foldl g t Nil' = makePartial t"
FoldlCons [rule_format] :
"ALL (g :: 'a => 'b => 'a).
ALL (t :: 'a).
ALL (z :: 'b).
ALL (zs :: 'b List).
X_foldl g t (X_Cons z zs) = X_foldl g (g t z) zs"
MapNil [rule_format] : "ALL (h :: 'a => 'b). X_map h Nil' = Nil'"
MapCons [rule_format] :
"ALL (h :: 'a => 'b).
ALL (x :: 'a).
ALL (xs :: 'a List).
X_map h (X_Cons x xs) = X_Cons (h x) (X_map h xs)"
XPlusXPlusNil [rule_format] : "ALL (l :: 'a List). Nil' ++' l = l"
XPlusXPlusCons [rule_format] :
"ALL (l :: 'a List).
ALL (x :: 'a).
ALL (xs :: 'a List). X_Cons x xs ++' l = X_Cons x (xs ++' l)"
FilterNil [rule_format] :
"ALL (p :: 'a => Bool). X_filter p Nil' = Nil'"
FilterConsT [rule_format] :
"ALL (p :: 'a => Bool).
ALL (x :: 'a).
ALL (xs :: 'a List).
p x = True' -->
X_filter p (X_Cons x xs) = X_Cons x (X_filter p xs)"
FilterConsF [rule_format] :
"ALL (p :: 'a => Bool).
ALL (x :: 'a).
ALL (xs :: 'a List).
p x = False' --> X_filter p (X_Cons x xs) = X_filter p xs"
ZipNil [rule_format] : "ALL (l :: 'a List). X_zip Nil' l = Nil'"
ZipConsNil [rule_format] :
"ALL (l :: 'a List).
ALL (x :: 'a).
ALL (xs :: 'a List). l = Nil' --> X_zip (X_Cons x xs) l = Nil'"
ZipConsCons [rule_format] :
"ALL (l :: 'a List).
ALL (x :: 'a).
ALL (xs :: 'a List).
ALL (y :: 'a).
ALL (ys :: 'a List).
l = X_Cons y ys -->
X_zip (X_Cons x xs) l = X_Cons (x, y) (X_zip xs ys)"
UnzipNil [rule_format] : "unzip(Nil') = (Nil', Nil')"
UnzipCons [rule_format] :
"ALL (ps :: ('a * 'b) List).
ALL (x :: 'a).
ALL (z :: 'b).
unzip(X_Cons (x, z) ps) =
(let (ys, zs) = unzip(ps) in (X_Cons x ys, X_Cons z zs))"
ILE01 [rule_format] : "Nil' ==' Nil' = True'"
ILE02 [rule_format] :
"ALL (x :: 'a).
ALL (xs :: 'a List).
ALL (y :: 'a).
ALL (ys :: 'a List).
X_Cons x xs ==' X_Cons y ys = (x ==' y) && (xs ==' ys)"
ILO01 [rule_format] : "Nil' <' Nil' = False'"
ILO02 [rule_format] : "Nil' <=' Nil' = True'"
ILO03 [rule_format] : "Nil' >' Nil' = False'"
ILO04 [rule_format] : "Nil' >=' Nil' = True'"
ILO05 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
z <' w = True' --> X_Cons z zs <' X_Cons w ws = True'"
ILO06 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
z ==' w = True' --> X_Cons z zs <' X_Cons w ws = zs <' ws"
ILO07 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
z <' w = False' & z ==' w = False' -->
X_Cons z zs <' X_Cons w ws = False'"
ILO08 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
X_Cons z zs <=' X_Cons w ws =
(X_Cons z zs <' X_Cons w ws) || (X_Cons z zs ==' X_Cons w ws)"
ILO09 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
X_Cons z zs >' X_Cons w ws = X_Cons w ws <' X_Cons z zs"
ILO10 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
X_Cons z zs >=' X_Cons w ws =
(X_Cons z zs >' X_Cons w ws) || (X_Cons z zs ==' X_Cons w ws)"
ILO11 [rule_format] : "compare Nil' Nil' ==' EQ = Nil' ==' Nil'"
ILO12 [rule_format] : "compare Nil' Nil' ==' LT = Nil' <' Nil'"
ILO13 [rule_format] : "compare Nil' Nil' ==' GT = Nil' >' Nil'"
ILO14 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
compare (X_Cons z zs) (X_Cons w ws) ==' EQ =
X_Cons z zs ==' X_Cons w ws"
ILO15 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
compare (X_Cons z zs) (X_Cons w ws) ==' LT =
X_Cons z zs <' X_Cons w ws"
ILO16 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
compare (X_Cons z zs) (X_Cons w ws) ==' GT =
X_Cons z zs >' X_Cons w ws"
ILO17 [rule_format] : "X_max Nil' Nil' ==' Nil' = Nil' <=' Nil'"
ILO18 [rule_format] : "X_min Nil' Nil' ==' Nil' = Nil' <=' Nil'"
ILO19 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
X_Cons z zs <=' X_Cons w ws =
X_max (X_Cons z zs) (X_Cons w ws) ==' X_Cons w ws"
ILO20 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
X_Cons w ws <=' X_Cons z zs =
X_max (X_Cons z zs) (X_Cons w ws) ==' X_Cons z zs"
ILO21 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
X_Cons z zs <=' X_Cons w ws =
X_min (X_Cons z zs) (X_Cons w ws) ==' X_Cons z zs"
ILO22 [rule_format] :
"ALL (w :: 'b).
ALL (ws :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
X_Cons w ws <=' X_Cons z zs =
X_min (X_Cons z zs) (X_Cons w ws) ==' X_Cons w ws"
FoldlDecomp [rule_format] :
"ALL (e :: 'a).
ALL (i :: 'a => 'b => 'a).
ALL (ts :: 'b List).
ALL (ys :: 'b List).
X_foldl i e (ys ++' ts) =
restrictOp (X_foldl i (makeTotal (X_foldl i e ys)) ts)
(defOp (X_foldl i e ys))"
MapDecomp [rule_format] :
"ALL (f :: 'a => 'b).
ALL (xs :: 'a List).
ALL (zs :: 'a List).
X_map f (xs ++' zs) = X_map f xs ++' X_map f zs"
MapFunctor [rule_format] :
"ALL (f :: 'a => 'b).
ALL (g :: 'b => 'c).
ALL (xs :: 'a List).
X_map (X__o__X (g, f)) xs = X_map g (X_map f xs)"
FilterProm [rule_format] :
"ALL (f :: 'a => 'b).
ALL (p :: 'b => Bool).
ALL (xs :: 'a List).
X_filter p (X_map f xs) = X_map f (X_filter (X__o__X (p, f)) xs)"
InitNil [rule_format] : "~ defOp (init(Nil'))"
InitConsNil [rule_format] :
"ALL (x :: 'a). init(X_Cons x Nil') = makePartial Nil'"
InitConsCons [rule_format] :
"ALL (x :: 'a).
ALL (xs :: 'a List).
init(X_Cons x xs) =
restrictOp (makePartial (X_Cons x (makeTotal (init(xs)))))
(defOp (init(xs)))"
LastNil [rule_format] : "~ defOp (last'(Nil'))"
LastConsNil [rule_format] :
"ALL (x :: 'a). last'(X_Cons x Nil') = makePartial x"
LastConsCons [rule_format] :
"ALL (x :: 'a).
ALL (xs :: 'a List). last'(X_Cons x xs) = last'(xs)"
NullNil [rule_format] : "null'(Nil') = True'"
NullCons [rule_format] :
"ALL (x :: 'a). ALL (xs :: 'a List). null'(X_Cons x xs) = False'"
ReverseNil [rule_format] : "reverse(Nil') = Nil'"
ReverseCons [rule_format] :
"ALL (x :: 'a).
ALL (xs :: 'a List).
reverse(X_Cons x xs) = reverse(xs) ++' X_Cons x Nil'"
Foldr1Nil [rule_format] :
"ALL (f :: 'a => 'a => 'a). ~ defOp (foldr1 f Nil')"
Foldr1ConsNil [rule_format] :
"ALL (f :: 'a => 'a => 'a).
ALL (x :: 'a). foldr1 f (X_Cons x Nil') = makePartial x"
Foldr1ConsCons [rule_format] :
"ALL (f :: 'a => 'a => 'a).
ALL (x :: 'a).
ALL (xs :: 'a List).
foldr1 f (X_Cons x xs) =
restrictOp (makePartial (f x (makeTotal (foldr1 f xs))))
(defOp (foldr1 f xs))"
Foldl1Nil [rule_format] :
"ALL (f :: 'a => 'a => 'a). ~ defOp (foldl1 f Nil')"
Foldl1ConsNil [rule_format] :
"ALL (f :: 'a => 'a => 'a).
ALL (x :: 'a). foldl1 f (X_Cons x Nil') = makePartial x"
Foldl1ConsCons [rule_format] :
"ALL (f :: 'a => 'a => 'a).
ALL (x :: 'a).
ALL (xs :: 'a List).
foldl1 f (X_Cons x xs) =
restrictOp (makePartial (f x (makeTotal (foldr1 f xs))))
(defOp (foldr1 f xs))"
ScanlNil [rule_format] :
"ALL (g :: 'a => 'b => 'a).
ALL (q :: 'a).
ALL (ys :: 'b List). ys = Nil' --> scanl g q ys = X_Cons q Nil'"
ScanlCons [rule_format] :
"ALL (g :: 'a => 'b => 'a).
ALL (q :: 'a).
ALL (ys :: 'b List).
ALL (z :: 'b).
ALL (zs :: 'b List).
ys = X_Cons z zs --> scanl g q ys = X_Cons q (scanl g (g q z) zs)"
Scanl1Nil [rule_format] :
"ALL (f :: 'a => 'a => 'a). scanl1 f Nil' = Nil'"
Scanl1Cons [rule_format] :
"ALL (f :: 'a => 'a => 'a).
ALL (x :: 'a).
ALL (xs :: 'a List). scanl1 f (X_Cons x xs) = scanl f x xs"
ScanrNil [rule_format] :
"ALL (h :: 'a => 'b => 'b).
ALL (z :: 'b). scanr h z Nil' = X_Cons z Nil'"
ScanrCons [rule_format] :
"ALL (h :: 'a => 'b => 'b).
ALL (x :: 'a).
ALL (xs :: 'a List).
ALL (y :: 'b).
ALL (ys :: 'b List).
ALL (z :: 'b).
X_Cons y ys = scanr h z xs -->
scanr h z (X_Cons x xs) = X_Cons (h x y) (X_Cons y ys)"
Scanr1Nil [rule_format] :
"ALL (f :: 'a => 'a => 'a). scanr1 f Nil' = Nil'"
Scanr1ConsNil [rule_format] :
"ALL (f :: 'a => 'a => 'a).
ALL (x :: 'a). scanr1 f (X_Cons x Nil') = X_Cons x Nil'"
Scanr1ConsCons [rule_format] :
"ALL (f :: 'a => 'a => 'a).
ALL (q :: 'a).
ALL (qs :: 'a List).
ALL (x :: 'a).
ALL (xs :: 'a List).
X_Cons q qs = scanr1 f xs -->
scanr1 f (X_Cons x xs) = X_Cons (f x q) (X_Cons q qs)"
(* Register the defining equations and (total-)order axioms stated above as
   default simplification rules.  This lets the concluding theorems in this
   theory be discharged by the simplifier/auto without citing each rule. *)
declare Comp1 [simp]
declare IdDef [simp]
declare FlipDef [simp]
declare FstDef [simp]
declare SndDef [simp]
declare CurryDef [simp]
declare UncurryDef [simp]
declare NotFalse [simp]
declare NotTrue [simp]
declare AndFalse [simp]
declare AndTrue [simp]
declare EqualReflex [simp]
declare IBE1 [simp]
declare IBE2 [simp]
declare IBE3 [simp]
declare IBE4 [simp]
declare IBE5 [simp]
declare IBE6 [simp]
declare IBE7 [simp]
declare IBE8 [simp]
declare IOE01 [simp]
declare IOE02 [simp]
declare IOE03 [simp]
declare IOE04 [simp]
declare IOE05 [simp]
declare IOE06 [simp]
declare IOE07 [simp]
declare IOE08 [simp]
declare IOE09 [simp]
declare LeIrreflexivity [simp]
declare LeTAsymmetry [simp]
declare GeIrreflexivity [simp]
declare GeTAsymmetry [simp]
declare GeTTransitive [simp]
declare GeTTotal [simp]
declare LeqReflexivity [simp]
declare LeqTTransitive [simp]
declare LeqTTotal [simp]
declare GeqReflexivity [simp]
declare GeqTTransitive [simp]
declare GeqTTotal [simp]
declare CmpLTDef [simp]
declare CmpEQDef [simp]
declare CmpGTDef [simp]
declare MaxYDef [simp]
declare MaxXDef [simp]
declare MinXDef [simp]
declare MinYDef [simp]
declare TO4 [simp]
declare IOO13 [simp]
declare IOO14 [simp]
declare IOO15 [simp]
declare IOO16 [simp]
declare IOO17 [simp]
declare IOO18 [simp]
declare IOO19 [simp]
declare IOO20 [simp]
declare IOO21 [simp]
declare IOO22 [simp]
declare IOO23 [simp]
declare IOO24 [simp]
declare IOO25 [simp]
declare IOO26 [simp]
declare IOO27 [simp]
declare IOO28 [simp]
declare IOO29 [simp]
declare IOO30 [simp]
declare IOO31 [simp]
declare IOO32 [simp]
declare IOO33 [simp]
declare IBO5 [simp]
declare IBO6 [simp]
declare IBO7 [simp]
declare IBO8 [simp]
declare IBO9 [simp]
declare IBO10 [simp]
declare IBO11 [simp]
declare IBO12 [simp]
declare IUO05 [simp]
declare IUO06 [simp]
declare IUO07 [simp]
declare NotDefHead [simp]
declare HeadDef [simp]
declare NotDefTail [simp]
declare TailDef [simp]
declare FoldrNil [simp]
declare FoldlNil [simp]
declare MapNil [simp]
declare XPlusXPlusNil [simp]
declare FilterNil [simp]
declare FilterConsF [simp]
declare ZipNil [simp]
declare ILE01 [simp]
declare ILO01 [simp]
declare ILO02 [simp]
declare ILO03 [simp]
declare ILO04 [simp]
declare ILO05 [simp]
declare ILO06 [simp]
declare ILO11 [simp]
declare ILO12 [simp]
declare ILO13 [simp]
declare ILO14 [simp]
declare ILO15 [simp]
declare ILO16 [simp]
declare ILO17 [simp]
declare ILO18 [simp]
declare InitNil [simp]
declare InitConsNil [simp]
declare LastNil [simp]
declare LastConsNil [simp]
declare LastConsCons [simp]
declare NullNil [simp]
declare NullCons [simp]
declare ReverseNil [simp]
declare Foldr1Nil [simp]
declare Foldr1ConsNil [simp]
declare Foldl1Nil [simp]
declare Foldl1ConsNil [simp]
declare Scanl1Nil [simp]
declare Scanl1Cons [simp]
declare ScanrNil [simp]
declare Scanr1Nil [simp]
declare Scanr1ConsNil [simp]
(* The last element of the list produced by scanl is the foldl result
   over the same function, seed and input list. *)
theorem ScanlProperty :
"ALL (g :: 'a => 'b => 'a).
 ALL (x :: 'a).
 ALL (ys :: 'b List). last'(scanl g x ys) = X_foldl g x ys"
by (auto)
setup "Header.record \"ScanlProperty\""
(* Dual of ScanlProperty: the head of the list produced by scanr is the
   foldr result over the same function, seed and input list. *)
theorem ScanrProperty :
"ALL (h :: 'a => 'b => 'b).
 ALL (xs :: 'a List).
 ALL (y :: 'b). head(scanr h y xs) = X_foldr h y xs"
by (auto)
setup "Header.record \"ScanrProperty\""
end
|
{"author": "glaubersp", "repo": "HasCASL-Library_Source", "sha": "be605b06acfc124d8e88829cc931a1148ea30460", "save_path": "github-repos/isabelle/glaubersp-HasCASL-Library_Source", "path": "github-repos/isabelle/glaubersp-HasCASL-Library_Source/HasCASL-Library_Source-be605b06acfc124d8e88829cc931a1148ea30460/Prelude.Strict/Prelude_ListNoNumbers__E5.thy"}
|
import shutil
from pathlib import Path
import pickle
import tensorflow as tf
import os
import numpy as np
from models.PositiveLearningElkan.pu_learning import PULogisticRegressionSK
from models.model_base import DetektorModel
from project_paths import ProjectPaths
from evaluations.area_roc import ROC, plot_roc
from models.baselines import LogisticRegression, MLP, LogisticRegressionSK, SVMSK, GaussianProcess
from evaluations import Accuracy, F1, TruePositives, TrueNegatives, FalsePositives, FalseNegatives, Samples, \
AreaUnderROC
from models.recurrent.basic_recurrent import BasicRecurrent
from models.dnn import BasicDNN
from util.learning_rate_utilities import linear_geometric_curve
from util.tensor_provider import TensorProvider
from util.utilities import ensure_folder, save_fig, redirect_stdout_to_file, close_stdout_file, SDataArray, \
tf_number_of_trainable_parameters
from datetime import datetime
def single_training(tensor_provider, model,
                    test_split, training_split,
                    base_path, eval_functions=None, return_predictions=False,
                    split_is_keys=False, access_restricted_data=False):
    """
    Train and evaluate a single model on one train/test split, storing all
    artifacts (log, metrics, pickles, ROC plots, the model itself and a
    "done.txt" marker) in a model-specific subdirectory of base_path.

    :param TensorProvider tensor_provider: Class providing all data to models.
    :param DetektorModel model: Model-class to train and test.
    :param list | np.ndarray test_split: List of program IDs or sentence-keys used for testing
                                         (depending on split_is_keys).
    :param list | np.ndarray training_split: List of program IDs or sentence-keys used for training
                                         (depending on split_is_keys).
    :param Path base_path: Path of directory where we can put results (in a subdirectory with the model's name).
    :param list[Evaluation] eval_functions: List of evaluation functions used to test models.
    :param bool return_predictions: If True, the method stores all model test-predictions and returns them as well.
                                    Can be used to determine whether errors are the same across models.
    :param bool split_is_keys:
        False: test_split and training_split are program numbers.
        True: test_split and training_split are sentence KEYS (list of (program_id, sentence_id)-tuples).
    :param bool access_restricted_data: If True, also use annotated keys from the restricted-access data.
    :return: (train results, test results, special train results, special test results, model summary)
             plus (train predictions, test predictions) when return_predictions is True.
    """
    # Create model-specific path and ensure directory
    results_path = model.results_path
    if results_path is None:
        results_path = model.create_model_path(results_path=base_path)
    ensure_folder(results_path)

    # Write the model's settings-name for easy identification of the run
    with Path(results_path, "name.txt").open("w") as file:
        file.write(model.generate_settings_name())

    # Redirect prints to a file and denote script start-time
    redirect_stdout_to_file(Path(results_path, "log.txt"))
    print("Script starting at: {}".format(datetime.now().strftime("%d-%m-%Y %H:%M:%S")))

    # Default evaluation functions
    if eval_functions is None:
        eval_functions = [Accuracy(), F1(), TruePositives(), TrueNegatives(), FalsePositives(), FalseNegatives(),
                          Samples(), AreaUnderROC(), ROC()]

    # Initialize arrays for holding results (single-value metrics go in the
    # SDataArrays; non-scalar results like ROC curves go in the dicts)
    special_results_train = dict()
    evaluation_names = [val.name() for val in eval_functions if val.is_single_value]
    classification_results_train = np.full((1, len(evaluation_names)), np.nan)
    classification_results_train = SDataArray(classification_results_train,
                                              name="Training Results",
                                              dims=["Model", "Evaluation"],
                                              coords=dict(Evaluation=evaluation_names,
                                                          Model=[model.name]))
    special_results_test = dict()
    classification_results_test = np.full((1, len(evaluation_names)), np.nan)
    classification_results_test = SDataArray(classification_results_test,
                                             name="Test Results",
                                             dims=["Model", "Evaluation"],
                                             coords=dict(Evaluation=evaluation_names,
                                                         Model=[model.name]))

    # Check if split is in keys and not programs
    if split_is_keys:
        train_idx = training_split
        test_idx = test_split
    # Otherwise use program-indices to get keys for training and test (the correct and default way)
    else:
        # Sentence keys
        if not access_restricted_data:
            keys = list(sorted(tensor_provider.accessible_annotated_keys))
        else:
            keys = list(sorted(tensor_provider.annotated_keys(access_restricted_data=True)))

        # Get program ids of each key
        program_ids = np.array(list(zip(*keys))[0])

        # Get test-indices
        test_idx = np.sum([program_ids == val for val in test_split], axis=0)
        test_idx = np.where(test_idx > 0.5)[0]

        # Get training-indices
        train_idx = np.sum([program_ids == val for val in training_split], axis=0)
        train_idx = np.where(train_idx > 0.5)[0]

        # Convert to keys
        train_idx = [keys[val] for val in train_idx]
        test_idx = [keys[val] for val in test_idx]

    # Sanity check: no leakage between training and test set
    assert not set(test_idx).intersection(set(train_idx)), "Overlap between training and test set."

    # Report
    if not split_is_keys:
        print("Test programs {}, using {} training samples and {} test samples."
              .format(test_split,
                      len(train_idx),
                      len(test_idx)))
    else:
        print("Training and testing with specifically selected keys. {} training and {} test."
              .format(len(train_idx), len(test_idx)))

    # Make and set BoW-vocabulary (built from training data only)
    bow_vocabulary = tensor_provider.extract_programs_vocabulary(train_idx)
    tensor_provider.set_bow_vocabulary(bow_vocabulary)

    # Get truth of train-set
    y_true_train = tensor_provider.load_labels(data_keys_or_idx=train_idx)

    # Get truth of test-set
    y_true = tensor_provider.load_labels(data_keys_or_idx=test_idx)

    # Initialize model
    model.initialize_model(tensor_provider=tensor_provider)

    # Report number of parameters (only meaningful for TensorFlow models)
    if model.save_type == "tf":
        with model._tf_graph.as_default():
            print("Number of trainable parameters: {}".format(tf_number_of_trainable_parameters()))

    # Fit model
    model.fit(tensor_provider=tensor_provider,
              train_idx=train_idx,
              verbose=2)

    # Predict on training-data
    print("\tPredicting on training data")
    y_pred_train, y_pred_train_binary = model.predict(tensor_provider=tensor_provider,
                                                      predict_idx=train_idx)
    y_pred_train = np.squeeze(y_pred_train)
    y_pred_train_binary = np.squeeze(y_pred_train_binary)
    train_predictions = y_pred_train

    # Predict on test-data for performance
    print("\tPredicting on test data")
    y_pred, y_pred_binary = model.predict(tensor_provider=tensor_provider,
                                          predict_idx=test_idx)
    y_pred = np.squeeze(y_pred)
    y_pred_binary = np.squeeze(y_pred_binary)

    # Store predictions
    test_predictions = y_pred

    # Evaluate with eval_functions
    print("\tRunning evaluation functions")
    evaluation_nr = 0
    for evalf in eval_functions:
        # Training evaluation
        assert y_pred_train.shape == y_true_train.shape, "y_pred ({}) and y_true ({}) " \
                                                         "do not have same shape".format(y_pred_train.shape,
                                                                                         y_true_train.shape)
        if evalf.is_single_value:
            evaluation_result = evalf(y_true=y_true_train,
                                      y_pred=y_pred_train,
                                      y_pred_binary=y_pred_train_binary)
            classification_results_train[0, evaluation_nr] = evaluation_result
        else:
            special_results_train[(model.name, evalf.name())] = evalf(y_true=y_true_train,
                                                                      y_pred=y_pred_train,
                                                                      y_pred_binary=y_pred_train_binary)

        # Test evaluation
        assert y_pred.shape == y_true.shape, "y_pred ({}) and y_true ({}) " \
                                             "do not have same shape".format(y_pred.shape, y_true.shape)
        if evalf.is_single_value:
            evaluation_result = evalf(y_true=y_true,
                                      y_pred=y_pred,
                                      y_pred_binary=y_pred_binary)
            classification_results_test[0, evaluation_nr] = evaluation_result
            # Column index advances once per single-value evaluation; the train
            # branch above used the same index for the same evaluation function.
            evaluation_nr += 1
        else:
            special_results_test[(model.name, evalf.name())] = evalf(y_true=y_true,
                                                                     y_pred=y_pred,
                                                                     y_pred_binary=y_pred_binary)

    # Save model
    print("\tSaving model")
    model.save_model()

    # Return list
    returns = [classification_results_train, classification_results_test,
               special_results_train, special_results_test, model.summary_to_string()]

    # Additional returns
    if return_predictions:
        returns.extend([train_predictions, test_predictions])

    ############################################
    # Print, plot and store!

    # Make summary
    model_summary = model.summary_to_string()

    # Write summary and mean results to results.txt
    results_train = classification_results_train.to_dataset_split("Model").to_dataframe()
    results_test = classification_results_test.to_dataset_split("Model").to_dataframe()
    with Path(results_path, "results.txt").open("w") as file:
        file.write(model_summary + "\n\n")
        print("Training\n")
        file.write(str(results_train) + "\n\n")
        print("Test\n")
        file.write(str(results_test) + "\n\n")

    # Store results (context managers ensure the pickles are flushed and closed)
    with Path(results_path, "results_train.p").open("wb") as file:
        pickle.dump(results_train, file)
    with Path(results_path, "results_test.p").open("wb") as file:
        pickle.dump(results_test, file)

    # Basic settings
    settings = dict()
    if not split_is_keys:
        settings["test_programs"] = test_split
        settings["training_programs"] = training_split
    else:
        settings["test_programs"] = "specific keys"
        settings["training_programs"] = "specific keys"
    with Path(results_path, "settings.p").open("wb") as file:
        pickle.dump(settings, file)

    # Print results for each data-set
    print("\nSingle training Results - TRAINING \n" + "-" * 75)
    print(results_train)
    print("\nSingle training Results - TEST \n" + "-" * 75)
    print(results_test)
    print("\nModel Summary \n" + "-" * 75)
    print(model_summary)

    # Plot ROC of training
    roc_key = (model.name, "ROC")
    if roc_key in special_results_train:
        positive_rate, negative_rate = special_results_train[roc_key]
        plot_roc(tp_rate=positive_rate,
                 fp_rate=negative_rate,
                 title="{} ROC Training".format(model.name))
        save_fig(Path(results_path, "ROC_Train"))

    # Plot ROC of test
    if roc_key in special_results_test:
        positive_rate, negative_rate = special_results_test[roc_key]
        plot_roc(tp_rate=positive_rate,
                 fp_rate=negative_rate,
                 title="{} ROC Test".format(model.name))
        save_fig(Path(results_path, "ROC_Test"))

    # Print ending
    print("Script ended at: {}".format(datetime.now().strftime("%d-%m-%Y %H:%M:%S")))
    close_stdout_file()

    # Write a file called done.txt to mark that the script is done
    with Path(results_path, "done.txt").open("w") as file:
        file.write("The deed is done. ")

    return tuple(returns)
if __name__ == "__main__":
    # Entry point: train one model on a random program-level train/test split
    # and store all results under <ProjectPaths.results>/single_train.
    # Print TensorFlow GPU information
    try:
        print("GPU's visible: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    except KeyError:
        print("No GPU's visible.")
    # Initialize tensor-provider (data-source)
    the_tensor_provider = TensorProvider(verbose=True)
    # Results path
    used_base_path = Path(ProjectPaths.results, "single_train")
    # Choose model (alternative model configurations are kept below, commented out)
    # model = GaussianProcess(
    #     tensor_provider=the_tensor_provider,
    #     use_bow=True,
    #     use_embedsum=True,
    #     verbose=True,
    #     results_path=results_path,
    #     n_jobs=-1
    # )
    # Number of programs held out for testing
    n_test_programs = 2
    n_batches = 3000
    # Learning-rate schedule: linear-geometric decay from 5e-4 towards 1e-10
    learning_rates = linear_geometric_curve(n=n_batches,
                                            starting_value=5e-4,
                                            end_value=1e-10,
                                            geometric_component=3. / 4,
                                            geometric_end=5)
    # a_model = BasicRecurrent(
    #     tensor_provider=the_tensor_provider,
    #     results_path=used_base_path,
    #     use_bow=True,
    #     n_batches=n_batches,
    #     batch_size=64,
    #     learning_rate_progression=learning_rates,
    #     recurrent_units=400,
    #     feedforward_units=[200],
    #     dropouts=[1],
    #     dropout_rate=0.65,
    #     l2_weight_decay=1e-6,
    #     recurrent_neuron_type=tf.nn.rnn_cell.GRUCell,
    #     training_curve_y_limit=1000
    # )
    a_model = BasicDNN(
        tensor_provider=the_tensor_provider,
        units=[150, 50],
        n_batches=n_batches,
        learning_rate_progression=learning_rates,
        dropouts=[1, 2],
        dropout_rate=0.5,
        results_path=used_base_path
    )
    # a_model = LogisticRegression(
    #     tensor_provider=the_tensor_provider,
    # )
    # a_model = MLP(
    #     tensor_provider=the_tensor_provider,
    # )
    # a_model = SVMSK(
    #     tensor_provider=the_tensor_provider,
    #     verbose=True
    # )
    # a_model = LogisticRegressionSK(
    #     tensor_provider=the_tensor_provider,
    # )
    # a_model = PULogisticRegressionSK(
    #     tensor_provider=the_tensor_provider,
    # )
    # Select test-programs at random; the remaining programs form the training set.
    # NOTE(review): no RNG seed is set, so the split differs on every run.
    unique_programs = np.array(sorted(set(the_tensor_provider.accessible_annotated_program_ids)))
    used_test_programs = np.random.choice(unique_programs, size=n_test_programs, replace=False)
    used_training_programs = np.array(sorted(set(unique_programs).difference(set(used_test_programs))))
    # Run training on a single model
    single_training(
        tensor_provider=the_tensor_provider,
        model=a_model,
        test_split=used_test_programs,
        training_split=used_training_programs,
        base_path=used_base_path
    )
|
{"hexsha": "c53c47749eeb2bfb06815053c1a35256fab19c1c", "size": 14780, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_files/single_train.py", "max_stars_repo_name": "sfvnDTU/deep_detektor", "max_stars_repo_head_hexsha": "3413b805b1d108480358a3f50ec5bb18b1d6845b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-10-23T13:29:56.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-23T09:03:57.000Z", "max_issues_repo_path": "run_files/single_train.py", "max_issues_repo_name": "sfvnDTU/deep_detektor", "max_issues_repo_head_hexsha": "3413b805b1d108480358a3f50ec5bb18b1d6845b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-10-30T15:32:54.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-30T17:32:54.000Z", "max_forks_repo_path": "run_files/single_train.py", "max_forks_repo_name": "sfvnDTU/deep_detektor", "max_forks_repo_head_hexsha": "3413b805b1d108480358a3f50ec5bb18b1d6845b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2849162011, "max_line_length": 115, "alphanum_fraction": 0.6263870095, "include": true, "reason": "import numpy", "num_tokens": 3055}
|
import numpy as np
from typing import Tuple
from typing import Union
from typing import Sequence
class ReplayBuffer:
    """Fixed-size cyclic experience-replay buffer for off-policy RL.

    Stores (state, action, reward, new_state, done) transitions in
    pre-allocated numpy arrays; once full, the oldest transitions are
    overwritten.
    """

    def __init__(self, max_size: int, input_shape: Union[Sequence[int], int], num_actions: int):
        """
        :param max_size: Maximum number of transitions kept in memory.
        :param input_shape: Observation shape - either a single feature
            dimension (int) or a full shape tuple/list.
        :param num_actions: Dimensionality of the action vector.
        """
        # Total number of transitions ever stored (may exceed memory_size).
        self.memory_counter = 0
        self.memory_size = int(max_size)
        # Normalize to a tuple so sequence shapes work too; the previous
        # np.zeros((size, input_shape)) raised TypeError for tuples even
        # though the annotation promised Sequence[int] support.
        state_shape = (input_shape,) if isinstance(input_shape, int) else tuple(input_shape)
        self.state_memory = np.zeros((self.memory_size, *state_shape))
        self.action_memory = np.zeros((self.memory_size, num_actions))
        self.reward_memory = np.zeros(self.memory_size)
        self.new_state_memory = np.zeros((self.memory_size, *state_shape))
        self.done_memory = np.zeros(self.memory_size, dtype=np.float32)

    def store_transition(self, state: Union[list, np.ndarray], action, reward: float, new_state: Union[list, np.ndarray], done: Union[int, bool]):
        """Insert one transition, overwriting the oldest slot once full."""
        index = self.memory_counter % self.memory_size  # cyclic write position
        self.state_memory[index] = state
        self.new_state_memory[index] = new_state
        self.action_memory[index] = action
        self.reward_memory[index] = reward
        self.done_memory[index] = done  # bool is stored as 0.0/1.0
        self.memory_counter += 1

    def sample_buffer(self, batch_size: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Sample ``batch_size`` stored transitions uniformly (with replacement).

        :return: (states, actions, rewards, new_states, dones) batch arrays.
        """
        # Only index into slots that have actually been written.
        max_mem = min(self.memory_counter, self.memory_size)
        batch = np.random.choice(max_mem, batch_size)
        state_batch = self.state_memory[batch]
        action_batch = self.action_memory[batch]
        reward_batch = self.reward_memory[batch]
        new_state_batch = self.new_state_memory[batch]
        done_batch = self.done_memory[batch]
        return state_batch, action_batch, reward_batch, new_state_batch, done_batch
|
{"hexsha": "9433bb919fedea7b26fd489b90f8328304f2c439", "size": 1693, "ext": "py", "lang": "Python", "max_stars_repo_path": "soft_actor_critic/memory.py", "max_stars_repo_name": "thomashirtz/pytorch-soft-actor-critic", "max_stars_repo_head_hexsha": "501810da3c8d470f74b646e7b822b07378edc8be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-05-12T20:46:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T12:05:58.000Z", "max_issues_repo_path": "soft_actor_critic/memory.py", "max_issues_repo_name": "thomashirtz/pytorch-soft-actor-critic", "max_issues_repo_head_hexsha": "501810da3c8d470f74b646e7b822b07378edc8be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "soft_actor_critic/memory.py", "max_forks_repo_name": "thomashirtz/pytorch-soft-actor-critic", "max_forks_repo_head_hexsha": "501810da3c8d470f74b646e7b822b07378edc8be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-21T08:27:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-03T17:47:32.000Z", "avg_line_length": 40.3095238095, "max_line_length": 142, "alphanum_fraction": 0.6987595983, "include": true, "reason": "import numpy", "num_tokens": 382}
|
'''
Takes in video_list as input, which consist of paths to jpg files of all testing video.
Returns result stored in json file(a list of dictionaries):
Element can be original clip features or mean feature of a video
'''
import os
import sys
import json
import subprocess
import numpy as np
import torch
from torch import nn
import time
from opts import parse_opts
from model import generate_model
from mean import get_mean
from classify import classify_video
if __name__=="__main__":
    # Entry point: extract per-clip (or per-video mean) CNN features for every
    # video listed in opt.input and dump the results to a JSON file (opt.output).
    start_time = time.time()
    opt = parse_opts()
    opt.mean = get_mean()
    # Architecture tag, e.g. "resnet-34"; must match the checkpoint's arch.
    opt.arch = '{}-{}'.format(opt.model_name, opt.model_depth)
    opt.sample_size = 112      # spatial input size in pixels
    opt.sample_duration = 16   # temporal input size: frames per clip
    print('please check: number of classes is {}'.format(opt.n_classes))
    # 'ori' keeps every clip feature; 'mean' stores one averaged feature per video.
    assert opt.clip_vid in ['ori','mean']
    downrate = opt.down_rate
    model = generate_model(opt)
    print('loading model {}'.format(opt.model))
    model_data = torch.load(opt.model)
    assert opt.arch == model_data['arch']
    model.load_state_dict(model_data['state_dict'])
    model.eval()
    if opt.verbose:
        print(model)
    # One video path per line; row[:-1] strips the trailing newline.
    input_files = []
    with open(opt.input, 'r') as f:
        for row in f:
            input_files.append(row[:-1])
    class_names = []
    with open('class_names_list') as f:
        for row in f:
            class_names.append(row[:-1])
    # NOTE(review): ffmpeg_loglevel is set but never used below (the ffmpeg
    # call is commented out) — presumably a leftover from frame extraction.
    ffmpeg_loglevel = 'quiet'
    if opt.verbose:
        ffmpeg_loglevel = 'info'
    # Remove any scratch directory left over from a previous run.
    if os.path.exists('tmp'):
        subprocess.call('rm -rf tmp', shell=True)
    outputs = []
    for cnt, input_file in enumerate(input_files):
        # if (cnt % 100) ==0:
        #     print('on the {} video on the list'.format(cnt))
        video_path = os.path.join(opt.video_root, input_file)
        if os.path.exists(video_path):
            print(video_path)
            # subprocess.call('mkdir tmp', shell=True)
            # subprocess.call('ffmpeg -i {} tmp/image_%05d.jpg'.format(video_path),
            #                 shell=True)
            video_name = os.path.basename(input_file)
            result = classify_video(video_path, video_name, class_names, model, opt, downrate)
            if opt.clip_vid == 'mean':
                # Average the per-clip features into one vector for the video,
                # then drop the individual clips to keep the JSON small.
                vid_feature = []
                for clip in result['clips']:
                    # clip is a dictionary with keys "segment" and "features"
                    vid_feature.append(clip['features'])
                mean_feature = np.mean(vid_feature, axis=0)
                result['clips'] = []
                result['mean_feature'] = mean_feature.tolist()
                outputs.append(result)
            elif opt.clip_vid == 'ori':
                outputs.append(result)
                # subprocess.call('rm -rf tmp', shell=True)
        else:
            print('{} does not exist'.format(input_file))
    if os.path.exists('tmp'):
        subprocess.call('rm -rf tmp', shell=True)
    with open(opt.output, 'w') as f:
        json.dump(outputs, f)
    print("--- %s seconds ---" % (time.time() - start_time))
|
{"hexsha": "cd34911e9bfca3da1cccf2346cfe755b8e5a3077", "size": 3003, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "MYusha/video-classification-3d-cnn-pytorch", "max_stars_repo_head_hexsha": "12e317c65df5306235da6bf2e0d872babbe5cf65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "MYusha/video-classification-3d-cnn-pytorch", "max_issues_repo_head_hexsha": "12e317c65df5306235da6bf2e0d872babbe5cf65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "MYusha/video-classification-3d-cnn-pytorch", "max_forks_repo_head_hexsha": "12e317c65df5306235da6bf2e0d872babbe5cf65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6413043478, "max_line_length": 94, "alphanum_fraction": 0.6053946054, "include": true, "reason": "import numpy", "num_tokens": 675}
|
function process_reload_hash(request::HTTP.Request, state::HandlerState)
    # Snapshot the reload status BEFORE clearing the one-shot fields below.
    reload = state.reload
    payload = (
        reloadHash = reload.hash,
        hard = reload.hard,
        packages = keys(state.cache.resources.files),
        files = reload.changed_assets
    )
    # `hard` and `changed_assets` are consumed by this request; reset them so
    # the next poll does not re-trigger the same reload.
    reload.hard = false
    reload.changed_assets = []
    body = JSON3.write(payload)
    return HTTP.Response(200, ["Content-Type" => "application/json"], body = body)
end
|
{"hexsha": "38952c1c51bffa1435b273db3c0b03c51ae59115", "size": 447, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/handler/processors/reload_hash.jl", "max_stars_repo_name": "waralex/Dash.jl", "max_stars_repo_head_hexsha": "f0606e07d2479fd8b5be1da4a6a59656e24acfa3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/handler/processors/reload_hash.jl", "max_issues_repo_name": "waralex/Dash.jl", "max_issues_repo_head_hexsha": "f0606e07d2479fd8b5be1da4a6a59656e24acfa3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/handler/processors/reload_hash.jl", "max_forks_repo_name": "waralex/Dash.jl", "max_forks_repo_head_hexsha": "f0606e07d2479fd8b5be1da4a6a59656e24acfa3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.25, "max_line_length": 103, "alphanum_fraction": 0.6823266219, "num_tokens": 95}
|
# Match TA assignments to class lists and export the results to Excel.
# NOTE(review): df_faculty, df_students, FacTA_class, and
# find_matched_students are expected to come from the two includes below —
# confirm they are defined in tools.jl / loadFiles.jl.
include("tools.jl")
include("loadFiles.jl")
# Start from an empty workbook; sheets are added/renamed per option tab.
xf = XLSX.open_empty_template()
# Testing class matches
#counter = 1
# Iterate over all classes:
classFile = "/Users/cfranken/GDrive/work/Caltech/OptionRepWork/TA2021/GPSClassList_2021.xlsx"
tabs = ["Division", "Geology", "Geophysics", "Geobiology", "Geochemistry", "Planetary Science", "ESE", "Special Courses"]
data_per_option = []
for ii in eachindex(tabs)
    println(tabs[ii])
    # One sheet per option; add bookkeeping columns for the match results.
    df_classes = DataFrame(XLSX.readtable(classFile, tabs[ii])...)
    df_classes."Matched" .= ""
    df_classes."Unmatched" .= ""
    for class in df_classes."Class Name"
        foundEntry = false
        # Scan every faculty row and every course column for an exact
        # class-name match; a class can match more than one faculty entry.
        for row in eachrow(df_faculty)
            for course in keys(FacTA_class)
                #filteredClass = dropmissing(df_faculty,FacTA_class[course])
                if ismissing(row[FacTA_class[course]]) == false
                    # Look for match:
                    if class == row[FacTA_class[course]]
                        foundEntry = true
                        #println(row[FacTA_class[course]])
                        find_matched_students(df_classes, df_students, row, course)
                    end
                end
            end
        end
        # Fall back to the class-only lookup when no faculty entry matched.
        if !foundEntry
            find_matched_students(df_classes, df_students, class)
            #println(class, " ", !foundEntry)
        end
    end
    # Export to Excel:
    if xf[1].name == "Sheet1"
        # first sheet already exists in template file
        sheet = xf[1]
        XLSX.rename!(sheet, tabs[ii])
        XLSX.writetable!(sheet, collect(DataFrames.eachcol(df_classes)), DataFrames.names(df_classes))
    else
        println("Adding new tab ",tabs[ii] )
        sheet = XLSX.addsheet!(xf, tabs[ii])
        XLSX.writetable!(sheet, collect(DataFrames.eachcol(df_classes)), DataFrames.names(df_classes))
    end
    #push!(data_per_option,df_classes)
end
outFile = "/Users/cfranken/GDrive/work/Caltech/OptionRepWork/TA2021/GPS_TA_2021_matchedList.xlsx"
XLSX.writexlsx(outFile, xf, overwrite=true)
|
{"hexsha": "918a2ccdd7484a78cc10f3652e3f33924ce0cec6", "size": 2036, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "performAnalysis_v2.jl", "max_stars_repo_name": "cfranken/TA-matching", "max_stars_repo_head_hexsha": "d7bce40f21b22b9297edc5c40f872293cb71add2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "performAnalysis_v2.jl", "max_issues_repo_name": "cfranken/TA-matching", "max_issues_repo_head_hexsha": "d7bce40f21b22b9297edc5c40f872293cb71add2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "performAnalysis_v2.jl", "max_forks_repo_name": "cfranken/TA-matching", "max_forks_repo_head_hexsha": "d7bce40f21b22b9297edc5c40f872293cb71add2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0181818182, "max_line_length": 122, "alphanum_fraction": 0.6227897839, "num_tokens": 510}
|
c
      subroutine kalman(F,dta,Y,p,it,w,v,x,xt,imax,m,atime,stn)
c
c*********************************************************************
c
c     Routine to apply Kalman Filter to a set of obs for QC purposes.
c
c     Original: John McGinley, NOAA/FSL Spring 1998
c     Changes:
c       21 Aug 1998  Peter Stamus, NOAA/FSL
c          Make code dynamic, housekeeping changes, for use in LAPS.
c       09 Dec 1999  John McGinley and Peter Stamus, NOAA/FSL
c          New version; additional housekeeping changes too.
c
c     Argument notes (added for review; confirm against callers):
c       F      state transition matrix (imax x imax, declared m x m)
c       dta    prior state estimate; copied into X below
c       Y      observation vector for the update step
c       p      state error covariance P (Fortran names are
c              case-insensitive, so p and P are the same variable)
c       it     received but never referenced in this routine
c       w, v   model (W) and observation (V) error covariances
c       x, xt  analysis state and forecast state (outputs)
c       imax   number of active stations; m is the declared array size
c       atime  time label passed to the writev diagnostics
c       stn    5-character station names for the final printout
c
c*********************************************************************
c
      Real K(m,m),P(m,m),F(m,m),II(m,m)
      Real H(m,m),HT(m,m),FT(m,m),G(m)
      Real X(m),Y(m),XT(m)
      Real PT(m,m),ZZ(m,m)
      Real A(m,m),B(m),C(m),Z(m),D(m,m),E(m,m)
      Real w(m,m),v(m,m)
      real dta(m),UU(m,m),VV(m,m)
      integer sca(2,2),scb(2,2),scf(2,2),on,off
      character atime*(*),stn(m)*5
c
c     Initialize arrays
c
      on=1
      off=0
      iiii=20998
c
c     fill initial matrix values
c
      call zero(II, m,m)
      call zero( H, m,m)
      call zero(ZZ, m,m)
      call zero(PT, m,m)
      call zero(A, m,m)
      call zero(E, m,m)
      call zero(HT,m,m)
c
      do i=1,2
      do j=1,2
         sca(i,j) = 0
         scf(i,j) = 0
         scb(i,j) = 0
      enddo !j
      enddo !i
c
c     writeout parameter settings
c
c     II is the identity; H (the observation operator) is also the
c     identity here: each station observes its own state directly.
      Do i=1,imax
         Z(i) = 0.
         II(i,i) = 1.
         H(i,i) = 1.
      enddo !i
c
c     Set obs
c     first guess - initial
c
      Do i=1,imax
         X(i) = dta(i)
      enddo !i
c
c     XT=FX   (forecast step: propagate the state with F)
c
      Call mvmult(F,X,XT,imax,imax,1,m)
      call trans(F,FT,imax,imax,m)
c     call writev(F,imax,imax,m,' F ',atime,on ,1.0)
c     call writev(X,imax,1,m,' X ',atime,on,10000.)
c     call writev(XT,imax,1,m,' XT ',atime,on,10000.)
c
c     PT=FPFT+T   (forecast error covariance; W is the model noise)
c
      call mvmult(F,P,A,imax,imax,imax,m)
      call mvmult(A,FT,PT,imax,imax,imax,m)
      call addmv(PT,W,PT,imax,imax,m)
      call writev(PT,imax,imax,m,' PT ',atime,off,0.)
c
c     K=PTH/(HPTHT+V)   (Kalman gain)
c
      call mvmult(H,PT,A,imax,imax,imax,m)
      call trans(H,HT,imax,imax,m)
      Call mvmult(A,HT,E,imax,imax,imax,m)
      call addmv(E,V,ZZ,imax,imax,m)
      call writev(ZZ,imax,imax,m,'HPTHT+V 2INV',atime,off ,0.)
c     If matrixanal reports ZZ is (near) diagonal, take the cheap
c     diagonal inverse and jump past the SVD solve.
      idiag=0
      call matrixanal(ZZ,imax,imax,m,idiag, ' HPTHT+V ')
      if(idiag.eq.1) then
         call fastinv(ZZ,imax,imax,m)
         call mvmult(PT,H,A,imax,imax,imax,m)
         call mvmult(A,ZZ,K,imax,imax,imax,m)
         go to 34
      endif
c     General case: solve (HPTHT+V)' column systems by SVD, zeroing
c     singular values below 1e-6 of the largest for stability.
      call trans(ZZ,A,imax,imax,m)
      call replace(A,UU,imax,imax,m,m)
      call svdcmp(UU,imax,imax,m,m,B,VV,m)
c     call writev(B ,imax,1,m,'DIAG WJ ',atime,on,0.)
c     call writev(UU,imax,imax,m,' UU svdcmp ',atime,off,0.)
c     call writev(VV,imax,imax,m,' VV svdcmp ',atime,off,0.)
      wmax=0.
      do j=1,imax
         if(b(j) .gt. wmax) wmax = b(j)
         g(j) = b(j)
      enddo !j
      wmin=wmax*1.e-6
      do j=1,imax
         if(b(j) .lt. wmin) b(j) = 0.
      enddo !j
c     Back-substitute one column of (PT H)' at a time; the collected
c     solutions (transposed) form the Kalman gain K.
      call mvmult(PT,H,A,imax,imax,imax,m)
      call trans(A,D ,imax,imax,m)
      do j=1,imax
         do i=1,imax
            c(i) = d(i,j)
         enddo !i
         call svbksb(UU,b,VV,imax,imax,m,m,c,z,m)
         do i=1,imax
            zz(i,j) = z(i)
         enddo !i
      enddo!on j
      call trans(ZZ,K,imax,imax,m)
c     call invert(A,imax,m,E,m)
c
c.....	This is single value decomposition solution for A
c
c     NOTE(review): E is zeroed immediately before being used as a
c     factor in mvmult below, so UU (and the A rebuilt from it) come
c     out zero; A is overwritten later before use.  Confirm whether
c     this reconstruction block is dead code.
      call trans(UU,zz,imax,imax,m)
      call zero(E,m,m)
      call mvmult(E,ZZ,UU,imax,imax,imax,m)
      call mvmult(VV,UU,ZZ,imax,imax,imax,m)
      call trans(ZZ,A ,imax,imax,m)
c     call writev(D,imax,imax,m,'PT TRANS ',atime,off,0.)
c     call writev(A,imax,imax,m,'AT INVERTED ',atime,off,0.)
   34 call writev(K,imax,imax,m,'KALMAN GAIN ',atime,off,0.)
c
c.....	Estimate obs loop
c
c     X=XT+K(Y-HXT)   (analysis update with innovation Y-HXT)
c
      call mvmult(H,XT,C,imax,imax,1,m)
      call submv(Y,C,B,imax,1,m)
      call mvmult(K,B,C,imax,imax,1,m)
      call addmv(XT,C,X,imax,1,m)
c
c     P=(I-K)PT   (analysis error covariance)
c
      call mvmult(K,H,E,imax,imax,imax,m)
      call submv(II,E,A,imax,imax,m)
      call mvmult(A,PT,P,imax,imax,imax,m)
c
c     Per-station summary; values are printed with the 10000. offset
c     removed (states appear to be stored offset by 10000).
      sum = 0.
      write(6,2000)
 2000 format(1x,' Stn Indx',' Kalman X ',' Forecast ',' Observatn'
     &,' KalmGn',' W ',' V ')
      do i=1,imax
         write(6,1098) stn(i),i,X(i)-10000.,XT(i)-10000.,
     &                 Y(i)-10000.,K(i,i),w(i,i),v(i,i)
 1098    format(1x,a5,i4,3f10.3,f7.4,2f10.3)
         sum=sum+K(i,i)
      enddo !i
      print*, 'MEAN KALMAN ',sum/float(imax)
      write(6,*) 'MEAN KALMAN ',sum/float(imax)
      call writev(P,imax,imax,m,'ANAL COV ERR',atime,off,0.)
c
      return
      end
c
c
      Subroutine kalmod(F,yta,byta,dta,ta,wmt,wot,wbt,offset,
     &                  imax,mwt,m)
c
c*********************************************************************
c
c     Kalman Filter tool.
c
c     Builds the state-transition matrix F and the neighbour ("buddy")
c     trend byta for each of the imax stations by blending three trend
c     sources (yta: observed trend, dta: model trend, byta: buddy
c     trend) with coefficients derived from the error weights
c     wmt/wot/wbt.  mwt holds the neighbour weights per station, and
c     offset shifts the state ta to avoid division by zero.
c     NOTE(review): the exact pairing of wmt/wot/wbt with the three
c     trend terms is inferred from the a/b/c blend below; confirm
c     against the calling code.
c
c     Original: John McGinley, NOAA/FSL December 1999
c     Changes:
c
c       09 Dec 1999 Peter Stamus, NOAA/FSL
c          Housekeeping changes.
c
c*********************************************************************
c
      real yta(m),byta(m),dta(m),ta(m),wmt(m),wot(m),wbt(m)
      real mwt(m,m),F(m,m),a,b,c
c
      do i=1,imax
c        Blending coefficients; each averages two of the three weights
c        so that a + b + c = 1.
         sum=wmt(i)+wot(i)+wbt(i)
         a=0.5*(wmt(i)+wbt(i))/sum
         b=0.5*(wot(i)+wmt(i))/sum
         c=0.5*(wbt(i)+wot(i))/sum
         sum=0.
c        NOTE(review): sum1 is set here but never used.
         sum1=0.
c        mwt(i,i)=1 means station i has no weighted neighbours, so its
c        buddy trend is forced to zero.
         if(mwt(i,i).eq.1.) then
            print*,'Station ',i,' is isolated: set buddy trend to 0'
            sum=0.
         else
c           Accumulate the normalized neighbour-weighted trend; the
c           off-diagonal F entries are zeroed (stations couple only
c           through byta, not through F).  The go to 1 skips the
c           diagonal term.
            do j = 1,imax
               if(i.eq.j) go to 1
               sum=mwt(i,j)/(1.-mwt(i,i))*yta(j)+sum
               F(i,j)=0.
 1             continue
            enddo
         endif
         byta(i)=sum
c        Diagonal transition element: blended growth factor of the
c        three trends relative to the offset-shifted state.
         F(i,i)=a*(1.+yta(i)/(ta(i)+offset)) +
     &          c*(1.+dta(i)/(ta(i)+offset)) +
     &          b*(1+byta(i)/(ta(i)+offset))
      enddo
c
      return
      end
|
{"hexsha": "29e5114f0ec1f9fe9d76ac6c2abe96ee1943466e", "size": 5952, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/ingest/sfc_qc/kalman.f", "max_stars_repo_name": "maxinye/laps-mirror", "max_stars_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_stars_repo_licenses": ["Intel", "Unlicense", "OLDAP-2.2.1", "NetCDF"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-04-05T12:28:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-29T06:37:29.000Z", "max_issues_repo_path": "src/ingest/sfc_qc/kalman.f", "max_issues_repo_name": "longwosion/laps-mirror", "max_issues_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_issues_repo_licenses": ["Intel", "NetCDF", "OLDAP-2.2.1", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ingest/sfc_qc/kalman.f", "max_forks_repo_name": "longwosion/laps-mirror", "max_forks_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_forks_repo_licenses": ["Intel", "NetCDF", "OLDAP-2.2.1", "Unlicense"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-04-27T12:51:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-19T13:57:44.000Z", "avg_line_length": 27.5555555556, "max_line_length": 70, "alphanum_fraction": 0.4968077957, "num_tokens": 2199}
|
# step3_train.py
"""Use projected data to learn reduced-order models via Tikhonov-regularized
Operator Inference with regularization hyperparameter selection.
Examples
--------
## --single: train and save a single ROM for a given λ1, λ2.
# Use 10,000 projected snapshots to learn a ROM of dimension r = 24
# with regularization hyperparameters λ1 = 400, λ2 = 21000.
$ python3 step3_train.py --single 10000 24 400 21000
## --gridsearch: train over a grid of candidates for λ1 and λ2, saving
only the stable ROM with least training error.
# Use 20,000 projected snapshots to learn a ROM of dimension r = 40 and save
# the one with the regularization resulting in the least training error and
# for which the integrated POD modes stay within 150% of the training data in
# magnitude for 60,000 time steps. For the regularization hyperparameters, test
# each point in the 4x5 logarithmically-spaced grid [500,9000]x[8000,10000]
$ python3 step3_train.py --gridsearch 10000 40 5e2 9e3 4 8e3 1e4 5
--testsize 60000 --margin 1.5
## --minimize: given initial guesses for λ1 and λ2, use Nelder-Mead search
to train and save a ROM that is locally optimal in the
regularization hyperparameter space.
# Use 10,000 projected snapshots to learn a ROM of dimension r = 30 and save
# the one with the regularization resulting in the least training error and
# for which the integrated POD modes stay within 150% of the training data in
# magnitude for 60,000 time steps. For the regularization hyperparameters,
# search starting from λ1 = 300, λ2 = 7000.
$ python3 step3_train.py --minimize 10000 30 300 7000
--testsize 60000 --margin 1.5
Indicating 3 regularization hyperparameters instead of 2 results in training a
cubic model.
Loading Results
---------------
>>> import utils
>>> trainsize = 10000 # Number of snapshots used as training data.
>>> num_modes = 44 # Number of POD modes.
>>> regs = 1e4, 1e5 # OpInf regularization hyperparameters.
>>> rom = utils.load_rom(trainsize, num_modes, reg)
Command Line Arguments
----------------------
"""
import logging
import itertools
import numpy as np
import scipy.optimize as opt
import rom_operator_inference as opinf
import config
import utils
# Value returned by the training-error objectives when a candidate ROM
# violates the POD bound, so the optimizer steers away from unstable models.
_MAXFUN = 100                       # Artificial ceiling for optimization routine.
# Subroutines =================================================================
def get_modelform(regs):
    """Return the rom_operator_inference ROM modelform that is appropriate for
    the number of regularization parameters (fully quadratic or fully cubic).

    Parameters
    ----------
    regs : two or three non-negative floats
        Regularization hyperparameters for Operator Inference.
        A single scalar is treated like the two-hyperparameter case.

    Returns
    -------
    modelform : str
        'cAHB' for fully quadratic ROM; 'cAHGB' for fully cubic ROM.

    Raises
    ------
    ValueError
        If more than three hyperparameters are given.
    """
    count = 1 if np.isscalar(regs) else len(regs)
    if count in (1, 2):
        return "cAHB"
    if count == 3:
        return "cAHGB"
    raise ValueError("expected 2 or 3 regularization hyperparameters")
def check_lstsq_size(trainsize, r, modelform="cAHB"):
    """Report the number of unknowns in the Operator Inference problem,
    compared to the number of snapshots. Ask user for confirmation before
    attempting to solve an underdetermined problem.

    Parameters
    ----------
    trainsize : int
        Number of training snapshots (rows of the least-squares system).
    r : int
        Dimension of the ROM.
    modelform : str
        Modelform string understood by opinf.lstsq.lstsq_size.

    Returns
    -------
    int
        Number of unknowns per equation (columns of the system).
    """
    # Number of unknown columns for one equation of the least-squares problem.
    num_unknowns = opinf.lstsq.lstsq_size(modelform, r, m=1)
    info = f"{trainsize} snapshots, {r}x{num_unknowns} DOFs ({r*num_unknowns} total)"
    print(info)
    logging.info(info)
    # Fewer equations than unknowns: require interactive confirmation.
    if num_unknowns > trainsize:
        warning = "LSTSQ SYSTEM UNDERDETERMINED"
        logging.warning(warning)
        answer = input(f"{warning}! CONTINUE? [y/n] ")
        if answer != "y":
            raise ValueError(warning)
    return num_unknowns
def check_regs(regs):
    """Assure there are the correct number of non-negative regularization
    hyperparameters.

    Parameters
    ----------
    regs : list/ndarray of two or three non-negative floats
        Regularization hyperparameters.

    Returns
    -------
    regs : the validated hyperparameters (a scalar is wrapped in a list).

    Raises
    ------
    ValueError
        If the count is not two or three, or any value is negative.
    """
    # Promote a lone scalar to a one-element list so len() applies
    # (it then fails the count check below, matching the documented contract).
    regs = [regs] if np.isscalar(regs) else regs
    nregs = len(regs)
    if nregs not in (2, 3):
        raise ValueError(f"expected 2 or 3 hyperparameters, got {nregs}")
    for λ in regs:
        if λ < 0:
            raise ValueError("regularization hyperparameters must be "
                             "non-negative")
    return regs
def regularizer(r, λ1, λ2, λ3=None):
    """Return the regularizer that penalizes all operator elements by λ1,
    except for the quadratic operator elements, which are penalized by λ2.
    If λ3 is given, the entries of the cubic operator are penalized by λ3.

    Parameters
    ----------
    r : int
        Dimension of the ROM.
    λ1 : float
        Regularization hyperparameter for the non-quadratic operators.
    λ2 : float
        Regularization hyperparameter for the quadratic operator.
    λ3 : float or None
        Regularization hyperparameter for the cubic operator (if present).

    Returns
    -------
    diag(𝚪) : (d,) ndarray
        Diagonal entries of the dxd regularizer 𝚪.
    """
    # Column layout of the operator matrix: [c | A | H | (G) | B].
    linear_end = 1 + r                        # after c (1) and A (r)
    quad_end = linear_end + r*(r + 1)//2      # after compressed H
    if λ3 is None:
        diag = np.full(quad_end + 1, λ1)      # +1 column for B
        diag[linear_end:-1] = λ2
        return diag
    cubic_end = quad_end + r*(r + 1)*(r + 2)//6   # after compressed G
    diag = np.full(cubic_end + 1, λ1)
    diag[linear_end:quad_end] = λ2
    diag[quad_end:-1] = λ3
    return diag
def is_bounded(q_rom, B, message="bound exceeded"):
    """Return True if the absolute integrated POD coefficients lie within the
    given bound.

    Parameters
    ----------
    q_rom : (r,len(time_domain)) ndarray
        Integrated POD modes, i.e., the direct result of integrating a ROM.
    B : float > 0
        The bound that the integrated POD coefficients must satisfy.
    message : str
        Text printed and logged when the bound is violated.
    """
    # Guard clause: within bound means the candidate ROM is acceptable.
    if np.abs(q_rom).max() <= B:
        return True
    print(message+"...", end='')
    logging.info(message)
    return False
def save_trained_rom(trainsize, r, regs, rom):
    """Save the trained ROM with the specified attributes.

    Parameters
    ----------
    trainsize : int
        Number of snapshots used to train the ROM.
    r : int
        Dimension of the ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.
    regs : two or three non-negative floats
        regularization hyperparameters (first-order, quadratic, cubic) used
        in the Operator Inference least-squares problem for training the ROM.
    rom : rom_operator_inference.InferredContinuousROM
        Actual trained ROM object. Must have a `save_model()` method.
    """
    # Location is fully determined by the training configuration.
    destination = config.rom_path(trainsize, r, regs)
    # The shared POD basis is stored separately, so don't duplicate it here.
    rom.save_model(destination, save_basis=False, overwrite=True)
    logging.info(f"ROM saved to {destination}")
# Main routines ===============================================================
def train_single(trainsize, r, regs):
    """Train and save a ROM with the given dimension and regularization
    hyperparameters.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.
    regs : two or three non-negative floats
        Regularization hyperparameters (first-order, quadratic, cubic) to use
        in the Operator Inference least-squares problem for training the ROM.
    """
    utils.reset_logger(trainsize)
    # Validate inputs.
    modelform = get_modelform(regs)
    check_lstsq_size(trainsize, r, modelform)
    check_regs(regs)
    # Load training data (projected snapshots, their time derivatives, and
    # the corresponding time domain).
    Q_, Qdot_, t = utils.load_projected_data(trainsize, r)
    # Input/forcing signal evaluated on the training time domain —
    # presumably scalar-valued (m=1 elsewhere); confirm in config.
    U = config.U(t)
    # Train and save the ROM.
    with utils.timed_block(f"Training ROM with k={trainsize:d}, "
                           f"{config.REGSTR(regs)}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom.fit(None, Q_, Qdot_, U, P=regularizer(r, *list(regs)))
        save_trained_rom(trainsize, r, regs, rom)
def train_gridsearch(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension over a grid of potential
    regularization hyperparameters, saving only the ROM with the least
    training error that satisfies a bound on the integrated POD coefficients.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.
    regs : (float, float, int, float, float, int)
        Bounds and sizes for the grid of regularization hyperparameters.
        First-order: search in [regs[0], regs[1]] at regs[2] points.
        Quadratic: search in [regs[3], regs[4]] at regs[5] points.
        Cubic: search in [regs[6], regs[7]] at regs[8] points.
    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.
    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).

    Returns
    -------
    regs : ndarray
        Regularization hyperparameter winners.
    """
    utils.reset_logger(trainsize)
    # Parse arguments: 3 entries (low, high, count) per hyperparameter.
    if len(regs) not in [6, 9]:
        raise ValueError("6 or 9 regs required (bounds / sizes of grids")
    grids = []
    for i in range(0, len(regs), 3):
        # Validate the (low, high) bounds pair, then build a log-spaced grid.
        check_regs(regs[i:i+2])
        grids.append(np.logspace(np.log10(regs[i]),
                                 np.log10(regs[i+1]), int(regs[i+2])))
    modelform = get_modelform(grids)
    d = check_lstsq_size(trainsize, r, modelform)
    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])
    # Compute the bound to require for integrated POD modes.
    M = margin * np.abs(Q_).max()
    # Create a solver mapping regularization hyperparameters to operators
    # (factor the least-squares system once; re-solve per candidate below).
    num_tests = np.prod([grid.size for grid in grids])
    print(f"TRAINING {num_tests} ROMS")
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))
    # Test each regularization hyperparameter.
    # NOTE(review): errors_fail is collected but never read afterwards —
    # presumably kept for debugging. Also the loop variable shadows `regs`.
    errors_pass = {}
    errors_fail = {}
    for i, regs in enumerate(itertools.product(*grids)):
        with utils.timed_block(f"({i+1:d}/{num_tests:d}) Testing ROM with "
                               f"{config.REGSTR(regs)}"):
            # Train the ROM on all training snapshots.
            rom._evaluate_solver(regularizer(r, *list(regs)))
            # Simulate the ROM over the full domain.
            # NOTE(review): np.warnings is removed in NumPy >= 1.25; consider
            # the standard `warnings` module when updating.
            with np.warnings.catch_warnings():
                np.warnings.simplefilter("ignore")
                q_rom = rom.predict(Q_[:,0], t, config.U, method="RK45")
            # Check for boundedness of solution.
            errors = errors_pass if is_bounded(q_rom, M) else errors_fail
            # Calculate integrated relative errors in the reduced space
            # (only if the integration got past the training window).
            if q_rom.shape[1] > trainsize:
                errors[tuple(regs)] = opinf.post.Lp_error(Q_,
                                                          q_rom[:,:trainsize],
                                                          t[:trainsize])[1]
    # Choose and save the ROM with the least error.
    if not errors_pass:
        message = f"NO STABLE ROMS for r={r:d}"
        print(message)
        logging.info(message)
        return
    # Invert the mapping to look up the hyperparameters of the smallest error.
    err2reg = {err:reg for reg,err in errors_pass.items()}
    regs = list(err2reg[min(err2reg.keys())])
    logging.info(f"Best regularization for k={trainsize:d}, r={r:d}: "
                 f"{config.REGSTR(regs)}")
    return regs
def train_minimize(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension(s), saving only the ROM with
    the least training error that satisfies a bound on the integrated POD
    coefficients, using a search algorithm to choose the regularization
    hyperparameters.

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.
    regs : two positive floats
        Initial guesses for the regularization hyperparameters (non-quadratic,
        quadratic) to use in the Operator Inference least-squares problem
        for training the ROM.
    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.
    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)
    # Parse arguments. The search runs in log10 space so that both
    # hyperparameters vary over orders of magnitude.
    modelform = get_modelform(regs)
    d = check_lstsq_size(trainsize, r, modelform)
    log10regs = np.log10(check_regs(regs))
    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])
    # Compute the bound to require for integrated POD modes.
    B = margin * np.abs(Q_).max()
    # Create a solver mapping regularization hyperparameters to operators
    # (factor the least-squares system once; re-solve per candidate below).
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM(modelform)
        rom._construct_solver(None, Q_, Qdot_, U, np.ones(d))
    # Test each regularization hyperparameter.
    def training_error(log10regs):
        """Return the training error resulting from the regularization
        parameters λ1 = 10^log10regs[0], λ1 = 10^log10regs[1]. If the
        resulting model violates the POD bound, return "infinity".
        """
        regs = list(10**log10regs)
        # Train the ROM on all training snapshots.
        with utils.timed_block(f"Testing ROM with {config.REGSTR(regs)}"):
            rom._evaluate_solver(regularizer(r, *regs))
        # Simulate the ROM over the full domain.
        # NOTE(review): np.warnings is removed in NumPy >= 1.25; consider
        # the standard `warnings` module when updating.
        with np.warnings.catch_warnings():
            np.warnings.simplefilter("ignore")
            q_rom = rom.predict(Q_[:,0], t, config.U, method="RK45")
        # Check for boundedness of solution; _MAXFUN penalizes instability.
        if not is_bounded(q_rom, B):
            return _MAXFUN
        # Calculate integrated relative errors in the reduced space.
        return opinf.post.Lp_error(Q_,
                                   q_rom[:,:trainsize],
                                   t[:trainsize])[1]
    opt_result = opt.minimize(training_error, log10regs, method="Nelder-Mead")
    if opt_result.success and opt_result.fun != _MAXFUN:
        # Re-train with the winning hyperparameters and save the result.
        regs = list(10**opt_result.x)
        with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                               f"r={r:d}: {config.REGSTR(regs)}"):
            rom._evaluate_solver(regularizer(r, *regs))
            save_trained_rom(trainsize, r, regs, rom)
    else:
        message = "Regularization search optimization FAILED"
        print(message)
        logging.info(message)
# First draft approach: single regularization hyperparameter, i.e., ===========
# equally penalize all entries of the ROM operators. ==========================
def _train_minimize_1D(trainsize, r, regs, testsize=None, margin=1.1):
    """Train ROMs with the given dimension(s), saving only the ROM with
    the least training error that satisfies a bound on the integrated POD
    coefficients, using a search algorithm to choose the regularization
    parameter.

    First-draft approach: a single hyperparameter λ penalizes all operator
    entries equally (λ1 = λ2 = λ).

    Parameters
    ----------
    trainsize : int
        Number of snapshots to use to train the ROM.
    r : int
        Dimension of the desired ROM. Also the number of retained POD modes
        (left singular vectors) used to project the training data.
    regs : two non-negative floats
        Bounds for the (single) regularization hyperparameter to use in the
        Operator Inference least-squares problem for training the ROM.
    testsize : int
        Number of time steps for which a valid ROM must satisfy the POD bound.
    margin : float ≥ 1
        Amount that the integrated POD coefficients of a valid ROM are allowed
        to deviate in magnitude from the maximum magnitude of the training
        data Q, i.e., bound = margin * max(abs(Q)).
    """
    utils.reset_logger(trainsize)
    # Parse arguments; the scalar search runs in log10 space.
    check_lstsq_size(trainsize, r, modelform="cAHB")
    log10regs = np.log10(regs)
    # Load training data.
    t = utils.load_time_domain(testsize)
    Q_, Qdot_, _ = utils.load_projected_data(trainsize, r)
    U = config.U(t[:trainsize])
    # Compute the bound to require for integrated POD modes.
    B = margin * np.abs(Q_).max()
    # Create a solver mapping regularization hyperparameters to operators.
    with utils.timed_block(f"Constructing least-squares solver, r={r:d}"):
        rom = opinf.InferredContinuousROM("cAHB")
        rom._construct_solver(None, Q_, Qdot_, U, 1)
    # Test each regularization hyperparameter.
    def training_error(log10reg):
        """Return the training error resulting from the regularization
        hyperparameters λ1 = λ2 = 10^log10reg. If the resulting model
        violates the POD bound, return "infinity".
        """
        λ = 10**log10reg
        # Train the ROM on all training snapshots.
        with utils.timed_block(f"Testing ROM with λ={λ:e}"):
            rom._evaluate_solver(λ)
        # Simulate the ROM over the full domain.
        # NOTE(review): np.warnings is removed in NumPy >= 1.25; consider
        # the standard `warnings` module when updating.
        with np.warnings.catch_warnings():
            np.warnings.simplefilter("ignore")
            q_rom = rom.predict(Q_[:,0], t, config.U, method="RK45")
        # Check for boundedness of solution; _MAXFUN penalizes instability.
        if not is_bounded(q_rom, B):
            return _MAXFUN
        # Calculate integrated relative errors in the reduced space.
        return opinf.post.Lp_error(Q_,
                                   q_rom[:,:trainsize],
                                   t[:trainsize])[1]
    opt_result = opt.minimize_scalar(training_error,
                                     method="bounded", bounds=log10regs)
    if opt_result.success and opt_result.fun != _MAXFUN:
        # Re-train with the winning hyperparameter and save the result
        # (stored as the pair (λ, λ) for path compatibility).
        λ = 10**opt_result.x
        with utils.timed_block(f"Best regularization for k={trainsize:d}, "
                               f"r={r:d}: λ={λ:.0f}"):
            rom._evaluate_solver(λ)
            save_trained_rom(trainsize, r, (λ,λ), rom)
    else:
        message = "Regularization search optimization FAILED"
        print(message)
        logging.info(message)
# =============================================================================
if __name__ == "__main__":
    # Set up command line argument parsing.
    import argparse
    parser = argparse.ArgumentParser(description=__doc__,
                    formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.usage = f""" python3 {__file__} --help
        python3 {__file__} --single TRAINSIZE R REG1 REG2 [REG3]
        python3 {__file__} --gridsearch TRAINSIZE R REG1 ... REG6 [... REG9]
                                        --testsize TESTSIZE --margin TAU
        python3 {__file__} --minimize TRAINSIZE R REG1 REG2 [REG3]
                                        --testsize TESTSIZE --margin TAU"""
    # Parser subcommands: exactly one training mode must be chosen.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--single", action="store_true",
                       help="train and save a single ROM with regularization "
                            "hyperparameters REG1 (non-quadratic penalizer) "
                            "and REG2 (quadratic penalizer)")
    group.add_argument("--gridsearch", action="store_true",
                       help="train over the REG3xREG6 grid "
                            "[REG1,REG2]x[REG4,REG5] of regularization "
                            "hyperparameter candidates, followed by a "
                            "minimization-based search from the winner")
    group.add_argument("--minimize", action="store_true",
                       help="given initial guesses REG1 (non-quadratic "
                            "penalizer) and REG2 (quadratic penalizer), use "
                            "Nelder-Mead search to train and save a ROM that "
                            "is locally optimal in the regularization "
                            "hyperparameter space")
    # Positional arguments.
    parser.add_argument("trainsize", type=int,
                        help="number of snapshots in the training data")
    parser.add_argument("modes", type=int,
                        help="number of POD modes used to project the data "
                             "(dimension of ROM to be learned)")
    parser.add_argument("regularization", type=float, nargs='+',
                        help="regularization hyperparameters for ROM training")
    # Other keyword arguments.
    parser.add_argument("--testsize", type=int, default=None,
                        help="number of time steps for which the trained ROM "
                             "must satisfy the POD bound")
    parser.add_argument("--margin", type=float, default=1.1,
                        help="factor by which the POD coefficients of the ROM "
                             "simulation are allowed to deviate in magnitude "
                             "from the training data (default 1.1)")
    # Parse arguments and do one of the main routines.
    args = parser.parse_args()
    if args.single:
        train_single(args.trainsize, args.modes, args.regularization)
    elif args.gridsearch:
        # Grid search first, then refine the winning grid point with the
        # Nelder-Mead search.
        regs = train_gridsearch(args.trainsize, args.modes,
                                args.regularization,
                                args.testsize, args.margin)
        if regs is not None:
            train_minimize(args.trainsize, args.modes, regs,
                           args.testsize, args.margin)
    elif args.minimize:
        train_minimize(args.trainsize, args.modes, args.regularization,
                       args.testsize, args.margin)
|
{"hexsha": "35062e8dc83b2ff6ae5561f6440004791fa5f823", "size": 22476, "ext": "py", "lang": "Python", "max_stars_repo_path": "step3_train.py", "max_stars_repo_name": "shanemcq18/ROM-OpInf-Combustion-2D", "max_stars_repo_head_hexsha": "73a99bd7ebbfb6d071c4cd150d17b6291b7d1dd0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "step3_train.py", "max_issues_repo_name": "shanemcq18/ROM-OpInf-Combustion-2D", "max_issues_repo_head_hexsha": "73a99bd7ebbfb6d071c4cd150d17b6291b7d1dd0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "step3_train.py", "max_forks_repo_name": "shanemcq18/ROM-OpInf-Combustion-2D", "max_forks_repo_head_hexsha": "73a99bd7ebbfb6d071c4cd150d17b6291b7d1dd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8510638298, "max_line_length": 90, "alphanum_fraction": 0.6253781812, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5220}
|
# This program is designed to implement the Trapezoid Rule for numerical integration
from __future__ import division
import numpy as np
def Tz(f, a, b, n, args):
    """Approximate the integral of ``f`` over [a, b] with the composite
    trapezoid rule.

    Inputs:
        f    - the function being integrated, called as f(x, args)
        a    - lower integration limit
        b    - upper integration limit
        n    - the number of "bins" (sub-intervals) to integrate over
        args - extra arguments passed through to f
    Output:
        the approximated value of the integral
    """
    # Width of each trapezoid/bin.
    h = (b - a) / n
    # End points carry weight 1/2, interior points weight 1.
    s = 0.5 * (f(a, args) + f(b, args))
    for i in range(1, n):
        s = s + f(a + i * h, args)
    return h * s


def trapezoid(f, lam, lower, upper, n):
    """Integrate ``f`` once per parameter value in ``lam``.

    Inputs:
        f     - function to be integrated, called as f(x, parameters)
        lam   - a vector of relevant parameter values
        lower - lower bound of integration
        upper - upper bound of integration (may be np.inf)
        n     - the number of "bins" to integrate over
    Output:
        solution - numpy array of integral values, one per entry of lam
    """
    solution = np.zeros(len(lam))
    for i in range(len(lam)):
        # Parameter vector handed to f.  NOTE(review): the first and third
        # entries are hard-coded; only the middle entry varies with lam.
        # Confirm 0.04 and 0.5 against the intended model.
        ptup = np.array([0.04, lam[i], 0.5])
        if upper == np.inf:
            # Infinite upper bound: keep doubling a finite cutoff until
            # doubling it no longer changes the integral significantly.
            bdiff = 1
            b_0 = 50
            while abs(bdiff) > .01:
                b_1 = b_0 * 2
                result_0 = Tz(f, lower, b_0, n, ptup)
                result_1 = Tz(f, lower, b_1, n, ptup)
                bdiff = result_1 - result_0
                b_0 = b_1
        else:
            # BUG FIX: this previously read ``b_1 = up``, which raised a
            # NameError for any finite upper bound.
            b_1 = upper
        # Evaluate the integral at the chosen (possibly truncated) upper bound.
        solution[i] = Tz(f, lower, b_1, n, ptup)
    return solution
|
{"hexsha": "02c140d4a0c11c0a0121e03b13cf4fff17e2b958", "size": 2078, "ext": "py", "lang": "Python", "max_stars_repo_path": "integration/trapezoid.py", "max_stars_repo_name": "anna-elsa/solvers", "max_stars_repo_head_hexsha": "25e4f00db447fde3461b477aa7247d65c6cdf27b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "integration/trapezoid.py", "max_issues_repo_name": "anna-elsa/solvers", "max_issues_repo_head_hexsha": "25e4f00db447fde3461b477aa7247d65c6cdf27b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "integration/trapezoid.py", "max_forks_repo_name": "anna-elsa/solvers", "max_forks_repo_head_hexsha": "25e4f00db447fde3461b477aa7247d65c6cdf27b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4657534247, "max_line_length": 107, "alphanum_fraction": 0.656400385, "include": true, "reason": "import numpy", "num_tokens": 604}
|
import numpy as np
from pySDC.core.Problem import ptype
from pySDC.implementations.datatype_classes.particles import particles, fields, acceleration
class planewave_single(ptype):
    """
    Example implementing a single particle spiraling in a trap.

    The particle sees a radial electric field proportional to a0/R^3 in the
    x-y plane and a magnetic field of magnitude R along z (see eval_f), and is
    advanced with the Boris scheme (see boris_solver).
    """
    def __init__(self, cparams, dtype_u=particles, dtype_f=fields):
        """
        Initialization routine
        Args:
            cparams: custom parameters for the example
            dtype_u: particle data type (will be passed parent class)
            dtype_f: fields data type (will be passed parent class)
        """
        # these parameters will be used later, so assert their existence
        assert 'delta' in cparams  # polarization
        assert 'a0' in cparams  # normalized amplitude
        assert 'u0' in cparams  # initial position and velocity
        # add parameters as attributes for further reference
        for k, v in cparams.items():
            setattr(self, k, v)
        # set nparts to one (lonely particle, you know)
        self.nparts = 1
        # invoke super init, passing nparts, dtype_u and dtype_f
        # NOTE(review): methods below read self.params.a0, so the parent init
        # presumably wraps cparams into self.params — confirm against ptype.
        super(planewave_single, self).__init__(self.nparts, dtype_u, dtype_f, cparams)
    def eval_f(self, part, t):
        """
        Routine to compute the electric and magnetic fields
        Args:
            t: current time (unused here: the fields are static)
            part: the current particle
        Returns:
            E and B field for the particle (external only)
        """
        f = self.dtype_f((3, self.nparts))
        # R = distance of the particle from the origin in 3D (2-norm of pos)
        R = np.linalg.norm(part.pos.values[:, 0], 2)
        # Radial E field in the x-y plane with magnitude a0/R^2 (a0/R^3 times
        # the position components); no z component.
        f.elec.values[0, 0] = self.params.a0 / (R ** 3) * part.pos.values[0, 0]
        f.elec.values[1, 0] = self.params.a0 / (R ** 3) * part.pos.values[1, 0]
        f.elec.values[2, 0] = 0
        # B field points along z with magnitude R.
        f.magn.values[0, 0] = 0
        f.magn.values[1, 0] = 0
        f.magn.values[2, 0] = R
        return f
    def u_init(self):
        """
        Initialization routine for the single particle
        Returns:
            particle type, filled from self.params.u0:
            u0[0] = position, u0[1] = velocity, u0[2] = charge, u0[3] = mass
        """
        u0 = self.params.u0
        # some abbreviations
        u = self.dtype_u((3, 1))
        u.pos.values[0, 0] = u0[0][0]
        u.pos.values[1, 0] = u0[0][1]
        u.pos.values[2, 0] = u0[0][2]
        u.vel.values[0, 0] = u0[1][0]
        u.vel.values[1, 0] = u0[1][1]
        u.vel.values[2, 0] = u0[1][2]
        u.q[:] = u0[2][0]
        u.m[:] = u0[3][0]
        return u
    def build_f(self, f, part, t):
        """
        Helper function to assemble the correct right-hand side out of B and E field
        Args:
            f: wannabe right-hand side, actually the E field
            part: particle data
            t: current time (unused)
        Returns:
            correct RHS of type acceleration
        """
        assert isinstance(part, particles)
        rhs = acceleration((3, self.nparts))
        # Lorentz force per unit mass: (q/m) * (E + v x B)
        rhs.values[:, 0] = part.q[:] / part.m[:] * \
            (f.elec.values[:, 0] + np.cross(part.vel.values[:, 0], f.magn.values[:, 0]))
        return rhs
    def boris_solver(self, c, dt, old_fields, new_fields, old_parts):
        """
        The actual Boris solver for static (!) B fields, extended by the c-term
        Args:
            c: the c term gathering the known values from the previous iteration
            dt: the (probably scaled) time step size
            old_fields: the field values at the previous node m
            new_fields: the field values at the current node m+1
            old_parts: the particles at the previous node m
        Returns:
            the velocities at the (m+1)th node
        """
        N = self.nparts
        vel = particles.velocity((3, 1))
        # Average E field between the two nodes (used for the half kicks below)
        Emean = 1.0 / 2.0 * (old_fields.elec + new_fields.elec)
        for n in range(N):
            # charge-to-mass ratio
            a = old_parts.q[n] / old_parts.m[n]
            # Fold the B-field difference between nodes into the c term
            c.values[:, n] += dt / 2 * a * \
                np.cross(old_parts.vel.values[:, n], old_fields.magn.values[:, n] - new_fields.magn.values[:, n])
            # pre-velocity, separated by the electric forces (and the c term)
            vm = old_parts.vel.values[:, n] + dt / 2 * a * Emean.values[:, n] + c.values[:, n] / 2
            # rotation: classic Boris t/s vectors built from the new-node B field
            t = dt / 2 * a * new_fields.magn.values[:, n]
            s = 2 * t / (1 + np.linalg.norm(t, 2) ** 2)
            vp = vm + np.cross(vm + np.cross(vm, t), s)
            # post-velocity: second half electric kick (plus half the c term)
            vel.values[:, n] = vp + dt / 2 * a * Emean.values[:, n] + c.values[:, n] / 2
        return vel
|
{"hexsha": "f76d99fc7f0d426183b63f7169e565fb1319b868", "size": 4465, "ext": "py", "lang": "Python", "max_stars_repo_path": "pySDC/playgrounds/Boris/spiraling_particle_ProblemClass.py", "max_stars_repo_name": "janEbert/pySDC", "max_stars_repo_head_hexsha": "167d78c4118bc3a5a446ec973fe65fb35db94471", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pySDC/playgrounds/Boris/spiraling_particle_ProblemClass.py", "max_issues_repo_name": "janEbert/pySDC", "max_issues_repo_head_hexsha": "167d78c4118bc3a5a446ec973fe65fb35db94471", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pySDC/playgrounds/Boris/spiraling_particle_ProblemClass.py", "max_forks_repo_name": "janEbert/pySDC", "max_forks_repo_head_hexsha": "167d78c4118bc3a5a446ec973fe65fb35db94471", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-27T11:44:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-27T11:44:54.000Z", "avg_line_length": 31.8928571429, "max_line_length": 113, "alphanum_fraction": 0.5505039194, "include": true, "reason": "import numpy", "num_tokens": 1205}
|
"""
formulagrader.py
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from numbers import Number
import numpy as np
import six
from voluptuous import Schema, Required, Any, All, Invalid, Length
from mitxgraders.comparers import equality_comparer
from mitxgraders.sampling import schema_user_functions_no_random
from mitxgraders.exceptions import MissingInput
from mitxgraders.baseclasses import ItemGrader
from mitxgraders.helpers.calc import evaluator, DEFAULT_VARIABLES
from mitxgraders.helpers.validatorfuncs import NonNegative, PercentageString, is_callable_with_args, text_string
from mitxgraders.helpers.math_helpers import MathMixin
from mitxgraders.helpers.calc.mathfuncs import merge_dicts
class FormulaGrader(ItemGrader, MathMixin):
    """
    Grades mathematical expressions, like edX FormulaResponse. Note that comparison will
    always be performed in a case-sensitive nature, unlike edX, which allows for a
    case-insensitive comparison.
    Configuration options:
        user_functions (dict): A dictionary of user-defined functions that students can
            use in their solutions (default {}). Eg: {'f': lambda x:x**2}. Can also point
            a function name to a list of functions, from which one will be chosen randomly,
            or a FunctionSamplingSet, eg, RandomFunction().
        user_constants (dict): A dictionary of user-defined constants that students can
            use in their solutions (default {}). Eg: {'c': 3e10}
        blacklist ([str]): A list of functions that students may not use in their solutions
            (default []). Eg: ['cos', 'sin']
        whitelist ([str or None]): A list of the only functions that students may use in
            their solutions (default []). Eg: ['cos', 'sin']. To disallow all functions,
            use [None].
        forbidden_strings ([str]): A list of strings that are forbidden from student
            solutions (default []). Do not put spaces in these strings. This will match
            against student input with spaces stripped. For example, if you want to ask
            for the expansion of sin(2*theta) and expect 2*sin(theta)*cos(theta), you may
            set this to:
            ['*theta', 'theta*', 'theta/', '+theta', 'theta+', '-theta', 'theta-']
            so that students can't just enter 'sin(2*theta)'. Students receive the error
            message in forbidden_message if they attempt to use these strings in their
            solution.
        forbidden_message (str): Error message displayed to students when they use forbidden
            input (default "Invalid Input: This particular answer is forbidden")
        required_functions ([str]): A list of functions that must be used by the students
            in their solutions (default []). Eg: ['sin', 'cos']
        tolerance (number or PercentageString): Tolerance with which answers are compared to
            the solutions. Can be expressed as an absolute number (eg, 0.1), or as a string
            percentage (default '0.01%'). Must be positive or zero.
        metric_suffixes (bool): Should metric affixes be available to students to modify
            answers (default False). If true, then "2m" == 0.002, for example.
        samples (int): The number of times to sample random variables (default 5)
        variables ([str]): A list of variable names (default [])
        numbered_vars ([str]): A list of numbered variable names, which can only occur
            with a number attached to the end. For example, ['numvar'] will allow students
            to write `numvar_{0}`, `numvar_{5}` or `numvar_{-2}`. Any integer will be
            accepted. Use a sample_from entry for `numvar`. Note that a specifically-named
            variable will take priority over a numbered variable. (default [])
        sample_from (dict): A dictionary of VariableSamplingSets for specific variables. By
            default, each variable samples from RealInterval([1, 5]) (default {}). Will
            also accept a list with two values [a, b] to sample from the real interval
            between a and b. Will also accept a tuple of discrete values to sample from.
        failable_evals (int): The number of samples that may disagree before the student's
            answer is marked incorrect (default 0). Ignored by correlated comparers.
        answers: A single "expect" value, a dictionary, or a tuple thereof, as
            described in the documentation for ItemGraders.
            The expect value can be a string, or can itself be a dictionary.
            If the expect value is a string, it represents the correct
            answer and is compared to student input for equality.
            If the expect value is a dictionary, it needs keys:
            - comparer_params: a list of strings to be numerically sampled and passed to the
                comparer function.
            - comparer: a function with signature comparer(comparer_params_eval, student_eval,
                utils) that compares student and comparer_params after evaluation. This function
                should return True, False, 'partial', or a dictionary with required key
                'grade_decimal' and optional key 'msg'. Comparer messages are ignored
                when comparison succeeds (result['ok'] is True).
        instructor_vars ([str]): A list of variable/constant names that cannot be used by
            students. This can be useful in constructing DependentSampler expressions or
            blacklisting constants. Note that this list is not validated against the list
            of constants/variables.
    """
    # Comparer functionality
    # Default comparer for FormulaGrader
    default_comparer = staticmethod(equality_comparer)
    @classmethod
    def set_default_comparer(cls, comparer):
        """
        Used to set the default comparer of FormulaGrader class.
        Note: This class method exists primarily to ensure that
        FormulaGrader.default_comparer is a static method. If the staticmethod
        decorator is not used,
        FormulaGrader.default_grader = equality_comparer
        grader = FormulaGrader()
        then grader.default_grader will be a bound method. That's bad, since
        comparer functions do not expect self as the first argument.
        """
        cls.default_comparer = staticmethod(comparer)
    @classmethod
    def reset_default_comparer(cls):
        """
        Resets the default_comparer to equality_comparer.
        """
        cls.set_default_comparer(equality_comparer)
    @staticmethod
    def eval_and_validate_comparer_params(scoped_eval, comparer_params, siblings_eval):
        """
        Evaluate the comparer_params, and make sure they contain no references
        to empty siblings.
        Arguments
        =========
        - scoped_eval (func): a unary function to evaluate math expressions.
          Same keyword arguments as calc's evaluator, but with appropriate
          default variables, functions, suffixes
        - comparer_params ([str]): unevaluated expressions
        - siblings_eval (dict): evaluated expressions

        Raises MissingInput if a comparer param references a sibling whose
        evaluated value is NaN (i.e., the sibling input was left empty).
        """
        results = [scoped_eval(param, max_array_dim=float('inf'))
                   for param in comparer_params]
        # results is a list of (value, EvalMetaData) pairs
        comparer_params_eval = [value for value, _ in results]
        # Union of all variable names referenced across every comparer param
        used_variables = set().union(*[used.variables_used for _, used in results])
        for variable in used_variables:
            if variable in siblings_eval and np.isnan(siblings_eval[variable]):
                raise MissingInput('Cannot grade answer, a required input is missing.')
        return comparer_params_eval
    # Configuration
    @property
    def schema_config(self):
        """Define the configuration options for FormulaGrader"""
        # Construct the default ItemGrader schema
        schema = super(FormulaGrader, self).schema_config
        # Apply the default math schema
        schema = schema.extend(self.math_config_options)
        # Append FormulaGrader-specific options
        return schema.extend({
            Required('allow_inf', default=False): bool,
            Required('max_array_dim', default=0): NonNegative(int)  # Do not use this; use MatrixGrader instead
        })
    # Schema for a dict-valued "expect" entry; see validate_expect below.
    schema_expect = Schema({
        Required('comparer_params'): [text_string],
        # Functions seem not to be usable as default values, so the default comparer is added later.
        # https://github.com/alecthomas/voluptuous/issues/340
        Required('comparer'): is_callable_with_args(3)
    })
    def validate_expect(self, expect):
        """
        Validate the answers's expect key.
        >>> result = FormulaGrader().validate_expect('mc^2')
        >>> expected = {
        ...     'comparer_params': ['mc^2'],
        ...     'comparer': equality_comparer
        ... }
        >>> result == expected
        True
        """
        # A plain string becomes a single comparer_param with the default comparer.
        if isinstance(expect, six.string_types):
            return self.schema_expect({
                'comparer': self.default_comparer,
                'comparer_params': [expect]
            })
        try:
            return self.schema_expect(expect)
        except Invalid:
            # Only raise the detailed error message if author is trying to use comparer.
            if isinstance(expect, dict) and 'comparer' in expect:
                raise
            # Otherwise, be generic.
            else:
                raise Invalid("Something's wrong with grader's 'answers' configuration key. "
                              "Please see documentation for accepted formats.")
    # Template used by MathMixin debug logging (see log_eval_info call below).
    debug_appendix_eval_template = (
        "\n"
        "==========================================\n"
        "Evaluation Data for Sample Number {sample_num} of {samples_total}\n"
        "==========================================\n"
        "Variables:\n"
        "{variables}\n"
        "Student Eval: {student_eval}\n"
        "Compare to: {comparer_params_eval}\n"
        ""
    )
    def __init__(self, config=None, **kwargs):
        """
        Validate the FormulaGrader's configuration.
        First, we allow the ItemGrader initializer to construct the function list.
        We then construct the lists of functions, suffixes and constants.
        Finally, we refine the sample_from entry.
        """
        super(FormulaGrader, self).__init__(config, **kwargs)
        # If we are allowing infinities, add this to the default constants.
        # Note that this is done before variable validation.
        if self.config['allow_inf']:
            # Make a new copy, so we don't change this for all FormulaGraders
            self.default_variables = merge_dicts(DEFAULT_VARIABLES, {'infty': float('inf')})
        # Store the comparer utils
        self.comparer_utils = self.get_comparer_utils()
        # Perform standard math validation
        self.validate_math_config()
    def check_response(self, answer, student_input, **kwargs):
        """Check the student response against a given answer"""
        return self.check_math_response(answer, student_input, **kwargs)
    @staticmethod
    def sibling_varname(index):
        """Generate name for sibling variables (1-based: sibling_1, sibling_2, ...)"""
        return 'sibling_{}'.format(index + 1)
    @staticmethod
    def get_sibling_formulas(siblings, required_siblings):
        """
        Returns a dict containing sibling formula inputs.
        Arguments:
            siblings ([dict]): each sibling dict has keys 'grader' and 'input'
            required_siblings (set): Only include siblings whose varnames are
                included in this set
        Note: siblings are present when a grader is used inside a ListGrader.
        """
        if siblings is None:
            return {}
        # Only FormulaGrader (or subclass) siblings contribute formula inputs.
        formula_siblings = [(i, sibling['input']) for i, sibling
                            in enumerate(siblings)
                            if isinstance(sibling['grader'], FormulaGrader)]
        return {
            FormulaGrader.sibling_varname(i): sibling_input
            for i, sibling_input in formula_siblings
            if FormulaGrader.sibling_varname(i) in required_siblings
        }
    def gen_evaluations(self, comparer_params, student_input, sibling_formulas,
                        var_samples, func_samples):
        """
        Evaluate the comparer parameters and student inputs for the given samples.
        Returns:
            A tuple (list, list, set). The first two lists are comparer_params_evals
            and student_evals. These have length equal to number of samples specified
            in config. The set is a record of mathematical functions used in the
            student's input.
        """
        funclist = self.functions.copy()
        varlist = {}
        comparer_params_evals = []
        student_evals = []
        # Create a list of instructor variables to remove from student evaluation
        var_blacklist = []
        for var in self.config['instructor_vars']:
            if var in var_samples[0]:
                var_blacklist.append(var)
        for i in range(self.config['samples']):
            # Update the functions and variables listings with this sample
            funclist.update(func_samples[i])
            varlist.update(var_samples[i])
            # Default arguments bind the dict objects themselves, so scoped_eval
            # always sees the current contents of varlist/funclist as they are
            # mutated across iterations.
            def scoped_eval(expression,
                            variables=varlist,
                            functions=funclist,
                            suffixes=self.suffixes,
                            max_array_dim=self.config['max_array_dim']):
                return evaluator(expression, variables, functions, suffixes, max_array_dim,
                                 allow_inf=self.config['allow_inf'])
            # Compute the sibling values, and add them to varlist
            siblings_eval = {
                key: scoped_eval(sibling_formulas[key])[0]
                for key in sibling_formulas
            }
            varlist.update(siblings_eval)
            # Compute expressions
            comparer_params_eval = self.eval_and_validate_comparer_params(
                scoped_eval, comparer_params, siblings_eval)
            comparer_params_evals.append(comparer_params_eval)
            # Before performing student evaluation, scrub the sibling and instructor
            # variables so that students can't use them
            for key in siblings_eval:
                del varlist[key]
            for key in var_blacklist:
                del varlist[key]
            student_eval, meta = scoped_eval(student_input)
            student_evals.append(student_eval)
            # TODO: Remove this if statement
            if self.config['debug']:
                # Put the siblings and instructor variables back in for the debug output
                varlist.update(var_samples[i])
                varlist.update(siblings_eval)
                self.log_eval_info(i, varlist, funclist,
                                   comparer_params_eval=comparer_params_eval,
                                   student_eval=student_eval)
        # NOTE(review): `meta` is the metadata from the LAST loop iteration;
        # this assumes config['samples'] >= 1 (NameError otherwise).
        return comparer_params_evals, student_evals, meta.functions_used
    def raw_check(self, answer, student_input, **kwargs):
        """Perform the numerical check of student_input vs answer"""
        # Extract sibling formulas to allow for sampling
        siblings = kwargs.get('siblings', None)
        comparer_params = answer['expect']['comparer_params']
        required_siblings = self.get_used_vars(comparer_params)
        # required_siblings might include some extra variable names, but no matter
        sibling_formulas = self.get_sibling_formulas(siblings, required_siblings)
        # Generate samples, using student input, sibling formulas and any comparer
        # parameters (including answers) as the list of expressions to check
        var_samples, func_samples = self.gen_var_and_func_samples(student_input,
                                                                  sibling_formulas,
                                                                  comparer_params)
        (comparer_params_evals,
         student_evals,
         functions_used) = self.gen_evaluations(comparer_params, student_input,
                                                sibling_formulas, var_samples, func_samples)
        # Get the comparer function
        comparer = answer['expect']['comparer']
        results = self.compare_evaluations(comparer_params_evals, student_evals,
                                           comparer, self.get_comparer_utils())
        # Comparer function results might assign partial credit.
        # But the answer we're testing against might only merit partial credit.
        for result in results:
            result['grade_decimal'] *= answer['grade_decimal']
        consolidated = self.consolidate_results(results, answer, self.config['failable_evals'])
        return consolidated, functions_used
class NumericalGrader(FormulaGrader):
    """
    Grades mathematical expressions without random functions or variables.

    A convenience subclass of FormulaGrader whose defaults are tuned for
    purely numerical input, mimicking edX NumericalResponse graders.

    Configuration options as per FormulaGrader, except:
        user_functions (dict): A dictionary of user-defined functions that students
            can use in their solutions (default {}). Cannot have random functions,
            unlike FormulaGrader. Eg: {'f': lambda x:x**2}
        tolerance (number or PercentageString): As in FormulaGrader (default '5%')
        samples (int): Will always be 1
        variables ([str]): Will always be an empty list
        numbered_vars ([str]): Will always be an empty list
        sample_from (dict): Will always be an empty dictionary
        failable_evals (int): Will always be 0
    """
    # Own copy of the default comparer, independent of FormulaGrader's, so
    # changing one class's default does not affect the other.
    default_comparer = staticmethod(equality_comparer)

    @property
    def schema_config(self):
        """Define the configuration options for NumericalGrader"""
        # Start from the full FormulaGrader schema...
        base = super(NumericalGrader, self).schema_config
        # ...then pin down the options that must not vary for numerical grading.
        overrides = {
            Required('user_functions', default={}): schema_user_functions_no_random,
            Required('tolerance', default='5%'): Any(PercentageString, NonNegative(Number)),
            Required('samples', default=1): 1,
            Required('variables', default=[]): All(Length(max=0), []),
            Required('numbered_vars', default=[]): All(Length(max=0), []),
            Required('sample_from', default={}): {},
            Required('failable_evals', default=0): 0
        }
        return base.extend(overrides)
|
{"hexsha": "ae6bdddbbaec01fc3c7317610c068e37fb7068bb", "size": 18939, "ext": "py", "lang": "Python", "max_stars_repo_path": "python_lib/mitxgraders/formulagrader/formulagrader.py", "max_stars_repo_name": "haharay/python_lib", "max_stars_repo_head_hexsha": "8acfc634ceb1943da5163c81b79bad126b27212f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-06-20T19:38:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T19:52:52.000Z", "max_issues_repo_path": "python_lib/mitxgraders/formulagrader/formulagrader.py", "max_issues_repo_name": "haharay/python_lib", "max_issues_repo_head_hexsha": "8acfc634ceb1943da5163c81b79bad126b27212f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 282, "max_issues_repo_issues_event_min_datetime": "2017-11-07T13:34:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T04:25:20.000Z", "max_forks_repo_path": "python_lib/mitxgraders/formulagrader/formulagrader.py", "max_forks_repo_name": "haharay/python_lib", "max_forks_repo_head_hexsha": "8acfc634ceb1943da5163c81b79bad126b27212f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-06-05T23:27:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T08:02:50.000Z", "avg_line_length": 44.7730496454, "max_line_length": 112, "alphanum_fraction": 0.6395797033, "include": true, "reason": "import numpy", "num_tokens": 3768}
|
function F = diff(F, dim, n)
%DIFF Componentwise derivative of a DISKFUNV.
%   DIFF(F) differentiates each component of F in the x-direction.
%
%   DIFF(F, DIM) takes the first derivative of F along dimension DIM:
%       DIM = 1 (default): derivative in the x-direction.
%       DIM = 2: derivative in the y-direction.
%
%   DIFF(F, DIM, N) takes the Nth derivative of each component of F
%   along the specified dimension.
%
%   See also CURL and DIV

% Copyright 2017 by The University of Oxford and The Chebfun Developers.
% See http://www.chebfun.org/ for Chebfun information.

% Nothing to do for an empty DISKFUNV:
if ( isempty(F) )
    return
end

% Fill in defaults: x-derivative, first order.
if ( nargin < 2 || isempty(dim) )
    dim = 1;
end
if ( nargin < 3 )
    n = 1;
end

% Apply the scalar DIFF to both components:
for k = 1:2
    F.components{k} = diff(F.components{k}, dim, n);
end

end
|
{"author": "chebfun", "repo": "chebfun", "sha": "8c49396a55e46ddd57a1d108c6a8f32e37536d54", "save_path": "github-repos/MATLAB/chebfun-chebfun", "path": "github-repos/MATLAB/chebfun-chebfun/chebfun-8c49396a55e46ddd57a1d108c6a8f32e37536d54/@diskfunv/diff.m"}
|
[STATEMENT]
lemma spr_sim_r:
"sim_r SPR.MC spr_simMC spr_sim"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sim_r SPR.MC spr_simMC spr_sim
[PROOF STEP]
proof(rule sim_rI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
fix a p q'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
assume pT: "p \<in> worlds SPR.MC"
and fpq': "(spr_sim p, q') \<in> relations spr_simMC a"
[PROOF STATE]
proof (state)
this:
p \<in> worlds SPR.MC
(spr_sim p, q') \<in> relations spr_simMC a
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
from fpq'
[PROOF STATE]
proof (chain)
picking this:
(spr_sim p, q') \<in> relations spr_simMC a
[PROOF STEP]
obtain uq fq vq
where q': "q' = \<lparr> sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p \<rparr>"
and uq: "envObs a (tFirst p) = envObs a uq"
and vq: "envObs a (tLast p) = envObs a vq"
[PROOF STATE]
proof (prove)
using this:
(spr_sim p, q') \<in> relations spr_simMC a
goal (1 subgoal):
1. (\<And>uq vq. \<lbrakk>q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>; envObs a (tFirst p) = envObs a uq; envObs a (tLast p) = envObs a vq\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding mkKripke_def spr_sim_def spr_simRels_def
[PROOF STATE]
proof (prove)
using this:
(\<lparr>sprFst = tFirst p, sprLst = tLast p, sprCRel = tObsC_abs p\<rparr>, q') \<in> relations \<lparr>worlds = (\<lambda>t. \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = tObsC_abs t\<rparr>) ` SPR.jkbpC, relations = \<lambda>a. Restr {(s, s') |s s'. envObs a (sprFst s) = envObs a (sprFst s') \<and> envObs a (sprLst s) = envObs a (sprLst s') \<and> sprCRel s = sprCRel s'} ((\<lambda>t. \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = tObsC_abs t\<rparr>) ` SPR.jkbpC), valuation = spr_simVal\<rparr> a
goal (1 subgoal):
1. (\<And>uq vq. \<lbrakk>q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>; envObs a (tFirst p) = envObs a uq; envObs a (tLast p) = envObs a vq\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>
envObs a (tFirst p) = envObs a uq
envObs a (tLast p) = envObs a vq
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
from fpq'
[PROOF STATE]
proof (chain)
picking this:
(spr_sim p, q') \<in> relations spr_simMC a
[PROOF STEP]
have "q' \<in> worlds spr_simMC"
[PROOF STATE]
proof (prove)
using this:
(spr_sim p, q') \<in> relations spr_simMC a
goal (1 subgoal):
1. q' \<in> worlds spr_simMC
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
q' \<in> worlds spr_simMC
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
with q'
[PROOF STATE]
proof (chain)
picking this:
q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>
q' \<in> worlds spr_simMC
[PROOF STEP]
have "(uq, vq) \<in> tObsC_abs p"
[PROOF STATE]
proof (prove)
using this:
q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>
q' \<in> worlds spr_simMC
goal (1 subgoal):
1. (uq, vq) \<in> tObsC_abs p
[PROOF STEP]
using spr_sim_tFirst_tLast[where s=q']
[PROOF STATE]
proof (prove)
using this:
q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>
q' \<in> worlds spr_simMC
\<lbrakk>spr_sim ?t = q'; ?t \<in> SPR.jkbpC\<rbrakk> \<Longrightarrow> (sprFst q', sprLst q') \<in> sprCRel q'
goal (1 subgoal):
1. (uq, vq) \<in> tObsC_abs p
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
(uq, vq) \<in> tObsC_abs p
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(uq, vq) \<in> tObsC_abs p
[PROOF STEP]
obtain t
where tT: "t \<in> SPR.jkbpC"
and tp: "tObsC t = tObsC p"
and tuq: "tFirst t = uq"
and tvq: "tLast t = vq"
[PROOF STATE]
proof (prove)
using this:
(uq, vq) \<in> tObsC_abs p
goal (1 subgoal):
1. (\<And>t. \<lbrakk>t \<in> SPR.jkbpC; tObsC t = tObsC p; tFirst t = uq; tLast t = vq\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto iff: tObsC_abs_conv)
[PROOF STATE]
proof (state)
this:
t \<in> SPR.jkbpC
tObsC t = tObsC p
tFirst t = uq
tLast t = vq
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
define q where "q = t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p"
[PROOF STATE]
proof (state)
this:
q = t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
from tp tuq uq
[PROOF STATE]
proof (chain)
picking this:
tObsC t = tObsC p
tFirst t = uq
envObs a (tFirst p) = envObs a uq
[PROOF STEP]
have "spr_jview a p = spr_jview a q"
[PROOF STATE]
proof (prove)
using this:
tObsC t = tObsC p
tFirst t = uq
envObs a (tFirst p) = envObs a uq
goal (1 subgoal):
1. spr_jview a p = spr_jview a q
[PROOF STEP]
unfolding q_def
[PROOF STATE]
proof (prove)
using this:
tObsC t = tObsC p
tFirst t = uq
envObs a (tFirst p) = envObs a uq
goal (1 subgoal):
1. spr_jview a p = spr_jview a (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p)
[PROOF STEP]
by (simp add: tSplice_spr_jview_a)
[PROOF STATE]
proof (state)
this:
spr_jview a p = spr_jview a q
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
with pT tT tp tuq uq
[PROOF STATE]
proof (chain)
picking this:
p \<in> worlds SPR.MC
t \<in> SPR.jkbpC
tObsC t = tObsC p
tFirst t = uq
envObs a (tFirst p) = envObs a uq
spr_jview a p = spr_jview a q
[PROOF STEP]
have pt: "(p, q) \<in> relations SPR.MC a"
[PROOF STATE]
proof (prove)
using this:
p \<in> worlds SPR.MC
t \<in> SPR.jkbpC
tObsC t = tObsC p
tFirst t = uq
envObs a (tFirst p) = envObs a uq
spr_jview a p = spr_jview a q
goal (1 subgoal):
1. (p, q) \<in> relations SPR.MC a
[PROOF STEP]
unfolding SPR.mkM_def q_def
[PROOF STATE]
proof (prove)
using this:
p \<in> worlds \<lparr>worlds = SPR.jkbpC, relations = \<lambda>a. {(t, t'). {t, t'} \<subseteq> SPR.jkbpC \<and> spr_jview a t = spr_jview a t'}, valuation = envVal \<circ> tLast\<rparr>
t \<in> SPR.jkbpC
tObsC t = tObsC p
tFirst t = uq
envObs a (tFirst p) = envObs a uq
spr_jview a p = spr_jview a (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p)
goal (1 subgoal):
1. (p, t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) \<in> relations \<lparr>worlds = SPR.jkbpC, relations = \<lambda>a. {(t, t'). {t, t'} \<subseteq> SPR.jkbpC \<and> spr_jview a t = spr_jview a t'}, valuation = envVal \<circ> tLast\<rparr> a
[PROOF STEP]
by (simp add: tSplice_jkbpC)
[PROOF STATE]
proof (state)
this:
(p, q) \<in> relations SPR.MC a
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
from q' uq vq tp tuq tvq
[PROOF STATE]
proof (chain)
picking this:
q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>
envObs a (tFirst p) = envObs a uq
envObs a (tLast p) = envObs a vq
tObsC t = tObsC p
tFirst t = uq
tLast t = vq
[PROOF STEP]
have ftq': "spr_sim q = q'"
[PROOF STATE]
proof (prove)
using this:
q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>
envObs a (tFirst p) = envObs a uq
envObs a (tLast p) = envObs a vq
tObsC t = tObsC p
tFirst t = uq
tLast t = vq
goal (1 subgoal):
1. spr_sim q = q'
[PROOF STEP]
unfolding spr_sim_def q_def
[PROOF STATE]
proof (prove)
using this:
q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>
envObs a (tFirst p) = envObs a uq
envObs a (tLast p) = envObs a vq
tObsC t = tObsC p
tFirst t = uq
tLast t = vq
goal (1 subgoal):
1. \<lparr>sprFst = tFirst (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p), sprLst = tLast (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p), sprCRel = tObsC_abs (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p)\<rparr> = q'
[PROOF STEP]
using tSplice_tObsC[where a=a and t=t and t'=p]
[PROOF STATE]
proof (prove)
using this:
q' = \<lparr>sprFst = uq, sprLst = vq, sprCRel = tObsC_abs p\<rparr>
envObs a (tFirst p) = envObs a uq
envObs a (tLast p) = envObs a vq
tObsC t = tObsC p
tFirst t = uq
tLast t = vq
tObsC t = tObsC p \<Longrightarrow> tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC t
goal (1 subgoal):
1. \<lparr>sprFst = tFirst (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p), sprLst = tLast (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p), sprCRel = tObsC_abs (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p)\<rparr> = q'
[PROOF STEP]
apply clarsimp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>q' = \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = tObsC_abs p\<rparr>; envObs a (tFirst p) = envObs a (tFirst t); envObs a (tLast p) = envObs a (tLast t); tObsC t = tObsC p; tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC p; uq = tFirst t; vq = tLast t\<rbrakk> \<Longrightarrow> tFirst (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tFirst t \<and> tLast (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tLast t \<and> tObsC_abs (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC_abs p
[PROOF STEP]
apply (intro conjI)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>q' = \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = tObsC_abs p\<rparr>; envObs a (tFirst p) = envObs a (tFirst t); envObs a (tLast p) = envObs a (tLast t); tObsC t = tObsC p; tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC p; uq = tFirst t; vq = tLast t\<rbrakk> \<Longrightarrow> tFirst (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tFirst t
2. \<lbrakk>q' = \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = tObsC_abs p\<rparr>; envObs a (tFirst p) = envObs a (tFirst t); envObs a (tLast p) = envObs a (tLast t); tObsC t = tObsC p; tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC p; uq = tFirst t; vq = tLast t\<rbrakk> \<Longrightarrow> tLast (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tLast t
3. \<lbrakk>q' = \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = tObsC_abs p\<rparr>; envObs a (tFirst p) = envObs a (tFirst t); envObs a (tLast p) = envObs a (tLast t); tObsC t = tObsC p; tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC p; uq = tFirst t; vq = tLast t\<rbrakk> \<Longrightarrow> tObsC_abs (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC_abs p
[PROOF STEP]
apply (auto dest: tObsC_tLength)[2]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>q' = \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = tObsC_abs p\<rparr>; envObs a (tFirst p) = envObs a (tFirst t); envObs a (tLast p) = envObs a (tLast t); tObsC t = tObsC p; tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC p; uq = tFirst t; vq = tLast t\<rbrakk> \<Longrightarrow> tObsC_abs (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC_abs p
[PROOF STEP]
unfolding tObsC_abs_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>q' = \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = {(tFirst t', tLast t') |t'. t' \<in> SPR.jkbpC \<and> tObsC t' = tObsC p}\<rparr>; envObs a (tFirst p) = envObs a (tFirst t); envObs a (tLast p) = envObs a (tLast t); tObsC t = tObsC p; tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC p; uq = tFirst t; vq = tLast t\<rbrakk> \<Longrightarrow> {(tFirst t', tLast t') |t'. t' \<in> SPR.jkbpC \<and> tObsC t' = tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p)} = {(tFirst t', tLast t') |t'. t' \<in> SPR.jkbpC \<and> tObsC t' = tObsC p}
[PROOF STEP]
(* FIXME abstract *)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>q' = \<lparr>sprFst = tFirst t, sprLst = tLast t, sprCRel = {(tFirst t', tLast t') |t'. t' \<in> SPR.jkbpC \<and> tObsC t' = tObsC p}\<rparr>; envObs a (tFirst p) = envObs a (tFirst t); envObs a (tLast p) = envObs a (tLast t); tObsC t = tObsC p; tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p) = tObsC p; uq = tFirst t; vq = tLast t\<rbrakk> \<Longrightarrow> {(tFirst t', tLast t') |t'. t' \<in> SPR.jkbpC \<and> tObsC t' = tObsC (t \<^bsub>\<^esub>\<bowtie>\<^bsub>a\<^esub> p)} = {(tFirst t', tLast t') |t'. t' \<in> SPR.jkbpC \<and> tObsC t' = tObsC p}
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
spr_sim q = q'
goal (1 subgoal):
1. \<And>a u v'. \<lbrakk>u \<in> worlds SPR.MC; (spr_sim u, v') \<in> relations spr_simMC a\<rbrakk> \<Longrightarrow> \<exists>v. (u, v) \<in> relations SPR.MC a \<and> spr_sim v = v'
[PROOF STEP]
from pt ftq'
[PROOF STATE]
proof (chain)
picking this:
(p, q) \<in> relations SPR.MC a
spr_sim q = q'
[PROOF STEP]
show "\<exists>q. (p, q) \<in> relations SPR.MC a \<and> spr_sim q = q'"
[PROOF STATE]
proof (prove)
using this:
(p, q) \<in> relations SPR.MC a
spr_sim q = q'
goal (1 subgoal):
1. \<exists>q. (p, q) \<in> relations SPR.MC a \<and> spr_sim q = q'
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>q. (p, q) \<in> relations SPR.MC a \<and> spr_sim q = q'
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6967, "file": "KBPs_SPRViewNonDet", "length": 42}
|
"""
This program implements the DC power flow as a linear program
"""
from pulp import *
import numpy as np
import pandas as pd
from scipy.sparse import hstack as hstack_s, vstack as vstack_s
from GridCal.Engine import *
class AcOPf:
    """Linearized AC optimal power flow formulated as a linear program (PuLP).

    The voltage-angle and voltage-module increments are the LP unknowns; the
    linearized power-flow rows become equality constraints completed with the
    loads in ``set_loads()``, branch ratings become inequality constraints,
    and the objective is the generation cost plus either branch-overload
    slacks or load-shedding penalties.
    """

    def __init__(self, circuit: MultiCircuit, voltage_band=0.1):
        """
        Linearized AC power flow, solved with a linear solver :o
        :param circuit: GridCal Circuit instance
        :param voltage_band: allowed voltage-module deviation around 1 p.u.
                             (bounds are 1 - voltage_band .. 1 + voltage_band)
        """
        self.vm_low = 1.0 - voltage_band
        self.vm_high = 1.0 + voltage_band
        self.load_shedding = False

        self.circuit = circuit
        self.Sbase = circuit.Sbase

        # node sets
        self.pv = circuit.power_flow_input.pv
        self.pq = circuit.power_flow_input.pq
        self.vd = circuit.power_flow_input.ref
        self.pvpq = np.r_[self.pv, self.pq]
        # pq appears twice: once for the active (P) rows and once for the
        # reactive (Q) rows of the linearized system
        self.pvpqpq = np.r_[self.pv, self.pq, self.pq]

        Y = circuit.power_flow_input.Ybus
        self.B = circuit.power_flow_input.Ybus.imag
        Ys = circuit.power_flow_input.Yseries
        S = circuit.power_flow_input.Sbus
        self.V = circuit.power_flow_input.Vbus.copy()

        # form the system matrix (linearized AC power-flow Jacobian blocks)
        A11 = -Ys.imag[self.pvpq, :][:, self.pvpq]
        A12 = Y.real[self.pvpq, :][:, self.pq]
        A21 = -Ys.real[self.pq, :][:, self.pvpq]
        A22 = -Y.imag[self.pq, :][:, self.pq]
        self.sys_mat = vstack_s([hstack_s([A11, A12]),
                                 hstack_s([A21, A22])], format="csr")

        # form the slack system matrix (rows of the reference nodes)
        A11s = -Ys.imag[self.vd, :][:, self.pvpq]
        A12s = Y.real[self.vd, :][:, self.pq]
        self.sys_mat_slack = hstack_s([A11s, A12s], format="csr")

        # compose the right hand side (power vectors)
        self.rhs = np.r_[S.real[self.pvpq], S.imag[self.pq]]

        # declare the voltage increments dx
        self.nn = self.sys_mat.shape[0]
        self.nbranch = len(self.circuit.branches)
        self.nbus = len(self.circuit.buses)
        self.dx_var = [None] * self.nn
        self.flow_ij = [None] * self.nbranch
        self.flow_ji = [None] * self.nbranch
        self.theta_dict = dict()  # bus index -> angle LP variable

        # per-row demand accumulator and load-shedding variables. Both are
        # sized and indexed POSITIONALLY by system row k (see set_loads)
        self.loads = np.zeros(self.nn)
        self.load_shed = [None] * self.nn

        npv = len(self.pv)
        npq = len(self.pq)
        for i in range(self.nn):
            if i < (npv + npq):
                # angle-increment rows (pv + pq buses), bounded +/- 0.5 rad
                self.dx_var[i] = LpVariable("Va" + str(i), -0.5, 0.5)
                # dictionary to store the angles for the pvpq nodes
                self.theta_dict[self.pvpq[i]] = self.dx_var[i]
                self.load_shed[i] = pulp.LpVariable("LoadShed_P_" + str(i), 0.0, 1e20)
            else:
                # voltage-module-increment rows (pq buses), unbounded
                self.dx_var[i] = LpVariable("Vm" + str(i))
                self.load_shed[i] = pulp.LpVariable("LoadShed_Q_" + str(i), 0.0, 1e20)

        # declare the branch-overload slack vars (only needed when the
        # slacks, rather than load shedding, absorb infeasibility)
        self.slack_loading_ij_p = [None] * self.nbranch
        self.slack_loading_ji_p = [None] * self.nbranch
        self.slack_loading_ij_n = [None] * self.nbranch
        self.slack_loading_ji_n = [None] * self.nbranch
        if not self.load_shedding:
            for i in range(self.nbranch):
                self.slack_loading_ij_p[i] = pulp.LpVariable("LoadingSlack_ij_p_" + str(i), 0, 1e20)
                self.slack_loading_ji_p[i] = pulp.LpVariable("LoadingSlack_ji_p_" + str(i), 0, 1e20)
                self.slack_loading_ij_n[i] = pulp.LpVariable("LoadingSlack_ij_n_" + str(i), 0, 1e20)
                self.slack_loading_ji_n[i] = pulp.LpVariable("LoadingSlack_ji_n_" + str(i), 0, 1e20)

        # references to the generation LP variables (for reporting)
        self.PG = list()

        # LP problem
        self.problem = None

        # potential errors flag (set by build() on inconsistent input)
        self.potential_errors = False

        # Check if the problem was solved or not
        self.solved = False

        # LP problem restrictions saved on build and added to the problem
        # with every load change (see set_loads)
        self.s_restrictions = list()
        self.p_restrictions = list()

    def build(self, t_idx=None):
        """
        Formulate the AC LP problem: objective, power-balance row structure,
        voltage-band and branch-rating constraints.

        The nodal power-balance restrictions are NOT added here; their two
        sides are stored in s_restrictions / p_restrictions and completed
        with the demand in set_loads(), which allows faster problem
        compilation in case of recurrent runs.

        :param t_idx: time index; if None the snapshot generator values are used
        :return: Nothing (sets self.problem)
        """
        prob = LpProblem("AC power flow", LpMinimize)

        npv = len(self.pv)
        npq = len(self.pq)

        ################################################################################################################
        # Add the objective function
        ################################################################################################################
        fobj = 0.0

        # Add the generators cost
        for k, bus in enumerate(self.circuit.buses):
            generators = bus.controlled_generators + bus.batteries

            # check that there is at least one generator at the slack node
            if len(generators) == 0 and bus.type == BusMode.Slack:
                self.potential_errors = True
                warn('There is no generator at the Slack node ' + bus.name + '!!!')

            # Add the bus LP vars
            for i, gen in enumerate(generators):
                if gen.active and gen.enabled_dispatch:
                    if t_idx is None:
                        fobj += gen.LPVar_P * gen.Cost
                        # keep the var reference just to print later...
                        self.PG.append(gen.LPVar_P)
                    else:
                        fobj += gen.LPVar_P_prof[t_idx] * gen.Cost
                        self.PG.append(gen.LPVar_P_prof[t_idx])
                else:
                    pass  # the generator is not active or not dispatchable

            # minimize the load shedding if activated
            # NOTE(review): load_shed is sized/indexed by system row elsewhere,
            # but k here is the bus enumeration index — verify nbus <= nn
            if self.load_shedding:
                fobj += self.load_shed[k]

        # minimize the branch loading slacks if not load shedding
        if not self.load_shedding:
            for k, branch in enumerate(self.circuit.branches):
                if branch.active:
                    fobj += self.slack_loading_ij_p[k] + self.slack_loading_ij_n[k]
                    fobj += self.slack_loading_ji_p[k] + self.slack_loading_ji_n[k]
                else:
                    pass  # the branch is not active

        # Add the objective function to the problem
        prob += fobj

        ################################################################################################################
        # Add the matrix multiplication as constraints
        # See: https://math.stackexchange.com/questions/1727572/solving-a-feasible-system-of-linear-equations-
        # using-linear-programming
        ################################################################################################################
        for i in range(self.nn):
            calculated_node_power = 0
            node_power_injection = 0

            # row i of sys_mat · dx, walked through the CSR structure
            for ii in range(self.sys_mat.indptr[i], self.sys_mat.indptr[i + 1]):
                j = self.sys_mat.indices[ii]
                calculated_node_power += self.sys_mat.data[ii] * self.dx_var[j]

            # generation only enters the PV rows
            if i < npv:
                # gather the generators at the node
                k = self.pvpqpq[i]
                generators = self.circuit.buses[k].controlled_generators + self.circuit.buses[k].batteries

                # add the generation LP vars
                if t_idx is None:
                    for gen in generators:
                        if gen.active:
                            if gen.enabled_dispatch:
                                # add the dispatch variable
                                node_power_injection += gen.LPVar_P
                            else:
                                # set the default value
                                node_power_injection += gen.P / self.Sbase
                        else:
                            pass
                else:
                    for gen in generators:
                        if gen.active:
                            if gen.enabled_dispatch:
                                # add the dispatch variable
                                node_power_injection += gen.LPVar_P_prof[t_idx]
                            else:
                                # set the default profile value
                                node_power_injection += gen.P_prof.values[t_idx] / self.Sbase
                        else:
                            pass
            else:
                pass  # it is a PQ row, no generators there

            # Store the terms for adding the load later.
            # This allows faster problem compilation in case of recurrent runs
            self.s_restrictions.append(calculated_node_power)
            self.p_restrictions.append(node_power_injection)

        ################################################################################################################
        # Add the matrix multiplication as constraints (slack)
        ################################################################################################################
        for i in range(self.sys_mat_slack.shape[0]):  # vd nodes
            calculated_node_power = 0
            node_power_injection = 0

            # row i of sys_mat_slack · dx
            for ii in range(self.sys_mat_slack.indptr[i], self.sys_mat_slack.indptr[i + 1]):
                j = self.sys_mat_slack.indices[ii]
                calculated_node_power += self.sys_mat_slack.data[ii] * self.dx_var[j]

            # NOTE(review): this 'i < npv' guard mirrors the pvpq loop above;
            # confirm it is intended to hold for every slack row
            if i < npv:
                # gather the generators at the node
                k = self.vd[i]
                generators = self.circuit.buses[k].controlled_generators + self.circuit.buses[k].batteries

                # add the generation LP vars
                if t_idx is None:
                    for gen in generators:
                        if gen.active and gen.enabled_dispatch:
                            node_power_injection += gen.LPVar_P
                        else:
                            pass
                else:
                    for gen in generators:
                        if gen.active and gen.enabled_dispatch:
                            node_power_injection += gen.LPVar_P_prof[t_idx]
                        else:
                            pass
            else:
                pass  # it is a PQ node, no generators there

            # the sum of the slack node generators must equal the slack node power
            prob.add(calculated_node_power == node_power_injection, 'ct_slack_power_' + str(i))

        ################################################################################################################
        # control the voltage module between vm_low and vm_high
        ################################################################################################################
        for k, i in enumerate(self.pq):
            vm_var = abs(self.V[i]) + self.dx_var[npv + npq + k]  # compose the voltage module
            prob += vm_var <= self.vm_high
            prob += self.vm_low <= vm_var

        ################################################################################################################
        # control the voltage angles: already defined with bounds on the Va vars
        ################################################################################################################

        ################################################################################################################
        # Set the branch limits: This is the same as in the DC OPF, unless a better approximation is found
        ################################################################################################################
        for k, branch in enumerate(self.circuit.branches):
            i = self.circuit.buses_dict[branch.bus_from]
            j = self.circuit.buses_dict[branch.bus_to]

            # missing key -> slack bus, whose angle is fixed at 0
            va_i = self.theta_dict.get(i, 0.0)
            va_j = self.theta_dict.get(j, 0.0)

            # branch flow (DC approximation: susceptance times angle difference)
            self.flow_ij[k] = self.B[i, j] * (va_i - va_j)
            self.flow_ji[k] = self.B[i, j] * (va_j - va_i)

            # constraints
            if not self.load_shedding:
                # Add slacks
                prob.add(self.flow_ij[k] + self.slack_loading_ij_p[k] - self.slack_loading_ij_n[k] <= branch.rate / self.Sbase,
                         'ct_br_flow_ij_' + str(k))
                prob.add(self.flow_ji[k] + self.slack_loading_ji_p[k] - self.slack_loading_ji_n[k] <= branch.rate / self.Sbase,
                         'ct_br_flow_ji_' + str(k))
            else:
                # The slacks are in the form of load shedding
                prob.add(self.flow_ij[k] <= branch.rate / self.Sbase, 'ct_br_flow_ij_' + str(k))
                prob.add(self.flow_ji[k] <= branch.rate / self.Sbase, 'ct_br_flow_ji_' + str(k))

        # set the current problem
        self.problem = prob

    def set_loads(self, t_idx=None):
        """
        Add the nodal demand and the power-balance restrictions to the LP problem.

        Args:
            t_idx: time index; if None, the default (snapshot) load values are taken
        """
        npv = len(self.pv)
        npq = len(self.pq)

        # FIX: reset the accumulator so repeated calls do not pile up demand
        self.loads[:] = 0.0

        if t_idx is None:
            # use the default loads
            for k, i in enumerate(self.pvpqpq):
                # these restrictions come from the build step, to be fulfilled with the load now
                node_power_injection = self.p_restrictions[k]
                calculated_node_power = self.s_restrictions[k]

                # add the nodal demand. FIX: index by the system row k (not the
                # bus id i) so the P and Q rows of a pq bus stay separate —
                # this matches the positional indexing used in __init__
                for load in self.circuit.buses[i].loads:
                    if load.active:
                        if k < (npq + npv):
                            self.loads[k] += load.S.real / self.Sbase
                        else:
                            self.loads[k] += load.S.imag / self.Sbase
                    else:
                        pass

                # Skip rows with neither calculated power nor injection.
                # FIX: the original used 'is 0' (literal identity). A plain
                # '== 0' cannot be used because pulp overloads '==' to build
                # constraints, so test for a plain numeric zero instead.
                if isinstance(calculated_node_power, (int, float)) and calculated_node_power == 0 \
                        and isinstance(node_power_injection, (int, float)) and node_power_injection == 0:
                    pass  # nodes without injection or generation
                else:
                    # add the restriction
                    if self.load_shedding:
                        self.problem.add(
                            calculated_node_power == node_power_injection - self.loads[k] + self.load_shed[k],
                            self.circuit.buses[i].name + '_ct_node_mismatch_' + str(k))

                        # if there is no load at the node, do not allow load shedding
                        if len(self.circuit.buses[i].loads) == 0:
                            self.problem.add(self.load_shed[k] == 0.0,
                                             self.circuit.buses[i].name + '_ct_null_load_shed_' + str(k))
                    else:
                        self.problem.add(calculated_node_power == node_power_injection - self.loads[k],
                                         self.circuit.buses[i].name + '_ct_node_mismatch_' + str(k))
        else:
            # Use the load profile values at index=t_idx
            # FIX: was 'self.pqpv', an attribute that does not exist (AttributeError)
            for k, i in enumerate(self.pvpqpq):
                # these restrictions come from the build step, to be fulfilled with the load now
                node_power_injection = self.p_restrictions[k]
                calculated_node_power = self.s_restrictions[k]

                # add the nodal demand (row-indexed, see the default branch)
                for load in self.circuit.buses[i].loads:
                    if load.active:
                        if k < (npq + npv):
                            self.loads[k] += load.P_prof.values[t_idx] / self.Sbase
                        else:
                            self.loads[k] += load.Q_prof.values[t_idx] / self.Sbase
                    else:
                        pass

                # add the restriction
                if self.load_shedding:
                    self.problem.add(
                        calculated_node_power == node_power_injection - self.loads[k] + self.load_shed[k],
                        self.circuit.buses[i].name + '_ct_node_mismatch_' + str(k))

                    # if there is no load at the node, do not allow load shedding
                    if len(self.circuit.buses[i].loads) == 0:
                        self.problem.add(self.load_shed[k] == 0.0,
                                         self.circuit.buses[i].name + '_ct_null_load_shed_' + str(k))
                else:
                    self.problem.add(calculated_node_power == node_power_injection - self.loads[k],
                                     self.circuit.buses[i].name + '_ct_node_mismatch_' + str(k))

    def solve(self):
        """
        Solve the LP OPF problem (once) and update the voltage solution.
        """
        # if there is no problem, build it (build() may set potential_errors)
        if self.problem is None:
            self.build()

        if self.potential_errors:
            self.solved = False
            return

        print('Solving LP')
        print('Load shedding:', self.load_shedding)

        # FIX: the original solved the problem twice; solve exactly once (CBC)
        self.problem.solve()
        self.problem.writeLP('ac_opf.lp')  # dump the model for debugging

        # The status of the solution is printed to the screen
        print("Status:", pulp.LpStatus[self.problem.status])

        # The optimised objective function value is printed to the screen
        print("Cost =", pulp.value(self.problem.objective), '€')

        self.solved = True

        # compose the results vector ###################################################################################
        npv = len(self.pv)
        npq = len(self.pq)

        x_inc = np.zeros(self.nn)
        for i, var in enumerate(self.dx_var):
            x_inc[i] = var.value()

        # set the pv voltages: angle increment, module kept
        va_pv = x_inc[0:npv]
        vm_pv = np.abs(self.V[self.pv])
        self.V[self.pv] = vm_pv * np.exp(1j * va_pv)

        # set the pq voltages: angle and module increments
        va_pq = x_inc[npv:npv + npq]
        vm_pq = np.abs(self.V[self.pq]) + x_inc[npv + npq:]
        self.V[self.pq] = vm_pq * np.exp(1j * va_pq)

    def print(self):
        """Print the voltage, branch flow/loading and generation dispatch results."""
        print('Voltage solution')

        # compose voltages results
        df_v = pd.DataFrame(data=np.c_[np.abs(self.V), np.angle(self.V), self.V.real, self.V.imag],
                            columns=['Module', 'Angle(rad)', 'Real', 'Imag'],
                            index=['Bus' + str(i) for i in range(self.V.shape[0])])

        # compose branches results
        flows = np.zeros(self.nbranch)
        loading = np.zeros(self.nbranch)
        br_names = [None] * self.nbranch
        for k in range(self.nbranch):
            flows[k] = abs(self.flow_ij[k].value()) * self.Sbase
            loading[k] = flows[k] / self.circuit.branches[k].rate * 100.0
            br_names[k] = 'Branch ' + str(k)
        df_f = pd.DataFrame(data=np.c_[flows, loading],
                            columns=['Flow (MW)', 'Loading (%)'],
                            index=br_names)

        # compose generation results
        generation = np.zeros(len(self.PG))
        gen_names = [None] * len(self.PG)
        for k, gen_var in enumerate(self.PG):
            generation[k] = gen_var.value() * self.Sbase
            gen_names[k] = 'Gen' + str(k)
        df_g = pd.DataFrame(data=generation,
                            columns=['Gen(MW)'],
                            index=gen_names)

        print(df_v)
        print(df_f)
        print(df_g)
if __name__ == '__main__':
    # Demo entry point: load a small example grid, run the linearized
    # AC OPF on it and report voltages, flows and dispatch.
    print('Loading...')
    circuit = FileOpen('lynn5buspv.xlsx').open()

    print('Solving...')
    # declare and solve problem
    opf = AcOPf(circuit)
    opf.build()
    opf.set_loads()
    opf.solve()
    opf.print()
|
{"hexsha": "f814719e3763efba2c359a135d19a864304bd5e4", "size": 20336, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/research/opf/ac_opf.py", "max_stars_repo_name": "mzy2240/GridCal", "max_stars_repo_head_hexsha": "0352f0e9ce09a9c037722bf2f2afc0a31ccd2880", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 284, "max_stars_repo_stars_event_min_datetime": "2016-01-31T03:20:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T21:16:52.000Z", "max_issues_repo_path": "src/research/opf/ac_opf.py", "max_issues_repo_name": "mzy2240/GridCal", "max_issues_repo_head_hexsha": "0352f0e9ce09a9c037722bf2f2afc0a31ccd2880", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 94, "max_issues_repo_issues_event_min_datetime": "2016-01-14T13:37:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T03:13:56.000Z", "max_forks_repo_path": "src/research/opf/ac_opf.py", "max_forks_repo_name": "mzy2240/GridCal", "max_forks_repo_head_hexsha": "0352f0e9ce09a9c037722bf2f2afc0a31ccd2880", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 84, "max_forks_repo_forks_event_min_datetime": "2016-03-29T10:43:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T16:26:55.000Z", "avg_line_length": 40.5099601594, "max_line_length": 131, "alphanum_fraction": 0.4850511408, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4342}
|
# The random pools in pool_info were created by uniformly randomly sampling from
# all images in a particular split. Sometimes this means the list of 100 images
# in a random pool contains the root image itself. In order to avoid that, this
# script simply moves the duplicate to the back of the list.
import pickle as pkl

import numpy as np

POOL_INFO_PATH = 'data/pool_info_v3.pkl'
NEW_POOL_INFO_PATH = 'data/pool_info_v4.pkl'


def move_duplicate_to_back(pool_idxs, img_idx):
    """Return pool_idxs with the occurrence of img_idx moved to the end.

    If img_idx does not occur in pool_idxs, the array is returned unchanged.
    Exactly one occurrence is assumed (asserted), which matches how the
    random pools were sampled.

    :param pool_idxs: 1-D numpy array of image indices forming one pool
    :param img_idx: index of the root image of that pool
    :return: 1-D numpy array with the duplicate (if any) moved to the back
    """
    # single scan: np.where gives both the membership test and the position
    dup_idxs = np.where(pool_idxs == img_idx)[0]
    if len(dup_idxs) == 0:
        return pool_idxs
    assert len(dup_idxs) == 1, 'easy to deal with, but unlikely'
    dup_idx = dup_idxs[0]
    return np.concatenate([pool_idxs[:dup_idx],
                           pool_idxs[dup_idx + 1:],
                           [img_idx]])


def main():
    """Load the pool info, de-duplicate every pool and save the new version."""
    with open(POOL_INFO_PATH, 'rb') as f:
        pool_info = pkl.load(f)

    for split in pool_info:
        img_ids = pool_info[split]['image_ids']
        for difficulty in ['easy', 'rand', 'hard']:
            pool = pool_info[split][difficulty]
            for img_idx, _ in enumerate(img_ids):
                pool[img_idx] = move_duplicate_to_back(pool[img_idx], img_idx)

    with open(NEW_POOL_INFO_PATH, 'wb') as f:
        pkl.dump(pool_info, f)


if __name__ == '__main__':
    # guard so the module can be imported (e.g. for testing) without
    # touching the pickle files
    main()
|
{"hexsha": "20dfe6e0f5a9a0638aaa384dee7fd218f7f10a2c", "size": 1368, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/filter_pool_info.py", "max_stars_repo_name": "soumye/dialog_without_dialog", "max_stars_repo_head_hexsha": "9f95d6fb457659f9007445d9036b94e639bddd8b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-08T23:19:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-21T11:12:48.000Z", "max_issues_repo_path": "tools/filter_pool_info.py", "max_issues_repo_name": "soumye/dialog_without_dialog", "max_issues_repo_head_hexsha": "9f95d6fb457659f9007445d9036b94e639bddd8b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-01-08T02:08:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-15T23:51:02.000Z", "max_forks_repo_path": "tools/filter_pool_info.py", "max_forks_repo_name": "soumye/dialog_without_dialog", "max_forks_repo_head_hexsha": "9f95d6fb457659f9007445d9036b94e639bddd8b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-10T15:53:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-11T03:24:00.000Z", "avg_line_length": 39.0857142857, "max_line_length": 80, "alphanum_fraction": 0.6096491228, "include": true, "reason": "import numpy", "num_tokens": 327}
|
(*
Copyright 2022 ZhengPu Shi
This file is part of CoqExt. It is distributed under the MIT
"expat license". You should have recieved a LICENSE file with it.
purpose : Basic configuration (Library, Notations, Warning, etc.)
author : ZhengPu Shi
date : 2022.06
remark :
1. Basic libraries in whole project
  2. Reserved notations for consistency.
https://coq.inria.fr/distrib/V8.13.2/refman/language/coq-library.html
3. Eliminate some warning.
https://coq.inria.fr/distrib/V8.13.2/refman/user-extensions/
syntax-extensions.html?highlight=warning
4. Customized tactics.
*)
(* ######################################################################### *)
(** * Basic libraries *)
Require Export Coq.Classes.Morphisms. (* respectful, ==> *)
Require Export Coq.Setoids.Setoid. (* *)
Require Export Coq.Classes.SetoidTactics. (* add_morphism_tactic *)
Require Export Coq.Relations.Relations. (* equivalence *)
Require Export Coq.Bool.Bool. (* reflect *)
Require Export Ring. (* ring *)
Require Export Field. (* field *)
Require Export Coq.Logic.Classical.
Require Export Coq.Logic.FunctionalExtensionality.
(* ######################################################################### *)
(** * Reserved Notations *)
(** Reserved Notations, to keep same precedence and associativity *)
Reserved Infix "==" (at level 70, no associativity).
Reserved Notation "a != b" (at level 70, no associativity).
Reserved Infix "=?" (at level 70, no associativity).
Reserved Infix "+" (at level 50, left associativity).
Reserved Infix "-" (at level 50, left associativity).
Reserved Infix "*" (at level 40, left associativity).
Reserved Infix "/" (at level 40, left associativity).
Reserved Infix "c*" (at level 40, left associativity).
Reserved Infix "*c" (at level 40, left associativity).
Reserved Infix "\o" (at level 50, no associativity).
Reserved Infix "⋅" (at level 40, no associativity).
Reserved Notation "- a" (at level 35, right associativity).
Reserved Notation "/ a" (at level 35, right associativity).
Reserved Notation "a \T" (at level 34, left associativity).
Reserved Notation "m1 @ m2" (at level 30, no associativity).
(* this level is consistent with Mathcomp.ssreflect.ssrnotations.v *)
(* safe access (any index) *)
Reserved Notation "m ! i ! j" (at level 20, i at next level).
Reserved Notation "v ! i" (at level 20, i at next level).
(* unsafe access (developer must give valid index) *)
Reserved Notation "m $ i $ j" (at level 20, i at next level).
Reserved Notation "v $ i" (at level 20, i at next level).
(* ######################################################################### *)
(** * Eliminate Warning. *)
(* Export Set Warnings "-notation-overridden". *)
(* ######################################################################### *)
(** * Customized tactics *)
(** ** Tactics with a short name *)
(* Shorthand: generalize the goal over [k] and everything depending on it. *)
Global Ltac gd k := generalize dependent k.
(* repeat split *)
(* Split every top-level conjunction in the goal into separate subgoals. *)
Ltac ssplit :=
  repeat
  match goal with
  | |- _ /\ _ => split
  end.
(* Invert hypothesis [H], then discard it and substitute the equations the
   inversion produced. *)
Ltac inv H :=
  inversion H; clear H; subst.
(* Unfold the morphism machinery so a [Proper] goal becomes a plain
   statement about [respectful] application. *)
Ltac simp_proper :=
  unfold Proper; unfold respectful.
(* ######################################################################### *)
(** * Global notations *)
(* this level is consistent with coq.ssr.ssrbool.v *)
(* Notation "~~ b" := (negb b) (at level 35, right associativity) : bool_scope. *)
(* ######################################################################### *)
(** * Global coercions *)
(** bool to Prop *)
(* Interpret a boolean as the proposition that it equals [true], and let Coq
   insert this conversion implicitly wherever a [Prop] is expected. *)
Definition is_true (b : bool) : Prop := b = true.
Coercion is_true : bool >-> Sortclass.
(* Sanity checks: thanks to the coercion, bare booleans can appear as goals. *)
Goal true.
  apply eq_refl. Qed.
Goal negb false.
  simpl. apply eq_refl. Qed.
(* Boolean equality on [nat] reflects propositional equality; the proof also
   exercises the [gd] tactic. *)
Example eqnP (n m : nat) : reflect (n = m) (Nat.eqb n m).
Proof.
  gd m. induction n; intros [|m]; simpl; try constructor; auto.
  destruct IHn with m; subst; constructor; auto.
Qed.
(* ######################################################################### *)
(** * General properties of algebraic structures *)
(* Section general_props. *)
(* Context {A B : Type}. *)
(* Variable fa ga : A -> A -> A. *)
(* Infix "+" := fa. *)
(* Infix "*" := ga. *)
(* Variable fb : B -> B -> B. *)
(* Infix "⊕" := fb (at level 50). *)
(* End general_props. *)
(* ######################################################################### *)
(** * Usually used scopes *)
(** Scope for matrix/vector/list element type *)
Declare Scope A_scope.
Delimit Scope A_scope with A.
Open Scope A.
(** Scope for list type *)
Declare Scope list_scope.
Delimit Scope list_scope with list.
Open Scope list.
(** Scope for list list type *)
Declare Scope dlist_scope.
Delimit Scope dlist_scope with dlist.
Open Scope dlist.
(** Scope for matrix type *)
Declare Scope mat_scope.
Delimit Scope mat_scope with mat.
Open Scope mat.
(** Scope for vector type *)
Declare Scope vec_scope.
Delimit Scope vec_scope with vec.
Open Scope vec_scope.
|
{"author": "zhengpushi", "repo": "CoqMatrix", "sha": "28fa5f96e38a07659cfd373e09b0e75c24c22bfd", "save_path": "github-repos/coq/zhengpushi-CoqMatrix", "path": "github-repos/coq/zhengpushi-CoqMatrix/CoqMatrix-28fa5f96e38a07659cfd373e09b0e75c24c22bfd/CoqMatrix/CoqExt/BasicConfig.v"}
|
[STATEMENT]
lemma mapl_G_comp: "mapl_G l1 l2 \<circ> mapl_G l1' l2' = mapl_G (l1 \<circ> l1') (l2 \<circ> l2')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mapl_G l1 l2 \<circ> mapl_G l1' l2' = mapl_G (l1 \<circ> l1') (l2 \<circ> l2')
[PROOF STEP]
unfolding mapl_G_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_G l1 l2 id id id id \<circ> map_G l1' l2' id id id id = map_G (l1 \<circ> l1') (l2 \<circ> l2') id id id id
[PROOF STEP]
apply (rule trans)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. map_G l1 l2 id id id id \<circ> map_G l1' l2' id id id id = ?s
2. ?s = map_G (l1 \<circ> l1') (l2 \<circ> l2') id id id id
[PROOF STEP]
apply (rule map_G_comp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_G (l1 \<circ> l1') (l2 \<circ> l2') (id \<circ> id) (id \<circ> id) (id \<circ> id) (id \<circ> id) = map_G (l1 \<circ> l1') (l2 \<circ> l2') id id id id
[PROOF STEP]
apply (simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 485, "file": "BNF_CC_Axiomatised_BNF_CC", "length": 5}
|
(*===========================================================================
Specification logic -- step-indexed and with hidden frames
This is a step-indexed version of the specification logic defined in Chapter
3 of Krishnaswami's thesis, which is adapted from Birkedal et al.
A specification S is a predicate on nat and SPred, where it holds for a pair
(k,P) if it denotes a "desirable" machine execution for up to k steps
starting from any machine configuration that has a sub-state satisfying P.
The meaning of "desirable" depends on S; it might, for example, mean "safe
under certain assumptions". Note the wording "up to" and "sub-state" above:
they suggest that formally, S must be closed under decreasing k and starring
on assertions to P.
The utility of this is to get a higher-order frame rule in the specification
logic, which is very valuable for a low-level language that does not have
structured control flow and therefore does not have the structure required
to define a Hoare triple. When frames cannot be fixed by the symmetry of
pre- and postconditions, they must float freely around the specification
logic, which is exactly what the higher-order frame rule allows -- see
[spec_frame].
===========================================================================*)
Require Import ssreflect ssrbool ssrfun ssrnat eqtype tuple seq fintype.
Require Import bitsrep procstate procstatemonad SPred septac.
Require Import instr eval monad monadinst reader step cursor.
Require Import common_tactics.
(* Importing this file really only makes sense if you also import ilogic, so we
force that. *)
Require Export ilogic later.
Transparent ILPre_Ops.
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
Require Import Setoid RelationClasses Morphisms.
(* The ssreflect inequalities on nat are just getting in the way here. They
don't work with non-Equivalence setoids. *)
Local Open Scope coq_nat_scope.
(* The natural numbers in descending order. *)
(*
Instance ge_Pre: PreOrder ge.
Proof. repeat constructor. hnf. eauto with arith. Qed.
*)
(* SPred, ordered by extension with **. *)
Definition extSP (P Q: SPred) := exists R, (P ** R) -|- Q.
Instance extSP_Pre: PreOrder extSP.
Proof.
split.
- exists empSP. apply empSPR.
- move=> P1 P2 P3 [R1 H1] [R2 H2]. exists (R1 ** R2).
by rewrite <-H1, sepSPA in H2.
Qed.
Instance extSP_impl_m :
Proper (extSP --> extSP ++> Basics.impl) extSP.
Proof.
rewrite /extSP. intros P P' [RP HP] Q Q' [RQ HQ] [R H].
eexists. rewrite -HQ -H -HP. split; by ssimpl.
Qed.
Instance extSP_iff_m :
Proper (lequiv ==> lequiv ==> iff) extSP.
Proof.
rewrite /extSP. intros P P' HP Q Q' HQ.
setoid_rewrite HP. setoid_rewrite HQ. done.
Qed.
Instance extSP_sepSP_m :
Proper (extSP ++> extSP ++> extSP) sepSP.
Proof.
rewrite /extSP. intros P P' [RP HP] Q Q' [RQ HQ].
eexists. rewrite -HP -HQ. split; by ssimpl.
Qed.
Instance subrelation_equivSP_extSP: subrelation lequiv extSP.
Proof. rewrite /extSP => P P' HP. exists empSP. rewrite HP. apply empSPR. Qed.
Instance subrelation_equivSP_inverse_extSP: subrelation lequiv (inverse extSP).
Proof. rewrite /extSP => P P' HP. exists empSP. rewrite HP. apply empSPR. Qed.
Hint Extern 0 (extSP ?P ?P) => reflexivity.
(*
Definition of "spec" and properties of it as a logic
*)
Section ILSpecSect.
Local Existing Instance ILPre_Ops.
Local Existing Instance ILPre_ILogic.
Local Existing Instance ILLaterPreOps.
Local Existing Instance ILLaterPre.
Definition spec := ILPreFrm ge (ILPreFrm extSP Prop).
Global Instance ILSpecOps: ILLOperators spec | 2 := _.
Global Instance ILOps: ILogicOps spec | 2 := _.
Global Instance ILSpec: ILLater spec | 2 := _.
End ILSpecSect.
Local Obligation Tactic := try solve [Tactics.program_simpl|auto].
(* This uses 'refine' instead of Program Definition to work around a Coq 8.4
bug. *)
(* Smart constructor for [spec]: builds a specification from a raw predicate
   [f] on step counts and assertions, given proofs that [f] is downwards
   closed in the step index ([Hnat]) and monotone under extension of the
   assertion with extra frames ([HSPred]). *)
Definition mkspec (f: nat -> SPred -> Prop)
    (Hnat: forall k P, f (S k) P -> f k P)
    (HSPred: forall k P P', extSP P P' -> f k P -> f k P') : spec.
refine (mkILPreFrm (fun k => mkILPreFrm (f k) _) _).
Proof.
  have Hnat' : forall k' k P, k' >= k -> f k' P -> f k P.
  - move => k' k P. elim; by auto.
  move=> k' k Hk P /= Hf.
  eapply Hnat'; eassumption.
Grab Existential Variables.
Proof.
  move=> P P' HP /= Hf. eapply HSPred; eassumption.
Defined.
Implicit Arguments mkspec [].
Definition spec_fun (S: spec) := fun k P => S k P.
Coercion spec_fun: spec >-> Funclass.
Add Parametric Morphism (S: spec) : S with signature
ge ++> extSP ++> Basics.impl
as spec_impl_m.
Proof.
rewrite /spec_fun. move => k k' Hk P P' HP.
rewrite -[Basics.impl _ _]/(@lentails _ ILogicOps_Prop _ _).
by rewrite ->HP, ->Hk.
Qed.
Add Parametric Morphism (S: spec) : S with signature
eq ==> lequiv ==> iff
as spec_iff_m.
Proof.
move=> k P P' HP. split => HS.
- by rewrite ->HP in HS.
- by rewrite <-HP in HS.
Qed.
(* The default definition of spec equivalence is sometimes inconvenient.
Here is an alternative one. *)
Lemma spec_equiv (S S': spec):
(forall k P, S k P <-> S' k P) -> S -|- S'.
Proof. move=> H; split => k P /=; apply H. Qed.
Lemma spec_downwards_closed (S: spec) P k k':
k <= k' -> S k' P -> S k P.
Proof.
move=> Hk. have Hk' : k' >= k by auto. by rewrite ->Hk'.
Qed.
(*
Properties of spec_at
*)
(* [S @ R] fixes the hidden frame [R]: the result holds of an assertion [P]
   exactly when [S] holds of [R ** P]. The two obligations re-establish the
   closure properties [mkspec] demands. *)
Program Definition spec_at (S: spec) (R: SPred) :=
  mkspec (fun k P => S k (R ** P)) _ _.
Next Obligation.
  move=> S R k P P'. eauto using spec_downwards_closed.
Qed.
Next Obligation.
  move=> S R k P P' HP. by rewrite ->HP.
Qed.
Infix "@" := spec_at (at level 44, left associativity).
Lemma spec_frame (S : spec) (R : SPred) :
S |-- S @ R.
Proof.
move => k P H. rewrite /spec_at /=.
assert (extSP P (P ** R)) as HPR by (exists R; reflexivity).
rewrite ->sepSPC in HPR. by rewrite <-HPR.
Qed.
(* For variations of this instance with lentails instead of extSP, look at
[spec_at_covar_m] and [spec_at_contra_m]. *)
Instance spec_at_entails_m:
Proper (lentails ++> extSP ++> lentails) spec_at.
Proof.
move=> S S' HS R R' HR k P. rewrite /spec_at /= /spec_fun /=.
by rewrite <- HR, -> HS.
Qed.
Instance spec_at_equiv_m:
Proper (lequiv ++> lequiv ++> lequiv) spec_at.
Proof.
move => S S' HS R R' HR.
split; cancel2; by (rewrite HS || rewrite HR).
Qed.
Lemma spec_at_emp S: S @ empSP -|- S.
Proof.
split; last exact: spec_frame.
move=> k P. rewrite /spec_at /spec_fun /=. by rewrite empSPL.
Qed.
Lemma spec_at_at S R R': S @ R @ R' -|- S @ (R ** R').
Proof.
apply spec_equiv => k P. rewrite /spec_at /spec_fun /=.
split; by [rewrite -sepSPA | rewrite sepSPA].
Qed.
Lemma spec_at_forall {T} F R: (Forall x:T, F x) @ R -|- Forall x:T, (F x @ R).
Proof. split; rewrite /= /spec_fun /=; auto. Qed.
Lemma spec_at_exists {T} F R: (Exists x:T, F x) @ R -|- Exists x:T, (F x @ R).
Proof. split; rewrite /= /spec_fun /= => k P [x Hx]; eauto. Qed.
Lemma spec_at_true R: ltrue @ R -|- ltrue.
Proof. split; rewrite /= /spec_fun /=; auto. Qed.
Lemma spec_at_false R: lfalse @ R -|- lfalse.
Proof. split; rewrite /= /spec_fun /=; auto. Qed.
Lemma spec_at_and S S' R: (S //\\ S') @ R -|- (S @ R) //\\ (S' @ R).
Proof. split; rewrite /spec_at /= /spec_fun /= => k P H; auto. Qed.
Lemma spec_at_or S S' R: (S \\// S') @ R -|- (S @ R) \\// (S' @ R).
Proof. split; rewrite /spec_at /= /spec_fun /= => k P H; auto. Qed.
Lemma spec_at_propimpl p S R: (p ->> S) @ R -|- p ->> (S @ R).
Proof. split; rewrite /= /spec_fun /=; auto. Qed.
Lemma spec_at_propand p S R: (p /\\ S) @ R -|- p /\\ (S @ R).
Proof. split; rewrite /= /spec_fun /=; auto. Qed.
Lemma spec_at_impl S S' R: (S -->> S') @ R -|- (S @ R) -->> (S' @ R).
Proof.
split.
- apply: limplAdj. rewrite <-spec_at_and.
cancel2. exact: landAdj.
- rewrite /spec_at /= /spec_fun /= => k P H.
(* This proof follows the corresponding one (Lemma 25) in Krishnaswami's
PhD thesis. *)
intros k' Hk' P' [Pext HPext] HS.
move/(_ k' Hk' (P ** Pext)): H => H.
rewrite ->sepSPA in HPext.
(* The rewriting by using explicit subrelation instances here is a bit
awkward. *)
rewrite <-(subrelation_equivSP_extSP HPext).
apply H; first by exists Pext.
rewrite ->(subrelation_equivSP_inverse_extSP HPext). done.
Qed.
Hint Rewrite
spec_at_at
spec_at_true
spec_at_false
spec_at_and
spec_at_or
spec_at_impl
@spec_at_forall
@spec_at_exists
spec_at_propimpl
spec_at_propand
: push_at.
(* This lemma is what [spec_at_at] really should be in order to be consistent
with the others in the hint database, but this variant is not suitable for
autorewrite. *)
Lemma spec_at_swap S R1 R2:
S @ R1 @ R2 -|- S @ R2 @ R1.
Proof. autorewrite with push_at. by rewrite sepSPC. Qed.
(*
"Rule of consequence" for spec_at
*)
(* Variance of a spec in its hidden frame: [AtCovar S] allows weakening the
   frame of [S @ -], [AtContra S] allows strengthening it. *)
Class AtCovar S := at_covar: forall P Q, P |-- Q -> S @ P |-- S @ Q.
Class AtContra S := at_contra: forall P Q, P |-- Q -> S @ Q |-- S @ P.
Instance: Proper (lequiv ==> iff) AtCovar.
Proof. move=> S S' HS. rewrite /AtCovar. by setoid_rewrite HS. Qed.
Instance: Proper (lequiv ==> iff) AtContra.
Proof. move=> S S' HS. rewrite /AtContra. by setoid_rewrite HS. Qed.
Instance AtCovar_forall A S {HS: forall a:A, AtCovar (S a)} :
AtCovar (Forall a, S a).
Proof.
move=> P Q HPQ. autorewrite with push_at. cancel1 => a.
by rewrite ->(HS _ _ _ HPQ).
Qed.
Instance AtContra_forall A S {HS: forall a:A, AtContra (S a)} :
AtContra (Forall a, S a).
Proof.
move=> P Q HPQ. autorewrite with push_at. cancel1 => a.
by rewrite ->(HS _ _ _ HPQ).
Qed.
Instance AtCovar_and S1 S2 {H1: AtCovar S1} {H2: AtCovar S2} :
AtCovar (S1 //\\ S2).
Proof. rewrite land_is_forall. by apply AtCovar_forall => [[]]. Qed.
Instance AtContra_and S1 S2 {H1: AtContra S1} {H2: AtContra S2} :
AtContra (S1 //\\ S2).
Proof. rewrite land_is_forall. by apply AtContra_forall => [[]]. Qed.
Instance AtCovar_true : AtCovar ltrue.
Proof. rewrite ltrue_is_forall. by apply AtCovar_forall => [[]]. Qed.
Instance AtContra_true : AtContra ltrue.
Proof. rewrite ltrue_is_forall. by apply AtContra_forall => [[]]. Qed.
Instance AtCovar_exists A S {HS: forall a:A, AtCovar (S a)} :
AtCovar (Exists a, S a).
Proof.
move=> P Q HPQ. autorewrite with push_at. cancel1 => a.
by rewrite ->(HS _ _ _ HPQ).
Qed.
Instance AtContra_exists A S {HS: forall a:A, AtContra (S a)} :
AtContra (Exists a, S a).
Proof.
move=> P Q HPQ. autorewrite with push_at. cancel1 => a.
by rewrite ->(HS _ _ _ HPQ).
Qed.
Instance AtCovar_or S1 S2 {H1: AtCovar S1} {H2: AtCovar S2} :
AtCovar (S1 \\// S2).
Proof. rewrite lor_is_exists. by apply AtCovar_exists => [[]]. Qed.
Instance AtContra_or S1 S2 {H1: AtContra S1} {H2: AtContra S2} :
AtContra (S1 \\// S2).
Proof. rewrite lor_is_exists. by apply AtContra_exists => [[]]. Qed.
Instance AtCovar_false : AtCovar lfalse.
Proof. rewrite lfalse_is_exists. by apply AtCovar_exists => [[]]. Qed.
Instance AtContra_false : AtContra lfalse.
Proof. rewrite lfalse_is_exists. by apply AtContra_exists => [[]]. Qed.
Instance AtCovar_impl S1 S2 {H1: AtContra S1} {H2: AtCovar S2} :
AtCovar (S1 -->> S2).
Proof.
move=> P Q HPQ. autorewrite with push_at.
by rewrite ->(H1 _ _ HPQ), <-(H2 _ _ HPQ).
Qed.
Instance AtContra_impl S1 S2 {H1: AtCovar S1} {H2: AtContra S2} :
AtContra (S1 -->> S2).
Proof.
move=> P Q HPQ. autorewrite with push_at.
by rewrite ->(H1 _ _ HPQ), <-(H2 _ _ HPQ).
Qed.
Instance AtCovar_at S R {HS: AtCovar S} : AtCovar (S @ R).
Proof.
move=> P Q HPQ. rewrite [_ @ P]spec_at_swap [_ @ Q]spec_at_swap.
by rewrite <-(HS _ _ HPQ).
Qed.
Instance AtContra_at S R {HS: AtContra S} : AtContra (S @ R).
Proof.
move=> P Q HPQ. rewrite [_ @ Q]spec_at_swap [_ @ P]spec_at_swap.
by rewrite <-(HS _ _ HPQ).
Qed.
Instance spec_at_covar_m S {HS: AtCovar S} :
Proper (lentails ++> lentails) (spec_at S).
Proof. move=> P Q HPQ. by apply HS. Qed.
Instance spec_at_contra_m S {HS: AtContra S} :
Proper (lentails --> lentails) (spec_at S).
Proof. move=> P Q HPQ. by apply HS. Qed.
Instance at_contra_entails_m (S: spec) `{HContra: AtContra S}:
Proper (ge ++> lentails --> lentails) S.
Proof.
move => k k' Hk P P' HP H. rewrite <-Hk.
specialize (HContra P' P HP k empSP).
simpl in HContra. rewrite ->!empSPR in HContra. by auto.
Qed.
Instance at_covar_entails_m (S: spec) `{HCovar: AtCovar S}:
Proper (ge ++> lentails ++> lentails) S.
Proof.
move => k k' Hk P P' HP H. rewrite <-Hk.
specialize (HCovar P P' HP k empSP).
simpl in HCovar. rewrite ->!empSPR in HCovar. by auto.
Qed.
(*
Rules for pulling existentials from the second argument of spec_at. These
rules together form a spec-level analogue of the "existential rule" for
Hoare triples.
Whether an existential quantifier can be pulled out of the R in [S @ R]
depends on S. Rewrite by <-at_ex to pull it out from a positive position in
the goal. Rewrite by ->at_ex' to pull it out from a negative position in the
goal.
*)
Class AtEx S := at_ex: forall A f, Forall x:A, S @ f x |-- S @ lexists f.
(* The reverse direction of at_ex. This not only follows from AtContra but is
actually equivalent to it. The proof is in revision 22259 (spec.v#22). *)
Lemma at_ex' S {HS: AtContra S} :
forall A f, S @ lexists f |-- Forall x:A, S @ f x.
Proof.
move=> A f. apply: lforallR => x. apply HS. ssplit. reflexivity.
Qed.
Instance: Proper (lequiv ==> iff) AtEx.
Proof. move=> S S' HS. rewrite /AtEx. by setoid_rewrite HS. Qed.
Lemma spec_at_and_or S R1 R2 {HS: AtEx S}:
S @ R1 //\\ S @ R2 |-- S @ (R1 \\// R2).
Proof.
rewrite ->land_is_forall, lor_is_exists.
transitivity (Forall b, S @ (if b then R1 else R2)).
- apply: lforallR => [[|]].
- by apply lforallL with true.
- by apply lforallL with false.
apply: at_ex.
Qed.
Lemma spec_at_or_and S R1 R2 {HNeg: AtContra S}:
S @ (R1 \\// R2) |-- S @ R1 //\\ S @ R2.
Proof.
rewrite ->land_is_forall, lor_is_exists.
transitivity (Forall b, S @ (if b then R1 else R2)); last first.
- apply: lforallR => [[|]].
- by apply lforallL with true.
- by apply lforallL with false.
apply: at_ex'.
Qed.
Instance AtEx_forall A S {HS: forall a:A, AtEx (S a)} :
AtEx (Forall a, S a).
Proof.
move=> T f.
rewrite -> spec_at_forall. apply: lforallR => a.
rewrite <- at_ex; cancel1 => x.
rewrite spec_at_forall; by apply lforallL with a.
Qed.
Instance AtEx_and S1 S2 {H1: AtEx S1} {H2: AtEx S2} : AtEx (S1 //\\ S2).
Proof. rewrite land_is_forall. by apply AtEx_forall => [[]]. Qed.
Instance AtEx_true : AtEx ltrue.
Proof. rewrite ltrue_is_forall. by apply AtEx_forall => [[]]. Qed.
Instance AtEx_impl S1 S2 {H1: AtContra S1} {H2: AtEx S2} : AtEx (S1 -->> S2).
Proof.
move=> A f. setoid_rewrite spec_at_impl.
rewrite ->at_ex', <-at_ex => //. (*TODO: why does at_ex' leave a subgoal? *)
apply: limplAdj. apply: lforallR => x. apply: landAdj.
apply lforallL with x. apply: limplAdj. rewrite landC. apply: landAdj.
apply lforallL with x. apply: limplAdj. rewrite landC. apply: landAdj.
reflexivity.
Qed.
Instance AtEx_at S R {HS: AtEx S} : AtEx (S @ R).
Proof.
move=> A f. rewrite spec_at_at.
assert (R ** lexists f -|- Exists x, R ** f x) as Hpull.
- split; sbazooka.
rewrite Hpull. rewrite <-at_ex. cancel1 => x. by rewrite spec_at_at.
Qed.
(* The payoff for all this: a tactic for pulling quantifiers *)
Module Export PullQuant.
Implicit Type S: spec.
Definition PullQuant {A} S (f: A -> spec) := lforall f |-- S.
Lemma pq_rhs S {A} S' (f: A -> spec) {HPQ: PullQuant S' f}:
(forall a:A, S |-- f a) ->
S |-- S'.
Proof. move=> H. red in HPQ. rewrite <-HPQ. by apply: lforallR. Qed.
Lemma PullQuant_forall A (f: A -> spec): PullQuant (lforall f) f.
Proof. red. reflexivity. Qed.
(* Hint Resolve worked here in Coq 8.3 but not since 8.4. *)
Hint Extern 0 (PullQuant (lforall _) _) =>
apply PullQuant_forall : pullquant.
Lemma PullQuant_propimpl p S:
PullQuant (p ->> S) (fun H: p => S).
Proof. red. reflexivity. Qed.
(* Hint Resolve worked here in Coq 8.3 but not since 8.4. *)
Hint Extern 0 (PullQuant (?p ->> ?S) _) =>
apply (@PullQuant_propimpl p S) : pullquant.
Lemma pq_at_rec S R A f:
PullQuant S f ->
PullQuant (S @ R) (fun a:A => f a @ R).
Proof.
rewrite /PullQuant => Hf. rewrite <-Hf. by rewrite spec_at_forall.
Qed.
(* If we didn't find anything to pull from the frame itself, recurse under it. *)
(* Unfortunately, Hint Resolve doesn't work here. For some obscure reason,
there has to be an underscore for the last argument. *)
Hint Extern 1 (PullQuant (?S @ ?R) _) =>
eapply (@pq_at_rec S R _ _) : pullquant.
Lemma pq_impl S S' A f:
PullQuant S f ->
PullQuant (S' -->> S) (fun a:A => S' -->> f a).
Proof.
rewrite /PullQuant => Hf. rewrite <-Hf. apply: limplAdj.
apply: lforallR => a. apply: landAdj. eapply lforallL. reflexivity.
Qed.
Hint Extern 1 (PullQuant (?S' -->> ?S) _) =>
eapply (@pq_impl S S' _ _) : pullquant.
Import Existentials.
Lemma pq_at S t:
match find t with
| Some (mkf _ f) =>
AtEx S -> PullQuant (S @ eval t) (fun a => S @ f a)
| None => True
end.
Proof.
move: (@find_correct t). case: (find t) => [[A f]|]; last done.
move=> Heval HS. red. rewrite ->Heval. by rewrite <-at_ex.
Qed.
Hint Extern 0 (PullQuant (?S @ ?R) _) =>
let t := quote_term R in
apply (@pq_at S t); [apply _] : pullquant.
(* It's a slight breach of abstraction to [cbv [eval]] here, but it's easier
than dealing with it in the hints that use reflection. *)
(* For some reason, auto sometimes hangs when there are entailments among the
hypotheses. As a workaround, we clear those first. Another workaround is
to use [typeclasses eauto], but that sometimes fails. *)
Ltac specintro :=
eapply pq_rhs; [
repeat match goal with H: _ |-- _ |- _ => clear H end;
solve [auto with pullquant]
|];
instantiate;
cbv [eval].
Ltac specintros :=
specintro; let x := fresh "x" in move=> x; try specintros; move: x.
End PullQuant.
(*
The spec_reads connective, [S <@ R].
It is like spec_at but requires S to not only preserve the validity of R but
keep the memory in R's footprint unchanged.
*)
(* [S <@ R] ("S reads R"): [S] must hold with the hidden frame pinned to any
   exact machine sub-state [s] satisfying [R] -- so [S] may rely on [R] but
   must keep the memory in [R]'s footprint unchanged. *)
Definition spec_reads S R := Forall s : PState, (eq_pred s |-- R) ->> S @ eq_pred s.
Infix "<@" := spec_reads (at level 44, left associativity).
Instance spec_reads_entails:
Proper (lentails ++> lentails --> lentails) spec_reads.
Proof.
move=> S S' HS R R' HR. red in HR. rewrite /spec_reads. cancel1 => s.
by rewrite ->HS, ->HR.
Qed.
Instance spec_reads_equiv:
Proper (lequiv ==> lequiv ==> lequiv) spec_reads.
Proof.
move=> S S' HS R R' HR. rewrite /spec_reads. setoid_rewrite HS.
by setoid_rewrite HR.
Qed.
Lemma spec_reads_alt S R:
S <@ R -|- Forall s, (eq_pred s |-- R ** ltrue) ->> S @ eq_pred s.
Proof.
rewrite /spec_reads. split.
- specintros => s Hs. rewrite <-lentails_eq in Hs.
case: Hs => sR [sframe [Hs [HsR _]]].
apply lforallL with sR. apply lpropimplL.
- by apply-> lentails_eq.
etransitivity; [apply (spec_frame (eq_pred sframe))|]. autorewrite with push_at.
rewrite stateSplitsAs_eq; [|eapply Hs]. done.
- cancel1 => s. apply: lpropimplR => Hs. apply lpropimplL => //.
rewrite ->Hs. by ssimpl.
Qed.
(* This definition can be more convenient in metatheory. *)
Lemma spec_reads_alt_meta S R : S <@ R -|- Forall s, R s ->> S @ eq_pred s.
Proof. rewrite /spec_reads. by setoid_rewrite <-lentails_eq. Qed.
Lemma spec_reads_ex S T f: Forall x:T, S <@ f x -|- S <@ lexists f.
Proof.
setoid_rewrite spec_reads_alt_meta. split.
- specintros => s [x Hx]. apply lforallL with x. apply lforallL with s.
by apply lpropimplL.
- specintros => x s Hf.
apply lforallL with s. apply: lpropimplL => //. econstructor. eassumption.
Qed.
Lemma spec_reads_frame S R:
S |-- S <@ R.
Proof. rewrite /spec_reads. specintros => s Hs. apply spec_frame. Qed.
Lemma spec_reads_entails_at S {HEx: AtEx S} R:
S <@ R |-- S @ R.
Proof.
rewrite spec_reads_alt_meta. rewrite ->(ILFun_exists_eq R).
specintros => s Hs. apply lforallL with s. by apply: lpropimplL.
Qed.
Local Transparent ILFun_Ops SABIOps.
Lemma emp_unit : empSP -|- eq_pred sa_unit.
split; simpl; move => x H.
+ destruct H as [H _]; assumption.
+ exists H; tauto.
Qed.
Lemma spec_at_entails_reads S {HContra: AtContra S} R:
S @ R |-- S <@ R.
Proof. rewrite /spec_reads. specintros => s Hs. by rewrite <-Hs. Qed.
Lemma spec_reads_eq_at S s:
S <@ eq_pred s -|- S @ eq_pred s.
Proof.
rewrite /spec_reads. split.
- apply lforallL with s. exact: lpropimplL.
- specintros => s' Hs'. rewrite <-lentails_eq in Hs'.
simpl in Hs'; rewrite -> Hs'; reflexivity.
Qed.
Lemma spec_reads_emp S:
S <@ empSP -|- S.
Proof.
rewrite emp_unit spec_reads_eq_at -emp_unit. by rewrite spec_at_emp.
Qed.
Corollary spec_reads_byteIs S p b:
S <@ byteIs p b -|- S @ byteIs p b.
Proof. apply spec_reads_eq_at. Qed.
Corollary spec_reads_flagIs S (p:Flag) b:
S <@ (p~=b) -|- S @ (p~=b).
Proof. apply spec_reads_eq_at. Qed.
Corollary spec_reads_regIs S (p:AnyReg) b:
S <@ (p~=b) -|- S @ (p~=b).
Proof. apply spec_reads_eq_at. Qed.
Lemma spec_reads_merge S R1 R2:
S <@ R1 <@ R2 |-- S <@ (R1 ** R2).
Proof.
setoid_rewrite spec_reads_alt_meta.
specintros => s [s1 [s2 [Hs [Hs1 Hs2]]]].
apply lforallL with s2. apply lpropimplL; first done.
rewrite spec_reads_alt_meta.
assert ((Forall s', R1 s' ->> S @ eq_pred s') |-- S @ eq_pred s1) as Htmp.
- apply lforallL with s1. by apply lpropimplL.
rewrite ->Htmp => {Htmp}. autorewrite with push_at.
rewrite stateSplitsAs_eq; [|eapply Hs]. done.
Qed.
Lemma spec_reads_split S {HS: AtEx S} R1 R2:
S <@ (R1 ** R2) |-- S <@ R1 <@ R2.
Proof.
rewrite /spec_reads.
specintros => s2 Hs2 s1 Hs1. autorewrite with push_at.
rewrite (ILFun_exists_eq (eq_pred s1 ** eq_pred s2)).
specintros => s Hs. rewrite ->lentails_eq in Hs. apply lforallL with s.
apply lpropimplL => //. by rewrite <-Hs1, <-Hs2.
Qed.
Lemma spec_reads_swap' S R1 R2:
S <@ R1 <@ R2 |-- S <@ R2 <@ R1.
Proof.
rewrite /spec_reads.
specintros => s1 Hs1 s2 Hs2. rewrite spec_at_swap.
apply lforallL with s2. apply lpropimplL => //. rewrite spec_at_forall.
apply lforallL with s1. rewrite spec_at_propimpl. exact: lpropimplL.
Qed.
Lemma spec_reads_swap S R1 R2:
S <@ R1 <@ R2 -|- S <@ R2 <@ R1.
Proof. split; apply spec_reads_swap'. Qed.
Lemma spec_reads_forall A S R:
(Forall a:A, S a) <@ R -|- Forall a:A, (S a <@ R).
Proof.
rewrite /spec_reads. split.
- specintros => a s' Hs'. apply lforallL with s'. apply: lpropimplL => //.
autorewrite with push_at. by apply lforallL with a.
- specintros => s' Hs' a.
apply lforallL with a. apply lforallL with s'. exact: lpropimplL.
Qed.
Lemma spec_reads_true R: ltrue <@ R -|- ltrue.
Proof.
rewrite ltrue_is_forall. rewrite spec_reads_forall.
split; specintro => [[]].
Qed.
Lemma spec_reads_and S1 S2 R:
(S1 //\\ S2) <@ R -|- (S1 <@ R) //\\ (S2 <@ R).
Proof.
rewrite !land_is_forall. rewrite spec_reads_forall.
split; by cancel1 => [[]].
Qed.
Lemma spec_reads_exists A S R:
Exists a:A, (S a <@ R) |-- (Exists a:A, S a) <@ R.
Proof.
rewrite /spec_reads. apply: lexistsL => a. specintros => s' Hs'.
autorewrite with push_at. apply lexistsR with a. apply lforallL with s'.
exact: lpropimplL.
Qed.
Lemma spec_reads_or S1 S2 R:
(S1 <@ R) \\// (S2 <@ R) |-- (S1 \\// S2) <@ R.
Proof.
rewrite !lor_is_exists. rewrite <-spec_reads_exists. by cancel1 => [[]].
Qed.
Lemma spec_reads_impl S1 S2 R:
(S1 -->> S2) <@ R |-- S1 <@ R -->> S2 <@ R.
Proof.
rewrite /spec_reads. apply: limplAdj. specintros => s Hs.
apply: landAdj. apply lforallL with s. apply lpropimplL => //.
apply: limplAdj. rewrite landC. apply: landAdj.
apply lforallL with s. apply lpropimplL => //. apply: limplAdj.
autorewrite with push_at. rewrite landC. apply: landAdj. reflexivity.
Qed.
Lemma spec_at_reads S R1 R:
S <@ R1 @ R -|- S @ R <@ R1.
Proof.
rewrite /spec_reads. split.
- specintros => s Hs. autorewrite with push_at.
apply lforallL with s. autorewrite with push_at.
apply lpropimplL => //. by rewrite sepSPC.
- autorewrite with push_at. specintro => s.
autorewrite with push_at. specintro => Hs.
apply lforallL with s. apply: lpropimplL => //.
autorewrite with push_at. by rewrite sepSPC.
Qed.
Hint Rewrite spec_at_reads : push_at.
Instance AtEx_reads S R {HS: AtEx S}: AtEx (S <@ R) := _.
Instance AtCovar_reads S R {HS: AtCovar S}: AtCovar (S <@ R) := _.
Instance AtContra_reads S R {HS: AtContra S}: AtContra (S <@ R) := _.
(* Automation for pulling existential quantifiers out of reads frames.
   [PullQuant] instances are found by the [pullquant] hint database. *)
Module Export PullQuant_reads.
Import Existentials.
(* If the reflected frame term [t] contains an existential (detected by
   [find]), it can be hoisted above [<@]; justified by [spec_reads_ex]. *)
Lemma pq_reads S t:
match find t with
| Some (mkf _ f) =>
PullQuant (S <@ eval t) (fun a => S <@ f a)
| None => True
end.
Proof.
move: (@find_correct t). case: (find t) => [[A f]|]; last done.
move=> Heval. red. rewrite ->Heval. by apply_and spec_reads_ex.
Qed.
(* Priority 0: first try to pull a quantifier from the frame [R] itself,
   quoting it into the reflected term language. *)
Hint Extern 0 (PullQuant (?S <@ ?R) _) =>
let t := quote_term R in
apply (@pq_reads S t) : pullquant.
(* If [S] itself admits quantifier pulling, so does [S <@ R], by pushing
   the resulting [lforall] through the frame ([spec_reads_forall]). *)
Lemma pq_reads_rec S R A f:
PullQuant S f ->
PullQuant (S <@ R) (fun a:A => f a <@ R).
Proof.
rewrite /PullQuant => Hf. rewrite <-Hf. by rewrite spec_reads_forall.
Qed.
(* If we didn't find anything to pull from the frame itself, recurse under it. *)
(* Unfortunately, Hint Resolve doesn't work here. For some obscure reason,
there has to be an underscore for the last argument. *)
Hint Extern 1 (PullQuant (?S <@ ?R) _) =>
eapply (@pq_reads_rec S R _ _) : pullquant.
End PullQuant_reads.
(* The later modality [|>] commutes with the [@] frame.  The equivalence
   holds definitionally at every index [k] and world [P], so [reflexivity]
   closes both directions.  Added to [push_at] so [@] is pushed under [|>]. *)
Lemma spec_at_later S R:
(|> S) @ R -|- |> (S @ R).
Proof.
split => k P; reflexivity.
Qed.
Hint Rewrite spec_at_later : push_at.
(* Unfold the later operators so the proof below can reason about the
   underlying [spec_fun] model directly. *)
Local Transparent ILLaterPreOps.
(* [|>] also commutes with the reads frame [<@].  (Note: the proof script
   starts without an explicit [Proof.], which Coq permits.) *)
Lemma spec_reads_later S R: (|> S) <@ R -|- |> (S <@ R).
split => k P /=; rewrite /spec_fun /=; eauto.
Qed.
(* [push_later] database: rewriting right-to-left moves [|>] outward over
   [@] and [<@]; [spec_later_exists_inhabited] additionally distributes
   [|>] over existentials whose domain is inhabited (side condition solved
   by [repeat constructor]). *)
Hint Rewrite <-
spec_at_later
spec_reads_later
: push_later.
Hint Rewrite @spec_later_exists_inhabited
using repeat constructor : push_later.
(* Löb induction under a context [C]: to prove [C |-- S] it suffices to
   prove [S] from [C] together with the induction hypothesis [|> S].
   Derived from the plain [spec_lob] rule via the later-implication law. *)
Lemma spec_lob_context C S: (C //\\ |> S |-- S) -> C |-- S.
Proof.
etransitivity.
- apply landR; first apply ltrueR. reflexivity.
apply: landAdj. apply: spec_lob. rewrite spec_later_impl.
apply: limplAdj. apply: limplL; last by rewrite landC.
apply spec_later_weaken.
Qed.
(* [|>] preserves AtEx: the [@] frame commutes with [|>]
   ([spec_at_later]), and [|>] distributes over [lforall]
   ([spec_later_forall]), so [S]'s own AtEx instance lifts through. *)
Instance AtEx_later S {HS: AtEx S} : AtEx (|> S).
Proof.
move=> A f. rewrite spec_at_later.
red in HS. rewrite <-HS. rewrite spec_later_forall. cancel1 => x.
by rewrite spec_at_later.
Qed.
(* [|>] preserves covariance in the frame: push [|>] through [@] and use
   [S]'s own covariance on the underlying entailment. *)
Instance AtCovar_later S {HS: AtCovar S} : AtCovar (|> S).
Proof.
move=> P Q HPQ. autorewrite with push_at. by rewrite ->(HS _ _ HPQ).
Qed.
(* Same argument as [AtCovar_later], for contravariance in the frame. *)
Instance AtContra_later S {HS: AtContra S} : AtContra (|> S).
Proof.
move=> P Q HPQ. autorewrite with push_at. by rewrite ->(HS _ _ HPQ).
Qed.
|
{"author": "jbj", "repo": "x86proved", "sha": "d314fa6d23c064a2be4bf686ac7da16a591fda01", "save_path": "github-repos/coq/jbj-x86proved", "path": "github-repos/coq/jbj-x86proved/x86proved-d314fa6d23c064a2be4bf686ac7da16a591fda01/src/spec.v"}
|
# Documentation build script for the Jack.jl package.
using Documenter, Jack

# Build the HTML documentation site.  Keyword order is irrelevant to
# Documenter; all values are identical to the project's configuration.
makedocs(;
    sitename="Jack.jl",
    authors="TsuMakoto",
    modules=[Jack],
    format=Documenter.HTML(),
    repo="https://github.com/TsuMakoto/Jack.jl/blob/{commit}{path}#L{line}",
    pages=["Home" => "index.md"],
    assets=String[],
)

# Publish the generated site (gh-pages deployment).
deploydocs(; repo="github.com/TsuMakoto/Jack.jl")
|
{"hexsha": "84283f8b3b1e2079ce15ccecbce8e1091fbbfc3d", "size": 340, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "TsuMakoto/Jack.jl", "max_stars_repo_head_hexsha": "73c9d9c0827de93ec603b881d59aba5e1ce4dd15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "TsuMakoto/Jack.jl", "max_issues_repo_head_hexsha": "73c9d9c0827de93ec603b881d59aba5e1ce4dd15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "TsuMakoto/Jack.jl", "max_forks_repo_head_hexsha": "73c9d9c0827de93ec603b881d59aba5e1ce4dd15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.8888888889, "max_line_length": 76, "alphanum_fraction": 0.6058823529, "num_tokens": 107}
|
%\section{Quickstart Guide}
% The quickstart guide should explain in simple terms and with examples
% how a user is supposed to achieve the most common usecases. E.g. how
% to submit and cancel a job, how to receive a job's output. How to
% create a grid file, move it around, locate it, and delete it. How to
% monitor the progress on an application etc.
This section briefly explains the sequence of operations, from the client configuration, to the description of the
request up to the retrieval of the generated output, to be performed by a user to have his application run on a
grid resource.
\subsection{Configuration}
Configuration of the WMS User Interface VO-specific parameters is accomplished through the file:
\smallskip
\begin{verbatim}
$GLITE_LOCATION/etc/<vo name>/glite_wmsui.conf
\end{verbatim}
\smallskip
i.e. there is one directory and file for each supported VO.
If you wish to add a new VO among the ones supported by the WMS-UI, you must create a directory in \$GLITE\_LOCATION/etc,
named as the VO (lower-case), copy in it the file:
\smallskip
\begin{verbatim}
$GLITE_LOCATION/etc/vo_template/glite_wmsui.conf
\end{verbatim}
\smallskip
distributed with the WMS-UI package and update it according to the given VO.
Here follows an example of WMS-UI configuration file for the "EGEE" Virtual Organisation.
This implies that the file path has to be \emph{\$GLITE\_LOCATION/etc/egee/glite\_wmsui.conf}:
\smallskip
\begin{verbatim}
[
VirtualOrganisation = "EGEE";
NSAddresses = {
"tigerman.cnaf.infn.it:7772",
"gundam.cnaf.infn.it:7772"
};
LBAddresses = {
{"tigerman.cnaf.infn.it:9000", "fox.to.infn.it:9000"},
{"gundam.cnaf.infn.it:9000", "neo.datamat.it:9000", "grid003.ct.infn.it:9876"}
};
MyProxyServer = "skurut.cesnet.cz";
LoggingDestination = "localhost:9002"; // local instance of LB logging service
]
\end{verbatim}
\smallskip
This file indicates that there are two available Network Servers that can be contacted for the EGEE VO (they
are chosen randomly by the WMS-UI) and each NS has a group of associated LB servers:
\begin{itemize}
\item tigerman.cnaf.infn.it:7772 is associated with tigerman.cnaf.infn.it:9000 and
fox.to.infn.it:9000
\item gundam.cnaf.infn.it:7772 is associated with gundam.cnaf.infn.it:9000, neo.datamat.it:9000
and grid003.ct.infn.it:9876
\end{itemize}
Given a NS the WMS-UI chooses randomly the LB from the corresponding list. This feature can be used to
distribute load over several LB servers, although in most of cases there is a one-to-one correspondence
between NS and LB.
The \emph{MyProxyServer} parameter provides the host FQDN (fully qualified host name) of the MyProxy server
to be used for proxy renewal (see \ref{longjob}) whilst \emph{LoggingDestination} has to be set in case of
non-standard location of the LB locallogger (it usually runs on the WMS node).
The other configuration file for the WMS-UI is
\smallskip
\begin{verbatim}
$GLITE_LOCATION/etc/glite_wmsui_cmd_var.conf
\end{verbatim}
\smallskip
The glite\_wmsui\_cmd\_var.conf file is a class-ad containing information that are not VO specific.
\textbf{Example:}
\smallskip
\begin{verbatim}
[
requirements = other.GlueCEStateStatus == "Production" ;
rank = - other.GlueCEStateEstimatedResponseTime ;
RetryCount = 3 ;
ErrorStorage= "/var/tmp" ;
OutputStorage="/tmp/jobOutput";
ListenerStorage = "/tmp" ;
LoggingTimeout = 30 ;
LoggingSyncTimeout = 60 ;
DefaultStatusLevel = 1 ;
DefaultLogInfoLevel = 0;
NSLoggerLevel = 2;
]
\end{verbatim}
\smallskip
Details about configuration of the WMS-UI are provided in Section~\ref{cli}).
If you need to customise these files and you do not have root access on the WMS-UI machine, you can work with your own
copies of them using the \emph{--config-vo} and \emph{--config} options of the WMS-UI commands.
\subsection{Environment Variables}
These are the environment variables that influence the behavior of the WMS-UI:
\begin{itemize}
\item GLITE\_WMSUI\_CONFIG\_VAR Non-standard location of the command line interface configuration file
glite\_wmsui\_cmd\_var.conf. This variable points to the file absolute path
\item GLITE\_WMSUI\_CONFIG\_VO Non-standard location of the vo-specific GUI configuration file
glite\_wmsui.conf. This variable points to the file absolute path.
\item EDG\_WL\_LOG\_DESTINATION Non-standard address of the LB logging service (glite-lb-locallogger logging
daemon ~\cite{lb}) in the format $<$host FQDN$>$[:$<$port$>$]. If not set the LB logging service running on the WMS node is
targeted for logging job information.
\end{itemize}
\subsection{Main commands}
The most relevant commands to interact with the WMS are:
\begin{itemize}
\item glite-job-list-match $<$jdl\_file$>$
\item glite-job-submit $<$jdl\_file$>$
\item glite-job-status $<$job\_Id$>$
\item glite-job-output $<$job\_Id$>$
\item glite-job-cancel $<$job\_Id$>$
\end{itemize}
You can access information about the usage of each command by issuing either:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> <command> --help
\end{verbatim}
\end{scriptsize}
\smallskip
or
\smallskip
\begin{scriptsize}
\begin{verbatim}
> man <command>
\end{verbatim}
\end{scriptsize}
\smallskip
\textbf{glite-job-list-match}
Displays the list of identifiers of the resources (and the corresponding ranks - if requested) on which the user
is authorized and satisfying the job requirements included in the JDL. This only works for jobs; for DAGs you have to
issue this commands on the single nodes JDLs.
\textbf{glite-job-submit}
This command submits a job/DAG to the grid. It requires a JDL file as input and returns a job/DAG Identifier.
\textbf{glite-job-status}
This command prints the status of a job/DAG previously submitted using glite-job-submit. The job status request is sent
to the LB (Logging and Bookkeeping service) that provides the requested information.
When issued for a DAG it provides the status information for the DAG itself and all of its nodes.
It is also possible to retrieve the status of individual nodes of a DAG simply passing their own identifiers to the
command.
The LB service using the job/DAG related events sent by each WMS component handling the request, keeps a state machine
view of each job/DAG.
Figure~\ref{job-state} represents the job life-cycle state machine:
\clearpage
\begin{figure}[htb]
\centering
\includegraphics[width=.8\hsize]{job-state-diagram}
\caption{Job State Machine}
\label{job-state}
\end{figure}
Here below is provided a brief description of the meaning of each possible state a job/DAG can enter:
\begin{itemize}
\item {\it Submitted}: job is entered by the user to the User Interface but not yet transferred to Network Server for processing
\item {\it Waiting}: job has been accepted by NS and is waiting for Workload Manager processing or is being processed by WM Helper modules (e.g., WM is busy, no appropriate Computing Element (cluster) has been found yet, required dataset is not available, job is waiting for resource allocation).
\item {\it Ready}: job has been processed by WM and its Helper modules (especially, appropriate Computing Element has been found) but not yet transferred to the Computing Element (local batch system queue) via Job Controller and CondorC. This state does
not exist for a DAG as it is not subjected to matchmaking (the nodes are) but passed directly to DAGMan.
\item {\it Scheduled}: job is waiting in the queue on the Computing Element. This state also does not exist for a DAG as it is not directly sent to a CE (the nodes are).
\item {\it Running}: job is running. For a DAG this means that DAGMan has started processing it.
\item {\it Done}: job exited or is considered to be in a terminal state by CondorC (e.g., submission to CE has failed in an unrecoverable way).
\item {\it Aborted}: job processing was aborted by WMS (waiting in the Workload Manager queue or Computing Element for too long, over-use of quotas, expiration of user credentials, etc.).
\item {\it Canceled}: job has been successfully canceled on user request.
\item {\it Cleared}: output sandbox was transferred to the user or removed due to the timeout.
\end{itemize}
Taken into account remarks about DAGs in the previous state description, the following figure~\ref{dag-state} represents instead the DAG
life-cycle state machine:
\begin{figure}[htb]
\centering
\includegraphics[width=.8\hsize]{dag-state-diagram}
\caption{DAG State Machine}
\label{dag-state}
\end{figure}
\newpage
\textbf{glite-job-output}
The glite-job-output command can be used to retrieve the output files of a job/DAG that has been submitted through the
glite-job-submit command with a job description file including the OutputSandbox attribute. After the submission,
when the job/DAG has terminated its execution, the user can download the files generated by the job/DAG and temporarily
stored on the Resource Broker machine as specified by the OutputSandbox attribute, issuing the
glite-job-output with as input the ID returned by the glite-job-submit. As a DAG does not have its own output sandbox,
when the command is issued for such a request it retrieves the output sandboxes of all the DAG nodes.
\textbf{glite-job-cancel}
This command cancels a job previously submitted using glite-job-submit. Before cancellation, it prompts the user
for confirmation. The cancel request is sent to the Network Server that forwards it to the WM that fulfills it.
It is not allowed to issue a cancel request for a node of a DAG: you have to cancel the whole DAG using the
provided handle instead.
\medskip
The WMS-UI also provides three additional commands. They are:
\begin{itemize}
\item glite-job-logging-info $<$job\_Id$>$ (mostly useful for debugging purposes)
\item glite-job-attach $<$job\_Id$>$ (for interactive jobs only)
\item glite-job-get-chkpt $<$job\_Id$>$ (for checkpointable jobs only)
\end{itemize}
\textbf{glite-job-logging-info}
This command prints all the events related to a previously submitted job/DAG, that have been logged to the LB during
request's lifetime by the WMS components that have handled it. The job/DAG logging-info request is sent
to the LB (Logging and Bookkeeping service) that provides the requested information. When issued for a DAG the
command only displays events related to the DAG itself and not the ones of the nodes.
\textbf{glite-job-attach}
This command attaches a listener to a previously submitted interactive job. This will make the job standard streams
be re-directed to the command shell (or to a dedicated graphical window - if requested).
\textbf{glite-job-get-chkpt}
This command retrieves a checkpoint state of a previously submitted checkpointable job. The retrieved state, that
is saved to a file on the WMS-UI machine, can be used later to re-submit either the same or another checkpointable
job so that it will start its execution from the given state rather than from the beginning.
\smallskip
\subsection{The Job Identifier}
The Job (and DAG) Identifiers produced by the workload management software are of the form:
\smallskip
\begin{verbatim}
https://edt003.cnaf.infn.it:9000/NyIYrqE\_a8igk4f0CLXNKA
\end{verbatim}
\smallskip
The first part of the Id (\emph{https://edt003.cnaf.infn.it:9000} in the example above) is the endpoint URL of the
LB server holding the job/DAG logging and bookkeeping information and this allows the WMS-UI to know which LB server has
to be contacted for monitoring a given job/DAG.
\smallskip
The second part (\emph{NyIYrqE\_a8igk4f0CLXNKA}) generated by the WMS-UI taking into account some client local information
ensures instead grid-wide uniqueness of the identifier.
\subsection{The Job Description File}
The key to the job submission and resource matching process is the JDL description file. This file describes
the necessary inputs, generated outputs, and resource requirements of a job/DAG through the JDL
(Job Description Language).
A typical example of a job description file is:
\smallskip
\begin{verbatim}
[
Type = "Job";
JobType = "Normal";
Executable = "myexe";
StdInput = "myinput.txt";
StdOutput = "message.txt";
StdError = "error.txt";
InputSandbox = {"/users/pacini/example/myinput.txt",
"/users/pacini/example/myexe"};
OutputSandbox = {"message.txt", "error.txt"};
Requirements = other.GlueCEInfoLRMSType == "PBS";
Rank = other.FreeCPUs;
]
\end{verbatim}
\smallskip
Such a JDL would make the \emph{myexe} executable be transferred on a remote CE whose queue is managed by the PBS batch
system and be run taking the \emph{myinput.txt} file (also copied from the UI node) as input. The standard streams of the
job are redirected on the worker node to file \emph{message.txt} and \emph{error.txt} and can be later retrieved on the
WMS-UI by means of the \emph{glite-job-output} command.
A simple example of DAG description, call it \emph{dag.jdl}, is instead:
\smallskip
\begin{verbatim}
[
Type = "dag";
max_nodes_running = 10;
VirtualOrganisation = "EGEE";
nodes = [
nodeA = [
file ="/users/pacini/n1.jdl" ;
];
nodeB = [
file ="/users/pacini/n2.jdl" ;
];
nodeC = [
file ="/users/pacini/n3.jdl" ;
];
dependencies = {
{ nodeA, {nodeB, nodeC}}
}
];
]
\end{verbatim}
\smallskip
where n1.jdl, n2.jdl and n3.jdl are in turn job descriptions representing the nodes of the DAG and the dependencies
attributes states that \textit{nodeB} and \textit{nodeC} cannot start before \textit{nodeA} has been successfully executed.
Also the DAGs are submitted through the \emph{glite-job-submit} command.
A detailed description of the available JDL attributes and of the rules for building correct JDL files is
provided by the "JDL Attributes Specification" document ~\cite{jdl}.
It is important to note that the input and output sandboxes are intended for relatively small files
(few megabytes) like scripts, standard input, and standard output streams.
If you are using large input files or generating large output files, you should instead directly read from or
write to a storage element. As each submitting user is assigned by the WMS with a limited quota on the WMS
machine disk, abuse of the input and output sandboxes will shortly make the quota fill-up and the WMS not accept
further jobs submission for the given user.
The parameters Requirements and Rank control the resource matching for the job. The expression given for the
requirements specifies the constraints necessary for a job to run. In the example above, a site running PBS is
required and the job will only be submitted to resources which satisfy this condition. If more than one resource
matches the job requirements, then the rank is used to determine which is the most desirable resource i.e. the one
to which the job is submitted (the higher the rank value the better is the resource).
Both, the Requirements and the rank attributes, can be arbitrary expressions which use the parameters published
by the resources in the Information System or directly to the ISM.
A DAG does not have its own requirements and ranking expressions: matchmaking is performed on the individual nodes
Rank and Requirements.
\smallskip
\subsection{Commands Sequence}
This section reports an example of the sequence of steps that have to be performed to do a job submission and to monitor
the submitted job.
Before using any of the WMS-UI commands it is necessary to have a valid proxy credential available on the WMS-UI
machine. You can create it using the \textit{voms-proxy-init} command or alternatively the \textit{grid-proxy-init} one.
If you already have a valid proxy available on your machine just make the X509\_USER\_PROXY environment variable
point to it.
In order to get a proxy certificates issued by VOMS ~\cite{voms-core} you should have in your home directory the file
\$HOME/.glite/vomses containing a line as follows:
\smallskip
{\scriptsize{\verb!"EGEE" "kuiken.nikhef.nl" "15001" "/O=dutchgrid/O=hosts/OU=nikhef.nl/CN=kuiken.nikhef.nl" "EGEE" "22"!}}
\smallskip
or the corresponding line for your VO (ask your VO admin for that).
Make moreover sure you have in the directory \textit{\$HOME/.globus} your certificate/key pair, i.e. the following files:
\smallskip
\begin{scriptsize}
\begin{verbatim}
usercert.pem
userkey.pem
\end{verbatim}
\end{scriptsize}
\smallskip
Note that file permissions are important: the two files must have respectively \textit{0600} and \textit{0400} permissions.
Then you can issue the VOMS client command (you will be prompted for the pass-phrase):
\smallskip
\begin{scriptsize}
\begin{verbatim}
> voms-proxy-init --userconf vomses -voms EGEE
Your identity: /C=IT/O=INFN/OU=Personal Certificate/L=DATAMAT DSAGRD/CN=Fabrizio Pacini
Enter GRID pass phrase for this identity:
Creating temporary proxy ..................................... Done
/O=dutchgrid/O=hosts/OU=nikhef.nl/CN=kuiken.nikhef.nl
/C=NL/O=NIKHEF/CN=NIKHEF medium-security certification auth
Creating proxy ............................ Done
Your proxy is valid until Sat Mar 5 06:13:07 2005
> voms-proxy-info
VO : EGEE
Valid from : Mar 4 17:13:08 2005 GMT
Valid to : Mar 5 05:13:08 2005 GMT
\end{verbatim}
\end{scriptsize}
\smallskip
Now you can start using the WMS-UI commands.
It is often useful to check the results of the resource matching before submitting a job. For this, one can use
the \emph{glite-job-list-match} command. Given the JDL file it will return a ranked list of matching resources.
The highest-ranked resource will appear first.
Take for example the following JDL file, say \textit{ HelloWorld.jdl}:
\smallskip
\begin{verbatim}
[
Executable = "/bin/echo";
Arguments = "Hello World";
StdOutput = "message.txt"; StdError = "stderror";
OutputSandbox = {"message.txt","stderror"};
rank = -other.GlueCEStateEstimatedResponseTime;
requirements = other.GlueCEStateStatus == "Production";
]
\end{verbatim}
\smallskip
you can get the list of available CEids and then submit as follows:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-list-match HelloWorld.jdl
Selected Virtual Organisation name (from proxy certificate extension): EGEE
Connecting to host edt003.cnaf.infn.it, port 7772
****************************************************
COMPUTING ELEMENT IDs LIST
The following CE(s) matching your job requirements
have been found:
*CEId*
grid20.bo.ingv.it:2119/jobmanager-pbs-infinite
grid20.bo.ingv.it:2119/jobmanager-pbs-long
grid20.bo.ingv.it:2119/jobmanager-pbs-short
gridba2.ba.infn.it:2119/jobmanager-lcgpbs-infinite
gridba2.ba.infn.it:2119/jobmanager-lcgpbs-long
gridba2.ba.infn.it:2119/jobmanager-lcgpbs-short
gridit001.pd.infn.it:2119/jobmanager-pbs-infinite
gridit001.pd.infn.it:2119/jobmanager-pbs-long
gridit001.pd.infn.it:2119/jobmanager-pbs-short
****************************************************
\end{verbatim}
\end{scriptsize}
\smallskip
and once verified that you are happy with the matching resources, you can actually submit the job:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-submit HelloWorld.jdl
Selected Virtual Organisation name (from proxy certificate extension): EGEE
Connecting to host edt003.cnaf.infn.it, port 7772
Logging to host edt003.cnaf.infn.it, port 9002
**************************************************************
JOB SUBMIT OUTCOME
The job has been successfully submitted to the Network Server.
Use glite-job-status command to check job current status.
Your job identifier is:
- https://edt003.cnaf.infn.it:9000/NyIYrqE\_a8igk4f0CLXNKA
***************************************************************
\end{verbatim}
\end{scriptsize}
\smallskip
Note that this command returns the job identifier associated with this job. The job Id
is the unique Grid Job Identifier, assigned from the WMS (Workload Management System) to every job in order to
be able to identify it in clear and unique way all over the Grid system scope.
Passing the job id handle to the WMS commands you can follow-up the submitted job:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-status https://edt003.cnaf.infn.it:9000/NyIYrqE\_a8igk4f0CLXNKA
****************************************************************************
BOOKKEEPING INFORMATION:
Printing status info for the Job :
https://edt003.cnaf.infn.it:9000/NyIYrqE\_a8igk4f0CLXNKA
Current Status:Done (Success)
Exit code: 0
Status Reason: Job terminated successfully
Destination: gridit001.pd.infn.it:2119/jobmanager-lcgpbs-infinite
reached on:Mon Sep 22 09:37:13 2003 CET
****************************************************************************
\end{verbatim}
\end{scriptsize}
\smallskip
States seen in the normal processing of jobs are: \textit{Submitted, Waiting, Ready, Running} and \textit{Done}. Abnormal
execution usually ends with an Aborted status.
Once you have checked that the job has terminated its execution successfully (\emph{Done} status), i.e. the job has
finished and the output has been pushed back to the WMS node, you can retrieve the output of your job to the
WMS-UI machine as follows:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-output https://edt003.cnaf.infn.it:9000/NyIYrqE\_a8igk4f0CLXNKA
Retrieving files from host edt003.cnaf.infn.it
****************************************************************************
JOB GET OUTPUT OUTCOME
Output sandbox files for the job:
- https://edt003.cnaf.infn.it:9000/NyIYrqE\_a8igk4f0CLXNKA
have been successfully retrieved and stored in the directory:
/tmp/jobOutput/mrossi__NyIYrqE\_a8igk4f0CLXNKA
****************************************************************************
\end{verbatim}
\end{scriptsize}
\smallskip
where \textit{/tmp/jobOutput} is the output storage path set in your configuration file and
\textit{mrossi\_\_NyIYrqE\_a8igk4f0CLXNKA} is a directory name built concatenating your current OS user-name and
the job Id unique string.
Use the \emph{--dir} option if you want the output to be saved in a location different from \textit{/tmp/jobOutput}.
Handling the job identifiers directly quickly becomes tedious. To avoid this, you can make the \emph{glite-job-submit}
command append the job Id to a named file using the \emph{--output} option.
On the other side, the WMS-UI commands which take job identifiers as an argument accept also the \emph{--input} option
which allows the job identifier to be read from a file.
It is possible anyway to retrieve the status information for all jobs you have submitted for a given VO by using
the \emph{--all} option of the \emph{glite-job-status} command:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-status --all
\end{verbatim}
\end{scriptsize}
\smallskip
If something is not going as expected with your job, e.g. it is \textit{Aborted} or it does not reach the \textit{Done}
status you can try the \emph{glite-job-logging-info} command to inspect the job related events. Assuming your job
identifier is e.g. \textit{https://gundam.cnaf.infn.it:9000/WkyitIdNTR0C9adOcBPhwg}, you can type:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-logging-info https://gundam.cnaf.infn.it:9000/WkyitIdNTR0C9adOcBPhwg
**********************************************************************
LOGGING INFORMATION:
Printing info for the Job : https://gundam.cnaf.infn.it:9000/WkyitIdNTR0C9adOcBPhwg
---
Event: RegJob
- source = UserInterface
- timestamp = Fri Mar 4 18:05:28 2005 CET
---
Event: Transfer
- destination = NetworkServer
- result = START
- source = UserInterface
- timestamp = Fri Mar 4 18:05:29 2005 CET
---
Event: Transfer
- destination = NetworkServer
- result = OK
- source = UserInterface
- timestamp = Fri Mar 4 18:05:32 2005 CET
---
Event: Accepted
- source = NetworkServer
- timestamp = Fri Mar 4 18:05:23 2005 CET
---
....
\end{verbatim}
\end{scriptsize}
\smallskip
to check if there was a problem within a certain WMS component and eventually cancel it with :
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-cancel https://gundam.cnaf.infn.it:9000/SpLbPbMpftBSnCr0WwJmZA
Are you sure you want to remove specified job(s)? [y/n]n :y
============================= glite-job-cancel Success ===========================
The cancellation request has been successfully submitted for the following job(s):
- https://gundam.cnaf.infn.it:9000/SpLbPbMpftBSnCr0WwJmZA
====================================================================================
\end{verbatim}
\end{scriptsize}
\smallskip
\subsubsection {DAGs}
DAG submission can be accomplished through the same commands used for simple jobs.
If for example you want to submit the DAG defined above, \emph{dag.jdl}, once you have
completed the proxy preparation steps, just issue the following command:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-submit dag.jdl
Selected Virtual Organisation name (from proxy certificate extension): EGEE
Connecting to host gundam.cnaf.infn.it, port 7772
Logging to host gundam.cnaf.infn.it, port 9002
**************************************************************************************
JOB SUBMIT OUTCOME
The dag has been successfully submitted to the Network Server.
Use glite-job-status command to check job current status. Your dag identifier is:
- https://gundam.cnaf.infn.it:9000/AhM4clHKVD1VMOMVrdkCZw
**************************************************************************************
\end{verbatim}
\end{scriptsize}
and then monitor it by means of:
\smallskip
\begin{scriptsize}
\begin{verbatim}
> glite-job-status https://gundam.cnaf.infn.it:9000/AhM4clHKVD1VMOMVrdkCZw
*************************************************************
BOOKKEEPING INFORMATION:
Status info for the Job : https://gundam.cnaf.infn.it:9000/AhM4clHKVD1VMOMVrdkCZw
Current Status: Ready
Status Reason: unavailable
Destination: dagman
Submitted: Mon Mar 7 17:25:22 2005 CET
*************************************************************
- Nodes information for:
Status info for the Job : https://gundam.cnaf.infn.it:9000/ayNofwCnlusD68s3qQvFEA
Current Status: Submitted
Submitted: Mon Mar 7 17:25:08 2005 CET
Parent Job: https://gundam.cnaf.infn.it:9000/AhM4clHKVD1VMOMVrdkCZw
*************************************************************
Status info for the Job : https://gundam.cnaf.infn.it:9000/9FfFXd7UIWuoPSlyqMVZNQ
Current Status: Submitted
Submitted: Mon Mar 7 17:25:08 2005 CET
Parent Job: https://gundam.cnaf.infn.it:9000/AhM4clHKVD1VMOMVrdkCZw
*************************************************************
Status info for the Job : https://gundam.cnaf.infn.it:9000/wlgiicvWUe6Br7nbjIxcnQ
Current Status: Submitted
Submitted: Mon Mar 7 17:25:08 2005 CET
Parent Job: https://gundam.cnaf.infn.it:9000/AhM4clHKVD1VMOMVrdkCZw
*************************************************************
\end{verbatim}
\end{scriptsize}
\smallskip
As you can see the \emph{glite-job-status} command shows in this case information about the DAG
itself and all its nodes. Nodes can be also followed-up singularly by picking their job identifiers.
The \emph{glite-job-logging-info} returns instead only the events related to the DAG. Nodes logging information
have to be requested explicitly specifying the node Id.
The \emph{glite-job-output} can be used for a DAG to request the retrieval of the output sandboxes of all its nodes.
The \emph{glite-job-cancel} commands can be used for a DAG to request cancellation of the whole DAG whilst cannot be
called for a single DAG node.
\subsection {Long Lived Jobs}
\label{longjob}
It is possible that long jobs may outlive the validity of the initial proxy; if so and the proxy
is not renewed, the job will die prematurely. To avoid this the workload management software allows
the proxy to be renewed automatically if your credentials are managed by a MyProxy server.
To use the automatic proxy renewal mechanism, first register a proxy with the MyProxy server using
the command
\smallskip
\verb!myproxy-init -s <server> -t <hours> -d -n!
\smallskip
where \emph{server} is the MyProxy server address, \emph{hours} is the number of hours the proxy should be valid on the
server.
As this proxy is only copied to the server, you will need to create a local short-lived proxy using
voms-proxy-init as explained previously, to do the job submissions. The Workload Manager will
retrieve renewed proxies from the MyProxy server for jobs which need and request them.
The need for proxy renewal has to be explicitly specified in the JDL of the job/DAG through the
\emph{MyProxyServer} attribute, e.g.:
\smallskip
\begin{verbatim}
MyProxyServer = "skurut.ics.muni.cz";
\end{verbatim}
\smallskip
Information about your stored proxy can be obtained via the command
\smallskip
\verb!myproxy-info -s <server> -d!
\smallskip
and the proxy can be removed with
\smallskip
\verb!myproxy-destroy -s <server> -d.!
\smallskip
Once the proxy is removed from the server, running jobs will no longer receive renewed credentials.
\subsection{Specifying Job Requirements}
By specifying job requirements, the user can steer the job to sites which have the resources
necessary to run the job correctly. Incompletely specifying the requirements may cause the job
to fail, wasting both the resources and the user's time.
The request requirements are specified through the \emph{Requirements} attribute in the JDL
description of the job. The value of this attribute is a boolean expression which specifies the
necessary constraints. Nearly the full set of C operators and syntax are supported.
The values (or variables) which can be used in the requirements expression can be found by looking
at the Computing Element attributes in the BDII or subscribing to the CE Monitor notification service.
The current Glue Schema used for publishing CE information is however available at:
\url{http://www.cnaf.infn.it/~sergio/datatag/glue/index.htm}. Most of the attributes are
self-explanatory.
For example to express that a job requires at least 25 minutes of CPU time and 100 minutes of real
time, the expression:
\smallskip
{\scriptsize{
\verb!Requirements = other.GlueCEPolicyMaxCPUTime >= 1500 && other.GlueCEPolicyMaxWallClockTime >= 6000;!
}}
\smallskip
would limit the matching to viable sites. The times are given in seconds.
Note that the attribute names are prefixed with \textit{other.}; this is a remnant of the Class-Ads syntax on
which JDL is based, indicating that the prefixed attribute has to be searched in the counterpart class-ad (i.e. the
one describing the resource). Note also that the values are not quoted. Using quotes around a numeric value
will result in a string comparison which will produce an erroneous match (or none at all).
The \textit{GlueHostApplicationSoftwareRunTimeEnvironment} is usually used to describe application software
packages which are installed on a site. For example:
\smallskip
{\scriptsize{
\verb!Requirements = Member(other.GlueHostApplicationSoftwareRunTimeEnvironment ,"ALICE-3.07.01");!
}}
\smallskip
will choose a site with the ALICE-3.07.01 tag defined. The \textit{GlueHostApplicationSoftwareRunTimeEnvironment}
is a multi-valued attribute and evaluates to a list. The class-ad \textit{Member} function returns true if the given
value is in the list.
The available built-in class-ad functions that can be used for building the requirements expression are
described in ~\cite{jdl-lang}.
Occasionally, one may wish to exclude or include a site manually. Forcing a job to a site can be
accomplished with the \emph{--resource} option of the glite-job-submit command. However, this
entirely bypasses the matchmaking process and will not produce the \emph{.BrokerInfo} file, i.e. the
file generated during the matchmaking phase, that is sent on the WN and contains data location
information that can be useful for the job at run time. You can use instead a clause like:
\smallskip
{\scriptsize{
\verb!Requirements = other.GlueCEUniqueID == "ccgridli03.in2p3.fr:2119/jobmanager-bqs-A";!
}}
\smallskip
to do the same thing. More interestingly one can select or exclude a site:
\smallskip
\begin{scriptsize}
\begin{verbatim}
Requirements = RegExp(".*nikhef.*",other.GlueCEUniqueID);
Requirements = (!(RegExp(".*nikhef.*",other.GlueCEUniqueID)));
\end{verbatim}
\end{scriptsize}
\smallskip
which cannot be accomplished with the --resource option. Note that the JDL is very picky about the
logical not syntax.
In the WMS-UI configuration file (\$GLITE\_LOCATION/etc/glite\_wmsui\_cmd\_var.conf) there is a
requirements clause which is added to all JDL files by default. This is
\smallskip
\begin{scriptsize}
\begin{verbatim}
other.GlueCEStateStatus == "Production" ;
\end{verbatim}
\end{scriptsize}
\smallskip
If you have provided an expression for the requirements attribute in the JDL, the one specified in
the configuration file is added (in AND) to the existing one.
As a DAG does not have its own requirements, what is stated in this section applies to the DAG nodes' descriptions.
\subsection{Ranking Resources}
If more than one resource matches the specified requirements, then the highest-ranked resource will
be used. If the Rank attribute is not specified in the user's JDL description, then
\smallskip
\begin{scriptsize}
\verb!Rank = - other.GlueCEStateEstimatedResponseTime ;!
\end{scriptsize}
\smallskip
is added by default by the WMS-UI (as specified in the WMS-UI configuration file). The traversal time is
the expected time in seconds that a job will take to begin executing at the site from the time it has
entered the batch system queue.
This ranking is not always ideal, and the user may wish to choose some other criteria for the
ranking. For example,
\smallskip
\begin{scriptsize}
\verb!Rank = other.GlueCEStateFreeCPUs ;!
\end{scriptsize}
\smallskip
will make the WMS choose the site with the largest number of free CPUs. The rule to remember is that
the larger the rank, the more desirable the resource is. If more than one site has exactly the same
rank, then the one which is used is chosen randomly by the RB.
As a DAG does not have its own rank, what is stated in this section applies to the DAG nodes' descriptions.
|
{"hexsha": "7eddcbc4173faa7b99c8bbb79d1ab47cafe1ab35", "size": 35844, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "users-guide/WMS/quickstart.tex", "max_stars_repo_name": "italiangrid/wms", "max_stars_repo_head_hexsha": "5b2adda72ba13cf2a85ec488894c2024e155a4b5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-18T02:19:18.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-18T02:19:18.000Z", "max_issues_repo_path": "users-guide/WMS/quickstart.tex", "max_issues_repo_name": "italiangrid/wms", "max_issues_repo_head_hexsha": "5b2adda72ba13cf2a85ec488894c2024e155a4b5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "users-guide/WMS/quickstart.tex", "max_forks_repo_name": "italiangrid/wms", "max_forks_repo_head_hexsha": "5b2adda72ba13cf2a85ec488894c2024e155a4b5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8266666667, "max_line_length": 297, "alphanum_fraction": 0.7123646914, "num_tokens": 8913}
|
# This file is part of the scanning-squid package.
#
# Copyright (c) 2018 Logan Bishop-Van Horn
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.colors as colors
import numpy as np
from utils import make_scan_vectors, make_scan_grids, moving_avg, to_real_units, clear_artists
from typing import Dict, List, Optional, Sequence, Any, Union, Tuple
import warnings
warnings.filterwarnings('ignore', message='The unit of the quantity is stripped.')
class ScanPlot(object):
    """Plot displaying acquired images in all measurement channels, updated live during a scan.

    The figure is laid out as three axes rows per row of plots: a thin
    horizontal colorbar axis, the image axis, and a linecut axis showing
    the most recently acquired scan lines.
    """
    def __init__(self, scan_params: Dict[str, Any], ureg: Any, **kwargs) -> None:
        """
        Args:
            scan_params: Scan parameters as defined in measurement configuration file.
            ureg: pint UnitRegistry, manages units.
            **kwargs: Unused here; accepted so callers can pass extra arguments.
        """
        self.scan_params = scan_params
        self.ureg = ureg
        self.Q_ = ureg.Quantity
        self.channels = scan_params['channels']
        self.fast_ax = scan_params['fast_ax']
        # The slow axis is whichever of x/y is not the fast axis.
        self.slow_ax = 'y' if self.fast_ax == 'x' else 'x'
        # Linecut colors: newest line is red, older lines fade through greys.
        self.line_colors = ['#d80202' ,'#545454' ,'#777777' ,'#a8a8a8', '#d1d1d1']
        MAXN_COLS = 4
        N = len(self.channels.keys())
        cols = N if N < MAXN_COLS else MAXN_COLS
        plot_rows = int(np.ceil(N / cols))
        # Three axes rows (colorbar, image, linecut) per row of plots.
        rows = 3 * plot_rows
        self.fig, self.ax = plt.subplots(rows, cols, figsize=(10,4.5 * plot_rows),
                                         gridspec_kw={"height_ratios":[0.075, 1, 0.5]*plot_rows})
        self.fig.patch.set_alpha(1)
        self.plots = {'colorbars': {}, 'images': {}, 'lines': {}}
        # NOTE(review): the self.ax[0..2][i] indexing assumes a single row of
        # plots (i.e. N <= MAXN_COLS, plot_rows == 1) — confirm for N > 4.
        for i, ch in enumerate(self.channels.keys()):
            self.plots['colorbars'].update({ch: {'cax': self.ax[0][i]}})
            self.plots['images'].update({ch: {'ax' :self.ax[1][i]}})
            self.plots['lines'].update({ch: self.ax[2][i]})
        for ch, ax in self.plots['images'].items():
            ax['ax'].set_aspect('equal')
            ax['ax'].set_xlabel('x position [V]')
            ax['ax'].set_ylabel('y position [V]')
        for ch, ax in self.plots['lines'].items():
            ax.grid(True)
            ax.set_aspect('auto')
            ax.set_xlabel('{} position [V]'.format(self.fast_ax))
        self.init_empty()
    def init_empty(self):
        """Initialize the plot with all images empty. They will be filled during the scan.
        """
        self.scan_vectors = make_scan_vectors(self.scan_params, self.ureg)
        self.X, self.Y = np.meshgrid(self.scan_vectors['x'], self.scan_vectors['y'])
        # A NaN-filled grid renders as blank until real data arrives.
        empty = np.full_like(self.X, np.nan, dtype=np.double)
        for ch in self.channels:
            im = self.plots['images'][ch]['ax'].pcolormesh(self.X, self.Y, empty)
            self.plots['images'][ch].update({'quad': im})
            cbar = plt.colorbar(im, cax=self.plots['colorbars'][ch]['cax'], orientation='horizontal')
            # NOTE(review): set_label on the colorbar Axes (cax) may not render a
            # visible label; update() later calls cbar.set_label instead — confirm.
            self.plots['colorbars'][ch]['cax'].set_label(r'{}'.format(self.channels[ch]['unit_latex']))
            self.plots['colorbars'][ch].update({'cbar': cbar})
        for ax, ch in zip(self.ax[0], self.channels.keys()):
            ax.set_title(self.channels[ch]['label'])
        self.fig.canvas.draw()
        self.fig.tight_layout()
    def update(self, data_set: Any, loop_counter: Any, num_lines: Optional[int]=5,
               offline: Optional[bool]=False) -> None:
        """Update the plot with updated DataSet. Called after each line of the scan.
        Args:
            data_set: active data set, with a new line of data added with each loop iteration.
            loop_counter: utils.Counter instance, lets us know where we are in the scan.
                Unused when offline is True (may be None in that case).
            num_lines: Number of previous linecuts to plot, including the line just scanned.
                Currently can only handle num_lines <= 5 (limited by line_colors).
            offline: False if this is being called during a scan.
        """
        self.location = data_set.location
        self.fig.suptitle(self.location, x=0.5, y=1, fontsize=10)
        data = to_real_units(data_set)
        meta = data_set.metadata['loop']['metadata']
        slow_ax = 'x' if meta['fast_ax'] == 'y' else 'y'
        # Index of the line just completed; offline mode plots the final line.
        line = loop_counter.count if not offline else meta['scan_size'][slow_ax] - 1
        for idx, ch in enumerate(self.channels):
            data_ch = data[:,idx,:]
            # Transpose so the fast axis is always along rows of data_ch.
            if self.fast_ax.lower() == 'y':
                data_ch = data_ch.T
            # clear_artists(self.plots['images'][ch]['ax'])
            # clear_artists(self.plots['lines'][ch])
            # NOTE(review): Normalize().autoscale(...) returns None, so norm is
            # always None here and pcolormesh falls back to its own autoscaling —
            # confirm whether an explicit Normalize instance was intended.
            norm = colors.Normalize().autoscale(np.ma.masked_invalid(data_ch))
            # Rebuild the image from scratch each line (clear + new pcolormesh).
            self.plots['images'][ch]['ax'].clear()
            self.plots['images'][ch]['ax'].set_xlabel('x position [V]')
            self.plots['images'][ch]['ax'].set_ylabel('y position [V]')
            self.plots['images'][ch]['quad'] = self.plots['images'][ch]['ax'].pcolormesh(
                self.X, self.Y, np.ma.masked_invalid(data_ch), norm=norm)
            # Rebuild the colorbar to track the new image's scale.
            self.plots['colorbars'][ch]['cax'].clear()
            self.plots['colorbars'][ch]['cbar'] = self.fig.colorbar(self.plots['images'][ch]['quad'],
                                                                    cax=self.plots['colorbars'][ch]['cax'],
                                                                    orientation='horizontal')
            self.plots['colorbars'][ch]['cbar'].locator = ticker.MaxNLocator(nbins=3)
            self.plots['colorbars'][ch]['cbar'].update_ticks()
            self.plots['colorbars'][ch]['cbar'].set_label(r'{}'.format(self.channels[ch]['unit_latex']))
            self.plots['colorbars'][ch]['cbar'].update_normal(self.plots['images'][ch]['quad'])
            # self.plots['images'][ch]['ax'].relim()
            self.plots['lines'][ch].relim()
            self.plots['colorbars'][ch]['cax'].minorticks_on()
            #: Update linecuts
            self.plots['lines'][ch].clear()
            self.plots['lines'][ch].grid(True)
            self.plots['lines'][ch].set_aspect('auto')
            self.plots['lines'][ch].set_xlabel('{} position [V]'.format(self.fast_ax))
            self.plots['lines'][ch].set_ylabel(r'{}'.format(self.channels[ch]['unit_latex']))
            xdata = self.scan_vectors[self.fast_ax]
            # Plot up to num_lines most recent linecuts, fading older ones.
            if line < num_lines:
                for l in range(line+1):
                    ydata = data_ch[:,l] if self.fast_ax == 'y' else data_ch[l,:]
                    self.plots['lines'][ch].plot(xdata, ydata, lw=2, color=self.line_colors[line-l])
            else:
                for l in range(num_lines):
                    ydata = data_ch[:,line-num_lines+l+1] if self.fast_ax == 'y' else data_ch[line-num_lines+l+1,:]
                    self.plots['lines'][ch].plot(xdata, ydata, lw=2, color=self.line_colors[num_lines-l-1])
        self.fig.canvas.draw()
    def save(self, fname=None):
        """Save plot to png file.
        Args:
            fname: File to which to save the plot.
                If fname is None, saves to data location as {scan_params['fname']}.png
        """
        if fname is None:
            fname = os.path.join(self.location, self.scan_params['fname'] + '.png')
        plt.savefig(fname, dpi=300)
class ScanPlotFromDataSet(ScanPlot):
    """Generate ScanPlot instance from a completed DataSet rather than during a Loop.
    """
    def __init__(self, scan_data: Any, ureg: Optional[Any]=None) -> None:
        """
        Args:
            scan_data: DataSet to plot, as created by microscope.scan_plane
            ureg: pint UnitRegistry, manages units. If None, a new registry is
                created and taught the SQUID-specific units (Phi0, Ohm).
        """
        if ureg is None:
            from pint import UnitRegistry
            ureg = UnitRegistry()
            #: Tell the UnitRegistry what a Phi0 is, and that ohm = Ohm
            with open('squid_units.txt', 'w') as f:
                f.write('Phi0 = 2.067833831e-15 * Wb\n')
                f.write('Ohm = ohm\n')
            ureg.load_definitions('./squid_units.txt')
        meta = scan_data.metadata['loop']['metadata']
        # Bug fix: ScanPlot.__init__ accepts (scan_params, ureg); the previous
        # call super().__init__(meta, meta['prefactors'], ureg) passed three
        # positional arguments and always raised TypeError.
        super().__init__(meta, ureg)
        # Render the completed scan in one shot (offline mode plots the last line).
        self.update(scan_data, None, offline=True)
class TDCPlot(object):
    """Plot displaying capacitance as a function of z voltage, updated live during a scan.
    """
    def __init__(self, tdc_params: Dict[str, Any], ureg: Any) -> None:
        """
        Args:
            tdc_params: Touchdown parameters as defined in measurement configuration file.
            ureg: pint UnitRegistry, manages units.
        """
        self.tdc_params = tdc_params
        self.constants = tdc_params['constants']
        self.channels = tdc_params['channels']
        self.ureg = ureg
        self.Q_ = ureg.Quantity
        # One subplot per measurement channel, side by side.
        self.fig, self.ax = plt.subplots(1,len(self.channels), figsize=(3*len(self.channels),3))
        self.fig.patch.set_alpha(1)
        self.init_empty()
    def init_empty(self):
        """Initialize the plot with no data.
        """
        # Convert step size and sweep range to plain volts to build the z grid.
        dV = self.Q_(self.tdc_params['dV']).to('V').magnitude
        startV, endV = sorted([self.Q_(lim).to('V').magnitude for lim in self.tdc_params['range']])
        npnts = int((endV - startV) / dV)
        self.heights = np.linspace(startV, endV, npnts)
        for i, ch in enumerate(self.channels):
            self.ax[i].set_xlim(min(self.heights), max(self.heights))
            self.ax[i].grid()
            self.ax[i].set_xlabel('z position [V]')
            self.ax[i].set_ylabel(r'{} [{}]'.format(self.channels[ch]['label'], self.channels[ch]['unit_latex']))
            self.ax[i].set_title(self.channels[ch]['label'])
        self.fig.canvas.draw()
        self.fig.tight_layout()
    def update(self, data_set: Any) -> None:
        """Update plot with data from data_set.
        Args:
            data_set: DataSet generated by Loop in Microscope.td_cap().
        """
        self.location = data_set.location
        self.fig.suptitle(self.location, x=0.5, y=1, fontsize=10)
        self.prefactors = data_set.metadata['loop']['metadata']['prefactors']
        all_data = to_real_units(data_set)
        # Number of points acquired so far = finite entries in the first channel.
        npnts = len(all_data[:,0,0][np.isfinite(all_data[:,0,0])])
        self.hdata = self.heights[:npnts]
        for i, ch in enumerate(self.channels):
            data = all_data[:,i,0][np.isfinite(all_data[:,i,0])]
            # Only redraw when this channel has as many points as the z axis.
            if len(self.hdata) == len(data):
                clear_artists(self.ax[i])
                self.ax[i].plot(self.hdata, data, 'b.')
                # Highlight the most recent point in red.
                self.ax[i].plot(self.hdata[-1], data[-1], 'r.')
            # Stash per-channel arrays for later touchdown analysis.
            if ch == 'CAP':
                self.cdata = data
            elif ch == 'SUSCX':
                self.sxdata = data
            elif ch == 'SUSCY':
                self.sydata = data
        self.fig.canvas.draw()
    def save(self, fname=None):
        """Save plot to png file.
        Args:
            fname: File to which to save the plot.
                If fname is None, saves to data location as {tdc_params['fname']}.png
        """
        if fname is None:
            fname = os.path.join(self.location, self.tdc_params['fname'] + '.png')
        plt.savefig(fname, dpi=300)
|
{"hexsha": "54d867f360fa0cef49ca3613476cb23aec493301", "size": 12347, "ext": "py", "lang": "Python", "max_stars_repo_path": "scanning-squid/plots.py", "max_stars_repo_name": "moler-group/scanning-squid", "max_stars_repo_head_hexsha": "32a61e8f8f4f6671f04e583752e361cb00276188", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-11-14T11:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T14:10:02.000Z", "max_issues_repo_path": "scanning-squid/plots.py", "max_issues_repo_name": "moler-group/scanning-squid", "max_issues_repo_head_hexsha": "32a61e8f8f4f6671f04e583752e361cb00276188", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scanning-squid/plots.py", "max_forks_repo_name": "moler-group/scanning-squid", "max_forks_repo_head_hexsha": "32a61e8f8f4f6671f04e583752e361cb00276188", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-12-23T23:05:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-08T14:45:59.000Z", "avg_line_length": 49.388, "max_line_length": 115, "alphanum_fraction": 0.592289625, "include": true, "reason": "import numpy", "num_tokens": 2972}
|
import logging
import os
import pickle
from glob import glob
import librosa
import numpy as np
from tqdm import tqdm
from utils import parallel_function
logger = logging.getLogger(__name__)
SENTENCE_ID = 'sentence_id'
SPEAKER_ID = 'speaker_id'
FILENAME = 'filename'
def find_files(directory, pattern='**/*.wav'):
    """Recursively find all files under *directory* matching *pattern*.

    The pattern is joined to the directory with a real path separator, so the
    result is correct whether or not *directory* ends with '/' and whether or
    not *pattern* starts with one. (The original naive string concatenation
    produced globs like 'data**/*.wav' when neither had a separator, which
    silently matched the wrong files — exactly the default-pattern call site.)

    Args:
        directory: Root directory to search.
        pattern: Glob pattern, interpreted relative to *directory*.

    Returns:
        Sorted list of matching file paths.
    """
    # lstrip('/') keeps existing callers that pass '/**/*.pkl' working:
    # os.path.join would otherwise treat such a pattern as absolute.
    return sorted(glob(os.path.join(directory, pattern.lstrip('/')), recursive=True))
def read_audio_from_filename(filename, sample_rate):
    """Load *filename* as a mono waveform resampled to *sample_rate*.

    Returns:
        Tuple of (waveform as a column vector of shape (n_samples, 1), filename).
    """
    waveform, _ = librosa.load(filename, sr=sample_rate, mono=True)
    return waveform.reshape(-1, 1), filename
def trim_silence(audio, threshold):
    """Removes silence at the beginning and end of a sample.

    Args:
        audio: 1-D waveform array.
        threshold: RMS energy level below which a frame counts as silence.

    Returns:
        Tuple (audio_trim, left_blank, right_blank): the trimmed waveform plus
        the leading and trailing silent slices. All three are empty arrays if
        the whole clip is below the threshold.
    """
    # NOTE(review): librosa.feature.rmse was renamed to librosa.feature.rms and
    # removed in librosa >= 0.8 — this function requires an older librosa; verify
    # the pinned version before upgrading.
    energy = librosa.feature.rmse(audio)
    frames = np.nonzero(np.array(energy > threshold))
    # Convert frame indices to sample indices; row [1] holds the frame positions.
    indices = librosa.core.frames_to_samples(frames)[1]
    # Note: indices can be an empty array, if the whole audio was silence.
    audio_trim = audio[0:0]
    left_blank = audio[0:0]
    right_blank = audio[0:0]
    if indices.size:
        audio_trim = audio[indices[0]:indices[-1]]
        left_blank = audio[:indices[0]]  # slice before.
        right_blank = audio[indices[-1]:]  # slice after.
    return audio_trim, left_blank, right_blank
def extract_speaker_id(filename):
    """Return the speaker id: the name of the file's parent directory."""
    # Split off at most the last two '/'-separated components; the
    # second-to-last one is the parent directory (the speaker).
    return filename.rsplit('/', 2)[-2]
def extract_sentence_id(filename):
    """Return the sentence id from a basename of the form <speaker>_<sentence>.<ext>."""
    basename = filename.rsplit('/', 1)[-1]
    after_underscore = basename.split('_')[1]
    sentence_id = after_underscore.split('.')[0]
    return sentence_id
class AudioReader:
    """Reads audio files, detects leading/trailing silence, and caches results.

    Each input audio file is processed once into a pickle file containing the
    raw waveform, the voice-only segment, and the durations (in ms) of the
    leading and trailing silence. The cache is indexed by speaker id, which is
    taken from the pkl basename prefix before the first underscore.
    """

    def __init__(self, input_audio_dir,
                 output_cache_dir,
                 sample_rate,
                 multi_threading=False):
        """
        Args:
            input_audio_dir: Directory containing the source audio files.
            output_cache_dir: Directory under which 'audio_cache_pkl' is written.
            sample_rate: Target sample rate used when loading audio.
            multi_threading: If True, build_cache uses one worker per CPU.
        """
        self.audio_dir = os.path.expanduser(input_audio_dir)
        self.cache_dir = os.path.expanduser(output_cache_dir)
        self.sample_rate = sample_rate
        self.multi_threading = multi_threading
        self.cache_pkl_dir = os.path.join(self.cache_dir, 'audio_cache_pkl')
        self.pkl_filenames = find_files(self.cache_pkl_dir, pattern='/**/*.pkl')
        logger.info('audio_dir = {}'.format(self.audio_dir))
        logger.info('cache_dir = {}'.format(self.cache_dir))
        logger.info('sample_rate = {}'.format(sample_rate))
        # Index existing cache files by speaker id (pkl names start with
        # '<speaker>_', so the prefix before the first '_' is the speaker).
        speakers = set()
        self.speaker_ids_to_filename = {}
        for pkl_filename in self.pkl_filenames:
            speaker_id = os.path.basename(pkl_filename).split('_')[0]
            if speaker_id not in self.speaker_ids_to_filename:
                self.speaker_ids_to_filename[speaker_id] = []
            self.speaker_ids_to_filename[speaker_id].append(pkl_filename)
            speakers.add(speaker_id)
        self.all_speaker_ids = sorted(speakers)

    def load_cache(self, speakers_sub_list=None):
        """Load cached audio objects, optionally restricted to some speakers.

        Args:
            speakers_sub_list: Optional list of speaker ids. If None, the whole
                cache is loaded.

        Returns:
            Tuple (cache, metadata):
              cache: big map <filename -> {audio arrays, blank durations, filename}>.
              metadata: small index <speaker_id -> <sentence_id ->
                  {SPEAKER_ID, SENTENCE_ID, FILENAME}>>, derived from cache.
        """
        cache = {}
        metadata = {}
        if speakers_sub_list is None:
            filenames = self.pkl_filenames
        else:
            filenames = []
            for speaker_id in speakers_sub_list:
                filenames.extend(self.speaker_ids_to_filename[speaker_id])
        for pkl_file in filenames:
            with open(pkl_file, 'rb') as f:
                obj = pickle.load(f)
                if FILENAME in obj:
                    cache[obj[FILENAME]] = obj
        for filename in sorted(cache):
            speaker_id = extract_speaker_id(filename)
            if speaker_id not in metadata:
                metadata[speaker_id] = {}
            sentence_id = extract_sentence_id(filename)
            # (The original also pre-initialized this entry to [] before
            # immediately overwriting it; that dead store is removed.)
            metadata[speaker_id][sentence_id] = {SPEAKER_ID: speaker_id,
                                                 SENTENCE_ID: sentence_id,
                                                 FILENAME: filename}
        return cache, metadata

    def build_cache(self):
        """Process every audio file under audio_dir into the pkl cache."""
        if not os.path.exists(self.cache_pkl_dir):
            os.makedirs(self.cache_pkl_dir)
        logger.info('Nothing found at {}. Generating all the cache now.'.format(self.cache_pkl_dir))
        logger.info('Looking for the audio dataset in {}.'.format(self.audio_dir))
        audio_files = sorted(find_files(self.audio_dir))
        audio_files_count = len(audio_files)
        assert audio_files_count != 0, 'Generate your cache please.'
        logger.info('Found {} files in total in {}.'.format(audio_files_count, self.audio_dir))
        if self.multi_threading:
            num_threads = os.cpu_count()
            parallel_function(self.dump_audio_to_pkl_cache,
                              [('%i/%i' % (idx, audio_files_count), file_name)
                               for idx, file_name in enumerate(audio_files)],
                              num_threads)
        else:
            bar = tqdm(audio_files)
            # Bug fix: the original loop referenced an undefined `idx` and
            # passed two positional arguments to dump_audio_to_pkl_cache,
            # which takes a single (log_prefix, filename) tuple.
            for idx, filename in enumerate(bar):
                bar.set_description(filename)
                self.dump_audio_to_pkl_cache(('%i/%i' % (idx, audio_files_count), filename))
            bar.close()

    def dump_audio_to_pkl_cache(self, params):
        """Process a single audio file and write its pkl cache entry.

        Best-effort: any exception (unreadable file, fully-silent clip, ...)
        is logged and the file is skipped.

        Args:
            params: Tuple (log_prefix, input_filename) where log_prefix is a
                'current/total' progress string used for logging.
        """
        log_prefix, input_filename = params
        try:
            cache_filename = input_filename.split('/')[-1].split('.')[0] + '_cache'
            pkl_filename = os.path.join(self.cache_pkl_dir, cache_filename) + '.pkl'
            if os.path.isfile(pkl_filename):
                logger.info('{} - [FILE ALREADY EXISTS] {}'.format(log_prefix, pkl_filename))
                return
            logger.info('{} - [PROCESSING] {}'.format(log_prefix, pkl_filename))
            audio, _ = read_audio_from_filename(input_filename, self.sample_rate)
            # Use the 95th percentile of absolute amplitude as the silence
            # threshold; samples above it delimit the voiced segment.
            energy = np.abs(audio[:, 0])
            silence_threshold = np.percentile(energy, 95)
            offsets = np.where(energy > silence_threshold)[0]
            left_blank_duration_ms = (1000.0 * offsets[0]) // self.sample_rate  # sample index -> duration (ms)
            right_blank_duration_ms = (1000.0 * (len(audio) - offsets[-1])) // self.sample_rate
            obj = {'audio': audio,
                   'audio_voice_only': audio[offsets[0]:offsets[-1]],
                   'left_blank_duration_ms': left_blank_duration_ms,
                   'right_blank_duration_ms': right_blank_duration_ms,
                   FILENAME: input_filename}
            with open(pkl_filename, 'wb') as f:
                pickle.dump(obj, f)
            logger.info('{} - [DUMP AUDIO] {}'.format(log_prefix, pkl_filename))
        except Exception as e:
            # Deliberately broad: one bad file must not abort the whole build.
            logger.error(e)
            logger.error('[DUMP AUDIO ERROR SKIPPING FILENAME] {}'.format(input_filename))
|
{"hexsha": "7da509d1d6919873877849a54d641baa90222700", "size": 7297, "ext": "py", "lang": "Python", "max_stars_repo_path": "audio_reader.py", "max_stars_repo_name": "mrjj/deep-speaker", "max_stars_repo_head_hexsha": "68703d8d3090d5036b77b475d7ac75e5ef9cc8ec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "audio_reader.py", "max_issues_repo_name": "mrjj/deep-speaker", "max_issues_repo_head_hexsha": "68703d8d3090d5036b77b475d7ac75e5ef9cc8ec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "audio_reader.py", "max_forks_repo_name": "mrjj/deep-speaker", "max_forks_repo_head_hexsha": "68703d8d3090d5036b77b475d7ac75e5ef9cc8ec", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9235294118, "max_line_length": 167, "alphanum_fraction": 0.6036727422, "include": true, "reason": "import numpy", "num_tokens": 1530}
|
[STATEMENT]
lemma ipurge_tr_rev_aux_first [rule_format]:
"ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow>
(\<exists>ys zs. xs = ys @ x # zs \<and>
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and>
(\<exists>v \<in> sources_aux I D U zs. (D x, v) \<in> I))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow> (\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I))
[PROOF STEP]
proof (induction xs, simp, rule impI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a xs. \<lbrakk>ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow> (\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)); ipurge_tr_rev_aux I D U (a # xs) = x # ws\<rbrakk> \<Longrightarrow> \<exists>ys zs. a # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
fix x' xs
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a xs. \<lbrakk>ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow> (\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)); ipurge_tr_rev_aux I D U (a # xs) = x # ws\<rbrakk> \<Longrightarrow> \<exists>ys zs. a # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
assume
A: "ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow>
(\<exists>ys zs. xs = ys @ x # zs \<and>
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and>
(\<exists>v \<in> sources_aux I D U zs. (D x, v) \<in> I))" and
B: "ipurge_tr_rev_aux I D U (x' # xs) = x # ws"
[PROOF STATE]
proof (state)
this:
ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow> (\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I))
ipurge_tr_rev_aux I D U (x' # xs) = x # ws
goal (1 subgoal):
1. \<And>a xs. \<lbrakk>ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow> (\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)); ipurge_tr_rev_aux I D U (a # xs) = x # ws\<rbrakk> \<Longrightarrow> \<exists>ys zs. a # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
show "\<exists>ys zs. x' # xs = ys @ x # zs \<and>
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and>
(\<exists>v \<in> sources_aux I D U zs. (D x, v) \<in> I)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
proof (cases "\<exists>v \<in> sources_aux I D U xs. (D x', v) \<in> I")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
2. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I
goal (2 subgoals):
1. \<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
2. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I
[PROOF STEP]
have "x' = x"
[PROOF STATE]
proof (prove)
using this:
\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I
goal (1 subgoal):
1. x' = x
[PROOF STEP]
using B
[PROOF STATE]
proof (prove)
using this:
\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I
ipurge_tr_rev_aux I D U (x' # xs) = x # ws
goal (1 subgoal):
1. x' = x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x' = x
goal (2 subgoals):
1. \<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
2. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
with True
[PROOF STATE]
proof (chain)
picking this:
\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I
x' = x
[PROOF STEP]
have "x' # xs = x # xs \<and>
ipurge_tr_rev_aux I D (sources_aux I D U (x # xs)) [] = [] \<and>
(\<exists>v \<in> sources_aux I D U xs. (D x, v) \<in> I)"
[PROOF STATE]
proof (prove)
using this:
\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I
x' = x
goal (1 subgoal):
1. x' # xs = x # xs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # xs)) [] = [] \<and> (\<exists>v\<in>sources_aux I D U xs. (D x, v) \<in> I)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
x' # xs = x # xs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # xs)) [] = [] \<and> (\<exists>v\<in>sources_aux I D U xs. (D x, v) \<in> I)
goal (2 subgoals):
1. \<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
2. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
x' # xs = x # xs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # xs)) [] = [] \<and> (\<exists>v\<in>sources_aux I D U xs. (D x, v) \<in> I)
goal (1 subgoal):
1. \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
hence "ipurge_tr_rev_aux I D U xs = x # ws"
[PROOF STATE]
proof (prove)
using this:
\<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I)
goal (1 subgoal):
1. ipurge_tr_rev_aux I D U xs = x # ws
[PROOF STEP]
using B
[PROOF STATE]
proof (prove)
using this:
\<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I)
ipurge_tr_rev_aux I D U (x' # xs) = x # ws
goal (1 subgoal):
1. ipurge_tr_rev_aux I D U xs = x # ws
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ipurge_tr_rev_aux I D U xs = x # ws
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
with A
[PROOF STATE]
proof (chain)
picking this:
ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow> (\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I))
ipurge_tr_rev_aux I D U xs = x # ws
[PROOF STEP]
have "\<exists>ys zs. xs = ys @ x # zs \<and>
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and>
(\<exists>v \<in> sources_aux I D U zs. (D x, v) \<in> I)"
[PROOF STATE]
proof (prove)
using this:
ipurge_tr_rev_aux I D U xs = x # ws \<longrightarrow> (\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I))
ipurge_tr_rev_aux I D U xs = x # ws
goal (1 subgoal):
1. \<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
obtain ys and zs where xs: "xs = ys @ x # zs \<and>
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and>
(\<exists>v \<in> sources_aux I D U zs. (D x, v) \<in> I)"
[PROOF STATE]
proof (prove)
using this:
\<exists>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal (1 subgoal):
1. (\<And>ys zs. xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
have
"\<not> (\<exists>v \<in> sources_aux I D (sources_aux I D U (x # zs)) ys. (D x', v) \<in> I)"
[PROOF STATE]
proof (prove)
using this:
xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D (sources_aux I D U (x # zs)) ys. (D x', v) \<in> I)
[PROOF STEP]
using False
[PROOF STATE]
proof (prove)
using this:
xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
\<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D (sources_aux I D U (x # zs)) ys. (D x', v) \<in> I)
[PROOF STEP]
by (simp add: sources_aux_append)
[PROOF STATE]
proof (state)
this:
\<not> (\<exists>v\<in>sources_aux I D (sources_aux I D U (x # zs)) ys. (D x', v) \<in> I)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
hence "ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) =
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys"
[PROOF STATE]
proof (prove)
using this:
\<not> (\<exists>v\<in>sources_aux I D (sources_aux I D U (x # zs)) ys. (D x', v) \<in> I)
goal (1 subgoal):
1. ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) = ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) = ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
with xs
[PROOF STATE]
proof (chain)
picking this:
xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) = ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys
[PROOF STEP]
have "x' # xs = (x' # ys) @ x # zs \<and>
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) = [] \<and>
(\<exists>v \<in> sources_aux I D U zs. (D x, v) \<in> I)"
[PROOF STATE]
proof (prove)
using this:
xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) = ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys
goal (1 subgoal):
1. x' # xs = (x' # ys) @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
by (simp del: sources_aux.simps)
[PROOF STATE]
proof (state)
this:
x' # xs = (x' # ys) @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal (1 subgoal):
1. \<not> (\<exists>v\<in>sources_aux I D U xs. (D x', v) \<in> I) \<Longrightarrow> \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
x' # xs = (x' # ys) @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) (x' # ys) = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal (1 subgoal):
1. \<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>ys zs. x' # xs = ys @ x # zs \<and> ipurge_tr_rev_aux I D (sources_aux I D U (x # zs)) ys = [] \<and> (\<exists>v\<in>sources_aux I D U zs. (D x, v) \<in> I)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 8000, "file": "Noninterference_Ipurge_Unwinding_IpurgeUnwinding", "length": 39}
|
# This file creates a dataset with the labels and backgrounds that are provided
# Import libraries
import numpy as np
import cv2, os, math, random
from glob import glob
from PIL import Image
## Define all parameters for the dataset manipulation
copies_in_train = 7000    # number of generated images in the train split
copies_in_val = 3000      # number of generated images in the val split
desired_width = 300       # every background is resized to this width (px)
desired_height = 300      # ... and this height (px)
max_label_size = 300      # upper bound on the pasted label height (px)
min_label_size = 10       # lower bound on the pasted label height (px)
noise_mean = 0            # mean of the gaussian noise added to each image
noise_std = 0.5           # std-dev of that noise
blur_min_size = 7         # smallest gaussian blur kernel size (forced odd later)
blur_max_size = 19        # largest gaussian blur kernel size
max_labels = 10           # at most this many labels pasted per image
#################################################
# Add directory of your background images
# ex: /data/<my-backgrounds>
background_dir = "/data/my-backgrounds/"
#################################################
#################################################
# Add directory for your resulting images
# ex: /data/<new-dataset>
saved_dir = "/data/new_dataset/"
#################################################
#################################################
# Add directory for the label images
# ex: /data/BMW_Labels/images/
image_dir = "/data/BMW_Labels/images/"
#################################################
# Find a valid location for label within the image, place it there
def pasteLabel(image, bgX, bgY, label_size_y, iteration):
    """Paste a randomly chosen, perspective-distorted label onto ``image``.

    Parameters
    ----------
    image : np.ndarray
        BGRA background image the label is pasted into.
    bgX, bgY : int
        Width and height of the background in pixels.
    label_size_y : int
        Desired label height in pixels (width follows the aspect ratio).
    iteration : int
        Index of this label on the image; 0 means overlap is impossible.

    Returns
    -------
    tuple
        ``(image, left, top, right, bottom, skip)``; ``skip == 1`` means
        no label was pasted (it did not fit, or no free spot was found).
    """
    # Pick a random label image from the label set
    label_choice = random.choice(glob(os.path.join(image_dir, "*.PNG")))
    # Resize label to the requested height, preserving its aspect ratio
    label = cv2.imread(label_choice, -1)
    label_size_x = int(label_size_y * label.shape[1] / label.shape[0])
    label = cv2.resize(label, (label_size_x, label_size_y))
    # Skip labels that cannot fit inside the background at all
    # (generalized from the previous hard-coded 300px limit)
    if label.shape[0] > bgY or label.shape[1] > bgX:
        return (image, 0, 0, 0, 0, 1)
    #############################################
    # Example image processing techniques
    # NOTE: replace with your techniques
    #############################################
    # Shear the label by a random amount (vertical perspective distortion).
    # randint needs integer bounds: label_size_y / 6 is a float in Python 3
    # and raises ValueError, so use integer division instead.
    distort = random.randint(0, label_size_y // 6)
    leftD = random.randint(0, 1)
    rightD = not leftD
    (leftD, rightD) = (leftD * distort, rightD * distort)
    # Distort at angle
    oldpts = np.float32([[0, 0], [label_size_x, 0], [0, label_size_y],
                         [label_size_x, label_size_y]])
    newpts = np.float32([[0, leftD], [label_size_x, rightD],
                         [0, label_size_y - leftD],
                         [label_size_x, label_size_y - rightD]])
    M = cv2.getPerspectiveTransform(oldpts, newpts)
    label = cv2.warpPerspective(label, M, (label_size_x, label_size_y))
    #############################################
    # Region where the label stays inside the image boundary
    max_x_range = bgX - label.shape[1]
    max_y_range = bgY - label.shape[0]
    # The first label cannot overlap anything
    if iteration == 0:
        overlap_found = False
        label_x_location = random.randint(0, max_x_range)
        label_y_location = random.randint(0, max_y_range)
    else:
        overlap_found = True
        times_tried = 0
    # Retry random locations until a non-overlapping spot is found
    while overlap_found:
        label_x_location = random.randint(0, max_x_range)
        label_y_location = random.randint(0, max_y_range)
        times_tried += 1
        if times_tried > 10:
            return (image, 0, 0, 0, 0, 1)
        # Alpha channel < 225 marks pixels already covered by a label
        # (assumes image has 4 channels -- the caller merges in alpha)
        target_area = image[label_y_location : label_y_location + label.shape[0],
                            label_x_location : label_x_location + label.shape[1],
                            3]
        overlap_found = np.any(target_area < 225)
    image[label_y_location : label_y_location + label.shape[0],
          label_x_location : label_x_location + label.shape[1]] = label
    # Tight bounding box of the (distorted) label within its pasted rectangle
    coord = np.where(label[:] > 0)
    label_size_y = max(coord[0]) - min(coord[0])
    label_size_x = max(coord[1]) - min(coord[1])
    label_y_location = min(coord[0]) + label_y_location
    label_x_location = min(coord[1]) + label_x_location
    return (image, label_x_location, label_y_location, label_x_location + label_size_x,
            label_y_location + label_size_y, 0)
def check_directories():
    """Create the output directory tree for the generated dataset.

    Ensures ``saved_dir`` exists with ``train/images``, ``train/labels``,
    ``val/images`` and ``val/labels`` subdirectories.  Safe to call when
    some or all of the directories already exist.
    """
    for split in ("train", "val"):
        for leaf in ("images", "labels"):
            # exist_ok avoids the check-then-create race of the previous
            # version; os.path.join replaces ad-hoc string concatenation
            os.makedirs(os.path.join(saved_dir, split, leaf), exist_ok=True)
if __name__ == '__main__':
    check_directories()
    # Generate both dataset splits; each split has its own image count
    for dataset, dssize in ( ("train", copies_in_train), ("val", copies_in_val), ):
        for i in range(dssize):
            # Open background image; retry until a readable file is picked
            bg = None
            while bg is None:
                # Change .PNG to your image type
                bg_choice = random.choice(glob(os.path.join(background_dir, "*.PNG")))
                bg = cv2.imread(bg_choice, -1)
                if bg is None:
                    print ("{} is invalid.".format(bg_choice))
            # Add the alpha channel to background (fully opaque); the
            # IndexError fallback handles single-channel (grayscale) input
            rgb = cv2.split(bg)
            try:
                bg = cv2.merge((rgb[0], rgb[1], rgb[2], 0*rgb[0]+255))
            except IndexError:
                bg = cv2.merge((rgb[0], rgb[0], rgb[0], 0*rgb[0]+255))
            # Resize the image to uniform size, get shape of image
            bg = cv2.resize(bg, (desired_width, desired_height))
            (rows, cols, channel) = bg.shape
            # Randomly select how many labels to add to image
            n_labels = random.randint(1, max_labels)
            # Determine size of labels so n_labels of them fit vertically
            label_y_size = int(np.floor(rows/(n_labels + 1)))
            # Clamp label height into [min_label_size, max_label_size]
            label_y_size = min(label_y_size, max_label_size)
            label_y_size = max(label_y_size, min_label_size)
            # Create copy of background; labels are pasted onto the copy
            edit = bg.copy()
            # Create text file for bounding box information
            # (row layout resembles the KITTI label format -- verify
            # against the consumer of these files)
            with open("{}{}/labels/data_{:05d}.txt".format(saved_dir, dataset, i), 'w') as f:
                # Loop through images to paste on background
                for n in range(n_labels):
                    (edit, left, top, right, bottom, skip) = pasteLabel(edit, cols, rows, label_y_size, n)
                    # If a suitable spot for label not found in a timely manner, skip it
                    if not skip:
                        # Write information to label file
                        f.write("Label 0 0 0 {} {} {} {} 0 0 0 0 0 0 0 0\n".format(left, top, right, bottom))
            # Replace transparent regions of labels with background
            mask = (edit[:, :, 3] < 255)
            edit[mask] = bg[mask]
            # Add noise to the image
            # NOTE(review): casting normal(0, 0.5) samples to uint8 wraps
            # negative draws around to ~255, producing salt noise rather
            # than small gaussian noise -- confirm this is intended
            edit += np.random.normal(noise_mean, noise_std, size=(rows, cols, 4)).astype(np.uint8)
            # Generate a random blur value and blur the image
            blur = random.randint(blur_min_size, blur_max_size)
            # GaussianBlur requires an odd kernel size
            if blur % 2 == 0:
                blur += 1
            # Perform Gaussian Blur
            newPicture = cv2.GaussianBlur(edit, (blur, blur), 1)
            # Write the resulting image to a file
            cv2.imwrite("{}{}/images/data_{:05d}.png".format(saved_dir, dataset, i), newPicture)
            print ("Processing {}{}/images/data_{:05d}.png".format(saved_dir, dataset, i))
|
{"hexsha": "11fd05c76e32bfd0accdc9d76abf5ac9ea92297b", "size": 7759, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/generate-data.py", "max_stars_repo_name": "CUFCTL/dlbd-ci", "max_stars_repo_head_hexsha": "7d0b040e730f01e79cb749fa55361b32456c5175", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-02-08T14:02:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-05T21:16:20.000Z", "max_issues_repo_path": "scripts/generate-data.py", "max_issues_repo_name": "LukeAI/DLBD", "max_issues_repo_head_hexsha": "7d0b040e730f01e79cb749fa55361b32456c5175", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/generate-data.py", "max_forks_repo_name": "LukeAI/DLBD", "max_forks_repo_head_hexsha": "7d0b040e730f01e79cb749fa55361b32456c5175", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-03-08T17:34:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-28T05:40:12.000Z", "avg_line_length": 37.8487804878, "max_line_length": 109, "alphanum_fraction": 0.5851269493, "include": true, "reason": "import numpy", "num_tokens": 1878}
|
from abc import abstractmethod
import numpy as np
from quara.loss_function.loss_function import LossFunction, LossFunctionOption
from quara.protocol.qtomography.standard.standard_qtomography import StandardQTomography
class MinimizationResult:
    """Container for the outcome of a minimization run.

    Bundles the optimal variable found by an algorithm with the wall-clock
    time the optimization took.
    """

    def __init__(self, value: np.ndarray, computation_time: float = None):
        """Constructor

        Parameters
        ----------
        value : np.ndarray
            the variable that attains the minimum.
        computation_time : float, optional
            elapsed time of the minimization, by default None
        """
        self._value = value
        self._computation_time = computation_time

    @property
    def value(self) -> np.ndarray:
        """the variable that attains the minimum.

        Returns
        -------
        np.ndarray
            the result of the minimization.
        """
        return self._value

    @property
    def computation_time(self) -> float:
        """elapsed time of the minimization (``None`` when not measured).

        Returns
        -------
        float
            computation time for the estimate.
        """
        return self._computation_time
class MinimizationAlgorithmOption:
    """Options shared by minimization algorithms."""

    def __init__(
        self,
        on_algo_eq_constraint: bool = True,
        on_algo_ineq_constraint: bool = True,
        var_start: np.ndarray = None,
        max_iteration_optimization: int = None,
    ):
        """Constructor

        Parameters
        ----------
        on_algo_eq_constraint : bool, optional
            whether the equality constraint is handled by the algorithm,
            by default True
        on_algo_ineq_constraint : bool, optional
            whether the inequality constraint is handled by the algorithm,
            by default True
        var_start : np.ndarray, optional
            starting point of the optimization, by default None.
        max_iteration_optimization : int, optional
            maximum number of optimization iterations, by default None.
        """
        self._on_algo_eq_constraint = on_algo_eq_constraint
        self._on_algo_ineq_constraint = on_algo_ineq_constraint
        self._var_start = var_start
        self._max_iteration_optimization = max_iteration_optimization

    @property
    def on_algo_eq_constraint(self) -> bool:  # read only
        """whether the equality constraint is handled by the algorithm.

        Returns
        -------
        bool
            True when the algorithm enforces the equality constraint.
        """
        return self._on_algo_eq_constraint

    @property
    def on_algo_ineq_constraint(self) -> bool:  # read only
        """whether the inequality constraint is handled by the algorithm.

        Returns
        -------
        bool
            True when the algorithm enforces the inequality constraint.
        """
        return self._on_algo_ineq_constraint

    @property
    def var_start(self) -> np.ndarray:
        """starting point of the optimization.

        Returns
        -------
        np.ndarray
            initial variable for the algorithm (``None`` for the default).
        """
        return self._var_start

    @property
    def max_iteration_optimization(self) -> int:
        """maximum number of optimization iterations.

        Returns
        -------
        int
            iteration limit; how ``None`` is interpreted is up to the
            consuming algorithm.
        """
        return self._max_iteration_optimization
class MinimizationAlgorithm:
    def __init__(self):
        """Constructor.

        Subclasses are responsible for setting the following flags:

        - ``_is_gradient_required``: True when the algorithm needs a gradient.
        - ``_is_hessian_required``: True when the algorithm needs a Hessian.
        """
        self._is_gradient_required = False
        self._is_hessian_required = False
        self._loss = None
        self._option = None

    @property
    def is_gradient_required(self) -> bool:
        """whether the algorithm requires a gradient.

        Returns
        -------
        bool
            True when a gradient is required.
        """
        return self._is_gradient_required

    @property
    def is_hessian_required(self) -> bool:
        """whether the algorithm requires a Hessian.

        Returns
        -------
        bool
            True when a Hessian is required.
        """
        return self._is_hessian_required

    @property
    def loss(self) -> LossFunction:
        """the loss function assigned to this algorithm.

        Returns
        -------
        LossFunction
            the loss (``None`` until :func:`set_from_loss` is called).
        """
        return self._loss

    def set_from_loss(self, loss: LossFunction) -> None:
        """assigns ``loss`` and validates it via ``is_loss_sufficient``.

        Parameters
        ----------
        loss : MinimizationAlgorithmOption
            loss to set.
        """
        self._loss = loss
        self.is_loss_sufficient()

    def is_loss_sufficient(self) -> bool:
        """checks whether the assigned loss is usable.

        The base implementation always returns True; subclasses override
        this as needed.

        Returns
        -------
        bool
            whether the loss is sufficient.
        """
        return True

    @property
    def option(self) -> MinimizationAlgorithmOption:
        """the algorithm option assigned to this algorithm.

        Returns
        -------
        LossFunctionOption
            the option (``None`` until :func:`set_from_option` is called).
        """
        return self._option

    def set_from_option(self, option: MinimizationAlgorithmOption) -> None:
        """assigns ``option`` and validates it via ``is_option_sufficient``.

        Parameters
        ----------
        option : MinimizationAlgorithmOption
            option to set.
        """
        self._option = option
        self.is_option_sufficient()

    @abstractmethod
    def set_constraint_from_standard_qt_and_option(
        self, qt: StandardQTomography, option: MinimizationAlgorithmOption
    ) -> None:
        """sets the constraint from a StandardQTomography and an option.

        Parameters
        ----------
        qt : StandardQTomography
            tomography object the constraint is derived from.
        option : MinimizationAlgorithmOption
            algorithm option.

        Raises
        ------
        NotImplementedError
            always raised here; subclasses must implement this method.
        """
        raise NotImplementedError()

    def is_option_sufficient(self) -> bool:
        """checks whether the assigned option is usable.

        The base implementation always returns True; subclasses override
        this as needed.

        Returns
        -------
        bool
            whether the option is sufficient.
        """
        return True

    def is_loss_and_option_sufficient(self) -> bool:
        """checks whether the loss/option combination is usable.

        The base implementation always returns True; subclasses override
        this as needed.

        Returns
        -------
        bool
            whether the loss and the option are sufficient together.
        """
        return True

    @abstractmethod
    def optimize(
        self,
        loss_function: LossFunction,
        loss_function_option: LossFunctionOption,
        algorithm_option: MinimizationAlgorithmOption,
        on_iteration_history: bool = False,
    ) -> MinimizationResult:
        """runs the optimization with the given loss and options.

        Parameters
        ----------
        loss_function : LossFunction
            loss function to minimize.
        loss_function_option : LossFunctionOption
            option for the loss function.
        algorithm_option : MinimizationAlgorithmOption
            option for this algorithm.
        on_iteration_history : bool, optional
            whether to return iteration history, by default False

        Returns
        -------
        MinimizationResult
            the result of the optimization.

        Raises
        ------
        NotImplementedError
            always raised here; subclasses must implement this method.
        """
        raise NotImplementedError()
|
{"hexsha": "0b4cbb1f59a854ddf0ace91a30226884d473d765", "size": 8522, "ext": "py", "lang": "Python", "max_stars_repo_path": "quara/minimization_algorithm/minimization_algorithm.py", "max_stars_repo_name": "tknrsgym/quara", "max_stars_repo_head_hexsha": "8f3337af83cdd02bb85632bb1e297902b1fff8fb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-05-19T11:44:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:13:49.000Z", "max_issues_repo_path": "quara/minimization_algorithm/minimization_algorithm.py", "max_issues_repo_name": "tknrsgym/quara", "max_issues_repo_head_hexsha": "8f3337af83cdd02bb85632bb1e297902b1fff8fb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-06-02T01:24:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-02T12:20:31.000Z", "max_forks_repo_path": "quara/minimization_algorithm/minimization_algorithm.py", "max_forks_repo_name": "tknrsgym/quara", "max_forks_repo_head_hexsha": "8f3337af83cdd02bb85632bb1e297902b1fff8fb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-14T13:21:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-14T13:21:27.000Z", "avg_line_length": 29.5902777778, "max_line_length": 101, "alphanum_fraction": 0.5934053039, "include": true, "reason": "import numpy", "num_tokens": 1596}
|
////////////////////////////////////////////////////////////////////////////////
// Name: vi.cpp
// Purpose: Implementation of class wex::vi
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/vi.html
// Author: Anton van Wezenbeek
// Copyright: (c) 2020-2022 Anton van Wezenbeek
////////////////////////////////////////////////////////////////////////////////
#include <boost/tokenizer.hpp>
#include <wex/core/core.h>
#include <wex/core/log.h>
#include <wex/core/regex.h>
#include <wex/factory/stc.h>
#include <wex/ui/frame.h>
#include <wex/vi/macros.h>
#include <wex/vi/vi.h>
#include "motion.h"
#include "util.h"
#include "vim.h"
// without this code adding tab in block insert mode fails, it only
// add one tab instead of a line of tabs
namespace wex
{
// True when vi is inserting into a rectangular (block) selection; this is
// the case the file-top comment refers to: without it, adding a tab in
// block insert mode adds one tab instead of a column of tabs.
bool is_block_insert(vi* vi)
{
  return vi->mode().is_insert() &&
         (vi->get_stc()->SelectionIsRectangle() ||
          vi->get_stc()->GetSelectionMode() == wxSTC_SEL_THIN);
}
// True for unmodified keys vi handles specially: escape, backspace,
// return; tab outside visual mode; and navigation keys outside insert
// mode.
bool is_special_key(const wxKeyEvent& event, const vi_mode& mode)
{
  return !event.HasAnyModifiers() &&
         (event.GetKeyCode() == WXK_ESCAPE || event.GetKeyCode() == WXK_BACK ||
          event.GetKeyCode() == WXK_RETURN ||
          event.GetKeyCode() == WXK_NUMPAD_ENTER ||
          (!mode.is_visual() && event.GetKeyCode() == WXK_TAB) ||
          (!mode.is_insert() &&
           (event.GetKeyCode() == WXK_LEFT ||
            event.GetKeyCode() == WXK_DELETE ||
            event.GetKeyCode() == WXK_DOWN || event.GetKeyCode() == WXK_UP ||
            event.GetKeyCode() == WXK_RIGHT ||
            event.GetKeyCode() == WXK_PAGEUP ||
            event.GetKeyCode() == WXK_PAGEDOWN)));
}
// Runs the macro-map command bound to this key, if any. Returns false
// when the key was consumed (a mapping ran), true when the caller should
// continue processing the event.
bool process_modifier(vi* vi, macros::key_t type, const wxKeyEvent& event)
{
  if (const auto& it =
        ex::get_macros().get_keys_map(type).find(event.GetKeyCode());
      it != ex::get_macros().get_keys_map(type).end())
  {
    vi->command(it->second);
    return false;
  }
  return true;
}
// Inserts the selection range after the leading ':' of an ex command and
// executes it, e.g. ":s/a/b" becomes ":<range>s/a/b".
bool visual_ex_command(const std::string& command, ex* ex)
{
  const auto& sel_command(
    command[0] + ex_command::selection_range() + command.substr(1));
  return ex->command(sel_command);
}
// In visual mode with an active selection, a ':' command without an
// explicit range is run on the selection; otherwise returns false.
bool visual_vi_command(const std::string& command, vi* vi)
{
  if (
    vi->mode().is_visual() &&
    command.find(ex_command::selection_range()) == std::string::npos &&
    !vi->get_stc()->get_selected_text().empty() && command[0] == ':')
  {
    return visual_ex_command(command, vi);
  }
  return false;
}
} // namespace wex
wex::vi::vi(wex::factory::stc* arg, mode_t ex_mode)
  : ex(arg, ex_mode)
  , m_mode(
      this,
      // insert mode process: invoked when entering insert mode; starts an
      // undo action so the whole insert can be undone as one unit
      // (m_dot is the '.'-repeat replay flag -- no fresh recording then)
      [=, this](const std::string& command)
      {
        if (!m_dot)
        {
          m_insert_text.clear();
        }
        get_stc()->BeginUndoAction();
      },
      // back to command mode process: records the completed insert for
      // '.'-repeat and macros (unless replaying), closes the undo action
      [=, this]()
      {
        if (!m_dot)
        {
          const std::string c(m_insert_command + register_insert());
          set_last_command(c + esc());
          get_macros().record(c);
          get_macros().record(esc(), true);
        }
        m_command.clear();
        m_insert_command.clear();
        get_stc()->EndUndoAction();
      })
  // the commands that participate in set_last_command / '.' handling
  , m_last_commands{{"!", "<", ">", "A", "C", "D", "I", "J", "O",
                     "P", "R", "S", "X", "Y", "a", "c", "d", "g",
                     "i", "o", "p", "r", "s", "x", "y", "~"}}
  , m_motion_commands(commands_motion())
  , m_other_commands(commands_other())
{
}
void wex::vi::append_insert_command(const std::string& s)
{
  // accumulate the keys typed for the current insert command
  m_insert_command += s;
  log::trace("insert command") << m_insert_command;
}
void wex::vi::append_insert_text(const std::string& s)
{
  // accumulate the inserted text and mirror it into the insert register
  m_insert_text += s;
  set_register_insert(m_insert_text);
  log::trace("insert text") << m_insert_text;
}
// Entry point for a complete vi command string. Returns true when the
// command was handled.
bool wex::vi::command(const std::string& command)
{
  if (command.empty() || !is_active())
  {
    return false;
  }
  // ':' and '!' commands are traced elsewhere; avoid double logging
  if (command.front() != ':' && command.front() != '!')
  {
    log::trace("vi command") << command;
  }
  // a ':' command on a visual selection runs with the selection range
  if (visual_vi_command(command, this))
  {
    return auto_write();
  }
  else if (m_mode.is_insert())
  {
    return insert_mode(command);
  }
  else if (
    !m_dot && command.size() > 2 && command.back() == WXK_ESCAPE &&
    command[command.size() - 2] == WXK_ESCAPE)
  {
    // double escape: leave the current mode and discard pending input
    m_mode.escape();
    m_command.clear();
    return auto_write();
  }
  if (auto parse(command); !parse_command(parse))
  {
    return false;
  }
  else
  {
    if (!m_dot)
    {
      set_last_command(command);
    }
    // record for macro playback, except commands that do their own
    // recording (macros 'q', ex ':', shell '!', searches, calc register)
    if (
      !m_mode.is_insert() && command[0] != 'q' && command[0] != ':' &&
      command[0] != '!' && command != "/" && command != "?" &&
      command != k_s(WXK_CONTROL_R) + "=")
    {
      get_macros().record(command);
    }
    return auto_write();
  }
}
// Handles a register access: the calc register (control-R =), the
// clipboard ('*'), the filename register ('%'), or a named register.
// Behavior differs between insert mode (text is added) and command mode
// (contents are shown).
void wex::vi::command_reg(const std::string& reg)
{
  switch (reg[0])
  {
    case 0:
      break;
    // calc register: control-R =
    case WXK_CONTROL_R:
      if (reg.size() > 1 && reg[1] == '=' && get_stc()->is_visual())
      {
        if (reg.size() == 2)
        {
          // no expression yet: prompt for one on the ex command line
          set_register_insert(std::string());
          frame()->show_ex_command(get_stc(), reg);
        }
        else
        {
          // evaluate the expression that follows "control-R ="
          const auto sum = calculator(reg.substr(2));
          if (m_mode.is_insert())
          {
            if (m_last_command.find('c') != std::string::npos)
            {
              get_stc()->ReplaceSelection(wxEmptyString);
            }
            get_stc()->add_text(std::to_string(sum));
            append_insert_command(reg);
          }
          else
          {
            set_register_yank(std::to_string(sum));
            frame()->show_ex_message(std::to_string(sum));
          }
        }
      }
      else
      {
        frame()->show_ex_message("calc register is control-R =");
      }
      break;
    // clipboard register
    case '*':
      if (m_mode.is_insert())
      {
        put(true);
      }
      break;
    // filename register
    case '%':
      if (m_mode.is_insert())
      {
        get_stc()->add_text(get_stc()->path().filename());
      }
      else
      {
        // command mode: show the full path and copy it to the clipboard
        frame()->show_ex_message(get_stc()->path().string());
        clipboard_add(get_stc()->path().string());
      }
      break;
    default:
      // named register: insert or show its contents
      if (!get_macros().get_register(reg[0]).empty())
      {
        if (m_mode.is_insert())
        {
          get_stc()->add_text(get_macros().get_register(reg[0]));
          // '.' is the last-insert register; keep insert text in sync
          if (reg[0] == '.')
          {
            append_insert_text(register_insert());
          }
        }
        else
        {
          frame()->show_ex_message(get_macros().get_register(reg[0]));
        }
      }
      else
      {
        // unknown or empty register
        frame()->show_ex_message("?" + reg);
      }
  }
}
// Translates a key event into the single character vi processes:
// navigation keys become their hjkl equivalents, raw-control keys pass
// through unchanged.
char wex::vi::convert_key_event(const wxKeyEvent& event) const
{
  const auto code = event.GetKeyCode();

  if (code == WXK_BACK)
  {
    return WXK_BACK;
  }

  if (code == WXK_RETURN && !m_mode.is_insert())
  {
    return 'j';
  }

  if (event.GetModifiers() & wxMOD_RAW_CONTROL)
  {
    return code;
  }

  if (const char uni = event.GetUnicodeKey(); uni != WXK_NONE)
  {
    return uni;
  }

  // no unicode representation: map navigation keys to vi motions
  switch (code)
  {
    case WXK_LEFT:
      return 'h';
    case WXK_DOWN:
    case WXK_NUMPAD_ENTER:
      return 'j';
    case WXK_UP:
      return 'k';
    case WXK_RIGHT:
      return 'l';
    case WXK_DELETE:
      return 'x';
    case WXK_PAGEUP:
      return WXK_CONTROL_B;
    case WXK_PAGEDOWN:
      return WXK_CONTROL_F;
    default:
      return code;
  }
}
// Deletes the text between two positions (given in either order), yanking
// it into the active register ('0' when none is set). Returns false when
// the document is read-only.
bool wex::vi::delete_range(int start, int end)
{
  if (get_stc()->GetReadOnly())
  {
    return false;
  }
  // normalize so first <= last
  const auto first = (start < end ? start : end);
  const auto last = (start < end ? end : start);
  if (get_stc()->is_hexmode())
  {
    get_stc()->get_hexmode_erase(last - first, first);
  }
  else if (
    get_stc()->GetSelectionMode() == wxSTC_SEL_RECTANGLE &&
    !get_stc()->GetSelectedText().empty())
  {
    // rectangular selection: Cut handles the block shape (and clipboard)
    get_stc()->Cut();
  }
  else
  {
    // yank the raw bytes into the register before deleting
    const auto& b(get_stc()->GetTextRangeRaw(first, last));
    get_macros().set_register(
      register_name() ? register_name() : '0',
      std::string(b.data(), b.length()));
    get_stc()->DeleteRange(first, last - first);
  }
  return true;
}
// Strips a leading repeat count from command, multiplying it into m_count,
// e.g. "3w" sets m_count *= 3 and reduces command to "w".
void wex::vi::filter_count(std::string& command)
{
  /*
  command: 3w
  -> v has 2 elements
  -> m_count 3
  -> command w
  */
  if (regex v("^([1-9][0-9]*)(.*)"); v.match(command) == 2)
  {
    try
    {
      m_count_present = true;
      const auto count = std::stoi(v[0]);
      m_count *= count;
      append_insert_command(v[0]);
      command = v[1];
    }
    catch (std::exception& e)
    {
      // std::stoi throws (e.g. out_of_range) for absurdly large counts;
      // reset the count state but still strip the digits from the command.
      m_count_present = false;
      log(e) << command;
      command = v[1];
    }
  }
}
// Maps the first char of a vi command onto the motion type it introduces.
wex::vi::motion_t wex::vi::get_motion(const std::string& command) const
{
  const auto first(command[0]);
  if (first == 'c')
  {
    return motion_t::CHANGE;
  }
  else if (first == 'd')
  {
    return motion_t::DEL;
  }
  else if (first == 'g')
  {
    // 'g' introduces vim-style commands; delegate to the vim layer.
    return vim::get_motion(command);
  }
  else if (first == 'y')
  {
    return motion_t::YANK;
  }
  return motion_t::NAVIGATE;
}
// Dispatches a command while in insert mode: hexmode, raw control chars,
// the calculator register (^R=), other registers (^R), or plain text.
// Returns false only for an empty command.
bool wex::vi::insert_mode(const std::string& command)
{
  if (command.empty())
  {
    return false;
  }
  else if (get_stc()->is_hexmode())
  {
    return insert_mode_hex(command);
  }
  // add control chars
  else if (command.size() == 2 && command[1] == 0)
  {
    append_insert_text(std::string(1, command[0]));
    get_stc()->add_text(std::string(1, command[0]));
    return true;
  }
  if (command.starts_with(k_s(WXK_CONTROL_R) + "="))
  {
    // calculator register: evaluate the expression following control-R =
    command_reg(command);
    return true;
  }
  else if (command.find(k_s(WXK_CONTROL_R)) != std::string::npos)
  {
    return insert_mode_register(command);
  }
  insert_mode_other(command);
  return true;
}
// Handles ESC during insert mode: repeats the inserted text m_count times,
// stores the insert register, adds any text preceding the ESC, and leaves
// insert mode (clearing overtype).
void wex::vi::insert_mode_escape(const std::string& command)
{
  // Add extra inserts if necessary.
  if (!m_insert_text.empty())
  {
    // one insert already happened while typing; repeat the remaining count
    for (auto i = 1; i < m_count; i++)
    {
      insert_mode_normal(m_insert_text);
    }
    set_register_insert(m_insert_text);
  }
  // If we have text to be added.
  if (command.size() > 1)
  {
    // rest: the command without the trailing ESC char
    if (const auto rest(command.substr(0, command.size() - 1));
        !get_stc()->GetSelectedText().empty())
    {
      get_stc()->ReplaceSelection(rest);
    }
    else
    {
      if (!get_stc()->GetOvertype())
      {
        REPEAT(get_stc()->add_text(rest));
      }
      else
      {
        // overtype: replace the following chars instead of inserting
        std::string text;
        get_stc()->SetTargetStart(get_stc()->GetCurrentPos());
        REPEAT(text += rest;);
        get_stc()->SetTargetEnd(get_stc()->GetCurrentPos() + text.size());
        get_stc()->ReplaceTarget(text);
      }
    }
  }
  if (m_mode.escape())
  {
    get_stc()->SetOvertype(false);
  }
}
// Insert-mode handling while the STC is in hex mode: ESC leaves insert
// mode (and clears overtype), anything else goes to the hexmode editor.
bool wex::vi::insert_mode_hex(const std::string& command)
{
  if (static_cast<int>(command.back()) != WXK_ESCAPE)
  {
    return get_stc()->get_hexmode_insert(command, get_stc()->GetCurrentPos());
  }
  if (m_mode.escape())
  {
    get_stc()->SetOvertype(false);
  }
  return true;
}
// Inserts text line by line, expanding abbreviations when a token ends in
// a separator (space, tab, ';') and auto-indenting after newlines.
// Text containing NUL bytes is added verbatim.
void wex::vi::insert_mode_normal(const std::string& text)
{
  // split on line endings, keeping the "\r\n" chars as their own tokens
  if (boost::tokenizer<boost::char_separator<char>> tok(
        text,
        boost::char_separator<char>("", "\r\n", boost::keep_empty_tokens));
      text.find('\0') == std::string::npos &&
      std::distance(tok.begin(), tok.end()) >= 1)
  {
    for (auto it = tok.begin(); it != tok.end(); ++it)
    {
      if (auto token(*it); !token.empty())
      {
        // a trailing separator may complete an abbreviation
        if (token.back() == ' ' || token.back() == '\t' || token.back() == ';')
        {
          if (!m_insert_text.empty())
          {
            // the word to expand is what follows the last separator typed
            const auto last(m_insert_text.find_last_of(" ;\t"));
            if (const auto& it = get_macros().get_abbreviations().find(
                  m_insert_text.substr(last + 1));
                it != get_macros().get_abbreviations().end())
            {
              m_insert_text.replace(last + 1, it->first.size(), it->second);
              // replace the abbreviation already present in the buffer
              const auto pos = get_stc()->GetCurrentPos();
              const auto match_pos = get_stc()->FindText(
                pos,
                get_stc()->PositionFromLine(get_stc()->get_current_line()),
                it->first);
              if (match_pos != wxSTC_INVALID_POSITION)
              {
                get_stc()->SetTargetRange(
                  match_pos,
                  match_pos + it->first.size());
                get_stc()->ReplaceTarget(it->second);
                get_stc()->SetCurrentPos(
                  pos + it->second.size() - it->first.size());
              }
            }
          }
          else
          {
            // no pending insert text: expand within the token itself,
            // skipping the trailing separator char
            const auto last(token.find_last_of(" ;\t", token.size() - 2));
            const auto word(token.substr(last + 1, token.size() - 2 - last));
            if (const auto& it = get_macros().get_abbreviations().find(word);
                it != get_macros().get_abbreviations().end())
            {
              token.replace(last + 1, it->first.size(), it->second);
            }
          }
        }
        get_stc()->add_text(token);
        if (token == "\n")
        {
          get_stc()->auto_indentation(token[0]);
        }
      }
    }
  }
  else
  {
    // contains NUL (or nothing tokenizable): add unmodified
    get_stc()->add_text(text);
  }
}
// Handles the remaining insert-mode keys: BACK, DELETE, ESC, a pending
// control-R register, newline, or plain text. Returns false only when a
// register paste consumed the key.
bool wex::vi::insert_mode_other(const std::string& command)
{
  switch (command.back())
  {
    case WXK_BACK:
      if (!m_insert_text.empty())
      {
        // keep the recorded insert text in sync with the buffer
        m_insert_text.pop_back();
      }
      get_stc()->DeleteBack();
      break;
    case WXK_CONTROL_R:
      // register paste started; the register name arrives with the next key
      append_insert_text(command);
      break;
    case WXK_DELETE:
      delete_range(get_stc()->GetCurrentPos(), get_stc()->GetCurrentPos() + 1);
      break;
    case WXK_ESCAPE:
      insert_mode_escape(command);
      break;
    default:
      if (
        m_last_command.find('c') != std::string::npos && m_insert_text.empty())
      {
        // a change command replaces the selection with the first typed char
        get_stc()->ReplaceSelection(wxEmptyString);
      }
      if (!m_insert_text.empty() && m_insert_text.back() == WXK_CONTROL_R)
      {
        // this key names the register for the pending control-R
        get_stc()->ReplaceSelection(wxEmptyString);
        if (command.back() != '.')
        {
          append_insert_text(command);
        }
        else
        {
          // '.' inserts the insert register itself; drop the control-R
          m_insert_text.pop_back();
        }
        command_reg(std::string(1, command.back()));
        return false;
      }
      else
      {
        if (
          command.size() == 1 &&
          (static_cast<int>(command.back()) == WXK_RETURN ||
           static_cast<int>(command.back()) == WXK_NUMPAD_ENTER))
        {
          get_stc()->NewLine();
          if (!get_stc()->AutoCompActive())
          {
            append_insert_text(get_stc()->eol());
          }
        }
        else
        {
          if (!get_stc()->GetOvertype())
          {
            insert_mode_normal(command);
          }
          if (!m_dot)
          {
            // do not re-record while replaying a '.' repeat
            append_insert_text(command);
          }
        }
      }
  }
  return true;
}
bool wex::vi::insert_mode_register(const std::string& command)
{
if (command.size() < 2)
{
return false;
}
std::string text(command);
marker_and_register_expansion(this, text);
insert_mode(text);
return true;
}
// wxEVT_CHAR handler. Returns true when the event should be skipped
// (passed on to the control), false when vi consumed it.
bool wex::vi::on_char(const wxKeyEvent& event)
{
  if (!is_active())
  {
    return true;
  }
  else if (m_mode.is_insert())
  {
    if (is_block_insert(this))
    {
      return true;
    }
    m_command.append(convert_key_event(event));
    const bool result = insert_mode(m_command.command());
    // hexmode keeps up to 2 pending chars before giving up on the command
    if (result || (get_stc()->is_hexmode() && m_command.size() > 2))
    {
      m_command.clear();
    }
    // in overtype mode let the control handle the char as well
    return result && get_stc()->GetOvertype();
  }
  else
  {
    if (!(event.GetModifiers() & wxMOD_ALT))
    {
      // This check is important, as WXK_NONE (0)
      // would add nullptr terminator at the end of m_command,
      // and pressing ESC would not help, (rest is empty
      // because of the nullptr).
      if (event.GetUnicodeKey() != (wxChar)WXK_NONE)
      {
        if (
          !m_command.empty() && m_command.front() == '@' &&
          event.GetKeyCode() == WXK_BACK)
        {
          // editing a macro name: BACK removes the last char
          m_command.pop_back();
        }
        else
        {
#ifdef __WXOSX__
          // on macOS raw-control chords arrive via GetKeyCode instead
          if (event.GetModifiers() & wxMOD_RAW_CONTROL)
          {
            if (m_command.append_exec(event.GetKeyCode()))
            {
              m_command.clear();
            }
          }
          else
#endif
          if (m_command.append_exec(event.GetUnicodeKey()))
          {
            m_command.clear();
          }
        }
      }
      else
      {
        return true;
      }
      return false;
    }
    else
    {
      return true;
    }
  }
}
// wxEVT_KEY_DOWN handler, runs before on_char. Returns true when the
// event should be skipped (passed on), false when vi consumed it.
bool wex::vi::on_key_down(const wxKeyEvent& event)
{
  if (!is_active() || get_stc()->AutoCompActive())
  {
    return true;
  }
  else if (!m_command.empty() && m_command.front() == '@')
  {
    // a macro name is being entered
    return process_macro_key(event);
  }
  else if (is_block_insert(this) && event.GetKeyCode() != WXK_ESCAPE)
  {
    m_command.clear();
    return true;
  }
  else if (is_special_key(event, m_mode))
  {
    return process_special_key(event);
  }
  else if (
    (event.GetModifiers() & wxMOD_CONTROL) && event.GetKeyCode() != WXK_NONE)
  {
    return process_modifier(this, macros::key_t::KEY_CONTROL, event);
  }
  else if ((event.GetModifiers() & wxMOD_ALT) && event.GetKeyCode() != WXK_NONE)
  {
    // ALT chords first leave insert mode, then run as a mapped key
    if (!m_mode.is_command())
    {
      command(esc());
    }
    return process_modifier(this, macros::key_t::KEY_ALT, event);
  }
  else
  {
    return true;
  }
}
// Handles key presses while a macro name is being entered ('@' pending):
// BACK edits the name, ESC cancels it. Always lets the event propagate.
bool wex::vi::process_macro_key(const wxKeyEvent& event)
{
  switch (event.GetKeyCode())
  {
    case WXK_BACK:
      m_command.pop_back();
      frame()->statustext(m_command.command().substr(1), "PaneMacro");
      break;

    case WXK_ESCAPE:
      m_command.clear();
      m_insert_command.clear();
      frame()->statustext(get_macros().mode().get_macro(), "PaneMacro");
      break;
  }
  return true;
}
// Handles keys that must be intercepted before normal char processing.
// Returns false when appending the key completed and executed a command.
bool wex::vi::process_special_key(const wxKeyEvent& event)
{
  if (event.GetKeyCode() == WXK_BACK)
  {
    if (!m_insert_text.empty())
    {
      // keep the recorded insert text in sync
      m_insert_text.pop_back();
    }
  }
  else if (m_command.append_exec(convert_key_event(event)))
  {
    m_command.clear();
    if (!m_mode.is_insert())
    {
      m_insert_command.clear();
    }
    return false;
  }
  return true;
}
// Pastes the active register at the caret; with after=true a line-wise
// register is pasted below the current line. Returns false when the
// register is empty.
bool wex::vi::put(bool after)
{
  if (register_text().empty())
  {
    return false;
  }
  // do not trim
  const bool yanked_lines = (get_number_of_lines(register_text(), false) > 1);
  if (yanked_lines)
  {
    if (after)
    {
      if (
        get_stc()->GetColumn(get_stc()->GetCurrentPos()) > 0 ||
        get_stc()->GetSelectedText().empty())
      {
        get_stc()->LineDown();
      }
    }
    // line-wise paste always starts at the beginning of a line
    get_stc()->Home();
  }
  get_stc()->add_text(register_text());
  if (yanked_lines && after)
  {
    // move the caret back onto the pasted text
    get_stc()->LineUp();
  }
  return true;
}
// Remembers command as the last repeatable command ('.' repeat) when its
// first char after an optional leading count is one of m_last_commands.
void wex::vi::set_last_command(const std::string& command)
{
  size_t first = 0;
  if (regex v("^([1-9][0-9]*)(.*)"); v.match(command) == 2)
  {
    first = v[0].size(); // skip a possible leading count
  }
  if (const auto& it = std::find(
        m_last_commands.begin(),
        m_last_commands.end(),
        command.substr(first, 1));
      it != m_last_commands.end())
  {
    // "gg" is explicitly excluded from being repeatable
    if (command != "gg")
    {
      m_last_command = command;
      log::trace("last command") << m_last_command;
    }
  }
}
// Yanks from position start to the current position (either direction):
// copies the range, selects it, leaves visual mode, and stores the text
// in the yank register or the named register.
void wex::vi::yank_range(int start)
{
  if (auto end = get_stc()->GetCurrentPos(); end - start > 0)
  {
    get_stc()->CopyRange(start, end - start);
    get_stc()->SetSelection(start, end);
  }
  else
  {
    // reposition end at start of selection
    if (!get_stc()->GetSelectedText().empty())
    {
      end = get_stc()->GetSelectionStart();
    }
    else
    {
      end--;
    }
    get_stc()->CopyRange(end, start - end);
    get_stc()->SetSelection(end, start);
  }
  m_mode.escape();
  if (!register_name())
  {
    set_register_yank(get_stc()->get_selected_text());
  }
  else
  {
    get_macros().set_register(register_name(), get_stc()->get_selected_text());
    get_stc()->SelectNone();
  }
  info_message(get_stc()->get_selected_text(), wex::info_message_t::YANK);
}
|
{"hexsha": "8d7d78a030f7767fbff19c60f4a82ae270574fde", "size": 20184, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/vi/vi.cpp", "max_stars_repo_name": "antonvw/wxExtension", "max_stars_repo_head_hexsha": "d5523346cf0b1dbd45fd20dc33bf8d679299676c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2016-01-10T20:59:02.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-09T14:18:13.000Z", "max_issues_repo_path": "src/vi/vi.cpp", "max_issues_repo_name": "antonvw/wxExtension", "max_issues_repo_head_hexsha": "d5523346cf0b1dbd45fd20dc33bf8d679299676c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T17:46:17.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-04T17:33:50.000Z", "max_forks_repo_path": "src/vi/vi.cpp", "max_forks_repo_name": "antonvw/wxExtension", "max_forks_repo_head_hexsha": "d5523346cf0b1dbd45fd20dc33bf8d679299676c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2015-04-05T08:45:22.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-24T06:43:24.000Z", "avg_line_length": 22.3027624309, "max_line_length": 80, "alphanum_fraction": 0.5420630202, "num_tokens": 5263}
|
using RecipesBase
export McmcSampler, SnfMcmcOutput
"""
    McmcSampler(move; desired_samples=1000, burn_in=0, lag=1)

Pairs an MCMC move with the output parameters (number of samples,
burn-in, thinning lag) used when drawing samples.
"""
struct McmcSampler{T<:McmcMove}
    move::T                       # proposal / accept-reject move
    output::McmcOutputParameters  # desired_samples, burn_in, lag
    function McmcSampler(
        move::T;
        desired_samples::Int=1000, burn_in::Int=0, lag::Int=1
    ) where {T<:McmcMove}
        output = McmcOutputParameters(desired_samples, burn_in, lag)
        new{T}(move, output)
    end
end
# Compact printing: show only the concrete sampler type.
Base.show(io::IO, x::McmcSampler{T}) where {T} = print(io, typeof(x))
# Forward the acceptance probability query to the underlying move.
acceptance_prob(mcmc::McmcSampler) = acceptance_prob(mcmc.move)
"""
    SnfMcmcOutput(sample, model)

Container pairing a drawn MCMC sample with the SNF model it came from.
"""
struct SnfMcmcOutput{T,N,S}
    sample::Vector{Array{T,N}}  # drawn states
    model::SNF{T,N,S}           # model the sample was drawn from
end
# Compact printing: show only the type.
Base.show(io::IO, x::SnfMcmcOutput) = print(io, typeof(x))
# Plots.jl recipe for SnfMcmcOutput: plots, per sample index, the distance
# of the sampled state from the model mode (under the model's metric `d`).
@recipe function f(output::SnfMcmcOutput)
    model = output.model
    sample = output.sample
    # distance from the mode for every sampled state
    x = map(x -> model.d(model.mode, x), sample)
    xguide --> "Sample"
    yguide --> "Distance from Mode"
    legend --> false
    size --> (800, 300)
    margin --> 5mm
    x
end
"""
    draw_sample!(sample_out, mcmc, model; burn_in, lag, init)

Fill `sample_out` in place with states drawn from `model` via MCMC,
starting from `init`, discarding the first `burn_in` iterations and
keeping every `lag`-th state thereafter.

Note: the state is recorded *before* `accept_reject!` is applied, so with
`burn_in == 0` the first stored sample is `init` itself (this mirrors the
original behaviour).
"""
function draw_sample!(
    sample_out::Union{Vector{Vector{Int}},SubArray},
    mcmc::McmcSampler,
    model::VecMultigraphSNF;
    burn_in::Int=mcmc.output.burn_in,
    lag::Int=mcmc.output.lag,
    init::Vector{Int}=model.mode
)
    x_curr = copy(init)
    x_prop = copy(x_curr)
    sample_count = 1
    i = 0
    reset_counts!(mcmc.move)
    while sample_count ≤ length(sample_out)
        i += 1
        # Store the current state once burn-in has passed, thinning by lag.
        # `&&` short-circuits; the original used bitwise `&`, which always
        # evaluated both operands (same result on Bools, but non-idiomatic).
        if i > burn_in && ((i - 1) % lag) == 0
            @inbounds sample_out[sample_count] = deepcopy(x_curr)
            sample_count += 1
        end
        accept_reject!(
            x_curr, x_prop,
            mcmc.move,
            model
        )
    end
end
"""
    draw_sample(mcmc, model; desired_samples, kwargs...)

Allocate an output vector of `desired_samples` states and delegate the
actual sampling to `draw_sample!`.
"""
function draw_sample(
    mcmc::McmcSampler,
    model::VecMultigraphSNF;
    desired_samples::Int=mcmc.output.desired_samples,
    args...
)
    out = Vector{Vector{Int}}(undef, desired_samples)
    draw_sample!(out, mcmc, model; args...)
    out
end
# Calling the sampler on a model draws a sample and wraps it together
# with the model in an SnfMcmcOutput.
function (mcmc::McmcSampler)(
    model::VecMultigraphSNF;
    args...
)
    return SnfMcmcOutput(draw_sample(mcmc, model; args...), model)
end
|
{"hexsha": "b2e7b461929971f2804098e141c2f19b6760fa5c", "size": 2120, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Graph Models/SNF/Samplers/SNF_model_sampler.jl", "max_stars_repo_name": "gmbolt/InteractionNetworkModels.jl", "max_stars_repo_head_hexsha": "bdea22adf934ca60185e68ca47d7396fb1069f94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Graph Models/SNF/Samplers/SNF_model_sampler.jl", "max_issues_repo_name": "gmbolt/InteractionNetworkModels.jl", "max_issues_repo_head_hexsha": "bdea22adf934ca60185e68ca47d7396fb1069f94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Graph Models/SNF/Samplers/SNF_model_sampler.jl", "max_forks_repo_name": "gmbolt/InteractionNetworkModels.jl", "max_forks_repo_head_hexsha": "bdea22adf934ca60185e68ca47d7396fb1069f94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3157894737, "max_line_length": 69, "alphanum_fraction": 0.6216981132, "num_tokens": 612}
|
[STATEMENT]
lemma dg_Rel_Obj_iff: "x \<in>\<^sub>\<circ> dg_Rel \<alpha>\<lparr>Obj\<rparr> \<longleftrightarrow> x \<in>\<^sub>\<circ> Vset \<alpha>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x \<in>\<^sub>\<circ> dg_Rel \<alpha>\<lparr>Obj\<rparr>) = (x \<in>\<^sub>\<circ> Vset \<alpha>)
[PROOF STEP]
unfolding dg_Rel_components
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x \<in>\<^sub>\<circ> Vset \<alpha>) = (x \<in>\<^sub>\<circ> Vset \<alpha>)
[PROOF STEP]
by auto
|
{"llama_tokens": 206, "file": "CZH_Foundations_czh_digraphs_CZH_DG_Rel", "length": 2}
|
import numpy as np
from sklearn.model_selection import ParameterGrid, ParameterSampler
from recpy.metrics import roc_auc, precision, recall, map, ndcg, rr
from recpy.utils.data_utils import df_to_csr
from recpy.utils.split import k_fold_cv
import logging
# Module-level logger; basicConfig runs at import time so callers get
# INFO-level console output without any further logging setup.
logger = logging.getLogger(__name__)
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
def grid_search_cv(RecommenderClass, dataset, param_space, metric=roc_auc, at=None,
                   cv_folds=5, is_binary=True, user_key='user_id', item_key='item_id', rating_key='rating',
                   rnd_seed=1234):
    """
    Finds the best hyper-parameters of a recommender algorithm with Grid Search

    :param RecommenderClass: Class of the recommender to tune (must be subclass of Recommender)
    :param dataset: data to use for tuning
    :param param_space: space of the parameters to explore
    :param metric: metric to maximize
    :param at: optional length of the recommendation list used in recommendation
    :param cv_folds: number of cross-validation iters
    :param is_binary: True to discard ratings, False otherwise
    :param user_key: name of the column with user ids in dataset
    :param item_key: name of the column with item ids in dataset
    :param rating_key: name of the column with ratings in dataset
    :param rnd_seed: random seed used for cross-validation
    :return: a tuple with (best configuration, best metric value)
    """
    tried_conf = []
    results = np.zeros(np.prod([len(v) for v in param_space.values()]), dtype=np.float32)
    space_size = len(results)
    logger.info('Size of the parameter space: {} ({} cv trials)'.format(space_size, space_size * cv_folds))
    param_grid = ParameterGrid(param_space)
    # compute the cv splits once, so every configuration is evaluated on the same folds
    nusers, nitems = dataset[user_key].max() + 1, dataset[item_key].max() + 1
    cv_split = []
    for train_df, test_df in k_fold_cv(dataset,
                                       user_key=user_key,
                                       item_key=item_key,
                                       k=cv_folds,
                                       clean_test=True,
                                       seed=rnd_seed):
        train = df_to_csr(train_df, is_binary=is_binary, nrows=nusers, ncols=nitems,
                          user_key=user_key, item_key=item_key, rating_key=rating_key)
        test = df_to_csr(test_df, is_binary=is_binary, nrows=nusers, ncols=nitems,
                         user_key=user_key, item_key=item_key, rating_key=rating_key)
        cv_split.append((train, test))
    for i, params in enumerate(param_grid):
        logger.info('Iteration {}/{}: {}'.format(i + 1, space_size, params))
        tried_conf.append(params)
        cv_result = 0.0
        for train, test in cv_split:
            # train the recommender
            recommender = RecommenderClass(**params)
            recommender.fit(train)
            # evaluate the ranking quality over every user with test interactions
            n_eval = 0
            metric_ = 0.0
            for test_user in range(nusers):
                relevant_items = test[test_user].indices
                if len(relevant_items) > 0:
                    n_eval += 1
                    # this will rank **all** items
                    recommended_items = recommender.recommend(user_id=test_user, exclude_seen=True)
                    # evaluate the recommendation list with ranking metrics ONLY
                    if metric == roc_auc:
                        metric_ += roc_auc(recommended_items, relevant_items)
                    elif metric == ndcg:
                        metric_ += ndcg(recommended_items, relevant_items, relevance=test[test_user].data, at=at)
                    else:
                        metric_ += metric(recommended_items, relevant_items, at=at)
            if n_eval > 0:
                # guard: a fold with no evaluable users would otherwise divide by zero
                metric_ /= n_eval
            cv_result += metric_
        # average value of the metric in cross-validation
        results[i] = cv_result / cv_folds
        logger.info('Result: {:.4f}'.format(results[i]))
    # return the best configuration (argmax is O(n); argsort was O(n log n))
    best = results.argmax()
    return tried_conf[best], results[best]
def random_search_cv(RecommenderClass, dataset, param_space, iters=10, metric=roc_auc, at=None,
                     cv_folds=5, is_binary=True, user_key='user_id', item_key='item_id', rating_key='rating',
                     rnd_seed=1234):
    """
    Finds the best hyper-parameters of a recommender algorithm with Random Search

    :param RecommenderClass: Class of the recommender to tune (must be subclass of Recommender)
    :param dataset: data to use for tuning
    :param param_space: space of the parameters to explore
    :param iters: number of iterations of Random Search
    :param metric: metric to maximize
    :param at: optional length of the recommendation list used in recommendation
    :param cv_folds: number of cross-validation iters
    :param is_binary: True to discard ratings, False otherwise
    :param user_key: name of the column with user ids in dataset
    :param item_key: name of the column with item ids in dataset
    :param rating_key: name of the column with ratings in dataset
    :param rnd_seed: random seed used for cross-validation
    :return: a tuple with (best configuration, best metric value)
    """
    tried_conf = []
    space_size = iters
    # sample `iters` configurations at random from param_space
    param_list = list(ParameterSampler(param_space, n_iter=iters, random_state=rnd_seed))
    results = np.zeros((space_size,), dtype=np.float32)
    logger.info('Size of the parameter space: {} ({} cv trials)'.format(space_size, space_size * cv_folds))
    # compute the cv splits once, so every configuration is evaluated on the same folds
    nusers, nitems = dataset[user_key].max() + 1, dataset[item_key].max() + 1
    cv_split = []
    for train_df, test_df in k_fold_cv(dataset,
                                       user_key=user_key,
                                       item_key=item_key,
                                       k=cv_folds,
                                       clean_test=True,
                                       seed=rnd_seed):
        train = df_to_csr(train_df, is_binary=is_binary, nrows=nusers, ncols=nitems,
                          user_key=user_key, item_key=item_key, rating_key=rating_key)
        test = df_to_csr(test_df, is_binary=is_binary, nrows=nusers, ncols=nitems,
                         user_key=user_key, item_key=item_key, rating_key=rating_key)
        cv_split.append((train, test))
    for i, params in enumerate(param_list):
        logger.info('Iteration {}/{}: {}'.format(i + 1, space_size, params))
        tried_conf.append(params)
        cv_result = 0.0
        for train, test in cv_split:
            # train the recommender
            recommender = RecommenderClass(**params)
            recommender.fit(train)
            # evaluate the ranking quality over every user with test interactions
            n_eval = 0
            metric_ = 0.0
            for test_user in range(nusers):
                relevant_items = test[test_user].indices
                if len(relevant_items) > 0:
                    n_eval += 1
                    # this will rank **all** items
                    recommended_items = recommender.recommend(user_id=test_user, exclude_seen=True)
                    # evaluate the recommendation list with ranking metrics ONLY
                    if metric == roc_auc:
                        metric_ += roc_auc(recommended_items, relevant_items)
                    elif metric == ndcg:
                        metric_ += ndcg(recommended_items, relevant_items, relevance=test[test_user].data, at=at)
                    else:
                        metric_ += metric(recommended_items, relevant_items, at=at)
            if n_eval > 0:
                # guard: a fold with no evaluable users would otherwise divide by zero
                metric_ /= n_eval
            cv_result += metric_
        # average value of the metric in cross-validation
        results[i] = cv_result / cv_folds
        logger.info('Result: {:.4f}'.format(results[i]))
    # return the best configuration (argmax is O(n); argsort was O(n log n))
    best = results.argmax()
    return tried_conf[best], results[best]
|
{"hexsha": "1a0e90c5d32b321846548d4e4bd8ba3c9ab6f989", "size": 8102, "ext": "py", "lang": "Python", "max_stars_repo_path": "RecPy/recpy/utils/tuning.py", "max_stars_repo_name": "Helma-T/recsys-course-2016-pub", "max_stars_repo_head_hexsha": "28bea50e211137f03c39ec97566510ba331946c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2016-10-29T19:11:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T14:59:24.000Z", "max_issues_repo_path": "RecPy/recpy/utils/tuning.py", "max_issues_repo_name": "Helma-T/recsys-course-2016-pub", "max_issues_repo_head_hexsha": "28bea50e211137f03c39ec97566510ba331946c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-11-07T11:22:29.000Z", "max_issues_repo_issues_event_max_datetime": "2016-11-23T16:51:10.000Z", "max_forks_repo_path": "RecPy/recpy/utils/tuning.py", "max_forks_repo_name": "mquad/recsys-course-2016-pub", "max_forks_repo_head_hexsha": "28bea50e211137f03c39ec97566510ba331946c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2016-10-27T23:10:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-22T12:38:21.000Z", "avg_line_length": 49.7055214724, "max_line_length": 113, "alphanum_fraction": 0.6173784251, "include": true, "reason": "import numpy", "num_tokens": 1776}
|
"""
This file contains helper functions for quantitative evaluations reported in the paper
"""
import os
import sys
import numpy as np
import torch
from progressbar import ProgressBar
from chamfer_distance import ChamferDistance
from data import PartNetDataset, PartNetShapeDiffDataset
import utils
def compute_recon_numbers(in_dir, baseline_dir, shapediff_topk):
    """Aggregate per-neighbor reconstruction errors and compare to the baseline.

    Walks every shape directory in `in_dir`, reading each '*.stats' file
    (two lines: chamfer distance then structure distance, value in the last
    whitespace-separated field) plus the matching baseline 'stats.txt',
    averages them per neighbor rank, prints the means and writes a summary
    'stats.txt' into `in_dir`.
    """
    topk_cd = np.zeros((shapediff_topk), dtype=np.float32)
    topk_sd = np.zeros((shapediff_topk), dtype=np.float32)
    baseline_topk_cd = np.zeros((shapediff_topk), dtype=np.float32)
    baseline_topk_sd = np.zeros((shapediff_topk), dtype=np.float32)
    topk_cnt = np.zeros((shapediff_topk), dtype=np.int32)
    for anno_id in os.listdir(in_dir):
        if '.' not in anno_id:  # skip plain files; shape dirs have no extension
            cur_dir = os.path.join(in_dir, anno_id)
            for item in os.listdir(cur_dir):
                if item.endswith('.stats'):
                    # name fields: presumably <prefix>_<rank>_<neighbor_anno>.stats
                    nid = int(item.split('.')[0].split('_')[1])
                    neighbor_anno_id = item.split('.')[0].split('_')[2]
                    with open(os.path.join(cur_dir, item), 'r') as fin:
                        # line 1: chamfer distance, line 2: structure distance
                        topk_cd[nid] += float(fin.readline().rstrip().split()[-1])
                        topk_sd[nid] += float(fin.readline().rstrip().split()[-1])
                    with open(os.path.join(baseline_dir, neighbor_anno_id, 'stats.txt'), 'r') as fin:
                        baseline_topk_cd[nid] += float(fin.readline().rstrip().split()[-1])
                        baseline_topk_sd[nid] += float(fin.readline().rstrip().split()[-1])
                    topk_cnt[nid] += 1
    # turn the per-rank sums into per-rank means
    topk_cd /= topk_cnt
    topk_sd /= topk_cnt
    baseline_topk_cd /= topk_cnt
    baseline_topk_sd /= topk_cnt
    print('ours cd mean: %.5f' % np.mean(topk_cd))
    print('ours sd mean: %.5f' % np.mean(topk_sd))
    print('structurenet cd mean: %.5f' % np.mean(baseline_topk_cd))
    print('structurenet sd mean: %.5f' % np.mean(baseline_topk_sd))
    with open(os.path.join(in_dir, 'stats.txt'), 'w') as fout:
        fout.write('ours cd mean: %.5f\n' % np.mean(topk_cd))
        fout.write('ours sd mean: %.5f\n' % np.mean(topk_sd))
        fout.write('structurenet cd mean: %.5f\n' % np.mean(baseline_topk_cd))
        fout.write('structurenet sd mean: %.5f\n' % np.mean(baseline_topk_sd))
        for i in range(shapediff_topk):
            fout.write('%d %d %.5f %.5f %.5f %.5f\n' % (i, topk_cnt[i], topk_cd[i], topk_sd[i], baseline_topk_cd[i], baseline_topk_sd[i]))
def compute_gen_cd_numbers(in_dir, data_path, object_list, shapediff_topk, shapediff_metric, self_is_neighbor, tot_shape):
    """Compute generation quality/coverage under chamfer distance.

    For every shape: builds the point clouds of its top-k neighbors (ground
    truth) and of `tot_gen` generated variants, fills a (topk, tot_gen)
    chamfer-distance matrix, and accumulates
      quality  = mean over generations of the closest ground truth,
      coverage = mean over ground truths of the closest generation.
    Writes per-shape matrices to 'cd_stats.npy' and a summary text file.

    Fixes over the original: the inner generation loop no longer shadows
    the outer shape loop variable `i` nor clobbers `obj`, and the redundant
    second `.cuda()` on the already-GPU `gt_pcs` tensor is removed.
    """
    chamfer_loss = ChamferDistance()
    data_features = ['object', 'name', 'neighbor_diffs', 'neighbor_objs', 'neighbor_names']
    dataset = PartNetShapeDiffDataset(
        data_path, object_list, data_features, shapediff_topk, shapediff_metric, self_is_neighbor)
    tot_gen = 100  # generated variants per shape: obj2-000.json .. obj2-099.json
    bar = ProgressBar()
    quality = 0.0
    coverage = 0.0
    for shape_i in bar(range(tot_shape)):
        obj, obj_name, neighbor_diffs, neighbor_objs, neighbor_names = dataset[shape_i]
        mat = np.zeros((shapediff_topk, tot_gen), dtype=np.float32)
        # sample one point cloud per ground-truth neighbor (from its OBBs)
        gt_pcs = []
        for ni in range(shapediff_topk):
            obbs_np = torch.cat([item.view(1, -1) for item in neighbor_objs[ni].boxes(leafs_only=True)], dim=0).cpu().numpy()
            mesh_v, mesh_f = utils.gen_obb_mesh(obbs_np)
            pc_sample = utils.sample_pc(mesh_v, mesh_f)
            gt_pcs.append(np.expand_dims(pc_sample, axis=0))
        gt_pcs = torch.from_numpy(np.concatenate(gt_pcs, axis=0)).float().cuda()
        for gen_i in range(tot_gen):
            gen_obj = PartNetDataset.load_object(os.path.join(in_dir, obj_name, 'obj2-%03d.json' % gen_i))
            obbs_np = torch.cat([item.view(1, -1) for item in gen_obj.boxes(leafs_only=True)], dim=0).cpu().numpy()
            mesh_v, mesh_f = utils.gen_obb_mesh(obbs_np)
            gen_pc = utils.sample_pc(mesh_v, mesh_f)
            # tile the generated cloud so chamfer runs against all neighbors at once
            gen_pc = np.tile(np.expand_dims(gen_pc, axis=0), [shapediff_topk, 1, 1])
            gen_pc = torch.from_numpy(gen_pc).float().cuda()
            d1, d2 = chamfer_loss(gt_pcs, gen_pc)  # gt_pcs is already on the GPU
            mat[:, gen_i] = (d1.sqrt().mean(dim=1) + d2.sqrt().mean(dim=1)).cpu().numpy() / 2
        quality += mat.min(axis=0).mean()
        coverage += mat.min(axis=1).mean()
        np.save(os.path.join(in_dir, obj_name, 'cd_stats.npy'), mat)
    quality /= tot_shape
    coverage /= tot_shape
    print('mean cd quality: %.5f' % quality)
    print('mean cd coverage: %.5f' % coverage)
    print('q + c: %.5f' % (quality + coverage))
    with open(os.path.join(in_dir, 'neighbor_%s_cd_stats.txt' % shapediff_metric), 'w') as fout:
        fout.write('mean cd quality: %.5f\n' % quality)
        fout.write('mean cd coverage: %.5f\n' % coverage)
        fout.write('q + c: %.5f\n' % (quality + coverage))
def compute_gen_sd_numbers(in_dir, data_path, object_list, shapediff_topk, shapediff_metric, self_is_neighbor, tot_shape):
    """Compute generation quality/coverage under the tree structure distance.

    For every shape, compares `tot_gen` generated hierarchies against the
    top-k neighbor hierarchies using a recursive structure distance
    (semantic-label matching via linear assignment on box chamfer
    distances), then accumulates quality (mat2, normalized by generated
    box count) and coverage (mat1, normalized by ground-truth box count).
    """
    chamfer_loss = ChamferDistance()
    unit_cube = torch.from_numpy(utils.load_pts('cube.pts'))

    def box_dist(box_feature, gt_box_feature):
        # Chamfer distance between two oriented boxes, each sampled as a
        # transformed unit cube and reweighted by face surface area.
        pred_box_pc = utils.transform_pc_batch(unit_cube, box_feature)
        pred_reweight = utils.get_surface_reweighting_batch(box_feature[:, 3:6], unit_cube.size(0))
        gt_box_pc = utils.transform_pc_batch(unit_cube, gt_box_feature)
        gt_reweight = utils.get_surface_reweighting_batch(gt_box_feature[:, 3:6], unit_cube.size(0))
        dist1, dist2 = chamfer_loss(gt_box_pc, pred_box_pc)
        # 1e-12 avoids division by zero for degenerate (zero-area) boxes
        loss1 = (dist1 * gt_reweight).sum(dim=1) / (gt_reweight.sum(dim=1) + 1e-12)
        loss2 = (dist2 * pred_reweight).sum(dim=1) / (pred_reweight.sum(dim=1) + 1e-12)
        loss = (loss1 + loss2) / 2
        return loss

    def struct_dist(gt_node, pred_node):
        # Recursive structure distance: counts the boxes that cannot be
        # matched between the two subtrees.
        if gt_node.is_leaf:
            if pred_node.is_leaf:
                return 0
            else:
                # leaf vs subtree: every extra predicted box is an error
                return len(pred_node.boxes()) - 1
        else:
            if pred_node.is_leaf:
                return len(gt_node.boxes()) - 1
            else:
                # only children with a semantic label present on both sides
                # can be matched
                gt_sem = set([node.label for node in gt_node.children])
                pred_sem = set([node.label for node in pred_node.children])
                intersect_sem = set.intersection(gt_sem, pred_sem)
                gt_cnodes_per_sem = dict()
                for node_id, gt_cnode in enumerate(gt_node.children):
                    if gt_cnode.label in intersect_sem:
                        if gt_cnode.label not in gt_cnodes_per_sem:
                            gt_cnodes_per_sem[gt_cnode.label] = []
                        gt_cnodes_per_sem[gt_cnode.label].append(node_id)
                pred_cnodes_per_sem = dict()
                for node_id, pred_cnode in enumerate(pred_node.children):
                    if pred_cnode.label in intersect_sem:
                        if pred_cnode.label not in pred_cnodes_per_sem:
                            pred_cnodes_per_sem[pred_cnode.label] = []
                        pred_cnodes_per_sem[pred_cnode.label].append(node_id)
                matched_gt_idx = []
                matched_pred_idx = []
                # fixed-size scratch map; assumes < 100 children per node
                matched_gt2pred = np.zeros((100), dtype=np.int32)
                for sem in intersect_sem:
                    gt_boxes = torch.cat([gt_node.children[cid].get_box_quat() for cid in gt_cnodes_per_sem[sem]], dim=0)
                    pred_boxes = torch.cat([pred_node.children[cid].get_box_quat() for cid in pred_cnodes_per_sem[sem]], dim=0)
                    num_gt = gt_boxes.size(0)
                    num_pred = pred_boxes.size(0)
                    if num_gt == 1 and num_pred == 1:
                        cur_matched_gt_idx = [0]
                        cur_matched_pred_idx = [0]
                    else:
                        # match same-label children by minimal box distance
                        gt_boxes_tiled = gt_boxes.unsqueeze(dim=1).repeat(1, num_pred, 1)
                        pred_boxes_tiled = pred_boxes.unsqueeze(dim=0).repeat(num_gt, 1, 1)
                        dmat = box_dist(gt_boxes_tiled.view(-1, 10), pred_boxes_tiled.view(-1, 10)).view(-1, num_gt, num_pred)
                        _, cur_matched_gt_idx, cur_matched_pred_idx = utils.linear_assignment(dmat)
                    for i in range(len(cur_matched_gt_idx)):
                        matched_gt_idx.append(gt_cnodes_per_sem[sem][cur_matched_gt_idx[i]])
                        matched_pred_idx.append(pred_cnodes_per_sem[sem][cur_matched_pred_idx[i]])
                        matched_gt2pred[gt_cnodes_per_sem[sem][cur_matched_gt_idx[i]]] = pred_cnodes_per_sem[sem][cur_matched_pred_idx[i]]
                struct_diff = 0.0
                # unmatched children contribute all of their boxes
                for i in range(len(gt_node.children)):
                    if i not in matched_gt_idx:
                        struct_diff += len(gt_node.children[i].boxes())
                for i in range(len(pred_node.children)):
                    if i not in matched_pred_idx:
                        struct_diff += len(pred_node.children[i].boxes())
                # matched children recurse
                for i in range(len(matched_gt_idx)):
                    gt_id = matched_gt_idx[i]
                    pred_id = matched_pred_idx[i]
                    struct_diff += struct_dist(gt_node.children[gt_id], pred_node.children[pred_id])
                return struct_diff

    # create dataset and data loader
    data_features = ['object', 'name', 'neighbor_diffs', 'neighbor_objs', 'neighbor_names']
    dataset = PartNetShapeDiffDataset(
        data_path, object_list, data_features, shapediff_topk, shapediff_metric, self_is_neighbor)
    tot_gen = 100  # generated variants per shape: obj2-000.json .. obj2-099.json
    bar = ProgressBar()
    quality = 0.0; coverage = 0.0;
    for i in bar(range(tot_shape)):
        obj, obj_name, neighbor_diffs, neighbor_objs, neighbor_names = dataset[i]
        mat1 = np.zeros((shapediff_topk, tot_gen), dtype=np.float32)
        mat2 = np.zeros((shapediff_topk, tot_gen), dtype=np.float32)
        for j in range(tot_gen):
            gen_obj = PartNetDataset.load_object(os.path.join(in_dir, obj_name, 'obj2-%03d.json'%j))
            for ni in range(shapediff_topk):
                sd = struct_dist(neighbor_objs[ni].root, gen_obj.root)
                # mat1: normalized by ground-truth size; mat2: by generated size
                mat1[ni, j] = sd / len(neighbor_objs[ni].root.boxes())
                mat2[ni, j] = sd / len(gen_obj.root.boxes())
        quality += mat2.min(axis=0).mean()
        coverage += mat1.min(axis=1).mean()
        np.save(os.path.join(in_dir, obj_name, 'sd_mat1_stats.npy'), mat1)
        np.save(os.path.join(in_dir, obj_name, 'sd_mat2_stats.npy'), mat2)
    quality /= tot_shape
    coverage /= tot_shape
    print('mean sd quality: ', quality)
    print('mean sd coverage: ', coverage)
    print('q + c: %.5f' % (quality + coverage))
    with open(os.path.join(in_dir, 'neighbor_%s_sd_stats.txt'%shapediff_metric), 'w') as fout:
        fout.write('mean sd quality: %f\n' % quality)
        fout.write('mean sd coverage: %f\n' % coverage)
        fout.write('q + c: %.5f\n' % (quality + coverage))
|
{"hexsha": "ce6d4c98de6a6f3b5aeb88ae5d50684edbcd957b", "size": 10924, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/eval_utils.py", "max_stars_repo_name": "daerduoCarey/structedit", "max_stars_repo_head_hexsha": "79c4b076ade9975b9d3a68dbea5b6ab42a9001e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2019-11-26T03:40:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T02:43:21.000Z", "max_issues_repo_path": "code/eval_utils.py", "max_issues_repo_name": "daerduoCarey/structedit", "max_issues_repo_head_hexsha": "79c4b076ade9975b9d3a68dbea5b6ab42a9001e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-02T08:42:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-02T23:15:53.000Z", "max_forks_repo_path": "code/eval_utils.py", "max_forks_repo_name": "daerduoCarey/structedit", "max_forks_repo_head_hexsha": "79c4b076ade9975b9d3a68dbea5b6ab42a9001e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-11-27T00:21:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-20T00:40:31.000Z", "avg_line_length": 49.6545454545, "max_line_length": 138, "alphanum_fraction": 0.6084767484, "include": true, "reason": "import numpy", "num_tokens": 2787}
|
# Tests for the XPhi ϕ(ρz) matrix-correction model.
# Reference values are read off the measured/published curves in Merlet (1994).
using Test
using NeXLMatrixCorrection
#@testset "XPhi" begin
@testset "Mg in Al at 25 keV" begin
    # See Figure 1 of Merlet 1994
    m, cxr, e0, toa = mat"Al", n"Mg K-L3", 25.0e3, deg2rad(40.0)
    xp = matrixcorrection(XPhi, m, inner(cxr), e0)
    # Surface ionization ϕ0 must match the figure and equal ϕ(ρz) at ρz = 0.
    @test isapprox(NeXLMatrixCorrection.ϕ0(xp), 1.4, atol=0.1)
    @test isapprox(NeXLMatrixCorrection.ϕ0(xp), ϕ(xp, 0.0), rtol=1.0e-5)
    # NOTE(review): `max(xp)` presumably dispatches to a package-defined
    # `max(::XPhi)` returning the peak of the ϕ(ρz) curve — confirm; the other
    # test sets use `ϕ(xp, xp.ρzm)` for the same quantity.
    @test isapprox(max(xp), 2.65, atol=0.1)
    @test isapprox(ϕ(xp, 1.0e-3), 0.8, atol=0.1)
    @test isapprox(ϕ(xp, 1.6e-3), 0.1, atol=0.1)
end
@testset "Cd in Al at 25 keV" begin
    m, cxr, e0, toa = mat"Al", n"Cd L3-M5", 25.0e3, deg2rad(40.0)
    xp = matrixcorrection(XPhi, m, inner(cxr), e0)
    @test isapprox(ϕ(xp, 0.0), 1.5, atol=0.05)
    # xp.ρzm is the depth of the maximum of the ϕ(ρz) curve
    @test isapprox(ϕ(xp, xp.ρzm), 2.45, atol=0.1)
    @test isapprox(ϕ(xp, 1.0e-3), 0.75, atol=0.1)
    @test isapprox(ϕ(xp, 1.8e-3), 0.2, atol=0.1)
end
@testset "Cd in Au at 25 keV" begin
    m, cxr, e0, toa = mat"Au", n"Cd L3-M5", 25.0e3, deg2rad(40.0)
    xp = matrixcorrection(XPhi, m, inner(cxr), e0)
    @test isapprox(ϕ(xp, 0.0), 1.5, atol=0.05)
    @test isapprox(ϕ(xp, xp.ρzm), 3.0, atol=0.1)
    @test isapprox(ϕ(xp, 1.0e-3), 0.2, atol=0.1)
end
@testset "Al in C at 15 keV" begin
    m, cxr, e0, toa = mat"C", n"Al K-L3", 15.0e3, deg2rad(40.0)
    xp = matrixcorrection(XPhi, m, inner(cxr), e0)
    @test isapprox(ϕ(xp, 0.0), 1.1, atol=0.05)
    @test isapprox(ϕ(xp, xp.ρzm), 1.70, atol=0.1)
    @test isapprox(ϕ(xp, 0.3e-3), 0.75, atol=0.1)
    @test isapprox(ϕ(xp, 0.6e-3), 0.1, atol=0.1)
end
#end
|
{"hexsha": "850108e5516180c7e0db63e8f64f87f64b5176c7", "size": 1676, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/xphi.jl", "max_stars_repo_name": "NicholasWMRitchie/NeXLMatrixCorrection", "max_stars_repo_head_hexsha": "601f6e42880fd97b4bfdbeb5a0ee8d1364b6041b", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/xphi.jl", "max_issues_repo_name": "NicholasWMRitchie/NeXLMatrixCorrection", "max_issues_repo_head_hexsha": "601f6e42880fd97b4bfdbeb5a0ee8d1364b6041b", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/xphi.jl", "max_forks_repo_name": "NicholasWMRitchie/NeXLMatrixCorrection", "max_forks_repo_head_hexsha": "601f6e42880fd97b4bfdbeb5a0ee8d1364b6041b", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-05T15:04:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-17T07:24:27.000Z", "avg_line_length": 38.976744186, "max_line_length": 76, "alphanum_fraction": 0.5566825776, "num_tokens": 793}
|
import tensorflow as tf
# import tensorflow_text as text
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pandas as pd
import numpy as np
import json
import tensorflow_datasets as tfds
import pickle
from nltk.tokenize import WordPunctTokenizer
from src.features.transform_data import Transform
class Lyrics:
    """Builds tf.data datasets of rap verses for seq2seq / language models.

    Verses are read from a pickled list at ``data/processed/verses.txt``,
    split into lines on ``' \\n '``, and filtered to lines of at most 30
    whitespace-separated tokens.  Even-indexed lines become targets and
    odd-indexed lines become inputs for the subword pipeline (``build``);
    ``build_char_dataset`` builds a character-level LM dataset instead.
    """

    BUFFER_SIZE = 10000
    # NOTE: executed at class-definition (import) time; requires
    # configs/config.json relative to the current working directory.
    with open('configs/config.json', 'r') as cfgFile:
        cfg = json.load(cfgFile)

    def __init__(self, BATCH_SIZE, VOCAB_SIZE):
        """
        BATCH_SIZE: batch size used by both dataset builders.
        VOCAB_SIZE: target subword vocabulary size (unused by the char dataset).
        """
        self.BATCH_SIZE = BATCH_SIZE
        self.VOCAB_SIZE = VOCAB_SIZE
        _t = Transform()  # NOTE(review): result unused — kept in case construction has side effects; confirm
        data_dir = 'data/processed/verses.txt'
        with open(data_dir, "rb") as fp:  # unpickle the pre-processed verses
            lyrics = pickle.load(fp)
        # Split every verse on ' \n ' separators, dropping empty / newline-only lines.
        arr = [[j for j in i.split(' \n ') if len(j) > 1 and '\n\n' != j]
               for i in list(np.array(lyrics)) if len(i.split(' \n ')) > 0]
        # Flatten, keeping only lines with at most 30 tokens.
        flattened_list = [y for x in arr for y in x if len(y.split()) <= 30]
        self.lyrics = flattened_list
        # Alternate lines: even indices are targets, odd indices are inputs.
        self.target = flattened_list[0::2]
        self.train = flattened_list[1::2]

    def build(self, pad_shape=40):
        """Build a subword-encoded (input, target) tf.data pipeline.

        Pairs where either side exceeds ``pad_shape`` tokens are dropped; the
        remainder are padded to ``pad_shape`` and batched.
        """
        _sequences = tf.data.Dataset.from_tensor_slices((self.train, self.target))
        # Two independent subword vocabularies, one per side of the pair.
        self.tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
            (i.numpy() for i, _ in _sequences), target_vocab_size=self.VOCAB_SIZE)
        self.tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
            (i.numpy() for _, i in _sequences), target_vocab_size=self.VOCAB_SIZE)
        # Round-trip sanity check of the encoder.
        sample_string = 'Transformer is awesome.'
        tokenized_string = self.tokenizer_en.encode(sample_string)
        print('Tokenized string is {}'.format(tokenized_string))
        original_string = self.tokenizer_en.decode(tokenized_string)
        print('The original string: {}'.format(original_string))
        assert original_string == sample_string
        for ts in tokenized_string:
            print('{} ----> {}'.format(ts, self.tokenizer_en.decode([ts])))
        BUFFER_SIZE = 20000  # NOTE: shadows the class-level BUFFER_SIZE (10000)

        def encode(lang1, lang2):
            # Eager-side subword encoding (wrapped by tf.py_function below).
            lang1 = self.tokenizer_pt.encode(lang1.numpy())
            lang2 = self.tokenizer_en.encode(lang2.numpy())
            return lang1, lang2

        def tf_encode(pt, en):
            return tf.py_function(encode, [pt, en], [tf.int64, tf.int64])

        def filter_max_length(x, y):
            # Keep pairs where both sides fit into pad_shape tokens.
            return tf.logical_and(tf.size(x) <= pad_shape,
                                  tf.size(y) <= pad_shape)

        dataset = _sequences.map(tf_encode)
        dataset = dataset.filter(filter_max_length)
        dataset = dataset.shuffle(BUFFER_SIZE).padded_batch(self.BATCH_SIZE,
                                                            padded_shapes=([pad_shape],
                                                                           [pad_shape]),
                                                            drop_remainder=True)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        return dataset

    def build_char_dataset(self):
        """Build a character-level LM dataset of (input, next-char target) batches.

        FIX: the original duplicated the whole sequences/split/map construction
        (sequences batched twice, ``split_input_target`` defined twice, the
        dataset mapped twice) — the duplicate has been removed.
        """
        text = ' '.join(self.lyrics)
        vocab = sorted(set(text))
        self.vocab = vocab
        print(f'Vocab length is {len(vocab)}')
        # Character <-> index lookup tables.
        char2idx = {u: i for i, u in enumerate(vocab)}
        idx2char = np.array(vocab)
        self.char2idx = char2idx
        self.idx2char = idx2char
        text_as_int = np.array([char2idx[c] for c in text])
        seq_length = 500
        # Create training examples / targets
        char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
        for i in char_dataset.take(5):  # debug: first few characters
            print(idx2char[i.numpy()])
        sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)
        for item in sequences.take(5):  # debug: first few sequences
            print(repr(''.join(idx2char[item.numpy()])))

        def split_input_target(chunk):
            # Shift by one character: predict the next char at every position.
            input_text = chunk[:-1]
            target_text = chunk[1:]
            return input_text, target_text

        dataset = sequences.map(split_input_target)
        for input_example, target_example in dataset.take(1):
            print('Input data: ', repr(''.join(idx2char[input_example.numpy()])))
            print('Target data:', repr(''.join(idx2char[target_example.numpy()])))
        dataset = dataset.batch(self.BATCH_SIZE, drop_remainder=True)
        return dataset
# Manual smoke test: build the character-level dataset with batch size 32 and
# subword vocab size 100 (the vocab size is unused by build_char_dataset).
if __name__ == '__main__':
    _l = Lyrics(32, 100)
    _l.build_char_dataset()
|
{"hexsha": "29fd00836e9b36268a2297b1bd1b6d349dc3f018", "size": 6159, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/features/build.py", "max_stars_repo_name": "jmstevens/aesopbot", "max_stars_repo_head_hexsha": "24c148e3b56820d2df81574c01eb2d023356f9dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-18T21:16:56.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-19T16:18:45.000Z", "max_issues_repo_path": "src/features/build.py", "max_issues_repo_name": "jmstevens/aesopbot", "max_issues_repo_head_hexsha": "24c148e3b56820d2df81574c01eb2d023356f9dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-11-07T00:25:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-02T00:38:26.000Z", "max_forks_repo_path": "src/features/build.py", "max_forks_repo_name": "jmstevens/aesopbot", "max_forks_repo_head_hexsha": "24c148e3b56820d2df81574c01eb2d023356f9dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9810126582, "max_line_length": 134, "alphanum_fraction": 0.5903555772, "include": true, "reason": "import numpy", "num_tokens": 1355}
|
import os
os.environ['QT_QPA_PLATFORM']='offscreen'
import gc
import sys
import time
import logging
import argparse
import matplotlib.colors
import numpy as np
import pandas as pd
from Bio import Phylo
from itertools import filterfalse
from ete3 import Tree
from GetConfig import getConfig
'''
Description:
This module is used for phylogeny analysis.
It is recommended to input both the haplogroup file and the sequence alignment file to construct a bifurcating tree.
If only the haplogroup file is given, the phylogenetic tree may contain polytomies (nodes with more than two children).
For polytomies, if a sequence alignment is provided, the program will apply the alignment method to construct a sub-tree for those nodes.
'''
# Shared project configuration (INI-style); supplies the common-trunk haplogroup
# list and plot parameters used throughout this module.
config = getConfig()
def phylo_parser():
    """Parse and return the command-line options of the `phylo` sub-command."""
    cli = argparse.ArgumentParser('phylo', description='(c) Y-LineageTracker: Phylogeny analysis')
    # the sub-command token itself
    cli.add_argument('phylo',
                     help='Phylogenetic analysis for NRY haplogroups.')
    # required: haplogroup table
    cli.add_argument('--hg', required=True, type=str,
                     help='hg: A file containing sample ID and haplogroup of each individual.')
    # optional: sequence alignment and its format
    cli.add_argument('--seq', required=False, type=str,
                     help='seq: Sequence alignment file of input samples.')
    cli.add_argument('--seq-format', required=False, type=str, dest='format',
                     choices=['fasta', 'phylip', 'nexus', 'meg', 'vcf'],
                     help='seq-format: The file format of sequence alignment file. The default is fasta. ')
    # optional: population annotation
    cli.add_argument('-p', '--population', required=False, type=str,
                     help='population: A file containing sample ID and population information of each individual')
    # optional: polytomy-resolution method
    cli.add_argument('--align-method', required=False, type=str, dest='align',
                     default='mp', choices=['ibs', 'upgma', 'mp'],
                     help='align-method: The method of constructing the bifurcating tree from multiple sequences. The default is mp.')
    # optional: serialization format of the output tree
    cli.add_argument('--tree-format', required=False, type=str, dest='tree',
                     default='newick', choices=['newick', 'nexus', 'phyloxml', 'nexml'],
                     help='--tree-format: The file format of output phylogenetic tree. The default is newick. ')
    # optional: figure layout
    cli.add_argument('--layout', required=False, type=str,
                     default='rectangular', choices=['rectangular', 'circular'],
                     help='layout: The layout of the phylogenetic tree figure. The default is rectangular.')
    # optional: output prefix
    cli.add_argument('-o', '--output', required=False, type=str,
                     help='output: The prefix of output files.')
    return cli.parse_args()
# print program information and write to log file
def set_log(args_log, log_file):
    """Attach file + console handlers to the root logger and echo the run options.

    args_log: parsed argparse namespace (hg, seq, format, population, layout, tree).
    log_file: path of the log file (truncated on each run).
    """
    root = logging.getLogger()
    root.setLevel(level=logging.INFO)
    # file handler records timestamped, level-tagged messages
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter('[%(asctime)s] - [%(levelname)s]: %(message)s'))
    # console handler mirrors the messages verbatim (no formatter, as before)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    root.addHandler(file_handler)
    root.addHandler(console_handler)
    log_info = ['[Y-LineageTracker] [Phylo]',
                '[Y-LineageTracker] Run Date: ' + time.asctime(time.localtime(time.time())),
                '[Y-LineageTracker] Haplogroup File: %s' % args_log.hg,
                '[Y-LineageTracker] Seq File: %s' % args_log.seq,
                '[Y-LineageTracker] Seq Format: %s' % args_log.format,
                '[Y-LineageTracker] Population File: %s' % args_log.population,
                '[Y-LineageTracker] Tree Layout: %s' % args_log.layout,
                '[Y-LineageTracker] Output Tree Format: %s' % args_log.tree]
    print('\n')
    for line in log_info:
        root.info(line)
# names of output files
def check_phylo_output(tree_format, output):
    """Return [figure_prefix, tree_file, log_file] paths for this phylo run.

    tree_format: one of newick/nexus/phyloxml/nexml (validated by argparse).
    output: user-supplied output prefix (may be None).
    """
    from FilesIO import get_out_path
    prefix = get_out_path(output, 'Phylo')
    fig_file = prefix + '.phylo'
    # choose a tree-file extension matching the requested serialization
    if 'xml' in tree_format:
        tree_file = prefix + '.tree.xml'
    elif tree_format == 'newick':
        tree_file = prefix + '.tree.nwk'
    elif tree_format == 'nexus':
        tree_file = prefix + '.tree.nex'
    log_file = prefix + '.PhyloLog.log'
    return [fig_file, tree_file, log_file]
class PhyloHaplogroup(object):
    '''
    This class is used to perform main phylogeny analysis
    It can:
    1. construct a haplogroup tree form haplogroup file
    2. construct sub-trees for internal polytomies
    3. output tree with a figure
    Population file is recommended for tree plot to construct a bifurcating tree
    Output tree file in newick format can be used as one of input file in time estimation
    '''

    def __init__(self, hap_data, output_set):
        # hap_data: DataFrame with 'SampleID' and 'Haplogroup' columns.
        # output_set: [figure_prefix, tree_file, log_file] (see check_phylo_output).
        self.logger = logging.getLogger()
        # strip trailing '~' markers from haplogroup names
        hap_data['Haplogroup'] = hap_data['Haplogroup'].map(lambda x: x.strip('~'))
        self.hap_data = hap_data
        self.output_set = output_set
        # haplogroup names forming the shared backbone of the reference tree
        self.common_trunk = config.get('HaplogroupTree', 'CommonTrunk').split(',')

    # read sequence alignment file
    def read_seq_file(self, seq_file, format):
        """Read a multiple sequence alignment; return (sample ids, sequences)."""
        from ProcessData import ConvertData
        from Bio import AlignIO
        # get mutiple alignment data
        if format == 'vcf':
            convert = ConvertData.ConvertData(seq_file)
            msa = convert.data_convert('vcf', 'msa')
        elif format == 'phylip':
            msa = AlignIO.read(open(seq_file), 'phylip-relaxed')
        else:
            msa = AlignIO.read(open(seq_file), format)
        # get ids of msa
        ids = [i.id for i in msa]
        seqs = [i.seq for i in msa]
        return ids, seqs

    # get base tree from haplogroup file
    def get_base_tree(self, used_samples=None):
        """Build the haplogroup backbone tree and attach samples to it.

        Returns (hap_df, newick string, trunk_length dict, max name length,
        sample id list).

        NOTE(review): this block's indentation was reconstructed from flattened
        source; the if/else pairings marked below should be confirmed against
        the upstream repository.
        """
        self.logger.info('[Y-LineageTracker] Constructing phylogenetic tree...')
        # preprocess data
        hap_df = self.hap_data.sort_values(by='Haplogroup')
        if used_samples:
            hap_df = hap_df[hap_df['SampleID'].map(lambda x: x in used_samples)]
        hap_phylo = hap_df.drop_duplicates(subset='Haplogroup', keep='first', inplace=False)
        hap_inds = hap_df.set_index(['Haplogroup'], drop=True)
        # prepare variables
        new_tree_info = []
        filter_trunk = []
        from FilesIO import CommonData
        common_data = CommonData()
        tree_info = common_data.read_tree_info()
        # traverse tree to build a pylogenetic tree based on haplogroup
        for i in tree_info:
            hap_name = i.split('\t')[-1]
            if hap_name in self.common_trunk:
                new_tree_info.append(i)
                continue
            level = i.count('\t')
            # no observed haplogroup descends from this trunk: keep but mark for filtering
            if len(list(filterfalse(lambda x: not x.startswith(hap_name), hap_phylo['Haplogroup']))) == 0:
                new_tree_info.append(i)
                filter_trunk.append(hap_name)
                continue
            # observed sub-haplogroups of this trunk (excluding the trunk itself)
            hap_point_list = list(filterfalse(lambda x: x == hap_name or not x.startswith(hap_name), hap_phylo['Haplogroup']))
            hap_point_list = [i for i in hap_point_list if i not in self.common_trunk]
            hap_dict = {}
            hap_class = []
            # only insert sub-haplogroups that are NOT already nodes of the reference tree
            if len(hap_point_list) != 0 and len(set(hap_point_list) & set([i.split('\t')[-1] for i in tree_info])) == 0:
                num = 0
                hap_count = len((hap_point_list)) + 1
                iter_time = len(''.join(hap_point_list))
                if len(hap_point_list) == 1:
                    new_tree_info.append(i)
                    new_tree_info.append('-\t' * (level + 1) + hap_point_list[0])
                    continue
                # group sub-haplogroups by progressively longer name prefixes
                for j in range(iter_time):
                    if num + 1 < len(hap_point_list):
                        length = len(hap_name)
                        if len(hap_point_list[num]) == length:
                            num += 1
                            hap_name = i.split('\t')[-1]
                            hap_count = len(hap_dict[hap_name])
                            continue
                        if hap_name in hap_class:
                            hap_count = len(hap_dict[hap_name])
                            hap_name = hap_point_list[num][:length + 1]
                            continue
                        if len(list(filterfalse(lambda x: not x.startswith(hap_name), hap_point_list))) < hap_count and len(list(filterfalse(lambda x: not x.startswith(hap_name), hap_point_list))) > 1:
                            hap_dict[hap_name] = [i for i in hap_point_list if i.startswith(hap_name)]
                            hap_class.append(hap_name)
                            hap_count = len(hap_dict[hap_name])
                            hap_name = hap_point_list[num][:length + 1]
                        else:
                            hap_name = hap_point_list[num][:length + 1]
                if hap_dict:
                    # expand the trunk line into one line per inserted sub-haplogroup
                    level_list = [i]
                    key_list = sorted(list(hap_dict.keys()))
                    key_list.reverse()
                    ele_list = []
                    for key in key_list:
                        one_key_list = hap_dict[key]
                        ele_list.extend(one_key_list)
                    ele_list.extend(key_list)
                    ele_list = sorted(list(set(ele_list)))
                    level_count = 0
                    level_hap = i.split('\t')[-1]
                    level_dict = {}
                    level_dict[i.split('\t')[-1]] = 0
                    for ele_num, ele in enumerate(ele_list[1:]):
                        if ele.startswith(level_hap):
                            level_count += 1
                            level_dict[ele] = level_count
                        else:
                            # backtrack to the closest already-placed ancestor
                            for ele_back in reversed(ele_list[:ele_num]):
                                if ele.startswith(ele_back):
                                    level_count = level_dict[ele_back] + 1
                                    level_dict[ele] = level_count
                                    break
                        level_list.append('-\t' * (level + level_count) + ele)
                        level_hap = ele
                    level_list = sorted(level_list, key=lambda level_list: level_list.split('-\t')[-1])
                    if i == level_list[0]:
                        new_tree_info.extend(level_list)
            else:
                # NOTE(review): this `else` is paired with the big insertion `if`
                # above (reference-tree lines are kept verbatim) — confirm.
                new_tree_info.append(i)
        # remove tree trunk not for tree construction
        intersection = [i for i in new_tree_info if i.split('\t')[-1] not in filter_trunk]
        for i in reversed(intersection):
            if i.split('\t')[-1] in self.common_trunk:
                common_level = i.count('\t')
                if intersection.index(i) == len(intersection) - 1:
                    filter_trunk.append(i.split('\t')[-1])
                elif intersection[intersection.index(i) + 1].count('\t') != common_level + 1:
                    filter_trunk.append(i.split('\t')[-1])
                # composite trunks P1/NO1 also imply their parent trunk
                if i.split('\t')[-1] == 'P1':
                    filter_trunk.append('P')
                elif i.split('\t')[-1] == 'NO1':
                    filter_trunk.append('NO')
            elif all([hap.split('\t')[-1] in self.common_trunk or len(hap.split('\t')[-1]) == 1 for hap in intersection[intersection.index(i):]]):
                filter_trunk.extend([hap.split('\t')[-1] for hap in intersection[intersection.index(i):]])
        # remove duplicated branch in tree construction
        rm_list = []
        iter_num = 0
        for i in new_tree_info:
            node_hap = i.split('\t')[-1]
            node_level = i.count('\t')
            if node_hap in self.common_trunk or len(node_hap) == 1:
                continue
            if new_tree_info.index(i) == len(new_tree_info) - 1:
                continue
            if node_hap in hap_df['Haplogroup'].tolist():
                continue
            if new_tree_info[new_tree_info.index(i) + 1].split('\t')[-1].startswith(node_hap):
                branch_hap = new_tree_info[new_tree_info.index(i) + 1].split('\t')[-1]
                node_base = [i for i in new_tree_info if i.split('\t')[-1].startswith(node_hap) and i.split('\t')[-1] != node_hap]
                branch_base = [i for i in new_tree_info if i.split('\t')[-1].startswith(branch_hap)]
                if node_base == branch_base:
                    # un-indent the redundant sub-branch by one level
                    if iter_num == 0:
                        new_tree_info_irredundant = [(i.count('\t') - 1) * '-\t' + i.split('\t')[-1] if i.split('\t')[-1].startswith(node_hap) else i for i in new_tree_info]
                    else:
                        new_tree_info_irredundant = [(i.count('\t') - 1) * '-\t' + i.split('\t')[-1] if i.split('\t')[-1].startswith(node_hap) else i for i in new_tree_info_irredundant]
                    iter_num += 1
                    rm_list.append(node_hap)
        # FIX: only filter when a redundant branch was actually found; the
        # original referenced new_tree_info_irredundant unconditionally, which
        # raised NameError when no duplication existed.
        if rm_list:
            new_tree_info = [i for i in new_tree_info_irredundant if i.split('\t')[-1] not in rm_list]
        # add SampleID to tree
        ind_tree_info = []
        min_level_num = config.getint('PlotParameters', 'MinLevelNum')
        for i in new_tree_info:
            ind_hap = i.split('-\t')[-1]
            level_num = i.count('-\t')
            if ind_hap in hap_phylo['Haplogroup'].tolist():
                ind_tree_info.append(i)
                # a single sample is a str, several samples come back as a Series
                if isinstance(hap_inds.at[ind_hap, 'SampleID'], str):
                    ind_tree_info.append((level_num + 1) * '-\t' + hap_inds.at[ind_hap, 'SampleID'])
                else:
                    ind_list = hap_inds.at[ind_hap, 'SampleID'].tolist()
                    ind_tree_info = ind_tree_info + [(level_num + 1) * '-\t' + i for i in ind_list]
                if level_num < min_level_num:
                    min_level_num = level_num
            elif ind_hap not in filter_trunk:
                ind_tree_info.append(i)
        max_length = max([len(i.split('-\t')[-1]) for i in new_tree_info])
        # tree in newick format for visulization
        samples = hap_df['SampleID'].tolist()
        base, trunk_length = self._write_nwk(ind_tree_info, hap_df['SampleID'].tolist())
        base = base[1:].replace(');', ';')  # root
        # remove reduandant nodes
        base_tree = Tree(base, format=8)
        for node in base_tree.traverse('preorder'):
            if (not node.is_leaf()) and len(node.children) == 1:
                child_name = node.children[0].name
                if child_name in self.common_trunk or len(child_name) == 1:
                    node.delete()
        base = base_tree.write(format=8)
        base = base.replace('NoName', '')
        return hap_df, base, trunk_length, max_length, samples

    # output tree in specific format
    def output_tree(self, base, tree_format):
        """Write the newick string `base` to self.output_set[1] as `tree_format`."""
        if base.endswith(');'):
            base = base[1:].replace(');', ';')  # root
        # output tree in specific format
        output_file = self.output_set[1]
        if tree_format == 'newick':
            output_tree = open(output_file, 'w')
            output_tree.write(base)
            output_tree.close()
        else:
            from io import StringIO
            tmp_tree = StringIO(base)
            nwk_tree = Phylo.parse(tmp_tree, 'newick')
            Phylo.write(nwk_tree, output_file, tree_format)

    # apply sequence alignment method to construct a bifurcating tree
    def phylo_internal(self, ids, seqs, base, samples, align):
        """Resolve polytomies of the haplogroup tree using sequence data."""
        # change the internal node of tree
        base_tree = Tree(base, format=8)
        for node in base_tree.iter_descendants('postorder'):
            if len(node.children) > 2 and node.name != '':
                node_name = node.name
                tmp_node = Tree('(tmp);', format=8)
                children = node.children
                children_haps_num = len([i for i in children if i.name not in samples])
                children_sample_num = len([i for i in children if i.name in samples])
                if children_haps_num == 0:  # all children samples
                    children_samples = [i for i in children if i.name in samples]
                    self._add_single_tree_from_tmp(node, tmp_node, children_samples, node_name, ids, seqs, align, True)
                elif children_haps_num == 1:
                    if children_sample_num == 2:
                        # one haplogroup + two samples: a simple polytomy split suffices
                        children_samples_name = [i.name for i in children if i.name in samples]
                        children_haps_name = [i.name for i in children if i.name not in samples]
                        polynode = base_tree.get_common_ancestor(children_haps_name[0], children_samples_name[0])
                        polynode.resolve_polytomy(recursive=False)
                    else:
                        children_samples = [i for i in children if i.name in samples]
                        self._add_single_tree_from_tmp(node, tmp_node, children_samples, node_name, ids, seqs, align)
                elif children_haps_num > 1:
                    hap_sample_dict = {}
                    if children_sample_num <= 1:  # all sub-haplogroups
                        children_haps = [i for i in children if i.name not in samples]
                        self._add_multi_tree_from_tmp(node, tmp_node, children_haps, node_name, ids, seqs, samples, align)
                    else:
                        # mixed children: resolve samples and sub-haplogroups separately
                        children_samples = [i for i in children if i.name in samples]
                        self._add_single_tree_from_tmp(node, tmp_node, children_samples, node_name, ids, seqs, align)
                        children_haps = [i for i in children if i.name not in samples]
                        tmp_node2 = Tree('(tmp2);', format=8)
                        self._add_multi_tree_from_tmp(node, tmp_node2, children_haps, node_name, ids, seqs, samples, align)
        gc.collect()
        # remove nodes without children
        for node in base_tree.traverse('preorder'):
            if (not node.is_leaf()) and len(node.children) == 1:
                child_name = node.children[0].name
                if child_name not in samples:
                    node.delete()
        base = base_tree.write(format=8)
        base = base.replace('NoName', '')
        return base

    def _add_single_tree_from_tmp(self, node, tmp_node, children_samples, node_name, ids, seqs, align, replace=False):
        """Replace sample children of `node` with a distance-based sub-tree."""
        node.add_child(tmp_node)
        children_samples_name = [i.name for i in children_samples]
        # delete parallel samples
        for i in children_samples:
            i.delete()
        # concat sequence_distance_tree to haplogroup node
        id_idx = [ids.index(i) for i in children_samples_name]
        children_seqs = [seqs[i] for i in id_idx]
        children_tree = self._get_sequence_distance_tree(children_seqs, children_samples_name, align)
        if replace:
            children_tree = children_tree.replace(';', node_name + ';')
        node.search_nodes(name='tmp')[0].add_child(Tree(children_tree, format=8))
        node.search_nodes(name='tmp')[0].delete()

    def _add_multi_tree_from_tmp(self, node, tmp_node, children_haps, node_name, ids, seqs, samples, align, replace=False):
        """Order sub-haplogroup children of `node` by a distance tree built from
        one representative sample per sub-haplogroup, then graft them back."""
        node.add_child(tmp_node)
        tmp_name = [i.name for i in tmp_node][0]
        hap_sample_dict = {}
        for hap in children_haps:
            # first named sample below each sub-haplogroup acts as its representative
            hap_children_sample = list(filter(lambda x: x != '' and x in samples,
                                              [i.name for i in hap.traverse()]))[0]
            hap_sample_dict[hap_children_sample] = hap.name
        hap_children_samples = list(hap_sample_dict.keys())
        id_idx = [ids.index(i) for i in hap_children_samples]
        hap_children_seqs = [seqs[i] for i in id_idx]
        hap_children_tree = self._get_sequence_distance_tree(hap_children_seqs, hap_children_samples, align)
        # rename representatives to (temporary) haplogroup placeholders
        for hap_children_sample, hap_name in hap_sample_dict.items():
            hap_children_tree = hap_children_tree.replace(hap_children_sample, hap_name + tmp_name)
        tmp_tree = Tree(hap_children_tree, format=8)
        for hap in children_haps:
            node_to_add = Tree(hap.write(format=8), format=8)
            tmp_tree.search_nodes(name=hap.name + tmp_name)[0].add_child(node_to_add)
            tmp_tree.search_nodes(name=hap.name + tmp_name)[0].delete()
            hap.detach()
        node.search_nodes(name=tmp_name)[0].add_child(tmp_tree)
        node.search_nodes(name=tmp_name)[0].delete()

    # get sub-tree from multiple sequence alignment
    def _get_sequence_distance_tree(self, seqs, ids, align):
        """Build a newick sub-tree for `ids` by 'ibs', 'upgma' or 'mp' (default)."""
        new_seqs = []
        # prune sequences: keep variant columns with <5% missing ('N'/'-') calls
        for i in range(len(seqs[0])):
            all_al = [s[i] for s in seqs]
            al = list(set(all_al))
            N_count = 0
            if 'N' in al:
                al.remove('N')
                N_count += all_al.count('N')
            if '-' in al:
                al.remove('-')
                N_count += all_al.count('-')
            N_rate = N_count / len(all_al)
            if len(al) != 1 and N_rate < 0.05:
                new_seqs.append(all_al)
        new_seqs = np.array(new_seqs).T.tolist()
        # IBS method
        if align == 'ibs':
            from scipy.cluster import hierarchy
            from scipy.spatial.distance import squareform
            pairwise_df = pd.DataFrame(index=ids, columns=ids)
            # get pairwise IBS
            # NOTE(review): IBS uses the unpruned `seqs` while upgma/mp use the
            # pruned alignment — confirm this asymmetry is intended.
            ind_num = 0
            for ind1 in ids:
                for ind2 in ids[ind_num:]:
                    if ind1 == ind2:
                        pairwise_df.at[ind1, ind2] = 1
                    else:
                        ibs = self._calculate_ibs(seqs, ids, ind1, ind2)
                        pairwise_df.at[ind1, ind2] = ibs
                        pairwise_df.at[ind2, ind1] = ibs
                ind_num += 1
            pairwise_df = 1 - pairwise_df
            # convert distance matrix to hierarchy
            dm = hierarchy.linkage(squareform(np.array(pairwise_df)), 'average')
            dm_tree = hierarchy.to_tree(dm, False)
            children_tree = self._hier_to_newick(dm_tree, '', pairwise_df.index.tolist())
        else:
            import tempfile
            from Bio import Phylo
            from Bio.Seq import Seq
            from Bio.SeqRecord import SeqRecord
            from Bio.Align import MultipleSeqAlignment
            from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor
            new_seqs = [''.join(i) for i in new_seqs]
            seq_list = [SeqRecord(Seq(j), id=i, name=i) for i, j in zip(ids, new_seqs)]
            aln = MultipleSeqAlignment(seq_list)
            calculator = DistanceCalculator('identity')
            # UPGMA method
            if align == 'upgma':
                constructor = DistanceTreeConstructor(calculator, 'upgma')
                tree = constructor.build_tree(aln)
                tree_tmp_file = tempfile.NamedTemporaryFile(mode='w')
                Phylo.write(tree, tree_tmp_file.name, 'newick')
                ete_tree = Tree(open(tree_tmp_file.name).read(), format=1)
                children_tree = ete_tree.write(format=9)
            # MP (maximum parsimony) method: NJ starting tree + NNI search
            elif align == 'mp':
                from Bio.Phylo.TreeConstruction import ParsimonyScorer, NNITreeSearcher, ParsimonyTreeConstructor
                constructor = DistanceTreeConstructor(calculator, 'nj')
                tree = constructor.build_tree(aln)
                scorer = ParsimonyScorer()
                searcher = NNITreeSearcher(scorer)
                constructor = ParsimonyTreeConstructor(searcher, tree)
                pars_tree = constructor.build_tree(aln)
                tree_tmp_file = tempfile.NamedTemporaryFile(mode='w')
                Phylo.write(pars_tree, tree_tmp_file.name, 'newick')
                ete_tree = Tree(open(tree_tmp_file.name).read(), format=1)
                children_tree = ete_tree.write(format=9)
        return children_tree

    # IBS calculation
    def _calculate_ibs(self, seqs, ids, sample1, sample2):
        """Fraction of non-missing positions at which the two samples agree."""
        index1 = ids.index(sample1)
        index2 = ids.index(sample2)
        ibs0 = 0
        ibs2 = 0
        for i, j in zip(seqs[index1], seqs[index2]):
            if i in ['N', '-'] or j in ['N', '-']:
                continue
            if i == j:
                ibs2 += 1
            else:
                ibs0 += 1
        ibs = (ibs2) / (ibs0 + ibs2)
        return ibs

    # convert hierarchy to newick
    def _hier_to_newick(self, node, newick, leaf_names):
        """Recursively serialize a scipy ClusterNode into a newick string."""
        if node.is_leaf():
            return '%s%s' % (leaf_names[node.id], newick)
        else:
            if len(newick) > 0:
                newick = ')%s' % (newick)
            else:
                newick = ');'
            newick = self._hier_to_newick(node.get_left(), newick, leaf_names)
            newick = self._hier_to_newick(node.get_right(), ',%s' % (newick), leaf_names)
            newick = '(%s' % (newick)
            return newick

    # function used for construction of write newick file
    def _write_nwk(self, ind_tree_info, samples):
        """Convert indented ('-\\t'-prefixed) tree lines into a newick string.

        Returns (newick string rooted at 'Y-Adam', per-trunk branch lengths).
        """
        base = ''
        width, level, length = 0, 0, 0.5
        trunk_length = {}
        count_num = ind_tree_info[-1].count('-')
        for i in ind_tree_info[1:]:
            trunk = i.split('\t')[-1]
            width_new = i.count('-\t')
            if width_new > width:
                width = width_new
            level_new = i.count('-') - trunk.count('-')
            dif = level_new - level
            if dif == 0:
                # sibling at the same level: branch length unchanged
                base = trunk + ',' + base
                if ind_tree_info.index(i) + 1 == len(ind_tree_info):
                    base = '(' * (base.count(')') - base.count('(')) + base
            elif dif > 0:
                length = 1 + 1 * (count_num - level_new)
                base = trunk + ')' + base
                if ind_tree_info.index(i) + 1 == len(ind_tree_info):
                    base = '(' * (base.count(')') - base.count('(')) + base
            elif dif < 0:
                length = 1 + 1 * (count_num - level_new)
                if ind_tree_info.index(i) + 1 != len(ind_tree_info):
                    base = trunk + ',' + '(' * abs(dif) + base
                else:
                    base = '(' * (base.count(')') - base.count('(') - 1) + trunk + ',(' + base
            trunk_length[trunk] = length
            level = level_new
        base = '(' + base + 'Y-Adam);'
        return base, trunk_length

    # plot tree figure
    def _visualize_tree(self, base, trunk_length, max_length, samples, layout, plot_info, color_dict=None):
        """Render the tree to PDF with ete3, optionally colored by population."""
        from ete3 import TreeStyle, NodeStyle, TextFace, RectFace
        # build tree from newick
        base_tree = Tree(base, format=8)
        # give unnamed internal nodes unique placeholder names
        num = 0
        for node in base_tree.traverse('postorder'):
            if node.name == '':
                node.name = 'BLANK_NODE%d' % num
                num += 1
        # add text in trunk and leaf, and keep all branch length same in topology
        for node in base_tree.traverse('postorder'):
            trunk = base_tree & node.name
            if node.name in samples:
                text = TextFace(node.name, ftype='Arial', fsize=18)
                text.margin_left = 4
                trunk.add_face(text, column=0, position='branch-right')
            elif node.name.startswith('BLANK_NODE'):
                text = ''
                trunk.add_face(TextFace(text, ftype='Arial', fsize=18), column=0, position='branch-top')
            else:
                space_num = max_length - len(node.name)
                text = node.name + ' ' * space_num
                trunk.add_face(TextFace(text, ftype='Arial', fsize=18), column=0, position='branch-top')
            if node.is_leaf():
                node.dist = trunk_length[node.name]
            else:
                node.dist = 1
        gc.collect()
        # parameters for node style
        # the only difference between TreeNode.traverse() and TreeNode.iter_descendants() is that the first will include the root node in the iteration
        node_color = {}
        if isinstance(plot_info, pd.DataFrame):  # match colors to population
            label = plot_info.columns[0]
            for node in base_tree.traverse('postorder'):
                ns = NodeStyle()
                ns['hz_line_width'] = 1
                ns['vt_line_width'] = 1
                if node.is_leaf() and node.name in samples:
                    # color
                    color = color_dict[plot_info.at[node.name, label]]
                    ns['fgcolor'] = color
                    node_color[node.name] = color
                    # shape
                    ns['shape'] = 'square'
                    ns['size'] = 50
                else:
                    children_nodes = [c_node for c_node in node.children]
                    node_colors = [node_color[i.name] for i in children_nodes]
                    # NOTE(review): when children agree, `color` keeps the value
                    # set by the previously visited node (postorder guarantees
                    # children are visited first) — confirm this is intended.
                    if len(set(node_colors)) > 1:
                        if node.name in self.common_trunk or len(node.name) == 1:
                            color = 'black'
                        else:
                            for g_node in children_nodes:
                                node_colors += [node_color[i.name] for i in g_node]
                    node_color[node.name] = color
                    ns['fgcolor'] = 'black'
                    ns['size'] = 0
                    ns['vt_line_color'] = color
                    ns['hz_line_color'] = color
                node.set_style(ns)
        else:  # all the leaf is same color
            for node in base_tree.traverse():
                ns = NodeStyle()
                ns['hz_line_width'] = 1
                ns['vt_line_width'] = 1
                ns['fgcolor'] = 'black'
                ns['size'] = 0
                node.set_style(ns)
        # parameters for tree style
        ts = TreeStyle()
        ts.min_leaf_separation = 30
        ts.show_leaf_name = False
        ts.show_branch_length = False
        ts.scale = max_length * 15
        ts.branch_vertical_margin = 0
        ts.optimal_scale_level = 'full'
        ts.show_scale = False
        ts.margin_right = 20
        ts.margin_left = 20
        if layout == 'circular':
            ts.mode = 'c'
        # add legend and output tree
        base_tree.convert_to_ultrametric()
        if color_dict:
            if label == 'Color':
                # output figure with color information only
                base_tree.render(self.output_set[0] + '.pdf', tree_style=ts)
            else:
                # add legend
                for name, color in color_dict.items():
                    icon_legend = RectFace(width=200, height=200, fgcolor='black', bgcolor=color)
                    icon_legend.margin_left = 200
                    icon_legend.margin_right = 100
                    icon_legend.margin_top = 50
                    icon_legend.margin_bottom = 50
                    ts.legend.add_face(icon_legend, column=0)
                    text_legend = TextFace(name, fsize=200, ftype='Arial')
                    text_legend.margin_top = 50
                    text_legend.margin_bottom = 50
                    ts.legend.add_face(text_legend, column=1)
                ts.legend_position = 1
                # output figure with population information
                base_tree.render(self.output_set[0] + '.%s.pdf' % label, tree_style=ts)
        else:
            # output figure without population
            base_tree.render(self.output_set[0] + '.pdf', tree_style=ts)

    # match color to populations
    def _match_colors(self, population_info, label):
        """Return (plot_info DataFrame, {category: hex color}) for column `label`."""
        # create a empty dict to match color and population
        color_dict = {}
        # if preset color, check color names
        if label == 'Color':
            colors = population_info[label]
            for i in set(colors):
                if matplotlib.colors.is_color_like(i):
                    color_dict[i] = i
                else:
                    self.logger.error('[Y-LineageTracker] [Error] %s is not a RGB color' % i)
                    sys.exit()
        else:
            # match color to populations
            populations = sorted(list(set(population_info[label])))
            from FilesIO import set_color_num
            color_num = len(populations)
            colors, cmap = set_color_num(color_num, get_cmap=True)
            # creat a dict usef for match color to population
            for i, j in zip(populations, cmap):
                color_dict[i] = matplotlib.colors.to_hex(j)
        plot_info = population_info[[label]]
        return plot_info, color_dict

    # main function for tree plot
    def output_tree_figure(self, population_info, base, trunk_length, max_length, samples, layout):
        """Render the tree figure once per available grouping column."""
        if isinstance(population_info, pd.DataFrame):
            if 'Color' in population_info.columns:
                plot_info, color_dict = self._match_colors(population_info, 'Color')
                self._visualize_tree(base, trunk_length, max_length, samples, layout, plot_info, color_dict)
            else:
                if 'Population' in population_info.columns:
                    plot_info, color_dict = self._match_colors(population_info, 'Population')
                    self._visualize_tree(base, trunk_length, max_length, samples, layout, plot_info, color_dict)
                if 'Group' in population_info.columns:
                    plot_info, color_dict = self._match_colors(population_info, 'Group')
                    self._visualize_tree(base, trunk_length, max_length, samples, layout, plot_info, color_dict)
        else:
            # FIX: the original passed (samples, plot_info, layout), swapping the
            # `layout` and `plot_info` parameters of _visualize_tree, so a
            # requested circular layout was silently ignored when no population
            # file was given.
            plot_info = None
            self._visualize_tree(base, trunk_length, max_length, samples, layout, plot_info)
def main():
    """Command-line entry point: build and plot a Y-haplogroup phylogeny.

    Pipeline: parse arguments -> validate haplogroup/population inputs ->
    build the base tree (optionally refined with a sequence alignment) ->
    merge population labels -> write the tree file and its figure.
    """
    start = time.perf_counter()
    arguments = phylo_parser()
    # set of output files
    output_set = check_phylo_output(arguments.tree, arguments.output)
    from FilesIO import check_hap_input, check_population, check_overlap, time_count
    # check haplogroup data
    hap_data = check_hap_input(arguments.hg, 'haplogroup')
    # check population data (only samples present in both inputs are kept)
    if arguments.population:
        population_data = check_population(arguments.population)
        hap_data, population_data = check_overlap(hap_data, population_data)
    # set log file
    set_log(arguments, output_set[-1])
    # start phylogeny analysis
    phylo = PhyloHaplogroup(hap_data, output_set)
    # read sequence alignment file
    if arguments.seq:
        ids, seqs = phylo.read_seq_file(arguments.seq, arguments.format)
        hap_df, base, trunk_length, max_length, samples = phylo.get_base_tree(ids) # build base tree
    else:
        hap_df, base, trunk_length, max_length, samples = phylo.get_base_tree()
    # construct sub-tree internal nodes (alignment-based refinement)
    if arguments.seq:
        base = phylo.phylo_internal(ids, seqs, base, samples, arguments.align)
    # combine with population data, keyed by SampleID
    if arguments.population:
        population_info = pd.merge(hap_df, population_data, on='SampleID')
        population_info = population_info.set_index('SampleID')
    else:
        population_info = None
    # output tree in specific format
    phylo.output_tree(base, arguments.tree)
    # output tree figure
    phylo.output_tree_figure(population_info, base, trunk_length, max_length, samples, arguments.layout)
    time_count(start)
if __name__ == '__main__':
main()
|
{"hexsha": "96762ff09603864ccffbe4ae4e9d4714de420a4b", "size": 36666, "ext": "py", "lang": "Python", "max_stars_repo_path": "LineageTracker/PhyloHaplogroup.py", "max_stars_repo_name": "Shuhua-Group/Y-LineageTracker", "max_stars_repo_head_hexsha": "82b14c74be95ef2d4d929ce20bf7436869f163ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-11T09:34:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T07:19:05.000Z", "max_issues_repo_path": "LineageTracker/PhyloHaplogroup.py", "max_issues_repo_name": "Shuhua-Group/Y-LineageTracker", "max_issues_repo_head_hexsha": "82b14c74be95ef2d4d929ce20bf7436869f163ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-11T11:19:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T09:01:33.000Z", "max_forks_repo_path": "LineageTracker/PhyloHaplogroup.py", "max_forks_repo_name": "Shuhua-Group/Y-LineageTracker", "max_forks_repo_head_hexsha": "82b14c74be95ef2d4d929ce20bf7436869f163ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-26T09:05:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T09:05:30.000Z", "avg_line_length": 43.598097503, "max_line_length": 201, "alphanum_fraction": 0.559891998, "include": true, "reason": "import numpy,from scipy", "num_tokens": 8141}
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage
def plotData(X, y):
    """Scatter-plot a 2-feature dataset: positives (y==1) blue, negatives red."""
    positive_idx = np.where(y == 1)
    negative_idx = np.where(y == 0)
    plt.scatter(X[positive_idx, 0], X[positive_idx, 1], c='b', label='1')
    plt.scatter(X[negative_idx, 0], X[negative_idx, 1], c='r', label='0')
    plt.legend()
    return plt
def mapFeature(X1, X2, degree=6):
    """Polynomial feature mapping.

    Expands feature vectors X1, X2 into every monomial X1^(i-j) * X2^j
    for 1 <= i <= degree, 0 <= j <= i, preceded by a bias column of ones.
    Output shape is (len(X1), (degree+1)*(degree+2)/2).
    """
    n_terms = sum(range(degree + 2))  # 1 + (1 + 2 + ... + (degree+1)) columns
    mapped = np.ones((X1.shape[0], n_terms))
    col = 1
    for total_degree in range(1, degree + 1):
        for j in range(total_degree + 1):
            mapped[:, col] = np.power(X1, total_degree - j) * np.power(X2, j)
            col += 1
    return mapped
def predict_prob(X, theta):
    """Logistic-regression prediction: sigmoid of X @ theta, rounded to 0/1."""
    z = X.dot(theta)
    probabilities = 1 / (1 + np.exp(-z))
    return probabilities.round()
def plotDecisionBoundary(X, y, theta):
    """Plot the data plus the 0.5-probability contour of the fitted model."""
    plt.figure(figsize=(10, 6))
    plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color='b', label='0')
    plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color='r', label='1')
    plt.legend()
    x1_lo, x1_hi = X[:, 0].min(), X[:, 0].max()
    x2_lo, x2_hi = X[:, 1].min(), X[:, 1].max()
    xx1, xx2 = np.meshgrid(np.linspace(x1_lo, x1_hi, 500),
                           np.linspace(x2_lo, x2_hi, 500))
    grid_points = np.c_[xx1.ravel(), xx2.ravel()]
    mapped = mapFeature(grid_points[:, 0], grid_points[:, 1])
    probs = predict_prob(mapped, theta).reshape(xx1.shape)
    # decision boundary: the level set where the predicted class flips
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
    plt.show()
    return plt
|
{"hexsha": "8f861eb1d8b523c08a38db2402612376b221328a", "size": 1485, "ext": "py", "lang": "Python", "max_stars_repo_path": "ML2020_HW3/utils.py", "max_stars_repo_name": "chongwen8/Machine-Learning-Courseworks", "max_stars_repo_head_hexsha": "374210f0b77cfa166f2270cae6cce7fdd4ed62c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ML2020_HW3/utils.py", "max_issues_repo_name": "chongwen8/Machine-Learning-Courseworks", "max_issues_repo_head_hexsha": "374210f0b77cfa166f2270cae6cce7fdd4ed62c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ML2020_HW3/utils.py", "max_forks_repo_name": "chongwen8/Machine-Learning-Courseworks", "max_forks_repo_head_hexsha": "374210f0b77cfa166f2270cae6cce7fdd4ed62c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5957446809, "max_line_length": 92, "alphanum_fraction": 0.564983165, "include": true, "reason": "import numpy,import scipy", "num_tokens": 528}
|
# -*- coding: utf-8 -*-
from vispy.scene.node import Node
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main, raises)
from vispy.visuals.transforms import STTransform
import numpy as np
class EventCheck(object):
    """Records every event fired by *emitter*; reading .events drains the log."""

    def __init__(self, emitter):
        self._events = []
        self.emitter = emitter
        emitter.connect(self.callback)

    def callback(self, event):
        # accumulate until the next read of .events
        self._events.append(event)

    @property
    def events(self):
        # hand back everything recorded so far and reset the buffer
        recorded, self._events = self._events, []
        return recorded
@requires_application()
def test_topology():
    """Check parent/scene_node/document_node wiring as nodes are added and moved.

    Relies on exact statement order: EventCheck.events drains on every read,
    so each len(...) assertion counts only events since the previous read.
    """
    c = TestingCanvas()
    assert c.scene.canvas is c
    # canvases reject unknown attributes
    with raises(AttributeError):
        c.foo = 'bar'
    w = c.central_widget
    assert w.parent is c.scene
    assert w.scene_node is c.scene
    assert w.document_node is c.scene
    g = w.add_grid()
    with raises(AttributeError):
        g.foo = 'bar'
    grid_check = EventCheck(g.events.children_change)
    # adding a view fires exactly one children_change on the grid
    v1 = g.add_view(row=0, col=0)
    assert v1.parent is g
    assert v1.scene_node is c.scene
    assert len(grid_check.events) == 1
    v2 = g.add_view(row=1, col=0)
    assert v2.parent is g
    assert v2.scene_node is c.scene
    assert v2.document_node is c.scene
    assert len(grid_check.events) == 1
    n1 = Node()
    n1_parent_check = EventCheck(n1.events.parent_change)
    n1_child_check = EventCheck(n1.events.children_change)
    # adding n1 into a view reparents it under the view's scene
    v1.add(n1)
    assert len(n1_parent_check.events) == 1
    assert n1.parent is v1.scene
    assert n1.scene_node is v1.scene
    assert n1.document_node is c.scene
    n2 = Node(parent=n1)
    n2_parent_check = EventCheck(n2.events.parent_change)
    assert n2.parent is n1
    assert n2.scene_node is v1.scene
    assert n2.document_node is c.scene
    assert len(n1_child_check.events) == 1
    assert len(grid_check.events) == 2
    # moving n1 to the other view: descendants follow, n2's own parent unchanged
    v2.add(n1)
    assert len(grid_check.events) == 2
    assert len(n1_parent_check.events) == 1
    assert len(n2_parent_check.events) == 1
    assert n1.parent is v2.scene
    assert n2.scene_node is v2.scene
    assert n2.document_node is c.scene
def test_transforms():
    # test transform mapping between nodes
    #
    # Scene graph built here:
    #   root -- n1 -- n2
    #        \_ n3 -- n4
    # Each node gets a scale+translate transform; node_transform(target)
    # should compose the per-node transforms along the path between nodes.
    root = Node()
    n1 = Node(parent=root)
    n2 = Node(parent=n1)
    n3 = Node(parent=root)
    n4 = Node(parent=n3)
    n1.transform = STTransform(scale=(0.1, 0.1), translate=(7, 6))
    n2.transform = STTransform(scale=(0.2, 0.3), translate=(5, 4))
    n3.transform = STTransform(scale=(0.4, 0.5), translate=(3, 2))
    n4.transform = STTransform(scale=(0.6, 0.7), translate=(1, 0))
    # hand-computed expectations: map applies scale then translate
    assert np.allclose(n1.transform.map((0, 0))[:2], (7, 6))
    assert np.allclose(n1.node_transform(root).map((0, 0))[:2], (7, 6))
    assert np.allclose(n2.transform.map((0, 0))[:2], (5, 4))
    assert np.allclose(n2.node_transform(root).map((0, 0))[:2],
                       (5*0.1+7, 4*0.1+6))
    assert np.allclose(root.node_transform(n1).map((0, 0))[:2],
                       (-7/0.1, -6/0.1))
    assert np.allclose(root.node_transform(n2).map((0, 0))[:2],
                       ((-7/0.1-5)/0.2, (-6/0.1-4)/0.3))
    # just check that we can assemble transforms correctly mapping across the
    # scenegraph
    assert n2.node_path(n4) == ([n2, n1, root], [n3, n4])
    assert n4.node_path(n2) == ([n4, n3, root], [n1, n2])
    assert n2.node_path(root) == ([n2, n1, root], [])
    assert root.node_path(n4) == ([root], [n3, n4])
    assert n2.node_path_transforms(n4) == [n4.transform.inverse,
                                           n3.transform.inverse,
                                           n1.transform, n2.transform]
    assert n4.node_path_transforms(n2) == [n2.transform.inverse,
                                           n1.transform.inverse,
                                           n3.transform, n4.transform]
    # composed node_transform must equal manual composition of the same chain
    pts = np.array([[0, 0], [1, 1], [-56.3, 800.2]])
    assert np.all(n2.node_transform(n1).map(pts) == n2.transform.map(pts))
    assert np.all(n2.node_transform(root).map(pts) ==
                  n1.transform.map(n2.transform.map(pts)))
    assert np.all(n1.node_transform(n3).map(pts) ==
                  n3.transform.inverse.map(n1.transform.map(pts)))
    assert np.all(n2.node_transform(n3).map(pts) ==
                  n3.transform.inverse.map(
                      n1.transform.map(n2.transform.map(pts))))
    assert np.all(n2.node_transform(n4).map(pts) ==
                  n4.transform.inverse.map(n3.transform.inverse.map(
                      n1.transform.map(n2.transform.map(pts)))))
    # test transforms still work after reparenting
    n3.parent = n1
    assert np.all(n2.node_transform(n4).map(pts) == n4.transform.inverse.map(
                  n3.transform.inverse.map(n2.transform.map(pts))))
    # test transform simplification
    assert np.all(n2.node_transform(n4).map(pts) ==
                  n2.node_transform(n4).simplified.map(pts))
# Run the tests above when this module is executed directly (vispy test helper).
run_tests_if_main()
|
{"hexsha": "69ddb7bd32411d28a8cd8739bde86256b77e65eb", "size": 5004, "ext": "py", "lang": "Python", "max_stars_repo_path": "vispy/scene/tests/test_node.py", "max_stars_repo_name": "hmaarrfk/vispy", "max_stars_repo_head_hexsha": "7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-28T16:05:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-03T21:29:03.000Z", "max_issues_repo_path": "vispy/scene/tests/test_node.py", "max_issues_repo_name": "hmaarrfk/vispy", "max_issues_repo_head_hexsha": "7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2017-04-07T01:44:15.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-16T20:47:08.000Z", "max_forks_repo_path": "graphViz/vispy/scene/tests/test_node.py", "max_forks_repo_name": "onecklam/ethereum-graphviz", "max_forks_repo_head_hexsha": "6993accf0cb85e23013bf7ae6b04145724a6dbd2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-15T08:52:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-15T08:52:26.000Z", "avg_line_length": 34.993006993, "max_line_length": 77, "alphanum_fraction": 0.6017186251, "include": true, "reason": "import numpy", "num_tokens": 1367}
|
import Serialization
function stack(io::IO,msg::Vector{UInt8})
    # Frame the payload: a 2-byte little-endian Int16 length header,
    # followed by the payload bytes themselves.
    header = reinterpret(UInt8, Int16[length(msg)])
    write(io, UInt8[header..., msg...])
end
function unstack(io::IO)
    # Read the 2-byte little-endian Int16 length header.
    sizebytes = [read(io,UInt8),read(io,UInt8)]
    size = reinterpret(Int16,sizebytes)[1]
    # Read the payload in one bulk operation instead of the previous
    # byte-by-byte push! loop; read! throws EOFError if the stream ends
    # early, matching the per-byte loop's behavior on truncated input.
    msg = Vector{UInt8}(undef, size)
    read!(io, msg)
    return msg
end
function unstack(io::IOBuffer)
    # Drain the whole buffer, split off one framed message, and push any
    # trailing bytes (belonging to the next message) back into the buffer.
    buffered = take!(io)
    msglen = reinterpret(Int16, buffered[1:2])[1]
    payload = buffered[3:msglen+2]
    if length(buffered) > msglen + 2
        write(io, buffered[msglen+3:end])
    end
    return payload
end
function serialize(socket,msg)
    # Serialize msg into an in-memory buffer, then frame it onto the socket.
    buffer = IOBuffer()
    Serialization.serialize(buffer, msg)
    stack(socket, take!(buffer))
end
function deserialize(socket)
    # Pull one framed message off the socket and decode it.
    payload = unstack(socket)
    Serialization.deserialize(IOBuffer(payload))
end
|
{"hexsha": "b08974352d0d972a1730b84387964b321b32e02a", "size": 876, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "research/stacking.jl", "max_stars_repo_name": "PeaceFounder/PeaceVote.jl", "max_stars_repo_head_hexsha": "f02f208cd673957ad626c8dfa64b24173f80842f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-30T03:15:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-25T00:27:20.000Z", "max_issues_repo_path": "research/stacking.jl", "max_issues_repo_name": "PeaceFounder/PeaceVote.jl", "max_issues_repo_head_hexsha": "f02f208cd673957ad626c8dfa64b24173f80842f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2019-10-17T08:37:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-29T23:30:21.000Z", "max_forks_repo_path": "research/stacking.jl", "max_forks_repo_name": "PeaceFounder/PeaceVote.jl", "max_forks_repo_head_hexsha": "f02f208cd673957ad626c8dfa64b24173f80842f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T10:52:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T10:52:25.000Z", "avg_line_length": 20.8571428571, "max_line_length": 54, "alphanum_fraction": 0.649543379, "num_tokens": 242}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 7 18:52:58 2022
@author: Analabha Roy
"""
import numpy as np
def GEPP(A, b, doPP=True):
    '''
    Gaussian elimination with partial pivoting.

    input: A is an n x n numpy matrix
           b is an n x 1 numpy array
    output: x is the solution of Ax=b

    Rows of A and b are swapped together when pivoting, so the returned
    x solves the original system.

    post-condition: A and b have been modified.
    raises: ValueError on a size mismatch, a singular matrix, or a zero
            pivot when doPP is False.
    '''
    n = len(A)
    if b.size != n:
        # BUG FIX: the two message fragments previously concatenated to
        # "betweenA & b." -- the separating space was missing.
        raise ValueError("Invalid argument: incompatible sizes between " +
                         "A & b.", b.size, n)
    # k represents the current pivot row. Since GE traverses the matrix in the
    # upper right triangle, we also use k for indicating the k-th diagonal
    # column index.
    # Elimination
    for k in range(n-1):
        if doPP:
            # Pivot: largest-magnitude entry on/below the diagonal
            maxindex = abs(A[k:, k]).argmax() + k
            if A[maxindex, k] == 0:
                raise ValueError("Matrix is singular.")
            # Swap rows in both A and b so the system stays consistent
            if maxindex != k:
                A[[k, maxindex]] = A[[maxindex, k]]
                b[[k, maxindex]] = b[[maxindex, k]]
        else:
            if A[k, k] == 0:
                raise ValueError("Pivot element is zero. Try setting doPP to True.")
        # Eliminate entries below the pivot
        for row in range(k+1, n):
            multiplier = A[row, k]/A[k, k]
            A[row, k:] = A[row, k:] - multiplier * A[k, k:]
            b[row] = b[row] - multiplier*b[k]
    # Back Substitution
    x = np.zeros(n)
    for k in range(n-1, -1, -1):
        x[k] = (b[k] - np.dot(A[k, k+1:], x[k+1:]))/A[k, k]
    return x
def det(A):
    """Return the determinant of the square matrix A.

    BUG FIX: the previous implementation ran GEPP with partial pivoting
    and returned np.prod(np.diagonal(A)), but every row swap performed
    during pivoting flips the determinant's sign, so any matrix needing
    an odd number of swaps came back with the wrong sign. This version
    performs the elimination itself and multiplies in (-1) per swap.

    Note: A is modified in place (as before). Raises ValueError for a
    singular matrix, matching the previous GEPP-based behavior.
    """
    n = len(A)
    sign = 1.0
    for k in range(n - 1):
        # partial pivot: largest-magnitude entry on/below the diagonal
        maxindex = abs(A[k:, k]).argmax() + k
        if A[maxindex, k] == 0:
            raise ValueError("Matrix is singular.")
        if maxindex != k:
            A[[k, maxindex]] = A[[maxindex, k]]
            sign = -sign  # a row swap negates the determinant
        for row in range(k + 1, n):
            multiplier = A[row, k] / A[k, k]
            A[row, k:] = A[row, k:] - multiplier * A[k, k:]
    return sign * np.prod(np.diagonal(A))
if __name__ == '__main__':
    # Demo 1: a well-conditioned system that solves without pivoting.
    A = np.array([[25., 5., 1.],
                  [64., 8., 1.],
                  [144., 12., 1.]])
    b = np.array([106.8,
                  177.2,
                  279.2])
    x = GEPP(np.copy(A), np.copy(b), doPP=False)
    print("First solution is given by x =", x)
    # relative residual ||Ax - b|| / ||b||, reported as a percentage
    print("Error is ", np.linalg.norm(A@x - b) * 100/np.linalg.norm(b), "%")
    print("Determinant of first matrix is ", det(np.copy(A)))
    # Demo 2: a system whose naive elimination fails, forcing the
    # pivoting fallback in the except branch below.
    A = np.array([[12., 10., -7.],
                  [6., 5., 3.],
                  [5., -1., 5.]])
    b = np.array([15.,
                  4.,
                  9.])
    try:
        x = GEPP(np.copy(A), np.copy(b), doPP=False)
    except ValueError:
        # zero pivot without pivoting -> retry with partial pivoting
        x = GEPP(np.copy(A), np.copy(b))
    print("Second solution is given by x =", x)
    print("Error is ", np.linalg.norm(A @ x - b) * 100/np.linalg.norm(b), "%")
|
{"hexsha": "3404ca054f38b5897531caef9a120bae6c9d1956", "size": 2661, "ext": "py", "lang": "Python", "max_stars_repo_path": "03-Computational_Linear_Algebra/gauss_method_ex.py", "max_stars_repo_name": "hariseldon99/msph402b", "max_stars_repo_head_hexsha": "20d2df0ca7c7216c504669ea1495a84de1b217d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "03-Computational_Linear_Algebra/gauss_method_ex.py", "max_issues_repo_name": "hariseldon99/msph402b", "max_issues_repo_head_hexsha": "20d2df0ca7c7216c504669ea1495a84de1b217d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "03-Computational_Linear_Algebra/gauss_method_ex.py", "max_forks_repo_name": "hariseldon99/msph402b", "max_forks_repo_head_hexsha": "20d2df0ca7c7216c504669ea1495a84de1b217d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2417582418, "max_line_length": 84, "alphanum_fraction": 0.4904171364, "include": true, "reason": "import numpy", "num_tokens": 767}
|
import numpy as np
from numpy_groupies import aggregate
import sys
sys.path.append("python")
from SurfStatEdg import *
def py_SurfStatSmooth(Y, surf, FWHM):
    """Smooths surface data by repeatedly averaging over edges.

    Parameters
    ----------
    Y : numpy array of shape (n,v) or (n,v,k)
        surface data, v=#vertices, n=#observations, k=#variates.
    surf : a dictionary with key 'tri' or 'lat', or a BSPolyData object.
        surf['tri'] = numpy array of shape (t,3), triangle indices, or
        surf['lat'] = numpy array of shape (nx,ny,nz), 1=in, 0=out,
        (nx,ny,nz) = size(volume).
    FWHM : approximate FWHM of Gaussian smoothing filter, in mesh units.

    Returns
    -------
    Y : numpy array of shape (n,v) or (n,v,k),
        smoothed data (modified in place for ndarray input).
    """
    # number of edge-averaging iterations approximating a Gaussian of this FWHM
    niter = int(np.ceil(pow(FWHM,2) / (2*np.log(2))))
    # NOTE(review): if Y is not an ndarray, or has ndim other than 2/3,
    # n/v/k/isnum are never assigned and the code below raises NameError
    # -- confirm callers always pass a 2-D or 3-D ndarray.
    if isinstance(Y, np.ndarray):
        Y = np.array(Y, dtype='float')
        if np.ndim(Y) == 2:
            n, v = np.shape(Y)
            k = 1
            isnum = True
        elif np.ndim(Y) == 3:
            n, v, k = np.shape(Y)
            isnum = True
    # 1-based vertex indices; index 0 of the aggregate arrays is a dummy slot
    edg = py_SurfStatEdg(surf) + 1
    # aggregating the constant 2 counts each vertex's edge memberships twice;
    # Y1 is the per-vertex normalizer for the edge sums below
    agg_1 = aggregate(edg[:,0], 2, size=(v+1))
    agg_2 = aggregate(edg[:,1], 2, size=(v+1))
    Y1 = (agg_1 + agg_2)[1:]
    if n>1:
        print(' %i x %i surfaces to smooth, %% remaining: 100 '%(n, k))
    n10 = np.floor(n/10)
    for i in range(0, n):
        # progress report every ~10% of observations
        if n10 != 0 and np.remainder(i+1, n10) == 0:
            print('%s ' % str(int(100-(i+1)/n10*10)), end = '')
        for j in range(0, k):
            if isnum:
                if np.ndim(Y) == 2:
                    Ys = Y[i,:]
                elif np.ndim(Y) == 3:
                    Ys = Y[i,:,j]
                # each iteration replaces every vertex value by the
                # normalized sum of its edge-neighbor pair sums
                for itera in range(1, niter+1):
                    Yedg = Ys[edg[:,0]-1] + Ys[edg[:,1]-1];
                    agg_tmp1 = aggregate(edg[:,0], Yedg, size=(v+1))[1:]
                    agg_tmp2 = aggregate(edg[:,1], Yedg, size=(v+1))[1:]
                    Ys = (agg_tmp1 + agg_tmp2) / Y1
                if np.ndim(Y) == 2:
                    Y[i,:] = Ys
                elif np.ndim(Y) == 3:
                    Y[i,:,j] = Ys
    if n>1:
        print('Done')
    return Y
|
{"hexsha": "772ad215db3e72bf15d0100370c1edbc7ad474de", "size": 2281, "ext": "py", "lang": "Python", "max_stars_repo_path": "surfstat/python/SurfStatSmooth.py", "max_stars_repo_name": "rudimeier/BrainStat", "max_stars_repo_head_hexsha": "a5ef474ffd70300ecf5fa464fff4a41e71f4b7a1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "surfstat/python/SurfStatSmooth.py", "max_issues_repo_name": "rudimeier/BrainStat", "max_issues_repo_head_hexsha": "a5ef474ffd70300ecf5fa464fff4a41e71f4b7a1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "surfstat/python/SurfStatSmooth.py", "max_forks_repo_name": "rudimeier/BrainStat", "max_forks_repo_head_hexsha": "a5ef474ffd70300ecf5fa464fff4a41e71f4b7a1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6233766234, "max_line_length": 96, "alphanum_fraction": 0.4638316528, "include": true, "reason": "import numpy,from numpy", "num_tokens": 696}
|
[STATEMENT]
lemma fo_nmlzd_mono: "Inl -` set xs \<subseteq> AD \<Longrightarrow> fo_nmlzd AD' xs \<Longrightarrow> fo_nmlzd AD xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Inl -` set xs \<subseteq> AD; fo_nmlzd AD' xs\<rbrakk> \<Longrightarrow> fo_nmlzd AD xs
[PROOF STEP]
by (auto simp: fo_nmlzd_def)
|
{"llama_tokens": 133, "file": "Eval_FO_Ailamazyan", "length": 1}
|
''' A toy example of playing against rule-based bot on Wizard with trick predictions.
'''
import numpy as np
import rlcard
from rlcard import models
from rlcard.agents import RandomAgent
import torch
import os
import argparse
import random
def run_example(args):
    """Run interactive Wizard games: human player(s) vs. NFSP or random agents.

    Builds the environment variant selected by args.env, loads or creates
    the non-human agents, then loops forever playing games and printing
    per-game trick scores, payoffs and the winner(s).
    """
    # Make environment
    config = {
        'env':args.env,
        'game_num_players': 2,
        'game_num_cards': 5,
        'seed':args.seed,
        'no_human_players':args.n_human_players,
        'opponent': args.opponent,
        'load_path_agent': args.load_path_agent,
    }
    # seed 0 means "no fixed seed": draw a random one instead
    if config["seed"]==0:
        config["seed"]=random.randint(1,100000)
    # variant tag is the middle token of the env name, e.g. "s" or "ms"
    environment_specific = config["env"].split("_")[1]
    # assert environment_specific == config["load_path_agent"].split("_")[1]
    if environment_specific=="s":
        from rlcard.agents.human_agents.wizard_s_trickpred_human_agent import HumanAgent, _print_action
        env = rlcard.make(f"wizard_{environment_specific}_trickpreds_with_humans",config)
    elif environment_specific=="ms":
        from rlcard.agents.human_agents.wizard_ms_trickpred_human_agent import HumanAgent, _print_action
        env = rlcard.make(f"wizard_{environment_specific}_trickpreds_with_humans",config)
    else:
        from rlcard.agents.human_agents.wizard_s_trickpred_human_agent import HumanAgent, _print_action
        env = rlcard.make(f"wizard_trickpreds_with_humans",config)
    # How many human players are in the game?
    assert config["no_human_players"] <= config["game_num_players"] and config["no_human_players"] >=1
    agents = [HumanAgent(env.num_actions) for _ in range(config["no_human_players"])]
    # Append more agents to the game until the number of players is reached.
    for i in range(config["no_human_players"],config["game_num_players"]):
        if config["opponent"]=="nfsp":
            # additional_agent = torch.load(config["load_path_agent"])
            # load the pretrained agent on CPU so no GPU is required
            additional_agent = torch.load(config["load_path_agent"], map_location=torch.device('cpu'))
        else:
            additional_agent = RandomAgent(num_actions=env.num_actions)
        agents.append(additional_agent)
    env.set_agents(agents)
    print(">> Wizard game")
    while (True):
        print(">> Start a new game")
        trajectories, payoffs = env.run(is_training=False)
        # If the human does not take the final action, we need to
        # print other players action
        final_state = trajectories[0][-1]
        action_record = final_state['action_record']
        state = final_state['raw_obs']
        _action_list = []
        # walk the action record backwards until the human's last action
        for i in range(1, len(action_record)+1):
            if action_record[-i][0] == state['current_player']:
                break
            _action_list.insert(0, action_record[-i])
        for pair in _action_list:
            print('>> Player', pair[0], 'chooses ', end='')
            _print_action(pair[1])
            print('')
        print('=============== Evaluation ===============')
        print('=== Trick Scores ===')
        # achieved tricks vs. each player's prediction
        for idx, score in enumerate(state['trick_scores']):
            print("P",str(idx+1),": ",str(score),"/",str(state['predicted_tricks'][idx]))
        print('=== Payoffs ===')
        for idx, score in enumerate(payoffs):
            print("P",str(idx+1),": ",score)
        print('=============== Result ===============')
        # everyone tied at the maximum payoff counts as a winner
        winner_list = []
        max_score = np.max(payoffs)
        for idx,score in enumerate(payoffs):
            if score == max_score:
                winner_list.append(idx)
        if len(winner_list)==1:
            print('Player',winner_list[0]+1,'wins!')
        else:
            print('Players',[winner+1 for winner in winner_list],'win!')
        print('')
        input("Press any key to continue...")
if __name__ == '__main__':
    # CLI: defaults play one human against a pretrained NFSP agent.
    parser = argparse.ArgumentParser("Script to play wizard with humans/agents and trickpredictions")
    parser.add_argument('--env', type=str, default='wizard_s_trickpreds', choices=['wizard_trickpreds','wizard_s_trickpreds',"wizard_ms_trickpreds"])
    parser.add_argument('--cuda', type=str, default='')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--n_human_players', type=int, default=1, choices=[1,2])
    parser.add_argument('--opponent', type=str, default='nfsp', choices=['nfsp','random'])
    parser.add_argument('--load_path_agent', type=str, default='experiments/wizard_s_trickpreds_result_nfsp/model.pth')
    args = parser.parse_args()
    # restrict the visible GPUs before any CUDA work happens
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    run_example(args)
|
{"hexsha": "ac4e867f7211be8b865111af6ff5cbc75b236b8c", "size": 4582, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/human/run_wizard_human_trickpred.py", "max_stars_repo_name": "MagnusWagner/rlcard", "max_stars_repo_head_hexsha": "1a3aaef76e78968ebc68eb5b92e57be4709f7e38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/human/run_wizard_human_trickpred.py", "max_issues_repo_name": "MagnusWagner/rlcard", "max_issues_repo_head_hexsha": "1a3aaef76e78968ebc68eb5b92e57be4709f7e38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/human/run_wizard_human_trickpred.py", "max_forks_repo_name": "MagnusWagner/rlcard", "max_forks_repo_head_hexsha": "1a3aaef76e78968ebc68eb5b92e57be4709f7e38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6545454545, "max_line_length": 149, "alphanum_fraction": 0.6416412047, "include": true, "reason": "import numpy", "num_tokens": 1062}
|
import logging
import pymc3 as pm
import theano.tensor as tt
from theano.compile.ops import as_op
import numpy as np
from scipy import stats
logger = logging.getLogger('root')
def add_exp_uniform_normal_t_model(hierarchical_model):
    """
    A student-t model with normal, uniform, exp priors for mu, sigma, nu parameters, respectively.
    Credits of the implementation of this model in pymc3 belongs to
    http://nbviewer.jupyter.org/github/JWarmenhoven/DBDA-python/blob/master/Notebooks/Chapter%2016.ipynb
    For a discussion on this model and implementation on R refer to Chapter 16 in the book
    'Doing Bayesian Data Analysis: A Tutorial with R, JAGS, and Stan', Second Edition, by John Kruschke (2015).
    """
    # Pool the per-group statistics to center the priors on the data scale.
    mean_y = np.mean([hierarchical_model.stats_y[i].mean for i in range(hierarchical_model.n_groups)])
    # mean of the per-group variances, then sqrt -- an RMS-style pooled SD
    sd_y = np.mean([hierarchical_model.stats_y[i].variance for i in range(hierarchical_model.n_groups)]) ** (0.5)
    with pm.Model() as hierarchical_model.pymc_model:
        nu = pm.Exponential("nu", 1 / 30) # mean = sd = 30
        # very wide priors: sigma spans 4 orders of magnitude around sd_y
        sigma = pm.Uniform("sigma", sd_y / 100, sd_y * 100, shape=hierarchical_model.n_groups)
        mu = pm.Normal("mu", mean_y, (100 * sd_y), shape=hierarchical_model.n_groups)
        observations = []
    hierarchical_model.mu_parameter = "mu"
    hierarchical_model.sigma_parameter = "sigma"
    hierarchical_model.outlierness_parameter = "nu"
    # Observations are attached lazily so data can be set after model creation;
    # the closure re-enters the model context to register the likelihoods.
    def add_observations():
        with hierarchical_model.pymc_model:
            for i in range(hierarchical_model.n_groups):
                observations.append(pm.StudentT(f'y_{i}', nu=nu, mu=mu[i], sd=sigma[i], observed=hierarchical_model.y[i]))
    hierarchical_model.add_observations_function = add_observations
|
{"hexsha": "3864d571d4e42dfd812037cee90f81a271e704e3", "size": 1770, "ext": "py", "lang": "Python", "max_stars_repo_path": "HyBayes/models/metric_model.py", "max_stars_repo_name": "allenai/HyBayes", "max_stars_repo_head_hexsha": "9ac1b923953f471f104a4312499d007a676edc92", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2019-12-07T18:37:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T12:21:35.000Z", "max_issues_repo_path": "HyBayes/models/metric_model.py", "max_issues_repo_name": "allenai/HyBayes", "max_issues_repo_head_hexsha": "9ac1b923953f471f104a4312499d007a676edc92", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HyBayes/models/metric_model.py", "max_forks_repo_name": "allenai/HyBayes", "max_forks_repo_head_hexsha": "9ac1b923953f471f104a4312499d007a676edc92", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-14T04:46:02.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-14T04:46:02.000Z", "avg_line_length": 49.1666666667, "max_line_length": 126, "alphanum_fraction": 0.7084745763, "include": true, "reason": "import numpy,from scipy,import theano,from theano,import pymc3", "num_tokens": 425}
|
(*********************************************************)
(* Formal Proof of the Tic-Tac-Toe's Perfect Strategy *)
(* Author: Shuangquan Feng *)
(* Date: Apr 29. 2018 *)
(*********************************************************)
(* Tic-Tac-Toe' first player has a perfect strategy to never lose the game *)
Require Import board.
(* Definition of 'safe' board state:
Either
1. Player1 wins or ties
2. There exists a move for player1 where either
a. player1 wins or ties the game
b. for any moves player2 can make,
the resulting state is incomplete(i.e. not lose)
and still safe for player1.
*)
(* A board is 'safe' for player 1 when the game is already won/tied, or
   player 1 has a move after which either the game is won outright, or
   every reply by player 2 leaves an incomplete board that is again safe.
   Because the predicate is inductive, a safety proof is a finite
   strategy tree covering all of player 2's options. *)
Inductive safe (b:board) : Prop :=
| win : (get_state b)=win -> safe b (* p1 wins *)
| tie : (get_state b)=tie -> safe b (* p1 ties *)
| safe_step:
  (exists (x:step),
    (* p1 makes one move and wins *)
    ((valid_move b x) /\ ((get_state (move b x))=win))
    \/
    (* p1 makes one move, forall possible moves for p2, p1 is safe *)
    (forall (y:step),
      (valid_round b x y)
      ->(((get_state (move b x))=incomplete)/\safe (move (move b x) y))))
  ->
  safe b.
(* The main theorem we want to prove.
The initial empty board is safe, i.e. player1 can always win or tie *)
Theorem tic_tac_toe_first_always_safe:
  safe empty_board.
(* define some handy tactics to simplify the proof. *)
(* player1 puts X at cell c can directly win or tie the game *)
Ltac final c := exists (st c X); left; split; simpl; split; constructor.
(* player1 puts X at cell c,
   and enumerate all valid moves of player2 *)
Ltac put c' :=
  exists (st c' X); right; intros y H; split;
  try(reflexivity);unfold valid_round in H;
  induction y as [c s];subst; (* c:a cell, s:X *)
  (* separate out the props in 'valid_round' *)
  destruct H as [H1 [H2 [H3 [H4 H5]]]];
  clear H1;
  apply sym_eq_iff in H2; subst;
  (* enumerate all the moves player2 can choose *)
  induction c; simpl in H3; simpl;
  (* get rid of the contradictions *)
  try (exfalso; unfold not in H3; apply H3; constructor);
  try (inversion H4);
  try (inversion H5);
  apply safe_step; clear H3 H4 H5.
Proof.
  apply safe_step.
  (* player1 always first put X at top left corner *)
  put c00.
  (* each bullet level answers one of player2's replies; 'put' opens a
     subgoal per legal player2 move, 'final' closes a branch where
     player1 can immediately win or tie *)
  - put c11; try(final c22).
    + put c20; try(final c02).
      * final c10.
  - put c11; try(final c22).
    + put c12; try(final c10).
      * put c21. put c20. put c01.
  - put c11; try(final c22).
    + put c20; try(final c02).
      * put c12. put c21. put c11.
  - put c02; try(final c01).
    + put c21.
      * put c12. put c22. put c20.
      * put c10. put c22. put c20.
      * put c12. put c22. put c10.
      * put c10. put c20. put c21.
  - put c11; try(final c22).
    + put c02; try(final c20).
      * final c01.
  - put c01; try(final c02).
    + put c11; try(final c21).
      * final c22.
  - put c11; try(final c22).
    + put c20; try(final c10).
      * final c02.
  - put c02; try(final c01).
    + put c20; try(final c11).
      * final c10.
Qed.
|
{"author": "fsq", "repo": "CS386L-Programming-Language", "sha": "2a4e01bba8dbee34d5ccc60b104ce831ff69ace1", "save_path": "github-repos/coq/fsq-CS386L-Programming-Language", "path": "github-repos/coq/fsq-CS386L-Programming-Language/CS386L-Programming-Language-2a4e01bba8dbee34d5ccc60b104ce831ff69ace1/tic-tac-toe/tic-tac-toe.v"}
|
import copy
import datetime
import logging
logger = logging.getLogger(__file__)
import os
import os.path
import statistics as stt
import sys
import time
from collections import namedtuple
from itertools import chain
from pprint import pprint
from colorama import init, deinit, reinit, Fore, Style
init()
deinit()
import gym
import h5py
import numpy as np
import pandas as pd
# Plotting is optional: training works without matplotlib, but reward
# progress cannot be displayed.
try:
    import matplotlib.pyplot as plt
except ImportError:
    # Bug fix: previously a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt raised during import.
    print("Warning: matplotlib is not installed. You will not be able to display reward progress and can cause errors also")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # To silent Tensorflow warnings
import tensorflow as tf
from tensorflow.keras import Model, layers as Layers
from tensorflow.keras.layers import Dense, Flatten, Conv1D
# import tensorflow_addons as tfa
class NNv0(Model):
    """Fully configurable feed-forward network.

    Hidden layers are instantiated dynamically from a config list of
    ``{'class': ..., 'args': ..., 'kwargs': ...}`` dicts, followed by a
    linear output layer of ``n_output`` units.
    """
    def __init__(self, n_output, hidden_layers, **kwargs):
        super().__init__(**kwargs)
        # Build each hidden layer by name from tensorflow.keras.layers.
        self.nnlayers = [
            getattr(Layers, spec['class'])(*spec['args'], **spec['kwargs'])
            for spec in hidden_layers
        ]
        self.output_layer = Dense(n_output, activation=None)
    @tf.function(experimental_compile=True)
    def call(self, x):
        """Forward pass: run the hidden stack, then the linear output layer."""
        hidden = x
        for nn_layer in self.nnlayers:
            hidden = nn_layer(hidden)
        return self.output_layer(hidden)
class NNv1(Model):
    """Plain MLP Q-network (no convolution): 1024-256-64-16 -> n_output."""
    def __init__(self, n_output, **kwargs):
        super().__init__(**kwargs)
        self.d1 = Dense(1024, activation='relu', kernel_initializer="he_uniform")
        self.d2 = Dense(256, activation='relu', kernel_initializer="he_uniform")
        self.d3 = Dense(64, activation='relu', kernel_initializer="he_uniform")
        self.d4 = Dense(16, activation='relu', kernel_initializer="he_uniform")
        self.d5 = Dense(n_output, activation=None)
    @tf.function(experimental_compile=True)
    def call(self, x):
        """Forward pass: apply the five dense layers in order."""
        out = x
        for dense in (self.d1, self.d2, self.d3, self.d4, self.d5):
            out = dense(out)
        return out
class NNv2(Model):
    """Conv1D Q-network: one conv layer, flatten, then a 64-16 dense head.

    NOTE(review): ``self.c2`` (conv1d_2) is instantiated but never applied in
    ``call()`` -- the forward pass goes c1 -> flatten -> dense head.  Presumably
    the second conv was pruned but the layer object was kept (its weights are
    created but never trained or used); confirm whether it should be wired in.
    """
    def __init__(self, n_output, **kwargs):
        super().__init__(**kwargs)
        self.c1 = Conv1D(16, 3, padding='valid', activation='relu', kernel_initializer="glorot_uniform", name="conv1d_1")
        self.c2 = Conv1D(32, 3, padding='valid', activation='relu', kernel_initializer="glorot_uniform", name="conv1d_2")
        self.f1 = Flatten(name="flatten_1")
        self.d1 = Dense(64, activation='relu', kernel_initializer="he_uniform", name="dense_1")
        self.d2 = Dense(16, activation='relu', kernel_initializer="he_uniform", name="dense_2")
        self.d3 = Dense(n_output, activation=None, kernel_initializer="he_uniform", name="dense_3")
    @tf.function(experimental_compile=True)
    def call(self, x):
        # c2 is intentionally(?) skipped here -- see class docstring.
        y = self.c1(x)
        y = self.f1(y)
        y = self.d1(y)
        y = self.d2(y)
        y = self.d3(y)
        return y
class NNv3(Model):
    """Conv1D Q-network with two 'same'-padded conv layers and a 128-64-64 dense head.

    Used for history windows larger than 2 (see DQN._get_nn).
    """
    def __init__(self, n_output, **kwargs):
        super().__init__(**kwargs)
        self.c1 = Conv1D(32, 3, padding='same', activation='relu', kernel_initializer="glorot_uniform", input_shape=(3, 3), data_format="channels_last", name="conv1d_1")
        self.c2 = Conv1D(64, 3, padding='same', activation='relu', kernel_initializer="glorot_uniform", input_shape=(3, 32), data_format="channels_last", name="conv1d_2")
        self.f1 = Flatten(name="flatten_1")
        self.d1 = Dense(128, activation='relu', kernel_initializer="he_uniform", name="dense_1")
        self.d2 = Dense(64, activation='relu', kernel_initializer="he_uniform", name="dense_2")
        self.d3 = Dense(64, activation='relu', kernel_initializer="he_uniform", name="dense_3")
        self.d4 = Dense(n_output, activation=None, kernel_initializer="he_uniform", name="dense_4")
    @tf.function(experimental_compile=True)
    def call(self, x):
        """Forward pass: conv stack, flatten, then the dense head."""
        features = self.c2(self.c1(x))
        hidden = self.d2(self.d1(self.f1(features)))
        return self.d4(self.d3(hidden))
class DQN():
    """Deep Q-Network wrapper: online network, target network, optimizer and Q-value helpers.

    Holds two identical networks (``nn`` online, ``nn_target`` frozen copy for
    bootstrapping targets) plus the loss/optimizer/metrics used by the agent.
    """
    def __init__(self, observation_space, action_space, name, config, verbose=True):
        self.action_space = action_space
        # History window H (default 1) is prepended to the observation shape: (H, *obs)
        self.window_size = (config['agent']['history_window'] if 'history_window' in config['agent'] and config['agent']['history_window'] is not None else 1,)
        self.obs_shape = self.window_size + observation_space.shape
        # MultiDiscrete spaces have a non-empty shape -> one output per joint action;
        # Discrete spaces fall back to `n`.  (np.prod replaces np.product, which
        # was deprecated and removed in NumPy 2.0.)
        self.n_output = action_space.shape and np.prod(action_space.nvec) or action_space.n
        self.loss_object = tf.keras.losses.Huber()  # more outlier-robust than MSE
        self.learning_rate_schedule = self._get_learning_rate_schedule(config=config)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate_schedule, clipvalue=1.0)
        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.test_loss = tf.keras.metrics.Mean(name='test_loss')
        self.nn = self._get_nn(self.n_output, config=config, name=name)
        self.nn_target = self._get_nn(self.n_output, config=config, name=name)
        self.gamma = config['agent']['network']['gamma']  # discount factor
        self._build((None, *self.obs_shape), verbose=verbose)
    def _get_learning_rate_schedule(self, config):
        """Return a PiecewiseConstantDecay schedule if configured, else the constant rate."""
        learning_rate_initial = config['agent']['network']['learning_rate']
        if 'optimizer' in config:  # Only PiecewiseConstantDecay is currently supported
            cfgopt = config['optimizer']
            # NOTE(review): the multiplier converts epoch boundaries into steps;
            # episode length would likely be a better unit -- confirm.
            bem = cfgopt['boundary epoch multiplier']
            lri = learning_rate_initial
            learning_rate_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
                boundaries=[b*bem for b in cfgopt['boundaries']],
                values=[float(s)*lri for s in cfgopt['values']]
            )
        else:  # Default and deprecated TODO: raise warning message
            learning_rate_schedule = learning_rate_initial
        return learning_rate_schedule
    def _build(self, shape_nn, verbose=True):
        """Build both networks for the given input shape; optionally print a summary."""
        self.nn.build(shape_nn)
        if not self.nn_target.built:
            self.nn_target.build(shape_nn)
        if verbose:
            self.nn.summary()
    def _get_nn(self, n_output, config, name):
        """Select the network architecture from the config / history window size.

        Bug fix: the None case is now checked before ``H > 2`` -- previously a
        None history_window raised TypeError on the comparison instead of
        selecting NNv1.
        """
        H = config['agent']['history_window']
        if 'model' in config:
            nn = NNv0(n_output, hidden_layers=config['model']['hidden_layers'], name=name)  # Custom
        elif H is None or H == 1:
            nn = NNv1(n_output, name=name)  # Without conv
            print("LOAD NNv1")
        elif H > 2:
            nn = NNv3(n_output, name=name)  # With conv but less params
            print("LOAD NNv3")
        else:
            raise Exception("Neural Network incompatible")
        if __debug__:
            logger.debug("NN: {}".format(nn))
        return nn
    def load_weights(self, *args, **kwargs):
        """Load weights into the online network and sync the target network."""
        ret = self.nn.load_weights(*args, **kwargs)
        self.nn_target.set_weights(self.nn.get_weights())
        return ret
    def save_weights(self, *args, **kwargs):
        """Save the online network's weights."""
        return self.nn.save_weights(*args, **kwargs)
    @tf.function(experimental_compile=True)
    def __call__(self, x):
        """Q-values of the online network for a batch of beliefs."""
        return self.nn(x)
    @tf.function(experimental_compile=True)
    def qvalue(self, x, a):
        """Q(s, a) for the given integer actions, via a one-hot mask."""
        x = self.nn(x)
        a = tf.one_hot(a, self.n_output)
        x = x * a
        x = tf.reduce_sum(x, axis=1)
        return x
    @tf.function(experimental_compile=True)
    def qvalue_with_mask(self, x, mask):
        """Q(s, a) with the action already encoded as a one-hot mask."""
        x = self.nn(x)
        x = tf.multiply(x, mask)
        x = tf.reduce_sum(x, axis=1)
        return x
    @tf.function(experimental_compile=True)
    def qvalues(self, x):
        """All Q-values of the online network."""
        return self.nn(x)
    @tf.function(experimental_compile=True)
    def qvalue_max(self, x):
        """max_a Q(s, a) under the online network."""
        x = self.nn(x)
        x = tf.reduce_max(x, axis=1)
        return x
    @tf.function(experimental_compile=True)
    def target_qvalue_max(self, x):
        """max_a Q_target(s, a) under the frozen target network."""
        x = self.nn_target(x)
        x = tf.reduce_max(x, axis=1)
        return x
    @tf.function(experimental_compile=True)
    def argmax_qvalue(self, x):
        """Greedy action index under the online network."""
        x = self.nn(x)
        x = tf.argmax(x, axis=1)
        return x
    @tf.function(experimental_compile=True)
    def qtarget(self, x, r, d):
        """Bootstrapped TD target r + γ·max_a Q_target(s', a)·(1-done)."""
        return r + self.gamma*self.target_qvalue_max(x)*(1-d)
    @tf.function(experimental_compile=True)
    def compute_error(self, o, a, r, n_o, done):
        """MSE between the TD target and Q(s, a).

        Bug fix: ``done`` was previously not forwarded to qtarget(), which
        takes three arguments and needs it to stop bootstrapping on terminal
        transitions -- the old call raised a missing-argument error.
        """
        return tf.keras.losses.MSE(self.qtarget(n_o, r, done), self.qvalue(o, a))
class Agentv1():
    """ε-greedy DQN agent with replay memory and a gated target-network update.

    The agent's "belief" equals the raw observation (see Agentv2 for the
    history-window variant).
    """
    def __init__(self, model, memory, config):
        self.action_space = model.action_space
        self.step = 0  # global exploration step counter
        self.explore_start = config['agent']['explore_start']
        self.explore_stop = config['agent']['explore_stop']
        self.decay_rate = config['agent']['decay_rate']
        self.batch_size = config['agent']['network']['batch_size']
        self.history_window = config['agent']['history_window'] if 'history_window' in config['agent'] and config['agent']['history_window'] is not None else 1
        self.seed = config['seed']
        self.config = config
        self.model = model
        self.best_loss = float("inf")
        self.ndim = self._get_dimensions()
        self.memory = memory
        # Target-network sync bookkeeping: sync at most every
        # `target_steps_update` train steps, and only while the smoothed loss
        # is low enough relative to the best loss seen so far.
        self.target_steps_update = int(config['agent']['target']['update'])
        self.train_steps_without_update = 0
        self.train_steps_to_update = self.target_steps_update
        self.percentage_to_update_init = 1.1
        self.percentage_to_update = self.percentage_to_update_init
        self.percentage_to_update_target = 1
        self.importance_sampling = False
        self.importance_ratio = 0.40
        self.actual_loss = 1.0  # moving average of the training loss
        self.update_msg = ""  # For debugging purposes
        self.update_flag = False  # For debugging purposes
    def __call__(self, *args, **kwargs):
        """Alias for self.act"""
        return self.act(*args, **kwargs)
    def _get_dimensions(self):
        # Beliefs are plain observations -> a batch is 2-dimensional.
        return 2
    @property
    def obs_shape(self):
        return self.model.obs_shape
    def guess_init(self):
        """Return the initial (all-zero) belief."""
        return np.zeros(self.obs_shape, dtype=np.float32)
    def fill_memory(self, env, cyclic=False):
        """Pre-fill the replay memory by interacting with *env*.

        Bug fix: ``cyclic`` was previously hard-coded to False when forwarded,
        silently ignoring the caller's value.
        """
        self.memory.fill_memory(env, self, cyclic=cyclic)
    def load_weights(self, path, load_from_path=True, skip_OSError=False):
        """Build the network if needed and load weights from *path*.

        Args:
            path (str): Weights file path.
            load_from_path (bool): If False, only build the network.
            skip_OSError (bool): If True, a missing file is reported instead of raised.
        """
        if not self.model.nn.built:
            shape_nn = (None,*self.obs_shape)
            self.model._build(shape_nn)
        try:
            if load_from_path:
                self.model.load_weights(path)
        except OSError as e:
            if not skip_OSError:
                raise e
            print("Model not initialized. OSError skipped", file=sys.stderr)
    def act(self, belief, keep_tensor=False):
        """Perform an action.
        If keep_tensor=True then return a tensor of Tensorflow.
        Args:
            belief (numpy.ndarray): Agent's belief of the env state given n observations (computed by agent.guess(o, h))
        Return:
            act ([numpy.array, tensorflow.tensor]): Greedy action w.r.t. the current Q-network
        """
        single_input_flag = False
        if belief.ndim < self.ndim:
            # Promote a single belief to a batch of one
            single_input_flag = True
            belief = belief[None, ...]
        if isinstance(self.action_space, gym.spaces.MultiDiscrete):
            act = self.model.argmax_qvalue(belief)
            # Map the flat argmax index back to a multi-dimensional action
            act = tf.unravel_index(act, self.action_space.nvec)
            act = tf.transpose(act)
            if single_input_flag:
                act = act[0]
            if not keep_tensor:
                act = act.numpy()
        else:
            act = self.model.argmax_qvalue(belief)
            if not keep_tensor:
                if single_input_flag:
                    act = int(act)
                else:
                    act = act.numpy()
        return act
    def qvalues(self, s):
        """Return qvalues given an state s"""
        return self.model(s)
    def explore(self, increment=False):
        """Exploration with Exponential Decaying:
        Description: Compute probability of exploring using p(ε) = stop + (start-stop)/exp(decay*step)
        Internal Params:
            start = 1.0 (float): At start only explore
            stop = 0.1 (float): Minimum exploration rate
            decay = 1e-4 (float): decay rate of probability
            step ∈ {[0, ∞) ∩ ℕ } starts in 0 and step++
        Args:
            increment (bool): If true, increment self.step in 1 automatically. Default: False
        Returns:
            explore_p (numpy.float): Probability of exploration ∈ (stop, start]
        """
        explore_p = self.explore_stop + np.exp(-self.decay_rate*self.step)*(self.explore_start - self.explore_stop)
        if increment:
            self.step += 1
        return explore_p # Make a random action
    def explore_step(self):
        """Exploration probability, incrementing the step counter."""
        return self.explore(increment=True)
    def guess(self, obs = None, hstate = None):
        """Belief update: with no history window the belief is the observation itself."""
        return obs
    def sample(self):
        """Sample a random action from the action space."""
        action = self.action_space.sample()
        return action
    def _ravel_action(self, action):
        # Flatten a MultiDiscrete action to a single index for storage.
        if np.array(action).size > 1:
            action = np.ravel_multi_index(action, self.action_space.nvec).squeeze()
        return action
    def add_experience(self, obs, action, reward, next_obs, done):
        """Store one (s, a, r, s', done) transition in the replay memory."""
        obs = np.array(obs)
        action = self._ravel_action(action)
        experience = obs, action, reward, next_obs, done
        self.memory.add(experience)
    def _convert_experience_list_to_tensor(self, experience_list):
        """Unzip a list of transitions into the five batched TF tensors."""
        hstate_list, action_list, reward_list, next_obs_list, done_list = list(zip(*experience_list))
        # Recompute the next belief from the stored belief and next observation.
        next_hstate_list = [self.guess(obs, hstate) for hstate, obs in zip(hstate_list, next_obs_list)]
        hstate_tensor = tf.convert_to_tensor(hstate_list, dtype=tf.float32)
        action_tensor = tf.squeeze(tf.convert_to_tensor(action_list, dtype=tf.int32))
        reward_tensor = tf.convert_to_tensor(reward_list, dtype=tf.float32)
        next_hstate_tensor = tf.convert_to_tensor(next_hstate_list, dtype=tf.float32)
        done_tensor = tf.convert_to_tensor(done_list, dtype=tf.float32)
        return hstate_tensor, action_tensor, reward_tensor, next_hstate_tensor, done_tensor
    def importance_sampling_experiences(self, experience_list):
        """Keep only experiences whose stored action matches the current greedy action."""
        hstate_tensor, action_tensor, reward_tensor, next_hstate_tensor, done_tensor = self._convert_experience_list_to_tensor(experience_list)
        mask = tf.cast(self(hstate_tensor, keep_tensor=True), tf.int32) == action_tensor
        important_experiences = [x for m, x in zip(mask, experience_list) if m]
        return important_experiences
    def train_step(self):
        """Sample a batch, run one SGD step, and maybe sync the target network.

        Returns:
            loss (float): Raw loss of this train step (before smoothing).
        """
        if self.importance_sampling:
            # Mix a fraction of "important" experiences (where the stored action
            # agrees with the current greedy policy) into the batch.
            n_importance_samples = int(self.importance_ratio * self.batch_size)
            importance_experience_list = []
            while len(importance_experience_list) < n_importance_samples:
                importance_experience_list.extend(self.importance_sampling_experiences(self.memory.sample(self.batch_size)))
            importance_experience_list = importance_experience_list[:n_importance_samples]
            experience_list = importance_experience_list + self.memory.sample(self.batch_size-n_importance_samples)
        else:
            experience_list = self.memory.sample(self.batch_size)
        hstate_tensor, action_tensor, reward_tensor, next_hstate_tensor, done_tensor = self._convert_experience_list_to_tensor(experience_list)
        self.tf_train_step(hstate_tensor, action_tensor, reward_tensor, next_hstate_tensor, done_tensor)
        loss = self.model.train_loss.result().numpy()
        self.actual_loss = self.actual_loss + 0.1*(loss - self.actual_loss) # Moving average to smooth the update
        self.model.train_loss.reset_states()
        # Sync the target network only after enough train steps AND when the
        # smoothed loss is close enough to the best loss observed so far.
        if self.train_steps_without_update >= self.train_steps_to_update and self.actual_loss < self.percentage_to_update*self.best_loss:
            self.model.nn_target.set_weights(self.model.nn.get_weights())
            self.update_msg = "Policy updated after {} steps".format(self.train_steps_without_update)
            self.update_flag = True
            self.train_steps_without_update = 0
            if self.actual_loss < self.best_loss:
                self.best_loss = min(self.actual_loss, self.best_loss)
                self.percentage_to_update = self.percentage_to_update_init
            else: # Contract update restriction
                self.percentage_to_update = self.percentage_to_update_target + 0.9*(self.percentage_to_update - self.percentage_to_update_target)
        else:
            self.train_steps_without_update += 1
        return loss
    @tf.function(experimental_compile=True)
    def tf_train_step(self, hstate_tensor, action_tensor, reward_tensor, next_hstate_tensor, done_tensor):
        """
        q(s,a)_t+1 = q(s,a) - α*err
        err = q(s,a) - r+γ*max_a[q(s',a)]
        Only compute error + SGD instead of computing moving average and then SGD
        """
        qtarget = self.model.qtarget(next_hstate_tensor, reward_tensor, done_tensor) # r+γ*max_a[q(s',a')]*(1-done)
        mask = tf.one_hot(action_tensor, self.model.n_output)
        with tf.GradientTape() as tape:
            qvalue = self.model.qvalue_with_mask(hstate_tensor, mask) # q(s,a)
            loss = self.model.loss_object(qtarget, qvalue)
        gradients = tape.gradient(loss, self.model.nn.trainable_variables)
        self.model.optimizer.apply_gradients(zip(gradients, self.model.nn.trainable_variables))
        self.model.train_loss(loss)
        return gradients
class Agentv2(Agentv1):
    """Agent whose belief is a sliding window of the last H observations."""
    def _get_dimensions(self):
        # Beliefs carry an extra history dimension: (batch, history, *obs).
        return 3
    def guess(self, o, h):
        """Shift the history window one step and append observation *o*.

        Implemented with a single concatenate -- roughly 10x faster than np.roll.
        """
        return np.concatenate((h[1:], o[None]))
class Monitor():
    """Tracks training progress: evaluation, early stopping, logging and persistence.

    Responsibilities:
      * periodically evaluate the agent on every env in ``env_eval_list``
      * early-stop when the reward has not improved for a configured number of evaluations
      * print one-line (or multi-line) progress reports
      * persist model weights (latest + best) and trajectories/losses to HDF5
    """
    def __init__(self, agent, env_eval_list, times=None, config=None, output_template=None):
        self.config = config
        self.agent = agent
        self.env_eval_list = env_eval_list
        print("{:=^65}".format("ENVIRONMENT INFO"))
        for env in self.env_eval_list:
            pprint(env.spec.__dict__)
            print(f"Start step values: {np.array(env.step_start_values)}")
        print("{:=^65}".format("END ENVIRONMENT INFO"))
        self.time_last_msg = time.time()
        # Output files: training data (h5), latest model, and best ("early") model.
        self.filename_data = output_template.format(object='data')
        self.filename_model = output_template.format(object='model')
        self.filename_early = output_template.format(object='early')
        self.early_stop_iterations = 0
        self.early_stop_max_iterations = config['agent']['monitor']['early_stop']
        self.best_reward = None
        self.plot_epoch = 0
        self.plot_rewards = list()
        self.plot_displayed = None
        self.training_trajectory_list = list()
        self.loss_list = list()
        self.times = times  # shared timing dict (t_ini, t_ev_tot, t_step_delta_list, ...)
        self.ema = 0  # exponential moving average of steps/second
        self.GAP = 0.01 # 1% gap from optimal to mark as solved
        self._stop = False
    def _evalue(self, env, h_I=None):
        """Run one greedy evaluation episode in *env* and return its trajectory."""
        t_ev_s = time.time()
        policy = Policy(self.agent, h_I)
        trajectory = evalue_policy(policy, env, h_I)
        self.times['t_ev_tot'] += time.time() - t_ev_s
        return trajectory
    def _verbose(self, epoch, step, loss_list, cumulated_reward_list, eval_trajectory_list, oneline=True, dry_run=False):
        """Print a progress report: one (possibly colored) line, or a summary block."""
        # TODO: Check flow code:
        # Often values to be printed are calculated "a priori" but then conditions to be printed aren't met being useless the first calculations
        def compute_gap(value, threshold):
            # Relative distance to the env's reward threshold (nan if no threshold).
            if threshold is None:
                return float("nan")
            return (value - threshold) / threshold
        reward_threshold_list = [env.spec.reward_threshold for env in self.env_eval_list]
        loss_array = np.array(loss_list)
        loss_mean, loss_std = (loss_array.mean(), loss_array.std()) if loss_array.size > 0 else (np.nan, np.nan)
        reward_gap_list = list(map(compute_gap, cumulated_reward_list, reward_threshold_list))
        if oneline:
            delta_time = time.time()-self.times['t_ini']
            self.ema = compute_ema_from_list(self.ema, self.times['t_step_delta_list']) # Exponential moving average
            self.times['t_step_delta_list'] = list()
            template = ("{:.1f}s | Ech {:3d} (step {:5d} {:4.1f}/s) | ear {:4d} | ε {:.4f} | lmean {:7.4e} | lstd {:7.4e} |"
                + " |".join([" r {:.4f} (gap {:.2f})"]*len(cumulated_reward_list)) + " {}")
            line = template.format(
                delta_time,
                epoch, step, self.ema, self.early_stop_max_iterations - self.early_stop_iterations,
                self.agent.explore(),
                loss_mean, loss_std,
                *chain.from_iterable([(cumulated_reward, reward_gap) for cumulated_reward, reward_gap in zip(cumulated_reward_list, reward_gap_list)]),
                self.agent.update_msg,
            )
            if self.early_stop_iterations == 0:
                # New best reward: highlight the line in green when on a tty.
                if os.isatty(sys.stdout.fileno()):
                    reinit()
                    print(Fore.GREEN + line + Style.RESET_ALL, flush=True)
                    deinit()
                else:
                    print("> " + line, flush=True)
                solution_array = np.array([step.action for step in eval_trajectory_list[0]]).ravel().tolist()
                if len(solution_array) <= 40:
                    print("Solution:", solution_array)
            elif time.time() - self.time_last_msg > 1.0 or self.agent.update_flag:
                print(line, flush=True)
                self.time_last_msg = time.time()
            if self.agent.update_flag: # If update_flag is True then update_msg has text. After print, clean
                self.agent.update_msg = ""
                self.agent.update_flag = False # To avoid reassigning every time (cheaper check)
        else:
            # Bug fix: this branch referenced an undefined name `steps` (the
            # parameter is `step`), raising NameError whenever oneline=False.
            print("Training summary of epoch {} ({} steps):".format(epoch, step))
            print("    Training mean (std) loss: {} ({})".format(loss_mean, loss_std))
            print("    Evaluation reward: {}".format(cumulated_reward_list))
            print("    Early iter remaining: {}".format(self.early_stop_max_iterations - self.early_stop_iterations))
    def _save_model(self, filename_model):
        """Save the model of the agent in filename_model (creating the directory if needed)."""
        try:
            self.agent.model.nn.save_weights(filename_model, save_format='h5')
        except OSError:
            # Destination directory did not exist yet; create it and retry once.
            # exist_ok avoids a crash if another process created it meanwhile.
            os.makedirs(os.path.dirname(filename_model), exist_ok=True)
            self.agent.model.nn.save_weights(filename_model, save_format='h5')
    @staticmethod
    def _dump_loss(h5key, loss_list):
        """Get a dict of the loss to be stored in h5
        Args:
            h5key (str): Full h5 key the loss list is stored under
            loss_list (list<float>): All the loss from one epoch
        Returns:
            data_dict (dict): Empty when there is no loss to store
        """
        data_dict = dict()
        if len(loss_list) > 0:
            data_dict = {
                h5key: loss_list,
            }
        return data_dict
    @staticmethod
    def _dump_trajectory(template, trajectory):
        """Get h5 trajectory format
        Args:
            template (str): Key template with a ``{target}`` placeholder
            trajectory (list<StepClass>): A trajectory of one epoch
        Returns:
            data_dict (dict): A dict to be used in h5
        """
        data_dict = dict()
        if len(trajectory) > 0:
            s_t_list, a_t_list, r_t_list, s__t_list, done_t_list = list(zip(*trajectory))
            data_dict = {
                template.format(target='s'): s_t_list,
                template.format(target='a'): a_t_list,
                template.format(target='r'): r_t_list,
                template.format(target='s_'): s__t_list,
                template.format(target='d'): done_t_list,
            }
        return data_dict
    @staticmethod
    def _save_data(filename_data, data_dict, mode="a"):
        """Write every (key, value) of *data_dict* into the h5 file, overwriting existing keys."""
        with h5py.File(filename_data, mode) as f:
            for k, v in data_dict.items():
                try:
                    f[k] = v
                except (OSError, RuntimeError) as e:
                    # Key already exists: replace it.
                    del f[k]
                    f[k] = v
    def _save(self, epoch, step, eval_trajectory_list, clean_data=True):
        """Persist model weights plus training/evaluation trajectories and losses."""
        template = "{{phase}}/{epoch}/{step}/{{target}}".format(epoch=epoch, step=step)
        self._save_model(self.filename_model)
        if self.early_stop_iterations == 0:
            # Best model so far: also keep a separate "early" snapshot.
            self._save_model(self.filename_early)
        data_dict = dict()
        data_dict.update(self._dump_loss(template.format(phase="t", target="l"), self.loss_list))
        data_dict.update(self._dump_trajectory(template.format(phase="t", target="{target}"), self.training_trajectory_list))
        for i, trajectory in enumerate(eval_trajectory_list):
            data_dict.update(self._dump_trajectory(template.format(phase="e-%d"%i, target="{target}"), trajectory))
        mode = "w" if step == 0 else "a"
        self._save_data(self.filename_data, data_dict, mode=mode)
        if clean_data:
            self.training_trajectory_list = list()
            self.loss_list = list()
    def evalue(self, step, epoch, verbose=False, oneline=True, dry_run=False, h_I=None):
        """Evaluate the agent on every eval env; update early-stop state and persist results."""
        eval_trajectory_list = list()
        cumulated_reward_list = list()
        for i, env in enumerate(self.env_eval_list):
            trajectory = self._evalue(env, h_I)
            cumulated_reward = sum([step.reward for step in trajectory])
            cumulated_reward_list.append(cumulated_reward)
            eval_trajectory_list.append(trajectory)
            # Solved: first env beats its threshold by more than GAP percent.
            if (i == 0
                    and env.spec.reward_threshold is not None
                    and cumulated_reward >= env.spec.reward_threshold * (1 + self.GAP) # GAP = (cumulated_reward - reward_threshold) / reward_threshold
                    ):
                self._stop = True
        if self.best_reward is None or cumulated_reward_list[-1] > self.best_reward:
            self.best_reward = cumulated_reward_list[-1]
            self.early_stop_iterations = 0
        else:
            self.early_stop_iterations += 1
        if verbose:
            self._verbose(epoch, step, self.loss_list, cumulated_reward_list, eval_trajectory_list, oneline=oneline, dry_run=dry_run)
        if not dry_run:
            self._save(epoch, step, eval_trajectory_list) # Save early model and trajectory data
    def add_loss(self, loss):
        """Record one training loss value for the current epoch."""
        self.loss_list.append(loss)
    def add_experience(self, s, a, r, s_, done):
        """Record one training transition for the current epoch."""
        self.training_trajectory_list.append(StepClass(s, a, r, s_, done))
    @property
    def stop(self):
        """True once solved or when early stopping triggers (early == -1 never stops)."""
        return self._stop or self.early_stop_iterations == self.early_stop_max_iterations # == to be able to: early == -1 -> never ends
    @property
    def has_improved(self):
        # Placeholder: intentionally not implemented yet.
        raise NotImplementedError("Monitor.has_improved is not implemented yet")
    def debug(self, agent):
        """Print debugging info about the agent's exploration rate."""
        print("\tDEBUG: Exploration: %f"%agent.explore())
    def plot_reward(self, reward):
        """Append *reward* to the live matplotlib reward plot (requires matplotlib)."""
        self.plot_rewards.append(reward)
        if self.plot_epoch == 0:
            plt.ion()
            plt.clf()
            self.last_reward = reward
            plt.show()
        plt.plot([self.plot_epoch, self.plot_epoch+1], [self.last_reward, reward], color='b')
        plt.pause(0.00001)
        self.plot_epoch += 1
        self.last_reward = reward
class Policy():
    """Stateful wrapper turning an agent into a callable observation -> action policy.

    Keeps the agent's belief between calls; starts from an all-zero belief
    unless an initial one is supplied.
    """
    def __init__(self, agent, initial_belief=None):
        self.agent = agent
        self.internal_belief = (
            np.zeros(agent.obs_shape, dtype=np.float32)
            if initial_belief is None
            else initial_belief
        )
    def __call__(self, observation):
        """Update the internal belief with *observation* and return the agent's action."""
        belief = self.agent.guess(observation, self.internal_belief)
        self.internal_belief = belief
        return self.agent(belief)
# One environment transition: (s, a, r, s', done).
StepClass = namedtuple("Step", ["state", "action", "reward", "next_state", "done"])
def evalue_policy(policy, env, h_I=None):
    """Roll out *policy* in *env* until termination.

    Args:
        policy (callable): Maps a state to an action.
        env: Gym-like environment whose reset() accepts an ``obs`` kwarg.
        h_I: Optional initial observation forwarded to env.reset().
    Returns:
        list<StepClass>: The full episode trajectory, in order.
    """
    trajectory = []
    state = env.reset(obs=h_I)
    done = False
    while not done:
        action = policy(state)
        next_state, reward, done, _ = env.step(action)
        trajectory.append(StepClass(state, action, reward, next_state, done))
        state = next_state
    return trajectory
def compute_ema(ema0, y_value, alpha=0.1):
    """Single-step exponential moving average: ema_t+1 = (1-α)·ema + α·y.

    Args:
        ema0 (float): Previous EMA value.
        y_value (int|float): New sample to fold in.
        alpha (float): Smoothing factor in (0, 1].
    Returns:
        float: Updated EMA.
    """
    assert isinstance(y_value, (int, float)), "y_value has to be a number"
    return alpha * y_value + (1 - alpha) * ema0
def compute_ema_from_list(ema0, y_values, alpha=0.1):
    """Batched EMA update: ema_t+n = (1-α)^n·ema + α·Σ(1-α)^(n-1-i)·y_i.

    Equivalent to folding every element of *y_values* into the EMA one at a
    time, but computed in a single pass.
    """
    assert isinstance(y_values, list), "y_values has to be a list"
    decay = 1 - alpha
    n = len(y_values)
    weighted = sum([y * decay ** (n - 1 - i) for i, y in enumerate(y_values)])
    return decay ** n * ema0 + alpha * weighted
|
{"hexsha": "d36d1350a18f26a22464a88fef4dc930c61c8d47", "size": 30553, "ext": "py", "lang": "Python", "max_stars_repo_path": "drling/core.py", "max_stars_repo_name": "DavidDB33/dearling", "max_stars_repo_head_hexsha": "90ee28366f4c233939eb9c72995e7b1df23835e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-27T23:06:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T23:06:47.000Z", "max_issues_repo_path": "drling/core.py", "max_issues_repo_name": "DavidDB33/dearling", "max_issues_repo_head_hexsha": "90ee28366f4c233939eb9c72995e7b1df23835e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-10T01:51:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:51:56.000Z", "max_forks_repo_path": "drling/core.py", "max_forks_repo_name": "DavidDB33/drling", "max_forks_repo_head_hexsha": "90ee28366f4c233939eb9c72995e7b1df23835e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7991202346, "max_line_length": 182, "alphanum_fraction": 0.6313946257, "include": true, "reason": "import numpy", "num_tokens": 7190}
|
# TOPIC: Australian Tax Office - Tax Return Sample for Year 2013-14
# CATEGORY: Age
# TITLE: Australian Tax Return Income by Age Group (3D, Log)
# AUTHOR: George Paw
# DATE: November 2017
import sys
import os
import pandas as pd
import plotly.graph_objs as go
import plotly
import numpy as np
#custom imports
import ATO_helper
#getting path
# Location of the ATO 2013-14 individual sample file, resolved relative to the
# current working directory (presumably the repo root -- TODO confirm).
target_path = os.getcwd() + "/Sample_file_1314/2014_sample_file.csv"
df = pd.read_csv(target_path)
def ATO_AG_4(df):
    """Render a 3D ribbon plot of taxable-income distribution per age group.

    Bins each age group's Taxable_Income into $5000 buckets, normalises each
    group to percentages, and draws one ribbon per age group with plotly
    (log-scaled z axis).  Writes both an HTML file and a div snippet under
    ./output/.

    Assumes df has integer 'Taxable_Income' and 'age_range' columns matching
    ATO_helper.age_group's keys -- TODO confirm against the sample file schema.
    """
    # get the data from df as [ [Taxable_Income_0 for age_0, Taxable_Income_1 for age_0 ... ], [Taxable_Income_0 for age_1, Taxable_Income_1 for age_1 ... ], ... ]
    range_all = [0]*len(ATO_helper.age_group)
    for i in range(0, len(range_all)):
        range_all[i] = list(pd.DataFrame(df).query("age_range == {}".format(i))["Taxable_Income"])
    # convert it to histogram format as [ [# of people in bin_0 for age 0, # of people in bin_1 for age 0, ... ], [# of people in bin_0 for age 1, # of people in bin_0 for age 1, ... ]
    range_hist_all = [0]*len(ATO_helper.age_group)
    for age in range(0, len(range_all)):
        count, division = np.histogram(range_all[age], bins=list(range(0,df["Taxable_Income"].max(),5000))) #create the bin size, use the max as the end point
        range_hist_all[age] = count
    # convert it to a dataframe, transpose it, the dataframe has index as bins and columns as age_group
    df_new = pd.DataFrame(range_hist_all, columns=list(range(0,df["Taxable_Income"].max(),5000))[:-1], index=ATO_helper.age_group.values()) #reduce the bin list by 1, because bins is always one extra from result, as it is to cover the "remainding" data, since we found the max income in the data, there is no point of having the last bin
    df_new = df_new.transpose()
    # normalise df by age group
    df_sum_across_rows = df_new.sum(axis=0)
    df_new = 100 * df_new.div(df_sum_across_rows, axis=1)
    # reduce data points
    df_new = df_new.iloc[:80] # show only income between 0 and 200k, max data points is approx 1279
    # Drawing 3D - how it works: each data "line" needs to be drawn twice to produce a 2D plane ribbon
    # so therefore each data point is a tuple of 2 dimension, e.g. z = (10,10), y = (5,5), x = (1,2), x is two lines so it makes a plane
    traces=[0]*len(list(ATO_helper.age_group.values()))
    y_raw = list(range(0, df["Taxable_Income"].max(), 5000))[:-1]
    for age_col in range(0, len(list(ATO_helper.age_group.values()))):
        z_raw = df_new[ATO_helper.age_group[age_col]].tolist()
        x = []; y = []; z = []
        for j in range(0, len(z_raw)):
            # Duplicate each point so plotly draws a ribbon (a thin surface).
            z.append([z_raw[j], z_raw[j]])
            y.append([y_raw[j], y_raw[j]])
            x.append([age_col, age_col+0.75])
        traces[age_col] = go.Surface(
            name=ATO_helper.age_group[age_col],
            x=x,
            y=y,
            z=z,
            showscale=False,
            # Red-to-green colorscale spaced logarithmically (one decade per stop).
            colorscale= [
                [0, 'rgb(255, 0, 0)'], # 0
                [1. / 10000000, 'rgb(250, 15, 0)'], # 10
                [1. / 1000000, 'rgb(240, 30, 0)'], # 10
                [1. / 100000, 'rgb(230, 45, 0)'], # 10
                [1. / 10000, 'rgb(220, 75, 0)'], # 10
                [1. / 1000, 'rgb(200, 100, 0)'], # 100
                [1. / 100, 'rgb(180, 150, 0)'], # 1000
                [1. / 10, 'rgb(150, 200, 0)'], # 10000
                [1.0, 'rgb(0, 250, 0)'], # 100000
            ],
        )
    data = traces
    layout = go.Layout(
        title='Australian Tax Return Income<br>by Age Group (3D, Log)',
        autosize=True,
        scene=dict(
            xaxis=dict(title="Age Group", tickmode="array", tickvals=list(range(0,12)), ticktext=list(ATO_helper.age_group.values())),
            yaxis=dict(title="Income Bracket"),
            zaxis=dict(title="Percentage of Age Group Population", type="log")
        ),
        height = 667, width = 667,
    )
    fig = go.Figure(data=data, layout=layout)
    # Output files are named after the script itself (sys.argv[0] minus ".py").
    plotly.offline.plot(fig, filename="{}/output/{}.html".format(os.getcwd(), os.path.basename(sys.argv[0][:-3])))
    ATO_helper.save_div_text(plotly.offline.plot(fig, include_plotlyjs=False, output_type='div'), "{}/output/{}.txt".format(os.getcwd(), os.path.basename(sys.argv[0][:-3])))
    return 0
# Script-style entry point: run the analysis immediately on execution.
ATO_AG_4(df)
|
{"hexsha": "8ea99bd5210ed864b05f76cab51f7947f13df5ee", "size": 4284, "ext": "py", "lang": "Python", "max_stars_repo_path": "ATO_Analysis/ATO_AG_4.py", "max_stars_repo_name": "gpaw789/ATO_Analysis", "max_stars_repo_head_hexsha": "d3d3b9bd73953491d2cb3b2c9083eabc18c2190c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ATO_Analysis/ATO_AG_4.py", "max_issues_repo_name": "gpaw789/ATO_Analysis", "max_issues_repo_head_hexsha": "d3d3b9bd73953491d2cb3b2c9083eabc18c2190c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-01T21:57:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-01T21:57:50.000Z", "max_forks_repo_path": "ATO_Analysis/ATO_AG_4.py", "max_forks_repo_name": "gpaw789/ATO_Analysis", "max_forks_repo_head_hexsha": "d3d3b9bd73953491d2cb3b2c9083eabc18c2190c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.84, "max_line_length": 344, "alphanum_fraction": 0.6087768441, "include": true, "reason": "import numpy", "num_tokens": 1292}
|
import argparse
import copy
import math
import numpy as np
import pygame
from pygame.locals import *
from timeit import default_timer as timer
import traceback
import os
from minos.lib import common
from minos.config.sim_args import parse_sim_args
from minos.lib.Simulator import Simulator
from minos.lib.util.ActionTraces import ActionTraces
from minos.lib.util.StateSet import StateSet
from minos.lib.util.VideoWriter import VideoWriter
import random,time
# Supported strategies for replaying recorded action traces.
REPLAY_MODES = ['actions', 'positions']
# Global video writer; created in main() when --save_video is given.
VIDEO_WRITER = None
# Cache of reusable intermediate pygame surfaces, keyed by surf_key.
TMP_SURFS = {}
def blit_img_to_surf(img, surf, position=(0, 0), surf_key='*'):
    """Blit a numpy image array onto a pygame surface.

    A 2-D (grayscale) input is expanded to an opaque RGBA image; a 3-D
    input is assumed to be BGRA and is reordered to RGBA.  An intermediate
    surface is cached in TMP_SURFS per ``surf_key`` and reused while the
    image size stays the same.

    Args:
        img: 2-D (grayscale) or 3-D (BGRA) uint8 numpy array.
        surf: destination pygame surface.
        position: (x, y) top-left blit position on ``surf``.
        surf_key: cache key for the reusable intermediate surface.
    """
    global TMP_SURFS
    if len(img.shape) == 2:  # gray (y)
        img = np.dstack([img, img, img, np.ones(img.shape, dtype=np.uint8)*255])  # y -> yyy1
    else:
        img = img[:, :, [2, 1, 0, 3]]  # bgra -> rgba
    # NOTE(review): pygame surfaces take (width, height) while numpy arrays
    # are (rows, cols); this passes (rows, cols) directly -- presumably the
    # sensor images are square or already transposed. TODO confirm.
    img_shape = (img.shape[0], img.shape[1])
    TMP_SURF = TMP_SURFS.get(surf_key)
    if not TMP_SURF or TMP_SURF.get_size() != img_shape:
        # print('create new surf %dx%d' % img_shape)
        TMP_SURF = pygame.Surface(img_shape, 0, 32)
        TMP_SURFS[surf_key] = TMP_SURF
    bv = TMP_SURF.get_view("0")
    # tobytes() replaces tostring(), a deprecated alias removed in NumPy 2.0.
    bv.write(img.tobytes())
    del bv
    surf.blit(TMP_SURF, position)
def display_episode_info(episode_info, display_surf, camera_outputs, show_goals=False):
    """Print episode metadata once and, optionally, render the first goal view.

    Subsequent calls for the same episode are no-ops thanks to the
    'displayed' counter stored back into ``episode_info``.
    """
    times_shown = episode_info.get('displayed', 0)
    if times_shown < 1:
        summary = {key: episode_info[key] for key in episode_info if key != 'goalObservations'}
        print('episode_info', summary)
        if show_goals and 'goalObservations' in episode_info:
            # NOTE: There can be multiple goals with separate goal observations
            # for each; we currently just handle the first one.
            goal_obs = episode_info['goalObservations']
            if len(goal_obs) > 0:
                # Render the goal view but do not write it to the video stream.
                display_response(goal_obs[0], display_surf, camera_outputs,
                                 print_observation=False, write_video=False)
        episode_info['displayed'] = times_shown + 1
def draw_forces(forces, display_surf, area):
    """Visualize the force sensors as small circles ringed around the agent.

    Sensors reporting a non-zero force are drawn filled, the rest as
    outlines.  The widget area is cleared to black first.
    """
    radius = 5
    ring = round(0.45 * min(area.width, area.height) - radius)
    cx, cy = area.center
    pygame.draw.rect(display_surf, (0, 0, 0), area, 0)  # clear widget to black
    # Sensors are assumed to be radially positioned evenly around the agent.
    # TODO: Actually get force sensor positions and visualize them.
    sensor_count = forces.shape[0]
    step = -2 * math.pi / sensor_count
    theta = math.pi / 2
    for idx in range(sensor_count):
        px = round(cx + math.cos(theta) * ring)
        py = round(cy + math.sin(theta) * ring)
        fill_width = 0 if forces[idx] else 1
        pygame.draw.circle(display_surf, (255, 255, 0), (px, py), radius, fill_width)
        theta += step
def draw_offset(offset, display_surf, area, color=(0, 0, 255)):
    """Draw a compass-style arrow showing the horizontal direction of ``offset``.

    Uses the x and z components of the 3-vector; the direction is
    normalized when it has non-zero magnitude.
    """
    vx, vz = offset[0], offset[2]
    magnitude = math.sqrt(vx * vx + vz * vz)
    if magnitude:
        vx, vz = vx / magnitude, vz / magnitude
    radius = round(0.45 * min(area.width, area.height))
    center = area.center
    tip = (round(center[0] + vx * radius), round(center[1] + vz * radius))
    pygame.draw.rect(display_surf, (0, 0, 0), area, 0)  # clear widget to black
    pygame.draw.circle(display_surf, (255, 255, 255), center, radius, 0)
    pygame.draw.line(display_surf, color, center, tip, 1)
    pygame.draw.circle(display_surf, color, tip, 4, 0)
def display_response(response, display_surf, camera_outputs, print_observation=False, write_video=False):
    """Render one simulator response onto the display surface.

    Draws every camera image and widget listed in ``camera_outputs``,
    optionally dumps the observation to stdout, appends the color frame
    to the global VIDEO_WRITER, and plays any audio sensor data.

    Args:
        response: simulator step result; its 'observation' entry must hold
            'sensors' and 'measurements' dicts.
        display_surf: pygame surface to draw on.
        camera_outputs: mapping of output name -> layout config
            ('position' for images, 'area' for the offset/forces widgets).
        print_observation: when True, print the observation contents.
        write_video: when True, write the color frame via VIDEO_WRITER.
    """
    global VIDEO_WRITER
    observation = response.get('observation')
    sensor_data = observation.get('sensors')
    measurements = observation.get('measurements')
    # A value is printable unless it is a raw image/byte buffer.
    def printable(x): return type(x) is not bytearray and type(x) is not np.ndarray
    if observation is not None and print_observation:
        simple_observations = {k: v for k, v in observation.items() if k not in ['measurements', 'sensors']}
        dicts = [simple_observations, observation.get('measurements'), observation.get('sensors')]
        for d in dicts:
            for k, v in d.items():
                if type(v) is not dict:
                    info = '%s: %s' % (k,v)
                    # Truncate long entries to 75 chars, marking with '..'.
                    print(info[:75] + (info[75:] and '..'))
                else:
                    print('%s: %s' % (k, str({i: v[i] for i in v if printable(v[i])})))
        if 'forces' in sensor_data:
            print('forces: %s' % str(sensor_data['forces']['data']))
        if 'info' in response:
            print('info: %s' % str(response['info']))
    if 'offset' in camera_outputs:
        draw_offset(measurements.get('offset_to_goal'), display_surf, camera_outputs['offset']['area'])
    for obs, config in camera_outputs.items():
        if obs not in sensor_data:
            continue
        if obs == 'forces':
            draw_forces(sensor_data['forces']['data'], display_surf, config['area'])
            continue
        img = sensor_data[obs]['data']
        img_viz = sensor_data[obs].get('data_viz')
        if obs == 'depth':
            # NOTE: rescales the sensor buffer in place.
            img *= (255.0 / img.max())  # naive rescaling for visualization
            img = img.astype(np.uint8)
        elif img_viz is not None:
            img = img_viz
        blit_img_to_surf(img, display_surf, config.get('position'))
        # TODO: consider support for writing to video of all camera modalities together
        if obs == 'color':
            if write_video and VIDEO_WRITER:
                if len(img.shape) == 2:
                    VIDEO_WRITER.add_frame(np.dstack([img, img, img]))  # yyy
                else:
                    VIDEO_WRITER.add_frame(img[:, :, :-1])  # rgb (alpha dropped)
    if 'audio' in sensor_data:
        audio_data = sensor_data['audio']['data']
        pygame.sndarray.make_sound(audio_data).play()
        # pygame.mixer.Sound(audio_data).play()
def write_text(display_surf, text, position, font=None, fontname='monospace', fontsize=12, color=(255,255,224), align=None):
    """Render ``text`` onto ``display_surf``.

    text -> string of text.
    fontname -> system font name, used only when ``font`` is None.
    fontsize -> int, size of the font.
    color -> tuple, adhering to the color format in pygame.
    position -> (x, y) coordinate; the text center when align='center',
                otherwise the top-left corner.
    """
    if font is None:
        font = pygame.font.SysFont(fontname, fontsize)
    rendered = font.render(text, True, color)
    if align is None:
        display_surf.blit(rendered, position)
        return
    rect = rendered.get_rect()
    if align == 'center':
        rect.center = position[0], position[1]
    else:
        rect.topleft = position
    display_surf.blit(rendered, rect)
# --- Navigation state shared across generate_key_press() calls ---
# Last key code issued (119 == 'w', move forward).
previous_action = 119
# NOTE(review): never read or updated elsewhere in this file.
collision_counter = 10
# Timestamp of the last fps report inside interactive_loop().
final_time = 0
# Number of random recovery steps taken since tracking was last good.
count=0
def get_angle(x_vector, y_vector):
    """Return the goal heading in degrees, shifted by 180 out of atan2's range.

    Positive atan2 results map to (angle - 180), non-positive ones to
    (angle + 180), yielding a value in (-180, 180].  When ``x_vector``
    is zero the heading is reported as 0.0.
    """
    if abs(x_vector) > 0:
        raw = math.atan2(y_vector, x_vector)*180/math.pi
        if raw > 0:
            return raw - 180
        return raw + 180
    #print('Direction to goal: ', angle)
    return 0.0
def get_random_action():
    """Return a random movement key code.

    Codes: 119/115 = forward/back, 97/100 = strafe left/right,
    276/275 = turn left/right (pygame key codes).
    """
    # random.choice replaces the old randint-and-index form, whose
    # IndexError fallback was unreachable: randint(0, 5) always yields
    # a valid index into the six-element list.
    return random.choice([119, 115, 97, 100, 276, 275])
def classifier():
    """Run the external single-image classifier and map its label to a key code.

    Executes /home/romi/SingleImageClassifier.py, reads the predicted label
    from /home/romi/abc2.txt, and returns the matching key code as a string
    ("119" forward, "115" back, "275" CW, "276" CCW, "100" right, "97" left),
    or None for an unrecognized label (same as the original fall-through).
    """
    print("Running Classifier")
    os.system('/home/romi/SingleImageClassifier.py')
    # 'with' closes the handle; the original leaked the open file object.
    with open("/home/romi/abc2.txt", 'r') as file:
        text = file.read()
    # Dispatch table replaces the if/elif chain; values unchanged.
    key_for_label = {
        "Forward": "119",
        "Back": "115",
        "CW": "275",
        "CCW": "276",
        "Right": "100",
        "Left": "97",
    }
    return key_for_label.get(text)
# Alternates generate_key_press() between a forced forward step (even
# counts) and angle-correcting behaviour (odd counts).
normal_counter = 0
def generate_key_press(has_collided, direction, distance):
    """Choose the next navigation key press automatically.

    Every other call (tracked via the global ``normal_counter``) forces a
    forward step; otherwise the agent turns toward the goal heading, and
    once the heading error is under 13 degrees it moves forward — or takes
    a random action if the last step collided.

    Args:
        has_collided: whether the previous simulator step collided.
        direction: 3-vector toward the goal; components [2] and [0] are
            used as x/y for the heading computation.
        distance: remaining distance to the goal.

    Returns:
        A 323-tuple of ints mimicking pygame's key-state array, with
        exactly one entry set to 1.
    """
    global previous_action, collision_counter, normal_counter
    # Throttle the control rate; presumably to let the external SLAM
    # tracker keep up -- TODO confirm.
    time.sleep(0.8)
    if normal_counter%2 == 0:
        # Even calls: always step forward.
        next_action = 119
    else:
        angle = get_angle(x_vector=direction[2], y_vector=direction[0])
        #print('Angle: ', angle)
        if abs(angle) < 13:
            # The angle difference is very Small (Keep Moving Unless You Collide)
            next_action = 119
            if has_collided:
                print('Collision Detected: Taking Random Action')
                next_action = get_random_action()
            else:
                print('No Collision: Moving Forward')
                next_action = 119
        else:
            # Need to Adjust Angle
            print('Adjusting Angle')
            if angle > 0:
                # Turn Left
                next_action = 276
            else:
                # Turn Right
                next_action = 275
    if distance < 0.3:
        print("Goal Reached")
        time.sleep(3)
        # FIXME(review): scene_index, args, scene_dataset and sim are not
        # defined in this function's scope (and assigning scene_index below
        # makes it local), so reaching this branch raises
        # UnboundLocalError/NameError. Fixing it requires passing sim/args in.
        scene_index = (scene_index + 1) % len(args.scene_ids)
        scene_id = args.scene_ids[scene_index]
        id = scene_dataset + '.' + scene_id
        print('next_scene loading %s ...' % id)
        sim.set_scene(id)
        sim.episode_info = sim.start()
    normal_counter = normal_counter + 1
    previous_action = next_action
    # Build a fake pygame key-state array with only next_action pressed.
    empty_keys = np.zeros(323, dtype='i')
    empty_keys[next_action] = 1
    return tuple(empty_keys)
def interactive_loop(sim, args):
    """Main UI/control loop of the navigation client.

    Builds the pygame display layout, then repeatedly: reads the SLAM
    tracking state from /home/romi/abc.txt, generates key presses
    automatically (or reads the keyboard / replays an action trace),
    steps the simulator and renders every sensor output.

    Args:
        sim: started Simulator instance.
        args: parsed simulator arguments (observations, scene ids,
            replay settings, width/height, ...).
    """
    # initialize
    pygame.mixer.pre_init(frequency=8000, channels=1)
    pygame.init()
    pygame.key.set_repeat(500, 50)  # delay, interval
    clock = pygame.time.Clock()

    # Set up display
    font_spacing = 20
    display_height = args.height + font_spacing*3
    all_camera_observations = ['color', 'depth', 'normal', 'objectId', 'objectType', 'roomId', 'roomType']
    label_positions = {
        'curr': {},
        'goal': {}
    }
    camera_outputs = {
        'curr': {},
        'goal': {}
    }
    # row with observations and goals
    nimages = 0
    for obs in all_camera_observations:
        if args.observations.get(obs):
            label_positions['curr'][obs] = (args.width*nimages, font_spacing*2)
            camera_outputs['curr'][obs] = { 'position': (args.width*nimages, font_spacing*3) }
            if args.show_goals:
                label_positions['goal'][obs] = (args.width*nimages, display_height + font_spacing*2)
                camera_outputs['goal'][obs] = { 'position': (args.width*nimages, display_height + font_spacing*3) }
            nimages += 1
    global final_time
    global count
    if args.show_goals:
        display_height += args.height + font_spacing*3
    # Row with offset and map
    plot_size = max(min(args.height, 128), 64)
    display_height += font_spacing
    label_positions['curr']['offset'] = (0, display_height)
    camera_outputs['curr']['offset'] = { 'area': pygame.Rect(0, display_height + font_spacing, plot_size, plot_size)}
    next_start_x = plot_size
    if args.observations.get('forces'):
        label_positions['curr']['forces'] = (next_start_x, display_height)
        camera_outputs['curr']['forces'] = { 'area': pygame.Rect(next_start_x, display_height + font_spacing, plot_size, plot_size)}
        next_start_x += plot_size
    if args.observations.get('map'):
        label_positions['map'] = (next_start_x, display_height)
        camera_outputs['map'] = { 'position': (next_start_x, display_height + font_spacing) }
    display_height += font_spacing
    display_height += plot_size

    display_shape = [max(args.width * nimages, next_start_x), display_height]
    display_surf = pygame.display.set_mode((display_shape[0], display_shape[1]), pygame.RESIZABLE | pygame.DOUBLEBUF)

    # Write text labels for each panel of the display.
    label_positions['title'] = (display_shape[0]/2, font_spacing/2)
    write_text(display_surf, 'MINOS', fontsize=20, position = label_positions['title'], align='center')
    write_text(display_surf, 'dir_to_goal', position = label_positions['curr']['offset'])
    if args.observations.get('forces'):
        write_text(display_surf, 'forces', position = label_positions['curr']['forces'])
    if args.observations.get('map'):
        write_text(display_surf, 'map', position = label_positions['map'])
    write_text(display_surf, 'observations | controls: WASD+Arrows', position = (0, font_spacing))
    if args.show_goals:
        write_text(display_surf, 'goal', position = (0, args.height + font_spacing*3 + font_spacing))
    for obs in all_camera_observations:
        if args.observations.get(obs):
            write_text(display_surf, obs, position = label_positions['curr'][obs])
            if args.show_goals:
                write_text(display_surf, obs, position = label_positions['goal'][obs])

    # Other initialization
    scene_index = 0
    scene_dataset = args.scene.dataset
    init_time = timer()
    num_frames = 0
    prev_key = ''
    replay = args.replay
    action_traces = args.action_traces
    action_trace = action_traces.curr_trace() if action_traces is not None else None
    replay_auto = False
    replay_mode = args.replay_mode
    replay_mode_index = REPLAY_MODES.index(replay_mode)
    print('***\n***')
    print('CONTROLS: WASD+Arrows = move agent, R = respawn, N = next state/scene, O = print observation, Q = quit')
    if replay:
        print('P = toggle auto replay, E = toggle replay using %s '
              % str([m + ('*' if m == replay_mode else '') for m in REPLAY_MODES]))
    print('***\n***')
    has_collided=False
    direction=[0.0,0.0,0.0]
    distance=0.0
    total_frames=0
    # Hard-coded spawn point for the current house; see the coordinate
    # notes in the string block below.
    pos= [10.8726722805803, 3.17766, -1.0168381949239895]
    ang=4.88692 #angle in radian
    tilt=0 #tilt angle in radian (keep it zero)
    print('\nMoving to Starting Point\t',pos,ang)
    sim.move_to(pos,ang,tilt) #define starting point here by pressing 'v'v
    # Reference coordinates for the various test houses (kept as a bare
    # string so they are easy to copy into `pos` above).
    '''
    House 17DRP5sb8fy
    Point A:
    [1.3307827641472185, 0.53861988, -10.146044853235205]
    [3.5180930438444764, 0.566234, -1.8922092359314986] dining room'
    [-1.4967893299263657, 0.5536211000000001, -9.28489025596529]
    Point B:
    [2.4621904181013585, 0.5086211, 1.905232439043702]
    [1.5371551074831127, 0.566234, -7.069940355796163] bedroom
    [2.614761534032291, 0.5443598460000001, 2.082176819455741]
    House JeFG25nYj2p
    Point A:
    [-6.74225409821948, 0.55074358, 9.639630625521972] lounge
    [-4.808781002856764, 0.579616, -3.2566622905687566]
    A: [6.161385541472788, 0.55074358, 1.0423787109815281]
    [-9.969136013947448, 0.5890924000000001, 4.443266425597109]
    [-3.8024725023701844, 0.5667730000000001, -3.897040174086085] hallway
    Point B: (Set in env_config file)
    [5.324160089316311, 0.5293569, 1.0104915805896062]
    [0.09539571149637265, 0.579616, 8.280566401485888] familyroom/lounge
    [3.234156347243452, 0.54744289, 5.132335702885289] kitchen
    B: [-5.547085140683584, 0.55074358, -5.553243897709698]
    House ZMojNkEp431
    Point A:
    [-1.648939148401732, -0.316777, 21.77250735917539]
    [-7.257402680352993, -0.316777, 22.62233552621379]
    Point B:
    [1.5515085926612524, -0.316777, 27.586585491887465]
    House q9vSo1VnCiC
    Point A:
    [-9.295046451025712, 0.54650591, 9.32940161221944]
    Point B:
    [-2.93651621783526, 0.5278996, -8.961261587263957]
    House YVUC4YcDtcY (prob)
    Point A:
    [-22.444534161392085, 0.54291507, -16.989977211158624]
    Point B:
    [-30.20203545489092, 0.54291507, -7.346167078006564]
    House qoiz87JEwZ2
    Point A:
    [4.309296503924577, 0.69781, -2.2073896572614493]
    Point B:
    [12.669347833403256, -2.7127, -0.9483520109703518]
    '''
    while sim.running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sim.running = False

        # Read the SLAM tracking state written by the external tracker.
        # NOTE(review): this handle is opened every iteration and never
        # closed (resource leak).
        fd = "/home/romi/abc.txt"
        file = open(fd, 'r')
        text = file.read()
        text3="Tracking"
        text2=""
        text1="Track is lost"
        #print("Text :", text)
        #print("Count : " ,count)
        time_taken = timer() - final_time
        print(' fps=%f' % ( num_frames / time_taken))
        total_frames=total_frames+num_frames
        num_frames=0
        final_time=timer()
        # NOTE(review): if the first branch is taken, `keys` is not assigned
        # this iteration -- the stale value from the previous iteration is
        # reused (and the very first iteration would raise NameError).
        if text==text3 and count >2:
            count=0
        elif text==text2 or text==text3 or count >=6:
            if count>=10:
                print('Failed Case, please recover tracks manually\n')
                keys = pygame.key.get_pressed()
            else:
                #keys = pygame.key.get_pressed()
                keys = generate_key_press(has_collided, direction,distance)
                #print('key pressed',action['name'])
                #time.sleep(0.5)
                # Truncate the tracker exchange files to acknowledge the state.
                open('/home/romi/abc2.txt', 'w').close()
                open('/home/romi/abc.txt', 'w').close()
        elif text==text1:
            open('/home/romi/abc2.txt', 'w').close()
            open('/home/romi/abc.txt', 'w').close()
            print("\nTaking Random Steps to Recover\n")
            text4 = generate_key_press(has_collided, direction,distance)
            open('/home/romi/abc2.txt', 'w').close()
            open('/home/romi/abc.txt', 'w').close()
            empty_keys = np.zeros(323, dtype='i')
            # NOTE(review): text4 is a 323-tuple returned by
            # generate_key_press, not a scalar key code -- indexing a 1-D
            # array with it looks wrong; verify this branch is exercised.
            empty_keys[text4] = 1
            keys= tuple(empty_keys)
            count+=1
            print("\nNumber of Random Movements done : ",count)
        #keys = pygame.key.get_pressed()
        print_next_observation = False
        # NOTE(review): `prev_key is not 'n'` relies on CPython string
        # interning; should be != / == comparisons.
        if keys[K_q]:
            break
        if keys[K_o]:
            print_next_observation = True
        elif keys[K_n]:
            prev_key = 'n' if prev_key is not 'n' else ''
            if 'state_set' in args and prev_key is 'n':
                state = args.state_set.get_next_state()
                if not state:  # roll over to beginning
                    print('Restarting from beginning of states file...')
                    state = args.state_set.get_next_state()
                id = scene_dataset + '.' + state['scene_id']
                print('next_scene loading %s ...' % id)
                sim.set_scene(id)
                sim.move_to(state['start']['position'], state['start']['angle'])
                sim.episode_info = sim.start()
            elif prev_key is 'n':
                scene_index = (scene_index + 1) % len(args.scene_ids)
                scene_id = args.scene_ids[scene_index]
                id = scene_dataset + '.' + scene_id
                print('next_scene loading %s ...' % id)
                sim.set_scene(id)
                sim.episode_info = sim.start()
        elif keys[K_v]:
            prev_key = 'v' if prev_key is not 'v' else ''
            if prev_key is 'v':
                pos=[0.37536343739988054, 0.49121938, 1.7367364232544902]
                ang=4.88692 #angle in radian
                tilt=0 #tilt angle in radian (keep it zero)
                print('\nMoving to Starting Point\t',pos,ang)
                sim.move_to(pos,ang,tilt) #define starting point here by pressing 'v'v
        elif keys[K_r]:
            prev_key = 'r' if prev_key is not 'r' else ''
            if prev_key is 'r':
                sim.episode_info = sim.reset()
        else:
            # Figure out action
            action = {'name': 'idle', 'strength': 1, 'angle': math.radians(5)}
            actions = []
            if replay:
                unprocessed_keypressed = any(keys)
                if keys[K_p]:
                    prev_key = 'p' if prev_key is not 'p' else ''
                    if prev_key == 'p':
                        replay_auto = not replay_auto
                        unprocessed_keypressed = False
                elif keys[K_e]:
                    prev_key = 'e' if prev_key is not 'e' else ''
                    if prev_key == 'e':
                        replay_mode_index = (replay_mode_index + 1) % len(REPLAY_MODES)
                        replay_mode = REPLAY_MODES[replay_mode_index]
                        unprocessed_keypressed = False
                        print('Replay using %s' % replay_mode)
                if replay_auto or unprocessed_keypressed:
                    # get next action and do it
                    rec = action_trace.next_action_record()
                    if rec is None:
                        # go to next trace
                        action_trace = action_traces.next_trace()
                        start_state = action_trace.start_state()
                        print('start_state', start_state)
                        sim.configure(start_state)
                        sim.episode_info = sim.start()
                    else:
                        if replay_mode == 'actions':
                            actnames = rec['actions'].split('+')
                            for actname in actnames:
                                if actname != 'reset':
                                    act = copy.copy(action)
                                    act['name'] = actname
                                    actions.append(act)
                        elif replay_mode == 'positions':
                            sim.move_to([rec['px'], rec['py'], rec['pz']], rec['rotation'])
            else:
                # Map the (possibly synthetic) key state to a simulator action.
                if keys[K_w]:
                    action['name'] = 'forwards'
                    print('Action: Forward')
                elif keys[K_s]:
                    action['name'] = 'backwards'
                    print('Action: Backward')
                elif keys[K_LEFT]:
                    # ASCII Code 276
                    action['name'] = 'turnLeft'
                    print('Action: Rotate Left')
                elif keys[K_RIGHT]:
                    # ASCII Code 275
                    action['name'] = 'turnRight'
                    print('Action: Rotate Right')
                elif keys[K_a]:
                    action['name'] = 'strafeLeft'
                    print('Action: Strafe Left')
                elif keys[K_d]:
                    action['name'] = 'strafeRight'
                    print('Action: Strafe Right')
                elif keys[K_UP]:
                    action['name'] = 'lookUp'
                elif keys[K_DOWN]:
                    action['name'] = 'lookDown'
                else:
                    action['name'] = 'idle'
                actions = [action]

            # step simulator and get observation
            response = sim.step(actions, 1)
            if response is None:
                break

            display_episode_info(sim.episode_info, display_surf, camera_outputs['goal'], show_goals=args.show_goals)

            # Handle map and goal measurements for the auto-navigation state.
            observation = response.get('observation')
            direction = observation['measurements']['shortest_path_to_goal']['direction']
            distance = observation['measurements']['shortest_path_to_goal']['distance']
            has_collided = observation['collision']
            map = observation.get('map')
            print('Total Distance Remaining ',distance)
            if map is not None:
                # TODO: handle multiple maps
                if isinstance(map, list):
                    map = map[0]
                config = camera_outputs['map']
                img = map['data']
                rw = map['shape'][0] + config.get('position')[0]
                rh = map['shape'][1] + config.get('position')[1]
                w = display_surf.get_width()
                h = display_surf.get_height()
                if w < rw or h < rh:
                    # Resize display (copying old stuff over)
                    old_display_surf = display_surf.convert()
                    display_surf = pygame.display.set_mode((max(rw,w), max(rh,h)), pygame.RESIZABLE | pygame.DOUBLEBUF)
                    display_surf.blit(old_display_surf, (0,0))
                    write_text(display_surf, 'map', position = label_positions['map'])
                blit_img_to_surf(img, display_surf, config.get('position'), surf_key='map')

            # Handle other response
            display_response(response, display_surf, camera_outputs['curr'], print_observation=print_next_observation, write_video=True)
            pygame.display.flip()
            num_frames += 1
            clock.tick(30)  # constraint to max 30 fps

    # NOTE: log_action_trace handled by javascript side
    # if args.log_action_trace:
    #     trace = sim.get_action_trace()
    #     print(trace['data'])

    # cleanup and quit
    time_taken = timer() - init_time
    print('time=%f sec, fps=%f' % (time_taken, total_frames / time_taken))
    print('Thank you for playing - Goodbye!')
    pygame.quit()
def main():
    """Parse command-line arguments, set up the simulator (and optional
    video writer / state set / action-trace replay), then run the
    interactive loop and clean up."""
    global VIDEO_WRITER
    parser = argparse.ArgumentParser(description='Interactive interface to Simulator')
    parser.add_argument('--navmap', action='store_true',
                        default=False,
                        help='Use navigation map')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--state_set_file',
                       help='State set file')
    group.add_argument('--replay',
                       help='Load and replay action trace from file')
    # --replay_mode and --show_goals used to be added to the mutually
    # exclusive group above, which made argparse reject them alongside
    # --replay -- even though replay_mode is only meaningful when replaying
    # and show_goals is orthogonal. They are plain options now.
    parser.add_argument('--replay_mode',
                        choices=REPLAY_MODES,
                        default='positions',
                        help='Use actions or positions for replay')
    parser.add_argument('--show_goals', action='store_true',
                        default=False,
                        help='show goal observations')
    args = parse_sim_args(parser)
    args.visualize_sensors = True
    sim = Simulator(vars(args))
    common.attach_exit_handler(sim)
    if 'state_set_file' in args and args.state_set_file is not None:
        args.state_set = StateSet(args.state_set_file, 1)
    if 'save_video' in args and len(args.save_video):
        filename = args.save_video if type(args.save_video) is str else 'out.mp4'
        is_rgb = args.color_encoding == 'rgba'
        VIDEO_WRITER = VideoWriter(filename, framerate=24, resolution=(args.width, args.height), rgb=is_rgb)
    if 'replay' in args and args.replay is not None:
        print('Initializing simulator using action traces %s...' % args.replay)
        args.action_traces = ActionTraces(args.replay)
        action_trace = args.action_traces.next_trace()
        sim.init()
        start_state = action_trace.start_state()
        print('start_state', start_state)
        sim.configure(start_state)
    else:
        args.action_traces = None
        args.replay = None
    try:
        print('Starting simulator...')
        ep_info = sim.start()
        if ep_info:
            print('observation_space', sim.get_observation_space())
            sim.episode_info = ep_info
            print('Simulator started.')
            interactive_loop(sim, args)
    except Exception:
        # Narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit.
        traceback.print_exc()
        print('Error running simulator. Aborting.')
    finally:
        # Cleanup now runs even when KeyboardInterrupt propagates.
        if sim is not None:
            sim.kill()
            del sim
        if VIDEO_WRITER is not None:
            VIDEO_WRITER.close()
# Script entry point.
if __name__ == "__main__":
    main()
|
{"hexsha": "31d2ab166c0f2ba83dc6cfcec737e8fabd37f864", "size": 26783, "ext": "py", "lang": "Python", "max_stars_repo_path": "MINOS_Navigation.py", "max_stars_repo_name": "ans-qureshi/SLAM-Recovery", "max_stars_repo_head_hexsha": "18337d886d4027cd4b485b50bc7ac32e6e74c4d5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MINOS_Navigation.py", "max_issues_repo_name": "ans-qureshi/SLAM-Recovery", "max_issues_repo_head_hexsha": "18337d886d4027cd4b485b50bc7ac32e6e74c4d5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MINOS_Navigation.py", "max_forks_repo_name": "ans-qureshi/SLAM-Recovery", "max_forks_repo_head_hexsha": "18337d886d4027cd4b485b50bc7ac32e6e74c4d5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6479076479, "max_line_length": 132, "alphanum_fraction": 0.5867901281, "include": true, "reason": "import numpy", "num_tokens": 6646}
|
% Default to the notebook output style
% Inherit from the specified cell style.
\documentclass[11pt]{article}
\usepackage[T1]{fontenc}
% Nicer default font (+ math font) than Computer Modern for most use cases
\usepackage{mathpazo}
% Basic figure setup, for now with no caption control since it's done
% automatically by Pandoc (which extracts  syntax from Markdown).
\usepackage{graphicx}
% We will generate all images so they have a width \maxwidth. This means
% that they will get their normal width if they fit onto the page, but
% are scaled down if they would overflow the margins.
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth
\else\Gin@nat@width\fi}
\makeatother
\let\Oldincludegraphics\includegraphics
% Set max figure width to be 80% of text width, for now hardcoded.
\renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=.8\maxwidth]{#1}}
% Ensure that by default, figures have no caption (until we provide a
% proper Figure object with a Caption API and a way to capture that
% in the conversion process - todo).
\usepackage{caption}
\DeclareCaptionLabelFormat{nolabel}{}
\captionsetup{labelformat=nolabel}
\usepackage{adjustbox} % Used to constrain images to a maximum size
\usepackage{xcolor} % Allow colors to be defined
\usepackage{enumerate} % Needed for markdown enumerations to work
\usepackage{geometry} % Used to adjust the document margins
\usepackage{amsmath} % Equations
\usepackage{amssymb} % Equations
\usepackage{textcomp} % defines textquotesingle
% Hack from http://tex.stackexchange.com/a/47451/13684:
\AtBeginDocument{%
\def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code
}
\usepackage{upquote} % Upright quotes for verbatim code
\usepackage{eurosym} % defines \euro
\usepackage[mathletters]{ucs} % Extended unicode (utf-8) support
\usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document
\usepackage{fancyvrb} % verbatim replacement that allows latex
\usepackage{grffile} % extends the file name processing of package graphics
% to support a larger range
% The hyperref package gives us a pdf with properly built
% internal navigation ('pdf bookmarks' for the table of contents,
% internal cross-reference links, web links for URLs, etc.)
\usepackage{hyperref}
\usepackage{longtable} % longtable support required by pandoc >1.10
\usepackage{booktabs} % table support for pandoc > 1.12.2
\usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment)
\usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout)
% normalem makes italics be italics, not underlines
% Colors for the hyperref package
\definecolor{urlcolor}{rgb}{0,.145,.698}
\definecolor{linkcolor}{rgb}{.71,0.21,0.01}
\definecolor{citecolor}{rgb}{.12,.54,.11}
% ANSI colors
\definecolor{ansi-black}{HTML}{3E424D}
\definecolor{ansi-black-intense}{HTML}{282C36}
\definecolor{ansi-red}{HTML}{E75C58}
\definecolor{ansi-red-intense}{HTML}{B22B31}
\definecolor{ansi-green}{HTML}{00A250}
\definecolor{ansi-green-intense}{HTML}{007427}
\definecolor{ansi-yellow}{HTML}{DDB62B}
\definecolor{ansi-yellow-intense}{HTML}{B27D12}
\definecolor{ansi-blue}{HTML}{208FFB}
\definecolor{ansi-blue-intense}{HTML}{0065CA}
\definecolor{ansi-magenta}{HTML}{D160C4}
\definecolor{ansi-magenta-intense}{HTML}{A03196}
\definecolor{ansi-cyan}{HTML}{60C6C8}
\definecolor{ansi-cyan-intense}{HTML}{258F8F}
\definecolor{ansi-white}{HTML}{C5C1B4}
\definecolor{ansi-white-intense}{HTML}{A1A6B2}
% commands and environments needed by pandoc snippets
% extracted from the output of `pandoc -s`
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\newenvironment{Shaded}{}{}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\NormalTok}[1]{{#1}}
% Additional commands for more recent versions of Pandoc
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}}
\newcommand{\ImportTok}[1]{{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}}
\newcommand{\BuiltInTok}[1]{{#1}}
\newcommand{\ExtensionTok}[1]{{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
% Define a nice break command that doesn't care if a line doesn't already
% exist.
\def\br{\hspace*{\fill} \\* }
% MathJax compatibility definitions
\def\gt{>}
\def\lt{<}
% Document parameters
\title{Writeup}
% Pygments definitions
\makeatletter
\def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax%
\let\PY@ul=\relax \let\PY@tc=\relax%
\let\PY@bc=\relax \let\PY@ff=\relax}
\def\PY@tok#1{\csname PY@tok@#1\endcsname}
\def\PY@toks#1+{\ifx\relax#1\empty\else%
\PY@tok{#1}\expandafter\PY@toks\fi}
\def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{%
\PY@it{\PY@bf{\PY@ff{#1}}}}}}}
\def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}}
\expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}}
\expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}
\expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
\expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
\expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf}
\expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
\expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}}
\expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}}
\expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit}
\expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@fm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@dl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}}
\expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}}
\expandafter\def\csname PY@tok@sa\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}}
\def\PYZbs{\char`\\}
\def\PYZus{\char`\_}
\def\PYZob{\char`\{}
\def\PYZcb{\char`\}}
\def\PYZca{\char`\^}
\def\PYZam{\char`\&}
\def\PYZlt{\char`\<}
\def\PYZgt{\char`\>}
\def\PYZsh{\char`\#}
\def\PYZpc{\char`\%}
\def\PYZdl{\char`\$}
\def\PYZhy{\char`\-}
\def\PYZsq{\char`\'}
\def\PYZdq{\char`\"}
\def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@}
\def\PYZlb{[}
\def\PYZrb{]}
\makeatother
% Exact colors from NB
\definecolor{incolor}{rgb}{0.0, 0.0, 0.5}
\definecolor{outcolor}{rgb}{0.545, 0.0, 0.0}
% Prevent overflowing lines due to hard-to-break entities
\sloppy
% Setup hyperref package
\hypersetup{
breaklinks=true, % so long urls are correctly broken across lines
colorlinks=true,
urlcolor=urlcolor,
linkcolor=linkcolor,
citecolor=citecolor,
}
% Slightly bigger margins than the latex defaults
\geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in}
\begin{document}
\maketitle
\hypertarget{behavioral-cloning}{%
\section{\texorpdfstring{\textbf{Behavioral
Cloning}}{Behavioral Cloning}}\label{behavioral-cloning}}
\hypertarget{writeup-report}{%
\subsection{Writeup Report}\label{writeup-report}}
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\textbf{Behavioral Cloning Project}
The goals / steps of this project are the following: * Use the simulator
to collect data of good driving behavior * Build, a convolution neural
network in Keras that predicts steering angles from images * Train and
validate the model with a training and validation set * Test that the
model successfully drives around track one without leaving the road *
Summarize the results with a written report
\hypertarget{rubric-points}{%
\subsection{Rubric Points}\label{rubric-points}}
\hypertarget{here-i-will-consider-the-rubric-points-individually-and-describe-how-i-addressed-each-point-in-my-implementation.}{%
\subsubsection{\texorpdfstring{Here I will consider the
\href{https://review.udacity.com/\#!/rubrics/432/view}{rubric points}
individually and describe how I addressed each point in my
implementation.}{Here I will consider the rubric points individually and describe how I addressed each point in my implementation.}}\label{here-i-will-consider-the-rubric-points-individually-and-describe-how-i-addressed-each-point-in-my-implementation.}}
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\hypertarget{files-submitted-code-quality}{%
\subsubsection{Files Submitted \& Code
Quality}\label{files-submitted-code-quality}}
\hypertarget{submission-includes-all-required-files-and-can-be-used-to-run-the-simulator-in-autonomous-mode}{%
\paragraph{1. Submission includes all required files and can be used to
run the simulator in autonomous
mode}\label{submission-includes-all-required-files-and-can-be-used-to-run-the-simulator-in-autonomous-mode}}
My project includes the following files: * model.py containing the
script to create and train the model * drive.py for driving the car in
autonomous mode * model.h5 containing a trained convolution neural
network * writeup\_report.md and writeup\_report.pdf summarizing the
results * weights.h5 from previous training * run1.mp4 containing the
video of Autonomous driving using my model.
\hypertarget{submission-includes-functional-code}{%
\paragraph{2. Submission includes functional
code}\label{submission-includes-functional-code}}
Using the Udacity provided simulator and my drive.py file, the car can
be driven autonomously around the track by executing
\begin{Shaded}
\begin{Highlighting}[]
\ExtensionTok{python}\NormalTok{ drive.py model.h5}
\end{Highlighting}
\end{Shaded}
\hypertarget{submission-code-is-usable-and-readable}{%
\paragraph{3. Submission code is usable and
readable}\label{submission-code-is-usable-and-readable}}
The model.py file contains the code for training and saving the
convolution neural network. The file shows the pipeline I used for
training and validating the model, and it contains comments to explain
how the code works.
\hypertarget{model-architecture-and-training-strategy}{%
\subsubsection{Model Architecture and Training
Strategy}\label{model-architecture-and-training-strategy}}
\hypertarget{an-appropriate-model-architecture-has-been-employed}{%
\paragraph{1. An appropriate model architecture has been
employed}\label{an-appropriate-model-architecture-has-been-employed}}
The top layer is a Cropping2D layer which crops top and bottom parts of
the input image.
My model consists of 5 Convolutional layers followed by one Maxpooling
and 4 Fully Connected(Dense) layers. The convolutional layers are
designed for feature extraction. First 3 convolutional layers are
designed with a 5x5 kernel and 2x2 strides and depths 24,36,48
respectively.(code line 59-61). The other 2 convolutional layers are
non-strided with a 3x3 kernel and with a depth of 64(code line 63-64).
Then I have used a MaxPooling layers with a pool size of (2,2)(Code line
65). I flattened the network using keras Flatten() function(Code line
66). These layers are followed by 4 Dense layers with sizes 100,50,10
and 1 (code line 68-74). In the model I have used RELU activations to
introduce nonlinearity, and the data is normalized in the model using a
Keras lambda layer (code line 57).
\hypertarget{attempts-to-reduce-overfitting-in-the-model}{%
\paragraph{2. Attempts to reduce overfitting in the
model}\label{attempts-to-reduce-overfitting-in-the-model}}
The model contains dropout layers in order to reduce overfitting
(model.py lines 71 \& 73).
The model was trained and validated on different data sets to ensure
that the model was not overfitting. The model was tested by running it
through the simulator and ensuring that the vehicle could stay on the
track.
\hypertarget{model-parameter-tuning}{%
\paragraph{3. Model parameter tuning}\label{model-parameter-tuning}}
The model used an adam optimizer with a decaying learning rate from
keras model callbacks. (model.py line 76).
\hypertarget{appropriate-training-data}{%
\paragraph{4. Appropriate training
data}\label{appropriate-training-data}}
Training data was chosen to keep the vehicle driving on the road. I used
a combination of center lane driving, recovering from the left and right
sides of the road. I have collected the data by driving two laps on
track 1 and two laps on track 2.
For details about how I created the training data, see the next section.
\hypertarget{model-architecture-and-training-strategy-1}{%
\subsubsection{Model Architecture and Training
Strategy}\label{model-architecture-and-training-strategy-1}}
\hypertarget{solution-design-approach}{%
\paragraph{1. Solution Design Approach}\label{solution-design-approach}}
My first step was to use a convolution neural network model similar to
the NVIDIA autonomous car architecture. I thought this model might be
appropriate because it has 5 convolutional layers for feature extraction
followed by fully connected layers to estimate the steering angle.
In order to gauge how well the model was working, I split my image and
steering angle data into a training and validation set. I found that my
first model had a low mean squared error on the training set but a high
mean squared error on the validation set. This implied that the model
was overfitting.
To combat the overfitting, I have added a MaxPooling layer before
flattening the data and added dropout layers with probability 0.5. This
reduced the validation loss from 0.1496 to 0.0547.
Then I ran the model for 5 epochs and save the weights from it to use in
next training. Next I loaded the weights and trained the model for
another 10 epochs it drastically reduced the training loss and
validation loss.
The final step was to run the simulator to see how well the car was
driving around track one. There were a few spots where the vehicle fell
off the track. To improve the driving behavior in these cases, I have
added more data from the left and right cameras with a correction of 0.15. I
have also added more data by flipping the images horizontally to
generalize the model better.
At the end of the process, the vehicle is able to drive autonomously
around the track without leaving the road.
\hypertarget{final-model-architecture}{%
\paragraph{2. Final Model Architecture}\label{final-model-architecture}}
The final model architecture (model.py lines 55-74) consisted of a
convolution neural network with the following layers and layer sizes.
\begin{verbatim}
| Layer | Description |
|:---------------------:|:-----------------------------------------------------:|
| Input | 160x320x3 RGB image |
| Cropping2D | cropping=((50,20), (0,0)) |
| Lambda | Normalization (X/255-0.5) |
| Convolution 1 | 5x5 kernel,24 Filters, 2x2 stride, relu activation |
| Convolution 2 | 5x5 kernel,36 Filters, 2x2 stride, relu activation |
| Convolution 3 | 5x5 kernel,48 Filters, 2x2 stride, relu activation |
| Convolution 4 | 3x3 kernel,64 Filters, relu activation |
| Convolution 5 | 3x3 kernel,64 Filters, relu activation |
| Max pooling | 2x2 stride |
| Flatten | |
| Dense 1 | Outputs 100, relu activation |
| dropout | prob=0.5 |
| Dense 2 | Outputs 50, relu activation |
| dropout | prob=0.5 |
| Dense 3 | Outputs 10, relu activation |
| dropout | prob=0.5 |
| Dense 4 | Outputs 1, relu activation |
\end{verbatim}
\hypertarget{creation-of-the-training-set-training-process}{%
\paragraph{3. Creation of the Training Set \& Training
Process}\label{creation-of-the-training-set-training-process}}
To capture good driving behavior, I first recorded two laps on track one
using center lane driving. Here is an example image of center lane
driving:
\begin{figure}
\centering
\includegraphics{./examples/centerlane.png}
\caption{alt text}
\end{figure}
I then recorded the vehicle recovering from the left side and right
sides of the road back to center so that the vehicle would learn to go
back to center if it is going out of the track.
Then I repeated this process on track two in order to get more data
points.
To augment the data set, I also flipped images horizontally(code lines
41-47) and angles thinking that this would help the model generalize
better while taking turns. For example, here is an image that has then
been flipped:
\includegraphics{./examples/original.png}
\includegraphics{./examples/flipped.png}
Also, while reading the images I have converted them to RGB space since
cv2.imread() reads images in BGR space and drive.py uses RGB images.
After the collection process, I had 42000 data points. I then
preprocessed this data by cropping(code line 56) 50 pixels on the top,
20 pixels on the bottom and 10 pixels on the right in order to remove
unwanted data from the images. I have used keras Cropping2D function to
crop the images while training. Then, I normalized the data using keras
Lambda function(Code line 57). I finally randomly shuffled the data set
and put 20\% of the data into a validation set.
I used this training data for training the model. The validation set
helped determine if the model was over or under fitting. First I have
trained the model for 5 epochs and saw that loss is still decreasing so
I saved the weights and used them again while re-training the model, and
this time I used 20 epochs and observed that the training loss and validation
loss settled. I used an adam optimizer and a keras callback
function LearningRateScheduler to apply a decaying learning rate while
training (code lines 10 \& 77).
Below I have provided the Learning curves of my model.
\includegraphics{./examples/Learning1.jpg}
\includegraphics{./examples/Learning.jpg}
% Add a bibliography block to the postdoc
\end{document}
|
{"hexsha": "004de569047bda31ca15f4418d2d7b4f0f9cb3a7", "size": 27086, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "notebook.tex", "max_stars_repo_name": "NaveenVunnam/Behavioral-Colning", "max_stars_repo_head_hexsha": "4f1735e7b1452e96258f2ce36dba1e9b4aac25b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebook.tex", "max_issues_repo_name": "NaveenVunnam/Behavioral-Colning", "max_issues_repo_head_hexsha": "4f1735e7b1452e96258f2ce36dba1e9b4aac25b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebook.tex", "max_forks_repo_name": "NaveenVunnam/Behavioral-Colning", "max_forks_repo_head_hexsha": "4f1735e7b1452e96258f2ce36dba1e9b4aac25b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.3965844402, "max_line_length": 255, "alphanum_fraction": 0.6833050284, "num_tokens": 8910}
|
"""
    gen_ref_dirs(dimension, n_paritions)

Generate Das and Dennis's structured reference points (weight vectors).
`dimension` is typically the number of objective functions in a
multi-objective problem; `n_paritions` is the number of divisions used
along each axis of the simplex lattice.
"""
gen_ref_dirs(dimension, n_paritions) = gen_weights(dimension, n_paritions)
# Enumerate every weight vector with `a` components whose entries are
# multiples of 1/b and sum to one (the Das-Dennis simplex lattice).
function gen_weights(a, b)
    nobj, H = a, b
    weights = []
    # produce_weight! fills `weights` by recursively distributing the
    # integer budget H over a scratch vector of length nobj.
    produce_weight!(zeros(nobj), 1, H, H, nobj, weights)
    return Array.(weights)
end
# Recursively enumerate all vectors of length `nobj` whose integer entries
# sum to H, pushing each (divided by H) onto `w`.
# `a` is the shared scratch vector being filled, `i` the slot currently
# being assigned, and `d` the remaining integer budget for slots i..nobj.
function produce_weight!(a, i, d, H, nobj, w)
    for k=0:d
        if i<nobj
            # Assign k to slot i and recurse with the reduced budget.
            a[i] = k;
            d2 = d - k;
            produce_weight!(a, i+1, d2, H, nobj, w);
        else
            # Last slot absorbs the whole remaining budget.  `a/H`
            # allocates a fresh vector, so reusing the scratch `a` across
            # recursive calls is safe.
            a[i] = d;
            push!(w, a/H)
            break;
        end
    end
end
"""
    ideal(points)

Compute the ideal point (component-wise minimum) from an array of
`Vector`s, a population, or the row vectors of a `Matrix`.

Returns an empty vector (`zeros(0)`) when no points are provided.
"""
function ideal(points::Array{Vector{T}}) where T <: Real
    # Bug fix: the original guard used `&&`, which evaluated
    # `isempty(points[1])` even when `points` was empty and thus threw a
    # BoundsError instead of returning the empty result.
    (isempty(points) || isempty(points[1])) && return zeros(0)
    ideal = points[1]
    for point in points
        ideal = min.(ideal, point)
    end
    return ideal
end
"""
    nadir(points)

Compute the nadir point (component-wise maximum) from an array of
`Vector`s, a population, or the row vectors of a `Matrix`.

Returns an empty vector (`zeros(0)`) when no points are provided.
"""
function nadir(points::Array{Vector{T}}) where T <: Real
    # Bug fix: the original guard used `&&`, which evaluated
    # `isempty(points[1])` even when `points` was empty and thus threw a
    # BoundsError instead of returning the empty result.
    (isempty(points) || isempty(points[1])) && return zeros(0)
    nadir = points[1]
    for point in points
        nadir = max.(nadir, point)
    end
    return nadir
end
# Ideal point of a population: only feasible individuals (those with zero
# summed constraint violations) contribute.
# NOTE(review): if no individual is feasible this forwards an empty
# collection to `ideal` -- confirm the intended behavior in that case.
function ideal(population::Array{xFgh_indiv})
    mask = sum_violations.(population) .== 0
    ideal(fval.(population[mask]))
end
# Matrix variant: treat each row as one objective-space point.
ideal(A::Matrix) = ideal([A[i,:] for i in 1:size(A,1)])
# Nadir point of a population: only feasible individuals (those with zero
# summed constraint violations) contribute.
# NOTE(review): if no individual is feasible this forwards an empty
# collection to `nadir` -- confirm the intended behavior in that case.
function nadir(population::Array{xFgh_indiv})
    mask = sum_violations.(population) .== 0
    nadir(fval.(population[mask]))
end
# Matrix variant: treat each row as one objective-space point.
nadir(A::Matrix) = nadir([A[i,:] for i in 1:size(A,1)])
|
{"hexsha": "754445ed657a57aa15948df7c404c2b28053f3ea", "size": 1874, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/common/multi-objective-functions.jl", "max_stars_repo_name": "jmejia8/Metaheuristics.jl", "max_stars_repo_head_hexsha": "c6a7cc2e076df31a69741f31852da27354e9ab42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 89, "max_stars_repo_stars_event_min_datetime": "2018-03-07T07:11:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:57:16.000Z", "max_issues_repo_path": "src/common/multi-objective-functions.jl", "max_issues_repo_name": "jmejia8/Metaheuristics.jl", "max_issues_repo_head_hexsha": "c6a7cc2e076df31a69741f31852da27354e9ab42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-01-30T23:03:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T15:45:15.000Z", "max_forks_repo_path": "src/common/multi-objective-functions.jl", "max_forks_repo_name": "jmejia8/Metaheuristics.jl", "max_forks_repo_head_hexsha": "c6a7cc2e076df31a69741f31852da27354e9ab42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-06-08T10:06:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T05:15:35.000Z", "avg_line_length": 20.8222222222, "max_line_length": 90, "alphanum_fraction": 0.6051227321, "num_tokens": 543}
|
[STATEMENT]
lemma not_refTE:
"\<lbrakk> \<not>is_refT T; T = Void \<or> T = Boolean \<or> T = Integer \<Longrightarrow> Q \<rbrakk> \<Longrightarrow> Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<not> is_refT T; T = Void \<or> T = Boolean \<or> T = Integer \<Longrightarrow> Q\<rbrakk> \<Longrightarrow> Q
[PROOF STEP]
by (cases T, auto simp add: is_refT_def)
|
{"llama_tokens": 146, "file": "CoreC++_Type", "length": 1}
|
[STATEMENT]
lemma [simp]:
"(binop_LessThan v1 v2 = Some va) \<longleftrightarrow>
(\<exists>i1 i2. v1 = Intg i1 \<and> v2 = Intg i2 \<and> va = Inl (Bool (i1 <s i2)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (binop_LessThan v1 v2 = \<lfloor>va\<rfloor>) = (\<exists>i1 i2. v1 = Intg i1 \<and> v2 = Intg i2 \<and> va = Inl (Bool (i1 <s i2)))
[PROOF STEP]
by(cases "(v1, v2)" rule: binop_LessThan.cases) auto
|
{"llama_tokens": 199, "file": "JinjaThreads_Common_BinOp", "length": 1}
|
import matplotlib.pyplot as plt
import numpy as np
# Trial function for adding vertical arrows to
def f(x):
    """Trial curve for the arrow demo: sin(2x), element-wise for arrays."""
    return np.sin(2.0 * x)
# Sample the curve on [0, 10] densely enough for a smooth plot.
x = np.linspace(0,10,1000)
y = f(x)
fig = plt.figure()
ax = fig.add_subplot(111)
# Black line, 2 pt wide; widen the y-limits to leave room for the arrows.
ax.plot(x,y, 'k', lw=2)
ax.set_ylim(-3,3)
def add_force(F, x1):
    """Draw a vertical force arrow at x1 (data coordinates) on the global axes.

    F is the arrow length in typographic points (textcoords='offset points'),
    not pixels; its sign sets whether the text anchor sits above or below the
    curve point.
    """
    ax.annotate('', xy=(x1, f(x1)), xytext=(0, F), textcoords='offset points',
                arrowprops=dict(arrowstyle='<|-', color='r'))
# Demo: one arrow offset upward (+60 pt) and one downward (-45 pt).
add_force(60, 4.5)
add_force(-45, 6.5)
plt.show()
|
{"hexsha": "be9629a04a5eb4d5c9a82200ae812b912e29e829", "size": 553, "ext": "py", "lang": "Python", "max_stars_repo_path": "develop/matplotlib/arrow02.py", "max_stars_repo_name": "atmelino/PAT8", "max_stars_repo_head_hexsha": "b83b5ff8453017e4a7bec8e47b1a3a7619fffe53", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "develop/matplotlib/arrow02.py", "max_issues_repo_name": "atmelino/PAT8", "max_issues_repo_head_hexsha": "b83b5ff8453017e4a7bec8e47b1a3a7619fffe53", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "develop/matplotlib/arrow02.py", "max_forks_repo_name": "atmelino/PAT8", "max_forks_repo_head_hexsha": "b83b5ff8453017e4a7bec8e47b1a3a7619fffe53", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.12, "max_line_length": 79, "alphanum_fraction": 0.6383363472, "include": true, "reason": "import numpy", "num_tokens": 172}
|
#include <boost/process/error.hpp>
|
{"hexsha": "b3f1feadc8347f83d1d7f2da0433ac6ab4d95c89", "size": 35, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_process_error.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_process_error.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_process_error.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 17.5, "max_line_length": 34, "alphanum_fraction": 0.7714285714, "num_tokens": 7}
|
[STATEMENT]
lemma arrE [elim]:
assumes "arr f"
and "f \<noteq> null \<Longrightarrow> natural_transformation A B (Dom f) (Cod f) (Map f) \<Longrightarrow> T"
shows T
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T
[PROOF STEP]
using assms arr_char null_char
[PROOF STATE]
proof (prove)
using this:
arr f
\<lbrakk>f \<noteq> null; natural_transformation (\<cdot>\<^sub>A) (\<cdot>\<^sub>B) (Dom f) (Cod f) (Map f)\<rbrakk> \<Longrightarrow> T
arr ?f = (?f \<noteq> Null \<and> Dom ?f \<in> Collect (functor (\<cdot>\<^sub>A) (\<cdot>\<^sub>B)) \<and> Cod ?f \<in> Collect (functor (\<cdot>\<^sub>A) (\<cdot>\<^sub>B)) \<and> Map ?f \<in> Collect (natural_transformation (\<cdot>\<^sub>A) (\<cdot>\<^sub>B) (Dom ?f) (Cod ?f)))
null = Null
goal (1 subgoal):
1. T
[PROOF STEP]
by simp
|
{"llama_tokens": 311, "file": "Category3_FunctorCategory", "length": 2}
|
[STATEMENT]
lemma expands_to_root_neg:
assumes "n > 0" "trimmed_neg F" "basis_wf basis" "(f expands_to F) basis"
shows "((\<lambda>x. root n (f x)) expands_to
-powr_expansion False (-F) (inverse (real n))) basis"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. root n (f x)) expands_to - powr_expansion False (- F) (inverse (real n))) basis
[PROOF STEP]
proof (rule expands_to_cong)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (?f expands_to - powr_expansion False (- F) (inverse (real n))) basis
2. \<forall>\<^sub>F x in at_top. ?f x = root n (f x)
[PROOF STEP]
show "((\<lambda>x. -root n (-f x)) expands_to
-powr_expansion False (-F) (inverse (real n))) basis"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. - root n (- f x)) expands_to - powr_expansion False (- F) (inverse (real n))) basis
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
0 < n
trimmed_neg F
basis_wf basis
(f expands_to F) basis
goal (1 subgoal):
1. ((\<lambda>x. - root n (- f x)) expands_to - powr_expansion False (- F) (inverse (real n))) basis
[PROOF STEP]
by (intro expands_to_uminus expands_to_root trimmed_pos_uminus) auto
[PROOF STATE]
proof (state)
this:
((\<lambda>x. - root n (- f x)) expands_to - powr_expansion False (- F) (inverse (real n))) basis
goal (1 subgoal):
1. \<forall>\<^sub>F x in at_top. - root n (- f x) = root n (f x)
[PROOF STEP]
qed (simp_all add: real_root_minus)
|
{"llama_tokens": 604, "file": null, "length": 5}
|
import enum
import numpy as np
from controller.controller_enum import DiscreteControls
# Clear threshold: fraction of a region's maximum attainable mask sum above
# which that region is considered clear enough to steer toward.
CLEAR = 0.45
def create_action(mask):
    """Map an obstacle mask to a discrete steering action.

    The mask is split into left/center/right thirds by column.  The action
    prefers FWD when the center third is sufficiently clear, otherwise turns
    toward the clearer side if it passes a relaxed threshold, and STOPs when
    nothing is clear enough.

    Args:
        mask: 2-D array with values in [0, 255], where high values mark
            traversable (clear) space.

    Returns:
        A DiscreteControls member: FWD, LEFT, RIGHT, or STOP.
    """
    third_length = mask.shape[1] // 3
    left_available = np.sum(mask[:, :third_length])
    center_available = np.sum(mask[:, third_length:(2 * third_length)])
    right_available = np.sum(mask[:, (2 * third_length):])

    # Maximum attainable sum for one third of the mask (values are 0-255).
    # np.prod replaces np.product, which was removed in NumPy 2.0.
    total = 255 * np.prod(mask.shape) // 3

    # (Leftover debug prints of `total` / `center_available` removed.)
    if center_available / total > CLEAR:
        return DiscreteControls.FWD
    elif np.maximum(left_available, right_available) / total > 0.75 * CLEAR:
        return DiscreteControls.LEFT if left_available > right_available else DiscreteControls.RIGHT
    else:
        return DiscreteControls.STOP
|
{"hexsha": "60a2b62c04c15cf351a4d65a5b462f075bd5f2bc", "size": 795, "ext": "py", "lang": "Python", "max_stars_repo_path": "planning/fishbrain.py", "max_stars_repo_name": "ElanHR/marys-lamb", "max_stars_repo_head_hexsha": "e55be63a3193737fcc793a3bcc678e6b18151eb2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-07-04T04:08:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T04:27:35.000Z", "max_issues_repo_path": "planning/fishbrain.py", "max_issues_repo_name": "ElanHR/marys-lamb", "max_issues_repo_head_hexsha": "e55be63a3193737fcc793a3bcc678e6b18151eb2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-07-04T02:26:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-06T15:45:58.000Z", "max_forks_repo_path": "planning/fishbrain.py", "max_forks_repo_name": "ElanHR/marys-lamb", "max_forks_repo_head_hexsha": "e55be63a3193737fcc793a3bcc678e6b18151eb2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-05T03:18:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T04:58:45.000Z", "avg_line_length": 34.5652173913, "max_line_length": 101, "alphanum_fraction": 0.7157232704, "include": true, "reason": "import numpy", "num_tokens": 193}
|
#pylint:disable=no-member
import cv2 as cv
import numpy as np
# Load the sample image; cv.imread returns pixels in BGR channel order.
# NOTE(review): absolute path is machine-specific -- imread returns None
# on any other machine, and the script will fail at the first imshow.
img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/park.jpg')
cv.imshow('Park', img)
# Single-channel all-zeros image with the same height/width as img, used
# as a filler channel when visualizing one channel at a time.
blank = np.zeros(img.shape[:2], dtype='uint8')
# Split the image into its three single-channel planes.
b,g,r = cv.split(img)
# Recombine each plane with blank planes so it displays in its own color.
blue = cv.merge([b,blank,blank])
green = cv.merge([blank,g,blank])
red = cv.merge([blank,blank,r])
cv.imshow('Blue', blue)
cv.imshow('Green', green)
cv.imshow('Red', red)
# The original is 3-channel; each split plane is 2-D (single channel).
print(img.shape)
print(b.shape)
print(g.shape)
print(r.shape)
# Merging the planes back reconstructs the original image.
merged = cv.merge([b,g,r])
cv.imshow('Merged Image', merged)
# Block until a key is pressed so the windows stay open.
cv.waitKey(0)
|
{"hexsha": "f42d249c395d0f7fbc4975792e75c9653e6b054f", "size": 586, "ext": "py", "lang": "Python", "max_stars_repo_path": "Section2_Advanced/splitmerge.py", "max_stars_repo_name": "NeeharikaDva/opencv_course", "max_stars_repo_head_hexsha": "234515ab59a1228c8dfd3c69f310dbc1d86c6089", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Section2_Advanced/splitmerge.py", "max_issues_repo_name": "NeeharikaDva/opencv_course", "max_issues_repo_head_hexsha": "234515ab59a1228c8dfd3c69f310dbc1d86c6089", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Section2_Advanced/splitmerge.py", "max_forks_repo_name": "NeeharikaDva/opencv_course", "max_forks_repo_head_hexsha": "234515ab59a1228c8dfd3c69f310dbc1d86c6089", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.5333333333, "max_line_length": 114, "alphanum_fraction": 0.7081911263, "include": true, "reason": "import numpy", "num_tokens": 164}
|
Require Import Crypto.Specific.Framework.SynthesisFramework.
Require Import Crypto.Specific.solinas64_2e129m25_3limbs.CurveParameters.
(* Synthesis package for the solinas64 2^129 - 25, 3-limb curve. *)
Module P <: PrePackage.
(* The Tag.Context is assembled by the make_Synthesis_package tactic from
   the imported curve parameters and the extra proof hooks. *)
Definition package : Tag.Context.
Proof. make_Synthesis_package curve extra_prove_mul_eq extra_prove_square_eq. Defined.
End P.
(* Expose the synthesized operations by instantiating the framework. *)
Module Export S := PackageSynthesis P.
|
{"author": "anonymous-code-submission-01", "repo": "sp2019-54-code", "sha": "8867f5bed0821415ec99f593b1d61f715ed4f789", "save_path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code", "path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code/sp2019-54-code-8867f5bed0821415ec99f593b1d61f715ed4f789/src/Specific/solinas64_2e129m25_3limbs/Synthesis.v"}
|
C %W% %G%
C****************************************************************
C
C File: rebldzon.f
C
C Purpose: Routine to rebuild acznam() using zone hashing
C
c Return code: n = 0 : Success
c N > 0 : Error
c
C Author: Walt Powell Date: 21 May 1996
C Called by: clnuppti.f
C
C****************************************************************
      integer function rebldzon (error)
      integer error
c
c     Rebuild the zone hash tables used by acznam() from the in-core
c     bus data.  Both the function result and "error" are always set
c     to 0 here (no failure paths at present).
c
      include 'ipfinc/parametr.inc'
      include 'ipfinc/blank.inc'
      include 'ipfinc/area.inc'
      include 'ipfinc/arcntl.inc'
      include 'ipfinc/bus.inc'
      include 'ipfinc/zonehash.inc'
      integer bldzone
      error = 0
      rebldzon = 0
c
c     Reinitialize zone hash tables
c
      do nb = 1, MAXCZN
         nextptr_z(nb) = 0
      enddo
      do nb = 1, HASHSIZE_Z
         htable_z(nb) = 0
      enddo
c     Zone count restarts at zero before re-hashing.
      nztot = 0
c     Re-hash the zone of every active bus.  Entries equal to srtlst
c     are skipped.  NOTE(review): assumes bldzone() inserts the zone
c     into the hash tables and updates nztot -- confirm in bldzone.f.
      do nb = 1, ntot
         if (bus(nb) .ne. srtlst) then
            iz = bldzone(zone(nb), jarzn(nb))
         endif
      enddo
      return
      end
|
{"hexsha": "d36f4904810e22a492777513b2730f2feb32dead", "size": 1098, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ipf/rebldzon.f", "max_stars_repo_name": "mbheinen/bpa-ipf-tsp", "max_stars_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-04-02T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T08:57:45.000Z", "max_issues_repo_path": "ipf/rebldzon.f", "max_issues_repo_name": "cuihantao/bpa-ipf-tsp", "max_issues_repo_head_hexsha": "cb2d0917ae42eff571017e9162f550f87900b83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-08T14:21:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-13T01:27:56.000Z", "max_forks_repo_path": "ipf/rebldzon.f", "max_forks_repo_name": "mbheinen/bpa-ipf-tsp", "max_forks_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-03T04:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:04:31.000Z", "avg_line_length": 22.4081632653, "max_line_length": 65, "alphanum_fraction": 0.4626593807, "num_tokens": 314}
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy
import torch
def convert_to_distributed_tensor(tensor):
    """
    For some backends, such as NCCL, communication only works if the
    tensor is on the GPU. This helper function converts to the correct
    device and returns the tensor + original device ("cpu" or "gpu").
    """
    orig_device = "cpu" if not tensor.is_cuda else "gpu"
    if (
        torch.distributed.is_available()
        # BUGFIX: get_backend() raises a RuntimeError unless the default
        # process group has been initialized; is_available() alone only
        # means the distributed module was compiled in.
        and torch.distributed.is_initialized()
        and torch.distributed.get_backend() == torch.distributed.Backend.NCCL
        and not tensor.is_cuda
    ):
        tensor = tensor.cuda()
    return (tensor, orig_device)
def convert_to_normal_tensor(tensor, orig_device):
    """
    For some backends, such as NCCL, communication only works if the
    tensor is on the GPU. This converts the tensor back to original device.
    """
    needs_move_back = tensor.is_cuda and orig_device == "cpu"
    return tensor.cpu() if needs_move_back else tensor
def is_distributed_training_run():
    """Return True iff torch.distributed is available, initialized, and the
    world size exceeds one (i.e. a real multi-worker run)."""
    dist = torch.distributed
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return dist.get_world_size() > 1
def broadcast_long(value, src_rank):
    """Broadcast a single integer from ``src_rank`` to every worker.

    Outside a distributed run this is a no-op that returns ``value``.
    """
    if not is_distributed_training_run():
        return value
    payload = torch.LongTensor([value])
    payload, device = convert_to_distributed_tensor(payload)
    torch.distributed.broadcast(payload, src=src_rank)
    payload = convert_to_normal_tensor(payload, device)
    return payload.item()
def broadcast_float_list(float_list, src_rank):
    """Broadcast a list of floats from ``src_rank`` to every worker.

    Outside a distributed run this is a no-op that returns ``float_list``.
    """
    if not is_distributed_training_run():
        return float_list
    payload = torch.Tensor(float_list)
    payload, device = convert_to_distributed_tensor(payload)
    torch.distributed.broadcast(payload, src=src_rank)
    payload = convert_to_normal_tensor(payload, device)
    return payload.tolist()
def broadcast_bool(value, src_rank):
    """Broadcast a boolean from ``src_rank`` by encoding it as 0/1."""
    encoded = 1 if value else 0
    return broadcast_long(encoded, src_rank) == 1
def all_gather_return_max_long(value):
    """
    Returns the rank of the trainer that has the max input value and also
    the max value.
    """
    if not is_distributed_training_run():
        # Single-process fallback: this rank trivially holds the max.
        return 0, value
    world_size = torch.distributed.get_world_size()
    local, orig_device = convert_to_distributed_tensor(torch.LongTensor([value]))
    gathered = [
        convert_to_distributed_tensor(torch.LongTensor([0]))[0]
        for _ in range(world_size)
    ]
    torch.distributed.all_gather(gathered, local)
    gathered = [convert_to_normal_tensor(t, orig_device) for t in gathered]
    max_rank = max(range(world_size), key=lambda rank: gathered[rank][0])
    max_value = gathered[max_rank].tolist()
    return max_rank, max_value[0]
def broadcast_model(src_rank, model):
    """Broadcast every parameter of ``model`` from ``src_rank``.

    Broadcasts are issued asynchronously so they can overlap, then all
    handles are awaited before returning.
    """
    pending = [
        torch.distributed.broadcast(param.data, src=src_rank, async_op=True)
        for param in model.parameters()
    ]
    for handle in pending:
        handle.wait()
def broadcast_binary(data: numpy.ndarray, src_rank: int) -> numpy.ndarray:
    """Broadcast a binary blob (uint8 ndarray) from ``src_rank``.

    Receivers may pass ``None``; the payload size is broadcast first so
    they can allocate a buffer. Payloads of at least 8 MiB are split into
    chunks and broadcast with overlapped async ops. No-op outside a
    distributed run.
    """
    if not is_distributed_training_run():
        return data
    assert data is None or isinstance(
        data, numpy.ndarray
    ), "Expect `numpy.ndarray`, but got:{}".format(type(data))
    # broadcast the length of target data
    num_bytes = data.size if data is not None else 0
    num_bytes = broadcast_long(num_bytes, src_rank)
    if data is not None and data.size > 0:
        tensor = torch.as_tensor(data)
    else:
        # Receiver side: allocate a buffer of the announced size.
        tensor = torch.zeros(num_bytes, dtype=torch.uint8)
    MIN_SIZE_FOR_CHUNK = 8 * 1024 * 1024
    CHUNKS = 8
    tensor, orig_device = convert_to_distributed_tensor(tensor)
    if num_bytes >= MIN_SIZE_FOR_CHUNK:
        # Large payload: split into CHUNKS views and broadcast each
        # asynchronously so the transfers overlap.
        chunks = torch.chunk(tensor, CHUNKS)
        pending = [
            torch.distributed.broadcast(chunk, src=src_rank, async_op=True)
            for chunk in chunks
        ]
        logging.info("Broadcasting big binary, size: {}.".format(num_bytes))
        for work in pending:
            work.wait()
        logging.info("Finished broadcasting.")
        # Reassemble the full tensor from the chunk views.
        tensor = torch.cat(chunks)
    else:
        torch.distributed.broadcast(tensor, src=src_rank)
    tensor = convert_to_normal_tensor(tensor, orig_device)
    return tensor.numpy()
|
{"hexsha": "f483a5958f3b2a057b0c74a3ff04fadaea018bcb", "size": 4803, "ext": "py", "lang": "Python", "max_stars_repo_path": "torchelastic/distributed/collectives.py", "max_stars_repo_name": "aashnamsft/elastic", "max_stars_repo_head_hexsha": "5372d6acaf07d130ab0f0ccaf52958a7fde88902", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-07T02:31:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-07T02:31:39.000Z", "max_issues_repo_path": "torchelastic/distributed/collectives.py", "max_issues_repo_name": "aashnamsft/elastic", "max_issues_repo_head_hexsha": "5372d6acaf07d130ab0f0ccaf52958a7fde88902", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "torchelastic/distributed/collectives.py", "max_forks_repo_name": "aashnamsft/elastic", "max_forks_repo_head_hexsha": "5372d6acaf07d130ab0f0ccaf52958a7fde88902", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-14T09:12:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-14T09:12:47.000Z", "avg_line_length": 30.5923566879, "max_line_length": 85, "alphanum_fraction": 0.676035811, "include": true, "reason": "import numpy", "num_tokens": 1048}
|
open import Formalization.PredicateLogic.Signature
module Formalization.PredicateLogic.Syntax.NegativeTranslations (𝔏 : Signature) where
open Signature(𝔏)
open import Data.ListSized
import Lvl
open import Formalization.PredicateLogic.Syntax (𝔏)
open import Functional using (_∘_ ; _∘₂_ ; swap)
open import Numeral.Finite
open import Numeral.Natural
open import Sets.PredicateSet using (PredSet)
open import Type
private variable ℓ : Lvl.Level
private variable args vars : ℕ
-- Also called: Gödel-Gentzen's negative translation.
-- 2.3.3
-- Double-negates atoms and replaces the "positive" connectives (∨ , ∃)
-- by classically equivalent negative encodings; ∧ , ⟶ , Ɐ are translated
-- structurally.
ggTrans : Formula(vars) → Formula(vars)
ggTrans (P $ x) = ¬¬(P $ x)
ggTrans ⊤ = ⊤
ggTrans ⊥ = ⊥
ggTrans (φ ∧ ψ) = (ggTrans φ) ∧ (ggTrans ψ)
ggTrans (φ ∨ ψ) = ¬(¬(ggTrans φ) ∧ ¬(ggTrans ψ))
ggTrans (φ ⟶ ψ) = (ggTrans φ) ⟶ (ggTrans ψ)
ggTrans (Ɐ φ) = Ɐ(ggTrans φ)
ggTrans (∃ φ) = ¬ Ɐ(¬(ggTrans φ))
-- Also called: Kolmogorov's negative translation.
-- 2.3.7A
-- Prefixes every subformula (except ⊤ and ⊥) with a double negation.
koTrans : Formula(vars) → Formula(vars)
koTrans (P $ x) = ¬¬(P $ x)
koTrans ⊤ = ⊤
koTrans ⊥ = ⊥
koTrans (φ ∧ ψ) = ¬¬((koTrans φ) ∧ (koTrans ψ))
koTrans (φ ∨ ψ) = ¬¬((koTrans φ) ∨ (koTrans ψ))
koTrans (φ ⟶ ψ) = ¬¬((koTrans φ) ⟶ (koTrans ψ))
koTrans (Ɐ φ) = ¬¬ Ɐ(koTrans φ)
koTrans (∃ φ) = ¬¬ ∃(koTrans φ)
-- Also called: Kuroda's negative translation.
-- 2.3.7B
-- Kuroda's translation keeps the formula structure intact and only inserts
-- a double negation directly under every universal quantifier.
-- BUGFIX: the recursive cases previously called koTrans (copy-paste from
-- the definition above), so kuTrans was not actually recursive; they must
-- recurse with kuTrans itself.
kuTrans : Formula(vars) → Formula(vars)
kuTrans (P $ x) = P $ x
kuTrans ⊤ = ⊤
kuTrans ⊥ = ⊥
kuTrans (φ ∧ ψ) = (kuTrans φ) ∧ (kuTrans ψ)
kuTrans (φ ∨ ψ) = (kuTrans φ) ∨ (kuTrans ψ)
kuTrans (φ ⟶ ψ) = (kuTrans φ) ⟶ (kuTrans ψ)
kuTrans (Ɐ φ) = Ɐ(¬¬(kuTrans φ))
kuTrans (∃ φ) = ∃(kuTrans φ)
|
{"hexsha": "152b54cda5eb7edcc26862aadbb58857ab2d4b99", "size": 1606, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Formalization/PredicateLogic/Syntax/NegativeTranslations.agda", "max_stars_repo_name": "Lolirofle/stuff-in-agda", "max_stars_repo_head_hexsha": "70f4fba849f2fd779c5aaa5af122ccb6a5b271ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-07T17:58:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T06:53:22.000Z", "max_issues_repo_path": "Formalization/PredicateLogic/Syntax/NegativeTranslations.agda", "max_issues_repo_name": "Lolirofle/stuff-in-agda", "max_issues_repo_head_hexsha": "70f4fba849f2fd779c5aaa5af122ccb6a5b271ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Formalization/PredicateLogic/Syntax/NegativeTranslations.agda", "max_forks_repo_name": "Lolirofle/stuff-in-agda", "max_forks_repo_head_hexsha": "70f4fba849f2fd779c5aaa5af122ccb6a5b271ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3018867925, "max_line_length": 85, "alphanum_fraction": 0.6288916563, "num_tokens": 628}
|
{-# OPTIONS --safe --without-K #-}
open import Relation.Binary.PropositionalEquality using (_≡_; _≢_; refl; trans; sym; cong)
open import Relation.Nullary using (_because_; ofʸ; ofⁿ)
open import Data.Unit using (⊤; tt)
open import Data.Empty using (⊥; ⊥-elim)
open import Data.Nat.Base
open import Data.Bool.Base using (false; true)
open import Data.Product using (_×_; _,_; ∃-syntax)
import Data.Fin as Fin
import Data.Nat.Properties as ℕₚ
import Data.Fin.Properties as Finₚ
open Fin using (Fin ; zero ; suc; #_)
open import PiCalculus.Syntax
open Scoped
module PiCalculus.Semantics where
private
  variable
    name namex namey : Name
    n : ℕ
    P P' Q R : Scoped n
    x y : Fin n
-- Unused i P holds exactly when the (de Bruijn) name i does not occur in P.
-- The index is incremented when passing under a binder (ν and input).
Unused : ∀ {n} → Fin n → Scoped n → Set
Unused i 𝟘 = ⊤
Unused i (ν P) = Unused (suc i) P
Unused i (P ∥ Q) = Unused i P × Unused i Q
Unused i (x ⦅⦆ P) = i ≢ x × Unused (suc i) P
Unused i (x ⟨ y ⟩ P) = i ≢ x × i ≢ y × Unused i P
-- Weakening: lift i P embeds P into a context with one extra name,
-- inserted at position i (existing indices are shifted via punchIn).
lift : (i : Fin (suc n)) → Scoped n → Scoped (suc n)
lift i 𝟘 = 𝟘
lift i (ν P) = ν (lift (suc i) P)
lift i (P ∥ Q) = lift i P ∥ lift i Q
lift i (x ⦅⦆ P) = Fin.punchIn i x ⦅⦆ lift (suc i) P
lift i (x ⟨ y ⟩ P) = Fin.punchIn i x ⟨ Fin.punchIn i y ⟩ lift i P
-- Strengthening: lower i P uP removes the name i from the context;
-- punchOut is safe precisely because uP proves i never occurs in P.
lower : (i : Fin (suc n)) (P : Scoped (suc n)) → Unused i P → Scoped n
lower i 𝟘 uP = 𝟘
lower i (ν P) uP = ν (lower (suc i) P uP)
lower i (P ∥ Q) (uP , uQ) = lower i P uP ∥ lower i Q uQ
lower i (x ⦅⦆ P) (i≢x , uP) = Fin.punchOut i≢x ⦅⦆ lower (suc i) P uP
lower i (x ⟨ y ⟩ P) (i≢x , (i≢y , uP)) = Fin.punchOut i≢x ⟨ Fin.punchOut i≢y ⟩ lower i P uP
-- If x is the image of i under inject₁ then x cannot be the maximal
-- element n (helper for exchangeFin's lower₁ call).
notMax : (i : Fin n) (x : Fin (suc n)) → Fin.inject₁ i ≡ x → n ≢ Fin.toℕ x
notMax i x p n≡x = Finₚ.toℕ-inject₁-≢ i (trans n≡x (sym (cong Fin.toℕ p)))
-- Transposition of the adjacent indices i and (suc i); every other
-- index is left fixed.
exchangeFin : Fin n → Fin (suc n) → Fin (suc n)
exchangeFin i x with Fin.inject₁ i Fin.≟ x
exchangeFin i x | true because ofʸ p = suc (Fin.lower₁ x (notMax i x p))
exchangeFin i x | false because _ with (suc i) Fin.≟ x
exchangeFin i x | false because _ | true because _ = Fin.inject₁ i
exchangeFin i x | false because _ | false because _ = x
-- Apply the transposition of names i / suc i throughout a process,
-- shifting the pivot when passing under a binder.
exchange : Fin n → Scoped (suc n) → Scoped (suc n)
exchange i 𝟘 = 𝟘
exchange i (ν P) = ν (exchange (suc i) P)
exchange i (P ∥ Q) = exchange i P ∥ exchange i Q
exchange i (x ⦅⦆ P) = exchangeFin i x ⦅⦆ exchange (suc i) P
exchange i (x ⟨ y ⟩ P) = exchangeFin i x ⟨ exchangeFin i y ⟩ exchange i P
infixl 10 _≈_
-- Axioms of structural congruence: ∥ is an associative/commutative
-- operator with unit 𝟘; a restriction of 𝟘 vanishes; scope extrusion
-- (scope-ext) moves a ν inward past a parallel component that does not
-- use the bound name; adjacent restrictions commute up to exchanging
-- their two names.
data _≈_ : Scoped n → Scoped n → Set where
  comp-assoc : P ∥ (Q ∥ R) ≈ (P ∥ Q) ∥ R
  comp-symm : P ∥ Q ≈ Q ∥ P
  comp-end : P ∥ 𝟘 ≈ P
  scope-end : _≈_ {n} (ν 𝟘 ⦃ name ⦄) 𝟘
  scope-ext : (u : Unused zero P)
            → ν (P ∥ Q) ⦃ name ⦄ ≈ lower zero P u ∥ (ν Q) ⦃ name ⦄
  scope-scope-comm : ν (ν P ⦃ namey ⦄) ⦃ namex ⦄ ≈ ν (ν (exchange zero P) ⦃ namex ⦄) ⦃ namey ⦄
-- Shape of a congruence derivation; used as a termination/recursion index.
data RecTree : Set where
  zero : RecTree
  one : RecTree → RecTree
  two : RecTree → RecTree → RecTree
private
  variable
    r p : RecTree
-- TODO: change names as per paper
infixl 5 _≅⟨_⟩_
-- Structural congruence: the closure of _≈_ under equivalence and under
-- process contexts, indexed by the RecTree shape of its derivation.
data _≅⟨_⟩_ : Scoped n → RecTree → Scoped n → Set where
  stop_ : P ≈ Q → P ≅⟨ zero ⟩ Q
  -- Equivalence relation
  cong-refl : P ≅⟨ zero ⟩ P
  cong-symm_ : P ≅⟨ r ⟩ Q → Q ≅⟨ one r ⟩ P
  cong-trans : P ≅⟨ r ⟩ Q → Q ≅⟨ p ⟩ R → P ≅⟨ two r p ⟩ R
  -- Congruent relation
  ν-cong_ : P ≅⟨ r ⟩ P' → ν P ⦃ name ⦄ ≅⟨ one r ⟩ ν P' ⦃ name ⦄
  comp-cong_ : P ≅⟨ r ⟩ P' → P ∥ Q ≅⟨ one r ⟩ P' ∥ Q
  input-cong_ : P ≅⟨ r ⟩ P' → (x ⦅⦆ P) ⦃ name ⦄ ≅⟨ one r ⟩ (x ⦅⦆ P') ⦃ name ⦄
  output-cong_ : P ≅⟨ r ⟩ P' → x ⟨ y ⟩ P ≅⟨ one r ⟩ x ⟨ y ⟩ P'
-- Congruence with the derivation shape existentially hidden.
_≅_ : Scoped n → Scoped n → Set
P ≅ Q = ∃[ r ] (P ≅⟨ r ⟩ Q)
-- Name substitution on indices: x [ i ↦ j ]' is j when x ≡ i, else x.
_[_↦_]' : Fin n → Fin n → Fin n → Fin n
x [ i ↦ j ]' with i Finₚ.≟ x
x [ i ↦ j ]' | true because _ = j
x [ i ↦ j ]' | false because _ = x
-- Substitution of name i by j inside a process; both indices are
-- shifted when passing under a binder.
_[_↦_] : Scoped n → (i j : Fin n) → Scoped n
𝟘 [ i ↦ j ] = 𝟘
(ν P) [ i ↦ j ] = ν (P [ suc i ↦ suc j ])
(P ∥ Q) [ i ↦ j ] = (P [ i ↦ j ]) ∥ (Q [ i ↦ j ])
(x ⦅⦆ P) [ i ↦ j ] = (x [ i ↦ j ]') ⦅⦆ (P [ suc i ↦ suc j ])
(x ⟨ y ⟩ P) [ i ↦ j ] = (x [ i ↦ j ]') ⟨ y [ i ↦ j ]' ⟩ (P [ i ↦ j ])
-- After substituting i away (with i ≢ j), the result never equals i.
substFin-unused : ∀ {i j} (x : Fin (suc n)) → i ≢ j → i ≢ x [ i ↦ j ]'
substFin-unused {i = i} x i≢j with i Finₚ.≟ x
substFin-unused {i = i} x i≢j | true because _ = i≢j
substFin-unused {i = i} x i≢j | false because ofⁿ ¬p = ¬p
-- Lifting of the previous fact to whole processes: substituting i by a
-- distinct j leaves i unused in the result.
subst-unused : {i j : Fin (suc n)}
             → i ≢ j
             → (P : Scoped (suc n))
             → Unused i (P [ i ↦ j ])
subst-unused i≢j 𝟘 = tt
subst-unused i≢j (ν P) = subst-unused (λ i≡j → i≢j (Finₚ.suc-injective i≡j)) P
subst-unused i≢j (P ∥ Q) = subst-unused i≢j P , subst-unused i≢j Q
subst-unused i≢j (x ⦅⦆ P) = substFin-unused x i≢j , subst-unused (λ i≡j → i≢j (Finₚ.suc-injective i≡j)) P
subst-unused i≢j (x ⟨ y ⟩ P) = substFin-unused x i≢j , substFin-unused y i≢j , subst-unused i≢j P
-- A communication channel: either internal (hidden by a ν) or one of the
-- n free names of the context.
data Channel : ℕ → Set where
  internal : ∀ {n} → Channel n
  external : ∀ {n} → Fin n → Channel n
-- Push a channel under a ν-binder: the outermost free name becomes
-- internal, the rest shift down by one.
dec : Channel (suc n) → Channel n
dec internal = internal
dec (external zero) = internal
dec (external (suc i)) = external i
-- Case analysis on a channel (analogous to Data.Maybe.maybe).
maybe : ∀ {a} {A : Set a} → A → (Fin n → A) → Channel n → A
maybe b f internal = b
maybe b f (external x) = f x
infixl 5 _=[_]⇒_
-- Labelled reduction: P =[ c ]⇒ Q when P steps to Q by a communication
-- on channel c (comm), possibly under parallel composition (par),
-- under restriction (res, which hides the channel via dec), or up to
-- structural congruence on both sides (struct).
data _=[_]⇒_ : Scoped n → Channel n → Scoped n → Set where
  comm : {P : Scoped (1 + n)} {Q : Scoped n} {i j : Fin n}
       → let uP' = subst-unused (λ ()) P
         in ((i ⦅⦆ P) ⦃ name ⦄) ∥ (i ⟨ j ⟩ Q) =[ external i ]⇒ lower zero (P [ zero ↦ suc j ]) uP' ∥ Q
  par_ : ∀ {c} {P P' Q : Scoped n}
       → P =[ c ]⇒ P'
       → P ∥ Q =[ c ]⇒ P' ∥ Q
  res_ : ∀ {c} {P Q : Scoped (1 + n)}
       → P =[ c ]⇒ Q
       → ν P ⦃ name ⦄ =[ dec c ]⇒ ν Q ⦃ name ⦄
  struct : ∀ {c} {P P' Q' Q : Scoped n}
         → P ≅⟨ r ⟩ P'
         → P' =[ c ]⇒ Q'
         → Q' ≅⟨ r ⟩ Q
         → P =[ c ]⇒ Q
-- Reduction on some (existentially hidden) channel.
_⇒_ : Scoped n → Scoped n → Set
P ⇒ Q = ∃[ c ] (P =[ c ]⇒ Q)
|
{"hexsha": "da22d67718e8ed9e50a7adf8db2d3e0f7411b8f7", "size": 5963, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/PiCalculus/Semantics.agda", "max_stars_repo_name": "guilhermehas/typing-linear-pi", "max_stars_repo_head_hexsha": "0fc3cf6bcc0cd07d4511dbe98149ac44e6a38b1a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2020-05-02T23:32:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T15:18:23.000Z", "max_issues_repo_path": "src/PiCalculus/Semantics.agda", "max_issues_repo_name": "guilhermehas/typing-linear-pi", "max_issues_repo_head_hexsha": "0fc3cf6bcc0cd07d4511dbe98149ac44e6a38b1a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-15T09:16:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T09:16:14.000Z", "max_forks_repo_path": "src/PiCalculus/Semantics.agda", "max_forks_repo_name": "guilhermehas/typing-linear-pi", "max_forks_repo_head_hexsha": "0fc3cf6bcc0cd07d4511dbe98149ac44e6a38b1a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-01-25T13:57:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T16:24:07.000Z", "avg_line_length": 34.2701149425, "max_line_length": 107, "alphanum_fraction": 0.5093073956, "num_tokens": 2664}
|
# Copyright 2021 Toyota Research Institute. All rights reserved.
import itertools
import json
import math
import os
import warnings
from collections import OrderedDict
from functools import partial
import numpy as np
import pandas as pd
from pyquaternion import Quaternion
from tqdm import tqdm
import numba
from detectron2.data.catalog import DatasetCatalog, MetadataCatalog
from detectron2.evaluation.evaluator import DatasetEvaluator
from detectron2.structures.boxes import BoxMode
from detectron2.utils import comm
from iopath.common.file_io import PathManager
from numba import errors as numba_err
from tridet.evaluators.rotate_iou import d3_box_overlap_kernel, rotate_iou_gpu_eval
from tridet.structures.boxes3d import GenericBoxes3D
from tridet.structures.pose import Pose
warnings.simplefilter('ignore', category=numba_err.NumbaDeprecationWarning)
BBOX3D_PREDICTION_FILE = "bbox3d_predictions.json"
KITTI_SUBMISSION_DIR = "kitti_3d_submission"
class KITTI3DEvaluator(DatasetEvaluator):
    """Detectron2 evaluator for KITTI-style 3D object detection.

    Accumulates per-image predictions (as JSON records and as KITTI-format
    rows) plus ground truth, then either computes KITTI 3D/BEV AP via
    ``KITTIEvaluationEngine`` or only writes a KITTI submission directory.
    """
    def __init__(
        self,
        dataset_name,
        iou_thresholds,
        only_prepare_submission=False,
        output_dir=None,
        distributed=False,
    ):
        """
        Args:
            dataset_name: registered detectron2 dataset name.
            iou_thresholds: overlap thresholds at which AP is computed.
            only_prepare_submission: if True, skip metric computation and
                only write the KITTI submission files.
            output_dir: directory for the JSON dump / submission files.
            distributed: gather results from all workers before evaluating.
        """
        dataset_dicts = DatasetCatalog.get(dataset_name)
        metadata = MetadataCatalog.get(dataset_name)
        class_names = metadata.thing_classes
        id_to_name = metadata.contiguous_id_to_name
        # Keyed by file name so ground truth can be looked up per image.
        self._dataset_dicts = {dikt['file_name']: dikt for dikt in dataset_dicts}
        self._id_to_name = id_to_name
        self._class_names = class_names
        self._iou_thresholds = iou_thresholds
        self._only_prepare_submission = only_prepare_submission
        self._output_dir = output_dir
        self._distributed = distributed
    def reset(self):
        """
        Preparation for a new round of evaluation.
        Should be called before starting a round of evaluation.
        """
        # List[Dict], each key'ed by category (str) + vectorized 3D box (10) + 2D box (4) + score (1) + file name (str)
        self._predictions_as_json = []
        self._predictions_kitti_format = []
        self._groundtruth_kitti_format = []
    def process(self, inputs, outputs):
        """
        Process the pair of inputs and outputs.
        If they contain batches, the pairs can be consumed one-by-one using `zip`:
        .. code-block:: python
        for input_, output in zip(inputs, outputs):
            # do evaluation on single input/output pair
            ...
        Args:
            inputs (list): the inputs that's used to call the model.
            outputs (list): the return value of `model(inputs)`
        """
        for input_per_image, pred_per_image in zip(inputs, outputs):
            pred_classes = pred_per_image['instances'].pred_classes
            pred_boxes = pred_per_image['instances'].pred_boxes.tensor
            pred_boxes3d = pred_per_image['instances'].pred_boxes3d
            # pred_boxes3d = pred_per_image['instances'].pred_box3d_as_vec
            scores = pred_per_image['instances'].scores
            scores_3d = pred_per_image['instances'].scores_3d
            file_name = input_per_image['file_name']
            image_id = input_per_image['image_id']
            # predictions
            predictions_kitti = []
            # for class_id, box3d_as_vec, score, box2d in zip(pred_classes, pred_boxes3d, scores, pred_boxes):
            for class_id, box3d, score_3d, box2d, score in zip(
                pred_classes, pred_boxes3d, scores_3d, pred_boxes, scores
            ):
                # class_name = self._metadata.thing_classes[class_id]
                class_name = self._class_names[class_id]
                box3d_as_vec = box3d.vectorize()[0].cpu().numpy()
                pred = OrderedDict(
                    category_id=int(class_id),  # COCO instances
                    category=class_name,
                    bbox3d=box3d_as_vec.tolist(),
                    # COCO instances uses "XYWH". Aligning with it as much as possible
                    bbox=BoxMode.convert(box2d.tolist(), from_mode=BoxMode.XYXY_ABS, to_mode=BoxMode.XYWH_ABS),
                    score=float(score),
                    score_3d=float(score_3d),
                    file_name=file_name,
                    image_id=image_id  # COCO instances
                )
                self._predictions_as_json.append(pred)
                # prediction in KITTI format.
                W, L, H, x, y, z, rot_y, alpha = convert_3d_box_to_kitti(box3d)
                l, t, r, b = box2d.tolist()
                # KITTI row layout: type, truncated(-1), occluded(-1), alpha,
                # bbox(l,t,r,b), dims(H,W,L), location(x,y,z), rot_y, score.
                predictions_kitti.append([
                    class_name, -1, -1, alpha, l, t, r, b, H, W, L, x, y, z, rot_y,
                    float(score_3d)
                ])
            self._predictions_kitti_format.append(pd.DataFrame(predictions_kitti))
            # groundtruths
            gt_dataset_dict = self._dataset_dicts[file_name]
            if "annotations" not in gt_dataset_dict:
                # test set
                continue
            raw_kitti_annotations = gt_dataset_dict.get('raw_kitti_annotations', None)
            if raw_kitti_annotations is not None:
                self._groundtruth_kitti_format.append(raw_kitti_annotations)
            else:
                # Otherwise, use the same format as predictions (minus 'score').
                groundtruth_kitti = []
                for anno in gt_dataset_dict['annotations']:
                    # class_name = self._metadata.thing_classes[anno['category_id']]
                    class_name = self._class_names[anno['category_id']]
                    # groundtruth in KITTI format.
                    box2d = BoxMode.convert(anno['bbox'], from_mode=anno['bbox_mode'], to_mode=BoxMode.XYXY_ABS)
                    box3d = GenericBoxes3D.from_vectors([anno['bbox3d']])
                    W, L, H, x, y, z, rot_y, alpha = convert_3d_box_to_kitti(box3d)
                    l, t, r, b = box2d
                    groundtruth_kitti.append([class_name, -1, -1, alpha, l, t, r, b, H, W, L, x, y, z, rot_y])
                self._groundtruth_kitti_format.append(pd.DataFrame(groundtruth_kitti))
    def evaluate(self):
        """Gather results (if distributed), dump predictions as JSON, and
        either write a KITTI submission or compute 3D/BEV AP.

        Returns:
            OrderedDict of metric name -> AP in percent (empty dict when
            only preparing a submission; None on non-main workers).
        """
        if self._distributed:
            comm.synchronize()
            predictions_as_json = comm.gather(self._predictions_as_json, dst=0)
            predictions_as_json = list(itertools.chain(*predictions_as_json))
            predictions_kitti_format = comm.gather(self._predictions_kitti_format, dst=0)
            predictions_kitti_format = list(itertools.chain(*predictions_kitti_format))
            groundtruth_kitti_format = comm.gather(self._groundtruth_kitti_format, dst=0)
            groundtruth_kitti_format = list(itertools.chain(*groundtruth_kitti_format))
            if not comm.is_main_process():
                return
        else:
            predictions_as_json = self._predictions_as_json
            predictions_kitti_format = self._predictions_kitti_format
            groundtruth_kitti_format = self._groundtruth_kitti_format
        # Write prediction file as JSON.
        PathManager().mkdirs(self._output_dir)
        file_path = os.path.join(self._output_dir, BBOX3D_PREDICTION_FILE)
        with open(file_path, 'w') as f:
            json.dump(predictions_as_json, f, indent=4)
        if self._only_prepare_submission:
            self.prepare_kitti3d_submission(
                predictions_kitti_format, submission_dir=os.path.join(self._output_dir, KITTI_SUBMISSION_DIR)
            )
            return {}
        assert len(predictions_kitti_format) == len(groundtruth_kitti_format)
        formatted_predictions = [
            KITTIEvaluationEngine._format(idx, x, True) for idx, x in enumerate(predictions_kitti_format)
        ]
        formatted_groundtruth = [
            KITTIEvaluationEngine._format(idx, x, False) for idx, x in enumerate(groundtruth_kitti_format)
        ]
        engine = KITTIEvaluationEngine(id_to_name=self._id_to_name)
        results = engine.evaluate(formatted_groundtruth, formatted_predictions, overlap_thresholds=self._iou_thresholds)
        # Report AP as percentages.
        results = OrderedDict({k: 100. * v for k, v in results.items()})
        return results
    @staticmethod
    def prepare_kitti3d_submission(predictions_kitti_format, submission_dir):
        """Write one space-separated KITTI .txt file per image into a fresh
        ``submission_dir`` (refuses to overwrite an existing directory)."""
        assert not os.path.exists(submission_dir)
        os.makedirs(submission_dir)
        for idx, prediction in tqdm(enumerate(predictions_kitti_format)):
            prediction.to_csv(os.path.join(submission_dir, f"{idx:06d}.txt"), sep=" ", header=False, index=False)
def convert_3d_box_to_kitti(box):
    """Convert a single 3D bounding box (GenericBoxes3D) to KITTI convention. i.e. for evaluation. We
    assume the box is in the reference frame of camera_2 (annotations are given in this frame).
    Usage:
        >>> box_camera_2 = pose_02.inverse() * pose_0V * box_velodyne
        >>> kitti_bbox_params = convert_3d_box_to_kitti(box_camera_2)
    Parameters
    ----------
    box: GenericBoxes3D
        Box in camera frame (X-right, Y-down, Z-forward)
    Returns
    -------
    W, L, H, x, y, z, rot_y, alpha: float
        KITTI format bounding box parameters.
    """
    assert len(box) == 1
    quat = Quaternion(*box.quat.cpu().tolist()[0])
    tvec = box.tvec.cpu().numpy()[0]
    sizes = box.size.cpu().numpy()[0]
    # Re-encode into KITTI box convention
    # Translate y up by half of dimension
    # NOTE(review): assumes sizes[2] is the vertical extent here — confirm
    # against GenericBoxes3D's size ordering.
    tvec += np.array([0., sizes[2] / 2.0, 0])
    inversion = Quaternion(axis=[1, 0, 0], radians=np.pi / 2).inverse
    quat = inversion * quat
    # Construct final pose in KITTI frame (use negative of angle if about positive z)
    if quat.axis[2] > 0:
        kitti_pose = Pose(wxyz=Quaternion(axis=[0, 1, 0], radians=-quat.angle), tvec=tvec)
        rot_y = -quat.angle
    else:
        kitti_pose = Pose(wxyz=Quaternion(axis=[0, 1, 0], radians=quat.angle), tvec=tvec)
        rot_y = quat.angle
    # Construct unit vector pointing in z direction (i.e. [0, 0, 1] direction)
    # The transform this unit vector by pose of car, and drop y component, thus keeping heading direction in BEV (x-z grid)
    v_ = np.float64([[0, 0, 1], [0, 0, 0]])
    v_ = (kitti_pose * v_)[:, ::2]
    # Getting positive theta angle (we define theta as the positive angle between
    # a ray from the origin through the base of the transformed unit vector and the z-axis
    theta = np.arctan2(abs(v_[1, 0]), abs(v_[1, 1]))
    # Depending on whether the base of the transformed unit vector is in the first or
    # second quadrant we add or subtract `theta` from `rot_y` to get alpha, respectively
    alpha = rot_y + theta if v_[1, 0] < 0 else rot_y - theta
    # Bound from [-pi, pi]
    if alpha > np.pi:
        alpha -= 2.0 * np.pi
    elif alpha < -np.pi:
        alpha += 2.0 * np.pi
    alpha = np.around(alpha, decimals=2)  # KITTI precision
    # W, L, H, x, y, z, rot-y, alpha
    return sizes[0], sizes[1], sizes[2], tvec[0], tvec[1], tvec[2], rot_y, alpha
class KITTIEvaluationEngine():
    """Computes KITTI-protocol 3D / BEV average precision from formatted
    ground-truth and prediction annotations."""
    # KITTI difficulty buckets (easy/moderate/hard): per-level limits on
    # occlusion, truncation and minimum 2D box height in pixels.
    _DEFAULT_KITTI_LEVEL_TO_PARAMETER = {
        "levels": ("easy", "moderate", "hard"),
        "max_occlusion": (0, 1, 2),
        "max_truncation": (0.15, 0.3, 0.5),
        "min_height": (40, 25, 25)
    }
    def __init__(self, id_to_name, num_shards=50, sample_points=41):
        """
        Args:
            id_to_name: mapping of contiguous class id -> class name.
            num_shards: number of shards used to batch overlap computation.
            sample_points: number of recall points sampled on the PR curve
                (41 gives the KITTI R40 metric, recall 0 excluded).
        """
        self.id_to_name = id_to_name
        self.sample_points = sample_points
        self.num_shards = num_shards
        self.filter_data_fn = partial(
            clean_kitti_data, difficulty_level_to_params=self._DEFAULT_KITTI_LEVEL_TO_PARAMETER
        )
@staticmethod
def _format(idx, kitti_format, is_prediction):
if len(kitti_format) == 0:
annotations = dict(
id=f'{idx:06d}',
name=[],
truncated=np.array([]),
occluded=np.array([]),
alpha=np.array([]),
bbox=np.empty((0, 4)),
dimensions=np.empty((0, 3)),
location=np.empty((0, 3)),
rotation_y=np.array([]),
score=np.array([])
)
return annotations
data = np.array(kitti_format)
annotations = dict(
id=f'{idx:06d}',
name=data[:, 0],
truncated=data[:, 1].astype(np.float64),
occluded=data[:, 2].astype(np.int64),
alpha=data[:, 3].astype(np.float64),
bbox=data[:, 4:8].astype(np.float64),
dimensions=data[:, 8:11][:, [2, 0, 1]].astype(np.float64),
location=data[:, 11:14].astype(np.float64),
rotation_y=data[:, 14].astype(np.float64),
)
if is_prediction:
annotations.update({'score': data[:, 15].astype(np.float64)})
else:
annotations.update({'score': np.zeros([len(annotations['bbox'])])})
return annotations
def get_shards(self, num, num_shards):
"""Shard number into evenly sized parts. `Remaining` values are put into the last shard.
Parameters
----------
num: int
Number to shard
num_shards: int
Number of shards
Returns
-------
List of length (num_shards or num_shards +1), depending on whether num is perfectly divisible by num_shards
"""
assert num_shards > 0, "Invalid number of shards"
num_per_shard = num // num_shards
remaining_num = num % num_shards
full_shards = num_shards * (num_per_shard > 0)
if remaining_num == 0:
return [num_per_shard] * full_shards
else:
return [num_per_shard] * full_shards + [remaining_num]
    def evaluate(self, gt_annos, dt_annos, overlap_thresholds):
        """Compute KITTI 3D-box and BEV AP over formatted annotations.

        Args:
            gt_annos / dt_annos: lists of dicts as produced by `_format`.
            overlap_thresholds: IoU thresholds at which AP is evaluated.
        Returns:
            OrderedDict mapping
            'kitti_{box3d,bev}_r40/{class}_{difficulty}_{threshold}' to AP
            (fractional, not percent).
        """
        # pr_curves = self.eval_metric(gt_annos, dt_annos, metric, overlap_thresholds)
        gt_annos, dt_annos = self.validate_anno_format(gt_annos, dt_annos)
        box3d_pr_curves = self.eval_metric(gt_annos, dt_annos, 'BOX3D_AP', overlap_thresholds)
        mAP_3d = self.get_mAP(box3d_pr_curves["precision"], box3d_pr_curves["recall"])
        bev_pr_curves = self.eval_metric(gt_annos, dt_annos, 'BEV_AP', overlap_thresholds)
        mAP_bev = self.get_mAP(bev_pr_curves["precision"], bev_pr_curves["recall"])
        results = OrderedDict()
        for class_i, class_name in self.id_to_name.items():
            for diff_i, diff in enumerate(["Easy", "Moderate", "Hard"]):
                for thresh_i, thresh in enumerate(overlap_thresholds):
                    results['kitti_box3d_r40/{}_{}_{}'.format(class_name, diff, thresh)] = \
                        mAP_3d[class_i, diff_i, thresh_i]
        for class_i, class_name in self.id_to_name.items():
            for diff_i, diff in enumerate(["Easy", "Moderate", "Hard"]):
                for thresh_i, thresh in enumerate(overlap_thresholds):
                    results['kitti_bev_r40/{}_{}_{}'.format(class_name, diff, thresh)] = \
                        mAP_bev[class_i, diff_i, thresh_i]
        return results
def get_mAP(self, precision, recall):
""" Get mAP from precision.
Parameters
----------
precision: np.ndarray
Numpy array of precision curves at different recalls, of shape
[num_classes, num_difficulties, num_overlap_thresholds,self.sample_points]
recall: np.ndarray
Numpy array of recall values corresponding to each precision, of shape
[num_classes, num_difficulties, num_overlap_thresholds,self.sample_points]
Returns
-------
ap: np.ndarray
Numpy array of mean AP evaluated at different points along PR curve.
Shape [num_classes, num_difficulties, num_overlap_thresholds]
"""
precisions, recall_spacing = self.get_sampled_precision_recall(precision, recall)
ap = sum(precisions) / len(recall_spacing)
return ap
def get_sampled_precision_recall(self, precision, recall):
"""Given an array of precision, recall values, sample evenly along the recall range, and interpolate the precision
based on AP from section 6 from https://research.mapillary.com/img/publications/MonoDIS.pdf
Parameters
----------
precision: np.ndarray
Numpy array of precision curves at different recalls, of shape
[num_classes, num_difficulties, num_overlap_thresholds, self.sample_points]
recall: np.ndarray
Numpy array of recall values corresponding to each precision, of shape
[num_classes, num_difficulties, num_overlap_thresholds, self.sample_points]
Returns
sampled_precision: list of np.ndarrays, of shape (num_classes, num_difficulties, num_overlap_thresholds)
The maximum precision values corresponding to the sampled recall.
sampled_recall: list
Recall values evenly spaced along the recall range.
"""
# recall_range = self.recall_range
recall_range = (0.0, 1.0)
precisions = []
# Don't count recall at 0
recall_spacing = [1. / (self.sample_points - 1) * i for i in range(1, self.sample_points)]
recall_spacing = list(filter(lambda recall: recall_range[0] <= recall <= recall_range[1], recall_spacing))
for r in recall_spacing:
precisions_above_recall = (recall >= r) * precision
precisions.append(precisions_above_recall.max(axis=3))
return precisions, recall_spacing
@staticmethod
def validate_anno_format(gt_annos, dt_annos):
"""Verify that the format/dimensions for the annotations are correct.
Keys correspond to defintions here:
https://github.com/NVIDIA/DIGITS/blob/v4.0.0-rc.3/digits/extensions/data/objectDetection/README.md
"""
necessary_keys = ['name', 'alpha', 'bbox', 'dimensions', 'location', 'rotation_y', 'score']
for i, (gt_anno, dt_anno) in enumerate(zip(gt_annos, dt_annos)):
for key in necessary_keys:
assert key in gt_anno, "{} not present in GT {}".format(key, i)
assert key in dt_anno, "{} not present in prediction {}".format(key, i)
if key in ['bbox', 'dimensions', 'location']:
# make sure these fields are 2D numpy array
assert len(gt_anno[key].shape) == 2, key
assert len(dt_anno[key].shape) == 2, key
for key in ['truncated', 'occluded', 'alpha', 'rotation_y', 'score']:
if len(gt_anno[key].shape) == 2:
gt_anno[key] = np.squeeze(gt_anno[key], axis=0)
if len(dt_anno[key].shape) == 2:
dt_anno[key] = np.squeeze(dt_anno[key], axis=0)
return gt_annos, dt_annos
    def eval_metric(self, gt_annos, dt_annos, metric, overlap_thresholds):
        """Build precision/recall curves for one metric ('BOX3D_AP' or 'BEV_AP').

        For every (class, difficulty, threshold) cell: clean/filter boxes,
        derive score thresholds that evenly sample recall, then accumulate
        tp/fp/fn statistics shard by shard.

        Returns:
            dict with 'precision' and 'recall' arrays of shape
            [num_classes, num_difficulties, num_thresholds, sample_points].
        """
        assert len(gt_annos) == len(dt_annos), "Must provide a prediction for every ground truth sample"
        num_ground_truths = len(gt_annos)
        shards = self.get_shards(num_ground_truths, self.num_shards)
        overlaps, overlaps_by_shard, total_gt_num, total_dt_num = \
            self.calculate_match_degree_sharded(gt_annos, dt_annos, metric)
        # all_thresholds = -1.0 * dist_thresholds[metric, :, :, :] if metric == Metrics.BBOX_3D_NU_AP else \
        #     overlap_thresholds[metric, :, :, :]
        num_minoverlap = len(overlap_thresholds)
        num_classes = len(self.id_to_name)
        num_difficulties = 3
        precision = np.zeros([num_classes, num_difficulties, num_minoverlap, self.sample_points])
        recall = np.zeros([num_classes, num_difficulties, num_minoverlap, self.sample_points])
        instances_count = np.zeros([num_classes, num_difficulties])
        for class_idx in range(num_classes):
            for difficulty_idx in range(num_difficulties):
                gt_data_list, dt_data_list, ignored_gts, ignored_dets, dontcares, ignores_per_sample, \
                    total_num_valid_gt = self.prepare_data(gt_annos, dt_annos, class_idx, difficulty_idx)
                instances_count[class_idx, difficulty_idx] = total_num_valid_gt
                for thresh_idx, min_overlap in enumerate(overlap_thresholds):
                    # Collect matched-detection scores to pick the score
                    # thresholds at which the PR curve is sampled.
                    thresholds_list = []
                    for i in range(len(gt_annos)):
                        threshold = compute_threshold_jit(
                            overlaps[i],
                            gt_data_list[i],
                            dt_data_list[i],
                            ignored_gts[i],
                            ignored_dets[i],
                            min_overlap=min_overlap,
                            compute_fp=False
                        )
                        thresholds_list += threshold.tolist()
                    thresholds = np.array(
                        get_thresholds(np.array(thresholds_list), total_num_valid_gt, self.sample_points)
                    )
                    # TODO: Refactor hard coded numbers and strings
                    # [num_threshold, num_fields], fields: tp, fp, fn, aoe, aos, iou/dist error, -log(Probability,
                    # bev iou error)
                    pr = np.zeros([len(thresholds), 8])
                    idx = 0
                    # Accumulate the confusion statistics one shard at a time.
                    for shard_idx, num_samples_per_shard in enumerate(shards):
                        gt_datas_part = np.concatenate(gt_data_list[idx:idx + num_samples_per_shard], 0)
                        dt_datas_part = np.concatenate(dt_data_list[idx:idx + num_samples_per_shard], 0)
                        dc_datas_part = np.concatenate(dontcares[idx:idx + num_samples_per_shard], 0)
                        ignored_dets_part = np.concatenate(ignored_dets[idx:idx + num_samples_per_shard], 0)
                        ignored_gts_part = np.concatenate(ignored_gts[idx:idx + num_samples_per_shard], 0)
                        fused_compute_statistics(
                            overlaps_by_shard[shard_idx],
                            pr,
                            total_gt_num[idx:idx + num_samples_per_shard],
                            total_dt_num[idx:idx + num_samples_per_shard],
                            ignores_per_sample[idx:idx + num_samples_per_shard],
                            gt_datas_part,
                            dt_datas_part,
                            dc_datas_part,
                            ignored_gts_part,
                            ignored_dets_part,
                            min_overlap=min_overlap,
                            thresholds=thresholds,
                            compute_angular_metrics=True
                        )
                        idx += num_samples_per_shard
                    # precision = tp / (tp + fp); recall = tp / (tp + fn)
                    for i in range(len(thresholds)):
                        recall[class_idx, difficulty_idx, thresh_idx, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
                        precision[class_idx, difficulty_idx, thresh_idx, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
        return {
            "recall": recall,
            "precision": precision,
        }
def prepare_data(self, gt_annos, dt_annos, class_idx, difficulty_idx):
"""Wrapper function for cleaning data before computing metrics.
"""
gt_list = []
dt_list = []
ignores_per_sample = []
ignored_gts, ignored_dets, dontcares = [], [], []
total_num_valid_gt = 0
for gt_anno, dt_anno in zip(gt_annos, dt_annos):
num_valid_gt, ignored_gt, ignored_det, ignored_bboxes = self.filter_data_fn(
gt_anno, dt_anno, class_idx, difficulty_idx, self.id_to_name
)
ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
ignored_dets.append(np.array(ignored_det, dtype=np.int64))
if len(ignored_bboxes) == 0:
ignored_bboxes = np.zeros((0, 4)).astype(np.float64)
else:
ignored_bboxes = np.stack(ignored_bboxes, 0).astype(np.float64)
ignores_per_sample.append(ignored_bboxes.shape[0])
dontcares.append(ignored_bboxes)
total_num_valid_gt += num_valid_gt
gt_list.append(
np.concatenate([
gt_anno["bbox"], gt_anno["rotation_y"][..., np.newaxis], gt_anno["alpha"][..., np.newaxis],
gt_anno["dimensions"]
], 1)
)
dt_list.append(
np.concatenate([
dt_anno["bbox"], dt_anno["rotation_y"][..., np.newaxis], dt_anno["alpha"][..., np.newaxis],
dt_anno["dimensions"], dt_anno["score"][..., np.newaxis]
], 1)
)
ignores_per_sample = np.stack(ignores_per_sample, axis=0)
return gt_list, dt_list, ignored_gts, ignored_dets, dontcares, ignores_per_sample, total_num_valid_gt
    def calculate_match_degree_sharded(self, gt_annos, dt_annos, metric):
        """Compute detection/GT overlap matrices, sharded for memory.

        Samples are processed in shards (via ``self.get_shards``); within each
        shard all boxes are concatenated and a single pairwise overlap matrix
        is computed, then sliced back into per-sample matrices.

        Parameters
        ----------
        gt_annos, dt_annos: list[dict]
            KITTI-format annotations, one dict per sample; must be equal length.
        metric: str
            'BEV_AP' (bird's-eye-view IoU on the x/z footprint) or
            'BOX3D_AP' (full 3D IoU). Anything else raises ValueError.

        Returns
        -------
        (overlaps, overlaps_by_shard, total_gt_num, total_dt_num) where
        ``overlaps`` is a per-sample list of (num_dt_i, num_gt_i) matrices and
        ``overlaps_by_shard`` keeps the raw concatenated per-shard matrices.
        """
        assert len(gt_annos) == len(dt_annos)
        # Per-sample box counts, used later to slice the shard matrices apart.
        total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
        total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
        overlaps_by_shard = []
        sample_idx = 0
        num_ground_truths = len(gt_annos)
        shards = self.get_shards(num_ground_truths, self.num_shards)
        for num_samples_per_shard in shards:
            gt_annos_part = gt_annos[sample_idx:sample_idx + num_samples_per_shard]
            dt_annos_part = dt_annos[sample_idx:sample_idx + num_samples_per_shard]
            if metric == 'BEV_AP':
                # BEV boxes: (x, z) location, (w-ish, l-ish) footprint dims, yaw.
                loc = np.concatenate([a["location"][:, [0, 2]] for a in gt_annos_part], 0)
                dims = np.concatenate([a["dimensions"][:, [0, 2]] for a in gt_annos_part], 0)
                rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
                gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)
                loc = np.concatenate([a["location"][:, [0, 2]] for a in dt_annos_part], 0)
                dims = np.concatenate([a["dimensions"][:, [0, 2]] for a in dt_annos_part], 0)
                rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
                dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)
                shard_match = self.bev_box_overlap(dt_boxes, gt_boxes).astype(np.float64)
            elif metric == "BOX3D_AP":
                # Full 7-DoF boxes: location(3), dimensions(3), yaw.
                loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
                dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
                rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
                gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)
                loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
                dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
                rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
                dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)
                shard_match = self.box_3d_overlap(dt_boxes, gt_boxes).astype(np.float64)
            else:
                raise ValueError("Unknown metric")
            # On each shard, we compute an IoU between all N predicted boxes and K GT boxes.
            # Shard overlap is a (N X K) array
            overlaps_by_shard.append(shard_match)
            sample_idx += num_samples_per_shard
        # Flatten into unsharded list
        overlaps = []
        sample_idx = 0
        for j, num_samples_per_shard in enumerate(shards):
            # Offsets into shard j's concatenated matrix (reset per shard).
            gt_num_idx, dt_num_idx = 0, 0
            for i in range(num_samples_per_shard):
                gt_box_num = total_gt_num[sample_idx + i]
                dt_box_num = total_dt_num[sample_idx + i]
                overlaps.append(
                    overlaps_by_shard[j][dt_num_idx:dt_num_idx + dt_box_num, gt_num_idx:gt_num_idx + gt_box_num, ]
                )
                gt_num_idx += gt_box_num
                dt_num_idx += dt_box_num
            sample_idx += num_samples_per_shard
        return overlaps, overlaps_by_shard, total_gt_num, total_dt_num
def bev_box_overlap(self, boxes, qboxes, criterion=-1):
"""Compute overlap in BEV"""
riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
return riou
def box_3d_overlap(self, boxes, qboxes, criterion=-1):
"""Compute 3D box IoU"""
# For scale cuboid: use x, y to calculate bev iou, for kitti, use x, z to calculate bev iou
rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]], qboxes[:, [0, 2, 3, 5, 6]], 2)
d3_box_overlap_kernel(boxes, qboxes, rinc, criterion, True)
return rinc
def clean_kitti_data(gt_anno, dt_anno, current_class, difficulty, id_to_name, difficulty_level_to_params=None):
    """Function for filtering KITTI data by difficulty and class.
    We filter with the following heuristics:
    If a ground truth matches the current class AND it falls below the difficulty
    threshold, we count it as a valid gt (append 0 in `ignored_gt` list).
    If a ground truth matches the current class but NOT the difficulty, OR it matches
    a class that is semantically too close to penalize (i.e. Van <-> Car),
    we ignore it (append 1 in `ignored_gt` list)
    If a ground truth doesn't belong to the current class, we ignore it (append -1 in `ignored_gt`)
    If a ground truth corresponds to a "DontCare" box, we append that box to the `ignored_bboxes` list.
    If a prediction matches the current class AND is above the minimum height threshold, we count it
    as a valid detection (append 0 in `ignored_dt`)
    If a prediction matches the current class AND it is too small, we ignore it (append 1 in `ignored_dt`)
    If a prediction doesn't belong to the class, we ignore it (append -1 in `ignored_dt`)
    Parameters
    ----------
    gt_anno: dict
        KITTI format ground truth. Please refer to note at the top for details on format.
    dt_anno: dict
        KITTI format prediction. Please refer to note at the top for details on format.
    current_class: int
        Class ID, as int
    difficulty: int
        Difficulty: easy=0, moderate=1, difficult=2
    id_to_name: dict
        Mapping from class ID (int) to string name
    difficulty_level_to_params: dict
        Maps "max_occlusion", "max_truncation" and "min_height" to per-difficulty
        sequences. Despite the None default, it is required whenever there are
        boxes to filter.
    Returns
    -------
    num_valid_gt: int
        Number of valid ground truths
    ignored_gt: list[int]
        List of length num GTs. Populated as described above.
    ignored_dt: list[int]
        List of length num detections. Populated as described above.
    ignored_bboxes: list[np.ndarray]
        List of np.ndarray corresponding to boxes that are to be ignored
    """
    ignored_bboxes, ignored_gt, ignored_dt = [], [], []
    current_cls_name = id_to_name[current_class].lower()
    num_gt = len(gt_anno["name"])
    num_dt = len(dt_anno["name"])
    # Difficulty thresholds are loop-invariant: resolve them once instead of
    # re-indexing the dict (and re-lowercasing literals) for every box. This
    # also fails fast when difficulty_level_to_params was left as None.
    max_occlusion = difficulty_level_to_params["max_occlusion"][difficulty]
    max_truncation = difficulty_level_to_params["max_truncation"][difficulty]
    min_height = difficulty_level_to_params["min_height"][difficulty]
    num_valid_gt = 0
    for i in range(num_gt):
        bbox = gt_anno["bbox"][i]
        gt_name = gt_anno["name"][i].lower()
        height = bbox[3] - bbox[1]
        # For KITTI, Van does not penalize car detections and person sitting
        # does not penalize pedestrian (valid_class == 0 means "neutral").
        if gt_name == current_cls_name:
            valid_class = 1
        elif current_cls_name == "pedestrian" and gt_name == "person_sitting":
            valid_class = 0
        elif current_cls_name == "car" and gt_name == "van":
            valid_class = 0
        else:
            valid_class = -1
        # Filter by occlusion/truncation/projected height for this difficulty.
        ignore_for_truncation_occlusion = (
            gt_anno["occluded"][i] > max_occlusion
            or gt_anno["truncated"][i] > max_truncation
            or height <= min_height
        )
        if valid_class == 1 and not ignore_for_truncation_occlusion:
            ignored_gt.append(0)
            num_valid_gt += 1
        elif valid_class == 0 or (ignore_for_truncation_occlusion and (valid_class == 1)):
            ignored_gt.append(1)
        else:
            ignored_gt.append(-1)
        # Track boxes that are in "dontcare" areas.
        if gt_name == "dontcare":
            ignored_bboxes.append(bbox)
    for i in range(num_dt):
        if dt_anno["name"][i].lower() == current_cls_name:
            valid_class = 1
        else:
            valid_class = -1
        height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
        # If a box is too small, ignore it (regardless of class, matching the
        # upstream KITTI evaluation code).
        if height < min_height:
            ignored_dt.append(1)
        elif valid_class == 1:
            ignored_dt.append(0)
        else:
            ignored_dt.append(-1)
    return num_valid_gt, ignored_gt, ignored_dt, ignored_bboxes
@numba.jit(nopython=True, fastmath=True)
def compute_threshold_jit(
    overlaps,
    gt_datas,
    dt_datas,
    ignored_gt,
    ignored_det,
    min_overlap,
    compute_fp=False,
):
    """Compute TP/FP statistics.
    Modified from https://github.com/sshaoehuai/PointRCNN/blob/master/tools/kitti_object_eval_python/eval.py

    Matching-only pass (intended for ``compute_fp=False``): greedily assigns
    each non-ignored GT the highest-scoring unassigned detection whose overlap
    exceeds ``min_overlap``, and collects the matched detections' scores.

    Parameters: ``overlaps`` is (num_dt, num_gt); ``dt_datas``' last column is
    the detection score; ``ignored_gt``/``ignored_det`` use the 0/1/-1
    convention from the data-cleaning step.

    Returns the array of scores of true-positive matches (used downstream to
    derive PR-curve score thresholds).
    """
    det_size = dt_datas.shape[0]
    gt_size = gt_datas.shape[0]
    dt_scores = dt_datas[:, -1]
    assigned_detection = [False] * det_size
    # Sentinel meaning "no detection matched yet" (below any real score).
    NO_DETECTION = np.finfo(np.float32).min
    tp, fp, fn = 0, 0, 0
    thresholds = np.zeros((gt_size, ))
    thresh_idx = 0
    for i in range(gt_size):
        if ignored_gt[i] == -1:
            continue
        det_idx = -1
        valid_detection = NO_DETECTION
        for j in range(det_size):
            if (ignored_det[j] == -1):
                continue
            if (assigned_detection[j]):
                continue
            overlap = overlaps[j, i]
            dt_score = dt_scores[j]
            # Not hit during TP/FP computation: prefer the highest-scoring
            # overlapping detection.
            if (not compute_fp and (overlap > min_overlap) and dt_score > valid_detection):
                assert not compute_fp, "For sanity, compute_fp shoudl be False if we are here"
                det_idx = j
                valid_detection = dt_score
        # No matched prediction found, valid GT
        if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
            fn += 1
        # Matched prediction, but NO valid GT or matched prediction is too small so we ignore it (NOT BECAUSE THE
        # CLASS IS WRONG)
        elif ((valid_detection != NO_DETECTION) and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
            assigned_detection[det_idx] = True
        # Matched prediction
        elif valid_detection != NO_DETECTION:
            tp += 1
            thresholds[thresh_idx] = dt_scores[det_idx]
            thresh_idx += 1
            assigned_detection[det_idx] = True
    return thresholds[:thresh_idx]
@numba.jit(nopython=True, fastmath=True)
def get_thresholds(scores, num_gt, num_sample_pts=41):
    """Get thresholds from a set of scores, up to num sample points

    Picks score values whose cumulative recall is (approximately) evenly
    spaced, so the PR curve is sampled at up to ``num_sample_pts`` points.

    Parameters
    ----------
    scores: np.ndarray
        Numpy array of scores for predictions. NOTE: sorted in place.
    num_gt: int
        Number of ground truths
    num_sample_pts: int, default: 41
        Max number of thresholds on PR curve
    Returns
    -------
    threshold: list
        Recall thresholds; at most num_sample_pts entries.
    """
    # In-place ascending sort, then view in descending score order.
    scores.sort()
    scores = scores[::-1]
    current_recall = 0
    thresholds = []
    for i, score in enumerate(scores):
        # Recall if we cut the curve just after / just before this score.
        l_recall = (i + 1) / num_gt
        if i < (len(scores) - 1):
            r_recall = (i + 2) / num_gt
        else:
            r_recall = l_recall
        # Skip while the next score is closer to the target recall step
        # (always keep the last score).
        if (((r_recall - current_recall) < (current_recall - l_recall)) and (i < (len(scores) - 1))):
            continue
        thresholds.append(score)
        current_recall += 1 / (num_sample_pts - 1.0)
    return thresholds
@numba.jit(nopython=True, fastmath=True)
def fused_compute_statistics(
    overlaps,
    pr,
    gt_nums,
    dt_nums,
    dc_nums,
    gt_datas,
    dt_datas,
    dontcares,
    ignored_gts,
    ignored_dets,
    min_overlap,
    thresholds,
    compute_angular_metrics=True,
):
    """Compute TP/FP statistics.
    Taken from https://github.com/sshaoehuai/PointRCNN/blob/master/tools/kitti_object_eval_python/eval.py
    without changes to avoid introducing errors

    Accumulates per-threshold statistics into ``pr`` (shape
    [num_thresholds, 8]) across all samples of one shard. ``overlaps`` is the
    shard's concatenated (num_dt, num_gt) matrix; ``gt_nums``/``dt_nums``/
    ``dc_nums`` give each sample's box counts so the flat arrays can be
    sliced back per sample. ``pr`` columns: 0=tp, 1=fp, 2=fn, 3=yaw error,
    4=alpha similarity, 5=match degree, 6=confidence error, 7=scale error.
    Mutates ``pr`` in place; returns nothing.
    """
    # Running offsets into the flat per-shard arrays.
    gt_num = 0
    dt_num = 0
    dc_num = 0
    for i in range(gt_nums.shape[0]):
        for t, thresh in enumerate(thresholds):
            # The key line that determines the ordering of the IoU matrix
            overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:gt_num + gt_nums[i]]
            gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
            dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
            ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
            ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
            dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
            tp, fp, fn, error_yaw, similarity, _, match_degree, confidence_error, scale_error = \
                compute_statistics_jit(
                    overlap,
                    gt_data,
                    dt_data,
                    ignored_gt,
                    ignored_det,
                    dontcare,
                    min_overlap=min_overlap,
                    thresh=thresh,
                    compute_fp=True,
                    compute_angular_metrics=compute_angular_metrics)
            pr[t, 0] += tp
            pr[t, 1] += fp
            pr[t, 2] += fn
            pr[t, 5] += match_degree
            pr[t, 6] += confidence_error
            pr[t, 7] += scale_error
            # -1 is the "no angular data for this slice" sentinel.
            if error_yaw != -1:
                pr[t, 3] += error_yaw
            if similarity != -1:
                pr[t, 4] += similarity
        gt_num += gt_nums[i]
        dt_num += dt_nums[i]
        dc_num += dc_nums[i]
@numba.jit(nopython=True, fastmath=True)
def compute_statistics_jit(
    overlaps,
    gt_datas,
    dt_datas,
    ignored_gt,
    ignored_det,
    ignored_bboxes,
    min_overlap,
    thresh=0.0,
    compute_fp=False,
    compute_angular_metrics=False
):
    """Compute TP/FP statistics.
    Modified from https://github.com/sshaoehuai/PointRCNN/blob/master/tools/kitti_object_eval_python/eval.py

    Matches detections to ground truths at one score threshold and returns
    (tp, fp, fn, error_yaw, similarity, tp_score_thresholds, match_degree,
    confidence_error, scale_error).

    Column layout (see prepare_data): gt rows are
    [bbox(4), rotation_y, alpha, dims(3)], dt rows additionally end with the
    score. ``ignored_gt``/``ignored_det`` use the 0=valid / 1=neutral /
    -1=irrelevant convention. With ``compute_fp=True`` detections scoring
    below ``thresh`` are excluded and unmatched valid detections count as FP.
    """
    det_size = dt_datas.shape[0]
    gt_size = gt_datas.shape[0]
    dt_scores = dt_datas[:, -1]
    dt_yaws = dt_datas[:, 4]
    gt_yaws = gt_datas[:, 4]
    dt_alphas = dt_datas[:, 5]
    gt_alphas = gt_datas[:, 5]
    dt_bboxes = dt_datas[:, :4]  # NOTE(review): unused below; kept from upstream
    gt_dimensions = gt_datas[:, 6:9]
    dt_dimensions = dt_datas[:, 6:9]
    assigned_detection = [False] * det_size
    ignored_threshold = [False] * det_size
    if compute_fp:
        # Detections below the current score threshold do not participate.
        for i in range(det_size):
            if (dt_scores[i] < thresh):
                ignored_threshold[i] = True
    NO_DETECTION = np.finfo(np.float32).min
    tp, fp, fn, error_yaw, similarity, match_degree, scale_error, confidence_error = 0, 0, 0, 0, 0, 0, 0, 0
    thresholds = np.zeros((gt_size, ))
    thresh_idx = 0
    delta_yaw = np.zeros((gt_size, ))
    delta_alpha = np.zeros((gt_size, ))
    delta_idx = 0
    for i in range(gt_size):
        if ignored_gt[i] == -1:
            continue
        det_idx = -1
        valid_detection = NO_DETECTION
        max_overlap = np.finfo(np.float32).min
        target_scale_iou = 0
        assigned_ignored_det = False
        for j in range(det_size):
            if (ignored_det[j] == -1):
                continue
            if (assigned_detection[j]):
                continue
            if (ignored_threshold[j]):
                continue
            overlap = overlaps[j, i]
            scale_iou = compute_scale_error(gt_dimensions[i, :], dt_dimensions[j, :])
            dt_score = dt_scores[j]
            # Not hit during TP/FP computation: score-greedy matching branch.
            if (not compute_fp and (overlap > min_overlap) and dt_score > valid_detection):
                assert not compute_fp, "For sanity, compute_fp shoudl be False if we are here"
                det_idx = j
                valid_detection = dt_score
            # FP mode: prefer the valid detection with the highest overlap;
            # a previously taken neutral (ignored==1) match can be displaced.
            elif (
                compute_fp and (overlap > min_overlap) and (overlap > max_overlap or assigned_ignored_det)
                and ignored_det[j] == 0
            ):
                max_overlap = overlap
                target_scale_iou = scale_iou
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = False
            # FP mode fallback: accept a neutral detection if nothing matched.
            elif (compute_fp and (overlap > min_overlap) and (valid_detection == NO_DETECTION) and ignored_det[j] == 1):
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = True
        # No matched prediction found, valid GT
        if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
            fn += 1
        # Matched prediction, but NO valid GT or matched prediction is too small so we ignore it (NOT BECAUSE THE
        # CLASS IS WRONG)
        elif ((valid_detection != NO_DETECTION) and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
            assigned_detection[det_idx] = True
        # Matched prediction
        elif valid_detection != NO_DETECTION:
            tp += 1
            match_degree += abs(max_overlap)
            scale_error += 1.0 - abs(target_scale_iou)
            confidence_error += -math.log(dt_scores[det_idx])
            # Build a big list of all thresholds associated to true positives
            thresholds[thresh_idx] = dt_scores[det_idx]
            thresh_idx += 1
            if compute_angular_metrics:
                delta_yaw[delta_idx] = abs(angle_diff(float(gt_yaws[i]), float(dt_yaws[det_idx]), 2 * np.pi))
                delta_alpha[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
                delta_idx += 1
            assigned_detection[det_idx] = True
    if compute_fp:
        # Every unassigned, non-ignored, above-threshold detection is a FP.
        for i in range(det_size):
            if (not (assigned_detection[i] or ignored_det[i] == -1 or ignored_det[i] == 1 or ignored_threshold[i])):
                fp += 1
        # NOTE(review): dead leftover from upstream, where `nstuff` counted
        # detections overlapping don't-care regions; here it is always 0.
        nstuff = 0
        fp -= nstuff
        if compute_angular_metrics:
            # Pack yaw/alpha deltas after `fp` leading zeros (FPs contribute
            # zero similarity), matching the upstream AOS-style averaging.
            tmp_yaw = np.zeros((fp + delta_idx, ))
            tmp_alpha = np.zeros((fp + delta_idx, ))
            for i in range(delta_idx):
                tmp_yaw[i + fp] = delta_yaw[i]
                tmp_alpha[i + fp] = (1.0 + np.cos(delta_alpha[i])) / 2.0
            if tp > 0 or fp > 0:
                error_yaw = np.sum(tmp_yaw)
                similarity = np.sum(tmp_alpha)
            else:
                # -1 signals "no angular data" to the caller.
                error_yaw = -1
                similarity = -1
    return tp, fp, fn, error_yaw, similarity, thresholds[:thresh_idx], match_degree, confidence_error, scale_error
@numba.jit(nopython=True)
def angle_diff(x, y, period):
    """Get the smallest angle difference between 2 angles: the angle from y to x.
    Parameters
    ----------
    x: float
        To angle.
    y: float
        From angle.
    period: float
        Periodicity in radians for assessing angle difference.
    Returns:
    ----------
    diff: float
        Signed smallest between-angle difference in range (-pi, pi).
    """
    # calculate angle difference, modulo to [0, 2*pi]
    diff = (x - y + period / 2) % period - period / 2
    # The modulo above already bounds diff to [-period/2, period/2); this
    # extra fold only triggers for periods larger than 2*pi.
    if diff > np.pi:
        diff = diff - (2 * np.pi)  # shift (pi, 2*pi] to (-pi, 0]
    return diff
@numba.jit(nopython=True, fastmath=True)
def compute_scale_error(gt_dimension, dt_dimension):
    """
    This method compares predictions to the ground truth in terms of scale.
    It is equivalent to intersection over union (IOU) between the two boxes in 3D,
    if we assume that the boxes are aligned, i.e. translation and rotation are considered identical.
    Parameters
    ----------
    gt_dimension: List[float]
        GT annotation sample.
    dt_dimension: List[float]
        Predicted sample.
    Returns: float
    ----------
    Scale IOU.
    """
    # Aligned boxes overlap over the element-wise minimum of each dimension.
    overlap_w = min(gt_dimension[0], dt_dimension[0])
    overlap_l = min(gt_dimension[1], dt_dimension[1])
    overlap_h = min(gt_dimension[2], dt_dimension[2])
    intersection = overlap_w * overlap_l * overlap_h
    volume_gt = gt_dimension[0] * gt_dimension[1] * gt_dimension[2]
    volume_dt = dt_dimension[0] * dt_dimension[1] * dt_dimension[2]
    union = volume_gt + volume_dt - intersection
    return intersection / union
|
{"hexsha": "224cb3c6a5a616de139d9e4392db1982a12feb09", "size": 44553, "ext": "py", "lang": "Python", "max_stars_repo_path": "tridet/evaluators/kitti_3d_evaluator.py", "max_stars_repo_name": "flipson/dd3d", "max_stars_repo_head_hexsha": "86d8660c29612b79836dad9b6c39972ac2ca1557", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 227, "max_stars_repo_stars_event_min_datetime": "2021-08-17T02:42:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:35:06.000Z", "max_issues_repo_path": "tridet/evaluators/kitti_3d_evaluator.py", "max_issues_repo_name": "flipson/dd3d", "max_issues_repo_head_hexsha": "86d8660c29612b79836dad9b6c39972ac2ca1557", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2021-08-20T06:51:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:47:18.000Z", "max_forks_repo_path": "tridet/evaluators/kitti_3d_evaluator.py", "max_forks_repo_name": "flipson/dd3d", "max_forks_repo_head_hexsha": "86d8660c29612b79836dad9b6c39972ac2ca1557", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2021-08-21T08:22:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T05:32:45.000Z", "avg_line_length": 40.5027272727, "max_line_length": 123, "alphanum_fraction": 0.6034610464, "include": true, "reason": "import numpy,import numba,from numba", "num_tokens": 10706}
|
"""
Tests module hierarchy
# Author: Vladan Lucic
# $Id$
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
__version__ = "$Revision$"
from copy import copy, deepcopy
import importlib
import unittest
import numpy
import numpy.testing as np_test
import scipy
from pyto.segmentation.grey import Grey
from pyto.segmentation.segment import Segment
from pyto.segmentation.hierarchy import Hierarchy
from pyto.segmentation.thresh_conn import ThreshConn
from pyto.segmentation.test import common as common
class TestHierarchy(np_test.TestCase):
    """
    Tests pyto.segmentation.hierarchy.Hierarchy via its ThreshConn subclass,
    using the shared fixtures from pyto.segmentation.test.common
    (image_1, bound_1, threshold_1, levelIds_1, data_1, ...).
    """
    def setUp(self):
        """
        Reload the shared fixture module and reset the one-shot warning flag.
        """
        importlib.reload(common) # to avoid problems when running multiple tests
        # set flag so that the warning for reordered segment ids is printed
        # only once
        self.reorder_warning = True
    def instantiateTC1(self):
        """
        Instantiates ThreshConn with image_1 and bound_1

        Builds the full threshold hierarchy by exhausting makeLevelsGen.
        """
        tc = ThreshConn()
        tc.setConnParam(boundary=common.bound_1, boundaryIds=[3, 4],
                        nBoundary=1, boundCount='at_least', mask=5)
        for vars in tc.makeLevelsGen(image=common.image_1,
                                     thresh=common.threshold_1, order=None):
            pass
        return tc
    def testPopLevel(self):
        """
        Tests popLevel(). Implicitly tests removeHigherLevels() and
        removeLowerLevels() for top and bottom levels.
        """
        # test making hierarchy
        tc = self.instantiateTC1()
        np_test.assert_equal(tc.levelIds, common.levelIds_1)
        # test poped top level
        top_seg = tc.popLevel(level='top')
        np_test.assert_equal(top_seg.ids, common.levelIds_1[-1])
        np_test.assert_equal(top_seg.threshold[common.levelIds_1[-1]], 7)
        np_test.assert_equal(top_seg.data[2:6, 1:9], common.data_1[7])
        np_test.assert_equal(
            top_seg.contacts.findSegments(boundaryIds=[3,4], nBoundary=1),
            14)
        # test remaining hierarchy after poping top level
        np_test.assert_equal(tc.levelIds, common.levelIds_1[:-1])
        np_test.assert_equal(tc.threshold, common.threshold_1[:-1])
        np_test.assert_equal(tc.thresh,
                             common.thresh_1[:-len(common.levelIds_1[-1])])
        np_test.assert_equal(top_seg.data[2:6, 1:9]>0, common.data_1[7]>0)
        # test poped bottom level
        seg = tc.popLevel(level='bottom')
        np_test.assert_equal(seg.ids, common.levelIds_1[0])
        np_test.assert_equal(seg.threshold, [0])
        np_test.assert_equal(seg.data[2:6, 1:9], common.data_1[0])
        np_test.assert_equal(
            seg.contacts.findSegments(boundaryIds=[3,4], nBoundary=1), None)
        # test remaining hierarchy after poping bottom level
        np_test.assert_equal(tc.levelIds, common.levelIds_1[1:-1])
        np_test.assert_equal(tc.threshold, common.threshold_1[1:-1])
        np_test.assert_equal(tc.thresh[1:], common.thresh_1[1:-1])
        np_test.assert_equal(top_seg.data[2:6, 1:9]>0, common.data_1[7]>0)
    def testExtractLevelsGen(self):
        """
        Tests extractLevelsGen()

        Extracts all levels in both directions ('<' low-to-high and '>'
        high-to-low), checking each extracted segment and the hierarchy
        remaining after each extraction.
        """
        # test making hierarchy
        tc = self.instantiateTC1()
        np_test.assert_equal(tc.levelIds, common.levelIds_1)
        np_test.assert_equal(tc.topLevel, len(common.threshold_1) - 1)
        ####################################################
        #
        # test extracting, order '<'
        #
        for seg, level in tc.extractLevelsGen(order='<'):
            #print 'level: ', level
            # test extracted segment: ids, data, contacts, threshold
            np_test.assert_equal(seg.ids, common.levelIds_1[level])
            try:
                np_test.assert_equal(seg.data[2:6, 1:9], common.data_1[level])
            except AssertionError:
                # id assignment may differ from the fixtures; segment shapes
                # must still agree
                np_test.assert_equal(seg.data[2:6, 1:9]>0,
                                     common.data_1[level]>0)
                if self.reorder_warning:
                    print(
                        "The exact id assignment is different from what it was "
                        + "when this test was written, but this really depends "
                        + "on internals of scipy.ndimage. Considering that the "
                        + "segments are correct, most likely everything is ok.")
                    self.reorder_warning = False
            np_test.assert_equal(
                seg.contacts.findSegments(boundaryIds=[3,4], nBoundary=1),
                common.levelIds_1[level])
            try:
                np_test.assert_equal(
                    seg.contacts.findSegments(boundaryIds=[3,4], nBoundary=2),
                    common.bound_ge2_1[level])
            except AssertionError:
                np_test.assert_equal(
                    len(seg.contacts.findSegments(boundaryIds=[3,4],
                                                  nBoundary=2)),
                    len(common.bound_ge2_1[level]))
                if self.reorder_warning:
                    print(
                        "The exact id assignment is different from what it was "
                        + "when this test was written, but this really depends "
                        + "on internals of scipy.ndimage. Considering that the "
                        + "segments are correct, most likely everything is ok.")
                    self.reorder_warning = False
            if len(seg.ids) > 0:
                np_test.assert_equal(seg.threshold[seg.ids],
                                     common.threshold_1[level])
            # test remaining hierarchy: topLevel, levelIds
            if tc.topLevel is not None:
                np_test.assert_equal(tc.topLevel,
                                     len(common.threshold_1) - 2 - level)
                np_test.assert_equal(tc.levelIds, common.levelIds_1[level+1:])
            else:
                np_test.assert_equal(tc.levelIds, [])
        ####################################################
        #
        # test extracting, order '>'
        #
        tc = self.instantiateTC1()
        np_test.assert_equal(tc.levelIds, common.levelIds_1)
        for seg, level in tc.extractLevelsGen(order='>'):
            #print 'level: ', level
            # test extracted segment: ids, data, contacts, threshold
            np_test.assert_equal(seg.ids, common.levelIds_1[level])
            try:
                np_test.assert_equal(seg.data[2:6, 1:9], common.data_1[level])
            except AssertionError:
                np_test.assert_equal(seg.data[2:6, 1:9]>0,
                                     common.data_1[level]>0)
                if self.reorder_warning:
                    print(
                        "The exact id assignment is different from what it was "
                        + "when this test was written, but this really depends "
                        + "on internals of scipy.ndimage. Considering that the "
                        + "segments are correct, most likely everything is ok.")
                    self.reorder_warning = False
            np_test.assert_equal(
                seg.contacts.findSegments(boundaryIds=[3,4], nBoundary=1),
                common.levelIds_1[level])
            try:
                np_test.assert_equal(
                    seg.contacts.findSegments(boundaryIds=[3,4], nBoundary=2),
                    common.bound_ge2_1[level])
            except AssertionError:
                np_test.assert_equal(
                    len(seg.contacts.findSegments(boundaryIds=[3,4],
                                                  nBoundary=2)),
                    len(common.bound_ge2_1[level]))
                if self.reorder_warning:
                    print(
                        "The exact id assignment is different from what it was "
                        + "when this test was written, but this really depends "
                        + "on internals of scipy.ndimage. Considering that the "
                        + "segments are correct, most likely everything is ok.")
                    self.reorder_warning = False
            if len(seg.ids) > 0:
                np_test.assert_equal(seg.threshold[seg.ids],
                                     common.threshold_1[level])
            # test remaining hierarchy: topLevel, levelIds
            if tc.topLevel is not None:
                np_test.assert_equal(tc.topLevel,
                                     level - 1)
                np_test.assert_equal(tc.levelIds, common.levelIds_1[:level])
            else:
                np_test.assert_equal(tc.levelIds, [])
    def testRemove(self):
        """
        Tests remove (implicitly tests removeData and removeIds).
        Here only removeing specifed ids is tested. Removing levels is
        implicitly tested in testPopLevel and testextractLevelGen.
        Note: this test will fail if for some (numerical) reason id assignment
        changes
        """
        # make hierarchy
        tc = self.instantiateTC1()
        ############################################
        #
        # remove id 6, new
        #
        # save original data and remove id
        level_ids_orig = deepcopy(tc.levelIds)
        data_orig = tc.data.copy()
        contacts_orig = deepcopy(tc.contacts)
        new_tc = tc.remove(ids=[6], new=True)
        # test unchanged hierarchy
        np_test.assert_equal(tc.levelIds, level_ids_orig)
        np_test.assert_equal(tc.getHigherId(3), 6)
        np_test.assert_equal(tc.getHigherId(5), 6)
        np_test.assert_equal(tc.data, data_orig)
        np_test.assert_equal(tc.contacts.segments, contacts_orig.segments)
        np_test.assert_equal(tc.contacts._n, contacts_orig._n)
        # test new hierarchy
        desired = level_ids_orig
        desired[3] = [7,8,9]
        np_test.assert_equal(new_tc.levelIds, desired)
        np_test.assert_equal(new_tc.getHigherId(3), 10)
        np_test.assert_equal(new_tc.getHigherId(5), 10)
        # removed segment's voxels are reassigned to its parent (id 10)
        desired = numpy.where(data_orig==6, 10, data_orig)
        np_test.assert_equal(new_tc.data, desired)
        desired = deepcopy(common.ids_1)
        desired.remove(6)
        np_test.assert_equal(new_tc.contacts.segments, desired)
        ############################################
        #
        # remove id 6, new=False
        #
        # save original data and remove id
        level_ids_orig = deepcopy(tc.levelIds)
        data_orig = tc.data.copy()
        contacts_orig = deepcopy(tc.contacts)
        tc.remove(ids=[6], new=False)
        # test changed hierarchy
        desired = level_ids_orig
        desired[3] = [7,8,9]
        np_test.assert_equal(tc.levelIds, desired)
        np_test.assert_equal(tc.getHigherId(3), 10)
        np_test.assert_equal(tc.getHigherId(5), 10)
        desired = numpy.where(data_orig==6, 10, data_orig)
        np_test.assert_equal(tc.data, desired)
        desired = deepcopy(common.ids_1)
        desired.remove(6)
        np_test.assert_equal(tc.contacts.segments, desired)
    def testKeep(self):
        """
        Tests keep

        Keeps ids 2-10 (new=True) and checks that the original hierarchy is
        untouched while the new one contains only the kept ids.
        """
        # make hierarchy
        tc = self.instantiateTC1()
        # save original data and remove ids
        level_ids_orig = deepcopy(tc.levelIds)
        data_orig = tc.data.copy()
        contacts_orig = deepcopy(tc.contacts)
        new_tc = tc.keep(ids=list(range(2,11)), new=True)
        # test unchanged hierarchy
        np_test.assert_equal(tc.levelIds, level_ids_orig)
        np_test.assert_equal(tc.getHigherId(3), 6)
        np_test.assert_equal(tc.getHigherId(7), 11)
        np_test.assert_equal(tc.data, data_orig)
        np_test.assert_equal(tc.contacts.segments, contacts_orig.segments)
        np_test.assert_equal(tc.contacts._n, contacts_orig._n)
        # test new hierarchy
        desired = level_ids_orig
        desired = [[], [2], [3,4,5], [6,7,8,9], [10], [], [], []]
        np_test.assert_equal(new_tc.levelIds, desired)
        np_test.assert_equal(new_tc.getHigherId(3), 6)
        np_test.assert_equal(new_tc.getHigherId(7), 0)
        desired = numpy.where(data_orig==1, 3, data_orig)
        desired = numpy.where(data_orig==11, 0, desired)
        desired = numpy.where(data_orig==12, 0, desired)
        desired = numpy.where(data_orig==13, 0, desired)
        desired = numpy.where(data_orig==14, 0, desired)
        np_test.assert_equal(new_tc.data, desired)
        np_test.assert_equal(new_tc.contacts.segments, new_tc.ids)
    def testAddLevel(self):
        """
        Tests addLevel()

        Adds a segment with a different inset as a new top level (ids shifted
        by 10) and checks data, inset, levelIds and parent links.
        """
        # one level data
        hi_data = numpy.array([[1, 0, 2],
                               [1, 2, 2]])
        hi = Hierarchy(data=hi_data)
        hi.inset = [slice(3,5), slice(2,5)]
        hi.setIds(ids=[1,2])
        hi.levelIds = [[], [1,2]]
        # segment (different inset)
        seg_data = numpy.array([[2, 1],
                                [1, 1]])
        seg = Segment(data=seg_data)
        seg.inset = [slice(3,5), slice(1,3)]
        # add
        hi.addLevel(segment=seg, level=2, check=False, shift=10)
        desired_data = numpy.array([[12, 1, 0, 2],
                                    [11, 1, 2, 2]])
        desired_inset = [slice(3,5), slice(1,5)]
        np_test.assert_equal(hi.data, desired_data)
        np_test.assert_equal(hi.inset, desired_inset)
        np_test.assert_equal(hi.levelIds, [[], [1,2], [11,12]])
        np_test.assert_equal(hi._higherIds, {1:11, 2:0})
if __name__ == '__main__':
    # Run this module's tests with verbose output when executed directly.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestHierarchy)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
{"hexsha": "784c9e703916a71a3c4e1d3a2c8a0fda76cf47a1", "size": 13811, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/pyto/segmentation/test/test_hierarchy.py", "max_stars_repo_name": "anmartinezs/pyseg_system", "max_stars_repo_head_hexsha": "5bb07c7901062452a34b73f376057cabc15a13c3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-01-08T01:33:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T00:25:34.000Z", "max_issues_repo_path": "code/pyto/segmentation/test/test_hierarchy.py", "max_issues_repo_name": "anmartinezs/pyseg_system", "max_issues_repo_head_hexsha": "5bb07c7901062452a34b73f376057cabc15a13c3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-12-19T19:34:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T10:11:28.000Z", "max_forks_repo_path": "code/pyto/segmentation/test/test_hierarchy.py", "max_forks_repo_name": "anmartinezs/pyseg_system", "max_forks_repo_head_hexsha": "5bb07c7901062452a34b73f376057cabc15a13c3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-30T13:12:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T18:12:10.000Z", "avg_line_length": 39.3475783476, "max_line_length": 80, "alphanum_fraction": 0.5676634567, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3059}
|
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50, decode_predictions, preprocess_input
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
# Pretrained ImageNet classifier used as the frozen attack target.
model = ResNet50(include_top=True, weights="imagenet")
model.trainable = False
def preprocess_img(image):
    """Turn an HxWx3 image array into a (1, 224, 224, 3) ResNet50 input batch."""
    tensor = tf.cast(image, tf.float32)
    tensor = tf.image.resize(tensor, (224, 224))
    tensor = preprocess_input(tensor)
    # Prepend the batch dimension expected by model.predict.
    return tensor[None, ...]
def get_label(logits):
    """Return the top-1 ImageNet prediction triple (class_id, class_name, score)."""
    top_predictions = decode_predictions(logits, top=1)
    return top_predictions[0][0]
# Load the input image, preprocess it, and record the clean top-1 prediction.
img = load_img('../assets/dog1.jpg', color_mode="rgb")
img = img_to_array(img)
img = preprocess_img(img)
preds = model.predict(img)
_, image_class, class_confidence = get_label(preds)
print (image_class, class_confidence)
def fgsm(x, y_adv, epsilon):
    """Fast Gradient Sign Method perturbation for input batch ``x``.

    Parameters
    ----------
    x: tf.Tensor
        Preprocessed input batch fed to the global ``model``.
    y_adv: tf.Tensor
        One-hot label of shape (1, num_classes) used as the loss target.
    epsilon: float
        Per-pixel perturbation magnitude.

    Returns
    -------
    tf.Tensor of the same shape as ``x``: ``epsilon * sign(d loss / d x)``.

    NOTE(review): adding this perturbation *increases* the cross-entropy
    w.r.t. ``y_adv``, i.e. it pushes the prediction away from that label;
    a targeted attack toward ``y_adv`` would subtract it — confirm intent.
    """
    loss_func = tf.keras.losses.CategoricalCrossentropy()
    with tf.GradientTape() as tape:
        # x is a plain tensor, not a Variable, so it must be watched explicitly.
        tape.watch(x)
        logits = model(x)
        loss = loss_func(y_adv, logits)
    # Querying the (non-persistent) tape once, after the context closes, is fine.
    grad = tape.gradient(loss, x)
    # Removed leftover debug `print(loss)` from the original implementation.
    return epsilon * tf.sign(grad)
# Target class index into the 1000 ImageNet logits, as a one-hot row vector.
y_adv_label = 10
y_adv = tf.one_hot(y_adv_label, preds.shape[-1])
y_adv = tf.reshape(y_adv, shape=[1, preds.shape[-1]])
# Craft the perturbation and visualise it (rescaled from [-1, 1] to [0, 1]).
noise = fgsm(img, y_adv, 0.1)
plt.imshow(noise[0] * 0.5 + 0.5)
plt.show()
# Apply the perturbation, clip to the model's input range, and visualise.
x_adv = img + noise
x_adv = tf.clip_by_value(x_adv, -1, 1)
plt.imshow(x_adv[0] * 0.5 + 0.5)
plt.show()
|
{"hexsha": "2ac713aeee0ed4a5e6ac1a690eac300ee7bf1d41", "size": 1501, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/main.py", "max_stars_repo_name": "rish-16/FGSM-Attacks", "max_stars_repo_head_hexsha": "edd084895565f3519e0b8e00c5806f7fa6f50142", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-02T18:35:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-10T13:37:10.000Z", "max_issues_repo_path": "src/main.py", "max_issues_repo_name": "rish-16/FGSM-Attacks", "max_issues_repo_head_hexsha": "edd084895565f3519e0b8e00c5806f7fa6f50142", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.py", "max_forks_repo_name": "rish-16/FGSM-Attacks", "max_forks_repo_head_hexsha": "edd084895565f3519e0b8e00c5806f7fa6f50142", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-10T13:37:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-25T13:27:28.000Z", "avg_line_length": 25.8793103448, "max_line_length": 97, "alphanum_fraction": 0.6848767488, "include": true, "reason": "import numpy", "num_tokens": 406}
|
import matplotlib
import numpy as np
import datetime
def polyfit(dates, levels, p):
    """Fit a degree-``p`` polynomial to water levels over time.

    Dates are converted to matplotlib float day numbers and shifted so the
    first date is zero, which keeps the fit numerically well conditioned.

    Returns:
        Tuple ``(poly, d0)`` where ``poly`` is a ``np.poly1d`` in shifted
        time and ``d0`` is the float day number of the first date.
    """
    times = matplotlib.dates.date2num(dates)
    origin = times[0]
    shifted = times - origin
    coeffs = np.polyfit(shifted, levels, p)
    return np.poly1d(coeffs), origin
def severity(stations):
    """Bucket stations into flood-risk bands by relative water level.

    Args:
        stations: iterable of indexable pairs where item 0 is the station
            (or its name) and item 1 is its relative water level.

    Returns:
        Tuple ``(severe, moderate, low, none)`` of lists of stations:
        severe for level >= 1.5, moderate for 1.0 <= level < 1.5,
        low for 0.5 <= level < 1.0, none for level < 0.5.
    """
    severe = []
    moderate = []
    low = []
    none = []
    for danger in stations:
        level = danger[1]
        # Ordered elif chain replaces the four overlapping-looking ifs;
        # the bands are disjoint so the result is unchanged. A value that
        # compares false to everything (e.g. NaN) lands in no bucket,
        # matching the original behavior.
        if level >= 1.5:
            severe.append(danger[0])
        elif level >= 1.0:
            moderate.append(danger[0])
        elif level >= 0.5:
            low.append(danger[0])
        elif level < 0.5:
            none.append(danger[0])
    return severe, moderate, low, none
def future_levels(dates, level, p):
    """Predict the water level two days from now by polynomial extrapolation.

    Fits a degree-``p`` polynomial to the historical ``(dates, level)``
    data (in shifted matplotlib day numbers) and evaluates it two days
    after the current wall-clock time.

    Args:
        dates: sequence of datetime objects.
        level: sequence of water levels matching ``dates``.
        p: polynomial degree.

    Returns:
        Predicted level at now + 2 days (float).
    """
    date = matplotlib.dates.date2num(dates)
    try:
        p_coeff = np.polyfit(date - date[0],level,p)
    # Narrowed from a bare ``except:`` — only the failure modes of the
    # fit itself (degenerate/empty data, bad degree) fall back to the
    # constant-zero polynomial; anything else propagates.
    except (IndexError, TypeError, ValueError, np.linalg.LinAlgError):
        p_coeff = 0
    poly = np.poly1d(p_coeff)
    test_date = matplotlib.dates.date2num(datetime.datetime.now() + datetime.timedelta(days = 2))
    return(poly(test_date - date[0]))
|
{"hexsha": "b0abca3934c11376dc18b206584baf9dd86aef2e", "size": 1114, "ext": "py", "lang": "Python", "max_stars_repo_path": "floodsystem/analysis.py", "max_stars_repo_name": "AyanShoaib/flood-warning-project-72", "max_stars_repo_head_hexsha": "84a9b24ad6d22b177d3d5d7c4a1c780ea7d48949", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "floodsystem/analysis.py", "max_issues_repo_name": "AyanShoaib/flood-warning-project-72", "max_issues_repo_head_hexsha": "84a9b24ad6d22b177d3d5d7c4a1c780ea7d48949", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "floodsystem/analysis.py", "max_forks_repo_name": "AyanShoaib/flood-warning-project-72", "max_forks_repo_head_hexsha": "84a9b24ad6d22b177d3d5d7c4a1c780ea7d48949", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-03T17:04:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T17:04:14.000Z", "avg_line_length": 23.7021276596, "max_line_length": 97, "alphanum_fraction": 0.5700179533, "include": true, "reason": "import numpy", "num_tokens": 339}
|
[STATEMENT]
lemma n_meet_L_below:
"n(x) \<sqinter> L \<le> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n x \<sqinter> L \<le> x
[PROOF STEP]
by (meson inf.coboundedI1 inf.coboundedI2 le_supI2 sup.cobounded1 top_right_mult_increasing n_less_eq_char)
|
{"llama_tokens": 122, "file": "Correctness_Algebras_N_Algebras", "length": 1}
|
# Damped spring-mass system solved two ways — analytically via the matrix
# exponential and numerically with DifferentialEquations.jl — then compared
# in phase space (p1) and displacement-vs-time (p2).
using DifferentialEquations, LinearAlgebra, Plots; pyplot()
# Spring constant, damping coefficient, and mass.
k, b, M = 1.2, 0.3, 2.0
# Companion matrix for the state [displacement, velocity].
A = [0 1;
    -k/M -b/M]
initX = [8., 0.0]
tEnd = 50.0
tRange = 0:0.1:tEnd
# Closed-form solution x(t) = exp(A*t) * x0 sampled on tRange.
manualSol = [exp(A*t)*initX for t in tRange]
# Same linear system handed to the ODE solver (A passed as the parameter).
linearRHS(x,Amat,t) = Amat*x
prob = ODEProblem(linearRHS, initX, (0,tEnd), A)
sol = solve(prob)
# Phase-space comparison: manual curve vs. solver points.
p1 = plot(first.(manualSol), last.(manualSol),
	c=:blue, label="Manual trajectory")
p1 = scatter!(first.(sol.u), last.(sol.u),
	c=:red, ms = 5, msw=0, label="DiffEq package")
p1 = scatter!([initX[1]], [initX[2]],
	c=:black, ms=10, label="Initial state", xlims=(-7,9), ylims=(-9,7),
	ratio=:equal, xlabel="Displacement", ylabel="Velocity")
# Displacement-vs-time comparison.
p2 = plot(tRange, first.(manualSol),
	c=:blue, label="Manual trajectory")
p2 = scatter!(sol.t, first.(sol.u),
	c=:red, ms = 5, msw=0, label="DiffEq package")
p2 = scatter!([0], [initX[1]],
	c=:black, ms=10, label="Initial state", xlabel="Time",
	ylabel="Displacement")
plot(p1, p2, size=(800,400), legend=:topright)
|
{"hexsha": "fbafddeda37ec3baca0923de0ba1116cb2162bd9", "size": 959, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "10_chapter/springMass.jl", "max_stars_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_stars_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 988, "max_stars_repo_stars_event_min_datetime": "2018-06-21T00:44:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T01:37:47.000Z", "max_issues_repo_path": "10_chapter/springMass.jl", "max_issues_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_issues_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2019-02-20T05:06:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-23T16:53:08.000Z", "max_forks_repo_path": "10_chapter/springMass.jl", "max_forks_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_forks_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 264, "max_forks_repo_forks_event_min_datetime": "2018-07-31T03:11:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T16:12:13.000Z", "avg_line_length": 30.935483871, "max_line_length": 68, "alphanum_fraction": 0.6412930136, "num_tokens": 362}
|
Require Import Coq.ZArith.ZArith.
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.montgomery64_2e256m2e32m977_4limbs.Synthesis.
Local Open Scope Z_scope.
(* TODO : change this to field once field isomorphism happens *)
Definition nonzero :
{ nonzero : feBW_small -> BoundedWord.BoundedWord 1 adjusted_bitwidth bound1
| forall a, (BoundedWord.BoundedWordToZ _ _ _ (nonzero a) =? 0) = (if Decidable.dec (phiM_small a = F.of_Z m 0) then true else false) }.
Proof.
Set Ltac Profiling.
Time synthesize_nonzero ().
Show Ltac Profile.
Time Defined.
Print Assumptions nonzero.
|
{"author": "anonymous-code-submission-01", "repo": "sp2019-54-code", "sha": "8867f5bed0821415ec99f593b1d61f715ed4f789", "save_path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code", "path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code/sp2019-54-code-8867f5bed0821415ec99f593b1d61f715ed4f789/src/Specific/montgomery64_2e256m2e32m977_4limbs/fenz.v"}
|
import os
import numpy as np
import flopy
from ci_framework import base_test_dir, FlopyTestSetup
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)
# Executables these tests need; if either is missing on PATH, models are
# built and written but not run (see the ``run`` checks below).
exe_names = {"mf6": "mf6", "mp7": "mp7"}
run = True
for key in exe_names.keys():
    v = flopy.which(exe_names[key])
    if v is None:
        run = False
        break
# Base name shared by all simulations/output files in this module.
nm = "ex01b_mf6"
# model data
nper, nstp, perlen, tsmult = 1, 1, 1.0, 1.0
nlay, nrow, ncol = 3, 21, 20
delr = delc = 500.0
top = 400.0
botm = [220.0, 200.0, 0.0]
laytyp = [1, 0, 0]
kh = [50.0, 0.01, 200.0]
kv = [10.0, 0.01, 20.0]
wel_loc = (2, 10, 9)
wel_q = -150000.0
rch = 0.005
riv_h = 320.0
riv_z = 317.0
riv_c = 1.0e5
# particle data
# Zone 2 marks the well's (row, col) cell in the bottom layer's zone array.
zone3 = np.ones((nrow, ncol), dtype=np.int32)
zone3[wel_loc[1:]] = 2
zones = [1, 1, zone3]
defaultiface6 = {"RCH": 6, "EVT": 6}
# 3x3 grid of local (x, y) cell offsets on the top face (z = 1.0), used by
# the face-particle tests.
local = np.array(
    [
        [0.1666666667e00, 0.1666666667e00, 1.0],
        [0.5000000000e00, 0.1666666667e00, 1.0],
        [0.8333333333e00, 0.1666666667e00, 1.0],
        [0.1666666667e00, 0.5000000000e00, 1.0],
        [0.5000000000e00, 0.5000000000e00, 1.0],
        [0.8333333333e00, 0.5000000000e00, 1.0],
        [0.1666666667e00, 0.8333333333e00, 1.0],
        [0.5000000000e00, 0.8333333333e00, 1.0],
        [0.8333333333e00, 0.8333333333e00, 1.0],
    ]
)
def test_default_modpath():
    """Build the MF6 model and run MODPATH 7 with a default particle group."""
    workspace = f"{base_dir}_test_default_modpath"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=workspace)
    build_mf6(workspace)
    group = flopy.modpath.ParticleGroup(particlegroupname="DEFAULT")
    build_modpath(workspace, f"{nm}_mp_default", group)
def test_faceparticles_is1():
    """Top-face particles listed per node with explicit local x/y offsets.

    One particle per entry of ``local`` (3x3 face grid) on every top-layer
    cell, then compares this run's endpoint file against any other
    face-based endpoint files in the workspace (excluding _t2a).
    NOTE(review): each test uses its own workspace directory, so ``epf``
    is likely empty here — confirm whether cross-test comparison was
    intended.
    """
    model_ws = f"{base_dir}_test_faceparticles_is1"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    build_mf6(model_ws)
    mpnam = f"{nm}_mp_face_t1node"
    locs = []
    localx = []
    localy = []
    for i in range(nrow):
        for j in range(ncol):
            node = i * ncol + j
            for xloc, yloc, zloc in local:
                locs.append(node)
                localx.append(xloc)
                localy.append(yloc)
    p = flopy.modpath.ParticleData(
        locs, structured=False, drape=0, localx=localx, localy=localy, localz=1
    )
    fpth = f"{mpnam}.sloc"
    pg = flopy.modpath.ParticleGroup(
        particlegroupname="T1NODEPG", particledata=p, filename=fpth
    )
    build_modpath(model_ws, mpnam, pg)
    # set base file name
    fpth0 = os.path.join(model_ws, "ex01b_mf6_mp_face_t1node.mpend")
    # get list of node endpath files
    epf = [
        os.path.join(model_ws, name)
        for name in os.listdir(model_ws)
        if ".mpend" in name and "_face_" in name and "_t2a" not in name
    ]
    epf.remove(fpth0)
    endpoint_compare(fpth0, epf)
    return
def test_facenode_is3():
    """Top-face particles via a FaceDataType template applied to all nodes.

    Subdivides face 6 (the top face) into a 3x3 grid for every top-layer
    node, equivalent to the explicit offsets in test_faceparticles_is1.
    """
    # BUG FIX: workspace said "_test_facenode_is2", colliding with a
    # differently named test; it now matches this function's name.
    model_ws = f"{base_dir}_test_facenode_is3"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    build_mf6(model_ws)
    mpnam = f"{nm}_mp_face_t3node"
    locs = []
    for i in range(nrow):
        for j in range(ncol):
            node = i * ncol + j
            locs.append(node)
    sd = flopy.modpath.FaceDataType(
        drape=0,
        verticaldivisions1=0,
        horizontaldivisions1=0,
        verticaldivisions2=0,
        horizontaldivisions2=0,
        verticaldivisions3=0,
        horizontaldivisions3=0,
        verticaldivisions4=0,
        horizontaldivisions4=0,
        rowdivisions5=0,
        columndivisions5=0,
        rowdivisions6=3,
        columndivisions6=3,
    )
    p = flopy.modpath.NodeParticleData(subdivisiondata=sd, nodes=locs)
    fpth = f"{mpnam}.sloc"
    pg = flopy.modpath.ParticleGroupNodeTemplate(
        particlegroupname="T3NODEPG", particledata=p, filename=fpth
    )
    build_modpath(model_ws, mpnam, pg)
    return
def test_facenode_is3a():
    """Same face template as test_facenode_is3, but the node list is split
    into two groups (rows 0-10 and rows 11+) sharing one subdivision
    template each."""
    model_ws = f"{base_dir}_test_facenode_is3a"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    build_mf6(model_ws)
    mpnam = f"{nm}_mp_face_t3anode"
    locsa = []
    for i in range(11):
        for j in range(ncol):
            node = i * ncol + j
            locsa.append(node)
    locsb = []
    for i in range(11, nrow):
        for j in range(ncol):
            node = i * ncol + j
            locsb.append(node)
    sd = flopy.modpath.FaceDataType(
        drape=0,
        verticaldivisions1=0,
        horizontaldivisions1=0,
        verticaldivisions2=0,
        horizontaldivisions2=0,
        verticaldivisions3=0,
        horizontaldivisions3=0,
        verticaldivisions4=0,
        horizontaldivisions4=0,
        rowdivisions5=0,
        columndivisions5=0,
        rowdivisions6=3,
        columndivisions6=3,
    )
    p = flopy.modpath.NodeParticleData(
        subdivisiondata=[sd, sd], nodes=[locsa, locsb]
    )
    fpth = f"{mpnam}.sloc"
    pg = flopy.modpath.ParticleGroupNodeTemplate(
        particlegroupname="T3ANODEPG", particledata=p, filename=fpth
    )
    build_modpath(model_ws, mpnam, pg)
    return
def test_facenode_is2a():
    """Face template applied to two (layer, row, col) regions via
    LRCParticleData instead of explicit node lists."""
    model_ws = f"{base_dir}_test_facenode_is2a"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    build_mf6(model_ws)
    mpnam = f"{nm}_mp_face_t2anode"
    # Regions as [min_layer, min_row, min_col, max_layer, max_row, max_col].
    locsa = [[0, 0, 0, 0, 10, ncol - 1]]
    locsb = [[0, 11, 0, 0, nrow - 1, ncol - 1]]
    sd = flopy.modpath.FaceDataType(
        drape=0,
        verticaldivisions1=0,
        horizontaldivisions1=0,
        verticaldivisions2=0,
        horizontaldivisions2=0,
        verticaldivisions3=0,
        horizontaldivisions3=0,
        verticaldivisions4=0,
        horizontaldivisions4=0,
        rowdivisions5=0,
        columndivisions5=0,
        rowdivisions6=3,
        columndivisions6=3,
    )
    p = flopy.modpath.LRCParticleData(
        subdivisiondata=[sd, sd], lrcregions=[locsa, locsb]
    )
    fpth = f"{mpnam}.sloc"
    pg = flopy.modpath.ParticleGroupNodeTemplate(
        particlegroupname="T2ANODEPG", particledata=p, filename=fpth
    )
    build_modpath(model_ws, mpnam, pg)
    return
def test_cellparticles_is1():
    """One cell-centered particle per node (all layers), listed explicitly.

    Afterwards compares this run's endpoint file against other cell-based
    endpoint files in the workspace (excluding _t2a).
    NOTE(review): each test uses its own workspace directory, so ``epf``
    is likely empty here — confirm whether cross-test comparison was
    intended.
    """
    model_ws = f"{base_dir}_test_cellparticles_is1"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    build_mf6(model_ws)
    mpnam = f"{nm}_mp_cell_t1node"
    locs = []
    for k in range(nlay):
        for i in range(nrow):
            for j in range(ncol):
                node = k * nrow * ncol + i * ncol + j
                locs.append(node)
    p = flopy.modpath.ParticleData(
        locs, structured=False, drape=0, localx=0.5, localy=0.5, localz=0.5
    )
    fpth = f"{mpnam}.sloc"
    pg = flopy.modpath.ParticleGroup(
        particlegroupname="T1NODEPG", particledata=p, filename=fpth
    )
    build_modpath(model_ws, mpnam, pg)
    # set base file name
    fpth0 = os.path.join(model_ws, "ex01b_mf6_mp_cell_t1node.mpend")
    # get list of node endpath files
    epf = [
        os.path.join(model_ws, name)
        for name in os.listdir(model_ws)
        if ".mpend" in name and "_cell_" in name and "_t2a" not in name
    ]
    epf.remove(fpth0)
    endpoint_compare(fpth0, epf)
    return
def test_cellparticleskij_is1():
    """Cell-centered particles specified as structured (layer, row, col)
    tuples rather than flat node numbers."""
    workspace = f"{base_dir}_test_cellparticleskij_is1"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=workspace)
    build_mf6(workspace)
    mpnam = f"{nm}_mp_cell_t1kij"
    cells = [
        (k, i, j)
        for k in range(nlay)
        for i in range(nrow)
        for j in range(ncol)
    ]
    pdata = flopy.modpath.ParticleData(
        cells, structured=True, drape=0, localx=0.5, localy=0.5, localz=0.5
    )
    group = flopy.modpath.ParticleGroup(
        particlegroupname="T1KIJPG", particledata=pdata, filename=f"{mpnam}.sloc"
    )
    build_modpath(workspace, mpnam, group)
def test_cellnode_is3():
    """One cell-centered particle per node via a CellDataType template."""
    model_ws = f"{base_dir}_test_cellnode_is3"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    build_mf6(model_ws)
    mpnam = f"{nm}_mp_cell_t3node"
    locs = []
    for k in range(nlay):
        for i in range(nrow):
            for j in range(ncol):
                node = k * nrow * ncol + i * ncol + j
                locs.append(node)
    sd = flopy.modpath.CellDataType(
        drape=0,
        columncelldivisions=1,
        rowcelldivisions=1,
        layercelldivisions=1,
    )
    p = flopy.modpath.NodeParticleData(subdivisiondata=sd, nodes=locs)
    fpth = f"{mpnam}.sloc"
    pg = flopy.modpath.ParticleGroupNodeTemplate(
        particlegroupname="T3CELLPG", particledata=p, filename=fpth
    )
    build_modpath(model_ws, mpnam, pg)
    return
def test_cellnode_is3a():
    """Same cell template as test_cellnode_is3, but nodes split per layer
    into three groups sharing one subdivision template each."""
    model_ws = f"{base_dir}_test_cellnode_is3a"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    build_mf6(model_ws)
    mpnam = f"{nm}_mp_cell_t3anode"
    locsa = []
    for k in range(1):
        for i in range(nrow):
            for j in range(ncol):
                node = k * nrow * ncol + i * ncol + j
                locsa.append(node)
    locsb = []
    for k in range(1, 2):
        for i in range(nrow):
            for j in range(ncol):
                node = k * nrow * ncol + i * ncol + j
                locsb.append(node)
    locsc = []
    for k in range(2, nlay):
        for i in range(nrow):
            for j in range(ncol):
                node = k * nrow * ncol + i * ncol + j
                locsc.append(node)
    sd = flopy.modpath.CellDataType(
        drape=0,
        columncelldivisions=1,
        rowcelldivisions=1,
        layercelldivisions=1,
    )
    p = flopy.modpath.NodeParticleData(
        subdivisiondata=[sd, sd, sd], nodes=[locsa, locsb, locsc]
    )
    fpth = f"{mpnam}.sloc"
    pg = flopy.modpath.ParticleGroupNodeTemplate(
        particlegroupname="T3ACELLPG", particledata=p, filename=fpth
    )
    build_modpath(model_ws, mpnam, pg)
    return
def test_cellnode_is2a():
    """Cell template applied to (layer, row, col) regions: layers 0-1 in
    one group, layer 2 in another."""
    model_ws = f"{base_dir}_test_cellnode_is2a"
    test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
    build_mf6(model_ws)
    mpnam = f"{nm}_mp_cell_t2anode"
    # Regions as [min_layer, min_row, min_col, max_layer, max_row, max_col].
    locsa = [
        [0, 0, 0, 0, nrow - 1, ncol - 1],
        [1, 0, 0, 1, nrow - 1, ncol - 1],
    ]
    locsb = [[2, 0, 0, 2, nrow - 1, ncol - 1]]
    sd = flopy.modpath.CellDataType(
        drape=0,
        columncelldivisions=1,
        rowcelldivisions=1,
        layercelldivisions=1,
    )
    p = flopy.modpath.LRCParticleData(
        subdivisiondata=[sd, sd], lrcregions=[locsa, locsb]
    )
    fpth = f"{mpnam}.sloc"
    pg = flopy.modpath.ParticleGroupLRCTemplate(
        particlegroupname="T2ACELLPG", particledata=p, filename=fpth
    )
    build_modpath(model_ws, mpnam, pg)
    return
def endpoint_compare(fpth0, epf):
    """Assert that every MODPATH endpoint file in ``epf`` matches ``fpth0``.

    For each file, compares the particle-id maximum, max travel time, and
    max time exactly, and the start/end coordinates (x, y, z, x0, y0, z0)
    within ``np.allclose`` tolerance.

    Args:
        fpth0: path to the reference .mpend file.
        epf: list of paths to .mpend files to compare against it.
    """
    # get base endpoint data
    e = flopy.utils.EndpointFile(fpth0)
    maxtime0 = e.get_maxtime()
    maxid0 = e.get_maxid()
    maxtravel0 = e.get_maxtraveltime()
    e0 = e.get_alldata()
    # Coordinates are compared as float32 records to sidestep tiny
    # formatting differences between files.
    names = ["x", "y", "z", "x0", "y0", "z0"]
    dtype = np.dtype(
        [
            ("x", np.float32),
            ("y", np.float32),
            ("z", np.float32),
            ("x0", np.float32),
            ("y0", np.float32),
            ("z0", np.float32),
        ]
    )
    t0 = np.rec.fromarrays((e0[name] for name in names), dtype=dtype)
    for fpth1 in epf:
        e = flopy.utils.EndpointFile(fpth1)
        maxtime1 = e.get_maxtime()
        maxid1 = e.get_maxid()
        maxtravel1 = e.get_maxtraveltime()
        e1 = e.get_alldata()
        # check maxid
        msg = (
            f"endpoint maxid ({maxid0}) in {os.path.basename(fpth0)} "
            f"are not equal to the endpoint maxid ({maxid1}) "
            f"in {os.path.basename(fpth1)}"
        )
        assert maxid0 == maxid1, msg
        # check maxtravel
        msg = (
            f"endpoint maxtraveltime ({maxtravel0}) "
            f"in {os.path.basename(fpth0)} are not equal to the endpoint "
            f"maxtraveltime ({maxtravel1}) in {os.path.basename(fpth1)}"
        )
        assert maxtravel0 == maxtravel1, msg
        # check maxtimes
        msg = (
            f"endpoint maxtime ({maxtime0}) in {os.path.basename(fpth0)} "
            f"are not equal to the endpoint maxtime ({maxtime1}) "
            f"in {os.path.basename(fpth1)}"
        )
        assert maxtime0 == maxtime1, msg
        # check that endpoint data are approximately the same
        t1 = np.rec.fromarrays((e1[name] for name in names), dtype=dtype)
        for name in names:
            msg = (
                f"endpoints in {os.path.basename(fpth0)} are not equal "
                f"(within 1e-5) to the endpoints in {os.path.basename(fpth1)} "
                f"for column {name}."
            )
            assert np.allclose(t0[name], t1[name]), msg
    return
def build_mf6(ws):
    """
    MODPATH 7 example 1 for MODFLOW 6.

    Builds the three-layer MF6 flow model (recharge, one well, a river
    along the last column), writes it to ``ws``, and runs it when the
    mf6 executable is available.

    Args:
        ws: workspace directory the simulation files are written to.
    """
    exe_name = exe_names["mf6"]
    # Create the Flopy simulation object
    # BUG FIX: previously hard-coded exe_name="mf6", ignoring the
    # configured exe_names lookup above.
    sim = flopy.mf6.MFSimulation(
        sim_name=nm, exe_name=exe_name, version="mf6", sim_ws=ws
    )
    # Create the Flopy temporal discretization object
    pd = (perlen, nstp, tsmult)
    tdis = flopy.mf6.modflow.mftdis.ModflowTdis(
        sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd]
    )
    # Create the Flopy groundwater flow (gwf) model object
    model_nam_file = f"{nm}.nam"
    gwf = flopy.mf6.ModflowGwf(
        sim, modelname=nm, model_nam_file=model_nam_file, save_flows=True
    )
    # Create the Flopy iterative model solver (ims) Package object
    ims = flopy.mf6.modflow.mfims.ModflowIms(
        sim, pname="ims", complexity="SIMPLE"
    )
    # create gwf file
    dis = flopy.mf6.modflow.mfgwfdis.ModflowGwfdis(
        gwf,
        pname="dis",
        nlay=nlay,
        nrow=nrow,
        ncol=ncol,
        length_units="FEET",
        delr=delr,
        delc=delc,
        top=top,
        botm=botm,
    )
    # Create the initial conditions package
    ic = flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic", strt=top)
    # Create the node property flow package
    npf = flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf(
        gwf, pname="npf", icelltype=laytyp, k=kh, k33=kv
    )
    # recharge
    flopy.mf6.modflow.mfgwfrcha.ModflowGwfrcha(gwf, recharge=rch)
    # wel
    wd = [(wel_loc, wel_q)]
    flopy.mf6.modflow.mfgwfwel.ModflowGwfwel(
        gwf, maxbound=1, stress_period_data={0: wd}
    )
    # river boundary along the last column of the top layer
    rd = []
    for i in range(nrow):
        rd.append([(0, i, ncol - 1), riv_h, riv_c, riv_z])
    flopy.mf6.modflow.mfgwfriv.ModflowGwfriv(gwf, stress_period_data={0: rd})
    # Create the output control package
    headfile = f"{nm}.hds"
    head_record = [headfile]
    budgetfile = f"{nm}.cbb"
    budget_record = [budgetfile]
    saverecord = [("HEAD", "ALL"), ("BUDGET", "ALL")]
    oc = flopy.mf6.modflow.mfgwfoc.ModflowGwfoc(
        gwf,
        pname="oc",
        saverecord=saverecord,
        head_filerecord=head_record,
        budget_filerecord=budget_record,
    )
    # Write the datasets
    sim.write_simulation()
    # Run the simulation
    if run:
        success, buff = sim.run_simulation()
        assert success, "mf6 model did not run"
def build_modpath(ws, mpn, particlegroups):
    """Create and (optionally) run a forward-endpoint MODPATH 7 model on
    top of the MF6 model previously built in ``ws``.

    Args:
        ws: workspace containing the MF6 simulation.
        mpn: name for the MODPATH model/files.
        particlegroups: particle group(s) to track.
    """
    # load the MODFLOW 6 model
    sim = flopy.mf6.MFSimulation.load("mf6mod", "mf6", "mf6", ws)
    gwf = sim.get_model(nm)
    # create modpath files
    exe_name = exe_names["mp7"]
    mp = flopy.modpath.Modpath7(
        modelname=mpn, flowmodel=gwf, exe_name=exe_name, model_ws=ws
    )
    flopy.modpath.Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface6)
    flopy.modpath.Modpath7Sim(
        mp,
        simulationtype="endpoint",
        trackingdirection="forward",
        weaksinkoption="pass_through",
        weaksourceoption="pass_through",
        referencetime=0.0,
        stoptimeoption="extend",
        zonedataoption="on",
        zones=zones,
        particlegroups=particlegroups,
    )
    # write modpath datasets
    mp.write_input()
    # run modpath
    if run:
        success, buff = mp.run_model()
        assert success, f"mp7 model ({mp.name}) did not run"
    return
if __name__ == "__main__":
    # BUG FIX: the previous version called test_mf6(),
    # test_face_endpoint_output(), and test_cell_endpoint_output(), none
    # of which exist in this module (NameError); it also never called
    # test_facenode_is2a(). Endpoint comparisons run inside the
    # *_is1 tests via endpoint_compare().
    # test default modpath
    test_default_modpath()
    # build face particles
    test_faceparticles_is1()
    test_facenode_is3()
    test_facenode_is3a()
    test_facenode_is2a()
    # build cell particles
    test_cellparticles_is1()
    test_cellparticleskij_is1()
    test_cellnode_is2a()
    test_cellnode_is3()
    test_cellnode_is3a()
|
{"hexsha": "0acd615fc693358b46d22975eca48cecab7e07b6", "size": 16515, "ext": "py", "lang": "Python", "max_stars_repo_path": "autotest/t058_test_mp7.py", "max_stars_repo_name": "scottrp/flopy", "max_stars_repo_head_hexsha": "af10ab377f48b41f00842cc2bfa08e8b4fc36a62", "max_stars_repo_licenses": ["CC0-1.0", "BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-22T20:04:36.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-22T20:04:36.000Z", "max_issues_repo_path": "autotest/t058_test_mp7.py", "max_issues_repo_name": "jlarsen-usgs/flopy", "max_issues_repo_head_hexsha": "6db70ac0b3da282e2e697909368d3204747bf2ca", "max_issues_repo_licenses": ["CC0-1.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autotest/t058_test_mp7.py", "max_forks_repo_name": "jlarsen-usgs/flopy", "max_forks_repo_head_hexsha": "6db70ac0b3da282e2e697909368d3204747bf2ca", "max_forks_repo_licenses": ["CC0-1.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4251290878, "max_line_length": 79, "alphanum_fraction": 0.6106569785, "include": true, "reason": "import numpy", "num_tokens": 5184}
|
"""
{This script carries out an MCMC analysis to parametrize the ECO SMHM}
"""
# Libs
from cosmo_utils.utils import work_paths as cwpaths
from chainconsumer import ChainConsumer
import matplotlib.pyplot as plt
from matplotlib import rc
import pandas as pd
import numpy as np
__author__ = '{Mehnaaz Asad}'
# Resolve project directory paths from the cookiecutter layout.
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_interim = dict_of_paths['int_dir']
path_to_figures = dict_of_paths['plot_dir']
# Global matplotlib styling for the corner plots.
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=20)
rc('text', usetex=True)
rc('text.latex', preamble=[r"\usepackage{amsmath}"])
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
# Analysis configuration: survey, quenching model, and chain geometry.
survey = 'eco'
file_ver = 2.0
quenching = 'hybrid'
mf_type = 'both'
nwalkers = 260  # emcee walkers per chain
nsteps = 1000  # steps per walker in the raw chain
burnin = 200  # leading steps discarded from each walker
ndim = 4  # quenching-model parameters per sample
run_smf = 23
# run_bmf =
def _load_colour_chain(chain_fname):
    """Read one raw emcee colour chain file and reshape it into samples.

    Handles two quirks of the raw chain files: '#' comment rows are
    dropped, and rows that were wrapped onto two lines by the chain
    writer (the fourth parameter spilled to the start of the next row)
    are stitched back together before incomplete rows are removed.

    Uses the module-level ``quenching``, ``nsteps``, ``nwalkers``,
    ``ndim``, and ``burnin`` settings.

    Args:
        chain_fname: path to the raw whitespace-delimited chain file.

    Returns:
        Tuple ``(mcmc, sampler, samples)``: the cleaned DataFrame, the
        (nsteps, nwalkers, ndim) array, and the flattened post-burn-in
        samples of shape (-1, ndim).
    """
    if quenching == 'hybrid':
        colnames = ['Mstar_q', 'Mhalo_q', 'mu', 'nu']
    elif quenching == 'halo':
        colnames = ['Mh_qc', 'Mh_qs', 'mu_c', 'mu_s']
    else:
        raise ValueError(f"unknown quenching model: {quenching!r}")
    mcmc = pd.read_csv(chain_fname, names=colnames, header=None,
                       delim_whitespace=True)
    mcmc = mcmc[mcmc[colnames[0]].values != '#']
    for col in colnames:
        mcmc[col] = mcmc[col].astype(np.float64)
    # Stitch wrapped rows: a NaN in column 3 with column 2 present means
    # the fourth value landed in column 0 of the following row.
    for idx, row in enumerate(mcmc.values):
        if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
            row[3] = mcmc.values[idx + 1][0]
    mcmc = mcmc.dropna(axis='index', how='any').reset_index(drop=True)
    sampler = mcmc.values.reshape(nsteps, nwalkers, ndim)
    # Removing burn-in
    samples = sampler[burnin:, :, :].reshape((-1, ndim))
    return mcmc, sampler, samples

## For SMF
if mf_type == 'smf' or mf_type == 'both':
    chain_fname_smf = path_to_proc + 'smhm_colour_run{0}/mcmc_{1}_colour_raw.txt'.\
        format(run_smf, survey)
    mcmc_smf_1, sampler_smf_1, samples_smf_1 = \
        _load_colour_chain(chain_fname_smf)

## For BMF
if mf_type == 'bmf' or mf_type == 'both':
    nsteps = 1000
    nwalkers = 260
    ndim = 4
    burnin = 200
    chain_fname_bmf = path_to_proc + 'smhm_colour_run24/mcmc_{0}_colour_raw.txt'.\
        format(survey)
    mcmc_bmf_1, sampler_bmf_1, samples_bmf_1 = \
        _load_colour_chain(chain_fname_bmf)

# Additional SMF chains (runs 25-27) are always loaded, matching the
# original unconditional sections.
nsteps = 1000
nwalkers = 260
ndim = 4
burnin = 200

chain_fname_smf = path_to_proc + 'smhm_colour_run25/mcmc_{0}_colour_raw.txt'.\
    format(survey)
mcmc_smf_2, sampler_smf_2, samples_smf_2 = _load_colour_chain(chain_fname_smf)

chain_fname_smf = path_to_proc + 'smhm_colour_run26/mcmc_{0}_colour_raw.txt'.\
    format(survey)
mcmc_smf_3, sampler_smf_3, samples_smf_3 = _load_colour_chain(chain_fname_smf)

chain_fname_smf = path_to_proc + 'smhm_colour_run27/mcmc_{0}_colour_raw.txt'.\
    format(survey)
mcmc_smf_4, sampler_smf_4, samples_smf_4 = _load_colour_chain(chain_fname_smf)
# Reference parameter values overplotted as "truth" markers on the corner
# plot.
zumandelbaum_param_vals_hybrid = [10.5, 13.76, 0.69, 0.15] # For hybrid model
optimizer_best_fit_eco_smf_hybrid = [10.49, 14.03, 0.69, 0.14] # For hybrid model
zumandelbaum_param_vals_halo = [12.20, 0.38, 12.17, 0.15] # For halo model
optimizer_best_fit_eco_smf_halo = [12.61, 13.5, 0.40, 0.148] # For halo model
# Overlay all loaded chains on a single corner plot.
c = ChainConsumer()
if mf_type == 'smf' or mf_type == 'both':
    c.add_chain(samples_smf_1,parameters=[r"$\mathbf{M^{q}_{*}}$",
        r"$\mathbf{M^{q}_{h}}$", r"$\boldsymbol{\mu}$", r"$\boldsymbol{\nu}$"],
        name=r"ECO 2$\sigma$ Behroozi",
        color='#E766EA', zorder=9)
if mf_type == 'bmf' or mf_type == 'both':
    c.add_chain(samples_bmf_1,parameters=[r"$\mathbf{M^{q}_{*}}$",
        r"$\mathbf{M^{q}_{h}}$", r"$\boldsymbol{\mu}$", r"$\boldsymbol{\nu}$"],
        name=r"ECO best-fit Behroozi",
        color='#53A48D', zorder=10)
# Chains from runs 25-27 are always added (they are always loaded above).
c.add_chain(samples_smf_2,parameters=[r"$\mathbf{M^{q}_{*}}$",
    r"$\mathbf{M^{q}_{h}}$", r"$\boldsymbol{\mu}$", r"$\boldsymbol{\nu}$"],
    name=r"ECO 2$\sigma$ Behroozi ", color='#1f77b4', zorder=11)
c.add_chain(samples_smf_3,parameters=[r"$\mathbf{M^{q}_{*}}$",
    r"$\mathbf{M^{q}_{h}}$", r"$\boldsymbol{\mu}$", r"$\boldsymbol{\nu}$"],
    name=r"ECO 2$\sigma$ Behroozi ", color='#DB7093', zorder=12)
c.add_chain(samples_smf_4,parameters=[r"$\mathbf{M^{q}_{*}}$",
    r"$\mathbf{M^{q}_{h}}$", r"$\boldsymbol{\mu}$", r"$\boldsymbol{\nu}$"],
    name=r"ECO 2$\sigma$ Behroozi ", color='#FFD700', zorder=13)
# c.configure(shade_gradient=[0.1, 3.0], colors=['r', 'b'], \
#     sigmas=[1,2], shade_alpha=0.4)
c.configure(smooth=5,label_font_size=25,tick_font_size=10,summary=True,\
    sigma2d=False,legend_kwargs={"fontsize": 30}) #1d gaussian showing 68%,95% conf intervals
if quenching == 'hybrid':
    if mf_type == 'smf' or mf_type == 'both':
        fig1 = c.plotter.plot(display=True,truth=optimizer_best_fit_eco_smf_hybrid)
elif quenching == 'halo':
    if mf_type == 'smf' or mf_type == 'both':
        fig1 = c.plotter.plot(display=True,truth=optimizer_best_fit_eco_smf_halo)
# fig2 = c.plotter.plot(filename=path_to_figures+'emcee_cc_mp_eco_corrscatter.png',\
#     truth=behroozi10_param_vals)
|
{"hexsha": "6f643355995ac3a25294568933a6cbb0263e1599", "size": 12112, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mcmc/colour/cornerplot_colour.py", "max_stars_repo_name": "MehnaazAsad/RESOLVE_Statistics", "max_stars_repo_head_hexsha": "a7bdcc896ca2c51ab3417c46f07efe8c16825597", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-22T02:18:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-22T02:18:55.000Z", "max_issues_repo_path": "src/mcmc/colour/cornerplot_colour.py", "max_issues_repo_name": "MehnaazAsad/RESOLVE_Statistics", "max_issues_repo_head_hexsha": "a7bdcc896ca2c51ab3417c46f07efe8c16825597", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mcmc/colour/cornerplot_colour.py", "max_forks_repo_name": "MehnaazAsad/RESOLVE_Statistics", "max_forks_repo_head_hexsha": "a7bdcc896ca2c51ab3417c46f07efe8c16825597", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-22T02:27:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-22T02:27:49.000Z", "avg_line_length": 39.4527687296, "max_line_length": 94, "alphanum_fraction": 0.6482826948, "include": true, "reason": "import numpy", "num_tokens": 4199}
|
[STATEMENT]
lemma nat_power_eq':
assumes "a \<notin> carrier R"
shows "nat_power n a = undefined"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nat_power n a = undefined
[PROOF STEP]
by (simp add: assms nat_power_def)
|
{"llama_tokens": 89, "file": "Padic_Ints_Function_Ring", "length": 1}
|
[STATEMENT]
lemma sup_least_classes1:
"c \<le> e \<Longrightarrow> d \<le> e \<Longrightarrow> c \<squnion> d \<le> e"
for c d e :: classes1
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>c \<le> e; d \<le> e\<rbrakk> \<Longrightarrow> c \<squnion> d \<le> e
[PROOF STEP]
by (induct c; induct d;
auto simp add: less_eq_classes1_def less_classes1_def subclass1.simps)
|
{"llama_tokens": 157, "file": "Safe_OCL_OCL_Examples", "length": 1}
|
from logging import getLogger
import numpy
import pandas
from rdkit import Chem
from tqdm import tqdm
from chainer_chemistry.dataset.parsers.base_parser import BaseFileParser
from chainer_chemistry.dataset.preprocessors.common import MolFeatureExtractionError # NOQA
from chainer_chemistry.dataset.preprocessors.mol_preprocessor import MolPreprocessor # NOQA
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
import traceback
class CSVFileParser(BaseFileParser):
    """csv file parser

    This FileParser parses .csv file.
    It should contain column which contain SMILES as input, and
    label column which is the target to predict.

    Args:
        preprocessor (BasePreprocessor): preprocessor instance
        labels (str or list): labels column
        smiles_col (str): smiles column
        postprocess_label (Callable): post processing function if necessary
        postprocess_fn (Callable): post processing function if necessary
        logger: logger instance; defaults to this module's logger
    """
    def __init__(self, preprocessor,
                 labels=None,
                 smiles_col='smiles',
                 postprocess_label=None, postprocess_fn=None,
                 logger=None):
        super(CSVFileParser, self).__init__(preprocessor)
        # Normalize a single label name to a one-element list so the rest
        # of the code can always treat `self.labels` as a list.
        if isinstance(labels, str):
            labels = [labels, ]
        self.labels = labels  # type: list
        self.smiles_col = smiles_col
        self.postprocess_label = postprocess_label
        self.postprocess_fn = postprocess_fn
        self.logger = logger or getLogger(__name__)

    def parse(self, filepath, return_smiles=False):
        """parse csv file using `preprocessor`

        Label is extracted from `labels` columns and input features are
        extracted from smiles information in `smiles` column.

        Args:
            filepath (str): file path to be parsed.
            return_smiles (bool): If set to True, this function returns
                preprocessed dataset and smiles list.
                If set to False, this function returns preprocessed dataset and
                `None`.

        Returns (dict): dictionary that contains Dataset, 1-d numpy array with
            dtype=object(string) which is a vector of smiles for each example
            or None.
        """
        logger = self.logger
        pp = self.preprocessor
        smiles_list = []
        # counter = 0
        if isinstance(pp, MolPreprocessor):
            try:
                # It is recommended to use `read_csv` method in pandas version
                # after 0.18.x
                df = pandas.read_csv(filepath)
            except AttributeError as e:
                # It is deprecated in newer versions of pandas, but we use
                # this method for older version of pandas.
                df = pandas.DataFrame.from_csv(filepath)
            # `features` is lazily initialized on the first successfully
            # preprocessed row (its width depends on the preprocessor output).
            # NOTE(review): if *every* row fails, `features` stays None and
            # the `for feature in features` loop below raises TypeError.
            features = None
            smiles_index = df.columns.get_loc(self.smiles_col)
            if self.labels is None:
                labels_index = []  # dummy list
            else:
                labels_index = [df.columns.get_loc(c) for c in self.labels]
            total_count = df.shape[0]
            fail_count = 0
            success_count = 0
            for row in tqdm(df.itertuples(index=False), total=df.shape[0]):
                smiles = row[smiles_index]
                # TODO(Nakago): Check.
                # currently it assumes list
                labels = [row[i] for i in labels_index]
                try:
                    mol = Chem.MolFromSmiles(smiles)
                    if mol is None:
                        # RDKit could not parse this SMILES; skip the row.
                        fail_count += 1
                        continue
                    # Note that smiles expression is not unique.
                    # we should re-obtain smiles from `mol`, so that the
                    # smiles order does not contradict with input features'
                    # order.
                    # Here, `smiles` and `standardized_smiles` expresses
                    # same molecule, but the expression may be different!
                    standardized_smiles, mol = pp.prepare_smiles_and_mol(mol)
                    input_features = pp.get_input_features(mol)
                    # Extract label
                    if self.postprocess_label is not None:
                        labels = self.postprocess_label(labels)
                    if return_smiles:
                        assert standardized_smiles == Chem.MolToSmiles(mol)
                        smiles_list.append(standardized_smiles)
                    # logger.debug('[DEBUG] smiles {}, standard_smiles {}'
                    #              .format(smiles, standardized_smiles))
                except MolFeatureExtractionError as e:
                    # This is expected error that extracting feature failed,
                    # skip this molecule.
                    fail_count += 1
                    continue
                except Exception as e:
                    # Unexpected error: log it with a traceback, then skip
                    # the row rather than aborting the whole parse.
                    logger.warning('parse(), type: {}, {}'
                                   .format(type(e).__name__, e.args))
                    logger.info(traceback.format_exc())
                    fail_count += 1
                    continue
                # Initialize features: list of list
                if features is None:
                    if isinstance(input_features, tuple):
                        num_features = len(input_features)
                    else:
                        num_features = 1
                    if self.labels is not None:
                        # One extra column holds the labels.
                        num_features += 1
                    features = [[] for _ in range(num_features)]
                # Append this row's features column-wise.
                if isinstance(input_features, tuple):
                    for i in range(len(input_features)):
                        features[i].append(input_features[i])
                else:
                    features[0].append(input_features)
                if self.labels is not None:
                    # Labels always occupy the last column.
                    features[len(features) - 1].append(labels)
                success_count += 1
            ret = []
            for feature in features:
                try:
                    feat_array = numpy.asarray(feature)
                except ValueError:
                    # Temporal work around.
                    # See,
                    # https://stackoverflow.com/questions/26885508/why-do-i-get-error-trying-to-cast-np-arraysome-list-valueerror-could-not-broa
                    # Ragged rows cannot be stacked into a rectangular array;
                    # fall back to a 1-d object array of ndarrays.
                    feat_array = numpy.empty(len(feature), dtype=numpy.ndarray)
                    feat_array[:] = feature[:]
                ret.append(feat_array)
            result = tuple(ret)
            logger.info('Preprocess finished. FAIL {}, SUCCESS {}, TOTAL {}'
                        .format(fail_count, success_count, total_count))
        else:
            # Spec not finalized yet for general case
            result = pp.process(filepath)
        smileses = numpy.array(smiles_list) if return_smiles else None
        if isinstance(result, tuple):
            if self.postprocess_fn is not None:
                result = self.postprocess_fn(*result)
            return {"dataset": NumpyTupleDataset(*result), "smiles": smileses}
        else:
            if self.postprocess_fn is not None:
                result = self.postprocess_fn(result)
            return {"dataset": NumpyTupleDataset(result), "smiles": smileses}
|
{"hexsha": "2e7c664db33269da534d07c1c3ce4bd5951dca27", "size": 7350, "ext": "py", "lang": "Python", "max_stars_repo_path": "chainer_chemistry/dataset/parsers/csv_file_parser.py", "max_stars_repo_name": "zhenghangCN/chainer-chemistry", "max_stars_repo_head_hexsha": "dcda27f2fdbf8ce1d626835e73f1c2ceb8ec9886", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chainer_chemistry/dataset/parsers/csv_file_parser.py", "max_issues_repo_name": "zhenghangCN/chainer-chemistry", "max_issues_repo_head_hexsha": "dcda27f2fdbf8ce1d626835e73f1c2ceb8ec9886", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chainer_chemistry/dataset/parsers/csv_file_parser.py", "max_forks_repo_name": "zhenghangCN/chainer-chemistry", "max_forks_repo_head_hexsha": "dcda27f2fdbf8ce1d626835e73f1c2ceb8ec9886", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-23T12:25:57.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-23T12:25:57.000Z", "avg_line_length": 42.0, "max_line_length": 144, "alphanum_fraction": 0.5580952381, "include": true, "reason": "import numpy", "num_tokens": 1392}
|
# coding=utf-8
from hielen2.source import CloudSource, ActionSchema, GeoInfoSchema
from hielen2.utils import LocalFile, ColorMap, Style, FTPPath
from hielen2.ext.source_rawsource import Source as RawSource
import hielen2.api.features as featman
from hielen2.mapmanager import Multiraster
from hielen2.cloudmanager import PotreeCM
from .cloudpainter import makemultilaz
import json
from pathlib import Path
from marshmallow import fields
from numpy import full
from pandas import read_csv, DataFrame, Series, DatetimeIndex
from matplotlib.cm import jet
from matplotlib.colors import rgb2hex
from xarray import open_rasterio
from shutil import copy
import geojson
from datetime import datetime
import traceback
def series_file_date_parser(x):
    """Parse a timestamp of the form ``'31/12/2020 23.59'`` (day first,
    dot between hour and minute) into a ``datetime``."""
    return datetime.strptime(x, "%d/%m/%Y %H.%M")
#mapbasename="basemap.tif"
class ConfigSchema(GeoInfoSchema):
    """Configuration schema for a TinSAR source.

    Declares the user-editable configuration fields: the FTP path of the
    master point cloud, optional colormaps for the displacement/amplitude
    layers, and the style/series file for the selected points.

    `_self_hints` appears to map a UI section name to ordered
    ``[field_name, description, required]`` triples — presumably consumed
    by the schema front-end; TODO confirm against the base schema.
    """
    _self_hints = {
        "TinSAR Base" : {
            0: ["master_cloud","references to master cloud csv in FTP",True],
        },
        "TinSAR Color Maps": {
            0: ["displ_cmap","Displacement colormap range",True],
            1: ["ampli_cmap","Amplitude colormap range",True],
        },
        "TinSAR Selected Points":{
            0: ["point_style","style code for the selected points",True],
            1: ["series_file","textfile containing selected points and dataseries of theirs",True]
        }
    }
    # FTP path of the master (reference) point cloud; mandatory.
    master_cloud = FTPPath(required=True, allow_none=False)
    # Optional colormaps; when left None a default jet ramp is built at
    # configuration time (see Source._config in this module).
    displ_cmap = ColorMap(required=False,allow_none=True,default=None)
    ampli_cmap = ColorMap(required=False,allow_none=True,default=None)
    # Optional style code applied to the selected-point subfeatures.
    point_style = Style(required=False, allow_none=True,default=None)
    # Optional CSV with the selected points and their data series.
    series_file = FTPPath(required=False, allow_none=True)
class FeedSchema(ActionSchema):
    """Feed-action schema for a TinSAR source.

    All four inputs are optional FTP paths: the displacement/amplitude
    point clouds (turned into Potree clouds) and the corresponding
    geotiffs (copied into the map cache). See Source.feed in this module.

    `_self_hints` appears to map a UI section name to ordered
    ``[field_name, description, required]`` triples — presumably consumed
    by the schema front-end; TODO confirm against the base schema.
    """
    _self_hints = {
        "TinSAR Feed": {
            0: ["displacement_cloud","reference to result cloud in FTP",True],
            1: ["amplitude_cloud","refernce to radar amplitutde cloud in FTP",True],
            2: ["displacement_geotiff","reference to result geotiff in FTP",True],
            3: ["amplitude_geotiff","refernce to radar amplitude geotiff in FTP",True]
        }
    }
    displacement_cloud = FTPPath(required=False, allow_none=True)
    amplitude_cloud = FTPPath(required=False, allow_none=True)
    displacement_geotiff = FTPPath(required=False, allow_none=True)
    amplitude_geotiff = FTPPath(required=False, allow_none=True)
def get_imgname(mapname,timestamp,param):
return f"{mapname}_{timestamp[:14]}_{param}.tif"
class Source(CloudSource):
    """
    TinSAR point-cloud source manager.

    Handles configuration (master cloud, colormaps, selected-point
    subfeatures), feeding of displacement/amplitude clouds and geotiffs,
    and retrieval of data series, map URLs and Potree cloud URLs.
    """

    def _config(self, brandnewconf=True, **kwargs):
        """Create (``brandnewconf=True``) or update (``False``) the config.

        Builds the map/file/cloud caches for the configuration timestamp,
        fills in default jet colormaps when none were given, generates the
        Potree clouds for the master cloud, registers selected-point
        subfeatures, and — on update — replays every feed that belongs to
        this configuration. Returns the (possibly augmented) config dict.
        """
        if brandnewconf:
            kwargs['opacity'] = 50
            out = super().config(**kwargs)
            # Register the two parameter series exposed by this source.
            chstruct = {
                "param": 'Displacement',
                "struct": {
                    "cache": None,
                    "modules": {},
                    "mu": "mm",
                    "operands": {"output": "displacement"},
                    "operator": None
                }
            }
            self.addParamSeries(**chstruct)
            chstruct = {
                "param": 'Radar_Amplitude',
                "struct": {
                    "cache": None,
                    "modules": {},
                    "mu": "mm",
                    "operands": {"output": "amplitude"},
                    "operator": None
                }
            }
            self.addParamSeries(**chstruct)
        else:
            # On update the action record passed in *is* the config.
            out = kwargs
        timestamp = out['timestamp']
        out['master_cloud'] = kwargs['master_cloud']
        confpath = self.hasher(timestamp)
        mapmanager = Multiraster(self.uid, confpath)
        mapmanager.mapcache.mkdir()
        mapmanager.setMFparams(bands=3, crs='EPSG:4326')
        self.filecache.mkdir(confpath)
        # Configurable colormaps: displ_cmap, ampli_cmap.
        # Default: an 11-step jet ramp over [0, 1].
        def_cmap = [[a / 100, rgb2hex(jet(a / 100)[0:3])] for a in range(0, 101, 10)]
        if kwargs['displ_cmap'] is None:
            kwargs['displ_cmap'] = ColorMap.make_colormap(def_cmap)
            kwargs['displ_cmap']["norm"] = None
        out['displ_cmap'] = kwargs['displ_cmap']
        if kwargs['ampli_cmap'] is None:
            kwargs['ampli_cmap'] = ColorMap.make_colormap(def_cmap)
            kwargs['ampli_cmap']["norm"] = None
        out['ampli_cmap'] = kwargs['ampli_cmap']
        self.setParamOperands('Displacement', cmap=out["displ_cmap"])
        self.setParamOperands('Radar_Amplitude', cmap=out["ampli_cmap"])
        # Build the Potree clouds for the master cloud.
        cloudman = PotreeCM(self.uid, confpath)
        cloudman.cloudcache.mkdir()
        clds = makemultilaz(out['master_cloud'], str(self.filecache / confpath), basemanage='a')
        for k, w in clds.items():
            cloudman.makePotree(w, k)
        out['point_style'] = kwargs['point_style']
        # series_file is optional; any failure (missing key, None) means
        # "no selected points".
        try:
            points_file = Path(kwargs["series_file"])
        except Exception:
            points_file = None
        self._feed_subitems(points_file, out['point_style'])
        if not brandnewconf:
            # Re-create the clouds of every feed bound to this config:
            # replay feeds in [timestamp, next config timestamp).
            try:
                nextconf = self.getActionValues('config', slice(timestamp, None))[1]['timestamp']
            except Exception:
                nextconf = None
            feeds = self.getActionValues('feed', slice(timestamp, nextconf))
            for f in feeds:
                feedkwargs = f['value']
                self.feed(**feedkwargs)
        return out

    def config(self, **kwargs):
        """Create a brand-new configuration (see ``_config``)."""
        return self._config(brandnewconf=True, **kwargs)

    def updateConfig(self, **kwargs):
        """Re-run an existing configuration and replay its feeds."""
        return self._config(brandnewconf=False, **kwargs)

    def cleanConfig(self, timestamp):
        """Drop the file, cloud and map caches of one configuration.

        NOTE(review): removal order / completeness still to be analysed
        (kept from the original implementation).
        """
        timestamp = self.hasher(timestamp)
        self.filecache.rmdir(timestamp)
        PotreeCM(self.uid, timestamp).cloudcache.rmdir()
        Multiraster(self.uid, timestamp).mapcache.rmdir()

    def _feed_subitems(self, points_file=None, point_style=None):
        """Attach selected points as subfeatures and feed their series.

        ``points_file`` is a CSV whose first rows carry point metadata
        (x, y, z, label) and whose remaining rows carry per-point time
        series. Each point becomes (or updates) a RawSource subfeature.
        """
        try:
            subitems = set(self.getFeatureInfo('subitems'))
        except Exception:
            subitems = set([])
        if points_file is not None:
            # Rows 4+ are the time series (first column is the timestamp).
            series = read_csv(points_file, sep=";", index_col=0, skiprows=3,
                              parse_dates=[0], date_parser=series_file_date_parser)
            # First 4 rows hold the point metadata; transpose to one row
            # per point.
            points = read_csv(points_file, sep=";", index_col=0, header=None).head(4).T
            points.columns = list(map(str.lower, points.columns))
            labels = points[[x for x in points.columns if x not in ['x', 'y', 'z']]]
            points = points[['x', 'y', 'z']]
            points['label'] = labels
            points.columns = ['x', 'y', 'z', 'label']
            # Subfeature uid = parent uid + point label.
            points['puid'] = points['label'].apply(lambda x: self.uid + x)
            points = points.set_index("puid")
            for subuid, x, y, z, label in points.itertuples():
                prototype = "RawSource"
                properties = {
                    "label": self.label + "_" + label,
                    "context": self.context,
                    "style": point_style
                }
                geometry = {
                    "type": "Point",
                    "coordinates": [x, y, z]
                }
                resp = featman.create_feature(uid=subuid, prototype=prototype,
                                              properties=properties,
                                              geometry=geometry).status
                resp = int(resp.split(" ")[0])
                # NOTE(review): assumes create_feature answers 201 (created)
                # or 409 (exists); any other success code would leave `rs`
                # unbound below — confirm against the features API.
                if resp == 201:
                    rs = RawSource(subuid)
                    rs.config(param_list=[["Displacement", 0, "mm"]])
                    subitems.add(subuid)
                elif resp == 409:
                    # Already exists: update geometry/properties instead.
                    rs = RawSource(subuid)
                    resp = featman.update_feature(uid=subuid, properties=properties,
                                                  geometry=geometry).status
                    resp = int(resp.split(" ")[0])
                    subitems.add(subuid)
                if resp not in (200, 201):
                    print(resp)
                    # Persist what was linked so far before bailing out.
                    self.setFeatureInfo('subitems', list(subitems))
                    raise ValueError(f"While manageing {label}, '{resp}' occurs")
                rs.feed(input_file=series[label])
            self.setFeatureInfo('subitems', list(subitems))
        return

    def feed(self, **kwargs):
        """Ingest one acquisition: optional clouds and geotiffs.

        Clouds are colorized with the config colormaps and converted to
        Potree; geotiffs are copied into the map cache. The timestamp is
        finally appended to the feature timeline. Returns ``kwargs``.
        """
        timestamp = kwargs["timestamp"]
        conf = self.lastActionBefore('config', timestamp)
        timestahash = self.hasher(timestamp)
        reftimehash = self.hasher(conf["timestamp"])
        subpath = Path(reftimehash, timestahash)
        cloudman = PotreeCM(self.uid, subpath)
        mapmanager = Multiraster(self.uid, reftimehash)
        self.filecache.mkdir(subpath)
        # --- clouds ---
        try:
            result_cloud = Path(kwargs["displacement_cloud"])
        except Exception:
            result_cloud = None
        if result_cloud is not None:
            r = ColorMap.parse_colormap(conf['displ_cmap'])
            clds = makemultilaz(result_cloud, str(self.filecache / subpath),
                                basemanage='i', **r)
            for k, w in clds.items():
                cloudman.makePotree(w, k)
        try:
            info_cloud = Path(kwargs["amplitude_cloud"])
        except Exception:
            info_cloud = None
        if info_cloud is not None:
            r = ColorMap.parse_colormap(conf['ampli_cmap'])
            clds = makemultilaz(info_cloud, str(self.filecache / subpath),
                                basemanage='i', **r)
            for k, w in clds.items():
                cloudman.makePotree(w, k)
        # --- maps ---
        mapname = self.hasher(conf['timestamp'])
        try:
            result_tiff = Path(kwargs["displacement_geotiff"])
        except Exception:
            result_tiff = None
        if result_tiff is not None:
            imgname = get_imgname(mapname, self.hasher(timestamp), 'displacement')
            path_image = mapmanager.mapcache / imgname
            copy(result_tiff, path_image)
        try:
            info_tiff = Path(kwargs["amplitude_geotiff"])
        except Exception:
            info_tiff = None
        if info_tiff is not None:
            imgname = get_imgname(mapname, self.hasher(timestamp), 'amplitude')
            path_image = mapmanager.mapcache / imgname
            copy(info_tiff, path_image)
        self._timeline_add(timestamp)
        return kwargs

    def updateFeed(self, **kwargs):
        """Invalidate the feature cache, then re-run the feed."""
        self.cleanFeatureCache()
        return self.feed(**kwargs)

    def cleanFeed(self, timestamp):
        """Remove the Potree clouds of one feed and unlink its timestamp.

        BUG FIX: the original read ``kwargs["timestamp"]`` although this
        method has no ``**kwargs`` parameter, so every call raised
        NameError; the ``timestamp`` argument is used directly instead.
        """
        conf = self.lastActionBefore('config', timestamp)
        timestahash = self.hasher(timestamp)
        reftimehash = self.hasher(conf["timestamp"])
        subpath = Path(reftimehash, timestahash)
        PotreeCM(self.uid, subpath).cloudcache.rmdir()
        self._timeline_remove(timestamp)

    def data(self, times=None, timeref=None, geometry=None,
             output="displacement", cmap=None, **kwargs):
        """Sample the raster of ``output`` at ``geometry`` and map the mean
        RGB back through the colormap; returns a one-element Series indexed
        by the resolved timestamp, or None on any failure (best-effort).
        """
        cmappo = cmap['f_cmap']
        if geometry is None:
            return None
        # A slice means "up to"; take its upper bound.
        if isinstance(times, slice):
            timestamp = times.stop
        else:
            timestamp = times
        if timestamp is None:
            # Fall back to the latest fed timestamp, if any.
            try:
                timestamp = self.getFeatureInfo('timeline')[-1]
            except Exception:
                timestamp = None
        if timestamp is None:
            return None
        conf = self.lastActionBefore('config', timestamp)
        mapname = self.hasher(conf['timestamp'])
        mapmanager = Multiraster(self.uid, mapname)
        # NOTE(review): `mapfile` is unused afterwards, but the property
        # access may build the map file as a side effect — kept; confirm.
        mapfile = mapmanager.mapfile
        imgname = get_imgname(mapname, self.hasher(timestamp), output)
        path_image = mapmanager.mapcache / imgname
        name = ""
        try:
            with open_rasterio(path_image) as dataset:
                # TODO ricorda dataframe.rio.clip(geometries)
                coords = list(geojson.utils.coords(geometry[0]))
                geotype = str(geometry[0]['type'])
                if geotype == 'Point':
                    query = {
                        'method': 'nearest',
                        **dict(zip(['x', 'y'], coords[0]))
                    }
                    name = 'nearest'
                elif geotype == 'Polygon':
                    # Select the bounding box of the polygon; slice step
                    # follows the axis direction of the raster.
                    coords = DataFrame(coords)
                    dirx = 1 if dataset.x[0] < dataset.x[-1] else -1
                    diry = 1 if dataset.y[0] < dataset.y[-1] else -1
                    query = {
                        'method': None,
                        "x": slice(coords[0].min(), coords[0].max(), dirx),
                        "y": slice(coords[1].min(), coords[1].max(), diry)
                    }
                    name = 'mean'
                else:
                    raise ValueError("Unmanaged geometry Type")
                selection = dataset.sel(**query)
                # Mean RGB over the selection, then invert the colormap.
                color = [
                    int(selection.sel(band=1).mean().round(0)),
                    int(selection.sel(band=2).mean().round(0)),
                    int(selection.sel(band=3).mean().round(0))
                ]
                result = ColorMap.valorizeColor(cmappo, color)
        except Exception:
            return None
        ser = Series([result], index=DatetimeIndex([timestamp]))
        ser.name = name
        return ser

    def map(self, times=None, timeref=None, geometry=None,
            output="displacement", cmap=None, **kwargs):
        """Return a one-element Series with the map URL of ``output`` at the
        requested timestamp, or None on failure (best-effort)."""
        if isinstance(times, slice):
            timestamp = times.stop
        else:
            timestamp = times
        conf = self.lastActionBefore('config', timestamp)
        try:
            mapname = self.hasher(conf['timestamp'])
            mapmanager = Multiraster(self.uid, mapname)
            # NOTE(review): `mapfile` is unused afterwards, but the property
            # access may build the map file as a side effect — kept; confirm.
            mapfile = mapmanager.mapfile
            imgname = get_imgname(mapname, self.hasher(timestamp), output)
            url = mapmanager.geturl(imgname)
            ser = Series([url], index=DatetimeIndex([timestamp]))
        except Exception:
            return None
        return ser

    def cloud(self, times=None, timeref=None, geometry=None,
              output="displacement", cmap=None, **kwargs):
        """Return a one-element Series with the Potree viewer URL of
        ``output`` at the requested timestamp, or None on failure."""
        if isinstance(times, slice):
            timestamp = times.stop
        else:
            timestamp = times
        if timestamp is None:
            timestamp = self.getFeatureInfo('timeline')[-1]
        conf = self.lastActionBefore('config', timestamp)
        try:
            # Reference cloud defaults to the one of the governing config.
            reftimestamp = timeref or conf['timestamp']
            cloudref = self.hasher(reftimestamp)
            results = self.hasher(timestamp)
            url = PotreeCM(self.uid, cloudref).geturl(results, output) + f"&feature={self.uid}"
            ser = Series([url], index=DatetimeIndex([timestamp]))
        except Exception:
            return None
        return ser
|
{"hexsha": "d6a86c90b7bfcc5dd12a75ec8786bc68e2c76b8e", "size": 15678, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/hielen2/ext/source_tinsar/tin.py", "max_stars_repo_name": "fantamodeman/hielen2", "max_stars_repo_head_hexsha": "b1b249f4bd7609b3977777f663ae242adf69cfe2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/hielen2/ext/source_tinsar/tin.py", "max_issues_repo_name": "fantamodeman/hielen2", "max_issues_repo_head_hexsha": "b1b249f4bd7609b3977777f663ae242adf69cfe2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/hielen2/ext/source_tinsar/tin.py", "max_forks_repo_name": "fantamodeman/hielen2", "max_forks_repo_head_hexsha": "b1b249f4bd7609b3977777f663ae242adf69cfe2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8628571429, "max_line_length": 157, "alphanum_fraction": 0.5563209593, "include": true, "reason": "from numpy", "num_tokens": 3402}
|
#
# A Job Shop Scheduling OpenAI Gym Environment
#
# Inspired by: https://developers.google.com/optimization/scheduling/job_shop
# Author: Lisa Ong, NUS/ISS
#
import gym
from gym import spaces
import numpy as np
class TaskList:
    """Used to track the state of tasks in a Job Shop Environment."""

    def __init__(self, jobs_data):
        """jobs_data: list of jobs, where
        each job is a list of multiple tasks: (machine_id, processing_time)

        Example:
           jobs_data = [
               [(0, 3), (1, 2), (2, 2)],  # Job0
               [(0, 2), (2, 1), (1, 4)],  # Job1
               [(1, 4), (2, 3)]  # Job2
           ]
        """
        num_jobs = len(jobs_data)
        # Flatten all jobs into one task list, keeping job order.
        self.tasks = [Task(i, *task) for i in range(num_jobs)
                      for task in jobs_data[i]]
        # job_id -> indices into self.tasks (ascending, i.e. job order).
        self.jobs_to_tasks = {i: [] for i in range(num_jobs)}
        for i, task in enumerate(self.tasks):
            self.jobs_to_tasks[task.job_id].append(i)
        # Machine ids are assumed dense in [0, max_machine_id].
        self.num_machines = 1 + max(t[0] for j in jobs_data for t in j)
        # machine_id -> indices into self.tasks.
        self.machines_to_tasks = {i: [] for i in range(self.num_machines)}
        for i, task in enumerate(self.tasks):
            self.machines_to_tasks[task.machine_id].append(i)
        self.reset()

    def reset(self):
        """Reset the state of all tasks and return the fresh observation."""
        for t in self.tasks:
            t.reset()
        # 0 means "not scheduled"; otherwise the entry is the start time.
        self.observation = {
            'is_scheduled': [0] * self.length()
        }
        return self.observation

    def length(self):
        """Return the total number of tasks."""
        return len(self.tasks)

    def get_num_machines(self):
        """Return the total number of machines."""
        return len(self.machines_to_tasks)

    def get_task(self, task_id):
        """Retrieve a task.

        task_id: the task index
        """
        return self.tasks[task_id]

    def get_machines_to_tasks(self):
        """Return the mapping of machines to task indices."""
        return self.machines_to_tasks

    def schedule_task(self, task_id, start_time):
        """Schedule a task and return the observation.

        task_id: the task index
        start_time: the task start time (must be > 0; 0 is the
            "unscheduled" sentinel in the observation)
        """
        if start_time <= 0:
            raise Exception('start_time should be > 0')
        task = self.get_task(task_id)
        task.schedule(start_time)
        # Update the observation (unused `machine_id` local removed).
        self.observation['is_scheduled'][task_id] = start_time
        return self.observation

    def get_makespan(self):
        """Return the makespan (duration from the earliest start time
        to the latest end time), or 0 if nothing is scheduled."""
        start_times = [t.start_time for t in self.tasks if t.is_scheduled()]
        end_times = [t.end_time for t in self.tasks if t.is_scheduled()]
        if len(end_times) > 0:
            return max(end_times) - min(start_times)
        else:
            return 0  # nothing has been scheduled

    def get_related_tasks(self, task_id):
        """Return all tasks related to the current task, where "related"
        means belonging to the same job.

        task_id: the task index
        Returns a tuple of index arrays: (pretasks, posttasks).
        """
        task_ids = np.array(self.jobs_to_tasks[self.tasks[task_id].job_id])
        pre = task_ids[task_ids < task_id]
        post = task_ids[task_ids > task_id]
        return pre, post

    def all_tasks_scheduled(self):
        """Return whether every task has been scheduled."""
        return sum(self.get_tasks_is_scheduled()) == self.length()

    def get_tasks_is_scheduled(self):
        """Return a list of 1/0 flags, one per task, indicating whether it
        has been scheduled."""
        return [int(t.is_scheduled()) for t in self.tasks]

    def __repr__(self):
        return '\n'.join([f'{i}: {self.tasks[i].__repr__()}'
                          for i in range(len(self.tasks))])
class Task:
    """Encapsulates the state of one task in a Job Shop Environment."""

    def __init__(self, job_id, machine_id, processing_time):
        """job_id: the job this task belongs to
        machine_id: the machine this task must run on
        processing_time: how long the task takes
        """
        self.job_id = job_id
        self.machine_id = machine_id
        self.processing_time = processing_time
        self.reset()

    def reset(self):
        """Return the task to its unscheduled state (end_time == -1)."""
        self.start_time, self.end_time = 0, -1

    def schedule(self, start_time):
        """Fix the start time; the end time follows from the duration."""
        self.start_time = start_time
        self.end_time = start_time + self.processing_time

    def is_scheduled(self):
        """True once schedule() has been called since the last reset()."""
        return self.end_time != -1

    def __repr__(self):
        return f'Job: {self.job_id}, Machine: {self.machine_id}, \
            Start: {self.start_time}, End: {self.end_time}'
class JobshopEnv(gym.Env):
    """Custom Environment for a Job Shop Scheduling Problem

    For details on the scheduling problem:
    https://developers.google.com/optimization/scheduling/job_shop
    For details on the gym.Env class:
    https://github.com/openai/gym/blob/master/gym/core.py
    """
    # render to the current display or terminal
    metadata = {'render.modes': ['human']}

    def __init__(self, jobs_data, max_schedule_time=20, verbose=False):
        """jobs_data: list of jobs, where
        each job is a list of multiple tasks: (machine_id, processing_time)

        Example:
           jobs_data = [
               [(0, 3), (1, 2), (2, 2)],  # Job0
               [(0, 2), (2, 1), (1, 4)],  # Job1
               [(1, 4), (2, 3)]  # Job2
           ]
        max_schedule_time: maximum time allowed for the schedule
        verbose: whether to print debug messages
        """
        super(JobshopEnv, self).__init__()
        self.tasks = TaskList(jobs_data)
        self.max_schedule_time = max_schedule_time
        self.verbose = verbose
        # An action assigns one task a start time.
        # https://github.com/openai/gym/blob/master/gym/spaces/dict.py
        self.action_space = spaces.Dict({
            'task_id': spaces.Discrete(self.tasks.length()),
            'start_time': spaces.Discrete(self.max_schedule_time)
        })
        # Observation: per-task scheduled start time (0 == not scheduled).
        # Example:
        #   {'is_scheduled': [0, 0, 1, 0, 5, 0, 10, 1]}
        #   tasks 2, 4, 6, 7 scheduled; tasks 0, 1, 3, 5 not yet.
        # https://github.com/openai/gym/blob/master/gym/spaces/multi_discrete.py
        is_scheduled_vec = [self.max_schedule_time] * self.tasks.length()
        # A dictionary gives flexibility to add other observations later.
        self.observation_space = spaces.Dict({
            'is_scheduled': spaces.MultiDiscrete(is_scheduled_vec)
        })
        # Default range, stated explicitly for clarity.
        self.reward_range = (-float('inf'), float('inf'))
        self.reset()

    def reset(self):
        """Reset the environment to an initial state."""
        return self.tasks.reset()

    def calculate_reward(self, action):
        """Compute the reward for taking an action.

        action: the action being considered
        Returns (reward, error_info): error_info is '' when no constraint
        was violated, otherwise a short description of the violation.
        """
        reward = 0
        error_info = ''
        id = action['task_id']
        start_time = action['start_time']
        task = self.tasks.get_task(id)
        end_time = start_time + task.processing_time
        pre, post = self.tasks.get_related_tasks(id)
        machine_tasks = self.tasks.machines_to_tasks[task.machine_id]

        # Constraint 1: task already assigned.
        if task.is_scheduled():
            reward -= 100
            error_info = 'Duplicate Scheduling'
            if self.verbose:
                print(f'DEBUG (Env): Task already scheduled: {id}')

        # Constraint 2: machine already in use.
        # Collect (start, end) of every scheduled task on the machine plus
        # the candidate, sort by start, flatten to [s, e, s, e, ...]:
        # none of the times may overlap.
        mtasks = [(self.tasks.get_task(mt).start_time, self.tasks.get_task(mt).end_time)
                  for mt in machine_tasks if (mt != id and self.tasks.get_task(mt).is_scheduled())]
        mtasks.append((start_time, end_time))
        mtasks.sort(key=lambda t: t[0])
        flattened = np.array([t for ts in mtasks for t in ts])
        overlap = sum(flattened[:-1] > flattened[1:]) > 0
        if self.verbose:
            print(f'DEBUG (Env): Checking for overlap: {flattened}')
        if overlap:
            reward -= 100
            error_info = 'Machine Overlap'
            if self.verbose:
                print(f'DEBUG (Env): Machine overlap: {task.machine_id}, {flattened}')

        # Constraint 3: makespan exceeded.
        makespan = self.tasks.get_makespan()
        if makespan >= self.max_schedule_time:
            reward -= 100
            error_info = 'Makespan Exceeded'
            if self.verbose:
                print(f'DEBUG (Env): Makespan exceeded: {makespan}')

        # Constraint 4: same-job tasks must run in order.
        # FIX: the original loop variables shadowed the `pre`/`post` index
        # arrays; renamed to pre_task/post_task for clarity and safety.
        not_in_order = 0
        pre_tasks = [self.tasks.get_task(p) for p in pre]
        for pre_task in pre_tasks:
            if pre_task.is_scheduled() and pre_task.end_time >= start_time:
                not_in_order += 1
        post_tasks = [self.tasks.get_task(p) for p in post]
        for post_task in post_tasks:
            if post_task.is_scheduled() and post_task.start_time <= start_time:
                not_in_order += 1
        if not_in_order > 0:
            error_info = 'Out-of-sequence tasks'
            reward -= (100 * not_in_order)
            if self.verbose:
                print('DEBUG (Env): Out-of-sequence tasks')

        # No constraint violated: reward scales with tasks already
        # scheduled without errors.
        if reward >= 0:
            reward += 50 * sum(self.tasks.get_tasks_is_scheduled())
        return reward, error_info

    def step(self, action):
        """Take an action.

        action: the action being taken
        Returns the usual gym tuple (observation, reward, done, info).
        """
        # Reward is computed against the state *before* the assignment.
        reward, error_info = self.calculate_reward(action)
        # Take the action and get the next observation.
        observation = self.tasks.schedule_task(action['task_id'],
                                               action['start_time'])
        # Episode ends when every task is scheduled (computed once and
        # reused; the original called all_tasks_scheduled() twice).
        done = self.tasks.all_tasks_scheduled()
        makespan = self.tasks.get_makespan()
        if done:
            # Bonus for finishing early.
            reward += 100 * (self.max_schedule_time - makespan)
        info = {'makespan': makespan}
        if len(error_info):
            info['errors'] = error_info
        return observation, reward, done, info

    def render(self, mode='human', close=True):
        """Print the state of the current environment: a job view, then a
        per-machine ASCII timeline of the scheduled tasks."""
        print(f'Job-view:\n{self.tasks}')
        print(f'\nMachine-view:')
        for machine, tasks in self.tasks.get_machines_to_tasks().items():
            task_info = []
            for t in tasks:
                task = self.tasks.get_task(t)
                if task.is_scheduled():
                    task_info.append((t, task.start_time, task.end_time))
            status = ''
            timeline = ''
            if len(task_info) > 0:
                task_info.sort(key=lambda t: t[1])  # sort by start time
                prev = 0
                for ti in task_info:
                    # Draw the task id repeated over its duration; overlaps
                    # drop to a new line so they stay visible.
                    if prev > ti[1]:  # overlap
                        status += '\n' + (' ' * ti[1]) + f'{ti[0]}' * (ti[2] - ti[1])
                    else:
                        status += (' ' * (ti[1] - prev)) + f'{ti[0]}' * (ti[2] - ti[1])
                    prev = ti[2]
                # Ruler with a '|' every 5 ticks.
                timeline = ['-'] * max(task_info, key=lambda t: t[2])[2]
                if len(timeline) >= 5:
                    timeline[::5] = ['|' for t in timeline[::5]]
                timeline = '\n' + ''.join(timeline)
            else:
                status = 'idle'
            print(f'\nMachine {machine}:{timeline}')
            print(status)
# Unit test: drive the environment with random actions for a few steps.
if __name__ == "__main__":
    # Each job is a list of multiple tasks: (machine_id, processing_time)
    jobs_data = [
        [(0, 3), (1, 2), (2, 2)],  # Job0
        [(0, 2), (2, 1), (1, 4)],  # Job1
        [(1, 4), (2, 3)],          # Job2
    ]
    env = JobshopEnv(jobs_data, verbose=True)
    env.reset()
    env.render()
    for step_no in range(10):
        act = env.action_space.sample()
        print(f'======{step_no}======\naction: {act}')
        obs, reward, done, info = env.step(act)
        print(f'obs: {obs}, reward: {reward}, done: {done}, info: {info}')
        env.render()
        if done:
            print(f'took {step_no+1} action(s)')
            break
|
{"hexsha": "77d3ef0343cc22cc88efe0d535f9734f9b728430", "size": 12252, "ext": "py", "lang": "Python", "max_stars_repo_path": "day4/rl/gym-jobshop/gym_jobshop/envs/jobshop_env.py", "max_stars_repo_name": "lisaong/diec", "max_stars_repo_head_hexsha": "f22fd0880ca7808975de70a9259be77a29c6e176", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-05-30T02:55:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-03T14:18:23.000Z", "max_issues_repo_path": "day4/rl/gym-jobshop/gym_jobshop/envs/jobshop_env.py", "max_issues_repo_name": "lisaong/diec", "max_issues_repo_head_hexsha": "f22fd0880ca7808975de70a9259be77a29c6e176", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-12-27T00:53:23.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-17T05:29:19.000Z", "max_forks_repo_path": "day4/rl/gym-jobshop/gym_jobshop/envs/jobshop_env.py", "max_forks_repo_name": "lisaong/diec", "max_forks_repo_head_hexsha": "f22fd0880ca7808975de70a9259be77a29c6e176", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-06-04T06:26:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-07T04:25:32.000Z", "avg_line_length": 30.7067669173, "max_line_length": 87, "alphanum_fraction": 0.6354880836, "include": true, "reason": "import numpy", "num_tokens": 3272}
|
#!/usr/bin/env python
#coding=utf-8
"""
severities.py: the set of arrays for severity measures.
"""
__author__ = "Francisco Maria Calisto"
__maintainer__ = "Francisco Maria Calisto"
__email__ = "francisco.calisto@tecnico.ulisboa.pt"
__license__ = "MIT"
__version__ = "1.0.0"
__status__ = "Development"
__copyright__ = "Copyright 2019, Instituto Superior Técnico (IST)"
__credits__ = [
"Bruno Oliveira",
"Carlos Santiago",
"Jacinto C. Nascimento",
"Pedro Miraldo",
"Nuno Nunes"
]
import os
import sys
import logging
from os import path

# The current folder path.
basePath = os.path.dirname(__file__)
# The path to the repository "src" folder.
joinRepoSrcPath = os.path.join(basePath, '..')
pathRepoSrcAbsPath = os.path.abspath(joinRepoSrcPath)
# Add the directory containing the module to
# the Python path (wants absolute paths).
sys.path.append(pathRepoSrcAbsPath)
# Appending constants path
consPath = os.path.join(joinRepoSrcPath, 'constants')
consAbsPath = os.path.abspath(consPath)
# Fix: the constants path was previously appended *and* inserted, leaving a
# duplicate sys.path entry. A single insert at the front keeps the original
# lookup priority without the duplicate.
sys.path.insert(0, consAbsPath)
# Importing available variables
from sheets import *
import numpy as np

# Raw value arrays extracted from the severity DataFrames provided by `sheets`.
# Suffixes _l/_m/_h denote the low/medium/high severity groups; the middle
# token (assis/crrnt/phys/real) names the BI-RADS source — TODO confirm naming
# against the `sheets` module.
arr_birads_assis_l = df_birads_assis_l.values
arr_birads_assis_m = df_birads_assis_m.values
arr_birads_assis_h = df_birads_assis_h.values
arr_birads_crrnt_l = df_birads_crrnt_l.values
arr_birads_crrnt_m = df_birads_crrnt_m.values
arr_birads_crrnt_h = df_birads_crrnt_h.values
arr_birads_phys_l = df_birads_phys_l.values
arr_birads_phys_m = df_birads_phys_m.values
arr_birads_phys_h = df_birads_phys_h.values
arr_birads_real_l = df_birads_real_l.values
arr_birads_real_m = df_birads_real_m.values
arr_birads_real_h = df_birads_real_h.values
|
{"hexsha": "15666ce6da1f0791da513824bc1d7896fb488378", "size": 1713, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/structures/severities.py", "max_stars_repo_name": "mida-project/sa-uta7-recall-precision", "max_stars_repo_head_hexsha": "295e0409c1967d488f792c0287ffa50522c73145", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/structures/severities.py", "max_issues_repo_name": "mida-project/sa-uta7-recall-precision", "max_issues_repo_head_hexsha": "295e0409c1967d488f792c0287ffa50522c73145", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/structures/severities.py", "max_forks_repo_name": "mida-project/sa-uta7-recall-precision", "max_forks_repo_head_hexsha": "295e0409c1967d488f792c0287ffa50522c73145", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.765625, "max_line_length": 68, "alphanum_fraction": 0.783420899, "include": true, "reason": "import numpy", "num_tokens": 515}
|
# JavaCall constructor wrapper: build the Java object for `arg0` dimensions.
function MultivariateSummaryStatistics(arg0::jint)
    return MultivariateSummaryStatistics((jint,), arg0)
end
# Overload with an additional jboolean flag (semantics defined by the wrapped
# Java class — confirm against the Hipparchus documentation).
function MultivariateSummaryStatistics(arg0::jint, arg1::jboolean)
    return MultivariateSummaryStatistics((jint, jboolean), arg0, arg1)
end
# Forward the observation vector `arg0` to the Java method `addValue(double[])`.
function add_value(obj::MultivariateSummaryStatistics, arg0::Vector{jdouble})
    return jcall(obj, "addValue", void, (Vector{jdouble},), arg0)
end
# Delegate to the Java `clear()` method; returns nothing.
function clear(obj::MultivariateSummaryStatistics)
    return jcall(obj, "clear", void, ())
end
# Delegate to Java `equals(Object)`; returns a jboolean.
function equals(obj::MultivariateSummaryStatistics, arg0::Object)
    return jcall(obj, "equals", jboolean, (Object,), arg0)
end
# Delegate to Java `getCovariance()`; returns a RealMatrix handle.
function get_covariance(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getCovariance", RealMatrix, ())
end
# Delegate to Java `getDimension()`; returns the dimension as a jint.
function get_dimension(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getDimension", jint, ())
end
# Delegate to Java `getGeometricMean()`; returns a Vector{jdouble}.
function get_geometric_mean(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getGeometricMean", Vector{jdouble}, ())
end
# Delegate to Java `getMax()`; returns a Vector{jdouble}.
function get_max(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getMax", Vector{jdouble}, ())
end
# Delegate to Java `getMean()`; returns a Vector{jdouble}.
function get_mean(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getMean", Vector{jdouble}, ())
end
# Delegate to Java `getMin()`; returns a Vector{jdouble}.
function get_min(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getMin", Vector{jdouble}, ())
end
# Delegate to Java `getN()`; returns the observation count as a jlong.
function get_n(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getN", jlong, ())
end
# Delegate to Java `getStandardDeviation()`; returns a Vector{jdouble}.
function get_standard_deviation(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getStandardDeviation", Vector{jdouble}, ())
end
# Delegate to Java `getSum()`; returns a Vector{jdouble}.
function get_sum(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getSum", Vector{jdouble}, ())
end
# Delegate to Java `getSumLog()`; returns a Vector{jdouble}.
function get_sum_log(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getSumLog", Vector{jdouble}, ())
end
# Delegate to Java `getSumSq()`; returns a Vector{jdouble}.
function get_sum_sq(obj::MultivariateSummaryStatistics)
    return jcall(obj, "getSumSq", Vector{jdouble}, ())
end
# Delegate to Java `hashCode()`; returns a jint.
function hash_code(obj::MultivariateSummaryStatistics)
    return jcall(obj, "hashCode", jint, ())
end
# Delegate to Java `toString()`; returns a JString handle.
function to_string(obj::MultivariateSummaryStatistics)
    return jcall(obj, "toString", JString, ())
end
|
{"hexsha": "5a65d66874e4cd95a8164f4357862d8d98a3a5a3", "size": 2123, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "gen/HipparchusWrapper/StatWrapper/DescriptiveWrapper/multivariate_summary_statistics.jl", "max_stars_repo_name": "JuliaAstrodynamics/Orekit.jl", "max_stars_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-07T12:26:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T16:02:35.000Z", "max_issues_repo_path": "gen/HipparchusWrapper/StatWrapper/DescriptiveWrapper/multivariate_summary_statistics.jl", "max_issues_repo_name": "JuliaSpace/Orekit.jl", "max_issues_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-09-05T10:16:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-30T05:17:19.000Z", "max_forks_repo_path": "gen/HipparchusWrapper/StatWrapper/DescriptiveWrapper/multivariate_summary_statistics.jl", "max_forks_repo_name": "JuliaSpace/Orekit.jl", "max_forks_repo_head_hexsha": "e2dd3d8b2085dcbb1d2c75471dab42d6ddf52c99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0821917808, "max_line_length": 77, "alphanum_fraction": 0.764955252, "num_tokens": 516}
|
[STATEMENT]
lemma finite_fold_lderiv: "finite {fold (\<lambda>a r. \<guillemotleft>lderiv a r\<guillemotright>) w \<guillemotleft>s\<guillemotright> |w. True}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite {fold (\<lambda>a r. \<guillemotleft>lderiv a r\<guillemotright>) w \<guillemotleft>s\<guillemotright> |w. True}
[PROOF STEP]
using finite_lderivs
[PROOF STATE]
proof (prove)
using this:
finite {\<guillemotleft>lderivs xs ?r\<guillemotright> |xs. True}
goal (1 subgoal):
1. finite {fold (\<lambda>a r. \<guillemotleft>lderiv a r\<guillemotright>) w \<guillemotleft>s\<guillemotright> |w. True}
[PROOF STEP]
unfolding lderivs_alt
[PROOF STATE]
proof (prove)
using this:
finite {fold (\<lambda>a r. \<guillemotleft>lderiv a r\<guillemotright>) xs \<guillemotleft>?r\<guillemotright> |xs. True}
goal (1 subgoal):
1. finite {fold (\<lambda>a r. \<guillemotleft>lderiv a r\<guillemotright>) w \<guillemotleft>s\<guillemotright> |w. True}
[PROOF STEP]
.
|
{"llama_tokens": 387, "file": "MSO_Regex_Equivalence_Pi_Derivatives", "length": 3}
|
import tensorflow as tf  # NOTE(review): unused in this script; kept to preserve the original import's side effects
import matplotlib.pyplot as plt
import numpy as np

# Directories holding the four training/evaluation runs, plotted in this order.
_TRAIN_DIRS = ['log', 'log1', 'log2', 'log3']
_DUMP_DIRS = ['dump', 'dump1', 'dump2', 'dump3']
_COLORS = ['b', 'y', 'r', 'g']
# Metric keys exactly as they appear in the log lines (trailing space matters
# for 'eval avg class acc: ').
_TRAIN_KEYS = ['eval mean loss:', 'eval accuracy:', 'eval avg class acc: ']
_TRAIN_LABELS = ['eval mean loss', 'eval accuracy', 'eval avg class acc']


def _parse_train_log(path):
    """Return [losses, accuracies, avg_class_accs] parsed from a training log.

    Each slot collects the float values of the lines containing the
    corresponding key from ``_TRAIN_KEYS``. Replaces four copy-pasted loops in
    the original; ``with`` also closes the handle the original leaked.
    """
    metrics = [[], [], []]
    with open(path, 'rt') as fh:
        for line in fh:
            for slot, key in enumerate(_TRAIN_KEYS):
                if key in line:
                    value = line.strip().replace(key, '').replace(' ', '')
                    metrics[slot].append(float(value))
    return metrics


def _parse_eval_log(path, skip=7):
    """Return (labels, values) from an evaluation log.

    The first ``skip`` header lines are discarded; every remaining line has
    the form ``<label>: <float>``.
    """
    labels, values = [], []
    with open(path, 'rt') as fh:
        for _ in range(skip):
            next(fh)
        for line in fh:
            parts = line.strip().split(':')
            labels.append(parts[0].strip())
            values.append(float(parts[1].strip()))
    return labels, values


# ---- training curves, one figure per metric --------------------------------
train_metrics = [_parse_train_log('{}/log_train.txt'.format(d)) for d in _TRAIN_DIRS]

x = np.arange(250)  # assumes each run logged exactly 250 evaluations — TODO confirm
for m, metric_name in enumerate(_TRAIN_LABELS):
    plt.figure(figsize=(30, 15))
    for run, color in enumerate(_COLORS):
        plt.plot(x, train_metrics[run][m], c=color)
    plt.legend(['{}-f{}'.format(metric_name, run) for run in range(len(_TRAIN_DIRS))])
    plt.show()

# ---- per-class evaluation bar chart ----------------------------------------
eval_runs = [_parse_eval_log('{}/log_evaluate.txt'.format(d)) for d in _DUMP_DIRS]
label = eval_runs[0][0]  # class labels come from the first run, as before

bar_width = 0.2
X = np.arange(40)  # assumes 40 per-class entries in the evaluation logs — TODO confirm
plt.figure(figsize=(30, 15))
for run, (_, values) in enumerate(eval_runs):
    plt.bar(X + run * bar_width, values, bar_width, label='f{}'.format(run))
plt.legend()
plt.xticks(X + bar_width * 2, label)
plt.show()
|
{"hexsha": "ce34de539f361b0a2069bb0ab4bdfa3b9b217758", "size": 4210, "ext": "py", "lang": "Python", "max_stars_repo_path": "plt.py", "max_stars_repo_name": "xiangz201/cpointnet", "max_stars_repo_head_hexsha": "99492c8ae8a8df51932457f1fc69960f912e88de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plt.py", "max_issues_repo_name": "xiangz201/cpointnet", "max_issues_repo_head_hexsha": "99492c8ae8a8df51932457f1fc69960f912e88de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plt.py", "max_forks_repo_name": "xiangz201/cpointnet", "max_forks_repo_head_hexsha": "99492c8ae8a8df51932457f1fc69960f912e88de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5892857143, "max_line_length": 111, "alphanum_fraction": 0.6054631829, "include": true, "reason": "import numpy", "num_tokens": 1256}
|
import numpy as np
def feature_centroid(molecule, atom_indxs, conformer_idx):
    """
    Compute the 3D centroid of a feature spanning several atoms (aromatic,
    hydrophobic, negative or positive features).

    Parameters
    ----------
    molecule : rdkit.Chem.Mol
        Molecule that contains the feature.
    atom_indxs : tuple of int
        Indices of the atoms that belong to the feature.
    conformer_idx : int
        Index of the conformer whose coordinates are used.

    Returns
    -------
    numpy.ndarray
        Array of shape (3,) with the centroid coordinates.
    """
    rows = [
        [pos.x, pos.y, pos.z]
        for pos in (
            molecule.GetConformer(conformer_idx).GetAtomPosition(idx)
            for idx in atom_indxs
        )
    ]
    # reshape(-1, 3) keeps the (0, 3) shape for an empty feature, matching the
    # behaviour of a preallocated (n_atoms, 3) coordinate array.
    coords = np.asarray(rows, dtype=float).reshape(-1, 3)
    return coords.mean(axis=0)
|
{"hexsha": "bca90230eee3ce654b49833ebbed90a91cfb24d5", "size": 1165, "ext": "py", "lang": "Python", "max_stars_repo_path": "openpharmacophore/utils/centroid.py", "max_stars_repo_name": "dprada/OpenPharmacophore", "max_stars_repo_head_hexsha": "bfcf4bdafd586b27a48fd5d1f13614707b5e55a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-10T05:56:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-04T14:56:47.000Z", "max_issues_repo_path": "openpharmacophore/utils/centroid.py", "max_issues_repo_name": "dprada/OpenPharmacophore", "max_issues_repo_head_hexsha": "bfcf4bdafd586b27a48fd5d1f13614707b5e55a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2021-04-27T06:05:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-01T23:19:36.000Z", "max_forks_repo_path": "openpharmacophore/utils/centroid.py", "max_forks_repo_name": "dprada/OpenPharmacophore", "max_forks_repo_head_hexsha": "bfcf4bdafd586b27a48fd5d1f13614707b5e55a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-21T19:09:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-16T01:16:27.000Z", "avg_line_length": 32.3611111111, "max_line_length": 87, "alphanum_fraction": 0.6128755365, "include": true, "reason": "import numpy", "num_tokens": 255}
|
/**
* Copyright (c) 2011-2017 libbitcoin developers (see AUTHORS)
*
* This file is part of libbitcoin.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <bitcoin/bitcoin/config/parser.hpp>
#include <string>
#include <sstream>
#include <boost/algorithm/string.hpp>
#include <boost/filesystem.hpp>
#include <boost/program_options.hpp>
#include <boost/throw_exception.hpp>
#include <bitcoin/bitcoin/unicode/ifstream.hpp>
namespace libbitcoin {
namespace config {
using namespace boost::filesystem;
using namespace boost::program_options;
using namespace boost::system;
// The error is obtained from boost, which circumvents our localization.
// English-only hack to patch missing arg name in boost exception message.
std::string parser::format_invalid_parameter(const std::string& message)
{
    std::string clean_message(message);
    boost::replace_all(clean_message, "for option is invalid", "is invalid");
    return "Error: " + clean_message;
}
// Return the config-file path stored under `name`, or an empty path when the
// option was never supplied.
path parser::get_config_option(variables_map& variables,
    const std::string& name)
{
    // read config from the map so we don't require an early notify
    const auto& config = variables[name];
    // prevent exception in the case where the config variable is not set
    if (config.empty())
        return path();
    return config.as<path>();
}
// Return the boolean option `name`, defaulting to false when unset.
bool parser::get_option(variables_map& variables, const std::string& name)
{
    // Read settings from the map so we don't require an early notify call.
    const auto& variable = variables[name];
    // prevent exception in the case where the settings variable is not set.
    if (variable.empty())
        return false;
    return variable.as<bool>();
}
// Parse argv into `variables` using the descriptions returned by
// load_options() and load_arguments().
void parser::load_command_variables(variables_map& variables, int argc,
    const char* argv[])
{
    const auto options = load_options();
    const auto arguments = load_arguments();
    auto command_parser = command_line_parser(argc, argv).options(options)
        /*.allow_unregistered()*/.positional(arguments);
    store(command_parser.run(), variables);
}
// Merge environment variables carrying the given `prefix` into `variables`.
void parser::load_environment_variables(variables_map& variables,
    const std::string& prefix)
{
    const auto& environment_variables = load_environment();
    const auto environment = parse_environment(environment_variables, prefix);
    store(environment, variables);
}
// Load settings from the configuration file named by `option_name`.
// Returns true when a config file existed and was parsed; otherwise defaults
// are populated from an empty stream and false is returned.
bool parser::load_configuration_variables(variables_map& variables,
    const std::string& option_name)
{
    const auto config_settings = load_settings();
    const auto config_path = get_config_option(variables, option_name);
    // If the existence test errors out we pretend there's no file :/.
    error_code code;
    if (!config_path.empty() && exists(config_path, code))
    {
        const auto& path = config_path.string();
        bc::ifstream file(path);
        if (!file.good())
        {
            BOOST_THROW_EXCEPTION(reading_file(path.c_str()));
        }
        const auto config = parse_config_file(file, config_settings);
        store(config, variables);
        return true;
    }
    // Loading from an empty stream causes the defaults to populate.
    std::stringstream stream;
    const auto config = parse_config_file(stream, config_settings);
    store(config, variables);
    return false;
}
} // namespace config
} // namespace libbitcoin
|
{"hexsha": "4bcc29bd6b193c5dc6afa2a071c9ee9d708475c7", "size": 3917, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "3rdparty/libbitcoin/src/config/parser.cpp", "max_stars_repo_name": "anatolse/beam", "max_stars_repo_head_hexsha": "43c4ce0011598641d9cdeffbfdee66fde0a49730", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 631.0, "max_stars_repo_stars_event_min_datetime": "2018-11-10T05:56:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T13:21:00.000Z", "max_issues_repo_path": "3rdparty/libbitcoin/src/config/parser.cpp", "max_issues_repo_name": "anatolse/beam", "max_issues_repo_head_hexsha": "43c4ce0011598641d9cdeffbfdee66fde0a49730", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1824.0, "max_issues_repo_issues_event_min_datetime": "2018-11-08T11:32:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T12:33:03.000Z", "max_forks_repo_path": "3rdparty/libbitcoin/src/config/parser.cpp", "max_forks_repo_name": "anatolse/beam", "max_forks_repo_head_hexsha": "43c4ce0011598641d9cdeffbfdee66fde0a49730", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 216.0, "max_forks_repo_forks_event_min_datetime": "2018-11-12T08:07:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T20:50:19.000Z", "avg_line_length": 32.6416666667, "max_line_length": 78, "alphanum_fraction": 0.7184069441, "num_tokens": 820}
|
from flask import Flask, request
from flask_cors import CORS, \
cross_origin # ติดตั้งตัวนี้เพิ่มเพื่อให้สามารถเรียกใช้งานผ่านจากภายนอกได้ กรณีคนละ network
import joblib
import numpy as np
# Flask application instance; CORS is enabled so the API can be called from
# other origins/networks.
app = Flask(__name__)
CORS(app)
@app.route('/')  # register the root route
@cross_origin()
def helloworld():
    """Simple liveness endpoint returning a fixed greeting."""
    greeting = 'helloworld'
    return greeting
@app.route('/area', methods=['GET'])
@cross_origin()  # allow calls to this endpoint from other origins
def area():
    """Return the rectangle area computed from query params `w` and `h`."""
    w = float(request.values['w'])
    h = float(request.values['h'])
    return str(w * h)
@app.route('/bmi', methods=['GET'])
@cross_origin()  # allow calls to this endpoint from other origins
def bmi():
    """Return body-mass index from `weight` (kg) and `height` (cm) params."""
    weight_kg = float(request.values['weight'])
    height_m = float(request.values['height']) / 100
    return str(weight_kg / (height_m * height_m))
@app.route('/iris', methods=['POST'])  # this one uses POST
@cross_origin()
def predict_species():
    """Predict the iris species from a comma-separated `param` feature vector."""
    # The model file sits next to this script; otherwise a path is needed.
    model = joblib.load('iris.model')
    # All features arrive packed in a single request parameter.
    raw = request.values['param']
    features = np.array(raw.split(','), dtype=np.float32).reshape(1, -1)
    # The model emits class ids 0-2, mapped to species names below.
    predicted = model.predict(features)
    if predicted == 0:
        return 'Setosa'
    if predicted == 1:
        return 'Versicolour'
    return 'Virginica'
# Run the development server, reachable on all interfaces on port 5000.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port='5000', debug=True)
|
{"hexsha": "44bf011dbb34a42e290670eb48068c243fd1eadb", "size": 1622, "ext": "py", "lang": "Python", "max_stars_repo_path": "App.py", "max_stars_repo_name": "atthana/flask_machine_learning_as_service", "max_stars_repo_head_hexsha": "1f556fddb59b018b07de4ed8c6951669281a8514", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "App.py", "max_issues_repo_name": "atthana/flask_machine_learning_as_service", "max_issues_repo_head_hexsha": "1f556fddb59b018b07de4ed8c6951669281a8514", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "App.py", "max_forks_repo_name": "atthana/flask_machine_learning_as_service", "max_forks_repo_head_hexsha": "1f556fddb59b018b07de4ed8c6951669281a8514", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8039215686, "max_line_length": 140, "alphanum_fraction": 0.6553637485, "include": true, "reason": "import numpy", "num_tokens": 674}
|
"""
I/O for Tecplot ASCII data format, cf.
<http://paulbourke.net/dataformats/tp/>.
"""
import logging
import numpy
from ..__about__ import __version__ as version
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register
from .._mesh import Mesh
# Type-coercion table for recognised ZONE-record keys.
zone_key_to_type = {
    "T": str,
    "I": int,
    "J": int,
    "K": int,
    "N": int,
    "NODES": int,
    "E": int,
    "ELEMENTS": int,
    "F": str,
    "ET": str,
    "DATAPACKING": str,
    "ZONETYPE": str,
    "NV": int,
    "VARLOCATION": str,
}
# Tecplot element-type names (old and FE-prefixed spellings) -> meshio types.
tecplot_to_meshio_type = {
    "TRIANGLE": "triangle",
    "FETRIANGLE": "triangle",
    "QUADRILATERAL": "quad",
    "FEQUADRILATERAL": "quad",
    "TETRAHEDRON": "tetra",
    "FETETRAHEDRON": "tetra",
    "BRICK": "hexahedron",
    "FEBRICK": "hexahedron",
}
# meshio types -> Tecplot zone types; pyramids and wedges are written as
# (degenerate) FEBRICK elements.
meshio_to_tecplot_type = {
    "triangle": "FETRIANGLE",
    "quad": "FEQUADRILATERAL",
    "tetra": "FETETRAHEDRON",
    "pyramid": "FEBRICK",
    "wedge": "FEBRICK",
    "hexahedron": "FEBRICK",
}
# Cell types this writer supports.
meshio_only = set(meshio_to_tecplot_type.keys())
# Node-reordering (with repeats for degenerate bricks) used by the writer
# when all cells share one zone type.
meshio_to_tecplot_order = {
    "triangle": [0, 1, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 3],
    "pyramid": [0, 1, 2, 3, 4, 4, 4, 4],
    "wedge": [0, 1, 4, 3, 2, 2, 5, 5],
    "hexahedron": [0, 1, 2, 3, 4, 5, 6, 7],
}
# Node-reordering used for mixed meshes (2D cells padded to quads,
# 3D cells padded to bricks).
meshio_to_tecplot_order_2 = {
    "triangle": [0, 1, 2, 2],
    "quad": [0, 1, 2, 3],
    "tetra": [0, 1, 2, 2, 3, 3, 3, 3],
    "pyramid": [0, 1, 2, 3, 4, 4, 4, 4],
    "wedge": [0, 1, 4, 3, 2, 2, 5, 5],
    "hexahedron": [0, 1, 2, 3, 4, 5, 6, 7],
}
# Topological dimension of each supported cell type.
meshio_type_to_ndim = {
    "triangle": 2,
    "quad": 2,
    "tetra": 3,
    "pyramid": 3,
    "wedge": 3,
    "hexahedron": 3,
}
def read(filename):
    """Read a Tecplot ASCII file and return the resulting mesh."""
    with open_file(filename, "r") as handle:
        return read_buffer(handle)
def read_buffer(f):
    """Parse a Tecplot ASCII stream and build a Mesh from its first zone.

    Scans for the VARIABLES and ZONE records, reads that zone's data and
    connectivity, then splits the variables into point data and cell data.
    """
    while True:
        line = f.readline().strip()
        if line.upper().startswith("VARIABLES"):
            variables = _read_variables(line)
        elif line.upper().startswith("ZONE"):
            # ZONE can be defined on several lines e.g.
            # ```
            # ZONE NODES = 62533, ELEMENTS = 57982
            # , DATAPACKING = BLOCK, ZONETYPE = FEQUADRILATERAL
            # , VARLOCATION = ([1-2] = NODAL, [3-7] = CELLCENTERED)
            # ```
            # is valid (and understood by ParaView and VisIt).
            lines = [line]
            # Remember the stream position so we can rewind once the first
            # data line (starting with a digit) is reached.
            i = f.tell()
            line = f.readline().strip().upper()
            while True:
                if line and not line[0].isdigit():
                    lines += [line]
                    i = f.tell()
                    line = f.readline().strip().upper()
                else:
                    f.seek(i)
                    break
            line = " ".join(lines)
            zone = _read_zone(line)
            (
                num_nodes,
                num_cells,
                zone_format,
                zone_type,
                is_cell_centered,
            ) = _parse_fezone(zone, variables)
            # Per-variable count: cell-centered variables hold one value per
            # cell, nodal variables one value per node.
            num_data = [num_cells if i else num_nodes for i in is_cell_centered]
            data, cells = _read_zone_data(
                f,
                sum(num_data) if zone_format == "FEBLOCK" else num_nodes,
                num_cells,
                zone_format,
            )
            break  # Only support one zone, no need to read the rest
        elif not line:
            break
    # FEBLOCK lists variables one after another; FEPOINT lists nodes row-wise.
    data = (
        numpy.split(numpy.concatenate(data), numpy.cumsum(num_data[:-1]))
        if zone_format == "FEBLOCK"
        else numpy.transpose(data)
    )
    data = {k: v for k, v in zip(variables, data)}
    point_data, cell_data = {}, {}
    for i, variable in zip(is_cell_centered, variables):
        if i:
            cell_data[variable] = [data[variable]]
        else:
            point_data[variable] = data[variable]
    # Coordinates may be spelled upper- or lower-case; Z is optional (2D).
    x = "X" if "X" in point_data.keys() else "x"
    y = "Y" if "Y" in point_data.keys() else "y"
    z = "Z" if "Z" in point_data.keys() else "z" if "z" in point_data.keys() else ""
    points = numpy.column_stack((point_data.pop(x), point_data.pop(y)))
    if z:
        points = numpy.column_stack((points, point_data.pop(z)))
    # Tecplot connectivity is 1-based; meshio is 0-based.
    cells = [(tecplot_to_meshio_type[zone_type], cells - 1)]
    return Mesh(points, cells, point_data, cell_data)
def _read_variables(line):
# Gather variables in a list
line = line.split("=")[1]
line = [x for x in line.replace(",", " ").split()]
variables = []
i = 0
while i < len(line):
if '"' in line[i] and not (line[i].startswith('"') and line[i].endswith('"')):
var = "{}_{}".format(line[i], line[i + 1])
i += 1
else:
var = line[i]
variables.append(var.replace('"', ""))
i += 1
# Check that at least X and Y are defined
if "X" not in variables and "x" not in variables:
raise ReadError("Variable 'X' not found")
if "Y" not in variables and "y" not in variables:
raise ReadError("Variable 'Y' not found")
return variables
def _read_zone(line):
    """Parse a (possibly multi-line, pre-joined) ZONE record into a dict.

    The title and VARLOCATION entries are extracted first because they may
    contain the ',' and '=' characters used to split the remaining pairs.
    """
    # Gather zone entries in a dict
    line = line[5:]
    zone = {}
    # Look for zone title
    ivar = line.find('"')
    # If zone contains a title, process it and save the title
    if ivar >= 0:
        i1, i2 = ivar, ivar + line[ivar + 1 :].find('"') + 2
        zone_title = line[i1 + 1 : i2 - 1]
        line = line.replace(line[i1:i2], "PLACEHOLDER")
    else:
        zone_title = None
    # Look for VARLOCATION (problematic since it contains both ',' and '=')
    ivar = line.find("VARLOCATION")
    # If zone contains VARLOCATION, process it and remove the key/value pair
    if ivar >= 0:
        i1, i2 = line.find("("), line.find(")")
        zone["VARLOCATION"] = line[i1 : i2 + 1].replace(" ", "")
        line = line[:ivar] + line[i2 + 1 :]
    # Split remaining key/value pairs separated by '='
    line = [x for x in line.replace(",", " ").split() if x != "="]
    i = 0
    while i < len(line) - 1:
        # '=' may be glued to the key, to the value, or stand alone; handle
        # each spacing variant.
        if "=" in line[i]:
            if not (line[i].startswith("=") or line[i].endswith("=")):
                key, value = line[i].split("=")
            else:
                key = line[i].replace("=", "")
                value = line[i + 1]
                i += 1
        else:
            key = line[i]
            value = line[i + 1].replace("=", "")
            i += 1
        # Coerce the value with the per-key type table.
        zone[key] = zone_key_to_type[key](value)
        i += 1
    # Add zone title to zone dict
    if zone_title:
        zone["T"] = zone_title
    return zone
def _parse_fezone(zone, variables):
    """Extract grid metadata from a parsed ZONE dict.

    Note: consumes the recognised keys from `zone` via pop (the caller's dict
    is mutated). Returns (num_nodes, num_cells, zone_format, zone_type,
    is_cell_centered) where is_cell_centered is a 0/1 int array, one entry
    per variable. Raises ReadError for non finite-element zones or missing
    mandatory keys.
    """
    # Check that the grid is unstructured
    if "F" in zone.keys():
        if zone["F"] not in {"FEPOINT", "FEBLOCK"}:
            raise ReadError("Tecplot reader can only read finite-element type grids")
        if "ET" not in zone.keys():
            raise ReadError("Element type 'ET' not found")
        zone_format = zone.pop("F")
        zone_type = zone.pop("ET")
    elif "DATAPACKING" in zone.keys():
        if "ZONETYPE" not in zone.keys():
            raise ReadError("Zone type 'ZONETYPE' not found")
        zone_format = "FE" + zone.pop("DATAPACKING")
        zone_type = zone.pop("ZONETYPE")
    else:
        raise ReadError("Data format 'F' or 'DATAPACKING' not found")
    # Number of nodes
    if "N" in zone.keys():
        num_nodes = zone.pop("N")
    elif "NODES" in zone.keys():
        num_nodes = zone.pop("NODES")
    else:
        raise ReadError("Number of nodes not found")
    # Number of elements
    if "E" in zone.keys():
        num_cells = zone.pop("E")
    elif "ELEMENTS" in zone.keys():
        num_cells = zone.pop("ELEMENTS")
    else:
        raise ReadError("Number of elements not found")
    # Variable locations
    is_cell_centered = numpy.zeros(len(variables), dtype=int)
    if zone_format == "FEBLOCK":
        # NV gives the index of the first cell-centered variable; VARLOCATION
        # lists explicit 1-based index ranges like "[3-7]=CELLCENTERED".
        if "NV" in zone.keys():
            node_value = zone.pop("NV")
            is_cell_centered[node_value:] = 1
        elif "VARLOCATION" in zone.keys():
            varlocation = zone.pop("VARLOCATION")[1:-1].split(",")
            for location in varlocation:
                varrange, varloc = location.split("=")
                varloc = varloc.strip()
                if varloc == "CELLCENTERED":
                    varrange = varrange[1:-1].split("-")
                    if len(varrange) == 1:
                        i = int(varrange[0]) - 1
                        is_cell_centered[i] = 1
                    else:
                        imin = int(varrange[0]) - 1
                        imax = int(varrange[1]) - 1
                        for i in range(imin, imax + 1):
                            is_cell_centered[i] = 1
    return num_nodes, num_cells, zone_format, zone_type, is_cell_centered
def _read_zone_data(f, num_data, num_cells, zone_format):
data, count = [], 0
while count < num_data:
line = f.readline().strip().split()
if line:
data += [[float(x) for x in line]]
count += len(line) if zone_format == "FEBLOCK" else 1
cells, count = [], 0
while count < num_cells:
line = f.readline().strip().split()
if line:
cells += [[[int(x) for x in line]]]
count += 1
return data, numpy.concatenate(cells)
def write(filename, mesh):
    """Write `mesh` to `filename` in Tecplot ASCII block format.

    Unsupported cell blocks are skipped with a warning; pyramids and wedges
    are emitted as degenerate bricks. Point data and cell data become nodal
    and cell-centered variables respectively.
    """
    # Check cell types
    cell_types = []
    cell_blocks = []
    for ic, c in enumerate(mesh.cells):
        if c.type in meshio_only:
            cell_types.append(c.type)
            cell_blocks.append(ic)
        else:
            logging.warning(
                (
                    "Tecplot does not support cell type '{}'. "
                    "Skipping cell block {}."
                ).format(c.type, ic)
            )
    # Define cells and zone type
    cell_types = numpy.unique(cell_types)
    if len(cell_types) == 0:
        raise WriteError("No cell type supported by Tecplot in mesh")
    elif len(cell_types) == 1:
        # Nothing much to do except converting pyramids and wedges to hexahedra
        zone_type = meshio_to_tecplot_type[cell_types[0]]
        cells = numpy.concatenate(
            [
                mesh.cells[ic].data[:, meshio_to_tecplot_order[mesh.cells[ic].type]]
                for ic in cell_blocks
            ]
        )
    else:
        # Check if the mesh contains 2D and 3D cells
        num_dims = [meshio_type_to_ndim[mesh.cells[ic].type] for ic in cell_blocks]
        # Skip 2D cells if it does
        if len(numpy.unique(num_dims)) == 2:
            logging.warning("Mesh contains 2D and 3D cells. Skipping 2D cells.")
            cell_blocks = [ic for ic, ndim in zip(cell_blocks, num_dims) if ndim == 3]
        # Convert 2D cells to quads / 3D cells to hexahedra
        zone_type = "FEQUADRILATERAL" if num_dims[0] == 2 else "FEBRICK"
        cells = numpy.concatenate(
            [
                mesh.cells[ic].data[:, meshio_to_tecplot_order_2[mesh.cells[ic].type]]
                for ic in cell_blocks
            ]
        )
    # Define variables
    variables = ["X", "Y"]
    data = [mesh.points[:, 0], mesh.points[:, 1]]
    # varrange holds the 1-based [first, last] indices of the cell-centered
    # variables; first > last means "none".
    varrange = [3, 0]
    if mesh.points.shape[1] == 3:
        variables += ["Z"]
        data += [mesh.points[:, 2]]
        varrange[0] += 1
    for k, v in mesh.point_data.items():
        if k not in {"X", "Y", "Z", "x", "y", "z"}:
            if v.ndim == 1:
                variables += [k]
                data += [v]
                varrange[0] += 1
            elif v.ndim == 2:
                # Multi-component fields are written as one scalar per column.
                for i, vv in enumerate(v.T):
                    variables += ["{}_{}".format(k, i)]
                    data += [vv]
                    varrange[0] += 1
            else:
                logging.warning("Skipping point data '{}'.".format(k))
    if mesh.cell_data:
        varrange[1] = varrange[0] - 1
        for k, v in mesh.cell_data.items():
            if k not in {"X", "Y", "Z", "x", "y", "z"}:
                v = numpy.concatenate([v[ic] for ic in cell_blocks])
                if v.ndim == 1:
                    variables += [k]
                    data += [v]
                    varrange[1] += 1
                elif v.ndim == 2:
                    for i, vv in enumerate(v.T):
                        variables += ["{}_{}".format(k, i)]
                        data += [vv]
                        varrange[1] += 1
                else:
                    logging.warning("Skipping cell data '{}'.".format(k))
    with open_file(filename, "w") as f:
        # Title
        f.write('TITLE = "Written by meshio v{}"\n'.format(version))
        # Variables
        variables_str = ", ".join('"{}"'.format(var) for var in variables)
        f.write("VARIABLES = {}\n".format(variables_str))
        # Zone record
        num_nodes = len(mesh.points)
        num_cells = sum(len(mesh.cells[ic].data) for ic in cell_blocks)
        f.write("ZONE NODES = {}, ELEMENTS = {},\n".format(num_nodes, num_cells))
        f.write("DATAPACKING = BLOCK, ZONETYPE = {}".format(zone_type))
        if varrange[0] <= varrange[1]:
            f.write(",\n")
            varlocation_str = (
                "{}".format(varrange[0])
                if varrange[0] == varrange[1]
                else "{}-{}".format(varrange[0], varrange[1])
            )
            f.write("VARLOCATION = ([{}] = CELLCENTERED)\n".format(varlocation_str))
        else:
            f.write("\n")
        # Zone data
        for arr in data:
            _write_table(f, arr)
        # CellBlock
        # Tecplot connectivity is 1-based.
        cells = numpy.array(cells) + 1
        for cell in cells:
            f.write("{}\n".format(" ".join(str(c) for c in cell)))
def _write_table(f, data, ncol=20):
    """Write the entries of *data* to *f* as whitespace-separated values,
    at most *ncol* entries per line (Tecplot BLOCK data layout)."""
    num_full_rows = len(data) // ncol
    # Cut points after every full row; numpy.split returns the remainder
    # (possibly empty) as the final chunk.
    split_points = numpy.full(num_full_rows, ncol).cumsum()
    for chunk in numpy.split(data, split_points):
        if len(chunk) == 0:
            continue
        f.write(" ".join(str(entry) for entry in chunk) + "\n")
# Register the Tecplot reader/writer with meshio's format dispatch for the
# .dat and .tec file extensions.
register("tecplot", [".dat", ".tec"], read, {"tecplot": write})
|
{"hexsha": "91759b008da5414c0e9ca93bbe490395abe79984", "size": 13916, "ext": "py", "lang": "Python", "max_stars_repo_path": "meshio/tecplot/_tecplot.py", "max_stars_repo_name": "c-abird/meshio", "max_stars_repo_head_hexsha": "21301c3c5df3b196c60bea0cf71f27736f9a337e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-01T09:31:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-01T09:31:33.000Z", "max_issues_repo_path": "meshio/tecplot/_tecplot.py", "max_issues_repo_name": "c-abird/meshio", "max_issues_repo_head_hexsha": "21301c3c5df3b196c60bea0cf71f27736f9a337e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "meshio/tecplot/_tecplot.py", "max_forks_repo_name": "c-abird/meshio", "max_forks_repo_head_hexsha": "21301c3c5df3b196c60bea0cf71f27736f9a337e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7196467991, "max_line_length": 86, "alphanum_fraction": 0.5144438057, "include": true, "reason": "import numpy", "num_tokens": 3857}
|
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import pytest
import numpy as np
import pandas as pd
from .. import gen_feat_val_list, gen_name_from_class
from .. import reverse_map, unify_data, unify_vector
@pytest.fixture
def fixture_feat_val_list():
    """Reference (feature, value) pairs used by test_that_feat_val_generated."""
    features = ["race", "age", "gender"]
    values = [3, -2, 1]
    return list(zip(features, values))
def test_unify_vector_on_ndim_array():
    """unify_vector should flatten a (n, 1) column vector into a 1-D array."""
    column = np.arange(4).reshape(-1, 1)
    flattened = unify_vector(column)
    assert np.all(flattened == np.arange(4))
def test_unify_fails_on_missing():
    """unify_data must raise ValueError on feature matrices containing NaN."""
    data_with_nan = np.array([[1, 2], [3, np.nan]])
    labels = np.array([0, 1])
    with pytest.raises(ValueError):
        unify_data(data_with_nan, labels)
def test_unify_dataframe_smoke():
    """Smoke test: unify_data accepts a mixed-dtype pandas DataFrame/Series."""
    df = pd.DataFrame({
        "f1": [1.5, "a"],
        "f2": [3, "b"],
        "label": [0, 1],
    })
    # All columns but the last are features; the last column is the label.
    X = df.iloc[:, :-1]
    y = df.iloc[:, -1]
    unify_data(X, y)
def test_unify_list_data():
    """unify_data should turn nested lists into 2-D/1-D numpy arrays and
    synthesize feature names/types."""
    raw_X = [[1, 2], [3, 4]]
    raw_y = [0, 0]
    X, y, names, types = unify_data(raw_X, raw_y)
    assert names is not None
    assert types is not None
    assert isinstance(X, np.ndarray) and X.ndim == 2
    assert isinstance(y, np.ndarray) and y.ndim == 1
def test_that_names_generated():
    """gen_name_from_class should derive '<ClassName>_<counter>' names."""
    class SomeClass:
        pass

    generated = gen_name_from_class(SomeClass())
    assert generated == "SomeClass_0"
def test_that_feat_val_generated(fixture_feat_val_list):
    """gen_feat_val_list should pair features with values as in the fixture."""
    result = gen_feat_val_list(["age", "race", "gender"], [-2, 3, 1])
    assert result == fixture_feat_val_list
def test_reverse_map():
    """reverse_map should swap every key with its value."""
    forward = {"a": 1, "b": 2, "c": 3}
    backward = reverse_map(forward)
    assert backward == {1: "a", 2: "b", 3: "c"}
|
{"hexsha": "3ca27351cbfa03765683b9f0bc322a2c2b6a0902", "size": 1976, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/interpret-core/interpret/utils/test/test_utils.py", "max_stars_repo_name": "prateekiiest/interpret", "max_stars_repo_head_hexsha": "b5530a587251a77516ab443037fc37f71708564c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2674, "max_stars_repo_stars_event_min_datetime": "2019-10-03T14:14:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:40:49.000Z", "max_issues_repo_path": "python/interpret-core/interpret/utils/test/test_utils.py", "max_issues_repo_name": "prateekiiest/interpret", "max_issues_repo_head_hexsha": "b5530a587251a77516ab443037fc37f71708564c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 257, "max_issues_repo_issues_event_min_datetime": "2019-11-08T19:22:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T20:09:07.000Z", "max_forks_repo_path": "python/interpret-core/interpret/utils/test/test_utils.py", "max_forks_repo_name": "prateekiiest/interpret", "max_forks_repo_head_hexsha": "b5530a587251a77516ab443037fc37f71708564c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 367, "max_forks_repo_forks_event_min_datetime": "2019-10-31T15:33:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:40:50.000Z", "avg_line_length": 24.0975609756, "max_line_length": 83, "alphanum_fraction": 0.6548582996, "include": true, "reason": "import numpy", "num_tokens": 564}
|
import random
import numpy as np
import torch
def fix_seeds():
    """Seed every RNG in use (torch CPU/CUDA, numpy, stdlib random) with 0 and
    force deterministic cuDNN kernels, for reproducible runs."""
    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed,
                    np.random.seed, random.seed):
        seed_fn(0)
    torch.backends.cudnn.deterministic = True
|
{"hexsha": "e0b5c695a214978e899ad0c504efd164ba156d37", "size": 208, "ext": "py", "lang": "Python", "max_stars_repo_path": "divnoising/analysis/randomness.py", "max_stars_repo_name": "ashesh-0/DivNoising", "max_stars_repo_head_hexsha": "45a4d3f04041887bcc6a748e15c74520521c003a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "divnoising/analysis/randomness.py", "max_issues_repo_name": "ashesh-0/DivNoising", "max_issues_repo_head_hexsha": "45a4d3f04041887bcc6a748e15c74520521c003a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "divnoising/analysis/randomness.py", "max_forks_repo_name": "ashesh-0/DivNoising", "max_forks_repo_head_hexsha": "45a4d3f04041887bcc6a748e15c74520521c003a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.0, "max_line_length": 45, "alphanum_fraction": 0.7067307692, "include": true, "reason": "import numpy", "num_tokens": 56}
|
import os
import numpy as np
import torch as T
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
from models import Agent
import json
import pickle
class PPOMemory:
    """Rollout buffer for PPO.

    Transitions are stored in six parallel lists; generate_batches hands them
    back as numpy arrays together with shuffled mini-batch index slices.
    """

    def __init__(self, batch_size):
        self.batch_size = batch_size
        # clear_memory owns the (re)initialisation of the buffers.
        self.clear_memory()

    def _buffers(self):
        # The six parallel buffers, in storage/return order.
        return (self.states, self.actions, self.probs,
                self.vals, self.rewards, self.dones)

    def generate_batches(self):
        """Return (states, actions, probs, vals, rewards, dones, batches)
        where `batches` is a list of shuffled index arrays of size batch_size."""
        n_states = len(self.states)
        order = np.arange(n_states, dtype=np.int64)
        np.random.shuffle(order)
        batches = [order[start:start + self.batch_size]
                   for start in np.arange(0, n_states, self.batch_size)]
        arrays = tuple(np.array(buffer) for buffer in self._buffers())
        return arrays + (batches,)

    def store_memory(self, state, action, probs, vals, reward, done):
        """Append one transition to the parallel buffers."""
        for buffer, value in zip(self._buffers(),
                                 (state, action, probs, vals, reward, done)):
            buffer.append(value)

    def clear_memory(self):
        """Drop every stored transition."""
        self.states, self.actions, self.probs = [], [], []
        self.vals, self.rewards, self.dones = [], [], []
class ActorNetwork(nn.Module):
    """Policy network: MLP mapping observations to a Categorical distribution
    over discrete actions.

    Required ``config`` keys:
        input_dims: iterable of observation dims (unpacked into nn.Linear)
        fc1_dims, fc2_dims: hidden-layer widths
        n_actions: size of the discrete action space
        alpha: Adam learning rate
    """
    def __init__(self, **config):
        super(ActorNetwork, self).__init__()
        # Bug fix: the layer sizes were previously referenced as bare names
        # (input_dims, fc1_dims, fc2_dims, n_actions) that are defined nowhere
        # in this module, raising NameError on construction.  Read them from
        # `config`, consistently with how `alpha` is read below.
        input_dims = config['input_dims']
        fc1_dims = config['fc1_dims']
        fc2_dims = config['fc2_dims']
        n_actions = config['n_actions']
        self.actor = nn.Sequential(
            nn.Linear(*input_dims, fc1_dims),
            nn.ReLU(),
            nn.Linear(fc1_dims, fc2_dims),
            nn.ReLU(),
            nn.Linear(fc2_dims, n_actions),
            nn.Softmax(dim=-1)
        )
        self.optimizer = optim.Adam(self.parameters(), lr=config['alpha'])
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device)

    def forward(self, state):
        """Return a Categorical action distribution for `state`."""
        dist = self.actor(state)
        dist = Categorical(dist)
        return dist
class CriticNetwork(nn.Module):
    """Value network: MLP mapping observations to a scalar state value.

    Required ``config`` keys:
        input_dims: iterable of observation dims (unpacked into nn.Linear)
        fc1_dims, fc2_dims: hidden-layer widths
        alpha: Adam learning rate
    """
    def __init__(self, **config):
        super(CriticNetwork, self).__init__()
        # Bug fix: the layer sizes were previously referenced as bare names
        # (input_dims, fc1_dims, fc2_dims) defined nowhere in this module,
        # raising NameError on construction.  Read them from `config`.
        input_dims = config['input_dims']
        fc1_dims = config['fc1_dims']
        fc2_dims = config['fc2_dims']
        self.critic = nn.Sequential(
            nn.Linear(*input_dims, fc1_dims),
            nn.ReLU(),
            nn.Linear(fc1_dims, fc2_dims),
            nn.ReLU(),
            nn.Linear(fc2_dims, 1)
        )
        self.optimizer = optim.Adam(self.parameters(), lr=config['alpha'])
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device)

    def forward(self, state):
        """Return the estimated value V(state), shape (batch, 1)."""
        value = self.critic(state)
        return value
class PolicyGradientAgent(Agent):
    """PPO agent: actor/critic networks plus a rollout buffer (PPOMemory),
    with GAE advantage estimation and a clipped surrogate loss in _learn.
    """
    def __init__(self, **config):
        # Expected keys: gamma, policy_clip, n_epochs, gae_lambda, batch_size,
        # plus whatever Agent / ActorNetwork / CriticNetwork read from config.
        super().__init__(**config)
        self.gamma = config["gamma"]                # reward discount factor
        self.policy_clip = config['policy_clip']    # PPO clipping range
        self.n_epochs = config['n_epochs']          # update epochs per rollout
        self.gae_lambda = config['gae_lambda']      # GAE smoothing parameter
        self.actor = ActorNetwork(**config)
        self.critic = CriticNetwork(**config)
        self.memory = PPOMemory(config['batch_size'])
    #------------- Override the inheritance functions from Agent
    def predict(self, state):
        """Sample an action for `state`; return (action, log_prob, value).

        NOTE(review): byte-for-byte duplicate of _choose_action below —
        candidates for deduplication.
        """
        state = T.tensor([state], dtype=T.float).to(self.actor.device)
        dist = self.actor(state)
        value = self.critic(state)
        action = dist.sample()
        probs = T.squeeze(dist.log_prob(action)).item()
        action = T.squeeze(action).item()
        value = T.squeeze(value).item()
        return action, probs, value
    def learn(self, previous_state, action, next_state, reward, terminal):
        # Delegates unchanged to the base Agent implementation.
        return super().learn(previous_state, action, next_state, reward, terminal)
    def learn_episode(self, episode_num, **kwargs):
        # Delegates unchanged to the base Agent implementation.
        return super().learn_episode(episode_num, **kwargs)
    def save_model(self, **kwargs):
        """Persist actor/critic weights, the rollout buffer, and the call
        kwargs under kwargs['save_path'] (directory is created if missing)."""
        path = kwargs['save_path']
        os.makedirs(path, exist_ok=True)
        T.save(self.actor.state_dict(), path + "/actor.pt")
        T.save(self.critic.state_dict(), path + "/critic.pt")
        with open(path + "/memory.pkl", "wb") as f:
            pickle.dump(self.memory, f)
        with open(path + "/model_config.txt", "w") as f:
            f.write(json.dumps(kwargs, indent = 6))
        print("> Ending simulation, PolicyGradient model successfully saved")
    def load_model(self, **kwargs):
        """Restore actor/critic weights and the rollout buffer from
        kwargs['load_path']."""
        path = kwargs['load_path']
        self.actor.load_state_dict(T.load(path + "/actor.pt"))
        self.critic.load_state_dict(T.load(path + "/critic.pt"))
        # NOTE(review): this handle is never closed — consider a with-block.
        file_memory = open(path + "/memory.pkl", 'rb')
        self.memory = pickle.load(file_memory)
        print("> Starting simulation, PolicyGradient model successfully loaded")
    #----- Private part
    def _remember(self, state, action, probs, vals, reward, done):
        # Forward one transition into the rollout buffer.
        self.memory.store_memory(state, action, probs, vals, reward, done)
    def _choose_action(self, observation):
        """Sample an action for `observation`; return (action, log_prob, value).

        NOTE(review): duplicate of predict above.
        """
        state = T.tensor([observation], dtype=T.float).to(self.actor.device)
        dist = self.actor(state)
        value = self.critic(state)
        action = dist.sample()
        probs = T.squeeze(dist.log_prob(action)).item()
        action = T.squeeze(action).item()
        value = T.squeeze(value).item()
        return action, probs, value
    def _learn(self):
        """Run n_epochs of PPO updates on the stored rollout, then clear it."""
        for _ in range(self.n_epochs):
            state_arr, action_arr, old_prob_arr, vals_arr,\
            reward_arr, dones_arr, batches = self.memory.generate_batches()
            values = vals_arr
            # Advantage estimation (GAE-style double loop over time steps;
            # O(n^2) in the rollout length).
            advantage = np.zeros(len(reward_arr), dtype=np.float32)
            for t in range(len(reward_arr)-1):
                discount = 1
                a_t = 0
                for k in range(t, len(reward_arr)-1):
                    # Discounted TD residual; (1 - done) zeroes the bootstrap
                    # value past terminal states.
                    a_t += discount*(reward_arr[k] + self.gamma*values[k+1]*(1-int(dones_arr[k])) - values[k])
                    discount *= self.gamma*self.gae_lambda
                advantage[t] = a_t
            advantage = T.tensor(advantage).to(self.actor.device)
            values = T.tensor(values).to(self.actor.device)
            for batch in batches:
                states = T.tensor(state_arr[batch], dtype=T.float).to(self.actor.device)
                old_probs = T.tensor(old_prob_arr[batch]).to(self.actor.device)
                actions = T.tensor(action_arr[batch]).to(self.actor.device)
                dist = self.actor(states)
                critic_value = self.critic(states)
                critic_value = T.squeeze(critic_value)
                new_probs = dist.log_prob(actions)
                # Importance ratio pi_new / pi_old, from stored log-probs.
                prob_ratio = new_probs.exp() / old_probs.exp()
                #prob_ratio = (new_probs - old_probs).exp()
                weighted_probs = advantage[batch] * prob_ratio
                weighted_clipped_probs = T.clamp(prob_ratio, 1-self.policy_clip,
                        1+self.policy_clip)*advantage[batch]
                # Clipped surrogate objective, negated for gradient descent.
                actor_loss = -T.min(weighted_probs, weighted_clipped_probs).mean()
                returns = advantage[batch] + values[batch]
                critic_loss = (returns-critic_value)**2
                critic_loss = critic_loss.mean()
                # Joint step: actor loss plus 0.5-weighted critic MSE.
                total_loss = actor_loss + 0.5*critic_loss
                self.actor.optimizer.zero_grad()
                self.critic.optimizer.zero_grad()
                total_loss.backward()
                self.actor.optimizer.step()
                self.critic.optimizer.step()
        self.memory.clear_memory()
|
{"hexsha": "5538d177b7f94098ea79d24d71b67f8160823a54", "size": 7639, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/policy_gradient/policygradient.py", "max_stars_repo_name": "paultheron-X/INF581-Trading-agent", "max_stars_repo_head_hexsha": "2bff32c027507cd4e1ccda9d3b79325ef8977481", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/policy_gradient/policygradient.py", "max_issues_repo_name": "paultheron-X/INF581-Trading-agent", "max_issues_repo_head_hexsha": "2bff32c027507cd4e1ccda9d3b79325ef8977481", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/policy_gradient/policygradient.py", "max_forks_repo_name": "paultheron-X/INF581-Trading-agent", "max_forks_repo_head_hexsha": "2bff32c027507cd4e1ccda9d3b79325ef8977481", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7227272727, "max_line_length": 110, "alphanum_fraction": 0.5771697866, "include": true, "reason": "import numpy", "num_tokens": 1648}
|
using Distributed
using Random
using Logging
# Desired size of the worker pool; spawn workers until nprocs() exceeds it
# (the master process is counted by nprocs, hence the +1).
workers = 8
if nprocs() <= workers
    addprocs(workers + 1 - nprocs())
end
# Load the model and dynamics code on every process so pmap can call them.
@everywhere include("models/Vibron.jl")
@everywhere include("modules/ClassicalDynamics.jl")
# Suppress Info-level (and lower) log records on all processes.
@everywhere disable_logging(Logging.Info)
""" Calculates Poincaré sections with Lyapunov exponents for various energies.
    Parallel calculation, takes usually days to finish.

    Appends one summary row per energy to `path * file` and returns `true`;
    returns `false` when the (energy, parameter) pair appears in `alreadySolved`.
"""
@everywhere function SolveItem(energy, parameters, dimension; file="Vibron.txt", path="", alreadySolved=[], sectionPlane=2)
    # NOTE(review): `sectionPlane` is accepted but never used below — confirm
    # whether it should be forwarded to SolveEnergy.
    # Skip pairs already present in the results file.
    for (E, λ) in alreadySolved
        if isapprox(E, energy) && isapprox(parameters[1], λ)
            println("Skipped $parameters, E=$energy, dim=$dimension (already calculated)")
            return false
        end
    end
    # Heavy lifting: Poincaré section + Lyapunov exponents (2-hour timeout).
    time = @elapsed averageLyapunov, freg, trajectories, lyapunovs = SolveEnergy(energy, parameters, dimension, savePath=path, timeout=7200, showFigures=false, randomize=true)
    # Classify entries of averageLyapunov:
    #   x == 0.0  -> counted as `error`
    #   x > 0.0   -> counted as `total` (sum accumulated in meanLyapunov)
    #   x > 0.01  -> additionally counted as `chaos`
    chaos = 0
    total = 0
    error = 0
    meanLyapunov = 0
    meanLyapunovChaos = 0
    for x in averageLyapunov
        if x > 0.0
            total += 1
            meanLyapunov += x
        end
        if x == 0.0
            error += 1
        end
        if x > 0.01
            chaos += 1
            meanLyapunovChaos += x
        end
    end
    # Summary statistics of the individual Lyapunov exponents.
    # NOTE(review): `mean`/`var` require the Statistics stdlib, which is not
    # `using`-ed in this file — presumably brought in by an included file; confirm.
    varλ = 0
    if length(lyapunovs) == 0
        maxλ = 0
        meanλ = 0
    else
        maxλ = maximum(lyapunovs)
        meanλ = mean(lyapunovs)
        if length(lyapunovs) > 1
            varλ = var(lyapunovs)
        end
    end
    result = [energy, parameters[1], total, chaos, error, freg, total > 0 ? meanLyapunov / total : 0, chaos > 0 ? meanLyapunovChaos / chaos : 0, length(lyapunovs), maxλ, meanλ, varλ, myid(), time, trajectories]
    # Append the summary row to the shared results file.
    open(path * file, "a") do io
        println(io, result)
    end
    return true
end
"""Sweep the whole (A, energy) grid for a fixed C, distributing the
SolveItem calls over the worker pool in shuffled order."""
function RunMapC(; C=0.2, path="", dimension=101, step=0.1, sectionPlane=3)
    prefix = path * "Vibron_"
    file = "Map_dim=$(dimension)_C=$C.txt"
    solved = ReadMap(prefix * file)
    grid = [(energy, [0.5 * (1 - A), -A, C], dimension) for energy in -1.2:step:1, A in 0:step:1]
    queue = shuffle(grid)
    println("To be calculated $(length(queue)).")
    println("Already calculated $(length(solved)) points.")
    pmap(args -> SolveItem(args...; file=file, path=prefix, alreadySolved=solved, sectionPlane=sectionPlane), queue)
    return
end
"""Calculates freg across the energy range for one fixed (A, C) pair,
distributing the SolveItem calls over the worker pool."""
function RunAC(; C=0.2, A=0.4, path="", dimension=101, step=0.01, sectionPlane=3)
    prefix = path * "Vibron_"
    file = "Energy_dim=$(dimension)_$([A, C]).txt"
    solved = ReadMap(prefix * file)
    tasks = [(energy, [0.5 * (1 - A), -A, C], dimension) for energy in -1.2:step:1]
    println("To be calculated $(length(tasks)).")
    println("Already calculated $(length(solved)) points.")
    pmap(args -> SolveItem(args...; file=file, path=prefix, alreadySolved=solved, sectionPlane=sectionPlane), tasks)
    return
end
"""Parse previously computed (energy, λ) pairs from a results file.

Best-effort: returns an empty list when the file is missing or a line
cannot be parsed (any exception aborts the scan silently).
"""
function ReadMap(file="")
    pairs = []
    try
        for raw in eachline(file)
            # Strip the array punctuation the writer produced, keep the numbers.
            cleaned = raw
            for token in ("[", "]", ",")
                cleaned = replace(cleaned, token => "")
            end
            fields = split(cleaned)
            push!(pairs, (parse(Float64, fields[1]), parse(Float64, fields[2])))
        end
    catch
    end
    return pairs
end
|
{"hexsha": "4b8a3442b40cc0a79792368a42f21d9741596c2c", "size": 3380, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "VibronMap.jl", "max_stars_repo_name": "PavelStransky/ClassicalDynamics", "max_stars_repo_head_hexsha": "967338a7a64282bf68f1e9be1a829b0d657cc0d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "VibronMap.jl", "max_issues_repo_name": "PavelStransky/ClassicalDynamics", "max_issues_repo_head_hexsha": "967338a7a64282bf68f1e9be1a829b0d657cc0d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "VibronMap.jl", "max_forks_repo_name": "PavelStransky/ClassicalDynamics", "max_forks_repo_head_hexsha": "967338a7a64282bf68f1e9be1a829b0d657cc0d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1666666667, "max_line_length": 210, "alphanum_fraction": 0.6053254438, "num_tokens": 1011}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.