# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import unittest
import numpy as np
sys.path.append('./')
from feature_selection import (prob, shannon_entropy, conditional_shannon_entropy,
mutual_information, conditional_mutual_information)
class TestFunctions(unittest.TestCase):
def test_prob(self):
data = np.array([[True, 0, 4],
[True, 2, 3],
[True, 1, 2],
[False, 0, 1],
[False, 0, 1]])
multidim_prob = prob(data) # multidimensional probabilities
# Expected values
expected_bins = (2, 3, 4) # number of unique values per data column
# Check shape
self.assertEqual(multidim_prob.shape, expected_bins)
        # Check values
        # Note: two items in data are identical and should fall in the same
        # bin, while the other three items are unique. Hence, we expect one
        # probability of 0.4 and three probabilities of 0.2.
        # Note2: the values are checked indirectly because the histogram may
        # not produce the same binning order per axis on every run.
flat_prob = multidim_prob.ravel()
self.assertEqual(np.sum(flat_prob), 1) # probabilities sum to 1
self.assertEqual(np.sum(flat_prob==0.4), 1)
self.assertEqual(np.sum(flat_prob==0.2), 3)
def test_shannon_entropy(self):
prob = np.array([[0.5, 0], [0.3, 0.2]])
result = shannon_entropy(prob)
# -0.5*np.log2(0.5) - 0.3*np.log2(0.3) -0.2*np.log2(0.2)
expected = 1.4854752972273344
self.assertAlmostEqual(result, expected)
def test_conditional_shannon_entropy(self):
p = np.array([[[0.2, 0.0],
[0.1, 0.1]],
[[0.0, 0.3],
[0.25, 0.05]]])
        p_x0 = np.sum(p[0, :, :])  # p(x=0)
        p_x1 = np.sum(p[1, :, :])  # p(x=1)
# Calculating conditional shannon entropy using the definition
# sum [p(x,y,z) * log2 (p(x)/p(x,y,z))]
expected = 0
for y in range(2):
for z in range(2):
p_xyz0 = p[0, y, z] # p(x=0, y, z)
p_xyz1 = p[1, y, z] # p(x=1, y, z)
if p_xyz0 != 0:
expected += (p_xyz0 * np.log2(p_x0/p_xyz0))
if p_xyz1 != 0:
expected += (p_xyz1 * np.log2(p_x1/p_xyz1))
result = conditional_shannon_entropy(p, 0)
self.assertAlmostEqual(result, expected)
    def test_mutual_information(self):
        # Note: this test assumes the signature mutual_information(p, j),
        # returning the mutual information between variable j and the
        # remaining variables of the joint distribution p.
        p = np.array([[0.1, 0.4],
                      [0.3, 0.2]])
        p_x = np.sum(p, axis=1)  # marginal p(x)
        p_y = np.sum(p, axis=0)  # marginal p(y)
        # Definition: I(X;Y) = sum [p(x,y) * log2(p(x,y) / (p(x)p(y)))]
        expected = sum(p[x, y] * np.log2(p[x, y] / (p_x[x] * p_y[y]))
                       for x in range(2) for y in range(2))
        result = mutual_information(p, 1)
        self.assertAlmostEqual(result, expected)
    def test_conditional_mutual_information(self):
        # Note: this test assumes the signature
        # conditional_mutual_information(p, j, k), returning I(X; X_j | X_k).
        p = np.array([[[0.2, 0.0],
                       [0.1, 0.1]],
                      [[0.0, 0.3],
                       [0.25, 0.05]]])
        p_z = np.sum(p, axis=(0, 1))  # p(z)
        p_xz = np.sum(p, axis=1)      # p(x, z)
        p_yz = np.sum(p, axis=0)      # p(y, z)
        # Definition: I(X;Y|Z) = sum [p(x,y,z) * log2(p(z)p(x,y,z) /
        #                                             (p(x,z)p(y,z)))]
        expected = sum(p[x, y, z]
                       * np.log2(p_z[z] * p[x, y, z]
                                 / (p_xz[x, z] * p_yz[y, z]))
                       for x in range(2) for y in range(2) for z in range(2)
                       if p[x, y, z] > 0)
        result = conditional_mutual_information(p, 1, 2)
        self.assertAlmostEqual(result, expected)
class TestDemo(unittest.TestCase):
def test_run_demo(self):
"""Run smoke test on demo code"""
# /path/to/demos/mutual-information-feature-selection/
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Raise error when return code is nonzero
demo_file = os.path.join(project_dir, 'feature_selection.py')
subprocess.check_output([sys.executable, demo_file])
Require Import Basics.
Require Import Types.
Require Import Diagrams.Diagram.
Require Import Diagrams.Graph.
Require Import Diagrams.Cocone.
Require Import Colimits.Colimit.
(** * Colimit of the dependent sum of a family of diagrams *)
(** Given a family of diagrams [D(y)] and a colimit [Q(y)] of each diagram, one can consider the diagram of the sigmas of the types of the [D(y)]s. Then, a colimit of such a diagram is the sigma of the [Q(y)]s. *)
Section ColimitSigma.
Context `{Funext} {G : Graph} {Y : Type} (D : Y -> Diagram G).
(** The diagram of the sigmas. *)
Definition sigma_diagram : Diagram G.
Proof.
srapply Build_Diagram.
- exact (fun i => {y: Y & D y i}).
- simpl; intros i j g x.
exact (x.1; D x.1 _f g x.2).
Defined.
(** The embedding, for a particular [y], of [D(y)] in the sigma diagram. *)
Definition sigma_diagram_map (y: Y) : DiagramMap (D y) sigma_diagram.
Proof.
srapply Build_DiagramMap.
{ intros i x.
exists y.
exact x. }
reflexivity.
Defined.
Context {Q : Y -> Type}.
(** The sigma of a family of cocones. *)
Definition sigma_cocone (C : forall y, Cocone (D y) (Q y))
: Cocone sigma_diagram (sig Q).
Proof.
srapply Build_Cocone; simpl; intros i x.
1: exact (x.1; legs (C x.1) i x.2).
simpl; intros g x'.
srapply path_sigma'.
1: reflexivity.
apply legs_comm.
Defined.
(** The main result: [sig Q] is a colimit of the diagram of sigma types. *)
Lemma iscolimit_sigma (HQ : forall y, IsColimit (D y) (Q y))
: IsColimit sigma_diagram (sig Q).
Proof.
pose (SigmaC := sigma_cocone (fun y => HQ y)).
srapply (Build_IsColimit SigmaC).
srapply Build_UniversalCocone.
intros X; srapply isequiv_adjointify.
- intros CX x.
srapply (cocone_postcompose_inv (HQ x.1) _ x.2).
srapply (cocone_precompose _ CX).
apply sigma_diagram_map.
- intro CX.
pose (CXy := fun y => cocone_precompose (sigma_diagram_map y) CX).
change (cocone_postcompose SigmaC
(fun x => cocone_postcompose_inv (HQ x.1) (CXy x.1) x.2) = CX).
srapply path_cocone; simpl.
+ intros i x.
change (legs (cocone_postcompose (HQ x.1)
(cocone_postcompose_inv (HQ x.1) (CXy x.1))) i x.2 = CX i x).
exact (ap10 (apD10 (ap legs (eisretr
(cocone_postcompose (HQ x.1)) (CXy _))) i) x.2).
+ intros i j g [y x]; simpl.
set (py := (eisretr (cocone_postcompose (HQ y)) (CXy y))).
set (py1 := ap legs py).
specialize (apD legs_comm py); intro py2.
simpl in *.
rewrite (path_forall _ _(transport_forall_constant _ _)) in py2.
apply apD10 in py2; specialize (py2 i); simpl in py2.
rewrite (path_forall _ _(transport_forall_constant _ _)) in py2.
apply apD10 in py2; specialize (py2 j); simpl in py2.
rewrite (path_forall _ _(transport_forall_constant _ _)) in py2.
apply apD10 in py2; specialize (py2 g); simpl in py2.
rewrite (path_forall _ _(transport_forall_constant _ _)) in py2.
apply apD10 in py2; specialize (py2 x); simpl in py2.
rewrite transport_paths_FlFr in py2.
rewrite concat_1p, concat_pp_p in py2.
apply moveL_Mp in py2.
rewrite (ap_path_sigma_1p
(fun x01 x02 => cocone_postcompose_inv (HQ x01) (CXy x01) x02)).
(* Set Printing Coercions. (* to understand what happens *) *)
subst py1.
etransitivity.
* etransitivity.
2:exact py2.
apply ap.
rewrite (ap_compose legs (fun x0 => x0 i x)).
rewrite (ap_apply_lD2 _ i x).
reflexivity.
* apply ap10, ap.
rewrite (ap_compose legs (fun x0 => x0 j _)).
rewrite (ap_apply_lD2 _ j _).
reflexivity.
- intros f.
apply path_forall; intros [y x]; simpl.
rewrite <- cocone_precompose_postcompose.
srapply (apD10 (g := fun x => f (y; x)) _ x).
srapply equiv_moveR_equiv_V.
srapply path_cocone.
1: reflexivity.
intros i j g x'; simpl.
hott_simpl.
exact (ap_compose _ _ _)^.
Defined.
End ColimitSigma.
(** ** Sigma diagrams and diagram maps / equivalences *)
Section SigmaDiagram.
Context {G : Graph} {Y : Type} (D1 D2 : Y -> Diagram G).
Definition sigma_diagram_functor (m : forall y, DiagramMap (D1 y) (D2 y))
: DiagramMap (sigma_diagram D1) (sigma_diagram D2).
Proof.
srapply Build_DiagramMap.
- intros i.
srapply (functor_sigma idmap _).
intros y; apply m.
- intros i j g x; simpl in *.
srapply path_sigma'.
1: reflexivity.
simpl.
apply (DiagramMap_comm (m x.1)).
Defined.
Definition sigma_diag_functor_equiv (m : forall y, (D1 y) ~d~ (D2 y))
: (sigma_diagram D1) ~d~ (sigma_diagram D2).
Proof.
srapply (Build_diagram_equiv (sigma_diagram_functor m)).
intros i.
srapply isequiv_functor_sigma.
intros y; apply m.
Defined.
End SigmaDiagram.
import torch
import numpy as np
import torch.optim as optim
from lib.utils.util import check_path, empty_folder
from lib.utils.meter import AverageMeter
from torch.nn import DataParallel
from torch.backends import cudnn
__all__ = ['NetBase']
class NetBase(object):
def __init__(self, nClass, nCam, model_client, use_flow, task_dir, raw_model_dir, is_image_dataset, recorder):
self.nClass = nClass
self.nCam = nCam
self.recorder = recorder
# self.visual = self.recorder.visual
self.logger = self.recorder.logger
self._mode = 'Train'
self.is_image_dataset = is_image_dataset
self.task_dir = task_dir
self.model = model_client(self.nClass, self.nCam, use_flow, self.is_image_dataset, raw_model_dir, self.logger)
self.model_parallel = DataParallel(self.model).cuda()
self.model_parallel.feature = DataParallel(self.model.feature).cuda()
self.net_info = []
self.const_options()
self.init_options()
self.loss_mean = AverageMeter(len(self.line_name))
self.net_info.extend(self.model.net_info)
self.optimizer = self.init_optimizer()
self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.lr_decay_step, gamma=self.gamma)
self.idx = 0
self.best_performance = 0.0
def const_options(self):
raise NotImplementedError
def init_options(self):
self.line_name = ['Identity Loss']
raise NotImplementedError
def init_optimizer(self):
raise NotImplementedError
def info(self, params_list):
self.logger.info('------Output shape--------')
for info_i in self.net_info:
self.logger.info(info_i)
if len(params_list) > 0:
self.logger.info('----Parameter shape-------')
for params_i in params_list:
self.logger.info(params_i.size())
self.logger.info('--------------------------')
def display(self):
info_str = 'Epoch: {} lr: {} Loss : '.format(self.idx, self.optimizer.param_groups[-1]['lr'])
num = self.loss_mean.len
for i in range(num):
if i < num - 2:
str_temp = ' + '
elif i < num - 1:
str_temp = ' = '
else:
str_temp = ''
info_str += str(self.loss_mean[i].round(5)) + str_temp
self.logger.info(info_str)
#self.visual.plot('Loss', 'lr', np.array([self.idx]), np.array([self.optimizer.param_groups[-1]['lr']]))
# for i_name, line_name in enumerate(self.line_name):
# self.visual.plot('Loss', line_name, np.array([self.idx]), self.loss_mean[i_name])
self.loss_mean.reset()
def forward(self, data):
data = self.data_preprocess(data)
model_output = self.model_parallel(data)
return model_output
def compute_loss(self, model_output, label_identify):
raise NotImplementedError
def forward_backward(self, args):
self.scheduler.step(self.idx)
data, label_identify = args
model_output = self.forward(data)
label_identify = self.data_preprocess(label_identify)
loss_final = self.compute_loss(model_output, label_identify)
self.optimizer.zero_grad()
loss_final.backward()
self.optimizer.step()
def eval(self, args):
data, label_identify = args
data = self.data_preprocess(data)
self.optimizer.zero_grad()
fea = self.model_parallel.feature(data)
return fea.detach()
def sync(self, idx):
self.idx = idx
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, flag):
if flag == 'Train':
self._mode = flag
self.model_parallel.train()
cudnn.benchmark = True
elif flag == 'Test':
self._mode = flag
self.model_parallel.eval()
cudnn.benchmark = True
else:
raise KeyError
def save(self, rank1):
if rank1 > self.best_performance:
self.best_performance = rank1
empty_folder(self.task_dir / 'output/model')
torch.save(self.model_parallel.state_dict(), str(self.task_dir / 'output/model/model.pkl'))
self.logger.info('Model has been saved for index ' + str(self.idx))
def load(self, model_name=None):
if model_name is None:
self.model_parallel.load_state_dict(torch.load(check_path(self.task_dir / 'output/model/model.pkl')))
self.logger.info('Model restored from ' + str(self.task_dir / 'output/model/model.pkl'))
else:
self.model_parallel.load_state_dict(torch.load(check_path(self.task_dir / str('output/model/' + model_name + '.pkl'))))
            self.logger.info('Model restored from ' + str(self.task_dir / ('output/model/' + model_name + '.pkl')))
    def _data_group_preprocess(self, data):
        # Recursively move tensors (and numpy arrays) inside nested
        # tuples/lists onto the GPU.
        if not isinstance(data, (tuple, list)):
            if torch.is_tensor(data):
                return data.cuda()
            else:
                return torch.from_numpy(data).cuda()
        else:
            output = []
            for data_i in data:
                output.append(self._data_group_preprocess(data_i))
            return output
    def data_preprocess(self, *arg):
        output = []
        torch.set_grad_enabled(self._mode == 'Train')
        for data in arg:
            output.append(self._data_group_preprocess(data))
        output = tuple(output)
        if len(output) == 1:
            output = output[0]
        return output
\documentclass[a4paper, 12pt]{report}
\usepackage[T1]{fontenc}
% \usepackage[icelandic]{babel}
\usepackage{latexsym,amssymb,amsmath,amsthm}
\usepackage{graphicx}
\usepackage[colorlinks=true,linkcolor=black,anchorcolor=black,citecolor=black,filecolor=black,menucolor=black,runcolor=black,urlcolor=black]{hyperref}
\usepackage{enumerate}
\usepackage{pgfplots}
\usepackage{nicefrac}
\usepackage{derivative}
\usepackage{setspace}
\usepackage{mathrsfs}
% \usepackage{newtxtext, newtxmath}
\usepackage{tabularx}
\usepackage{authblk}
\usepackage{caption}
\usepackage[most]{tcolorbox}
\renewcommand\qedsymbol{$\blacksquare$}
\newcommand\placeqed{\nobreak\enspace$\blacksquare$}
\renewcommand\qedsymbol{}
\usepackage{algorithm}
\usepackage{algpseudocodex}
\renewcommand\algorithmicprocedure{\textbf{procedure}}
\newcommand{\algorithmicendprocedure}{\algorithmicend\ \algorithmicprocedure}
\makeatletter
\newcommand\PROCEDURE[3][default]{%
\ALC@it
\algorithmicprocedure\ \textsc{#2}(#3)%
\ALC@com{#1}%
\begin{ALC@prc}%
}
\newcommand\ENDPROCEDURE{%
\end{ALC@prc}%
\ifthenelse{\boolean{ALC@noend}}{}{%
\ALC@it\algorithmicendprocedure
}%
}
\newenvironment{ALC@prc}{\begin{ALC@g}}{\end{ALC@g}}
\makeatother
\usepackage[hang,flushmargin]{footmisc}
\captionsetup[figure]{labelfont=bf}
\pgfplotsset{compat=1.17}
\setlength{\jot}{1em}
\title{\textsc{Simulating the Evolution of\\ Neural Pathways and Structures} \\ \vspace*{10mm} \Large Development Journal}
\author[1]{Kári Hlynsson}
\author[2]{Young Jun Lee}
\affil[1]{University of Iceland, Department of Mathematics}
\affil[2]{University of Oxford, Department of Biology}
\date{}
\newtheorem{theorem}{Theorem}[chapter]
\newtheorem{corollary}{Corollary}[chapter]
\newtheorem{proposition}{Proposition}[chapter]
\theoremstyle{definition}
\newtheorem{definition}{Definition}[chapter]
\newtheorem*{remark}{Remark}
\let\oldproofname=\proofname
\renewcommand{\proofname}{\rm\bf{\oldproofname}}
\begin{document}
\maketitle
\onehalfspacing
\newpage
\tableofcontents
\newpage
\chapter*{Index of notation}
\renewcommand{\arraystretch}{1.7}
\begin{table}[ht!]
\centering
\begin{tabularx}{\textwidth}{l X}
\textsc{Notation} & \textsc{Definition} \\
$\mathscr E = [0;w] \times [0;h]$ & Environment with \emph{width} $w$ and \emph{height} $h$. The environment
can also be expressed as the set of locations $\ell$ such that $\mathscr E = \{\ell := (x, y) \mid x \in [0;w] \land y \in [0;h]\}$ \\
$n$ & Size of the population. \\
$E = (e_x, e_y)$ & An entity with $x$ and $y$ coordinates in the environment, such that $\ell_E \in \mathscr E$ ($\ell_E$ is the location corresponding to the entity). \\
$\mathbf C = \{C_1, \ldots, C_k\}$ & The partition of $\mathscr E$ into $k$ chunks $C_1, \ldots, C_k$ such that $C_i \cap C_j = \emptyset$ for $i \neq j$ and $\bigcup_{i = 1}^k C_i = \mathscr E$, i.e. the
\emph{set of chunks} $\mathbf C$ forms a complete partition of $\mathscr E$. \\
$\mathbf O = \{O_1, \ldots, O_n\}$ & The population, the set of all organisms present within the environment. \\
$\mathcal P: \mathbf O \to \mathscr E$ & Positional mapping. Returns point representation of an entity's location within $\mathscr E$. Note that $\mathcal P$ is a random variable, which we will
discuss in more detail in the runtime optimization section. \\
\end{tabularx}
\end{table}
\newpage
\chapter*{Common Acronyms}
\begin{table}[ht!]
\centering
\begin{tabularx}{\textwidth}{l X}
\textsc{Acronym} & \textsc{Definition} \\
CBS & Chunk-Based System \\
UGP & Undirected-Graph Partitioning \\
BFS & Breadth-First Search
\end{tabularx}
\end{table}
\newpage
\chapter{Theoretical basis}
\section{Note}
Hello there Jun
\noindent
This is an \textsc{\huge EXTREMELY} primitive draft.
\noindent
None of this is final and it is subject to change as we cooperate on this project.
\noindent
I also want to apologize for the common abbreviations section, it's a load of cowdung
but I feel we will need this to make our lives easier later on.
\par That's alright!! XD The common abbreviations section is really useful, I'll try to follow your notations
throughout the doc.
\section{Relevant biology}
A neural network, to an extent, mimics the interconnected nerve cells in animals: learning can be achieved by modulating certain
`parameters' that dictate how individual neurones respond to a set of stimuli.
\chapter{Model outline}
\section{Model outline}
\subsection{Overview}
The aim of the model is to study the natural evolution of neural pathways in a population of organisms
exposed to survival pressures. A rigid logical and syntactic foundation will make all succeeding
discussion of the model parameters and attributes easier. We therefore dedicate this first section towards
establishing a foundation of terms and definitions which we build on later.
\par The most critical aspects of the model we define here are the \emph{environment} and the \emph{entities} contained
therein. Neglecting any elevation, we define the environment as a bounded subset of the Cartesian plane, which we symbolize
$\mathscr E$.\footnote{Although elevation certainly plays a vital role in the foraging patterns of organisms in natural environments, we refrain
from its implementation as it only adds a level of complexity to the model design while having no immediate benefit for the simulation.}
\par Contained within the environment are \emph{entities}, which we can think of as actors within the simulation. Two types occur
in this model: \emph{organisms} and \emph{food}. Intuitively, an organism is an individual of a species
present within the environment, and food is the foodstuff it consumes to gain energy and thus survive.
\par Organisms are motile, can sense their surroundings and consume food to gain energy; food has none of
these qualities. We represent a generic entity by the object $E$, while organisms are denoted
$O$ and food by $F$.
\begin{figure}
\centering
\begin{tcolorbox}[colframe = black, colback = white, sharp corners]
\begin{tikzpicture}[scale = 1, x=0.6pt,y=0.6pt,yscale=-1,xscale=1]
%uncomment if require: \path (0,300); %set diagram left start at 0, and has height of 300
%Shape: Circle [id:dp5046571963675113]
\draw [fill={rgb, 255:red, 232; green, 210; blue, 255 } ,fill opacity=1 ] (169,165.75) .. controls (169,157.05) and (176.05,150) .. (184.75,150) .. controls (193.45,150) and (200.5,157.05) .. (200.5,165.75) .. controls (200.5,174.45) and (193.45,181.5) .. (184.75,181.5) .. controls (176.05,181.5) and (169,174.45) .. (169,165.75) -- cycle ;
%Shape: Pie [id:dp6264332812271176]
\draw (222.96,129.08) .. controls (225.08,130.35) and (227.1,131.82) .. (229.01,133.51) .. controls (246.31,148.81) and (247.57,175.65) .. (231.82,193.45) .. controls (230.18,195.31) and (228.42,196.99) .. (226.56,198.5) -- (200.5,165.75) -- cycle ;
%Straight Lines [id:da5980112219613951]
\draw [color={rgb, 255:red, 0; green, 0; blue, 0 } ,draw opacity=1 ] (200.5,165.75) -- (241,165.27) ;
\draw [shift={(243,165.25)}, rotate = 179.33] [color={rgb, 255:red, 0; green, 0; blue, 0 } ,draw opacity=1 ][line width=0.75] (4.37,-1.32) .. controls (2.78,-0.56) and (1.32,-0.12) .. (0,0) .. controls (1.32,0.12) and (2.78,0.56) .. (4.37,1.32) ;
%Curve Lines [id:da9973990148186489]
\draw [dash pattern={on 4.5pt off 4.5pt}] (2,150) .. controls (42,120) and (103,168) .. (169,165.75) ;
%Shape: Triangle [id:dp44009432397165527]
\draw [fill={rgb, 255:red, 179; green, 249; blue, 182 } ,fill opacity=1 ] (324.11,89) -- (338,111) -- (311,111) -- cycle ;
%Shape: Triangle [id:dp35563504351381514]
\draw [fill={rgb, 255:red, 179; green, 249; blue, 182 } ,fill opacity=1 ] (600.11,244) -- (614,266) -- (587,266) -- cycle ;
%Shape: Triangle [id:dp6587382072289927]
\draw [fill={rgb, 255:red, 179; green, 249; blue, 182 } ,fill opacity=1 ] (585.11,47) -- (599,69) -- (572,69) -- cycle ;
%Shape: Triangle [id:dp8230417371681578]
\draw [fill={rgb, 255:red, 179; green, 249; blue, 182 } ,fill opacity=1 ] (416.11,221) -- (430,243) -- (403,243) -- cycle ;
%Shape: Triangle [id:dp3619204635470774]
\draw [fill={rgb, 255:red, 179; green, 249; blue, 182 } ,fill opacity=1 ] (121.11,55) -- (135,77) -- (108,77) -- cycle ;
%Shape: Triangle [id:dp9744597746822374]
\draw [fill={rgb, 255:red, 179; green, 249; blue, 182 } ,fill opacity=1 ] (110.11,227) -- (124,249) -- (97,249) -- cycle ;
%Shape: Circle [id:dp2625952541482832]
\draw [fill={rgb, 255:red, 232; green, 210; blue, 255 } ,fill opacity=1 ] (483.04,47.77) .. controls (489.39,53.72) and (489.71,63.68) .. (483.76,70.03) .. controls (477.81,76.38) and (467.85,76.7) .. (461.5,70.75) .. controls (455.15,64.8) and (454.83,54.83) .. (460.78,48.49) .. controls (466.73,42.14) and (476.7,41.82) .. (483.04,47.77) -- cycle ;
%Shape: Pie [id:dp25830448813204976]
\draw (472.89,112.22) .. controls (470.52,112.89) and (468.06,113.36) .. (465.52,113.6) .. controls (442.53,115.76) and (422.09,98.32) .. (419.87,74.66) .. controls (419.64,72.19) and (419.61,69.75) .. (419.78,67.36) -- (461.5,70.75) -- cycle ;
%Straight Lines [id:da5039204955567191]
\draw [color={rgb, 255:red, 0; green, 0; blue, 0 } ,draw opacity=1 ] (461.5,70.75) -- (434.15,100.62) ;
\draw [shift={(432.8,102.1)}, rotate = 312.48] [color={rgb, 255:red, 0; green, 0; blue, 0 } ,draw opacity=1 ][line width=0.75] (4.37,-1.32) .. controls (2.78,-0.56) and (1.32,-0.12) .. (0,0) .. controls (1.32,0.12) and (2.78,0.56) .. (4.37,1.32) ;
%Curve Lines [id:da7361275969659062]
\draw [dash pattern={on 4.5pt off 4.5pt}] (483,47.67) .. controls (512,16.67) and (562,36.67) .. (565,1.67) ;
% Text Node
\draw (5,8.4) node [anchor=north west][inner sep=0.75pt] [font=\LARGE] {$\mathscr{E}$};
% Text Node
\draw (320,97.4) node [anchor=north west][inner sep=0.75pt] [font=\scriptsize] {F};
% Text Node
\draw (596,252.4) node [anchor=north west][inner sep=0.75pt] [font=\scriptsize] {F};
% Text Node
\draw (581,55.4) node [anchor=north west][inner sep=0.75pt] [font=\scriptsize] {F};
% Text Node
\draw (412,229.4) node [anchor=north west][inner sep=0.75pt] [font=\scriptsize] {F};
% Text Node
\draw (117,63.4) node [anchor=north west][inner sep=0.75pt] [font=\scriptsize] {F};
% Text Node
\draw (106,235.4) node [anchor=north west][inner sep=0.75pt] [font=\scriptsize] {F};
% Text Node
\draw (246,158) node [anchor=north west][inner sep=0.75pt] [font=\small] {$\theta$};
% Text Node
\draw (177,157.07) node [anchor=north west][inner sep=0.75pt] {$O$};
\end{tikzpicture}
\end{tcolorbox}
\caption{An illustration of the model} \label{fig:model_illustration}
\end{figure}
\subsection{Sensory mapping of organisms}
One of the key characteristics of organisms is that they are able to sense their proximal surroundings and base their succeeding actions on the information they
have gathered on the environment. In this section we aim to establish a mathematical and syntactical foundation describing the sensory capabilities of organisms
which allows passing environmental data to the organism's neural network.
\par A convenient and well-established method of sensory mapping is obtained through the use of \emph{raycasting} or \emph{raylines}, where several line segments
originating from the organism's point location are used as collision sensors which double as distance sensors. By calculating the distance to the intersection between
a rayline emitted by an organism and an entity in the field, a metric describing the \emph{sensory depth} from the organism to another entity is established.
\begin{definition}[Ray set]
Let $\lambda \in \mathbb R^+$. Suppose that an organism $O$ in an environment $\mathscr E$ with a present entity set $\mathbf E$ has the forward-facing
angle $\theta$ (see figure \ref{fig:model_illustration}). We define the \emph{ray set} of the organism as the linearly spaced set of angles $\mathbf R = \{r_1, \ldots, r_{\nu_{\mathbf R}}\}$ over $\left[\theta - \Delta_{\mathbf R}; \theta + \Delta_{\mathbf R}\right]$
with $\nu_{\mathbf R}$ elements ($\nu_{\mathbf R}$ is called the \emph{ray number}). Furthermore, we define the quantity $\mathcal S_{\mathbf R} = 2 \Delta_{\mathbf R}$ as the \emph{span} of the ray set.
\end{definition}
\begin{definition}[Ray map]
The vector function $R_{\lambda}$ is the \emph{ray map} from the ray set $\mathbf R$ to the family of vectors bounded within the organism's sensory field.
Furthermore, it is defined by
\[
R_{\lambda}(r) = \left\langle R_\lambda^x, R_\lambda^y \right\rangle = \langle \lambda \cos r, \lambda \sin r \rangle
\]
where $\lambda$ is the \emph{maximum sensory depth}, i.e. the radius of the sensory field.
\end{definition}
\begin{remark}
Note that
\[
|R_\lambda(r)|^2 = \left(R_\lambda^x\right)^2 + \left(R_\lambda^y\right)^2 = \lambda^2
\]
The vector function is the parametrization of a circle sector in the range $\left[\theta - \Delta_{\mathbf R}; \theta + \Delta_{\mathbf R}\right]$
and the ray set $\mathbf R$ returns a finite collection of vectors with their endpoints located on the arc of the sector.
\end{remark}
\begin{definition}[Sensory field]
The sensory field of an organism $O$ is the set of vectors $\mathbf S = \{\vec{s}_1, \ldots, \vec{s}_{\nu_{\mathbf R}}\}$ which is returned by the ray map $R_\lambda$ acting on the
ray set $\mathbf R$. The \emph{maximum sensory depth} is the parameter $\lambda$ which describes the length of the vectors, i.e. $\forall \vec{s}_i \in \mathbf S: |\vec{s}_i| = \lambda$.
\end{definition}
Sensory activation occurs when an entity intersects the line segment formed by a ray. This can be formalized as the predicate $\kappa_{r_i}(E)$.
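\par To make the construction concrete, the following Python sketch (our own illustrative code, with hypothetical names, not part of the simulation) builds a ray set and applies the ray map to obtain the sensory field vectors:
\begin{verbatim}
import numpy as np

def sensory_field(theta, delta, nu, lam):
    """Endpoints of the sensory field vectors.

    theta: forward-facing angle, delta: half-span Delta_R,
    nu: ray number nu_R, lam: maximum sensory depth lambda.
    """
    rays = np.linspace(theta - delta, theta + delta, nu)  # the ray set R
    # Ray map R_lambda(r) = <lam cos r, lam sin r>
    return np.stack([lam * np.cos(rays), lam * np.sin(rays)], axis=1)

# Example: 5 rays spanning 90 degrees about theta = 0, depth 10.
print(sensory_field(0.0, np.pi / 4, 5, 10.0))
\end{verbatim}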
\section{Simulation phases}
For your contemplation (Jun, if you're reading this): I've thought of dividing the simulation into a \emph{foraging phase}, where organisms roam around and collect food.
If they don't get any or deplete their energy, they die. Once the foraging phase is over, the \emph{reproductive phase} starts, where remaining energy is a measure of how
likely organisms are to find a partner and reproduce (this is of course a simplification, there are many other ways to go about this I'm sure). This way, we don't have to make
the reproduction itself an extreme pain (organisms having to find each other, etc.). This would mean that the reproductive phase is not carried out in the ``plane'' where the simulation
occurs but rather ``off screen'' where it's just a bunch of calculations really.
\par On the other hand it might make for some really interesting data if we were to assign individuals genders and they would map their current energy level and the gender of individuals
in their sensory field and allow for them to reproduce ``in the field'' lol. Let me know what you think!
Hi Kari - sorry that I only got to have a look at the document now! Those two alternative strategies are similar to what I've been thinking about as well.
\par I suppose if we are doing the 'on-screen reproduction' strategy, we'll somehow have to enable agents to switch from \emph{foraging phase} to \emph{reproductive phase};
perhaps by having two different sets of neural networks? I think it's definitely worth developing, but in this case we would need to consider a way
to ensure agents don't all die without offspring in generation 1 due to incompetence in finding partners.
\par The 'off-screen reproduction' is certainly easier - perhaps we can make an algorithm that randomly picks two individuals with probability of selection
being scaled according to the amount of food collected over their lifetime? This might be possible if we use an algorithm that's kind of like the following:
\begin{enumerate}
\item Sort the individuals by the amount of food they have collected (or the energy `left over' at the end of the foraging phase if foraging also involves some kind of energy expenditure);
\item Calculate `boundary values' that divide, for example, $[0,1)$ into sub-intervals proportional to the amount of food individuals have collected (e.g. if there are two individuals that each collected 3 and 1 amounts of food, subdivide $[0,1)$ into $[0,0.75)$ and $[0.75,1)$);
\item Generate a random number in $[0,1)$;
\item Determine which sub-interval the number falls into, and select the corresponding individual; a sketch of this is given below.
\end{enumerate}
\par Using this, we should be able to preferentially select the `fittest' individuals and allow them to mate. Once the sub-intervals have been calculated,
selecting more individuals for additional mating shouldn't be too costly computationally speaking (but let me know what you think!)
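\par As a minimal Python sketch of this selection scheme (our own illustrative code; function and variable names are hypothetical):
\begin{verbatim}
import random

def select_parent(population, food_collected):
    """Fitness-proportional (roulette-wheel) selection."""
    total = sum(food_collected)
    r = random.uniform(0.0, total)  # random point in [0, total)
    cumulative = 0.0
    for individual, food in zip(population, food_collected):
        cumulative += food
        if r < cumulative:
            return individual
    return population[-1]  # guard against round-off at the boundary

# Two individuals that collected 3 and 1 units of food are selected
# with probability 0.75 and 0.25, matching the sub-interval example.
parents = [select_parent(["a", "b"], [3, 1]) for _ in range(10)]
\end{verbatim}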
\section{Runtime optimization}
One of the run-ins we've had so far is determining how to design the sensory mapping capabilities of organisms within the environment. By sensory mapping, I am referring to the organism's ability to sense its
proximal surroundings, sensing the proximity and types of the various entities they may encounter. This will be fed into their neural network, which outputs some response which instructs the organism how to behave
given its current surroundings.
\par The first attempt I made was in the days where the environment was grid-based instead of a float-based environment. There, sensory mapping was quite easy as all that had to be done was inspect the proximal tiles
and check for the entity type present in the tile. This is not possible in the float-based environment, so we propose another solution.
\par An excellent idea you came up with was that of partitioning the environment into separate chunks, which organisms restrict their sensory mapping to unless their sensory fields intersect an adjacent chunk
(more on that later). We will start by discussing this idea, which, as you will see, will be of great use.
\subsection{Chunk system}
In this section, we will be doing a mathematical analysis of the chunk system to see how it will benefit the simulation. To start off, we inspect what fundamental laws apply to this system.
\begin{definition}[Chunk partition]
Let $\kappa = \sqrt k$.\footnote{A required simulation parameter is the chunk number $k \in \mathbb N$, which must be a perfect square so that the numbers of rows and columns are exactly equal.}
Recall that $\mathscr E = [0;w] \times [0;h]$. The partitioning of the environment into $k$ chunks consists of constructing the linearly spaced vertex vectors
$\mathbf x = \{x_0, \ldots, x_\kappa \}$ and $\mathbf y = \{y_0, \ldots, y_\kappa \}$.
To construct the chunk $C_i$ where $i \in [1;k]$ we must obtain the row-column representation of the index,
which is given by $M(i) = \langle r, c \rangle = \langle \lceil i / \kappa \rceil; ((i - 1) \bmod \kappa) + 1 \rangle$.
Chunk $C_i$ is then constructed as $[x_{c - 1}; x_c] \times [y_{r - 1}; y_r]$.
\end{definition}
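\noindent For example, with $k = 16$ chunks we have $\kappa = 4$, and chunk $C_7$ has the row-column representation $M(7) = \langle \lceil 7/4 \rceil, ((7 - 1) \bmod 4) + 1 \rangle = \langle 2, 3 \rangle$, i.e.\ the second row and third column; likewise $M(8) = \langle 2, 4 \rangle$. This matches the layout in the figure below.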
\begin{figure}[ht!]
\centering
\tikzset{every picture/.style={line width=0.75pt}} %set default line width to 0.75pt
\begin{tikzpicture}[x=0.75pt,y=0.75pt,yscale=-1,xscale=1]
%uncomment if require: \path (0,300); %set diagram left start at 0, and has height of 300
%Shape: Grid [id:dp7174609337256415]
\draw [draw opacity=0][fill={rgb, 255:red, 255; green, 255; blue, 255 } ,fill opacity=1 ][dash pattern={on 0.84pt off 2.51pt}] (204,18) -- (455,18) -- (455,271) -- (204,271) -- cycle ; \draw [dash pattern={on 0.84pt off 2.51pt}] (204,18) -- (204,271)(267,18) -- (267,271)(330,18) -- (330,271)(393,18) -- (393,271) ;
\draw [dash pattern={on 0.84pt off 2.51pt}] (204,18) -- (455,18)(204,81) -- (455,81)(204,144) -- (455,144)(204,207) -- (455,207)(204,270) -- (455,270) ;
\draw [dash pattern={on 0.84pt off 2.51pt}] ;
%Shape: Rectangle [id:dp7032935088696164]
\draw [line width=2.25] (204,19) -- (456,19) -- (456,271) -- (204,271) -- cycle ;
% Text Node
\draw (199,281.4) node [anchor=north west][inner sep=0.75pt] {$x_{0}$};
% Text Node
\draw (447,281.4) node [anchor=north west][inner sep=0.75pt] {$x_{4}$};
% Text Node
\draw (259,281.4) node [anchor=north west][inner sep=0.75pt] {$x_{1}$};
% Text Node
\draw (322,281.4) node [anchor=north west][inner sep=0.75pt] {$x_{2}$};
% Text Node
\draw (386,281.4) node [anchor=north west][inner sep=0.75pt] {$x_{3}$};
% Text Node
\draw (173,11.4) node [anchor=north west][inner sep=0.75pt] {$y_{0}$};
% Text Node
\draw (227,44.4) node [anchor=north west][inner sep=0.75pt] {$C_{1}$};
% Text Node
\draw (173,71.4) node [anchor=north west][inner sep=0.75pt] {$y_{1}$};
% Text Node
\draw (173,132.4) node [anchor=north west][inner sep=0.75pt] {$y_{2}$};
% Text Node
\draw (173,197.4) node [anchor=north west][inner sep=0.75pt] {$y_{3}$};
% Text Node
\draw (173,257.4) node [anchor=north west][inner sep=0.75pt] {$y_{4}$};
% Text Node
\draw (415,44.4) node [anchor=north west][inner sep=0.75pt] {$C_{4}$};
% Text Node
\draw (290,44.4) node [anchor=north west][inner sep=0.75pt] {$C_{2}$};
% Text Node
\draw (352,44.4) node [anchor=north west][inner sep=0.75pt] {$C_{3}$};
% Text Node
\draw (225,107.4) node [anchor=north west][inner sep=0.75pt] {$C_{5}$};
% Text Node
\draw (413,107.4) node [anchor=north west][inner sep=0.75pt] {$C_{8}$};
% Text Node
\draw (288,107.4) node [anchor=north west][inner sep=0.75pt] {$C_{6}$};
% Text Node
\draw (351,107.4) node [anchor=north west][inner sep=0.75pt] {$C_{7}$};
% Text Node
\draw (226,168.4) node [anchor=north west][inner sep=0.75pt] {$C_{9}$};
% Text Node
\draw (414,168.4) node [anchor=north west][inner sep=0.75pt] {$C_{12}$};
% Text Node
\draw (289,168.4) node [anchor=north west][inner sep=0.75pt] {$C_{10}$};
% Text Node
\draw (351,168.4) node [anchor=north west][inner sep=0.75pt] {$C_{11}$};
% Text Node
\draw (226,230.4) node [anchor=north west][inner sep=0.75pt] {$C_{13}$};
% Text Node
\draw (414,230.4) node [anchor=north west][inner sep=0.75pt] {$C_{16}$};
% Text Node
\draw (289,230.4) node [anchor=north west][inner sep=0.75pt] {$C_{14}$};
% Text Node
\draw (351,230.4) node [anchor=north west][inner sep=0.75pt] {$C_{15}$};
\end{tikzpicture}
\caption{Partition of environment into $k = 16$ chunks}
\end{figure}
\begin{proposition}
Let $\mathscr E$ be an environment partitioned into $k$ chunks such that $\mathbf C = \{C_1, \ldots, C_k\}$. The probability of an entity being present in a generic chunk $C_i$ equals $1/k$, i.e.
\[
\textnormal{Pr}\{E \in C_i\} = \frac 1k
\]
\end{proposition}
\begin{proof}
Let $\mathscr E$ be the space $[0; w] \times [0;h]$ with $\text{area}(\mathscr E) = wh$ and the partition $\mathbf C$. Under the assumption that the chunks
are of uniform size, we have
\[
\text{area}(C_i) = \frac{\text{area}(\mathscr E)}{k} \tag{\textasteriskcentered}
\]
for all $C_i \in \mathbf C$ where $i \in [1; k]$. Under conventional probability theory, we can express the probability of an entity being in a generic chunk as
the area of that particular chunk over the area of the environment, i.e.
\begin{align*}
\text{Pr}\{E \in C_i\} &= \frac{|C_i|}{|\mathscr E|} \\
&= \frac{\text{area}(C_i)}{\text{area}(\mathscr E)} \\
&= \frac{1}{k}
\end{align*}
The result of the calculation above is immediate from the definition of the area of the chunks, derived in (\textasteriskcentered). \placeqed
\end{proof}
\begin{definition}[Chunk load]
The random variable $\mathcal L$, or the \emph{chunk load} of some generic chunk $C_i$,
denotes the number of entities contained within the chunk. Immediate of proposition 1,
we have that $\mathcal L \sim \text{Bin}(n, 1/k)$, where $n$ is the total number of entities
in the environment.\footnote{Note that this assumes a uniform distribution of entities within
the environment. While it is not entirely safe to say that the distribution of entities is always uniform, we do so in order to obtain an upper bound on simulation time.}
\end{definition}
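\noindent Immediate consequences are the expected load $\operatorname{E}[\mathcal L] = n/k$ and variance $\operatorname{Var}[\mathcal L] = \frac nk \left(1 - \frac 1k\right)$ of a generic chunk. For instance, $n = 1000$ entities spread over $k = 16$ chunks yield an expected load of $62.5$ entities per chunk.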
\begin{figure}[ht!]
\centering
\includegraphics[width=0.9\textwidth]{img/binomialdistribution.png}
\caption{An example binomial distribution}
\end{figure}
\noindent The random variables $\mathcal L_{C_1}, \ldots, \mathcal L_{C_k}$ are dependent, since they must satisfy $\sum_{i = 1}^k \mathcal L_{C_i} = N$, where $|\mathbf E| = N$. Algorithm \ref*{cbs-costmodel} shows a method with which an amortized cost model can be simulated.
\par The algorithm demands the assignment of $\mathcal L_{C_i}$ for chunks $C_i \in \mathbf C$ by a random process. However, given the dependence between the chunk loads, the remaining population must shrink after each assignment, which is why the algorithm decrements $N$ by $\mathcal L_{C_i}$ at every step.
\begin{figure}[ht!]
\centering
\includegraphics[width=0.9\textwidth]{img/mcl_simulation.png}
\caption{$\mathcal L_{\max}$ by population size and number of chunks}
\end{figure}
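\noindent As a sketch of how such a simulation can be run (our own illustrative Python, not necessarily the code behind the figure), the chunk loads can be drawn jointly from a multinomial distribution, which enforces the dependence constraint $\sum_{i = 1}^k \mathcal L_{C_i} = N$ directly:
\begin{verbatim}
import numpy as np

def mean_max_chunk_load(N, k, trials=10000, seed=0):
    """Estimate E[L_max] for N entities dropped uniformly into k chunks."""
    rng = np.random.default_rng(seed)
    # Each trial draws all k chunk loads at once; the multinomial
    # guarantees the loads sum to N, capturing their dependence.
    loads = rng.multinomial(N, [1.0 / k] * k, size=trials)
    return loads.max(axis=1).mean()

# Example: expected maximum load for 1000 entities over 16 chunks.
print(mean_max_chunk_load(1000, 16))
\end{verbatim}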
\section{Adjacent chunk loading and critical boundary}
Although the chunk system minimizes the calculations needed to check for collisions with a ray, it introduces the risk of an entity
escaping an organism's sensory field despite being contained within it. This is due to the fact that the CBS only performs calculations
concerning entities contained within the chunk itself, without paying attention to the contents of the sensory field.
\par A way to ensure that all entities within the sensory field are recognized is by introducing adjacent chunk loading (henceforth ACL),
which loads the adjacent chunk given that an organism's field of view intersects an adjoining chunk.
\par Expanding on this concept, we loosely define the \emph{critical boundary} as the subset of the environment, denoted $\mathcal C_{\mathscr E}$,
which satisfies the condition that the sensory field of any organism contained within it intersects an adjoining chunk, regardless of the organism's position
and orientation.
\par Maybe we can calculate the critical boundary by using vector calculations for the intersection of chunk boundaries and ray lines?
Another idea is to have chunks with relatively large dimensions compared to $\lambda$ and then to load three more adjacent chunks depending on
which \emph{quadrant} of a chunk the individual is in; I will elaborate on this when we have our chat!
\section{Performance comparison}
In this section we compare the CBS versus non-CBS runtime performance to obtain a metric description
of performance improvements as a result of the CBS implementation.
\subsection{Amortized cost of non-CBS implementation}
\subsection{Amortized cost of CBS implementation}
\subsection{Comparative analysis}
\begin{algorithm}[ht!]
\caption{Algorithm for estimating amortized cost of CBS method}
\begin{algorithmic}[1]
\Require The chunk set $\mathbf C$ which partitions $\mathscr E$ into $k$ disjoint
subsets $C_1, \ldots, C_k$ where $|\mathbf C| = k$. Entity set $\mathbf E$ within $\mathscr E$
where $|\mathbf E| = N$ with organism subset $\mathbf O$ such that $|\mathbf O| = n$.
\Procedure{CbsCostModel}{$\mathbf C$, $\mathbf E$}
\State cost$_{\text{CBS}} \leftarrow 0$ \Comment{Amortized CBS cost}
\For{$C_i \in \mathbf C$}
\State Assign $C_i$ chunk load $\mathcal L_{C_i} \sim \text{Bin}(N, 1/k)$ by a random process
\State $N \leftarrow N - \mathcal L_{C_i}$ \Comment{Since $\mathcal L_{C_1}, \ldots, \mathcal L_{C_k}$ are dependent r.v.}
\State $n_{O \in C_i} := |\{O \in \mathbf O \mid O \in C_i\}|$ \Comment{$n_{O \in C_i} \leq n$}
\State cost$_{\text{CBS}} \leftarrow$ cost$_{\text{CBS}} + n_{O \in C_i} (\mathcal L_{C_i} - 1)$
\EndFor
\State cost$_{\text{CBS}} \leftarrow$ cost$_{\text{CBS}} + k$
\EndProcedure
\end{algorithmic}
\label{cbs-costmodel}
\end{algorithm}
\appendix
\newpage
\chapter{Preliminaries}
\section{Linear Algebra}
\begin{definition}[Dot product]
Let $\mathbf a = \langle a_1, \ldots, a_n \rangle$ and $\mathbf b = \langle b_1, \ldots, b_n \rangle$ be $n$-dimensional vectors. The \textbf{dot product} of the two vectors is the sum
\[
\mathbf a \bullet \mathbf b = \sum_{i = 1}^n a_i b_i
\]
The angle $\theta$ between two vectors is related to their dot product:
\[
\mathbf a \bullet \mathbf b = |\mathbf a||\mathbf b|\cos(\theta)
\]
That is to say,
\[
\theta = \arccos\left(\frac{\mathbf a \bullet \mathbf b}{|\mathbf a||\mathbf b|}\right)
\]
\end{definition}
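For example, for $\mathbf a = \langle 1, 0 \rangle$ and $\mathbf b = \langle 1, 1 \rangle$ we have $\mathbf a \bullet \mathbf b = 1$, $|\mathbf a| = 1$ and $|\mathbf b| = \sqrt 2$, so $\theta = \arccos\left(1/\sqrt 2\right) = \pi/4$.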
\renewcommand{\arraystretch}{1}
\begin{definition}[Cross product]
The cross product of two vectors $\mathbf a, \mathbf b \in \mathbb R^3$
is denoted $\mathbf a \times \mathbf b$ and defined
\[
\mathbf a \times \mathbf b = \begin{vmatrix}
\mathbf i & \mathbf j & \mathbf k \\
a_1 & a_2 & a_3 \\
b_1 & b_2 & b_3
\end{vmatrix} =
(a_2b_3 - a_3b_2)\mathbf i + (a_3b_1 - a_1b_3)\mathbf j + (a_1b_2 - a_2b_1)\mathbf k
\]
\end{definition}
The resulting vector of the vector product $\mathbf a \times \mathbf b$ is normal to both $\mathbf a$ and $\mathbf b$,
and thus the following rule applies:
\[
\mathbf a \times \mathbf b = |\mathbf a||\mathbf b| \sin(\theta) \hat{\mathbf n}
\]
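For example, $\langle 1, 0, 0 \rangle \times \langle 0, 1, 0 \rangle = \langle 0, 0, 1 \rangle$, the unit normal to the $xy$-plane.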
\section{Probability and statistics}
The numeric variable $X \in \mathbb R$ whose value is dependent on some stochastic process is called a \textbf{random variable}. The set $\Omega$ is called the \textbf{sample space} of $X$ and contains all possible outcomes, $\omega$. An \textbf{event} is the subset $E \subseteq \Omega$, where $\Omega$ is the certain event and $\emptyset$ is the impossible event.
\begin{definition}[Random variable]
A random variable $X$ is the mapping
\[
X: \Omega \to \mathbb R
\]
which associates each $\omega \in \Omega$ with some $X(\omega) \in \mathbb R$. When $\Omega$ is a countable set, $X$ is called a \textbf{discrete r.v.}, and a \textbf{continuous r.v.} otherwise.
\end{definition}
\begin{definition}[Probability]
The \textbf{probability of an event} $E$ is the function which associates each $E \subseteq \Omega$ with a number $\text{Pr}\{E\} \in [0;1]$ such that
\begin{enumerate}
\item Pr$\{\Omega\} = 1$ and Pr$\{\emptyset\} = 0$
\item $\forall E \subseteq \Omega: \text{Pr}\{E\} \geq 0$
\item Let $\{E_n\}_{n \in \mathbb N}$ be an infinite sequence of disjoint events in $\Omega$. Then,
\[
\text{Pr}\left\{\bigcup_{i = 1}^\infty E_i\right\} = \sum_{i = 1}^\infty \text{Pr}\{E_i\}
\]
\end{enumerate}
\end{definition}
\begin{definition}[Cumulative distribution function, CDF]
Let $X$ be a random variable and $x_0$ be some generic value assumed by $X$. The CDF of $X$ is the function
\[
F(x_0) \mathrel{:}= \text{Pr}\{X \leq x_0\}
\]
\end{definition}
\section{Neural networks}
\textbf{Note:} We go about definitions in this section from a graph theoretic standpoint. However, all relevant definitions are explained in detail.
\end{document}
from numpy import array, cross, dot, float64
from pynurbs.config import Settings
from pynurbs.geometry.geom import Geometry
from pynurbs.geometry.methods.evaluate import check_param
from pynurbs.geometry.methods.geom_utils import (global_to_local_param,
local_to_global_param)
from pynurbs.geometry.methods.intersect_surface import (refine_spi_point,
refine_ssi_point)
from pynurbs.geometry.methods.misc import is_array_type, is_local_domain
from pynurbs.geometry.plane import Plane
from pynurbs.geometry.point import Point, Point2D
from pynurbs.geometry.vector import Vector
class ICurve(Geometry):
"""
Intersection curve.
"""
def __init__(self, s1, s2, crv3d, crv2d_s1, crv2d_s2, ftol):
super(ICurve, self).__init__('icurve')
self._s1 = s1
self._s2 = s2
self._crv3d = crv3d
self._crv2d_s1 = crv2d_s1
self._crv2d_s2 = crv2d_s2
self._ftol = ftol
self._crv3d.set_color(*self.color)
# self._check_order()
def __call__(self, u, rtype='Point', domain='local', tol=None):
return self.eval(u, rtype, domain, tol)
@property
def s1(self):
return self._s1
@property
def s2(self):
return self._s2
@property
def crv3d(self):
return self._crv3d
@property
def a(self):
return self._crv3d.a
@property
def b(self):
return self._crv3d.b
@property
def p(self):
return self._crv3d.p
@property
def m(self):
return self._crv3d.m
@property
def n(self):
return self._crv3d.n
@property
def uk(self):
return self._crv3d.uk
@property
def knots(self):
return self.get_mult_and_knots()[-1]
@property
def mult(self):
return self.get_mult_and_knots()[1]
@property
def cp(self):
return self._crv3d.cp
@property
def w(self):
return self._crv3d.w
@property
def cpw(self):
return self._crv3d.cpw
@property
def data(self):
return self.n, self.p, self.uk, self.cpw
@property
def ftol(self):
return self._ftol
@property
def is_spi(self):
return isinstance(self._s2, Plane)
@property
def is_ssi(self):
return not isinstance(self._s2, Plane)
@property
def is_closed(self):
return self._crv3d.is_closed
@property
def length(self):
return self.arc_length()
@property
def cp2d_s1(self):
return self._crv2d_s1.cp
@property
def cp2d_s2(self):
return self._crv2d_s2.cp
@property
def occ(self):
return self.get_occ()
@property
def handle(self):
return self._crv3d.handle
@property
def adaptor(self):
return self._crv3d.adaptor
def get_occ(self):
"""
Get OCC object.
:return:
"""
return self._crv3d.get_occ()
def _check_order(self):
"""
Make sure that the derivative of the intersection curve is in the same
direction as the 3-D curve. If not, reverse the curves.
"""
v3d = self._crv3d.deriv(0.5, 1, rtype='ndarray')
du = self.deriv(0.5, 1, rtype='ndarray')
if dot(v3d, du) < 0.:
self._crv3d.reverse(True)
self._crv2d_s1.reverse(True)
self._crv2d_s2.reverse(True)
def local_to_global_param(self, *args):
"""
Convert parameter(s) from local domain 0 <= u <= 1 to global domain
a <= u <= b.
:param args: Local parameter(s).
:return: Global parameter(s).
"""
return local_to_global_param(self.a, self.b, *args)
def global_to_local_param(self, *args):
"""
Convert parameter(s) from global domain a <= u <= b to local domain
0 <= u <= 1.
:param args: Global parameter(s).
:return: Local parameter(s).
"""
return global_to_local_param(self.a, self.b, *args)
def has_surf(self, sref):
"""
Check to see if the surface is used in the intersection curve.
:param surface_like sref: Reference surface.
:return: *True* if used, *False* if not.
:rtype: bool
"""
if sref is self._s1 or sref is self._s2:
return True
return False
def get_curve2d(self, sref):
"""
Get the 2-D curve associated with the reference surface.
:param sref: Reference surface (must have been used in the
intersection).
:return: 2-D intersection curve.
:rtype: :class:`.NurbsCurve`
"""
if sref is self._s1:
return self._crv2d_s1
if sref is self._s2:
return self._crv2d_s2
return None
def eval2d(self, u, rtype='Point', domain='local', tol=None, sref=None):
"""
Evaluate the intersection curve in 2-D space for each surface.
:param float u: Parametric point.
:param str rtype: Option to return a NumPy array or Point2D instance
(rtype = 'Point' or 'ndarray').
:param str domain: Option to use local (0 <= u <= 1) or global
(a <= u <= b) domain ('local', 'l', 'global', 'g').
:param float tol: Tolerance for point refinement.
:param surface_like sref: Option to provide one of the two surfaces
used in the intersection curve. If present, the method will
only return the 2-D parameters associated to that surface.
:return: Point on curve. Will return *None* if *sref* is not in the
intersection.
:rtype: :class:`.Point2D` or ndarray
"""
if is_local_domain(domain):
u = self.local_to_global_param(u)
# Evaluate 2-D curves.
s1, s2 = self._s1, self._s2
u1, v1 = self._crv2d_s1.eval(u, rtype='ndarray', domain='global')[:-1]
u2, v2 = self._crv2d_s2.eval(u, rtype='ndarray', domain='global')[:-1]
# Project 3-D point to surfaces to get initial parameters.
# s1, s2 = self._s1, self._s2
# p3d = self._crv3d.eval(u, domain='global')
# u1, v1 = invert_point_on_surface(p3d, s1)
# if self.is_spi:
# u2, v2 = invert_point_on_plane(p3d, s2)
# else:
# u2, v2 = invert_point_on_surface(p3d, s2)
# Refine parameters.
if tol is None:
tol = Settings.gtol / 100.
if self.is_spi:
u1, v1, u2, v2 = refine_spi_point(s1, s2, u1, v1, tol)[:-1]
else:
u1, v1, u2, v2 = refine_ssi_point(s1, s2, u1, v1, u2, v2, tol)[:-1]
if is_array_type(rtype):
if sref is s1:
return array([u1, v1], dtype=float64)
if sref is s2:
return array([u2, v2], dtype=float64)
return (array([u1, v1], dtype=float64),
array([u2, v2], dtype=float64))
else:
if sref is s1:
return Point2D((u1, v1))
if sref is s2:
return Point2D((u2, v2))
return Point2D((u1, v1)), Point2D((u2, v2))
def eval(self, u, rtype='Point', domain='local', tol=None):
"""
Evaluate curve at parametric point.
:param float u: Parametric point.
:param str rtype: Option to return a NumPy array or Point instance
(rtype = 'Point' or 'ndarray').
:param str domain: Option to use local (0 <= u <= 1) or global
(a <= u <= b) domain ('local', 'l', 'global', 'g').
:param float tol: Tolerance for point refinement.
:return: Point on curve.
:rtype: :class:`.Point` or ndarray
"""
if is_local_domain(domain):
u = self.local_to_global_param(u)
uv1, uv2 = self.eval2d(u, rtype='ndarray', domain='global', tol=tol)
p3d = self._s1.eval(uv1[0], uv1[1], rtype='ndarray', domain='global')
if is_array_type(rtype):
return p3d
else:
return Point(p3d)
def deriv(self, u, k=1, d=1, rtype='Vector', domain='local', tol=None):
"""
        Compute the derivative of the intersection curve. Only the first
        derivative is supported.
:param float u: Parametric point.
:param int k: Derivative to return (0 <= k <= 1).
:param int d: Highest derivative to compute. Currently only supports
first derivative.
:param str rtype: Option to return a NumPy array or a Vector instance
(rtype = 'Vector' or 'ndarray').
:param str domain: Option to use local (0 <= u <= 1) or global
(a <= u <= b) domain ('local', 'l', 'global', 'g').
:param float tol: Tolerance for point refinement.
        :return: First derivative of the intersection curve.
:rtype: :class:`.Vector` or ndarray
"""
if is_local_domain(domain):
u = self.local_to_global_param(u)
# Evaluate 2-D points.
s1, s2 = self._s1, self._s2
uv1, uv2 = self.eval2d(u, rtype='ndarray', domain='global', tol=tol)
# Evaluate surface normals.
vn1 = s1.norm(uv1[0], uv1[1], rtype='ndarray', domain='global')
if self.is_spi:
vn2 = s2.vn.ijk
else:
vn2 = s2.norm(uv2[0], uv2[1], rtype='ndarray', domain='global')
# First derivative is cross product
du = cross(vn1, vn2)
if is_array_type(rtype):
return du
else:
p0 = self.eval(u, domain='global')
return Vector(du, p0)
def extract(self, u0, u1, domain='local'):
"""
Extract a curve.
:param float u0: Starting parameter.
:param float u1: Ending parameter.
:param str domain: Option to use local (0 <= u <= 1) or global
(a <= u <= b) domain ('local', 'l', 'global', 'g').
:return: Curve between *u0* and *u1*.
:rtype: :class:`.ICurve`
"""
if is_local_domain(domain):
u0, u1 = self.local_to_global_param(u0, u1)
if u0 > u1:
u0, u1 = u1, u0
# Evaluate points on intersection curve.
p3d_0 = self.eval(u0, rtype='ndarray', domain='global')
p3d_1 = self.eval(u1, rtype='ndarray', domain='global')
uv0_s1_0, uv0_s2_0 = self.eval2d(u0, rtype='ndarray', domain='global')
uv0_s1_1, uv0_s2_1 = self.eval2d(u1, rtype='ndarray', domain='global')
# Extract curves
crv3d = self._crv3d.extract(u0, u1, domain='global')
crv2d_s1 = self._crv2d_s1.extract(u0, u1, domain='global')
crv2d_s2 = self._crv2d_s2.extract(u0, u1, domain='global')
# Force endpoints of curves to match original points by adjusting
# first and last control points.
crv3d.modify_cp(0, p3d_0)
crv3d.modify_cp(-1, p3d_1)
crv2d_s1.modify_cp(0, uv0_s1_0)
crv2d_s1.modify_cp(-1, uv0_s1_1)
crv2d_s2.modify_cp(0, uv0_s2_0)
crv2d_s2.modify_cp(-1, uv0_s2_1)
return ICurve(self._s1, self._s2, crv3d, crv2d_s1, crv2d_s2,
self._ftol)
def check_param(self, u):
"""
Check that the parameter is within the global domain of the knot vector
or is within tolerance of a unique knot value. Use
:class:`.CompareFloats` to compare floats.
:param float u: Global parameter.
:return: Parameter within global domain or near interior knot value.
:rtype: float
"""
c3d = self._crv3d
return check_param(c3d.n, c3d.p, c3d.uk, u)
def reverse(self, inplace=True):
"""
Reverse direction of curve.
:param bool inplace: Option to return new curve or modify
existing curve.
:return: Reversed NURBS curve.
:rtype: :class:`.ICurve`
"""
if inplace:
self._crv3d.reverse()
self._crv2d_s1.reverse()
self._crv2d_s2.reverse()
return True
crv3d = self._crv3d.reverse(False)
crv2d_s1 = self._crv2d_s1.reverse(False)
        crv2d_s2 = self._crv2d_s2.reverse(False)
return ICurve(self._s1, self._s2, crv3d, crv2d_s1, crv2d_s2,
self._ftol)
def arc_length(self, u0=0., u1=1., domain='local'):
"""
Estimate the arc length between curve parameters.
:param float u0: Starting parameter.
:param float u1: Ending parameter.
:param str domain: Option to use local or global domain.
:return: Arc length of curve between *u0* and *u1*.
:rtype: float
        .. note::
Arc length estimates will be a function of the tolerance used in the
original intersection method.
"""
return self._crv3d.arc_length(u0, u1, domain)
def get_mult_and_knots(self):
"""
Get an array of unique knots values for the curve and their
multiplicities.
:return: Number of unique knots, multiplicities, and the knots
(nu, um, uq).
:rtype: tuple
"""
return self._crv3d.get_mult_and_knots()
'''
Uses OpenCV to take an mp4 video and produce a number of JPEG images.

Requirements
----
OpenCV 3.2 must be installed.

Run
----
Place this file so that the sequence paths below resolve (or edit the paths),
then run:

$ cd <file_location>
$ python mp4_to_jpg.py

This produces one folder per sequence type ('manipulated' and 'original')
containing the extracted images for all videos.
'''
import cv2
import numpy as np
import os
from os import listdir
fps = 30
path_mani = '../manipulated_sequences/Deepfakes/c23/videos'
path_orig = '../original_sequences/youtube/c23/videos'
typ_orig = 'original'
typ_mani = 'manipulated'
def mp4_to_jpegs(seq_type, filename, fps, path):
    # Playing video from file
    print(filename)
    cap = cv2.VideoCapture(path + '/' + filename)
    # Note: for file playback this is generally a no-op; the capture runs at
    # the file's own frame rate.
    cap.set(cv2.CAP_PROP_FPS, fps)
    title = filename.split('.')[0]
    try:
        if not os.path.exists(seq_type):
            os.makedirs(seq_type)
    except OSError:
        print('Error: creating directory ' + seq_type + ' failed')
    currentFrame = 0
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break
        # Save only every 60th frame, to stop near-duplicate images
        if currentFrame % 60 == 0:
            # Saves an image of the current frame as a jpg file
            name = './' + seq_type + '/' + str(title) + '-' + str(currentFrame) + '.jpg'
            print('Creating...' + name)
            cv2.imwrite(name, frame)
        currentFrame += 1
    # When everything is done, release the capture
    cap.release()
    # cv2.destroyAllWindows()
def mp4s_to_jpegs(typ, filenames, fps, path):
    for filename in filenames:
        mp4_to_jpegs(typ, filename, fps, path)

if __name__ == '__main__':
    mp4s_to_jpegs(typ_mani, listdir(path_mani), fps, path_mani)
    mp4s_to_jpegs(typ_orig, listdir(path_orig), fps, path_orig)
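
# A minimal sketch (not part of the original script), assuming you want to
# derive the save interval from a desired seconds-per-image value rather than
# hard-coding 60; `frames_between_saves` is a hypothetical helper name.
def frames_between_saves(video_fps, seconds_per_image):
    # e.g. frames_between_saves(30, 2.0) == 60, matching the
    # `currentFrame % 60 == 0` check above for a 30 fps source.
    return max(1, int(round(video_fps * seconds_per_image)))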
|
{"hexsha": "0a1773cb5a8bd64b2b81fe7d9b7118d1711c446c", "size": 1716, "ext": "py", "lang": "Python", "max_stars_repo_path": "limited_parse.py", "max_stars_repo_name": "PRAkTIKal24/DeepFake_Classification", "max_stars_repo_head_hexsha": "de2c8d191145fc938cedb9de412ff2d949347272", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-05T04:58:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-18T08:28:00.000Z", "max_issues_repo_path": "limited_parse.py", "max_issues_repo_name": "PRAkTIKal24/DeepFake_Classification", "max_issues_repo_head_hexsha": "de2c8d191145fc938cedb9de412ff2d949347272", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "limited_parse.py", "max_forks_repo_name": "PRAkTIKal24/DeepFake_Classification", "max_forks_repo_head_hexsha": "de2c8d191145fc938cedb9de412ff2d949347272", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8125, "max_line_length": 87, "alphanum_fraction": 0.7004662005, "include": true, "reason": "import numpy", "num_tokens": 466}
|
[STATEMENT]
lemma subst_bv1_beta:
"subst_bv1 s (length (T#Ts)) x \<rightarrow>\<^sub>\<beta> subst_bv1 t (length (T#Ts)) x
\<Longrightarrow> typ_of1 Ts s = Some ty
\<Longrightarrow> typ_of1 Ts t = Some ty
\<Longrightarrow> s \<rightarrow>\<^sub>\<beta> t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>subst_bv1 s (length (T # Ts)) x \<rightarrow>\<^sub>\<beta> subst_bv1 t (length (T # Ts)) x; typ_of1 Ts s = Some ty; typ_of1 Ts t = Some ty\<rbrakk> \<Longrightarrow> s \<rightarrow>\<^sub>\<beta> t
[PROOF STEP]
proof (induction "subst_bv1 s (length (T#Ts)) x" "subst_bv1 t (length (T#Ts)) x"
arbitrary: s t T T Ts ty rule: beta.induct)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>T s t sa ta Ta Ts ty. \<lbrakk>Abs T s $ t = subst_bv1 sa (length (Ta # Ts)) x; subst_bv2 s 0 t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
2. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; s $ u = subst_bv1 sa (length (T # Ts)) x; t $ u = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
3. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; u $ s = subst_bv1 sa (length (T # Ts)) x; u $ t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
4. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
case (beta T s t)
[PROOF STATE]
proof (state)
this:
Abs Ta__ sa__ $ ta__ = subst_bv1 s (length (T # Ts)) x
subst_bv2 sa__ 0 ta__ = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
goal (4 subgoals):
1. \<And>T s t sa ta Ta Ts ty. \<lbrakk>Abs T s $ t = subst_bv1 sa (length (Ta # Ts)) x; subst_bv2 s 0 t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
2. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; s $ u = subst_bv1 sa (length (T # Ts)) x; t $ u = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
3. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; u $ s = subst_bv1 sa (length (T # Ts)) x; u $ t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
4. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Abs Ta__ sa__ $ ta__ = subst_bv1 s (length (T # Ts)) x
subst_bv2 sa__ 0 ta__ = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
Abs Ta__ sa__ $ ta__ = subst_bv1 s (length (T # Ts)) x
subst_bv2 sa__ 0 ta__ = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
goal (1 subgoal):
1. s \<rightarrow>\<^sub>\<beta> t
[PROOF STEP]
by (metis beta.simps length_Cons loose_bvar_Suc no_loose_bvar_imp_no_subst_bv1 typ_of1_imp_no_loose_bvar)
[PROOF STATE]
proof (state)
this:
s \<rightarrow>\<^sub>\<beta> t
goal (3 subgoals):
1. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; s $ u = subst_bv1 sa (length (T # Ts)) x; t $ u = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
2. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; u $ s = subst_bv1 sa (length (T # Ts)) x; u $ t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
3. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; s $ u = subst_bv1 sa (length (T # Ts)) x; t $ u = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
2. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; u $ s = subst_bv1 sa (length (T # Ts)) x; u $ t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
3. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
case (appL s t u)
[PROOF STATE]
proof (state)
this:
sa__ \<rightarrow>\<^sub>\<beta> ta__
\<lbrakk>sa__ = subst_bv1 ?s (length (?T # ?Ts)) x; ta__ = subst_bv1 ?t (length (?T # ?Ts)) x; typ_of1 ?Ts ?s = Some ?ty; typ_of1 ?Ts ?t = Some ?ty\<rbrakk> \<Longrightarrow> ?s \<rightarrow>\<^sub>\<beta> ?t
sa__ $ u = subst_bv1 s (length (T # Ts)) x
ta__ $ u = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
goal (3 subgoals):
1. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; s $ u = subst_bv1 sa (length (T # Ts)) x; t $ u = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
2. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; u $ s = subst_bv1 sa (length (T # Ts)) x; u $ t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
3. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
sa__ \<rightarrow>\<^sub>\<beta> ta__
\<lbrakk>sa__ = subst_bv1 ?s (length (?T # ?Ts)) x; ta__ = subst_bv1 ?t (length (?T # ?Ts)) x; typ_of1 ?Ts ?s = Some ?ty; typ_of1 ?Ts ?t = Some ?ty\<rbrakk> \<Longrightarrow> ?s \<rightarrow>\<^sub>\<beta> ?t
sa__ $ u = subst_bv1 s (length (T # Ts)) x
ta__ $ u = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
sa__ \<rightarrow>\<^sub>\<beta> ta__
\<lbrakk>sa__ = subst_bv1 ?s (length (?T # ?Ts)) x; ta__ = subst_bv1 ?t (length (?T # ?Ts)) x; typ_of1 ?Ts ?s = Some ?ty; typ_of1 ?Ts ?t = Some ?ty\<rbrakk> \<Longrightarrow> ?s \<rightarrow>\<^sub>\<beta> ?t
sa__ $ u = subst_bv1 s (length (T # Ts)) x
ta__ $ u = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
goal (1 subgoal):
1. s \<rightarrow>\<^sub>\<beta> t
[PROOF STEP]
by (metis beta.appL length_Cons loose_bvar_Suc no_loose_bvar_imp_no_subst_bv1 typ_of1_imp_no_loose_bvar)
[PROOF STATE]
proof (state)
this:
s \<rightarrow>\<^sub>\<beta> t
goal (2 subgoals):
1. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; u $ s = subst_bv1 sa (length (T # Ts)) x; u $ t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
2. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; u $ s = subst_bv1 sa (length (T # Ts)) x; u $ t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
2. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
case (appR s t u)
[PROOF STATE]
proof (state)
this:
sa__ \<rightarrow>\<^sub>\<beta> ta__
\<lbrakk>sa__ = subst_bv1 ?s (length (?T # ?Ts)) x; ta__ = subst_bv1 ?t (length (?T # ?Ts)) x; typ_of1 ?Ts ?s = Some ?ty; typ_of1 ?Ts ?t = Some ?ty\<rbrakk> \<Longrightarrow> ?s \<rightarrow>\<^sub>\<beta> ?t
u $ sa__ = subst_bv1 s (length (T # Ts)) x
u $ ta__ = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
goal (2 subgoals):
1. \<And>s t u sa ta T Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; u $ s = subst_bv1 sa (length (T # Ts)) x; u $ t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
2. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
sa__ \<rightarrow>\<^sub>\<beta> ta__
\<lbrakk>sa__ = subst_bv1 ?s (length (?T # ?Ts)) x; ta__ = subst_bv1 ?t (length (?T # ?Ts)) x; typ_of1 ?Ts ?s = Some ?ty; typ_of1 ?Ts ?t = Some ?ty\<rbrakk> \<Longrightarrow> ?s \<rightarrow>\<^sub>\<beta> ?t
u $ sa__ = subst_bv1 s (length (T # Ts)) x
u $ ta__ = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
sa__ \<rightarrow>\<^sub>\<beta> ta__
\<lbrakk>sa__ = subst_bv1 ?s (length (?T # ?Ts)) x; ta__ = subst_bv1 ?t (length (?T # ?Ts)) x; typ_of1 ?Ts ?s = Some ?ty; typ_of1 ?Ts ?t = Some ?ty\<rbrakk> \<Longrightarrow> ?s \<rightarrow>\<^sub>\<beta> ?t
u $ sa__ = subst_bv1 s (length (T # Ts)) x
u $ ta__ = subst_bv1 t (length (T # Ts)) x
typ_of1 Ts s = Some ty
typ_of1 Ts t = Some ty
goal (1 subgoal):
1. s \<rightarrow>\<^sub>\<beta> t
[PROOF STEP]
by (metis beta.simps length_Cons loose_bvar_Suc no_loose_bvar_imp_no_subst_bv1 typ_of1_imp_no_loose_bvar)
[PROOF STATE]
proof (state)
this:
s \<rightarrow>\<^sub>\<beta> t
goal (1 subgoal):
1. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
case (abs s t bT sa ta T Ts rT)
[PROOF STATE]
proof (state)
this:
s \<rightarrow>\<^sub>\<beta> t
\<lbrakk>s = subst_bv1 ?s (length (?T # ?Ts)) x; t = subst_bv1 ?t (length (?T # ?Ts)) x; typ_of1 ?Ts ?s = Some ?ty; typ_of1 ?Ts ?t = Some ?ty\<rbrakk> \<Longrightarrow> ?s \<rightarrow>\<^sub>\<beta> ?t
Abs bT s = subst_bv1 sa (length (T # Ts)) x
Abs bT t = subst_bv1 ta (length (T # Ts)) x
typ_of1 Ts sa = Some rT
typ_of1 Ts ta = Some rT
goal (1 subgoal):
1. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
obtain s' where "Abs bT s' = sa"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>s'. Abs bT s' = sa \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using abs.hyps(3) abs.prems loose_bvar_Suc no_loose_bvar_imp_no_subst_bv1 typ_of1_imp_no_loose_bvar
[PROOF STATE]
proof (prove)
using this:
Abs bT s = subst_bv1 sa (length (T # Ts)) x
typ_of1 Ts sa = Some rT
typ_of1 Ts ta = Some rT
loose_bvar ?t (Suc ?k) \<Longrightarrow> loose_bvar ?t ?k
\<not> loose_bvar ?t ?lev \<Longrightarrow> subst_bv1 ?t ?lev ?u = ?t
typ_of1 ?Ts ?t = Some ?ty \<Longrightarrow> \<not> loose_bvar ?t (length ?Ts)
goal (1 subgoal):
1. (\<And>s'. Abs bT s' = sa \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis length_Cons)
[PROOF STATE]
proof (state)
this:
Abs bT s' = sa
goal (1 subgoal):
1. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Abs bT s' = sa
goal (1 subgoal):
1. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
obtain t' where "Abs bT t' = ta"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>t'. Abs bT t' = ta \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using abs.hyps(4) abs.prems loose_bvar_Suc no_loose_bvar_imp_no_subst_bv1 typ_of1_imp_no_loose_bvar
[PROOF STATE]
proof (prove)
using this:
Abs bT t = subst_bv1 ta (length (T # Ts)) x
typ_of1 Ts sa = Some rT
typ_of1 Ts ta = Some rT
loose_bvar ?t (Suc ?k) \<Longrightarrow> loose_bvar ?t ?k
\<not> loose_bvar ?t ?lev \<Longrightarrow> subst_bv1 ?t ?lev ?u = ?t
typ_of1 ?Ts ?t = Some ?ty \<Longrightarrow> \<not> loose_bvar ?t (length ?Ts)
goal (1 subgoal):
1. (\<And>t'. Abs bT t' = ta \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis length_Cons)
[PROOF STATE]
proof (state)
this:
Abs bT t' = ta
goal (1 subgoal):
1. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Abs bT s' = sa
Abs bT t' = ta
[PROOF STEP]
have "s' \<rightarrow>\<^sub>\<beta> t'"
[PROOF STATE]
proof (prove)
using this:
Abs bT s' = sa
Abs bT t' = ta
goal (1 subgoal):
1. s' \<rightarrow>\<^sub>\<beta> t'
[PROOF STEP]
by (metis abs.hyps(1) abs.hyps(3) abs.hyps(4) abs.prems(1) abs.prems(2) length_Cons
loose_bvar_Suc no_loose_bvar_imp_no_subst_bv1 term.inject(4) typ_of1_imp_no_loose_bvar)
[PROOF STATE]
proof (state)
this:
s' \<rightarrow>\<^sub>\<beta> t'
goal (1 subgoal):
1. \<And>s t T sa ta Ta Ts ty. \<lbrakk>s \<rightarrow>\<^sub>\<beta> t; \<And>sa ta T Ts ty. \<lbrakk>s = subst_bv1 sa (length (T # Ts)) x; t = subst_bv1 ta (length (T # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta; Abs T s = subst_bv1 sa (length (Ta # Ts)) x; Abs T t = subst_bv1 ta (length (Ta # Ts)) x; typ_of1 Ts sa = Some ty; typ_of1 Ts ta = Some ty\<rbrakk> \<Longrightarrow> sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
s' \<rightarrow>\<^sub>\<beta> t'
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
s' \<rightarrow>\<^sub>\<beta> t'
goal (1 subgoal):
1. sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
using \<open>Abs bT s' = sa\<close> \<open>Abs bT t' = ta\<close>
[PROOF STATE]
proof (prove)
using this:
s' \<rightarrow>\<^sub>\<beta> t'
Abs bT s' = sa
Abs bT t' = ta
goal (1 subgoal):
1. sa \<rightarrow>\<^sub>\<beta> ta
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
sa \<rightarrow>\<^sub>\<beta> ta
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 9277, "file": "Metalogic_ProofChecker_BetaNorm", "length": 32}
|
#!/usr/bin/env python
"""
Created on Thu Jan 23 10:43:35 2014
Author: Oren Freifeld
Email: freifeld@csail.mit.edu
"""
import numpy as np
#from scipy.linalg import expm
from scipy.sparse.linalg import expm # scipy.linalg.expm is just a wrapper around this one.
#from expm_hacked import expm
#from scipy.sparse.linalg import expm_multiply
from cpab.cpaNd import CpaCalcs as CpaCalcsNd
from of.utils import ipshell
#from pyvision.essentials import *
from pylab import plt
#from class_affine_flow import AffineFlow
#from cy.transform.transform import calc_flowline_arr1d
#from cy.transform32.transform import calc_flowline_arr1d as calc_flowline_arr1d32
class CpaCalcs(CpaCalcsNd):
def __init__(self,XMINS,XMAXS,Ngrids,use_GPU_if_possible,my_dtype=np.float64):
"""
Ngrids: number of pixels in each dim.
Don't confuse Nx & Ny with the numbers of cells in each dim.
"""
super(CpaCalcs,self).__init__(XMINS,XMAXS,Ngrids,use_GPU_if_possible,my_dtype)
if np.asarray(XMINS).any():
raise NotImplementedError
Nx,Ny=Ngrids
Nx = int(Nx)
Ny = int(Ny)
self.Nx=Nx
self.Ny=Ny
yy,xx = np.mgrid[0:Ny+1,0:Nx+1]
xx=xx.astype(self.my_dtype)
yy=yy.astype(self.my_dtype)
        # The shape is (2, 1 + #pixels in y direction, 1 + #pixels in x direction)
self.x_dense_grid = np.asarray([xx,yy]).copy()
        # The shape is (2, #pixels in y direction, #pixels in x direction)
self.x_dense_grid_img = np.asarray([xx[:-1,:-1],yy[:-1,:-1]]).copy()
        # The shape is ((1 + #pixels in y direction) * (1 + #pixels in x direction), 2)
self.x_dense = np.asarray([self.x_dense_grid[0].ravel(),
self.x_dense_grid[1].ravel()]).T.copy()
        # The shape is (#pixels in y direction * #pixels in x direction, 2)
self.x_dense_img = np.asarray([self.x_dense_grid_img[0].ravel(),
self.x_dense_grid_img[1].ravel()]).T.copy()
        if self.x_dense.shape[1] != 2:
            raise ValueError(self.x_dense.shape)
        if self.x_dense_img.shape[1] != 2:
            raise ValueError(self.x_dense_img.shape)
self.XMINS = np.asarray([xx.min(),yy.min()])
        self.XMAXS = np.asarray([xx.max(), yy.max()])  # note: this is greater than the XMAXS argument (by 1)
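
# A minimal standalone sketch (not part of the original class) illustrating
# the dense-grid shapes constructed above, assuming Nx = 4 and Ny = 3:
if __name__ == '__main__':
    Nx, Ny = 4, 3
    yy, xx = np.mgrid[0:Ny + 1, 0:Nx + 1]
    x_dense_grid = np.asarray([xx, yy])
    print(x_dense_grid.shape)  # (2, Ny + 1, Nx + 1) == (2, 4, 5)
    x_dense = np.asarray([x_dense_grid[0].ravel(),
                          x_dense_grid[1].ravel()]).T
    print(x_dense.shape)       # ((Ny + 1) * (Nx + 1), 2) == (20, 2)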
|
{"hexsha": "4874ee6a5b04c01ecb0b86a849226b8e7572ab41", "size": 2518, "ext": "py", "lang": "Python", "max_stars_repo_path": "cpab/cpa2d/calcs/_CpaCalcs.py", "max_stars_repo_name": "freifeld/cpabDiffeo", "max_stars_repo_head_hexsha": "22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2016-03-16T21:35:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-11T04:16:21.000Z", "max_issues_repo_path": "cpab/cpa2d/calcs/_CpaCalcs.py", "max_issues_repo_name": "freifeld/cpabDiffeo", "max_issues_repo_head_hexsha": "22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cpab/cpa2d/calcs/_CpaCalcs.py", "max_forks_repo_name": "freifeld/cpabDiffeo", "max_forks_repo_head_hexsha": "22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-08-12T23:02:09.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-14T18:20:36.000Z", "avg_line_length": 32.7012987013, "max_line_length": 93, "alphanum_fraction": 0.6012708499, "include": true, "reason": "import numpy,from scipy", "num_tokens": 677}
|
theory Turan
imports
"Girth_Chromatic.Ugraphs"
"Random_Graph_Subgraph_Threshold.Ugraph_Lemmas"
begin
section \<open>Basic facts on graphs\<close>
lemma wellformed_uverts_0 :
assumes "uwellformed G" and "uverts G = {}"
shows "card (uedges G) = 0" using assms
by (metis uwellformed_def card.empty ex_in_conv zero_neq_numeral)
lemma finite_verts_edges :
assumes "uwellformed G" and "finite (uverts G)"
shows "finite (uedges G)"
proof -
have sub_pow: "uwellformed G \<Longrightarrow> uedges G \<subseteq> {S. S \<subseteq> uverts G}"
by (cases G, auto simp add: uwellformed_def)
then have "finite {S. S \<subseteq> uverts G}" using assms
by auto
with sub_pow assms show "finite (uedges G)"
using finite_subset by blast
qed
lemma ugraph_max_edges :
assumes "uwellformed G" and "card (uverts G) = n" and "finite (uverts G)"
shows "card (uedges G) \<le> n * (n-1)/2"
using assms wellformed_all_edges [OF assms(1)] card_all_edges [OF assms(3)] Binomial.choose_two [of "card(uverts G)"]
by (smt (verit, del_insts) all_edges_finite card_mono dbl_simps(3) dbl_simps(5) div_times_less_eq_dividend le_divide_eq_numeral1(1) le_square nat_mult_1_right numerals(1) of_nat_1 of_nat_diff of_nat_mono of_nat_mult of_nat_numeral right_diff_distrib')
lemma subgraph_verts_finite : "\<lbrakk> finite (uverts G); subgraph G' G \<rbrakk> \<Longrightarrow> finite (uverts G')"
using rev_finite_subset subgraph_def by auto
section \<open>Cliques\<close>
text \<open>In this section a straightforward definition of cliques for simple, undirected graphs is introduced.
Besides fundamental facts about cliques, more specialized lemmas are also proved in the subsequent subsections.\<close>
definition uclique :: "ugraph \<Rightarrow> ugraph \<Rightarrow> nat \<Rightarrow> bool" where
"uclique C G p \<equiv> p = card (uverts C) \<and> subgraph C G \<and> C = complete (uverts C)"
lemma clique_any_edge :
assumes "uclique C G p" and "x \<in> uverts C" and "y \<in> uverts C" and "x \<noteq> y"
shows "{x,y} \<in> uedges G"
using assms
apply (simp add: uclique_def complete_def all_edges_def subgraph_def)
by (smt (verit, best) SigmaI fst_conv image_iff mem_Collect_eq mk_uedge.simps snd_conv subset_eq)
lemma clique_exists : "\<exists> C p. uclique C G p \<and> p \<le> card (uverts G)"
using bex_imageD card.empty emptyE gr_implies_not0 le_neq_implies_less
by (auto simp add: uclique_def complete_def subgraph_def all_edges_def)
lemma clique_exists1 :
assumes "uverts G \<noteq> {}" and "finite (uverts G)"
shows "\<exists> C p. uclique C G p \<and> 0 < p \<and> p \<le> card (uverts G)"
proof -
obtain x where x: "x \<in> uverts G"
using assms
by auto
show ?thesis
apply (rule exI [of _ "({x},{})"], rule exI [of _ 1])
using x assms(2)
by (simp add: uclique_def subgraph_def complete_def all_edges_def Suc_leI assms(1) card_gt_0_iff)
qed
lemma clique_max_size : "uclique C G p \<Longrightarrow> finite (uverts G) \<Longrightarrow> p \<le> card (uverts G)"
by (auto simp add: uclique_def subgraph_def Finite_Set.card_mono)
lemma clique_exists_gt0 :
assumes "finite (uverts G)" "card (uverts G) > 0"
shows "\<exists> C p. uclique C G p \<and> p \<le> card (uverts G) \<and> (\<forall>C q. uclique C G q \<longrightarrow> q \<le> p)"
proof -
have 1: "finite (uverts G) \<Longrightarrow> finite {p. \<exists>C. uclique C G p}"
using clique_max_size
by (smt (verit, best) finite_nat_set_iff_bounded_le mem_Collect_eq)
have 2: "\<And>A::nat set. finite A \<Longrightarrow> \<exists>x. x\<in>A \<Longrightarrow> \<exists>x\<in>A.\<forall>y\<in>A. y \<le> x"
using Max_ge Max_in by blast
have "\<exists>C p. uclique C G p \<and> (\<forall>C q. uclique C G q \<longrightarrow> q \<le> p)"
using 2 [OF 1 [OF \<open>finite (uverts G)\<close>]] clique_exists [of G]
by (smt (z3) mem_Collect_eq)
then show ?thesis
using \<open>finite (uverts G)\<close> clique_max_size
by blast
qed
text \<open>If there exists a $(p+1)$-clique @{term C} in a graph @{term G},
then we can obtain a $p$-clique in @{term G} by removing an arbitrary vertex from @{term C}.\<close>
lemma clique_size_jumpfree :
assumes "finite (uverts G)" and "uwellformed G"
and "uclique C G (p+1)"
shows "\<exists>C'. uclique C' G p"
proof -
have "card(uverts G) > p"
using assms by (simp add: uclique_def subgraph_def card_mono less_eq_Suc_le)
obtain x where x: "x \<in> uverts C"
using assms by (fastforce simp add: uclique_def)
have "mk_uedge ` {uv \<in> uverts C \<times> uverts C. fst uv \<noteq> snd uv} - {A \<in> uedges C. x \<in> A} =
mk_uedge ` {uv \<in> (uverts C - {x}) \<times> (uverts C - {x}). fst uv \<noteq> snd uv}"
proof -
have "\<And>y. y \<in> mk_uedge ` {uv \<in> uverts C \<times> uverts C. fst uv \<noteq> snd uv} - {A \<in> uedges C. x \<in> A} \<Longrightarrow>
y \<in> mk_uedge ` {uv \<in> (uverts C - {x}) \<times> (uverts C - {x}). fst uv \<noteq> snd uv}"
using assms(3)
apply (simp add: uclique_def complete_def all_edges_def)
by (smt (z3) DiffI SigmaE SigmaI image_iff insertCI mem_Collect_eq mk_uedge.simps singleton_iff snd_conv)
moreover have "\<And>y. y \<in> mk_uedge ` {uv \<in> (uverts C - {x}) \<times> (uverts C - {x}). fst uv \<noteq> snd uv}
\<Longrightarrow> y \<in> mk_uedge ` {uv \<in> uverts C \<times> uverts C. fst uv \<noteq> snd uv} - {A \<in> uedges C. x \<in> A}"
apply (simp add: uclique_def complete_def all_edges_def)
by (smt (z3) DiffE SigmaE SigmaI image_iff insert_iff mem_Collect_eq mk_uedge.simps singleton_iff)
ultimately show ?thesis
by blast
qed
then have 1: "(uverts C - {x}, uedges C - {A \<in> uedges C. x \<in> A}) = Ugraph_Lemmas.complete (uverts C - {x})"
using assms(3)
apply (simp add: uclique_def complete_def all_edges_def)
by (metis (no_types, lifting) snd_eqD)
show ?thesis
apply (rule exI [of _ "C -- x"])
using assms x
apply (simp add: uclique_def remove_vertex_def subgraph_def)
apply (simp add: 1)
by (auto simp add: complete_def all_edges_def)
qed
text \<open>The next lemma generalises the lemma @{thm [source] clique_size_jumpfree} to a proof of
the existence of a clique of any size smaller than the size of the original clique.\<close>
lemma clique_size_decr :
assumes "finite (uverts G)" and "uwellformed G"
and "uclique C G p"
shows "q \<le> p \<Longrightarrow> \<exists>C. uclique C G q" using assms
proof (induction q rule: measure_induct [of "\<lambda>x. p - x"])
case (1 x)
then show ?case
proof (cases "x = p")
case True
then show ?thesis
using \<open>uclique C G p\<close>
by blast
next
case False
with 1(2) have "x < p"
by auto
from \<open>x < p\<close> have "p - Suc x < p - x"
by auto
then show ?thesis
using 1(1) assms(1,2,3) \<open>x < p\<close>
using clique_size_jumpfree [OF \<open>finite (uverts G)\<close> \<open>uwellformed G\<close> _]
by (metis "1.prems"(4) add.commute linorder_not_le not_less_eq plus_1_eq_Suc)
qed
qed
text \<open>With this lemma we can easily derive by contradiction that,
if there is no $p$-clique, then there cannot exist a clique of a size greater than @{term p}.\<close>
corollary clique_size_neg_max :
assumes "finite (uverts G)" and "uwellformed G"
and "\<not>(\<exists>C. uclique C G p)"
shows "\<forall>C q. uclique C G q \<longrightarrow> q < p"
proof (rule ccontr)
assume 1: "\<not> (\<forall>C q. uclique C G q \<longrightarrow> q < p)"
show False
proof -
obtain C q where C: "uclique C G q"
and q: "q \<ge> p"
using 1 linorder_not_less
by blast
show ?thesis
using assms(3) q clique_size_decr [OF \<open>finite (uverts G)\<close> \<open>uwellformed G\<close> C ]
using order_less_imp_le by blast
qed
qed
corollary clique_complete :
assumes "finite V" and "x \<le> card V"
shows "\<exists>C. uclique C (complete V) x"
proof -
have "uclique (complete V) (complete V) (card V)"
by (simp add: uclique_def complete_def subgraph_def)
then show ?thesis
using clique_size_decr [OF _ complete_wellformed [of V] _ assms(2)] assms(1)
by (simp add: complete_def)
qed
lemma subgraph_clique :
assumes "uwellformed G" "subgraph C G" "C = complete (uverts C)"
shows "{e \<in> uedges G. e \<subseteq> uverts C} = uedges C"
proof -
from assms complete_wellformed [of "uverts C"] have "uedges C \<subseteq> {e \<in> uedges G. e \<subseteq> uverts C}"
by (auto simp add: subgraph_def uwellformed_def)
moreover from assms(1) complete_wellformed [of "uverts C"] have "{e \<in> uedges G. e \<subseteq> uverts C} \<subseteq> uedges C"
apply (simp add: subgraph_def uwellformed_def complete_def card_2_iff all_edges_def)
using assms(3)[unfolded complete_def all_edges_def] in_mk_uedge_img
by (smt (verit, ccfv_threshold) SigmaI fst_conv insert_subset mem_Collect_eq snd_conv subsetI)
ultimately show ?thesis
by auto
qed
text \<open>Next, we prove that in a graph @{term G} with a $p$-clique @{term C} and some vertex @{term v} outside of this clique,
there exists a $(p+1)$-clique in @{term G} if @{term v} is connected to all nodes in @{term C}.
The next lemma is an abstracted version that does not explicitly mention cliques:
If a vertex @{term n} has as many edges to a set of nodes @{term N} as there are nodes in @{term N}
then @{term n} is connected to all vertices in @{term N}.\<close>
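text \<open>The counting argument, informally: if some $x \in N$ were missed, then
$E \subseteq \{\{n,y\} \mid y \in N \setminus \{x\}\}$ and hence $|E| \le |N| - 1 < |N|$,
contradicting $|E| = |N|$. This is exactly how the proof below derives a contradiction.\<close>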
lemma card_edges_nodes_all_edges :
fixes G :: "ugraph" and N :: "nat set" and E :: "nat set set" and n :: nat
assumes "uwellformed G"
and "finite N"
and "N \<subseteq> uverts G" and "E \<subseteq> uedges G"
and "n \<in> uverts G" and "n \<notin> N"
and "\<forall>e \<in> E. \<exists>x \<in> N. {n,x} = e"
and "card E = card N"
shows "\<forall>x \<in> N. {n,x} \<in> E"
proof (rule ccontr)
assume "\<not>(\<forall>x \<in> N. {n,x} \<in> E)"
show False
proof -
obtain x where x: "x \<in> N" and e: "{n,x} \<notin> E"
using \<open>\<not>(\<forall>x \<in> N. {n,x} \<in> E)\<close>
by auto
have "E \<subseteq> (\<lambda>y. {n,y}) ` (N - {x})"
using Set.image_diff_subset \<open>\<forall>e \<in> E. \<exists>x \<in> N. {n,x} = e\<close> x e
by auto
then show ?thesis
using \<open>finite N\<close> \<open>card E = card N\<close> x
using surj_card_le [of "N - {x}" E "(\<lambda>y. {n,y})"]
by (simp, metis card_gt_0_iff diff_less emptyE lessI linorder_not_le)
qed
qed
subsection \<open>Partitioning edges along a clique\<close>
text \<open>Tur\'{a}n's proof partitions the edges of a graph into three partitions for a $(p-1)$-clique @{term C}:
All edges within @{term C}, all edges outside of @{term C}, and all edges between a vertex in @{term C} and a
vertex not in @{term C}.
We prove a generalized lemma that partitions the edges along some arbitrary set of vertices
which does not necessarily need to induce a clique.
Furthermore, in Tur\'{a}n's graph theorem we only argue about the cardinalities of the partitions,
so we restrict this proof to showing that
the sum of the cardinalities of the partitions is equal to the total number of edges.\<close>
lemma graph_partition_edges_card :
assumes "finite (uverts G)" and "uwellformed G" and "A \<subseteq> (uverts G)"
shows "card (uedges G) = card {e \<in> uedges G. e \<subseteq> A} + card {e \<in> uedges G. e \<subseteq> uverts G - A} + card {e \<in> uedges G. e \<inter> A \<noteq> {} \<and> e \<inter> (uverts G - A) \<noteq> {}}"
using assms
proof -
have "uedges G = {e \<in> uedges G. e \<subseteq> A} \<union> {e \<in> uedges G. e \<subseteq> (uverts G) - A} \<union> {e \<in> uedges G. e \<inter> A \<noteq> {} \<and> e \<inter> ((uverts G) - A) \<noteq> {}}"
using assms uwellformed_def
by blast
moreover have "{e \<in> uedges G. e \<subseteq> A} \<inter> {e \<in> uedges G. e \<subseteq> uverts G - A} = {}"
using assms uwellformed_def
by (smt (verit, ccfv_SIG) Diff_disjoint Int_subset_iff card.empty disjoint_iff mem_Collect_eq nat.simps(3) nat_1_add_1 plus_1_eq_Suc prod.sel(2) subset_empty)
moreover have "({e \<in> uedges G. e \<subseteq> A} \<union> {e \<in> uedges G. e \<subseteq> uverts G - A}) \<inter> {e \<in> uedges G. e \<inter> A \<noteq> {} \<and> e \<inter> (uverts G - A) \<noteq> {}} = {}"
by blast
moreover have "finite {e \<in> uedges G. e \<subseteq> A}" using assms
by (simp add: finite_subset)
moreover have "finite {e \<in> uedges G. e \<subseteq> uverts G - A}" using assms
by (simp add: finite_subset)
moreover have "finite {e \<in> uedges G. e \<inter> A \<noteq> {} \<and> e \<inter> (uverts G - A) \<noteq> {}}"
using assms finite_verts_edges
by auto
ultimately show ?thesis
using assms Finite_Set.card_Un_disjoint
by (smt (verit, best) finite_UnI)
qed
text \<open>Now, we turn to the problem of calculating the cardinalities of these partitions
when they are induced by the biggest clique in the graph.
First, we consider the number of edges in a $p$-clique.\<close>
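text \<open>A $p$-clique contains all $\binom{p}{2} = p(p-1)/2$ possible edges among its $p$ vertices,
which is precisely the count established next.\<close>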
lemma clique_edges_inside :
assumes G1: "uwellformed G" and G2: "finite (uverts G)"
and p: "p \<le> card (uverts G)" and n: "n = card(uverts G)"
and C: "uclique C G p"
shows "card {e \<in> uedges G. e \<subseteq> uverts C} = p * (p-1) / 2"
proof -
have "2 dvd (card (uverts C) * (p - 1))"
using C uclique_def
by auto
have "2 = real 2"
by simp
then show ?thesis
using C uclique_def [of C G p] complete_def [of "uverts C"]
using subgraph_clique [OF G1, of C] subgraph_verts_finite [OF assms(2), of C]
using Real.real_of_nat_div [OF \<open>2 dvd (card (uverts C) * (p - 1))\<close>] Binomial.choose_two [of " card (uverts G)"]
by (smt (verit, del_insts) One_nat_def approximation_preproc_nat(5) card_all_edges diff_self_eq_0 eq_imp_le left_diff_distrib' left_diff_distrib' linorder_not_less mult_le_mono2 n_choose_2_nat not_gr0 not_less_eq_eq of_nat_1 of_nat_diff snd_eqD)
qed
text \<open>Next, we turn to the number of edges that connect a node inside the biggest clique with
a node outside of said clique. For that, we start by calculating a bound on the number of
edges from a single node outside of the clique into the clique.\<close>
lemma clique_edges_inside_to_node_outside :
assumes "uwellformed G" and "finite (uverts G)"
assumes "0 < p" and "p \<le> card (uverts G)"
assumes "uclique C G p" and "(\<forall>C p'. uclique C G p' \<longrightarrow> p' \<le> p)"
assumes y: "y \<in> uverts G - uverts C"
shows "card {{x,y}| x. x \<in> uverts C \<and> {x,y} \<in> uedges G} \<le> p - 1"
proof (rule ccontr)
txt \<open>For effective proof automation we use a local function definition to compute this
set of edges into the clique from any node @{term y}:\<close>
define S where "S \<equiv> \<lambda>y. {{x,y}| x. x \<in> uverts C \<and> {x,y} \<in> uedges G}"
assume "\<not> card {{x, y} |x. x \<in> uverts C \<and> {x, y} \<in> uedges G} \<le> p - 1"
then have Sy: "card (S y) > p - 1"
using S_def y by auto
have "uclique ({y} \<union> (uverts C),S y \<union> uedges C) G (Suc p)"
proof -
have "card ({y} \<union> uverts C) = Suc p"
using assms(3,5,7) uclique_def
by (metis DiffD2 card_gt_0_iff card_insert_disjoint insert_is_Un)
moreover have "subgraph ({y} \<union> uverts C, (S y) \<union> uedges C) G"
using assms(5,7)
by (auto simp add: uclique_def subgraph_def S_def)
moreover have "({y} \<union> (uverts C),(S y) \<union> uedges C) = complete ({y} \<union> (uverts C))"
proof -
have "(S y) \<union> uedges C \<subseteq> all_edges ({y} \<union> (uverts C))"
using y assms(5) S_def all_edges_def uclique_def complete_def
by (simp, smt (z3) SigmaE SigmaI fst_conv image_iff in_mk_uedge_img insertCI mem_Collect_eq snd_conv subsetI)
moreover have "all_edges ({y} \<union> (uverts C)) \<subseteq> (S y) \<union> uedges C"
proof -
have "\<forall>x\<in>uverts C. {y, x} \<in> S y"
proof -
have "card (S y) = card (uverts C)"
using Sy assms(2,3,5,7) S_def uclique_def card_gt_0_iff
using Finite_Set.surj_card_le [of "uverts C" "S y" "\<lambda>x. {x, y}"]
by (smt (verit, del_insts) Suc_leI Suc_pred' image_iff le_antisym mem_Collect_eq subsetI)
then show ?thesis
using card_edges_nodes_all_edges [OF assms(1), of "uverts C" "S y" y] assms(1,2,5,7) S_def uclique_def
by (smt (verit, ccfv_threshold) DiffE insert_commute mem_Collect_eq subgraph_def subgraph_verts_finite subsetI)
qed
then show ?thesis
using assms(5) all_edges_def S_def uclique_def complete_def mk_uedge.simps in_mk_uedge_img
by (smt (z3) insert_commute SigmaI fst_conv mem_Collect_eq snd_conv SigmaE UnCI image_iff insert_iff insert_is_Un subsetI)
qed
ultimately show ?thesis
by (auto simp add: complete_def)
qed
ultimately show ?thesis
by (simp add: uclique_def complete_def)
qed
then show False
using assms(6)
by fastforce
qed
text \<open>Now that we have this upper bound for the number of edges from a single vertex into the largest clique,
we can calculate the upper bound over all such vertices and edges:\<close>
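text \<open>Informally: each of the $n - p$ vertices outside the clique contributes at most
$p - 1$ such edges, so in total at most $\sum_{y \in V \setminus C} (p-1) = (p-1)(n-p)$ edges cross
between the clique and its complement, as the next lemma shows.\<close>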
lemma clique_edges_inside_to_outside :
assumes G1: "uwellformed G" and G2: "finite (uverts G)"
and p0: "0 < p" and pn: "p \<le> card (uverts G)" and "card(uverts G) = n"
and C: "uclique C G p" and C_max: "(\<forall>C p'. uclique C G p' \<longrightarrow> p' \<le> p)"
shows "card {e \<in> uedges G. e \<inter> uverts C \<noteq> {} \<and> e \<inter> (uverts G - uverts C) \<noteq> {}} \<le> (p - 1) * (n - p)"
proof -
define S where "S \<equiv> \<lambda>y. {{x,y}| x. x \<in> uverts C \<and> {x,y} \<in> uedges G}"
have "card (uverts G - uverts C) = n - p"
using pn C \<open>card(uverts G) = n\<close> G2
apply (simp add: uclique_def)
by (meson card_Diff_subset subgraph_def subgraph_verts_finite)
moreover have "{e \<in> uedges G. e \<inter> uverts C \<noteq> {} \<and> e \<inter> (uverts G - uverts C) \<noteq> {}} = {{x,y}| x y. x \<in> uverts C \<and> y \<in> (uverts G - uverts C) \<and> {x,y} \<in> uedges G}"
proof -
have "e \<in> {e \<in> uedges G. e \<inter> uverts C \<noteq> {} \<and> e \<inter> (uverts G - uverts C) \<noteq> {}}
\<Longrightarrow> \<exists>x y. e = {x,y} \<and> x \<in> uverts C \<and> y \<in> uverts G - uverts C" for e
using G1
apply (simp add: uwellformed_def)
by (smt (z3) DiffD2 card_2_iff disjoint_iff_not_equal insert_Diff insert_Diff_if insert_iff)
then show ?thesis
by auto
qed
moreover have "card {{x,y}| x y. x \<in> uverts C \<and> y \<in> (uverts G - uverts C) \<and> {x,y} \<in> uedges G} \<le> card (uverts G - uverts C) * (p-1)"
proof -
have "card {{x,y}| x y. x \<in> uverts C \<and> y \<in> (uverts G - uverts C) \<and> {x,y} \<in> uedges G}
\<le> (\<Sum>y \<in> (uverts G - uverts C). card (S y))"
proof -
have "finite (uverts G - uverts C)"
using \<open>finite (uverts G)\<close> by auto
have "{{x,y}| x y. x \<in> uverts C \<and> y \<in> (uverts G - uverts C) \<and> {x,y} \<in> uedges G}
= (\<Union>y \<in> (uverts G - uverts C). {{x,y}| x. x \<in> uverts C \<and> {x,y} \<in> uedges G})"
by auto
then show ?thesis
using Groups_Big.card_UN_le [OF \<open>finite (uverts G - uverts C)\<close>,
of "\<lambda>y. {{x, y} |x. x \<in> uverts C \<and> {x, y} \<in> uedges G}"]
using S_def
by auto
qed
moreover have "(\<Sum>y\<in>uverts G - uverts C. card (S y)) \<le> card (uverts G - uverts C) * (p-1)"
proof -
have "card (S y) \<le> p - 1" if y: "y \<in> uverts G - uverts C" for y
using clique_edges_inside_to_node_outside [OF assms(1,2,3,4) C C_max y] S_def y
by simp
then show ?thesis
by (metis id_apply of_nat_eq_id sum_bounded_above)
qed
ultimately show ?thesis
using order_trans
by blast
qed
ultimately show ?thesis
by (smt (verit, ccfv_SIG) mult.commute)
qed
text \<open>Lastly, we need to argue about the number of edges that lie entirely outside of
the greatest clique. Note that this happens in the inductive step of the overarching proof
of Tur\'{a}n's graph theorem, which is why we have access to the inductive hypothesis as an
assumption in the following lemma:\<close>
lemma clique_edges_outside :
assumes "uwellformed G" and "finite (uverts G)"
and p2: "2 \<le> p" and pn: "p \<le> card (uverts G)" and n: "n = card(uverts G)"
and C: "uclique C G (p-1)" and C_max: "(\<forall>C q. uclique C G q \<longrightarrow> q \<le> p-1)"
and IH: "\<And>G y. y < n \<Longrightarrow> finite (uverts G) \<Longrightarrow> uwellformed G \<Longrightarrow> \<forall>C p'. uclique C G p' \<longrightarrow> p' < p
\<Longrightarrow> 2 \<le> p \<Longrightarrow> card (uverts G) = y \<Longrightarrow> real (card (uedges G)) \<le> (1 - 1 / real (p - 1)) * real (y\<^sup>2) / 2"
shows "card {e \<in> uedges G. e \<subseteq> uverts G - uverts C} \<le> (1 - 1 / (p-1)) * (n - p + 1) ^ 2 / 2"
proof -
have "n - card (uverts C) < n"
using C pn p2 n
by (metis Suc_pred' diff_less less_2_cases_iff linorder_not_less not_gr0 uclique_def)
have GC1: "finite (uverts (uverts G - uverts C, {e \<in> uedges G. e \<subseteq> uverts G - uverts C}))"
using assms(2)
by simp
have GC2: "uwellformed (uverts G - uverts C, {e \<in> uedges G. e \<subseteq> uverts G - uverts C})"
using assms(1)
by (auto simp add: uwellformed_def)
have GC3: "\<forall>C' p'. uclique C' (uverts G - uverts C, {e \<in> uedges G. e \<subseteq> uverts G - uverts C}) p' \<longrightarrow> p' < p"
proof (rule ccontr)
assume "\<not>(\<forall>C' p'. uclique C' (uverts G - uverts C, {e \<in> uedges G. e \<subseteq> uverts G - uverts C}) p' \<longrightarrow> p' < p)"
then obtain C' p' where C': "uclique C' (uverts G - uverts C, {e \<in> uedges G. e \<subseteq> uverts G - uverts C}) p'" and p': "p' \<ge> p"
by auto
then have "uclique C' G p'"
using uclique_def subgraph_def
by auto
then show False
using p' p2 C_max
by fastforce
qed
have GC4: "card (uverts (uverts G - uverts C,{e \<in> uedges G. e \<subseteq> uverts G - uverts C})) = n - card (uverts C)"
using C n assms(2) uclique_def subgraph_def
by (simp, meson card_Diff_subset infinite_super)
show ?thesis
using C GC3 IH [OF \<open>n - card (uverts C) < n\<close> GC1 GC2 GC3 \<open>2 \<le> p\<close> GC4] assms(2) n uclique_def
by (simp, smt (verit, best) C One_nat_def Suc_1 Suc_leD clique_max_size of_nat_1 of_nat_diff p2)
qed
subsection \<open>Extending the size of the biggest clique\<close> text_raw \<open>\label{sec:extend_clique}\<close>
text \<open>In this section, we prove that we can add edges to a graph so as to augment the biggest clique
to a larger clique with a specified number of vertices. For that, we need the following lemma:
when so many edges have been added to a graph that there exists a $(p+1)$-clique,
we can remove at least one of the added edges while still retaining a $p$-clique.\<close>
lemma clique_union_size_decr :
assumes "finite (uverts G)" and "uwellformed (uverts G, uedges G \<union> E)"
and "uclique C (uverts G, uedges G \<union> E) (p+1)"
and "card E \<ge> 1"
shows "\<exists>C' E'. card E' < card E \<and> uclique C' (uverts G, uedges G \<union> E') p \<and> uwellformed (uverts G, uedges G \<union> E')"
proof (cases "\<exists>x \<in> uverts C. \<exists>e \<in> E. x \<in> e")
case True
then obtain x where x1: "x \<in> uverts C" and x2: "\<exists>e \<in> E. x \<in> e"
by auto
show ?thesis
proof (rule exI [of _ "C -- x"], rule exI [of _ "{e \<in> E. x \<notin> e}"])
have "card {e \<in> E. x \<notin> e} < card E"
using x2 assms(4)
by (smt (verit) One_nat_def card.infinite diff_is_0_eq mem_Collect_eq minus_nat.diff_0 not_less_eq psubset_card_mono psubset_eq subset_eq)
moreover have "uclique (C -- x) (uverts G, uedges G \<union> {e \<in> E. x \<notin> e}) p"
proof -
have "p = card (uverts (C -- x))"
using x1 assms(3)
by (auto simp add: uclique_def remove_vertex_def)
moreover have "subgraph (C -- x) (uverts G, uedges G \<union> {e \<in> E. x \<notin> e})"
using assms(3)
by (auto simp add: uclique_def subgraph_def remove_vertex_def)
moreover have "C -- x = Ugraph_Lemmas.complete (uverts (C -- x))"
proof -
have 1: "\<And>y. y \<in> mk_uedge ` {uv \<in> uverts C \<times> uverts C. fst uv \<noteq> snd uv} - {A \<in> uedges C. x \<in> A} \<Longrightarrow>
y \<in> mk_uedge ` {uv \<in> (uverts C - {x}) \<times> (uverts C - {x}). fst uv \<noteq> snd uv}"
by (smt (z3) DiffE DiffI SigmaE SigmaI Ugraph_Lemmas.complete_def all_edges_def assms(3) empty_iff image_iff insert_iff mem_Collect_eq mk_uedge.simps snd_conv uclique_def)
have 2: "\<And>y. y \<in> mk_uedge ` {uv \<in> (uverts C - {x}) \<times> (uverts C - {x}). fst uv \<noteq> snd uv} \<Longrightarrow>
y \<in> mk_uedge ` {uv \<in> uverts C \<times> uverts C. fst uv \<noteq> snd uv} - {A \<in> uedges C. x \<in> A}"
by (smt (z3) DiffE DiffI SigmaE SigmaI image_iff insert_iff mem_Collect_eq mk_uedge.simps singleton_iff)
show ?thesis
using assms(3)
apply (simp add: remove_vertex_def complete_def all_edges_def uclique_def)
using 1 2
by (smt (verit, ccfv_SIG) split_pairs subset_antisym subset_eq)
qed
ultimately show ?thesis
by (simp add: uclique_def)
qed
moreover have "uwellformed (uverts G, uedges G \<union> {e \<in> E. x \<notin> e})"
using assms(2)
by (auto simp add: uwellformed_def)
ultimately show "card {e \<in> E. x \<notin> e} < card E \<and>
uclique (C -- x) (uverts G, uedges G \<union> {e \<in> E. x \<notin> e}) p \<and>
uwellformed (uverts G, uedges G \<union> {e \<in> E. x \<notin> e})"
by auto
qed
next
case False
then have "\<And>x. x \<in> uedges C \<Longrightarrow> x \<notin> E"
using assms(2)
by (metis assms(3) card_2_iff' complete_wellformed uclique_def uwellformed_def)
then have "uclique C G (p+1)"
using assms(3)
by (auto simp add: uclique_def subgraph_def uwellformed_def)
show ?thesis
using assms(2,4) clique_size_jumpfree [OF assms(1) _ \<open>uclique C G (p+1)\<close>]
apply (simp add: uwellformed_def)
by (metis Suc_le_eq UnCI Un_empty_right card.empty prod.exhaust_sel)
qed
text \<open>We use the preceding lemma to prove the next result. In this lemma we assume that we have
added too many edges. The goal is then to remove some of the new edges appropriately so
that it is indeed guaranteed that there is no bigger clique.
Two proofs of this lemma are given below; both come down to the same core idea,
an application of the well-ordering principle.
In the first proof it enters through complete induction on the number of added edges:\<close>
lemma clique_union_make_greatest :
fixes p n :: nat
assumes "finite (uverts G)" and "uwellformed G"
and "uwellformed (uverts G, uedges G \<union> E)" and "card(uverts G) \<ge> p"
and "uclique C (uverts G, uedges G \<union> E) p"
and "\<forall>C' q'. uclique C' G q' \<longrightarrow> q' < p" and "1 \<le> card E"
shows "\<exists>C' E'. uwellformed (uverts G, uedges G \<union> E')
\<and> (uclique C' (uverts G, uedges G \<union> E') p)
\<and> (\<forall>C'' q'. uclique C'' (uverts G, uedges G \<union> E') q' \<longrightarrow> q' \<le> p)"
using assms
proof (induction "card E" arbitrary: C E rule: less_induct)
case (less E)
then show ?case
proof (cases "\<exists>A. uclique A (uverts G, uedges G \<union> E) (p+1)")
case True
then obtain A where A: "uclique A (uverts G, uedges G \<union> E) (p+1)"
by auto
obtain C' E' where E'1: "card E' < card E"
and E'2: "uclique C' (uverts G, uedges G \<union> E') p"
and E'3: "uwellformed (uverts G, uedges G \<union> E')"
and E'4: "1 \<le> card E'"
using less(7)
using clique_union_size_decr [OF assms(1) \<open>uwellformed (uverts G, uedges G \<union> E)\<close> A less(8)]
by (metis One_nat_def Suc_le_eq Un_empty_right card_gt_0_iff finite_Un finite_verts_edges fst_conv less.prems(1) less_not_refl prod.collapse snd_conv)
show ?thesis
using less(1) [OF E'1 assms(1,2) E'3 less(5) E'2 less(7) E'4]
using E'1 less(8)
by (meson less_or_eq_imp_le order_le_less_trans)
next
case False
show ?thesis
apply (rule exI [of _ C], rule exI [of _ E])
using clique_size_neg_max [OF _ less(4) False]
using less(2,4,6)
by fastforce
qed
qed
text \<open>In this second, alternative proof the well-ordering principle is applied directly by obtaining the minimum of a set.\<close>
lemma clique_union_make_greatest_alt :
fixes p n :: nat
assumes "finite (uverts G)" and "uwellformed G"
and "uwellformed (uverts G, uedges G \<union> E)" and "card(uverts G) \<ge> p"
and "uclique C (uverts G, uedges G \<union> E) p"
and "\<forall>C' q'. uclique C' G q' \<longrightarrow> q' < p" and "1 \<le> card E"
shows "\<exists>C' E'. uwellformed (uverts G, uedges G \<union> E')
\<and> (uclique C' (uverts G, uedges G \<union> E') p)
\<and> (\<forall>C'' q'. uclique C'' (uverts G, uedges G \<union> E') q' \<longrightarrow> q' \<le> p)"
proof -
define P where "P \<equiv> \<lambda>E. uwellformed (uverts G, uedges G \<union> E) \<and> (\<exists>C. uclique C (uverts G, uedges G \<union> E) p)"
have "finite {y. \<exists>E. P E \<and> card E = y}"
proof -
have "\<And>E. P E \<Longrightarrow> E \<subseteq> Pow (uverts G)"
by (auto simp add: P_def uwellformed_def)
then have "finite {E. P E}"
using assms(1)
by (metis Collect_mono Pow_def finite_Pow_iff rev_finite_subset)
then show ?thesis
by simp
qed
obtain F where F1: "P F"
and F2: "card F = Min {y. \<exists>E. P E \<and> card E = y}"
and F3: "card F > 0"
using assms(1,3,4,5,6) Min_in \<open>finite {y. \<exists>E. P E \<and> card E = y}\<close> P_def CollectD Collect_empty_eq
by (smt (verit, ccfv_threshold) Un_empty_right card_gt_0_iff finite_Un finite_verts_edges fst_conv le_refl linorder_not_le prod.collapse snd_conv)
have "p > 0"
using assms(6) clique_exists bot_nat_0.not_eq_extremum
by blast
then show ?thesis
proof (cases "\<exists>C. uclique C (uverts G, uedges G \<union> F) (p + 1)")
case True
then obtain F' where F'1 : "P F'" and F'2: "card F' < card F"
using F1 F2 F3 clique_union_size_decr [OF assms(1), of F _ p] P_def
by (smt (verit) One_nat_def Suc_eq_plus1 Suc_leI add_2_eq_Suc' assms(1) clique_size_jumpfree fst_conv)
then show ?thesis
using F2 \<open>finite {y. \<exists>F. P F \<and> card F = y}\<close> Min_gr_iff
by fastforce
next
case False
then show ?thesis
using clique_size_neg_max [OF _ _ False]
using assms(1) F1 P_def
by (smt (verit, ccfv_SIG) Suc_eq_plus1 Suc_leI fst_conv linorder_not_le)
qed
qed
text \<open>Finally, with this lemma we can turn to this section’s main challenge of increasing the
greatest clique size of a graph by adding edges.\<close>
lemma clique_add_edges_max :
fixes p :: nat
assumes "finite (uverts G)"
and "uwellformed G" and "card(uverts G) > p"
and "\<exists>C. uclique C G p" and "(\<forall>C q'. uclique C G q' \<longrightarrow> q' \<le> p)"
and "q \<le> card(uverts G)" and "p \<le> q"
shows "\<exists>E. uwellformed (uverts G, uedges G \<union> E) \<and> (\<exists>C. uclique C (uverts G, uedges G \<union> E) q)
\<and> (\<forall>C q'. uclique C (uverts G, uedges G \<union> E) q' \<longrightarrow> q' \<le> q)"
proof (cases "p < q")
case True
then show ?thesis
proof -
have "\<exists>E. uwellformed (uverts G, uedges G \<union> E) \<and> (\<exists>C. uclique C (uverts G, uedges G \<union> E) q) \<and> card E \<ge> 1"
apply (rule exI [of _ "all_edges (uverts G)"])
using Set.Un_absorb1 [OF wellformed_all_edges [OF assms(2)]]
using complete_wellformed [of "uverts G"] clique_complete [OF assms(1,6)]
using all_edges_def assms(1,5)
apply (simp add: complete_def)
by (metis Suc_leI True Un_empty_right all_edges_finite card_gt_0_iff linorder_not_less prod.collapse)
then obtain E C where E1: "uwellformed (uverts G, uedges G \<union> E)"
and E2: "uclique C (uverts G, uedges G \<union> E) q"
and E3: "card E \<ge> 1"
by auto
show ?thesis
using clique_union_make_greatest [OF assms(1,2) E1 assms(6) E2 _ E3] assms(5) True
using order_le_less_trans
by blast
qed
next
case False
show ?thesis
apply (rule exI [of _ "{}"])
using False assms(2,4,5,7)
by simp
qed
section \<open>Properties of the upper edge bound\<close>
text \<open>In this section we prove results about the upper edge bound in Tur\'{a}n's theorem.
The first lemma proves that the upper bounds on the edge counts of the individual parts sum up exactly to the overall upper bound.\<close>
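(* Informal sketch of the underlying arithmetic, with a = p - 1 (so a - 1 = p - 2 and
   n - a = n - p + 1): from a^2 + (n-a)^2 + 2*a*(n-a) = (a + (n-a))^2 = n^2, multiplying
   by (a-1)/a = 1 - 1/a and halving gives
   a*(a-1)/2 + (1 - 1/a)*(n-a)^2/2 + (a-1)*(n-a) = (1 - 1/a)*n^2/2. *)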
lemma turan_sum_eq :
fixes n p :: nat
assumes "p \<ge> 2" and "p \<le> n"
shows "(p-1) * (p-2) / 2 + (1 - 1 / (p-1)) * (n - p + 1) ^ 2 / 2 + (p - 2) * (n - p + 1) = (1 - 1 / (p-1)) * n^2 / 2"
proof -
have "a * (a-1) / 2 + (1 - 1 / a) * (n - a) ^ 2 / 2 + (a - 1) * (n - a) = (1 - 1 / a) * n^2 / 2"
if a1: "a \<ge> 1" and a2: "n \<ge> a"
for a :: nat
proof -
have "a\<^sup>2 + (n - a)\<^sup>2 + a * (n - a) * 2 = n\<^sup>2"
using a2
apply (simp flip: Groups.ab_semigroup_mult_class.mult.commute [of 2 "a * (n - a)"])
apply (simp add: Semiring_Normalization.comm_semiring_1_class.semiring_normalization_rules(18) [of 2 a "(n - a)"])
by (simp flip: Power.comm_semiring_1_class.power2_sum [of a "n-a"])
then have "((a - 1) / a) * (a ^ 2 + (n - a) ^ 2 + a * (n - a) * 2) = ((a - 1) / a) * n^2"
by presburger
then have "(((a - 1) / a) * a ^ 2 + ((a - 1) / a) * (n - a) ^ 2 + ((a - 1) / a) * a * (n - a) * 2) = ..."
using Rings.semiring_class.distrib_left [of "(a - 1) / a" "a\<^sup>2 + (n - a)\<^sup>2" "a * (n - a) * 2"]
using Rings.semiring_class.distrib_left [of "(a - 1) / a" "a\<^sup>2" "(n - a)\<^sup>2"]
by auto
moreover have "((a - 1) / a) * a ^ 2 = a * (a-1)"
by (simp add: power2_eq_square)
ultimately have "a * (a-1) + ((a - 1) / a) * (n - a) ^ 2 + (a - 1) * (n - a) * 2 = ((a - 1) / a) * n^2"
using a1 a2
by auto
moreover have "1 - 1 / a = (a - 1) / a"
by (smt (verit, del_insts) One_nat_def Suc_pred diff_divide_distrib diff_is_0_eq of_nat_1 of_nat_diff of_nat_le_0_iff of_nat_le_iff of_nat_less_iff right_inverse_eq that)
ultimately have "a * (a-1) + (1 - 1 / a) * (n - a) ^ 2 + (a - 1) * (n - a) * 2 = (1 - 1 / a) * n^2"
by simp
then show ?thesis
by simp
qed
moreover have "p - 1 \<ge> 1"
using \<open>p \<ge> 2\<close> by auto
moreover have "n \<ge> p - 1"
using assms(2) by auto
ultimately show ?thesis
by (smt (verit) assms Nat.add_diff_assoc2 Nat.diff_diff_right diff_diff_left le_eq_less_or_eq less_Suc_eq_le linorder_not_less nat_1_add_1 plus_1_eq_Suc)
qed
text \<open>The next fact proves that the upper bound on the number of edges is monotonically increasing in the size of the biggest clique.\<close>
lemma turan_mono :
fixes n p q :: nat
assumes "0 < q" and "q < p" and "p \<le> n"
shows "(1 - 1 / q) * n^2 / 2 \<le> (1 - 1 / (p-1)) * n^2 / 2"
using assms
by (simp add: Extended_Nonnegative_Real.divide_right_mono_ennreal Real.inverse_of_nat_le)
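(* Intuition: 0 < q and q < p give q \<le> p - 1, hence 1/q \<ge> 1/(p-1) and thus
   1 - 1/q \<le> 1 - 1/(p-1); multiplying by n^2/2 \<ge> 0 preserves the inequality. *)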
section \<open>Tur\'{a}n's Graph Theorem\<close>
text \<open>In this section we turn to the direct adaptation of Tur\'{a}n's original proof as presented by Aigner and Ziegler \cite{Aigner2018}.\<close>
theorem turan :
fixes p n :: nat
assumes "finite (uverts G)"
and "uwellformed G" and "\<forall>C p'. uclique C G p' \<longrightarrow> p' < p" and "p \<ge> 2" and "card(uverts G) = n"
shows "card (uedges G) \<le> (1 - 1 / (p-1)) * n^2 / 2" using assms
proof (induction n arbitrary: G rule: less_induct)
case (less n)
then show ?case
proof (cases "n < p")
case True
show ?thesis
proof (cases "n")
case 0
with less True show ?thesis
by (auto simp add: wellformed_uverts_0)
next
case (Suc n')
with True have "(1 - 1 / real n) \<le> (1 - 1 / real (p - 1))"
by (metis diff_Suc_1 diff_left_mono inverse_of_nat_le less_Suc_eq_le linorder_not_less list_decode.cases not_add_less1 plus_1_eq_Suc)
moreover have "real (card (uedges G)) \<le> (1 - 1 / real n) * real (n\<^sup>2) / 2"
using ugraph_max_edges [OF less(3,6,2)]
by (smt (verit, ccfv_SIG) left_diff_distrib mult.right_neutral mult_of_nat_commute nonzero_mult_div_cancel_left of_nat_1 of_nat_mult power2_eq_square times_divide_eq_left)
ultimately show ?thesis
using Rings.ordered_semiring_class.mult_right_mono divide_less_eq_numeral1(1) le_less_trans linorder_not_less of_nat_0_le_iff
by (smt (verit, ccfv_threshold) divide_nonneg_nonneg times_divide_eq_right)
qed
next
case False
show ?thesis
proof -
obtain C q where C: "uclique C G q"
and C_max: "(\<forall>C q'. uclique C G q' \<longrightarrow> q' \<le> q)"
and q: "q < card (uverts G)"
using clique_exists_gt0 [OF \<open>finite (uverts G)\<close>] False \<open>p \<ge> 2\<close> less.prems(1,3,5)
by (metis card.empty card_gt_0_iff le_eq_less_or_eq order_less_le_trans pos2)
obtain E C' where E: "uwellformed (uverts G, uedges G \<union> E)"
and C': "(uclique C' (uverts G, uedges G \<union> E) (p-1))"
and C'_max: "(\<forall>C q'. uclique C (uverts G, uedges G \<union> E) q' \<longrightarrow> q' \<le> p-1)"
using clique_add_edges_max [OF \<open>finite (uverts G)\<close> \<open>uwellformed G\<close> q _ C_max, of "p-1"]
using C less(4) less(5) False \<open>card (uverts G) = n\<close>
by (smt (verit) One_nat_def Suc_leD Suc_pred less_Suc_eq_le linorder_not_less order_less_le_trans pos2)
have "card {e \<in> uedges G \<union> E. e \<subseteq> uverts C'} = (p-1) * (p-2) / 2"
using clique_edges_inside [OF E _ _ _ C'] False less(2) less.prems(4) C'
by (smt (verit, del_insts) Collect_cong Suc_1 add_leD1 clique_max_size fst_conv of_nat_1 of_nat_add of_nat_diff of_nat_mult plus_1_eq_Suc snd_conv)
moreover have "card {e \<in> uedges G \<union> E. e \<subseteq> uverts G - uverts C'} \<le> (1 - 1 / (p-1)) * (n - p + 1) ^ 2 / 2"
proof -
have "real(card{e \<in> uedges (uverts G, uedges G \<union> E). e \<subseteq> uverts (uverts G, uedges G \<union> E) - uverts C'})
\<le> (1 - 1 / (real p - 1)) * (real n - real p + 1)\<^sup>2 / 2"
using clique_edges_outside [OF E _ less(5) _ _ C' C'_max, of n] linorder_class.leI [OF False] less(1,2,6)
by (metis (no_types, lifting) fst_conv)
then show ?thesis
by (simp, smt (verit, best) False One_nat_def Suc_1 Suc_leD add.commute leI less.prems(4) of_nat_1 of_nat_diff)
qed
moreover have "card {e \<in> uedges G \<union> E. e \<inter> uverts C' \<noteq> {} \<and> e \<inter> (uverts G - uverts C') \<noteq> {}} \<le> (p - 2) * (n - p + 1)"
using clique_edges_inside_to_outside [OF E _ _ _ _ C' C'_max, of n] less(2,5,6)
by (simp, metis (no_types, lifting) C' False Nat.add_diff_assoc Nat.add_diff_assoc2 One_nat_def Suc_1 clique_max_size fst_conv leI mult_Suc_right plus_1_eq_Suc)
ultimately have "real (card (uedges G \<union> E)) \<le> (1 - 1 / real (p - 1)) * real (n\<^sup>2) / 2"
using graph_partition_edges_card [OF _ E, of "uverts C'"]
using less(2) turan_sum_eq [OF \<open>2 \<le> p\<close>, of n] False C' uclique_def subgraph_def
by (smt (verit) Collect_cong fst_eqD linorder_not_le of_nat_add of_nat_mono snd_eqD)
then show ?thesis
using less(2) E finite_verts_edges Finite_Set.card_mono [OF _ Set.Un_upper1 [of "uedges G" E]]
by force
qed
qed
qed
section \<open>A simplified proof of Tur\'{a}n's Graph Theorem\<close>
text \<open>In this section we discuss a simplified proof of Tur\'{a}n's Graph Theorem which uses an idea put forward by the author:
Instead of increasing the size of the biggest clique, it is also possible to use the fact that
the expression in Tur\'{a}n's graph theorem is monotonically increasing in the size of the biggest clique (Lemma @{thm [source] turan_mono}).
Hence, it suffices to prove the upper bound for the actual biggest clique size in the graph.
Afterwards, the monotonicity provides the desired inequality.
The simplifications in the proof are annotated accordingly.\<close>
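(* In symbols, for the actual greatest clique size q with 0 < q < p and p \<le> n:
   card (uedges G) \<le> (1 - 1/q) * n^2 / 2 \<le> (1 - 1/(p-1)) * n^2 / 2,
   where the second inequality is exactly turan_mono. *)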
theorem turan' :
fixes p n :: nat
assumes "finite (uverts G)"
and "uwellformed G" and "\<forall>C p'. uclique C G p' \<longrightarrow> p' < p" and "p \<ge> 2" and "card(uverts G) = n"
shows "card (uedges G) \<le> (1 - 1 / (p-1)) * n^2 / 2" using assms
proof (induction n arbitrary: p G rule: less_induct)
txt \<open>In the simplified proof we also need to generalize over the biggest clique size @{term p}
so that we can leverage the induction hypothesis
for the pre-existing biggest clique size, which might be smaller than @{term "p-1"}.\<close>
case (less n)
then show ?case
proof (cases "n < p")
case True
show ?thesis
proof (cases "n")
case 0
with less True show ?thesis
by (auto simp add: wellformed_uverts_0)
next
case (Suc n')
with True have "(1 - 1 / real n) \<le> (1 - 1 / real (p - 1))"
by (metis diff_Suc_1 diff_left_mono inverse_of_nat_le less_Suc_eq_le linorder_not_less list_decode.cases not_add_less1 plus_1_eq_Suc)
moreover have "real (card (uedges G)) \<le> (1 - 1 / real n) * real (n\<^sup>2) / 2"
using ugraph_max_edges [OF less(3,6,2)]
by (smt (verit, ccfv_SIG) left_diff_distrib mult.right_neutral mult_of_nat_commute nonzero_mult_div_cancel_left of_nat_1 of_nat_mult power2_eq_square times_divide_eq_left)
ultimately show ?thesis
using Rings.ordered_semiring_class.mult_right_mono divide_less_eq_numeral1(1) le_less_trans linorder_not_less of_nat_0_le_iff
by (smt (verit, ccfv_threshold) divide_nonneg_nonneg times_divide_eq_right)
qed
next
case False
show ?thesis
proof -
from False \<open>p \<ge> 2\<close>
obtain C q where C: "uclique C G q"
and C_max: "(\<forall>C q'. uclique C G q' \<longrightarrow> q' \<le> q)"
and q1: "q < card (uverts G)" and q2: "0 < q"
and pq: "q < p"
using clique_exists_gt0 [OF \<open>finite (uverts G)\<close>] clique_exists1 less.prems(1,3,5)
by (metis card.empty card_gt_0_iff le_eq_less_or_eq order_less_le_trans pos2)
txt \<open>In the unsimplified proof we extend this existing greatest clique C to a clique of size @{term "p-1"}.
This part is made superfluous in the simplified proof.
In particular, Section \ref{sec:extend_clique} is also unneeded for this simplified proof.
From here on the proof is analogous to the unsimplified proof
with the potentially smaller clique of size @{term q} in place of the extended clique.\<close>
have "card {e \<in> uedges G. e \<subseteq> uverts C} = q * (q-1) / 2"
using clique_edges_inside [OF less(3,2) _ _ C] q1 less(6)
by auto
moreover have "card {e \<in> uedges G. e \<subseteq> uverts G - uverts C} \<le> (1 - 1 / q) * (n - q) ^ 2 / 2"
proof -
have "real (card {e \<in> uedges G. e \<subseteq> uverts G - uverts C})
\<le> (1 - 1 / (real (q + 1) - 1)) * (real n - real (q + 1) + 1)\<^sup>2 / 2"
using clique_edges_outside [OF less(3,2) _ _ , of "q+1" n C] C C_max q1 q2 linorder_class.leI [OF False] less(1,6)
by (smt (verit, ccfv_threshold) Suc_1 Suc_eq_plus1 Suc_leI diff_add_inverse2 zero_less_diff)
then show ?thesis
using less.prems(5) q1
by (simp add: of_nat_diff)
qed
moreover have "card {e \<in> uedges G. e \<inter> uverts C \<noteq> {} \<and> e \<inter> (uverts G - uverts C) \<noteq> {}} \<le> (q - 1) * (n - q)"
using clique_edges_inside_to_outside [OF less(3,2) q2 _ less(6) C C_max] q1
by simp
ultimately have "real (card (uedges G)) \<le> (1 - 1 / real q) * real (n\<^sup>2) / 2"
using graph_partition_edges_card [OF less(2,3), of "uverts C"]
using C uclique_def subgraph_def q1 q2 less.prems(5) turan_sum_eq [of "Suc q" n]
by (smt (verit) Nat.add_diff_assoc Suc_1 Suc_le_eq Suc_le_mono add.commute add.right_neutral diff_Suc_1 diff_Suc_Suc of_nat_add of_nat_mono plus_1_eq_Suc)
then show ?thesis
txt \<open>The final statement can then easily be derived with the monotonicity (Lemma @{thm [source] turan_mono}).\<close>
using turan_mono [OF q2 pq, of n] False
by linarith
qed
qed
qed
end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/Turans_Graph_Theorem/Turan.thy"}
|
"""
scipy.interpolate module
- [Interpolation (scipy.interpolate) Reference Guide](https://docs.scipy.org/doc/scipy/reference/interpolate.html)
# Examples
You can interpolate 1D data:
```julia-repl
julia> x = collect(0:10);
julia> y = exp.(-x/3.0);
julia> f = SciPy.interpolate.interp1d(x, y);
julia> f(0.5)
0-dimensional Array{Float64,0}:
0.8582656552868946
```
"""
module interpolate
using PyCall
import PyCall: hasproperty # Base.hasproperty in Julia 1.2
import ..pyinterpolate
import .._generate_docstring
import ..LazyHelp
const _ignore_funcs = ["interpolate"]
# This list can be generated by print_scipy_api_list.py script
all_properties = ["Akima1DInterpolator", "BPoly", "BSpline", "BarycentricInterpolator", "BivariateSpline", "CloughTocher2DInterpolator", "CubicHermiteSpline", "CubicSpline", "InterpolatedUnivaria
teSpline", "KroghInterpolator", "LSQBivariateSpline", "LSQSphereBivariateSpline", "LSQUnivariateSpline", "LinearNDInterpolator", "NdPPoly", "NearestNDInterpolator", "PPoly", "Pch
ipInterpolator", "Rbf", "RectBivariateSpline", "RectSphereBivariateSpline", "RegularGridInterpolator", "SmoothBivariateSpline", "SmoothSphereBivariateSpline", "UnivariateSpline",
"approximate_taylor_polynomial", "barycentric_interpolate", "bisplev", "bisplrep", "dfitpack", "fitpack", "fitpack2", "griddata", "insert", "interp1d", "interp2d", "interpn", "i
nterpnd", "interpolate", "krogh_interpolate", "lagrange", "make_interp_spline", "make_lsq_spline", "ndgriddata", "pade", "pchip_interpolate", "polyint", "rbf", "spalde", "splanti
der", "splder", "splev", "splint", "splprep", "splrep", "sproot"]
# Generate, for each scipy.interpolate name above, a Julia wrapper with a lazily fetched docstring
for f in all_properties
f in _ignore_funcs && continue
sf = Symbol(f)
@eval @doc LazyHelp(pyinterpolate, $f) $sf(args...; kws...) = pycall(pyinterpolate.$f, PyAny, args...; kws...)
end
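# Each generated wrapper simply forwards to the underlying Python callable; e.g. (illustrative)
# `griddata(points, values, xi)` inside this module is equivalent to
# `pycall(pyinterpolate.griddata, PyAny, points, values, xi)`.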
function __init__()
copy!(pyinterpolate, pyimport_conda("scipy.interpolate", "scipy"))
end
end # module
|
{"hexsha": "7086f29ff736260c963104d11613e98050941dd2", "size": 1927, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/interpolate.jl", "max_stars_repo_name": "AtsushiSakai/SciPy.jl", "max_stars_repo_head_hexsha": "073706533f68989ccd761d813cd35593ea7c2a50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-06-06T05:11:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T02:17:44.000Z", "max_issues_repo_path": "src/interpolate.jl", "max_issues_repo_name": "AtsushiSakai/SciPy.jl", "max_issues_repo_head_hexsha": "073706533f68989ccd761d813cd35593ea7c2a50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2020-05-30T13:45:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-01T05:15:36.000Z", "max_forks_repo_path": "src/interpolate.jl", "max_forks_repo_name": "AtsushiSakai/SciPy.jl", "max_forks_repo_head_hexsha": "073706533f68989ccd761d813cd35593ea7c2a50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-06-18T09:37:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-03T02:47:23.000Z", "avg_line_length": 36.358490566, "max_line_length": 195, "alphanum_fraction": 0.7415672029, "num_tokens": 607}
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import os
from astropy.table import Table, vstack
from collections import OrderedDict
## Import some helper functions; you can see their definitions by uncommenting the bash shell command
from desispec.workflow.exptable import default_obstypes_for_exptable
from desispec.workflow.utils import define_variable_from_environment, pathjoin
from desispec.io.util import difference_camwords, parse_badamps, create_camword, decode_camword
from desiutil.log import get_logger
###############################################
##### Processing Table Column Definitions #####
###############################################
## To eventually be turned into a full-fledged data model. For now, a brief description.
# EXPID, int, the exposure IDs associated with the job. Always a np.array, even for a single exposure.
# OBSTYPE, string, the obstype as defined by ICS.
# TILEID, int, the TILEID of the tile the exposure observed.
# NIGHT, int, the night of the observation.
# BADAMPS, string, comma list of "{camera}{petal}{amp}", i.e. "[brz][0-9][ABCD]". Example: 'b7D,z8A'
# in the csv this is saved as a semicolon separated list
# LASTSTEP, string, the last step the pipeline should run through for the given exposure. Inclusive of last step.
# EXPFLAG, np.ndarray, set of flags that describe the exposure.
# PROCCAMWORD, string, The result of difference_camwords(CAMWORD, BADCAMWORD) from those exposure table entries.
# This summarizes the cameras that should be processed for the given exposure/job
# CALIBRATOR, int, A 0 signifies that the job is not associated with a calibration exposure. 1 means that it is.
# INTID, int, an internally generated ID for a single job within a production. Only unique within a production and
#             not guaranteed to be the same between different production runs (e.g. between a daily
#             run and a large batch reprocessing run).
# OBSDESC, string, describes the observation in more detail than obstype. Currently only used for DITHER on dither tiles.
# JOBDESC, string, describes the job that the row defines. For a single science exposure that could be 'prestdstar' or
#                  'poststdstar'. For joint science that would be 'stdstarfit'. For individual arcs it is 'arc', for
#                  joint arcs it is 'psfnight'. For individual flats it is 'flat', for joint flats it is 'psfnightly'.
# LATEST_QID, int, the most recent Slurm ID assigned to the submitted job.
# SUBMIT_DATE, int, the 'unix time' of the job submission in seconds (int(time.time())).
# STATUS, string, the most recent Slurm status of the job. See docstring of desispec.workflow.queue.get_resubmission_states
# for a list and description.
# SCRIPTNAME, string, the name of the script submitted to Slurm. Due to astropy table constraints, this is truncated
# to a maximum of 40 characters.
# INT_DEP_IDS, np.array, internal ID's of all jobs that are dependencies for the current row. I.e. inputs to the current job.
# LATEST_DEP_QID, np.array, the most recent Slurm ID's for the dependencies jobs uniquely identified by internal ID's
# in INT_DEP_IDS
# ALL_QIDS, np.array, a list of all Slurm ID's associated with submissions of this job. Useful if multiple submissions
# were made because of node failures or any other issues that were later resolved (or not resolved).
##################################################
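## Illustrative sketch of a single processing-table row as a dict (all values hypothetical):
##   {'EXPID': np.array([123456]), 'OBSTYPE': 'science', 'TILEID': 80605, 'NIGHT': 20200314,
##    'BADAMPS': '', 'LASTSTEP': 'all', 'PROCCAMWORD': 'a0123456789', 'JOBDESC': 'prestdstar',
##    'STATUS': 'U'}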
def get_processing_table_column_defs(return_default_values=False, overlap_only=False, unique_only=False):
"""
Contains the column names, data types, and default row values for a DESI processing table. It returns
the names and datatypes with the defaults being given with an optional flag. Returned as 2 (or 3) lists.
Args:
return_default_values, bool. True if you want the default values returned.
overlap_only, bool. Only return the columns that are common to both processing and exposure tables.
unique_only, bool. Only return columns that are not found in an exposure table.
Returns:
colnames, list. List of column names for a processing table.
coldtypes, list. List of column datatypes for the names in colnames.
coldeflts, list. Optionally returned if return_default_values is True. List of default values for the
corresponding colnames.
"""
## Define the column names for the internal production table and their respective datatypes, split in two
## only for readability's sake
colnames1 = ['EXPID' , 'OBSTYPE', 'TILEID', 'NIGHT' ]
coltypes1 = [np.ndarray , 'S10' , int , int ]
coldeflt1 = [np.ndarray(shape=0).astype(int), 'unknown', -99 , 20000101]
colnames1 += ['BADAMPS', 'LASTSTEP', 'EXPFLAG' ]
coltypes1 += ['S30' , 'S30' , np.ndarray ]
coldeflt1 += ['' , 'all' , np.array([], dtype=str)]
colnames2 = [ 'PROCCAMWORD' ,'CALIBRATOR', 'INTID', 'OBSDESC', 'JOBDESC', 'LATEST_QID']
coltypes2 = [ 'S40' , np.int8 , int , 'S16' , 'S12' , int ]
coldeflt2 = [ 'a0123456789' , 0 , -99 , '' , 'unknown', -99 ]
colnames2 += [ 'SUBMIT_DATE', 'STATUS', 'SCRIPTNAME']
coltypes2 += [ int , 'S10' , 'S40' ]
coldeflt2 += [ -99 , 'U' , '' ]
colnames2 += ['INT_DEP_IDS' , 'LATEST_DEP_QID' , 'ALL_QIDS' ]
coltypes2 += [np.ndarray , np.ndarray , np.ndarray ]
coldeflt2 += [np.ndarray(shape=0).astype(int), np.ndarray(shape=0).astype(int), np.ndarray(shape=0).astype(int)]
colnames = colnames1 + colnames2
coldtypes = coltypes1 + coltypes2
coldeflts = coldeflt1 + coldeflt2
if return_default_values:
if overlap_only:
return colnames1, coltypes1, coldeflt1
elif unique_only:
return colnames2, coltypes2, coldeflt2
else:
return colnames, coldtypes, coldeflts
else:
if overlap_only:
return colnames1, coltypes1
elif unique_only:
return colnames2, coltypes2
else:
return colnames, coldtypes
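# Minimal usage sketch (names as defined in this module):
#   colnames, coldtypes, coldeflts = get_processing_table_column_defs(return_default_values=True)
#   empty_ptable = instantiate_processing_table(colnames, coldtypes)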
def default_exptypes_for_proctable():
"""
Defines the exposure types to be recognized by the workflow and saved in the processing table by default.
Returns:
list. A list of default obstypes to be included in a processing table.
"""
## Define the science types to be included in the exposure table (case insensitive)
return ['arc', 'dark', 'flat', 'science', 'twilight', 'sci', 'dither']
def get_processing_table_name(specprod=None, prodmod=None, extension='csv'):
"""
Defines the default processing name given the specprod of the production and the optional extension.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
prodmod, str. Additional str that can be added to the production table name to further differentiate it.
Used in daily workflow to add the night to the name and make it unique from other nightly tables.
extension, str. The extension (and therefore data format) without a leading period of the saved table.
Default is 'csv'.
Returns:
str. The processing table name given the input night and extension.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
if prodmod is not None:
prodname_modifier = '-' + str(prodmod)
elif 'SPECPROD_MOD' in os.environ:
prodname_modifier = '-' + os.environ['SPECPROD_MOD']
else:
prodname_modifier = ''
return f'processing_table_{specprod}{prodname_modifier}.{extension}'
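# Example (hypothetical inputs): get_processing_table_name(specprod='daily', prodmod=20200314)
# returns 'processing_table_daily-20200314.csv'.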
def get_processing_table_path(specprod=None):
"""
Defines the default path to save a processing table. If specprod is not given, the environment variable
'SPECPROD' must exist.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
Returns:
str. The full path to the directory where the processing table should be written (or is already written). This
does not include the filename.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
basedir = define_variable_from_environment(env_name='DESI_SPECTRO_REDUX',
var_descr="The specprod path")
path = pathjoin(basedir, specprod, 'processing_tables')
return path
def get_processing_table_pathname(specprod=None, prodmod=None, extension='csv'): # base_path,specprod
"""
Defines the default pathname to save a processing table.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
prodmod, str. Additional str that can be added to the production table name to further differentiate it.
Used in daily workflow to add the night to the name and make it unique from other nightly tables.
extension, str. The extension (and therefore data format) without a leading period of the saved table.
Default is 'csv'.
Returns:
str. The full pathname where the processing table should be written (or is already written). This
includes the filename.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
path = get_processing_table_path(specprod)
table_name = get_processing_table_name(specprod, prodmod, extension)
return pathjoin(path, table_name)
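# Example (hypothetical environment): with DESI_SPECTRO_REDUX='/desi/spectro/redux' and
# SPECPROD='daily', get_processing_table_pathname() returns
# '/desi/spectro/redux/daily/processing_tables/processing_table_daily.csv'.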
def instantiate_processing_table(colnames=None, coldtypes=None, rows=None):
"""
Create an empty processing table with proper column names and datatypes. If rows is given, it inserts the rows
into the table, otherwise it returns a table with no rows.
Args:
colnames, list. List of column names for a processing table.
coldtypes, list. List of column datatypes for the names in colnames.
rows, list or np.array of Table.Rows or dicts. An iterable set of Table.Row's or dicts with keys/colnames and value
pairs that match the default column names and data types of the
default exposure table.
Returns:
processing_table, Table. An astropy Table with the column names and data types for a DESI workflow processing
table. If the input rows was not None, it contains those rows, otherwise it has no rows.
"""
## Define the column names for the exposure table and their respective datatypes
if colnames is None or coldtypes is None:
colnames, coldtypes = get_processing_table_column_defs()
processing_table = Table(names=colnames, dtype=coldtypes)
if rows is not None:
for row in rows:
processing_table.add_row(row)
return processing_table
def exptable_to_proctable(input_exptable, obstypes=None):
"""
Converts an exposure table to a processing table and an unprocessed table. The columns unique to a processing table
are filled with default values. If comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
stored in the processing table.
Args:
input_exptable, Table. An exposure table. Each row will be converted to a row of a processing table. If
comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
stored in the processing table.
obstypes, list or np.array. Optional. A list of exposure OBSTYPE's that should be processed (and therefore
added to the processing table).
Returns:
processing_table, Table. The output processing table. Each row corresponds with an exposure that should be
processed.
unprocessed_table, Table. The output unprocessed table. Each row is an exposure that should not be processed.
"""
log = get_logger()
exptable = input_exptable.copy()
if obstypes is None:
obstypes = default_obstypes_for_exptable()
## Define the column names for the exposure table and their respective datatypes
colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)
# for col in ['COMMENTS']: #'HEADERERR',
# if col in exptable.colnames:
# for ii, arr in enumerate(exptable[col]):
# for item in arr:
# clean_item = item.strip(' \t')
# if len(clean_item) > 6:
# keyval = None
# for symb in [':', '=']:
# if symb in clean_item:
# keyval = [val.strip(' ') for val in clean_item.split(symb)]
# break
# if keyval is not None and len(keyval) == 2 and keyval[0].upper() in exptable.colnames:
# key, newval = keyval[0].upper(), keyval[1]
# expid, oldval = exptable['EXPID'][ii], exptable[key][ii]
# log.info(
# f'Found a requested correction to ExpID {expid}: Changing {key} val from {oldval} to {newval}')
# exptable[key][ii] = newval
good_exps = (exptable['EXPFLAG'] == 0)
good_types = np.array([val in obstypes for val in exptable['OBSTYPE']]).astype(bool)
good = (good_exps & good_types)
good_table = exptable[good]
unprocessed_table = exptable[~good]
## Remove columns that aren't relevant to processing, they will be added back in the production tables for
## end user viewing
for col in ['REQRA', 'REQDEC', 'TARGTRA', 'TARGTDEC', 'HEADERERR', 'COMMENTS', 'BADEXP']:
if col in exptable.colnames:
good_table.remove_column(col)
if len(good_table) > 0:
rows = []
for erow in good_table:
prow = erow_to_prow(erow)#, colnames, coldtypes, coldefaults)
rows.append(prow)
processing_table = Table(names=colnames, dtype=coldtypes, rows=rows)
else:
processing_table = Table(names=colnames, dtype=coldtypes)
return processing_table, unprocessed_table
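# Usage sketch: proc_table, unproc_table = exptable_to_proctable(input_exptable)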
def erow_to_prow(erow):#, colnames=None, coldtypes=None, coldefaults=None, joinsymb='|'):
"""
Converts an exposure table row to a processing table row. The columns unique to a processing table
are filled with default values. If comments are made in COMMENTS or HEADERERR, those are ignored.
Args:
erow, Table.Row or dict. An exposure table row. The row will be converted to a row of a processing table.
If comments are made in COMMENTS or HEADERERR, those are ignored.
Returns:
prow, dict. The output processing table row.
"""
log = get_logger()
erow = table_row_to_dict(erow)
row_names = list(erow.keys())
## Define the column names for the exposure table and their respective datatypes
#if colnames is None:
colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)
colnames, coldtypes, coldefaults = np.array(colnames,dtype=object), \
np.array(coldtypes,dtype=object), \
np.array(coldefaults,dtype=object)
prow = dict()
for nam, typ, defval in zip(colnames, coldtypes, coldefaults):
if nam == 'PROCCAMWORD':
if 'BADCAMWORD' in row_names:
badcamword = erow['BADCAMWORD']
else:
badcamword = ''
prow[nam] = difference_camwords(erow['CAMWORD'],badcamword)
elif nam == 'OBSDESC':
if nam in colnames:
prow[nam] = coldefaults[colnames == nam][0]
else:
prow[nam] = ''
for word in ['dither', 'acquisition', 'focus', 'test']:
if 'PROGRAM' in row_names and word in erow['PROGRAM'].lower():
prow[nam] = word
elif nam == 'EXPID':
prow[nam] = np.array([erow[nam]])
elif nam in row_names:
prow[nam] = erow[nam]
else:
prow[nam] = defval
## For obstypes that aren't science, BADAMPS loses its relevance. For processing,
## convert those into bad cameras in BADCAMWORD, so the cameras aren't processed.
## Otherwise we'll have nightly calibrations with only half the fibers useful.
if prow['OBSTYPE'] != 'science' and prow['BADAMPS'] != '':
badcams = []
for (camera, petal, amplifier) in parse_badamps(prow['BADAMPS']):
badcams.append(f'{camera}{petal}')
newbadcamword = create_camword(badcams)
log.info("For nonsscience exposure: {}, converting BADAMPS={} to bad cameras={}.".format( erow['EXPID'],
prow['BADAMPS'],
newbadcamword ) )
prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],newbadcamword)
prow['BADAMPS'] = ''
return prow
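# Example of the BADAMPS conversion above (hypothetical values): an 'arc' exposure with
# BADAMPS='b7D,z8A' gets cameras b7 and z8 removed from PROCCAMWORD and BADAMPS reset to ''.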
def table_row_to_dict(table_row):
"""
Helper function to convert a table row to a dictionary, which is much easier to work with for some applications
Args:
table_row, Table.Row or dict. The row of an astropy table that you want to convert into a dictionary where
each key is a column name and the values are the column entry.
Returns:
out, dict. Dictionary where each key is a column name and the values are the column entry.
"""
if type(table_row) is Table.Row:
out = {coln: table_row[coln] for coln in table_row.colnames}
return out
elif type(table_row) in [dict, OrderedDict]:
return table_row
else:
log = get_logger()
typ = type(table_row)
log.error(f"Received table_row of type {typ}, can't convert to a dictionary. Exiting.")
raise TypeError(f"Received table_row of type {typ}, can't convert to a dictionary. Exiting.")
|
{"hexsha": "c41d804a676cf7f90456f43eda57f3627ee0a10f", "size": 19179, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/desispec/workflow/proctable.py", "max_stars_repo_name": "Waelthus/desispec", "max_stars_repo_head_hexsha": "8be844ef3734cb831558caf794d7258a4b7017cc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py/desispec/workflow/proctable.py", "max_issues_repo_name": "Waelthus/desispec", "max_issues_repo_head_hexsha": "8be844ef3734cb831558caf794d7258a4b7017cc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py/desispec/workflow/proctable.py", "max_forks_repo_name": "Waelthus/desispec", "max_forks_repo_head_hexsha": "8be844ef3734cb831558caf794d7258a4b7017cc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.8351351351, "max_line_length": 129, "alphanum_fraction": 0.6303769748, "include": true, "reason": "import numpy,from astropy", "num_tokens": 4337}
|
HEROKU = False
if HEROKU:
import os
from random import randint
import flask
dropd_color = 'black'
dropd_back = 'gray'
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from dash.dependencies import Input, Output, State, ALL, MATCH
import plotly.graph_objs as go
import visdcc
import base64
import io
import pretty_midi
from textwrap import dedent
import numpy as np
import json
import pickle
from help import get_help
def help(topic):
return html.Div(html.Details([html.Summary('?', className= 'button'), html.Div(get_help.help(topic))]))
#Encoder that converts numpy arrays to lists for json dumps
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
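# Usage sketch: json.dumps({'notes': np.arange(3)}, cls=NumpyEncoder) -> '{"notes": [0, 1, 2]}'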
with open('no_data_orchestra.pickle', 'rb') as handle:
orchestra = pickle.load(handle)
if HEROKU:
server = flask.Flask(__name__)
server.secret_key = os.environ.get('secret_key', str(randint(0, 1000000)))
app = dash.Dash(__name__, server=server)
else:
app = dash.Dash(__name__)
# Either attribute-style or dict-style assignment works; one setting is enough:
app.config.suppress_callback_exceptions = True
##################
# Find all indexes of item in a list:
def list_idx_of_item(list, item): return [ i for i in range(len(list)) if list[i] == item ]
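# e.g. list_idx_of_item(['a', 'b', 'a'], 'a') -> [0, 2]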
############################################################
############################################################
################# ANALYZE SCORE CONTENT HERE ##############
############################################################
############################################################
image_filename3 = 'test_score.png' # pre-rendered score image shown under 'Show score'
encoded_image3 = base64.b64encode(open(image_filename3, 'rb').read())
pianoroll_resolution=10 #Sampling rate passed to get_piano_roll, in columns per second
inst_list=list(orchestra.keys())
tech_list=['normal']
dyn_list=['p', 'mf', 'f']
note_numbers=np.arange(128)
notenames=[]
roll = pretty_midi.PrettyMIDI('test_score.mid')
for i in range(128):
notenames.append(pretty_midi.note_number_to_name(i))
fig_layout = {
'title': 'Score',
'plot_bgcolor': 'black',
'paper_bgcolor': 'black',
'font': {
'color': 'white'
},
'xaxis': {'title': 'Bar',
#'rangeslider': {'visible': True},
#'rangeselector': {'visible': True},
},
'yaxis': {
'tickmode': 'array',
'tickvals': np.arange(128),
'ticktext': notenames,
'range': [36, 96],
'nticks': 10,
'title': 'note'
},
'dragmode': 'pan',
#'showscale': False,
#'coloraxis_showscale': False
}
fig_config = {
'displayModeBar': False
}
trace_template = {
"type": "heatmap",
"zmin": 0,
"zmax": 1,
'showlegend': True,
'showscale': False,
#'opacity': 0.5,
}
valid_instruments=[]
target_lista=[]
inst_lista=[]
tech_lista=[]
onoff_lista=[]
def add_instruments(midi_data):
length=len(midi_data.instruments)
graphs = [html.Tr([
html.Th('Score name'),
html.Th('Database name'),
html.Th('technique'),
html.Th('target/orch.'),
html.Th('on/off')
])]
def set_id(set_type, index):
return {
'type': set_type,
'index': index
}
for i in range(length):
if midi_data.instruments[i].program != 0:
instrument = midi_data.instruments[i]
valid_instruments.append(i)
if instrument.program == 41-1: # General MIDI programs are 1-indexed (41 = violin); pretty_midi uses 0-indexed
inst_name='violin'
elif instrument.program == 43-1:
inst_name='cello'
elif instrument.program == 74-1:
inst_name='flute'
elif instrument.program == 57-1:
inst_name='trumpet'
else:
inst_name='violin'
score_name = instrument.name
#inst_lista.append(inst_name)
tch = 'normal'
#tech_lista.append(tch)
onoff = 1
#onoff_lista.append(onoff)
target = 0
#target_lista.append(target)
graphs.append(html.Tr(children=[
html.Th(html.Div(id=set_id('scorename',i), children=score_name,
style={'display': 'inline-block', 'padding': '8px', 'fontSize': '25px', 'color': 'grey', 'textAlign':'left'}),
),
html.Th(dcc.Dropdown(
options=[{'label': val, 'value': val} for val in inst_list],
# className='select',
value=inst_name,
multi=False,
id=set_id('instrument', i),
style={'backgroundColor': dropd_back, 'color': dropd_color, 'display': 'inline-block', 'opacity': 1,
'border': 'none', 'width': '150px', 'fontSize': '25px', 'bottom': '-10px'},
)),
html.Th(dcc.Dropdown(
options=[{'label': val, 'value': val} for val in tech_list],
className='select',
value=tch,
multi=False,
id=set_id('tech',i),
style={'backgroundColor': dropd_back, 'color': dropd_color, 'display': 'inline-block', 'opacity': 1,
'border': 'none', 'width': '150px', 'fontSize': '25px', 'bottom': '-10px'},
)),
#For target, value is 100+something, for orchestration 0+something :)
html.Th(dcc.Dropdown(
options=[{'label': 'target', 'value': 100+i}, {'label': 'orchestration', 'value': 0+i}],
className='select',
value=0+i,
multi=False,
id=set_id('target',i),
style={'backgroundColor': dropd_back, 'color': dropd_color, 'display': 'inline-block', 'opacity': 1,
'border': 'none', 'width': '200px', 'fontSize': '25px', 'bottom': '-10px'},
)),
html.Th(dcc.Dropdown(
options=[{'label': 'on', 'value': 1}, {'label': 'off', 'value': 0}],
className='select',
value=onoff,
multi=False,
id=set_id('onoff',i),
style={'backgroundColor': dropd_back, 'color': dropd_color, 'display': 'inline-block', 'opacity': 1,
'border': 'none', 'width': '100px', 'fontSize': '25px', 'bottom': '-10px'},
)),
#html.Th(html.Div(id="graph_{}".format(i))),
]))
graphs=html.Table(graphs, style={'width': '100%'})
return graphs
def add_trace(trace_data, name, color='white'):
trace_data[np.where(trace_data == 0)] = None #Replace zeros with none
new_trace=trace_template.copy() #copy the predefined template so each trace gets its own dict
new_trace['colorscale'] = [[0, 'black'], [1, color]]
new_trace['z'] = trace_data #add data to trace
new_trace['name'] = name
return new_trace
from chord import maskingCurve_peakInput
from helpers import constants, get_fft, findPeaks
def get_masking(data, peaks):
#app.logger.info(peaks)
#app.logger.info(data)
#S = get_fft.get_fft(data)
if len(peaks) == 0:
masking_threshold=np.zeros(106)#-30
#masking_threshold = map(lambda number: None if number == 0 else number, masking_threshold) #Change zeros to None
return masking_threshold
S=np.zeros(22048)+70
masking_threshold = maskingCurve_peakInput.maskingCurve(S, peaks) # Calculate the masking curve
'''
try:
masking_threshold = maskingCurve_peakInput.maskingCurve(S, peaks) #Calculate the masking curve
except:
#print("Masking calculation fail, using flat masking curve")
masking_freq = constants.threshold[:, 0]
masking_threshold = np.zeros(106)-30
'''
return masking_threshold
from score import combine_peaks
def do3dgraph(midi_data, target, whole_orchestra_pianoroll):
##FOR DEBUG:
target_lista[0]=1 #Force first instrument as target!!
orch3d=[]
tar3d=[]
#For orchestration:
for i in range(len(midi_data.get_piano_roll(pianoroll_resolution)[0, :])):
sound_slice_o = np.zeros(44100) #Orchestra empty vector
peaks_o = []
sound_slice_t = np.zeros(44100) #Target empty vector
peaks_t = []
for ind in range(len(valid_instruments)):
#for orchestration:
if target_lista[ind]==0 and onoff_lista[ind]==1:
valid = valid_instruments[ind]
s_inst = inst_lista[ind]
s_tech = tech_lista[ind]
try:
s_slice = whole_orchestra_pianoroll[valid][0][:, i]#[:, i]
notenumbers=np.nonzero(s_slice)
dynamics=s_slice[notenumbers]
#app.logger.info(notenumbers)
for p in range(len(notenumbers)):
notenumber=notenumbers[p]
dynamic=dynamics[p]
if dynamic<50:
dyny='p'
elif dynamic>80:
dyny='f'
else:
dyny='mf'
#sound_o = orchestra[s_inst][s_tech][dyny][notenumber[0]]['data']
#sound_slice_o=sound_slice_o+orchestra[s_inst][s_tech][dyny][notenumber[0]]['data']
pks = orchestra[s_inst][s_tech][dyny][notenumber[0]]['peaks'] #findPeaks.peaks(get_fft.get_fft(sound_o), notenumber)
peaks_o = combine_peaks.combine_peaks(peaks_o, pks)#orchestra[s_inst][s_tech][dyny][notenumber[0]]['peaks'])
except:
pass
#for target:
elif target_lista[ind] == 1 and onoff_lista[ind] == 1:
valid = valid_instruments[ind]
s_inst = inst_lista[ind]
s_tech = tech_lista[ind]
try:
s_slice = whole_orchestra_pianoroll[valid][0][:, i] # [:, i]
notenumbers = np.nonzero(s_slice)
dynamics = s_slice[notenumbers]
#app.logger.info(notenumbers)
for p in range(len(notenumbers)):
notenumber = notenumbers[p]
dynamic = dynamics[p]
if dynamic < 50:
dyny = 'p'
elif dynamic > 80:
dyny = 'f'
else:
dyny = 'mf'
#sound_t = orchestra[s_inst][s_tech][dyny][notenumber[0]]['data']
#sound_slice_t = sound_slice_t + orchestra[s_inst][s_tech][dyny][notenumber[0]]['data']
pks = orchestra[s_inst][s_tech][dyny][notenumber[0]]['peaks'] #findPeaks.peaks(get_fft.get_fft(sound_t), notenumber)
peaks_t = combine_peaks.combine_peaks(peaks_t, pks)#orchestra[s_inst][s_tech][dyny][notenumber[0]]['peaks'])
except:
pass
orch3d.append(get_masking(sound_slice_o, peaks_o))
if not any(sound_slice_t):
tar3d.append(get_masking(sound_slice_t, peaks_t))
else:
tar3d.append(get_masking(sound_slice_t, peaks_t)+10)
#app.logger.info(orch3d)
#app.logger.info(tar3d)
#Set 3d camera direct above:
camera = dict(
eye=dict(x=1, y=0., z=2.5)
)
layout = {
'plot_bgcolor': 'black',
'paper_bgcolor': 'black',
'font': {
'color': 'white'
},#'width': '800', 'height': '200',
'scene': {
"aspectratio": {"x": 1, "y": 4, "z": 0.5},
'camera': camera,
},
}
return dcc.Graph(figure = {'data':[go.Surface(z=orch3d, opacity=1, colorscale= 'Greys', showscale=False),
go.Surface(z=tar3d, opacity=1, colorscale= 'Greens', showscale=False)], 'layout': layout}, config=fig_config)
def do_graph(midi_data, instrument, tech, tgt, onoff, score_range, bar_offset):
all_traces = []
target_pianoroll = []
orchestration_pianoroll = []
all_data = midi_data.get_piano_roll(pianoroll_resolution) # Do not set range yet! [:, score_range[0]:score_range[1]] #Do an overall pianoroll score
score_length = len(all_data[0,:])
all_data = all_data[:, score_range[0]:score_range[1]]
alltrace = add_trace(all_data, 'orchestration')
all_traces.append(alltrace.copy())
# Do separate scores for targets, if they are on
for ind in range(len(tgt)):
if onoff[ind]==1 and tgt[ind]>=100: #Remember, target indices are 100+idx, orchestration are 0+idx
#Get the instruments piano roll, from range start to end
t_PR = midi_data.instruments[tgt[ind] - 100].get_piano_roll(pianoroll_resolution)
#Check current inst pianoroll length
t_PR_len = len(t_PR[0,:])
#Append zeros to make all piano roll equal length
t_PR = np.hstack([t_PR, np.zeros([128,score_length-t_PR_len])])
#Append all to list and traces
target_pianoroll.append(t_PR[:, score_range[0]:score_range[1]]) #Append to the list of targets, took away copy()
all_traces.append(add_trace(target_pianoroll[-1], midi_data.instruments[tgt[ind]-100].name, 'red').copy()) #Add target to traces
if onoff[ind]==1 and tgt[ind]<100:
o_PR = midi_data.instruments[tgt[ind]].get_piano_roll(pianoroll_resolution)
o_PR_len = len(o_PR[0, :]) #Append zeros to make all piano roll equal length
o_PR = np.hstack([o_PR, np.zeros([128, score_length - o_PR_len])])
orchestration_pianoroll.append([o_PR[:, score_range[0]:score_range[1]], midi_data.instruments[tgt[ind]].name].copy())
#print(all_traces)
#Get values where bar changes
tickvals=midi_data.get_downbeats()[bar_offset[0]:bar_offset[1]]
#Get offset for first value
offset=tickvals[0]*pianoroll_resolution
fig_layout['xaxis']['tickmode']='array'
fig_layout['xaxis']['tickvals']=[round(i*pianoroll_resolution-offset) for i in tickvals] #Do the math to get the right place
fig_layout['xaxis']['ticktext']=np.arange(len(midi_data.get_downbeats()[bar_offset[0]:bar_offset[1]]))+bar_offset[0]+1 #Do the math to get the right text
#masking3d=do3dgraph(midi_data, tgt, orchestration_pianoroll)
masking3d=''
graph = dcc.Graph(id='midi_graph', figure={'data': all_traces,
'layout': fig_layout
}, config=fig_config)
graph3d = masking3d
return html.Div([graph3d, graph])
#graafi=do_graph(roll, 1)
### REMOVE FROM HERE (debug leftovers)
# roll = pretty_midi.PrettyMIDI('symphonic_songs.mid')
#piano=go.Heatmap(z=roll.get_piano_roll())
# alles=roll.get_piano_roll()
# alles[np.where(alles == 0)]=None
# trace1 = {
# "type": "heatmap",
# 'z':alles,
# "zmin": 0,
# "zmax": 1,
# "name": "all",
# 'colorscale': [[0, 'black'], [1, 'white']],
# 'showlegend': True,
# 'legendOrientation': 'h',
# 'showscale': False,
# #'visible': True
# }
# puts=roll.instruments[0].get_piano_roll()
# puts[np.where(puts == 0)]=None
# trace2 = {
# "type": "heatmap",
# 'z': puts,
# "name": "1st stave",
# 'colorscale': [[0, 'black'], [1, 'red']],
# "zmin": 0,
# "zmax": 1,
# 'showlegend': True,
# 'showscale': False,
# 'visible': True,
# }
#
# traces=[trace1, trace2]
#graafi = dcc.Graph(id='jopo', figure = figur,config = fig_config)
# graafi = dcc.Graph(id='jopo', figure = {'data':traces,
# 'layout': fig_layout
# },config = fig_config)
### REMOVE UP TO HERE (debug leftovers)
examples = ['Test_score', 'Brahms', 'Chamber music', 'Own works', 'Push here to load an example score']
slider_range=[]
analyzer_layout = html.Div(children=[
#html.Div(id="hidden", style={'visible': False}),
#html.Div(id="temp", style={'visible': False}),
#html.Div(id="temp2", style={'visible': False}),
#html.Button(id='analyze_button', n_clicks=0, children='Push here for analysis', className='button',),
html.Div('NOTE: Score upload is disabled to save server calculation time. '
'However, you can test the pre-loaded material by pressing ´Push here for analysis´. '
'The score of the pre-loaded material can be seen by pressing ´Show score´. '
'Analysis takes time, and the result appears at the bottom of the page.', style={'color':'red', 'textSize': 24}),
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select File'),
],
style={
'textAlign': 'center',
'color': 'grey'
}
),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px',
'color': 'grey'
},
# Allow multiple files to be uploaded
multiple=True
),
dcc.Dropdown(
options=[{'label': val, 'value': val} for val in examples],
className='select',
value=examples[-1],
multi=False,
id='score_select',
style={'backgroundColor': dropd_back, 'color': dropd_color, 'display': 'inline-block', 'opacity': 1,
'border': 'none', 'width': '100%', 'fontSize': '25px', 'bottom': '-10px'},
),
###This should be produced by a callback, doesn't work!
### Everything up to here should be moved into the callback!!
html.Div(id='load_return'),
#html.Div(add_instruments(roll)),
html.Div(id='graafi', className='waiting'),
html.Div(id='3d', style={'width': '100%'}),
#graafi,# dcc.Graph(
# id='piano_scroll',
# figure=fig,
# config = fig_config
# ),
# dcc.Graph(
# id='testaus',
# ),
html.Div(id='valikot'),
html.Div(id='testi',
style={
'color': 'grey'})
# children=dcc.Graph(figure = {'data':[{'z':pm.get_piano_roll(), 'type':'heatmap', 'colorscale': [[0, 'white'], [1, 'blue']]}],
# 'layout': fig_layout
# },
# config=fig_config)
# )
],
)
def define_score_instruments(midifile):
return html.Div([
add_instruments(midifile),
html.Div('bars', style={'textAlign': 'center', 'color': 'grey'}),
dcc.RangeSlider(
id='score_range',
min=1,
max=len(roll.get_downbeats()),
step=1,
value=[1, len(roll.get_downbeats())],
pushable=1,
dots=True,
updatemode='drag',
tooltip={
'always_visible': False,
'placement': 'bottom'
},
# marks={
# 0:'0',
# 5:'5',
# 10:'10'
# }
),
html.Div(id='slider_return', style={'textAlign': 'center', 'color': 'grey'}),
html.Button(id='analyze_button', n_clicks=0, children='Push here for analysis', className='button',),
html.Details([html.Summary('Show score', className='button'), html.Div(
html.Div(html.Div(children=[html.Img(src='data:image/png;base64,{}'.format(encoded_image3.decode()), style={'width': '100%'})], style={'textAlign': 'center'})),
)]),])
@app.callback(
Output('slider_return', 'children'),
[Input('score_range', 'value')])
def range_output(value):
text="Analysis range set from bar {} to {}".format(value[0], value[1]-1)
return text
########### RANGE SLIDER IS GIVING WRONG BARS EVERY OTHER NUMBER!
###########
@app.callback(
Output('graafi', 'children'),
[Input('analyze_button', 'n_clicks'),],
[State('hidden_score', 'children'),
State({'type': 'instrument', 'index': ALL}, 'value'),
State({'type': 'tech', 'index': ALL}, 'value'),
State({'type': 'target', 'index': ALL}, 'value'),
State({'type': 'onoff', 'index': ALL}, 'value'),
State('score_range', 'value')
])
def button_output(value, hidden_score, instrument, tech, target, onoff, score_range):
if value>0:
score_range[0]-=1 #Adjust range to show right value
hidden_score = json.loads(hidden_score)
hidden_score = base64.b64decode(hidden_score)
midi_data = pretty_midi.PrettyMIDI(io.BytesIO(hidden_score))
#Get downbeats for range:
downbeats= midi_data.get_downbeats()
#get midi range timings from slider:
s_range=[int(round(downbeats[score_range[0]]*pianoroll_resolution)), int(round(downbeats[score_range[1]-1]*pianoroll_resolution))]
return do_graph(midi_data, instrument, tech, target, onoff, s_range, score_range)
return ''
@app.callback(
[Output('hidden_score', 'children'),
Output('load_return', 'children')],
[Input('score_select', 'value')])
def select_output(value):
if value == 'Test_score':
with open('test_score.mid', mode='rb') as file: # b is important -> binary
midifile = file.read()
this_midifile = pretty_midi.PrettyMIDI('test_score.mid')
encoded_midi = base64.b64encode(midifile) #Encode midifile with base64
dump = json.dumps(encoded_midi.decode('utf-8')) #Decode into string for json dumps
load_return = define_score_instruments(this_midifile)
decoded = base64.b64decode(dump) #convert back with base64 decode
midi_data=pretty_midi.PrettyMIDI(io.BytesIO(decoded)) #Load to midi as io.bytes object
#pretty_midi.PrettyMIDI(decoded)
return [dump, load_return]
return ['','']
'''
@app.callback(
Output('load_return', 'children'), #temp -> load_return
[Input('score_select', 'value')]) #vaihda: testi -> score_select child->value
def select_output(value):
if value == 'Test_score':
roll = pretty_midi.PrettyMIDI('test_score.mid')
content = html.Div([add_instruments(roll), html.Div('bars', style={'textAlign': 'center', 'color': 'grey'}),
dcc.RangeSlider(
id='score_range',
min=0,
max=len(roll.get_downbeats()),
step=1,
value=[0, len(roll.get_downbeats())],
pushable=1,
dots=True,
tooltip={
'always_visible': True,
'placement': 'bottom'
},
# marks={
# 0:'0',
# 5:'5',
# 10:'10'
# }
),
html.Div(id='slider_return', style={'textAlign': 'center', 'color': 'grey'}),
html.Button(id='analyze_button', n_clicks=0, children='Push here for analysis', className='button',)])
return content
return
'''
def parse_contents(contents, filename, date):
if contents is not None:
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
pm = pretty_midi.PrettyMIDI(io.BytesIO(decoded))
return pm
# return {'data': [{'z': pm.get_piano_roll(), 'type': 'heatmap', 'colorscale': [[0, 'black'], [1, 'white']]}],
# 'layout': fig_layout
# }
except Exception as e:
print(e)
@app.callback(Output('testi', 'children'),
[Input('upload-data', 'contents')],
[State('upload-data', 'filename'),
State('upload-data', 'last_modified')])
def update_output(list_of_contents, list_of_names, list_of_dates):
if list_of_contents is not None:
tiedot = [
parse_contents(c, n, d) for c, n, d in
zip(list_of_contents, list_of_names, list_of_dates)]
rolli = tiedot[0]
piano=go.Heatmap(z=rolli.get_piano_roll(pianoroll_resolution))
trace1 = {
"type": "heatmap",
'z':rolli.get_piano_roll(pianoroll_resolution),
"name": "1st stave",
'colorscale': [[0, 'black'], [1, 'white']],
'showlegend': True,
'showscale': False,
}
trace2 = {
"type": "heatmap",
'z': rolli.instruments[0].get_piano_roll(pianoroll_resolution),
"name": "1st stave",
'colorscale': [[0, 'black'], [1, 'white']],
'showlegend': True,
'showscale': False,
}
return dcc.Graph(figure = {'data':[trace1, trace2],
'layout': fig_layout
},config = fig_config)
############################################################
############################################################
################## ANALYZE SCORE CONTENT ENDS ##############
############################################################
############################################################
app.layout = html.Div([
html.Div(id='score', children=analyzer_layout, style={'display': 'block'}),
html.Div(id='testing'),
html.Div(id='hidden-container', style={'display': 'none'}),
html.Div(id='hidden2', style={'display': 'none'}),
html.Div(id='hidden3', style={'display': 'none'}),
html.Div(id='hidden4', style={'display': 'none'}),
html.Div(id='hidden_score', style={'display': 'none'}),
])
app.title = 'Orchestration_Analyzer'
if HEROKU:
if __name__ == '__main__':
app.run_server(debug=False, threaded=True)
else:
PORT = 8050
ADDRESS = '0.0.0.0' #'127.0.0.1'
if __name__ == '__main__':
app.run_server(port=PORT, host=ADDRESS, debug=True)
|
{"hexsha": "c5b0b5ca64e6f95a7c9b2f86f8e7da7c13c72d0a", "size": 26447, "ext": "py", "lang": "Python", "max_stars_repo_path": "score/revised_score_analyze.py", "max_stars_repo_name": "SuperShinyEyes/Score-Tool", "max_stars_repo_head_hexsha": "907dd9e695c9950c6f168480e591a239cdf6a826", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-25T20:35:58.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-10T17:24:15.000Z", "max_issues_repo_path": "score/revised_score_analyze.py", "max_issues_repo_name": "SuperShinyEyes/Score-Tool", "max_issues_repo_head_hexsha": "907dd9e695c9950c6f168480e591a239cdf6a826", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-10T17:57:02.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-28T15:13:49.000Z", "max_forks_repo_path": "score/revised_score_analyze.py", "max_forks_repo_name": "SuperShinyEyes/Score-Tool", "max_forks_repo_head_hexsha": "907dd9e695c9950c6f168480e591a239cdf6a826", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-25T20:36:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-25T20:36:06.000Z", "avg_line_length": 38.4963609898, "max_line_length": 168, "alphanum_fraction": 0.542178697, "include": true, "reason": "import numpy", "num_tokens": 6451}
|
(*******************************************************************************
Project: Refining Authenticated Key Agreement with Strong Adversaries
Module: Channels.thy (Isabelle/HOL 2016-1)
ID: $Id: Channels.thy 132885 2016-12-23 18:41:32Z csprenge $
Author: Joseph Lallemand, INRIA Nancy <joseph.lallemand@loria.fr>
Christoph Sprenger, ETH Zurich <sprenger@inf.ethz.ch>
Channel messages and related message derivations (extract and fake).
Copyright (c) 2015-2016 Joseph Lallemand and Christoph Sprenger
Licence: LGPL
*******************************************************************************)
section \<open>Channel Messages\<close>
theory Channels
imports Message_derivation
begin
(**************************************************************************************************)
subsection \<open>Channel messages\<close>
(**************************************************************************************************)
datatype chan =
Chan "tag" "agent" "agent" "msg"
abbreviation
Insec :: "[agent, agent, msg] \<Rightarrow> chan" where
"Insec \<equiv> Chan insec"
abbreviation
Confid :: "[agent, agent, msg] \<Rightarrow> chan" where
"Confid \<equiv> Chan confid"
abbreviation
Auth :: "[agent, agent, msg] \<Rightarrow> chan" where
"Auth \<equiv> Chan auth"
abbreviation
Secure :: "[agent, agent, msg] \<Rightarrow> chan" where
"Secure \<equiv> Chan secure"
(**************************************************************************************************)
subsection \<open>Extract\<close>
(**************************************************************************************************)
text \<open>The set of payload messages that can be extracted from a set of (crypto) messages
and a set of channel messages, given a set of bad agents. The second rule states that
the payload can be extracted from insecure and authentic channels as well as from channels
with a compromised endpoint.\<close>
inductive_set
extr :: "agent set \<Rightarrow> msg set \<Rightarrow> chan set \<Rightarrow> msg set"
for bad :: "agent set"
and IK :: "msg set"
and H :: "chan set"
where
extr_Inj: "M \<in> IK \<Longrightarrow> M \<in> extr bad IK H"
| extr_Chan:
"\<lbrakk> Chan c A B M \<in> H; c = insec \<or> c = auth \<or> A \<in> bad \<or> B \<in> bad \<rbrakk> \<Longrightarrow> M \<in> extr bad IK H"
declare extr.intros [intro]
declare extr.cases [elim]
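text \<open>A small sanity check (added for illustration, not part of the original
development): any payload sent over an insecure channel is extractable, directly
by rule @{thm extr_Chan}.\<close>
lemma extr_Insec_example: "Insec A B M \<in> H \<Longrightarrow> M \<in> extr bad IK H"
by (auto)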
lemma extr_empty_chan [simp]: "extr bad IK {} = IK"
by (auto)
lemma IK_subset_extr: "IK \<subseteq> extr bad IK chan"
by (auto)
lemma extr_mono_chan [dest]: "G \<subseteq> H \<Longrightarrow> extr bad IK G \<subseteq> extr bad IK H"
by (safe, erule extr.induct, auto)
lemma extr_mono_IK [dest]: "IK1 \<subseteq> IK2 \<Longrightarrow> extr bad IK1 H \<subseteq> extr bad IK2 H"
by (safe) (erule extr.induct, auto)
lemma extr_mono_bad [dest]: "bad \<subseteq> bad' \<Longrightarrow> extr bad IK H \<subseteq> extr bad' IK H"
by (safe, erule extr.induct, auto)
lemmas extr_monotone_chan [elim] = extr_mono_chan [THEN [2] rev_subsetD]
lemmas extr_monotone_IK [elim] = extr_mono_IK [THEN [2] rev_subsetD]
lemmas extr_monotone_bad [elim] = extr_mono_bad [THEN [2] rev_subsetD]
lemma extr_mono [intro]: "\<lbrakk> b \<subseteq> b'; I \<subseteq> I'; C \<subseteq> C' \<rbrakk> \<Longrightarrow> extr b I C \<subseteq> extr b' I' C'"
by (force)
lemmas extr_monotone [elim] = extr_mono [THEN [2] rev_subsetD]
lemma extr_insert [intro]: "M \<in> extr bad IK H \<Longrightarrow> M \<in> extr bad IK (insert C H)"
by (auto)
lemma extr_insert_Chan [simp]:
"extr bad IK (insert (Chan c A B M) H)
= (if c = insec \<or> c = auth \<or> A \<in> bad \<or> B \<in> bad
then insert M (extr bad IK H) else extr bad IK H)"
by auto
(* do not declare [simp]! *)
lemma extr_insert_chan_eq: "extr bad IK (insert X CH) = extr bad IK {X} \<union> extr bad IK CH"
by (auto)
lemma extr_insert_IK_eq [simp]: "extr bad (insert X IK) CH = insert X (extr bad IK CH)"
by (auto)
lemma extr_insert_bad:
"extr (insert A bad) IK CH \<subseteq>
extr bad IK CH \<union> {M. \<exists> B. Confid A B M \<in> CH \<or> Confid B A M \<in> CH \<or>
Secure A B M \<in> CH \<or> Secure B A M \<in> CH}"
by (rule, erule extr.induct, auto intro: tag.exhaust)
lemma extr_insert_Confid [simp]:
"A \<notin> bad \<Longrightarrow>
B \<notin> bad \<Longrightarrow>
extr bad IK (insert (Confid A B X) CH) = extr bad IK CH"
by auto
(**************************************************************************************************)
subsection \<open>Fake\<close>
(**************************************************************************************************)
text \<open>The set of channel messages that an attacker can fake given a set of compromised
agents, a set of crypto messages and a set of channel messages. The second rule states
that an attacker can fake an insecure or confidential message, or a channel message
with a compromised endpoint, using any payload that he knows.\<close>
inductive_set
fake :: "agent set \<Rightarrow> msg set \<Rightarrow> chan set \<Rightarrow> chan set"
for bad :: "agent set"
and IK :: "msg set"
and chan :: "chan set"
where
fake_Inj: "M \<in> chan \<Longrightarrow> M \<in> fake bad IK chan"
| fake_New:
"\<lbrakk> M \<in> IK; c = insec \<or> c = confid \<or> A \<in> bad \<or> B \<in> bad \<rbrakk>
\<Longrightarrow> Chan c A B M \<in> fake bad IK chan"
declare fake.cases [elim]
declare fake.intros [intro]
lemmas fake_intros = fake_Inj fake_New
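text \<open>A small sanity check (added for illustration): an attacker can place any
payload he knows on an insecure channel, directly by rule @{thm fake_New}.\<close>
lemma fake_Insec_example: "M \<in> IK \<Longrightarrow> Insec A B M \<in> fake bad IK chan"
by (auto)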
lemma fake_mono_bad [intro]:
"bad \<subseteq> bad' \<Longrightarrow> fake bad IK chan \<subseteq> fake bad' IK chan"
by (auto)
lemma fake_mono_ik [intro]:
"IK \<subseteq> IK' \<Longrightarrow> fake bad IK chan \<subseteq> fake bad IK' chan"
by (auto)
lemma fake_mono_chan [intro]:
"chan \<subseteq> chan' \<Longrightarrow> fake bad IK chan \<subseteq> fake bad IK chan'"
by (auto)
lemma fake_mono [intro]:
"\<lbrakk> bad \<subseteq> bad'; IK \<subseteq> IK'; chan \<subseteq> chan'\<rbrakk> \<Longrightarrow> fake bad IK chan \<subseteq> fake bad' IK' chan'"
by (auto, erule fake.cases, auto)
lemmas fake_monotone_bad [elim] = fake_mono_bad [THEN [2] rev_subsetD]
lemmas fake_monotone_ik [elim] = fake_mono_ik [THEN [2] rev_subsetD]
lemmas fake_monotone_chan [elim] = fake_mono_chan [THEN [2] rev_subsetD]
lemmas fake_monotone [elim] = fake_mono [THEN [2] rev_subsetD]
lemma chan_subset_fake: "chan \<subseteq> fake bad IK chan"
by auto
lemma extr_fake:
"X \<in> fake bad IK chan \<Longrightarrow> extr bad IK' {X} \<subseteq> IK \<union> extr bad IK' chan"
by auto
lemmas extr_fake_2 [elim] = extr_fake [THEN [2] rev_subsetD]
lemma fake_parts_extr_singleton:
"X \<in> fake bad IK chan \<Longrightarrow> parts (extr bad IK' {X}) \<subseteq> parts IK \<union> parts (extr bad IK' chan)"
by (rule extr_fake [THEN parts_mono, simplified])
lemmas fake_parts_extr_singleton_2 [elim] = fake_parts_extr_singleton [THEN [2] rev_subsetD]
lemma fake_parts_extr_insert:
assumes "X \<in> fake bad IK CH"
shows "parts (extr bad IK' (insert X CH)) \<subseteq> parts (extr bad IK' CH) \<union> parts IK"
proof -
have "parts (extr bad IK' (insert X CH)) \<subseteq> parts (extr bad IK' {X}) \<union> parts (extr bad IK' CH)"
by (auto simp: extr_insert_chan_eq [where CH=CH])
also have "... \<subseteq> parts (extr bad IK' CH) \<union> parts IK" using assms
by (auto dest!: fake_parts_extr_singleton)
finally show ?thesis .
qed
lemma fake_synth_analz_extr:
assumes "X \<in> fake bad (synth (analz (extr bad IK CH))) CH"
shows "synth (analz (extr bad IK (insert X CH))) = synth (analz (extr bad IK CH))"
using assms
proof (intro equalityI)
have "synth (analz (extr bad IK (insert X CH)))
\<subseteq> synth (analz (extr bad IK {X} \<union> extr bad IK CH))"
by - (rule synth_analz_mono, auto)
also have "... \<subseteq> synth (analz (synth (analz (extr bad IK CH)) \<union> extr bad IK CH))" using assms
by - (rule synth_analz_mono, auto)
also have "... \<subseteq> synth (analz (synth (analz (extr bad IK CH))))"
by - (rule synth_analz_mono, auto)
also have "... \<subseteq> synth (analz (extr bad IK CH))" by simp
finally show "synth (analz (extr bad IK (insert X CH))) \<subseteq> synth (analz (extr bad IK CH))" .
next
have "extr bad IK CH \<subseteq> extr bad IK (insert X CH)"
by auto
then show "synth (analz (extr bad IK CH)) \<subseteq> synth (analz (extr bad IK (insert X CH)))"
by - (rule synth_analz_mono, auto)
qed
(**************************************************************************************************)
subsection \<open>Closure of Dolev-Yao, extract and fake\<close>
(**************************************************************************************************)
subsubsection \<open>\<open>dy_fake_msg\<close>: returns messages, closure of DY and extr is sufficient\<close>
(**************************************************************************************************)
text \<open>Close @{term extr} under Dolev-Yao closure using @{term synth} and @{term analz}.
This will be used in Level 2 attacker events to fake crypto messages.\<close>
definition
dy_fake_msg :: "agent set \<Rightarrow> msg set \<Rightarrow> chan set \<Rightarrow> msg set"
where
"dy_fake_msg b i c = synth (analz (extr b i c))"
lemma dy_fake_msg_empty [simp]: "dy_fake_msg bad {} {} = synth {}"
by (auto simp add: dy_fake_msg_def)
lemma dy_fake_msg_mono_bad [dest]: "bad \<subseteq> bad' \<Longrightarrow> dy_fake_msg bad I C \<subseteq> dy_fake_msg bad' I C"
by (auto simp add: dy_fake_msg_def intro!: synth_analz_mono)
lemma dy_fake_msg_mono_ik [dest]: "G \<subseteq> H \<Longrightarrow> dy_fake_msg bad G C \<subseteq> dy_fake_msg bad H C"
by (auto simp add: dy_fake_msg_def intro!: synth_analz_mono)
lemma dy_fake_msg_mono_chan [dest]: "G \<subseteq> H \<Longrightarrow> dy_fake_msg bad I G \<subseteq> dy_fake_msg bad I H"
by (auto simp add: dy_fake_msg_def intro!: synth_analz_mono)
lemmas dy_fake_msg_monotone_bad [elim] = dy_fake_msg_mono_bad [THEN [2] rev_subsetD]
lemmas dy_fake_msg_monotone_ik [elim] = dy_fake_msg_mono_ik [THEN [2] rev_subsetD]
lemmas dy_fake_msg_monotone_chan [elim] = dy_fake_msg_mono_chan [THEN [2] rev_subsetD]
lemma dy_fake_msg_insert [intro]:
"M \<in> dy_fake_msg bad I C \<Longrightarrow> M \<in> dy_fake_msg bad I (insert X C)"
by (auto)
lemma dy_fake_msg_mono [intro]:
"\<lbrakk> b \<subseteq> b'; I \<subseteq> I'; C \<subseteq> C' \<rbrakk> \<Longrightarrow> dy_fake_msg b I C \<subseteq> dy_fake_msg b' I' C'"
by (force simp add: dy_fake_msg_def intro!: synth_analz_mono)
lemmas dy_fake_msg_monotone [elim] = dy_fake_msg_mono [THEN [2] rev_subsetD]
lemma dy_fake_msg_insert_chan:
"x = insec \<or> x = auth \<Longrightarrow>
M \<in> dy_fake_msg bad IK (insert (Chan x A B M) CH)"
by (auto simp add: dy_fake_msg_def)
subsubsection \<open>\<open>dy_fake_chan\<close>: returns channel messages\<close>
(**************************************************************************************************)
text \<open>The set of all channel messages that an attacker can fake is obtained using
@{term fake} with the sets of possible payload messages derived with @{term dy_fake_msg}
defined above. This will be used in Level 2 attacker events to fake channel messages.\<close>
definition
dy_fake_chan :: "agent set \<Rightarrow> msg set \<Rightarrow> chan set \<Rightarrow> chan set"
where
"dy_fake_chan b i c = fake b (dy_fake_msg b i c) c"
lemma dy_fake_chan_mono_bad [intro]:
"bad \<subseteq> bad' \<Longrightarrow> dy_fake_chan bad I C \<subseteq> dy_fake_chan bad' I C"
by (auto simp add: dy_fake_chan_def)
lemma dy_fake_chan_mono_ik [intro]:
"T \<subseteq> T' \<Longrightarrow> dy_fake_chan bad T C \<subseteq> dy_fake_chan bad T' C"
by (auto simp add: dy_fake_chan_def)
lemma dy_fake_chan_mono_chan [intro]:
"C \<subseteq> C' \<Longrightarrow> dy_fake_chan bad T C \<subseteq> dy_fake_chan bad T C'"
by (auto simp add: dy_fake_chan_def)
lemmas dy_fake_chan_monotone_bad [elim] = dy_fake_chan_mono_bad [THEN [2] rev_subsetD]
lemmas dy_fake_chan_monotone_ik [elim] = dy_fake_chan_mono_ik [THEN [2] rev_subsetD]
lemmas dy_fake_chan_monotone_chan [elim] = dy_fake_chan_mono_chan [THEN [2] rev_subsetD]
lemma dy_fake_chan_mono [intro]:
assumes "b \<subseteq> b'" and "I \<subseteq> I'" and "C \<subseteq> C'"
shows "dy_fake_chan b I C \<subseteq> dy_fake_chan b' I' C'"
proof -
have "dy_fake_chan b I C \<subseteq> dy_fake_chan b' I C" using \<open>b \<subseteq> b'\<close> by auto
also have "... \<subseteq> dy_fake_chan b' I' C" using \<open>I \<subseteq> I'\<close> by auto
also have "... \<subseteq> dy_fake_chan b' I' C'" using \<open>C \<subseteq> C'\<close> by auto
finally show ?thesis .
qed
lemmas dy_fake_chan_monotone [elim] = dy_fake_chan_mono [THEN [2] rev_subsetD]
lemma dy_fake_msg_subset_synth_analz:
"\<lbrakk>extr bad IK chan \<subseteq> T \<rbrakk> \<Longrightarrow> dy_fake_msg bad IK chan \<subseteq> synth (analz T)"
by (auto simp add: dy_fake_msg_def synth_analz_mono)
lemma dy_fake_chan_mono2:
"\<lbrakk> extr bad IK chan \<subseteq> synth (analz y); chan \<subseteq> fake bad (synth (analz y)) z \<rbrakk>
\<Longrightarrow> dy_fake_chan bad IK chan \<subseteq> fake bad (synth (analz y)) z"
apply (auto simp add: dy_fake_chan_def, erule fake.cases, auto)
apply (auto intro!: fake_New dest!: dy_fake_msg_subset_synth_analz)
done
lemma extr_subset_dy_fake_msg: "extr bad IK chan \<subseteq> dy_fake_msg bad IK chan"
by (auto simp add: dy_fake_msg_def)
lemma dy_fake_chan_extr_insert:
"M \<in> dy_fake_chan bad IK CH \<Longrightarrow> extr bad IK (insert M CH) \<subseteq> dy_fake_msg bad IK CH"
by (auto simp add: dy_fake_chan_def dy_fake_msg_def dest: fake_synth_analz_extr)
lemma dy_fake_chan_extr_insert_parts:
"M \<in> dy_fake_chan bad IK CH \<Longrightarrow>
parts (extr bad IK (insert M CH)) \<subseteq> parts (extr bad IK CH) \<union> dy_fake_msg bad IK CH"
by (drule dy_fake_chan_extr_insert [THEN parts_mono], auto simp add: dy_fake_msg_def)
lemma dy_fake_msg_extr:
"extr bad ik chan \<subseteq> synth (analz X) \<Longrightarrow> dy_fake_msg bad ik chan \<subseteq> synth (analz X)"
by (drule synth_analz_mono) (auto simp add: dy_fake_msg_def)
lemma extr_insert_dy_fake_msg:
"M \<in> dy_fake_msg bad IK CH \<Longrightarrow> extr bad (insert M IK) CH \<subseteq> dy_fake_msg bad IK CH"
by (auto simp add: dy_fake_msg_def)
lemma dy_fake_msg_insert_dy_fake_msg:
"M \<in> dy_fake_msg bad IK CH \<Longrightarrow> dy_fake_msg bad (insert M IK) CH \<subseteq> dy_fake_msg bad IK CH"
by (drule synth_analz_mono [OF extr_insert_dy_fake_msg], auto simp add: dy_fake_msg_def)
lemma synth_analz_insert_dy_fake_msg:
"M \<in> dy_fake_msg bad IK CH \<Longrightarrow> synth (analz (insert M IK)) \<subseteq> dy_fake_msg bad IK CH"
by (auto dest!: dy_fake_msg_insert_dy_fake_msg, erule subsetD,
auto simp add: dy_fake_msg_def elim: synth_analz_monotone)
lemma Fake_insert_dy_fake_msg:
"M \<in> dy_fake_msg bad IK CH \<Longrightarrow>
extr bad IK CH \<subseteq> synth (analz X) \<Longrightarrow>
synth (analz (insert M IK)) \<subseteq> synth (analz X)"
by (auto dest!: synth_analz_insert_dy_fake_msg dy_fake_msg_extr)
lemma dy_fake_chan_insert_chan:
"x = insec \<or> x = auth \<Longrightarrow>
Chan x A B M \<in> dy_fake_chan bad IK (insert (Chan x A B M) CH)"
by (auto simp add: dy_fake_chan_def)
lemma dy_fake_chan_subset:
"CH \<subseteq> fake bad (dy_fake_msg bad IK CH) CH' \<Longrightarrow>
dy_fake_chan bad IK CH \<subseteq> fake bad (dy_fake_msg bad IK CH) CH'"
by (auto simp add: dy_fake_chan_def)
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Key_Agreement_Strong_Adversaries/Channels.thy"}
|
class step_by_step_brian_sim(object):
'''
Step-by-Step Brian Simulation (Nov/2014 - ricardo.deazambuja@plymouth.ac.uk)
This class was created to make it easier to run a Brian simulation step-by-step, passing input spikes without
    running out of memory or having to create the input spikes beforehand. It also makes the code clearer because
    you put your Brian simulation code in a separate function. This function (here called simulation) receives:
simulation(brian.defaultclock, brian)
brian.defaultclock: Brian defaultclock to be used
    brian: the module itself (the result of "import brian"), so the user's simulation function doesn't need to import Brian on its own and accesses everything through the "brian." prefix.
And must return a tuple:
(Input_layer, Output_layer, pop_objects, syn_objects, monitors_objects)
Input_layer: is a SpikeGenerator
Output_layer: the layer the user wants to output the spikes
pop_objects: a list with all the NeuronGroups used
syn_objects: a list with all the Synapses objects used
monitors_objects: a list with all the Monitors or functions used with the @network_operation decorator
At initialization the simulation step size (in ms) can be passed (default is 2).
After the creation of the instance, calling the method "run_step(input_spike_index_list)" sends the spikes
to the simulation and simulates one step (according to the initialization).
    The method run_step returns a tuple:
int(number_of_the_run),
float(current_simulation_time),
numpy.array(tuple(processed_received_spikes)),
list(list(output_spikes)),
list(float(output_spikes_times))
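    A minimal usage sketch is provided at the bottom of this file.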
'''
def __init__(self, simulation, init_step_size=2):
print "Initializing the simulation..."
self.step_size = init_step_size
self._generator = self._return_generator(simulation)
print "Initializing the simulation...Done"
print "Call .run_step(input_spikes_list) to run one step of the simulation!"
def run_step(self,input_spikes=None):
'''
Calls the generator .next and send methods and returns the spikes and times generated.
'''
self._generator.next() # Runs up to the first yield (where the generator waits for the .send method)
ans = self._generator.send(input_spikes) # Sends the spikes and runs to the second yield
#(where the generator returns the result of the simulation)
return ans
def _return_generator(self, simulation):
'''
Defines a simulation using a python generator.
'''
import brian
import numpy
print "Starting the simulation!"
print "Reseting the Brian Simulation object...",
brian.reinit() # This is only necessary when using the same enviroment over and over (like with iPython).
print "Done!"
clock_mult = self.step_size
brian.defaultclock.dt = clock_mult*brian.ms
print "Initial simulation time:", brian.defaultclock.t
print "Simulation step:", brian.defaultclock.dt
# Calls the user function with the Brian objects to be used in the simulation
Input_layer, Output_layer, pop_objects, syn_objects, monitors_objects = simulation(brian.defaultclock, brian)
output_spikes = []
output_spikes_time = []
# Every time spikes occur at the SpikeMonitor related to the output neuron group, this function is called
def output_spikes_proc(spikes):
if len(spikes):
output_spikes.append(spikes.tolist()) # Saves the indexes of the neurons who generated spikes
output_spikes_time.append(1000*float(brian.defaultclock.t)) # Converts and save the actual time in ms
# The spike monitor and all this code could be replaced by the .get_spikes() method of neurongroups.
        # I need to check which is the fastest way!
OutputMonitor=brian.SpikeMonitor(Output_layer, record=False, function=output_spikes_proc)
# Because it is not saving, the system is not going to run out of memory after a long simulation.
net = brian.Network(pop_objects + syn_objects + monitors_objects + [OutputMonitor])
r=0
while True:
spiketimes = yield # Receives the content from the Python generator method .send()
if spiketimes:
                spiketimes = [(i,brian.defaultclock.t) for i in spiketimes] # The received spikes are stamped with the last simulated time
Input_layer.set_spiketimes(spiketimes)
net.run(clock_mult*brian.ms) # I'm running one step each time this function is called
r+=1
yield (
r,
float(brian.defaultclock.t)*1000,
numpy.array(Input_layer.get_spiketimes()).astype(dtype=numpy.float), # I'm doing this way to prove the spikes were received
output_spikes,
output_spikes_time
)# After the .send method, the generator executes this line and stops here
output_spikes=[] # Cleans the output_spikes list so only the last spikes generated are sent
output_spikes_time=[] # Cleans the output_spikes list so only the last spikes generated are sent
# I'm using the .astype(numpy.float) because the arrays have Brian objects (units),
            # and I think the memory footprint can be smaller using only floats.
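if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; assumes Brian 1.x is installed).
    # The network below is hypothetical: a spike-generator layer driving a small
    # leaky integrate-and-fire layer through a single static connection.
    def example_simulation(defaultclock, brian):
        tau = 10 * brian.ms  # membrane time constant (illustrative value)
        Input_layer = brian.SpikeGeneratorGroup(10, [])
        Output_layer = brian.NeuronGroup(5, model='dv/dt = -v/tau : 1',
                                         threshold=1.0, reset=0.0)
        syn = brian.Connection(Input_layer, Output_layer, 'v', weight=0.5)
        return (Input_layer, Output_layer, [Input_layer, Output_layer], [syn], [])
    sim = step_by_step_brian_sim(example_simulation, init_step_size=2)
    # Send spikes to input neurons 0, 3 and 7 and advance the simulation one step.
    run_idx, t_ms, received, out_spikes, out_times = sim.run_step([0, 3, 7])
    print run_idx, t_ms, out_spikes, out_times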
|
{"hexsha": "c5ecd0c741f35ca419260a44ed3aace85a022ba5", "size": 5572, "ext": "py", "lang": "Python", "max_stars_repo_path": "step_by_step_brian.py", "max_stars_repo_name": "ricardodeazambuja/IJCNN2017", "max_stars_repo_head_hexsha": "817165185de6152041bbaf21cbad6d12fb58f064", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-18T14:30:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-29T12:52:51.000Z", "max_issues_repo_path": "step_by_step_brian.py", "max_issues_repo_name": "ricardodeazambuja/IJCNN2017", "max_issues_repo_head_hexsha": "817165185de6152041bbaf21cbad6d12fb58f064", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-18T06:06:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-18T09:39:56.000Z", "max_forks_repo_path": "step_by_step_brian.py", "max_forks_repo_name": "ricardodeazambuja/IJCNN2017", "max_forks_repo_head_hexsha": "817165185de6152041bbaf21cbad6d12fb58f064", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.5660377358, "max_line_length": 141, "alphanum_fraction": 0.6739052405, "include": true, "reason": "import numpy", "num_tokens": 1158}
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#Program to visualize samples of the learned place regions in rviz
#Author: Satoshi Ishibushi
#Created: December 2015
#Changed from plotting sampled points to drawing the Gaussian outline (Isobe, 2016 graduation thesis)
#Edited and updated by: Akira Taniguchi   Updated: 2017/02/10
#Version with 2-dimensional mu and 2x2 sig
#Obtaining and drawing the robot pose is done by a separate program
"""
Before running, check that the folders specified below are correct.
Do the same for file_read.py!
How to run:
 python place_draw.py (absolute path of the parameter folder) (optionally a number to display only that place region)
Example:
python place_draw.py /home/emlab/py-faster-rcnn/work/gibbs_sampling_program
"""
import glob
import re
import os
import rospy
import math
import sys
import time
import geometry_msgs.msg as gm
from geometry_msgs.msg import Point
import sensor_msgs.msg as sm
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
import numpy as np
import struct
#import PyKDLs
sys.path.append("lib/")
from __init__ import *
"""
def read_result(filename):
file_dir = os.chdir(filename)
f = open('SBP.txt')
    line = f.readline() # read one line as a string (the newline character is included)
place_num = int(line)
return place_num
"""
def mu_read(filename):
all_mu=[]
#fp = open(filename+'mu'+maxparticle+".csv", "r") # check
#convert = lambda text: int(text) if text.isdigit() else text
#alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
#file.sort(key=alphanum_key)
#for f in file:
K = 0
for line in open(filename+'mu'+str(maxparticle)+".csv", 'r'): #.readlines()
mu=[] #(x,y,sin,cos)
        # readlines() reads the whole file at once; we then process it one line at a time
#print line
data=line[:].split(',')
mu +=[float(data[0])]
mu +=[float(data[1])]
mu +=[0]#float(data[2])]
mu +=[0]#float(data[3])]
#print position
all_mu.append(mu)
K += 1
return all_mu, K
def sigma_read(filename):
all_sigma=[]
#file = glob.glob(filename+'/parameter3/sigma/*.txt') # check
#convert = lambda text: int(text) if text.isdigit() else text
#alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
#file.sort(key=alphanum_key)
for line in open(filename+'sig'+str(maxparticle)+".csv", 'r'):
#sigma=[] #(x,y,sin,cos)
data=line[:].split(',')
sigma = [[float(data[0]),float(data[1]),0,0],[float(data[2]),float(data[3]),0,0],[0,0,0,0],[0,0,0,0]]
        # readlines() reads the whole file at once; we then process it one line at a time
#line=open(f, 'r').readlines()
#i = 0
#for l in line:
# sigma_l.append(float(data[0]))
# sigma_l.append(float(data[1]))
# sigma_l.append(float(data[2]))
# sigma_l.append(float(data[3]))
#
#sigma.append(sigma_l)
#
all_sigma.append(sigma)
return all_sigma
"""
def sampling_read(filename, class_num):
c_all_position=[]
for c in range(class_num):
        all_position=[] #list of all robot-pose data
file = glob.glob(filename+'/sampling_data3/class'+repr(c)+'/*.txt') # check
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
file.sort(key=alphanum_key)
#print file
for f in file:
position=[] #(x,y,sin,cos)
line=open(f, 'r').readlines()
#print line
data=line[0][:].split(',')
position +=[float(data[0])]
position +=[float(data[1])]
position +=[float(data[2])]
position +=[float(data[3])]
#print position
all_position.append(position)
c_all_position.append(all_position)
return c_all_position
"""
# our own modules
#import file_read as f_r
#from SBP import read_result
#get the experiment name (trialname)
trialname = sys.argv[1]
print trialname
#get the step number
step = int(sys.argv[2])
print step
filename = datafolder+trialname+"/"+ str(step) +"/"
#filename50 = datafolder+trialname+"/"+ str(50) +"/"
#maxparticle = 0
#i = 0
##read the maximum-likelihood particle at datafolder+trialname+"/"+step
#for line in open( filename50 + 'weights.csv', 'r'):
# #itemList = line[:].split(',')
# if (i == 0):
# maxparticle = int(line)
# i +=1
maxparticle = int(sys.argv[3]) #which particle ID to use
#pid = int(sys.argv[3])
#filename=sys.argv[1]
#Class_NUM=0#read_result(filename)
RAD_90=math.radians(90)
color_all=1  #1 or 0 (if 0, everything is drawn in red)
mu_draw =1   #1 or 0 (if 0, the mean is not displayed)
sigma_draw=1 #1 or 0 (if 0, the covariance is not displayed)
mu_arrow=0   #set to 1 to visualize the mean as an arrow
COLOR=[
#[0,0,0], #ロボット自己位置用
[1,0,0],[0,1,0],[0,0,1],[0.5,0.5,0],[0.5,0,0.5], #4
[0,0.5,0.5],[0.8,0.1,0.1],[0.1,0.8,0.1],[0.1,0.1,0.8],[0.6,0.2,0.2],#9
[0.2,0.6,0.2],[0.2,0.2,0.6],[0.4,0.3,0.3],[0.3,0.4,0.3],[0.3,0.3,0.4], #14
[0.7,0.2,0.1],[0.7,0.1,0.2],[0.2,0.7,0.1],[0.1,0.7,0.2],[0.2,0.1,0.7],#19
[0.1,0.2,0.7],[0.5,0.2,0.3],[0.5,0.3,0.2],[0.3,0.5,0.2],[0.2,0.5,0.3],#24
[0.3,0.2,0.5],[0.2,0.3,0.5],[0.7,0.15,0.15],[0.15,0.7,0.15],[0.15,0.15,0.7],#29
[0.6,0.3,0.1],[0.6,0.1,0.3],[0.1,0.6,0.3],[0.3,0.6,0.1],[0.3,0.1,0.6],#34
[0.1,0.3,0.6],[0.8,0.2,0],[0.8,0,0.2],[0.2,0.8,0],[0,0.8,0.2],#39
[0.2,0,0.8],[0,0.2,0.8],[0.7,0.3,0],[0.7,0,0.3],[0.3,0.7,0.0],#44
[0.3,0,0.7],[0,0.7,0.3],[0,0.3,0.7],[0.25,0.25,0.5],[0.25,0.5,0.25], #49
[1,0,0],[0,1,0],[0,0,1],[0.5,0.5,0],[0.5,0,0.5], #54
[0,0.5,0.5],[0.8,0.1,0.1],[0.1,0.8,0.1],[0.1,0.1,0.8],[0.6,0.2,0.2],#59
[0.2,0.6,0.2],[0.2,0.2,0.6],[0.4,0.3,0.3],[0.3,0.4,0.3],[0.3,0.3,0.4], #64
[0.7,0.2,0.1],[0.7,0.1,0.2],[0.2,0.7,0.1],[0.1,0.7,0.2],[0.2,0.1,0.7],#69
[0.1,0.2,0.7],[0.5,0.2,0.3],[0.5,0.3,0.2],[0.3,0.5,0.2],[0.2,0.5,0.3],#74
[0.3,0.2,0.5],[0.2,0.3,0.5],[0.7,0.15,0.15],[0.15,0.7,0.15],[0.15,0.15,0.7],#79
[0.6,0.3,0.1],[0.6,0.1,0.3],[0.1,0.6,0.3],[0.3,0.6,0.1],[0.3,0.1,0.6],#84
[0.1,0.3,0.6],[0.8,0.2,0],[0.8,0,0.2],[0.2,0.8,0],[0,0.8,0.2],#89
[0.2,0,0.8],[0,0.2,0.8],[0.7,0.3,0],[0.7,0,0.3],[0.3,0.7,0.0],#94
[0.3,0,0.7],[0,0.7,0.3],[0,0.3,0.7],[0.25,0.25,0.5],[0.25,0.5,0.25] #99
]
#to draw only the Gaussian with a specific index
try:
Number=None #int(sys.argv[3])
except IndexError:
Number=None
# Ishibushi's version keeps the hyperparameter values in "パラメータ.txt", hence the (disabled) processing below
#env_para=np.genfromtxt(filename+"/パラメータ.txt",dtype= None,delimiter =" ")
#Class_NUM=int(env_para[4][1])
"""
#============= read the data assigned to each place region ===================
def class_check():
Class_list=[]
for i in range(Class_NUM):
#f=filename+"/parameter3/class/class"+repr(i)+".txt" # check
data=[]
        # default (raises an error)
#for line in open(f,'r').readlines():
# print str(line) + "\n\n"
# data.append(int(line))
#for line in open(f, 'r'):
# print "読み込み完了"
#replaceを使えば簡単にできる
#line1=line.split('[') # 始めの"["を除く
#line1=line1[1].split(']') # 終わりの"["を除く
#line2=line1[0]
#print "\nline2:" + str(line2) + "\n"
# 場所クラスに中身があるときはtry、中身がないときはexceptに移動
#try:
# data = [int(item) for item in line2.split(',')]
#except ValueError:
# data = []
#c=[]
#for item in data:
# print item
# try:
# num=int(item)
# c.append(num)
# except ValueError:
# pass
Class_list.append(data)
return Class_list
"""
def place_draw():
    # skip place classes with no assigned data -> with CRP, unassigned data do not exist
#class_list=class_check()
#print class_list
pub = rospy.Publisher('draw_space',MarkerArray, queue_size = 10)
rospy.init_node('draw_spatial_concepts', anonymous=True)
rate = rospy.Rate(10) # 10hz
    #read the robot pose
#mu_temp = [[float(sys.argv[4]),float(sys.argv[5]),0,0]]
#sigma_temp = [[[0.1,0,0,0],[0,0.1,0,0],[0,0,0,0],[0,0,0,0]]]
    #read mu and sig of the maximum-likelihood particle
mumu,Class_NUM = mu_read(filename)
sigsig = sigma_read(filename)
#sample = sampling_read(filename, Class_NUM)
#print "sigma: ",sigma
print sigsig
#mu_all = mu_temp + mumu
#sigma = sigma_temp + sigsig
mu_all = mumu
sigma = sigsig
print mu_all
print sigma
#Class_NUM += 1
data_class=[i for i in xrange(Class_NUM)]
#for n in range(Class_NUM):
# #if len(class_list[n])!=0:
# data_class.append(n)
marker_array=MarkerArray()
id=0
for c in data_class:
        #when drawing the center (mean) of each place region
        #=== visualize the extent of the place region ====================
if sigma_draw==1:
marker =Marker()
marker.type=Marker.CYLINDER
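            #Covariance ellipse: the eigenvectors of sigma give the ellipse axis
            #directions and the eigenvalues the variances along them, so a scale
            #of 2*sqrt(eigenvalue) per axis draws the one-sigma contour.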
(eigValues,eigVectors) = np.linalg.eig(sigma[c])
angle = (math.atan2(eigVectors[1, 0], eigVectors[0, 0]));
marker.scale.x = 2*math.sqrt(eigValues[0]);
marker.scale.y = 2*math.sqrt(eigValues[1]);
marker.pose.orientation.w = math.cos(angle*0.5);
marker.pose.orientation.z = math.sin(angle*0.5);
marker.scale.z=0.01 # default: 0.05
marker.color.a=0.3
marker.header.frame_id='map'
marker.header.stamp=rospy.get_rostime()
marker.id=id
id +=1
marker.action=Marker.ADD
marker.pose.position.x=mu_all[c][0]
marker.pose.position.y=mu_all[c][1]
            marker.color.r = COLOR[c][0] # default: COLOR[c][0]; widen the color spread
            marker.color.g = COLOR[c][1] # default: COLOR[c][1]; widen the color spread
            marker.color.b = COLOR[c][2] # default: COLOR[c][2]; widen the color spread
if Number != None:
if Number==c:
marker_array.markers.append(marker)
else:
marker_array.markers.append(marker)
if mu_draw==1:
mu_marker =Marker()
if mu_arrow==1: #矢印を可視化する場合
mu_marker.type=Marker.ARROW
orient_cos=mu_all[c][3]
orient_sin=mu_all[c][2]
if orient_sin>1.0:
orient_sin=1.0
elif orient_sin<-1.0:
orient_sin=-1.0
                #derive the angle in radians
radian=math.asin(orient_sin)
if orient_sin>0 and orient_cos<0:
radian=radian+RAD_90
elif orient_sin<0 and orient_cos<0:
radian=radian-RAD_90
mu_marker.pose.orientation.z=math.sin(radian/2.0)
mu_marker.pose.orientation.w=math.cos(radian/2.0)
                #<<<<<<< change the arrow size >>>>>>>>>>>>>>>>>>>>>>>>
mu_marker.scale.x=0.5 # default: 0.4
mu_marker.scale.y=0.07 # default: 0.1
mu_marker.scale.z=0.001 # default: 1.0
mu_marker.color.a=1.0
elif mu_arrow==0:
mu_marker.type=Marker.SPHERE
mu_marker.scale.x=0.1
mu_marker.scale.y=0.1
mu_marker.scale.z=0.01 # default: 0.05
mu_marker.color.a=1.0
mu_marker.header.frame_id='map'
mu_marker.header.stamp=rospy.get_rostime()
mu_marker.id=id
id +=1
mu_marker.action=Marker.ADD
mu_marker.pose.position.x=mu_all[c][0]
mu_marker.pose.position.y=mu_all[c][1]
#print c,mu_marker.pose.position.x,mu_marker.pose.position.y
if color_all==1:
mu_marker.color.r = COLOR[c][0] # default: COLOR[c][0]
mu_marker.color.g = COLOR[c][1] # default: COLOR[c][1]
mu_marker.color.b = COLOR[c][2] # default: COLOR[c][2]
elif color_all==0:
mu_marker.color.r = 1.0
mu_marker.color.g = 0
mu_marker.color.b = 0
if Number != None:
if Number==c:
marker_array.markers.append(mu_marker)
else:
marker_array.markers.append(mu_marker)
print marker_array.markers
count =0
#while not rospy.is_shutdown():
while(count <= 5):
#pub.publish(marker)
pub.publish(marker_array)
rate.sleep()
#time.sleep(5.0)
count = count+1
if __name__ == '__main__':
try:
place_draw()
except rospy.ROSInterruptException:
pass
|
{"hexsha": "6abcb5edf0cebd034b39b8b5354d3c7961299e97", "size": 12178, "ext": "py", "lang": "Python", "max_stars_repo_path": "learning/new_place_draw_online.py", "max_stars_repo_name": "a-taniguchi/SpCoSLAM_evaluation", "max_stars_repo_head_hexsha": "d24cbcc12a437d831049228ad1b80b22574c9ec0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-05-03T13:55:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T05:00:26.000Z", "max_issues_repo_path": "learning/new_place_draw_online.py", "max_issues_repo_name": "a-taniguchi/SpCoSLAM_evaluation", "max_issues_repo_head_hexsha": "d24cbcc12a437d831049228ad1b80b22574c9ec0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learning/new_place_draw_online.py", "max_forks_repo_name": "a-taniguchi/SpCoSLAM_evaluation", "max_forks_repo_head_hexsha": "d24cbcc12a437d831049228ad1b80b22574c9ec0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-01-13T17:35:02.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-05T10:11:01.000Z", "avg_line_length": 32.1319261214, "max_line_length": 109, "alphanum_fraction": 0.5420430284, "include": true, "reason": "import numpy", "num_tokens": 4635}
|
[STATEMENT]
lemma lhd_inf_llist [simp]: "lhd (inf_llist f) = f 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lhd (inf_llist f) = f 0
[PROOF STEP]
by(simp add: inf_llist_def)
|
{"llama_tokens": 89, "file": "Coinductive_Coinductive_List", "length": 1}
|
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import microdf as mdf
import os
from numerize import numerize
from components import make_html_label, set_options
# ---------------------------------------------------------------------------- #
# SECTION import pre-processed data #
# ---------------------------------------------------------------------------- #
# Import data from Ipums
person = pd.read_csv("person.csv.gz")
spmu = pd.read_csv("spmu.csv.gz")
# import baseline poverty gap, gini by state & us
all_state_stats = pd.read_csv("all_state_stats.csv.gz", index_col=0)
# import baseline white/black/child etc. poverty rates & population
demog_stats = pd.read_csv("demog_stats.csv.gz")
# Colors
BLUE = "#1976D2"
# create a list of all states, including "US" as a state
states_no_us = person.state.unique().tolist()
states_no_us.sort()
states = ["US"] + states_no_us
# ---------------------------------------------------------------------------- #
# SECTION dash components #
# ---------------------------------------------------------------------------- #
# ----------------------- SECTION Create 4 input cards ----------------------- #
cards = dbc.CardDeck(
[
# -------------- SECTION Card 1 state-dropdown component ------------- #
dbc.Card(
[
dbc.CardBody(
[
make_html_label("Select state:"),
dcc.Dropdown(
                            # define component_id used as an Input in the @app.callback decorator
id="state-dropdown",
multi=False,
value="US",
# create a list of dicts of states and their labels
# to be selected by user in dropdown
options=[{"label": x, "value": x} for x in states],
),
html.Br(),
make_html_label("Reform level:"),
dcc.RadioItems(
id="level",
options=set_options(
{"Federal": "federal", "State": "state"}
),
value="federal",
labelStyle={"display": "block"},
inputStyle={"margin-right": "5px"},
),
]
),
],
# color="info",
outline=False,
),
# exclude/include from UBI checklist
dbc.Card(
[
dbc.CardBody(
[
make_html_label("Include in UBI:"),
dcc.Checklist(
id="include-checklist",
options=set_options(
{
"Non-citizens": "non_citizens",
"Children": "children",
"Adult": "adults",
}
),
# specify checked items
value=[
"adults",
"children",
"non_citizens",
],
inputStyle={"margin-right": "5px"},
labelStyle={"display": "block"},
),
]
),
],
outline=False,
),
# --- toggle here to next section to change deck size --- #
# ]
# )
# taxes_benefits_cards = dbc.CardDeck(
# [
# ----------------- SECTION Card 3 - Repeal Benefits ----------------- #
# define third card where the repeal benefits checklist is displayed
dbc.Card(
[
dbc.CardBody(
[
# label the card
make_html_label("Repeal benefits:"),
# use dash component to create checklist to choose
# which benefits to repeal
dcc.Checklist(
                            # this id string is a Dash component_id
                            # and is referenced as an Input in @app.callback
id="benefits-checklist",
                            # 'options' here refers to the selections available to the user in the
                            # checklist
options=set_options(
{
" Child Tax Credit": "ctc",
" Supplemental Security Income (SSI)": "incssi",
" SNAP (food stamps)": "spmsnap",
" Earned Income Tax Credit": "eitcred",
" Unemployment benefits": "incunemp",
" Energy subsidy (LIHEAP)": "spmheat",
}
),
# do not repeal benefits by default
value=[],
labelStyle={"display": "block"},
inputStyle={"margin-right": "5px"},
),
]
),
],
outline=False,
),
# -------------------- SECTION Card 2 - taxes ------------------- #
# tax slider
# allows user to repeal certain federal and state taxes
# component_id: "taxes-checklist"
# tax rate slider
# Allows user to adjust tax rate that determines ubi benefit amount
# component_id="agi-slider"
dbc.Card(
[
dbc.CardBody(
[
# define attributes of taxes-checklist component
make_html_label("Repeal current taxes:"),
html.Br(),
dcc.Checklist(
# define component id to be used in callback
id="taxes-checklist",
options=set_options(
{
"Income taxes": "fedtaxac",
"Employee side payroll": "fica",
}
),
value=[],
labelStyle={"display": "block"},
inputStyle={"margin-right": "5px"},
),
html.Br(),
# defines label/other HTML attributes of agi-slider component
make_html_label("Income tax rate:"),
dcc.Slider(
id="agi-slider",
min=0,
max=50,
step=1,
value=0,
tooltip={
"always_visible": True,
"placement": "bottom",
},
# define marker values to show increments on slider
marks={
0: {
"label": "0%",
},
10: {
"style": {"color": "#F8F8FF"},
},
20: {
"style": {"color": "#F8F8FF"},
},
30: {
"style": {"color": "#F8F8FF"},
},
40: {
"style": {"color": "#F8F8FF"},
},
50: {
"label": "50%",
},
},
),
html.Div(id="slider-output-container"),
]
),
html.Br(),
],
outline=False,
),
]
)
# --------------------- charts cards --------------------- #
charts = dbc.CardDeck(
[
dbc.Card(
dcc.Graph(
id="econ-graph",
figure={},
config={"displayModeBar": False},
),
),
dbc.Card(
dcc.Graph(
id="breakdown-graph",
figure={},
config={"displayModeBar": False},
),
outline=True,
),
]
)
# ------------------------------- summary card ------------------------------- #
# create the summary card that contains ubi amount, revenue, pct. better off
SUMMARY_OUTPUTS = [
"revenue-output", # Funds for UBI
"ubi-population-output", # UBI Population
"ubi-output", # Monthly UBI
"winners-output", # Percent better off
"resources-output", # Average change in resources per person
]
text = (
dbc.Card(
[
dbc.CardBody(
[
html.Div(
id=x,
style={
"text-align": "left",
"color": "black",
"fontSize": 18,
"font-family": "Roboto",
},
)
for x in SUMMARY_OUTPUTS
]
),
],
color="white",
outline=False,
),
)
# ---------------------------------------------------------------------------- #
# SECTION app #
# ---------------------------------------------------------------------------- #
# Get base pathname from an environment variable that CS will provide.
url_base_pathname = os.environ.get("URL_BASE_PATHNAME", "/")
app = dash.Dash(
__name__,
external_stylesheets=[
dbc.themes.FLATLY,
"https://fonts.googleapis.com/css2?family=Roboto:wght@300;400&display=swap",
"/assets/style.css",
],
# tell dash to use mobile version of something
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
# Pass the url base pathname to Dash.
url_base_pathname=url_base_pathname,
)
server = app.server # the server object
# Design the app
app.layout = html.Div(
[
# navbar (top)
dbc.Navbar(
[
html.A(
dbc.Row(
[
dbc.Col(
# insert logo
html.Img(
src="https://raw.githubusercontent.com/UBICenter/ubicenter.org/master/assets/images/logos/wide-blue.jpg",
height="30px",
),
),
],
align="center",
                        # no_gutters removes the horizontal spacing (gutters) between columns
no_gutters=True,
),
href="https://www.ubicenter.org",
target="blank",
),
dbc.NavbarToggler(id="navbar-toggler"),
]
),
html.Br(),
dbc.Row(
[
dbc.Col(
html.H1(
"Basic Income Builder",
id="header",
style={
"text-align": "center",
"color": "#1976D2",
"fontSize": 50,
"letter-spacing": "2px",
"font-weight": 300,
"font-family": "Roboto",
},
),
width={"size": "auto"},
md={"size": 8, "offset": 2},
),
]
),
html.Br(),
# app description
dbc.Row(
[
dbc.Col(
html.H4(
"Fund a universal basic income by adding taxes, replacing taxes, and/or repealing benefits",
style={
"text-align": "center",
"color": "#212121",
"fontSize": 25,
"font-family": "Roboto",
},
),
width={"size": "auto"},
md={"size": 8, "offset": 2},
),
]
),
# second row of app description
dbc.Row(
[
dbc.Col(
html.H4(
"Any surplus is shared equally across all eligible recipients",
style={
"text-align": "center",
"color": "#212121",
"fontSize": 25,
"font-family": "Roboto",
},
),
width={"size": "auto"},
md={"size": 8, "offset": 2},
),
]
),
html.Br(),
# row with one column containing input cards
dbc.Row(
[
dbc.Col(
cards,
width={
"size": 12,
},
md={"size": 10, "offset": 1},
),
]
),
html.Br(),
dbc.Row(
[
dbc.Col(
html.H1(
"Results of your reform:",
style={
"text-align": "center",
"color": "#1976D2",
"fontSize": 30,
"font-family": "Roboto",
},
),
width={"size": "auto"},
md={"size": 6, "offset": 3},
),
]
),
# contains simulation results in text form
dbc.Row(
[
dbc.Col(
text,
width={
"size": "auto",
},
md={"size": 6, "offset": 3},
)
]
),
html.Br(),
# ---------------- contains charts --------------- #
dbc.Row(
[
dbc.Col(
charts,
width={
"size": 12,
},
md={"size": 10, "offset": 1},
),
],
),
        # a few line breaks at the end of the page to make it look nicer :)
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
# footnote explanation of data source and modeling assumptions
dbc.Row(
[
dbc.Col(
html.H4(
[
"Source: 2017-2019 Current Population Survey March Supplement. ",
"This dataset is known to underestimate benefit receipt and high incomes. ",
"No behavioral responses are assumed. ",
],
style={
"text-align": "left",
"color": "gray",
"fontSize": 12,
"font-family": "Roboto",
},
),
width={
"size": "auto",
},
md={"size": 8, "offset": 2},
),
]
),
# link to paper
dbc.Row(
[
dbc.Col(
html.H4(
[
"To see a detailed explanation of our simulation, see ",
html.A(
"our paper.",
href="https://www.ubicenter.org/introducing-basic-income-builder",
target="blank",
),
],
style={
"text-align": "left",
"color": "gray",
"fontSize": 12,
"font-family": "Roboto",
},
),
width={
"size": "auto",
},
md={"size": 8, "offset": 2},
),
]
),
# link to contact email and github issue tracker
dbc.Row(
[
dbc.Col(
html.H4(
[
"Questions or feedback? ",
"Email ",
html.A(
"contact@ubicenter.org",
href="mailto:contact@ubicenter.org",
),
" or file an issue at ",
html.A(
"github.com/UBICenter/us-calc/issues",
href="http://github.com/UBICenter/us-calc/issues",
),
],
style={
"text-align": "left",
"color": "gray",
"fontSize": 12,
"font-family": "Roboto",
},
),
width={
"size": "auto",
# "offset": 2
},
md={"size": 8, "offset": 2},
),
]
),
html.Br(),
html.Br(),
]
)
# ---------------------------------------------------------------------------- #
# SECTION callbacks #
# ---------------------------------------------------------------------------- #
@app.callback(
Output(component_id="ubi-output", component_property="children"),
Output(component_id="revenue-output", component_property="children"),
Output(component_id="ubi-population-output", component_property="children"),
Output(component_id="winners-output", component_property="children"),
Output(component_id="resources-output", component_property="children"),
Output(component_id="econ-graph", component_property="figure"),
Output(component_id="breakdown-graph", component_property="figure"),
Input(component_id="state-dropdown", component_property="value"),
Input(component_id="level", component_property="value"),
Input(component_id="agi-slider", component_property="value"),
Input(component_id="benefits-checklist", component_property="value"),
Input(component_id="taxes-checklist", component_property="value"),
Input(component_id="include-checklist", component_property="value"),
)
# TODO one function to translate args to params, another to run the function, another to return the output
def ubi(state_dropdown, level, agi_tax, benefits, taxes, include):
"""this does everything from microsimulation to figure creation.
Dash does something automatically where it takes the input arguments
in the order given in the @app.callback decorator
Args:
state_dropdown: takes input from callback input, component_id="state-dropdown"
level: component_id="level"
agi_tax: component_id="agi-slider"
benefits: component_id="benefits-checklist"
taxes: component_id="taxes-checklist"
include: component_id="include-checklist"
Returns:
ubi_line: outputs to "ubi-output" in @app.callback
revenue_line: outputs to "revenue-output" in @app.callback
ubi_population_line: outputs to "revenue-output" in @app.callback
winners_line: outputs to "winners-output" in @app.callback
resources_line: outputs to "resources-output" in @app.callback
fig: outputs to "econ-graph" in @app.callback
fig2: outputs to "breakdown-graph" in @app.callback
"""
# -------------------- calculations based on reform level -------------------- #
# if the "Reform level" selected by the user is federal
if level == "federal":
# combine taxes and benefits checklists into one list to be used to
# subset spmu dataframe
taxes_benefits = taxes + benefits
# initialize new resources column with old resources as baseline
spmu["new_resources"] = spmu.spmtotres
# initialize revenue at zero
revenue = 0
# Calculate the new revenue and spmu resources from tax and benefit change
for tax_benefit in taxes_benefits:
# subtract taxes and benefits that have been changed from spm unit's resources
spmu.new_resources -= spmu[tax_benefit]
# add that same value to revenue
revenue += mdf.weighted_sum(spmu, tax_benefit, "spmwt")
# if "Income taxes" = ? and "child_tax_credit" = ?
# in taxes/benefits checklist
if ("fedtaxac" in taxes_benefits) & ("ctc" in taxes_benefits):
spmu.new_resources += spmu.ctc
revenue -= mdf.weighted_sum(spmu, "ctc", "spmwt")
if ("fedtaxac" in taxes_benefits) & ("eitcred" in taxes_benefits):
spmu.new_resources += spmu.eitcred
revenue -= mdf.weighted_sum(spmu, "eitcred", "spmwt")
# Calculate the new taxes from flat tax on AGI
tax_rate = agi_tax / 100
spmu["new_taxes"] = np.maximum(spmu.adjginc, 0) * tax_rate
# subtract new taxes from new resources
spmu.new_resources -= spmu.new_taxes
# add new revenue when new taxes are applied on spmus, multiplied by weights
revenue += mdf.weighted_sum(spmu, "new_taxes", "spmwt")
        # Calculate the total UBI an SPM unit receives based on exclusions
spmu["numper_ubi"] = spmu.numper
# TODO make into linear equation on one line using array of some kind
if "children" not in include:
# subtract the number of children from the number of
# people in spm unit receiving ubi benefit
spmu["numper_ubi"] -= spmu.child
if "non_citizens" not in include:
spmu["numper_ubi"] -= spmu.non_citizen
if ("children" not in include) and ("non_citizens" not in include):
spmu["numper_ubi"] += spmu.non_citizen_child
if "adults" not in include:
spmu["numper_ubi"] -= spmu.adult
if ("adults" not in include) and ("non_citizens" not in include):
spmu["numper_ubi"] += spmu.non_citizen_adult
# Assign UBI
ubi_population = (spmu.numper_ubi * spmu.spmwt).sum()
ubi_annual = revenue / ubi_population
spmu["total_ubi"] = ubi_annual * spmu.numper_ubi
# Calculate change in resources
spmu.new_resources += spmu.total_ubi
spmu["new_resources_per_person"] = spmu.new_resources / spmu.numper
# Sort by state
# NOTE: the "target" here refers to the population being
# measured for gini/poverty rate/etc.
# I.e. the total population of the state/country and
# INCLUDING those excluding form recieving ubi payments
# state here refers to the selection from the drop down, not the reform level
if state_dropdown == "US":
target_spmu = spmu
else:
target_spmu = spmu[spmu.state == state_dropdown]
# if the "Reform level" dropdown selected by the user is State
if level == "state":
# Sort by state
if state_dropdown == "US":
target_spmu = spmu
else:
target_spmu = spmu[spmu.state == state_dropdown]
# Initialize
target_spmu["new_resources"] = target_spmu.spmtotres
revenue = 0
# Change income tax repeal to state level
if "fedtaxac" in taxes:
target_spmu.new_resources -= target_spmu.stataxac
revenue += mdf.weighted_sum(target_spmu, "stataxac", "spmwt")
# Calculate change in tax revenue
tax_rate = agi_tax / 100
target_spmu["new_taxes"] = target_spmu.adjginc * tax_rate
target_spmu.new_resources -= target_spmu.new_taxes
revenue += mdf.weighted_sum(target_spmu, "new_taxes", "spmwt")
        # Calculate the total UBI an SPM unit receives based on exclusions
target_spmu["numper_ubi"] = target_spmu.numper
if "children" not in include:
target_spmu["numper_ubi"] -= target_spmu.child
if "non_citizens" not in include:
target_spmu["numper_ubi"] -= target_spmu.non_citizen
if ("children" not in include) and ("non_citizens" not in include):
target_spmu["numper_ubi"] += target_spmu.non_citizen_child
if "adults" not in include:
target_spmu["numper_ubi"] -= target_spmu.adult
if ("adults" not in include) and ("non_citizens" not in include):
target_spmu["numper_ubi"] += target_spmu.non_citizen_adult
# Assign UBI
ubi_population = (target_spmu.numper_ubi * target_spmu.spmwt).sum()
ubi_annual = revenue / ubi_population
target_spmu["total_ubi"] = ubi_annual * target_spmu.numper_ubi
# Calculate change in resources
target_spmu.new_resources += target_spmu.total_ubi
target_spmu["new_resources_per_person"] = (
target_spmu.new_resources / target_spmu.numper
)
# NOTE: code after this applies to both reform levels
    # Merge and create target_persons.
    # NOTE: the "target" here refers to the population being
    # measured for gini/poverty rate/etc.,
    # i.e. the total population of the state/country,
    # INCLUDING those excluded from receiving UBI payments.
sub_spmu = target_spmu[
["spmfamunit", "year", "new_resources", "new_resources_per_person"]
]
target_persons = person.merge(sub_spmu, on=["spmfamunit", "year"])
# filter demog_stats for selected state from dropdown
baseline_demog = demog_stats[demog_stats.state == state_dropdown]
    # TODO: return a dictionary of results instead of returning each variable
def return_demog(demog, metric):
"""
retrieve pre-processed data by demographic
args:
demog - string one of
['person', 'adult', 'child', 'black', 'white',
'hispanic', 'pwd', 'non_citizen', 'non_citizen_adult',
'non_citizen_child']
metric - string, one of ['pov_rate', 'pop']
returns:
value - float
"""
# NOTE: baseline_demog is a dataframe with global scope
value = baseline_demog.loc[
(baseline_demog["demog"] == demog) & (baseline_demog["metric"] == metric),
"value",
# NOTE: returns the first value as a float, be careful if you redefine baseline_demog
].values[0]
return value
population = return_demog(demog="person", metric="pop")
child_population = return_demog(demog="child", metric="pop")
non_citizen_population = return_demog(demog="non_citizen", metric="pop")
non_citizen_child_population = return_demog(demog="non_citizen_child", metric="pop")
# filter all state stats gini, poverty_gap, etc. for dropdown state
baseline_all_state_stats = all_state_stats[all_state_stats.index == state_dropdown]
def return_all_state(metric):
"""filter baseline_all_state_stats and return value of select metric
Keyword arguments:
metric - string, one of 'poverty_gap', 'gini', 'total_resources'
returns:
value- float
"""
return baseline_all_state_stats[metric].values[0]
# Calculate total change in resources
original_total_resources = return_all_state("total_resources")
    # do not preprocess this one: new_resources depends on the chosen reform
new_total_resources = (target_spmu.new_resources * target_spmu.spmwt).sum()
change_total_resources = new_total_resources - original_total_resources
change_pp = change_total_resources / population
original_poverty_rate = return_demog("person", "pov_rate")
original_poverty_gap = return_all_state("poverty_gap")
    # define original Gini coefficient
original_gini = return_all_state("gini")
# function to calculate rel difference between one number and another
def rel_change(new, old, round=3):
return ((new - old) / old).round(round)
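    # e.g. an old poverty gap of 100 and a new one of 90 give -0.1 (a 10% drop)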
# Calculate poverty gap
target_spmu["new_poverty_gap"] = np.where(
target_spmu.new_resources < target_spmu.spmthresh,
target_spmu.spmthresh - target_spmu.new_resources,
0,
)
poverty_gap = mdf.weighted_sum(target_spmu, "new_poverty_gap", "spmwt")
poverty_gap_change = rel_change(poverty_gap, original_poverty_gap)
# Calculate the change in poverty rate
target_persons["poor"] = target_persons.new_resources < target_persons.spmthresh
total_poor = (target_persons.poor * target_persons.asecwt).sum()
poverty_rate = total_poor / population
poverty_rate_change = rel_change(poverty_rate, original_poverty_rate)
# Calculate change in Gini
gini = mdf.gini(target_persons, "new_resources_per_person", "asecwt")
gini_change = rel_change(gini, original_gini, 3)
# Calculate percent winners
target_persons["winner"] = target_persons.new_resources > target_persons.spmtotres
total_winners = (target_persons.winner * target_persons.asecwt).sum()
percent_winners = (total_winners / population * 100).round(1)
# -------------- calculate all of the poverty breakdown numbers -------------- #
# Calculate the new poverty rate for each demographic
def pv_rate(column):
return mdf.weighted_mean(
target_persons[target_persons[column]], "poor", "asecwt"
)
# Round all numbers for display in hover
def hover_string(metric, round_by=1):
"""formats 0.121 to 12.1%"""
string = str(round(metric * 100, round_by)) + "%"
return string
DEMOGS = ["child", "adult", "pwd", "white", "black", "hispanic"]
# create dictionary for demographic breakdown of poverty rates
pov_breakdowns = {
# return precomputed baseline poverty rates
"original_rates": {demog: return_demog(demog, "pov_rate") for demog in DEMOGS},
"new_rates": {demog: pv_rate(demog) for demog in DEMOGS},
}
# add poverty rate changes to dictionary
pov_breakdowns["changes"] = {
# Calculate the percent change in poverty rate for each demographic
demog: rel_change(
pov_breakdowns["new_rates"][demog],
pov_breakdowns["original_rates"][demog],
)
for demog in DEMOGS
}
# create string for hover template
pov_breakdowns["strings"] = {
demog: "Original "
+ demog
+ " poverty rate: "
+ hover_string(pov_breakdowns["original_rates"][demog])
+ "<br><extra></extra>"
+ "New "
+ demog
+ " poverty rate: "
+ hover_string(pov_breakdowns["new_rates"][demog])
for demog in DEMOGS
}
# format original and new overall poverty rate
original_poverty_rate_string = hover_string(original_poverty_rate)
poverty_rate_string = hover_string(poverty_rate)
original_poverty_gap_billions = "{:,}".format(int(original_poverty_gap / 1e9))
poverty_gap_billions = "{:,}".format(int(poverty_gap / 1e9))
original_gini_string = str(round(original_gini, 3))
gini_string = str(round(gini, 3))
# --------------SECTION populates "Results of your reform:" ------------ #
# Convert UBI and winners to string for title of chart
ubi_string = str("{:,}".format(int(round(ubi_annual / 12))))
# populates Monthly UBI
ubi_line = "Monthly UBI: $" + ubi_string
# populates 'Funds for UBI'
revenue_line = "Funds for UBI: $" + numerize.numerize(revenue, 1)
# populates population and revenue for UBI if state selected from dropdown
if state_dropdown != "US":
# filter for selected state
state_spmu = target_spmu[target_spmu.state == state_dropdown]
        # calculate the population of the state receiving UBI
state_ubi_population = (state_spmu.numper_ubi * state_spmu.spmwt).sum()
ubi_population_line = "UBI population: " + numerize.numerize(
state_ubi_population, 1
)
state_revenue = ubi_annual * state_ubi_population
revenue_line = (
"Funds for UBI ("
+ state_dropdown
+ "): $"
+ numerize.numerize(state_revenue, 1)
)
else:
ubi_population_line = "UBI population: " + numerize.numerize(ubi_population, 1)
winners_line = "Percent better off: " + str(percent_winners) + "%"
resources_line = "Average change in resources per person: $" + "{:,}".format(
int(change_pp)
)
# ---------- populate economic breakdown bar chart ------------- #
# Create x-axis labels for each chart
econ_fig_x_lab = ["Poverty rate", "Poverty gap", "Gini index"]
econ_fig_cols = [poverty_rate_change, poverty_gap_change, gini_change]
econ_fig = go.Figure(
[
go.Bar(
x=econ_fig_x_lab,
y=econ_fig_cols,
text=econ_fig_cols,
hovertemplate=[
# poverty rates
"Original poverty rate: "
+ original_poverty_rate_string
+ "<br><extra></extra>"
"New poverty rate: " + poverty_rate_string,
# poverty gap
"Original poverty gap: $"
+ original_poverty_gap_billions
+ "B<br><extra></extra>"
"New poverty gap: $" + poverty_gap_billions + "B",
# gini
"Original Gini index: <extra></extra>"
+ original_gini_string
+ "<br>New Gini index: "
+ gini_string,
],
marker_color=BLUE,
)
]
)
# Edit text and display the UBI amount and percent winners in title
econ_fig.update_layout(
uniformtext_minsize=10,
uniformtext_mode="hide",
plot_bgcolor="white",
title_text="Economic overview",
title_x=0.5,
hoverlabel_align="right",
font_family="Roboto",
title_font_size=20,
paper_bgcolor="white",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Roboto"),
yaxis_tickformat="%",
)
econ_fig.update_traces(texttemplate="%{text:.1%}", textposition="auto")
econ_fig.update_xaxes(
tickangle=45,
title_text="",
tickfont={"size": 14},
title_standoff=25,
title_font=dict(size=14, family="Roboto", color="black"),
)
econ_fig.update_yaxes(
tickprefix="",
tickfont={"size": 14},
title_standoff=25,
title_font=dict(size=14, family="Roboto", color="black"),
)
# ------------------ populate poverty breakdown charts ---------------- #
breakdown_fig_x_lab = [
"Child",
"Adult",
"Has disability",
"White",
"Black",
"Hispanic",
]
breakdown_fig_cols = [pov_breakdowns["changes"][demog] for demog in DEMOGS]
hovertemplate = [pov_breakdowns["strings"][demog] for demog in DEMOGS]
breakdown_fig = go.Figure(
[
go.Bar(
x=breakdown_fig_x_lab,
y=breakdown_fig_cols,
text=breakdown_fig_cols,
hovertemplate=hovertemplate,
marker_color=BLUE,
)
]
)
breakdown_fig.update_layout(
uniformtext_minsize=10,
uniformtext_mode="hide",
plot_bgcolor="white",
title_text="Poverty rate breakdown",
title_x=0.5,
hoverlabel_align="right",
font_family="Roboto",
title_font_size=20,
paper_bgcolor="white",
hoverlabel=dict(bgcolor="white", font_size=14, font_family="Roboto"),
yaxis_tickformat="%",
)
breakdown_fig.update_traces(texttemplate="%{text:.1%}", textposition="auto")
breakdown_fig.update_xaxes(
tickangle=45,
title_text="",
tickfont=dict(size=14, family="Roboto"),
title_standoff=25,
title_font=dict(size=14, family="Roboto", color="black"),
)
breakdown_fig.update_yaxes(
tickprefix="",
tickfont=dict(size=14, family="Roboto"),
title_standoff=25,
title_font=dict(size=14, family="Roboto", color="black"),
)
# set both y-axes to the same range
full_econ_fig = econ_fig.full_figure_for_development(warn=False)
full_breakdown_fig = breakdown_fig.full_figure_for_development(warn=False)
# find the minimum of both y-axes
global_ymin = min(
min(full_econ_fig.layout.yaxis.range),
min(full_breakdown_fig.layout.yaxis.range),
)
global_ymax = max(
max(full_econ_fig.layout.yaxis.range),
max(full_breakdown_fig.layout.yaxis.range),
)
# update the yaxes of the figure to account for both ends of the ranges
econ_fig.update_yaxes(dict(range=[global_ymin, global_ymax], autorange=False))
breakdown_fig.update_yaxes(dict(range=[global_ymin, global_ymax], autorange=False))
# adjust margins to fit mobile better
for fig in [econ_fig, breakdown_fig]:
fig.update_layout(
margin=dict(l=20, r=20),
)
return (
ubi_line,
revenue_line,
ubi_population_line,
winners_line,
resources_line,
econ_fig,
breakdown_fig,
)
@app.callback(
Output("include-checklist", "options"),
Input("include-checklist", "value"),
)
def update(checklist):
"""[summary]
prevent users from excluding both adults and children
Parameters
----------
checklist : list
takes the input "include-checklist" from the callback
Returns
-------
"Include in UBI" checklist with correct options
"""
if "adults" not in checklist:
return [
{"label": "Non-citizens", "value": "non_citizens"},
{"label": "Children", "value": "children", "disabled": True},
{"label": "Adults", "value": "adults"},
]
elif "children" not in checklist:
return [
{"label": "Non-citizens", "value": "non_citizens"},
{"label": "Children", "value": "children"},
{"label": "Adults", "value": "adults", "disabled": True},
]
else:
return [
{"label": "Non-citizens", "value": "non_citizens"},
{"label": "Children", "value": "children"},
{"label": "Adults", "value": "adults"},
]
@app.callback(
Output("benefits-checklist", "options"),
Input("level", "value"),
)
def update(radio):
    """Disable the benefits-checklist options when a state-level reform is selected."""
if "state" in radio:
return [
{"label": " Child Tax Credit", "value": "ctc", "disabled": True},
{
"label": " Supplemental Security Income (SSI)",
"value": "incssi",
"disabled": True,
},
{
"label": " SNAP (food stamps)",
"value": "spmsnap",
"disabled": True,
},
{
"label": " Earned Income Tax Credit",
"value": "eitcred",
"disabled": True,
},
{
"label": " Unemployment benefits",
"value": "incunemp",
"disabled": True,
},
{
"label": " Energy subsidy (LIHEAP)",
"value": "spmheat",
"disabled": True,
},
]
else:
return [
{"label": " Child Tax Credit", "value": "ctc"},
{
"label": " Supplemental Security Income (SSI)",
"value": "incssi",
},
{"label": " SNAP (food stamps)", "value": "spmsnap"},
{"label": " Earned Income Tax Credit", "value": "eitcred"},
{"label": " Unemployment benefits", "value": "incunemp"},
{"label": " Energy subsidy (LIHEAP)", "value": "spmheat"},
]
@app.callback(
Output("taxes-checklist", "options"),
Input("level", "value"),
)
def update(radio):
"""update radio buttons for taxs if state selected"""
if "state" in radio:
return [
{"label": "Income taxes", "value": "fedtaxac"},
{
"label": "Employee side payroll",
"value": "fica",
"disabled": True,
},
]
else:
return [
{"label": "Income taxes", "value": "fedtaxac"},
{"label": "Employee side payroll", "value": "fica"},
]
if __name__ == "__main__":
app.run_server(debug=True, port=8000, host="127.0.0.1")
|
{"hexsha": "fbbe871350b8d25bb1ac09c88f585c79d2365bd8", "size": 42681, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "fedderw/us-calc", "max_stars_repo_head_hexsha": "36a4935122ab7688656f2e5dfcd3fe2cb5d88c8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-12-06T20:21:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-20T19:45:56.000Z", "max_issues_repo_path": "app.py", "max_issues_repo_name": "fedderw/us-calc", "max_issues_repo_head_hexsha": "36a4935122ab7688656f2e5dfcd3fe2cb5d88c8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2020-12-06T21:29:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-02T21:38:22.000Z", "max_forks_repo_path": "app.py", "max_forks_repo_name": "fedderw/us-calc", "max_forks_repo_head_hexsha": "36a4935122ab7688656f2e5dfcd3fe2cb5d88c8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-12-07T22:45:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-13T00:21:46.000Z", "avg_line_length": 36.730636833, "max_line_length": 141, "alphanum_fraction": 0.4775192709, "include": true, "reason": "import numpy", "num_tokens": 8720}
|
\chapter{Miscellaneous Transformations}
\section{Fresnel Term - Schlick's approximation}
The \emph{Fresnel equations} describe the reflection and transmission of an electromagnetic wave at an interface; they provide the reflection and transmission coefficients for the wave. In Computer Graphics we often use \emph{Schlick's approximation}. The specular reflection coefficient $R$ of the Fresnel equation can be approximated by:
\begin{equation}
R(\theta) = R_0 + (1 - R_0)(1 - \cos \theta)^5
\label{eq:schlickapprox}
\end{equation}
and
\begin{equation*}
R_0 = \left(\frac{n_1-n_2}{n_1+n_2}\right)^2
\end{equation*}
where $\theta$ is the angle between the viewing direction and the half-angle direction, which is the direction halfway between the incident
light direction and the viewing direction, i.e. $\cos \theta = (H \cdot V)$. $n_1$ and $n_2$ are the refractive indices of the two media. $R_0$ is the reflection coefficient for light incoming parallel to the normal (i.e., the value of the Fresnel term when $\theta = 0 \degree$, where reflection is minimal). In Computer Graphics one of the interfaces is usually air, meaning that $n_1$ is approximately equal to 1.
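For instance, at an air--glass interface ($n_1 = 1$, $n_2 \approx 1.5$) this yields $R_0 = \left(\frac{1-1.5}{1+1.5}\right)^2 = 0.04$, the value commonly assumed for dielectric materials in Computer Graphics.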
\section{Spherical Coordinates and Space Transformation}
\label{sec:sphericalcoordinates}
\begin{figure}[H]
\centering
\includegraphics[scale=0.35]{sphericalcoordinates.png}
\caption[Illustration of Spherical Coordinate System]{Illustration$\footnotemark$ of spherical coordinates $(r,\theta,\phi)$: radius $r$, polar (inclination) angle $\theta$, and azimuthal angle $\phi$.}
\label{fig:sphericalcoordinatesystem}
\end{figure}
\footnotetext{image source of figure $\ref{fig:sphericalcoordinatesystem}$ \texttt{http://en.wikipedia.org/wiki/Spherical\textunderscore coordinate\textunderscore system}}
To define a \emph{spherical coordinate} system as shown in figure $\ref{fig:sphericalcoordinatesystem}$ we need two particular angles, the \emph{polar angle} $\theta$ and the \emph{azimuthal angle} $\phi$, plus a radius $r$. The Cartesian coordinates $(x,y,z)$ may then be retrieved from their spherical coordinate representation as follows: $\forall \colvec[x]{y}{z} \in \mathbb{R}^3 : \exists r \in [0,\infty) \exists \phi \in [0,2\pi] \exists \theta \in [0,\pi] $ s.t.
\begin{equation*}
\colvec[x]{y}{z} = \colvec[r\sin(\theta)\cos(\phi)]{r\sin(\theta)\sin(\phi)}{r\cos(\theta)}
\label{eq:sphericalcoordinates}
\end{equation*}
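Conversely, the spherical coordinates may be recovered from the Cartesian representation via
\begin{equation*}
r = \sqrt{x^2+y^2+z^2}, \qquad \theta = \arccos\left(\frac{z}{r}\right), \qquad \phi = \operatorname{atan2}(y, x)
\end{equation*}
where $\operatorname{atan2}$ denotes the quadrant-aware arctangent.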
\label{sec:componentw}
From the definition $\ref{eq:uvw}$ of $(u,v,w)$, where $w = -\omega_i - \omega_r$, and using spherical coordinates $\ref{eq:sphericalcoordinates}$, we get for $w$ the following identity:
\begin{align}
w
&= -\omega_i - \omega_r \nonumber \\
&= -(\omega_i + \omega_r) \nonumber \\
&= -\left( \cos(\theta_i)+\cos(\theta_r) \right)
\label{eq:sphericalomega}
\end{align}
and therefore $w^2$ is equal to $(\cos(\theta_i)+\cos(\theta_r))^2$.
\section{Tangent Space}
\label{sec:tangentspace}
The concept of performing a transformation into the \emph{tangent space} is used in order to convert a point between world space and its local (tangent) space. \\
We can think of the tangent space as a bumpy surface defined on a flat plane. If the normals of a fragment were defined in a world space coordinate system, we would have to rotate these normals every time the model is rotated - even if just by a small amount. Since lights, the camera and other scene primitives are usually defined in the world space coordinate system, we would have to rotate them according to every fragment position. This would require applying countless object-to-world transformations at a pixel level. The workaround for this issue is to transform all vertex primitives into tangent space in the vertex shader. \\
To make this point clear: even if we rotated the cube shown in figure $\ref{fig:cubeintangentspace}$, its tangent-space axes would remain aligned w.r.t. its face. This saves us from applying many space transformations on fragments.
\begin{figure}[H]
\centering
\includegraphics[scale=0.6]{tangentspace.png}
\caption[Illustration of a Tangent Space]{Cube in world space $(x,y,z)$ showing the tangent-space $(u,v,n)$ of its face $(2,1,3)$}
\label{fig:cubeintangentspace}
\end{figure}
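As an illustration of how this space is used: a normal $n_t$ sampled in tangent space can be transformed back to world space by the matrix whose columns are the tangent-space axes $(u,v,n)$ of the face expressed in world coordinates, i.e. $n_{world} = \begin{pmatrix} u & v & n \end{pmatrix} n_t$.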
|
{"hexsha": "d4f5fe3e19e39e9ff82e7dcf6ac5adccd8206859", "size": 4280, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "document/Source/Chapters/appendix.tex", "max_stars_repo_name": "simplay/Bachelor-Thesis", "max_stars_repo_head_hexsha": "ef450c5420b768b2a1fd84c9ad768f34db12fc88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "document/Source/Chapters/appendix.tex", "max_issues_repo_name": "simplay/Bachelor-Thesis", "max_issues_repo_head_hexsha": "ef450c5420b768b2a1fd84c9ad768f34db12fc88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-05-13T14:35:57.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-13T14:35:57.000Z", "max_forks_repo_path": "document/Source/Chapters/appendix.tex", "max_forks_repo_name": "simplay/Bachelor-Thesis", "max_forks_repo_head_hexsha": "ef450c5420b768b2a1fd84c9ad768f34db12fc88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 69.0322580645, "max_line_length": 647, "alphanum_fraction": 0.7448598131, "num_tokens": 1141}
|
import numpy as np
import ast
def newtonInterpolation(x, y):
x = ast.literal_eval(x)
y = ast.literal_eval(y)
n = len(y)
table = np.zeros([n, n]) # Create a square matrix to hold table
table[::, 0] = y # first column is y
results = {"table": [], "coefficient": []}
results["table"].append(y)
""" Creates Newton table and extracts coefficients """
for j in range(1, n):
column = []
for i in range(j):
column.append(0)
for i in range(n - j):
# create table by updating other columns
table[i][j] = (table[i + 1][j - 1] - table[i][j - 1]) / (x[i + j] - x[i])
            column.append(table[i][j])
results["table"].append(column)
coeff = table[0] # return first row
for c in coeff:
results["coefficient"].append(c)
polynom = ""
for i in range(n):
polynom += str(round(table[0][i],4))
for j in range(i):
polynom+= "*( x -"+ str(round(x[j],4))+ ")"
if (i != n - 1):
polynom += "+"
polynom = polynom.replace(" ", "").replace("--", "+").replace("++", "+").replace("+-", "-").replace("-+", "-")
results["polynom"] = polynom
return results
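# Minimal usage sketch (hypothetical inputs): interpolating the points
# (0, 1), (1, 3), (2, 7) yields divided-difference coefficients 1, 2, 1,
# i.e. p(x) = 1 + 2*(x - 0) + 1*(x - 0)*(x - 1) = x**2 + x + 1.
if __name__ == "__main__":
    demo = newtonInterpolation("[0, 1, 2]", "[1, 3, 7]")
    print(demo["coefficient"])  # [1.0, 2.0, 1.0]
    print(demo["polynom"])      # 1.0+2.0*(x-0)+1.0*(x-0)*(x-1)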
|
{"hexsha": "14b7cc3639476ce7acd8b507dbf62e2164b1947f", "size": 1268, "ext": "py", "lang": "Python", "max_stars_repo_path": "methods/newtonInterpolation.py", "max_stars_repo_name": "eechava6/NumericalAnalysis", "max_stars_repo_head_hexsha": "1b44349fe4c5e24413c3d5faeca7d227272814ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "methods/newtonInterpolation.py", "max_issues_repo_name": "eechava6/NumericalAnalysis", "max_issues_repo_head_hexsha": "1b44349fe4c5e24413c3d5faeca7d227272814ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "methods/newtonInterpolation.py", "max_forks_repo_name": "eechava6/NumericalAnalysis", "max_forks_repo_head_hexsha": "1b44349fe4c5e24413c3d5faeca7d227272814ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9268292683, "max_line_length": 115, "alphanum_fraction": 0.4936908517, "include": true, "reason": "import numpy", "num_tokens": 348}
|
# Copyright (c) xiaoxuan : https://github.com/shawnau/kaggle-HPA
# modified by sailfish009
import sys
sys.path.append('../')
import os
import math
import operator
from functools import reduce
from collections import Counter
import numpy as np
import pandas as pd
from .ml_stratifiers import MultilabelStratifiedShuffleSplit
def combine_dataset(root, a_csv, b_csv):
df_train = pd.read_csv(os.path.join(root, a_csv))
df_external = pd.read_csv(os.path.join(root, b_csv))
df_combined = pd.concat([df_train, df_external])
df_combined.reset_index(drop=True, inplace=True)
print("train: %d, external: %d, combined: %d" % (len(df_train), len(df_external), len(df_combined)))
df_combined['target_vec'] = df_combined['Target'].apply(str2vec)
return df_combined
def str2vec(s):
tags = list(map(int, s.split(' ')))
vec = np.zeros(28)
vec[tags] = 1
return vec.tolist()
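# For example (hypothetical label string): str2vec('0 3 27') returns a
# 28-element list of floats with 1.0 at indices 0, 3 and 27.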
def train_valid_split(df, n_splits=4) -> ([pd.DataFrame], [pd.DataFrame]):
df_backup = df.copy()
X = df['Id'].tolist()
y = df['target_vec'].tolist()
msss = MultilabelStratifiedShuffleSplit(n_splits=n_splits, test_size=0.1, random_state=42)
train_dfs, valid_dfs = [], []
for train_index, valid_index in msss.split(X,y):
train_df = df_backup.iloc[train_index]
train_dfs.append(train_df[['Id', 'Target']])
valid_df = df_backup.iloc[valid_index]
valid_dfs.append(valid_df[['Id', 'Target']])
return train_dfs, valid_dfs
def count_distrib(df):
tag_list = df['Target'].tolist()
tag_list = reduce(operator.add, map(lambda x: list(map(int, x.split(' '))), tag_list))
return Counter(tag_list)
def create_class_weight(root, labels_dict, mu=0.5):
"""
this is for calculating weighted BCE loss only
:param labels_dict:
:param mu:
:return:
"""
total = sum(labels_dict.values())
keys = labels_dict.keys()
class_weight = dict()
class_weight_log = dict()
for key in keys:
score = total / float(labels_dict[key])
score_log = math.log(mu * total / float(labels_dict[key]))
class_weight[key] = round(score, 2) if score > 1.0 else round(1.0, 2)
class_weight_log[key] = round(score_log, 2) if score_log > 1.0 else round(1.0, 2)
class_weight_vec = np.zeros(len(class_weight))
class_weight_log_vec = np.zeros(len(class_weight))
for k in class_weight:
class_weight_vec[k] = class_weight[k]
for k in class_weight_log:
class_weight_log_vec[k] = class_weight_log[k]
return class_weight_vec, class_weight_log_vec
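# Worked example (hypothetical counts): create_class_weight(root, {0: 100, 1: 10}, mu=0.5)
# returns class_weight_vec = [1.1, 11.0] (total/count, floored at 1.0) and
# class_weight_log_vec = [1.0, 1.7] (log(mu*total/count), floored at 1.0).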
def create_sample_weight(root, df, save_name, mu=1.0):
"""
assign sample weights for each sample. rare sample have higher weights(linearly)
refer to
:param df:
:param save_name:
:param mu:
:return:
"""
label_list = df['Target'].tolist()
    import pickle
    # operator, reduce and Counter are already imported at module level
freq_count = dict(Counter(
reduce(operator.add,
map(lambda x: list(map(int, x.split(' '))),
label_list
)
)
))
total = sum(freq_count.values())
keys = freq_count.keys()
assert sorted(list(keys)) == list(range(len(keys)))
class_weight = dict()
class_weight_log = dict()
for key in range(len(keys)):
score = total / float(freq_count[key])
score_log = math.log(mu * total / float(freq_count[key]))
class_weight[key] = round(score, 2) if score > 1.0 else round(1.0, 2)
class_weight_log[key] = round(score_log, 2) if score_log > 1.0 else round(1.0, 2)
rareness = [x[0] for x in sorted(freq_count.items(), key=operator.itemgetter(1))]
weights = []
sample_labels = list(map(lambda x: list(map(int, x.split(' '))), label_list))
for labels in sample_labels:
for rare_label in rareness:
if rare_label in labels:
weights.append(class_weight[rare_label])
break
assert len(weights) == len(label_list)
save_path = os.path.join(root, save_name)
with open(save_path, 'wb') as f:
pickle.dump(weights, f)
print("%d weights saved into %s" % (len(label_list), save_path))
def calc_statistics(cfg, loader='train'):
"""
    calculate per-channel mean and std for the given data split
    please disable normalization in the dataset's transform manually before running this
:return:
"""
from .data import make_data_loader
if loader == 'train':
data_loader = make_data_loader(cfg, cfg.DATASETS.TRAIN, is_train=True)
elif loader == 'valid':
data_loader = make_data_loader(cfg, cfg.DATASETS.VALID, is_train=False)
elif loader == 'test':
data_loader = make_data_loader(cfg, cfg.DATASETS.TEST, is_train=False)
else:
raise KeyError('loader must be specified')
mean = 0.
std = 0.
nb_samples = 0.
for iteration, (images, targets, indices) in enumerate(data_loader):
batch_size = images.size(0)
images = images.view(batch_size, images.size(1), -1)
mean += images.mean(2).sum(0)
std += images.std(2).sum(0)
nb_samples += batch_size
running_mean = mean / nb_samples
running_std = std / nb_samples
print("iter %d running mean: " % iteration, running_mean)
print("iter %d running std : " % iteration, running_std)
# if __name__ == "__main__":
# df_combined = combine_dataset("kaggle/")
#     train_dfs, valid_dfs = train_valid_split(df_combined, 4)
# for i in range(4):
# train_df = train_dfs[i]
# train_df.to_csv("train_split_%d.csv"%i, index=False)
# valid_df = valid_dfs[i]
# valid_df.to_csv("valid_split_%d.csv"%i, index=False)
#
# create_sample_weight(train_df, "train_split_%d_weights.pickle"%i)
# class_weight_vec, class_weight_log_vec = create_class_weight(dict(count_distrib(df_combined)), mu=0.5)
|
{"hexsha": "a948edbf64c48251f59fc659a9447dd6892630c6", "size": 5930, "ext": "py", "lang": "Python", "max_stars_repo_path": "custom/protein/preprocess.py", "max_stars_repo_name": "sailfish009/mydl", "max_stars_repo_head_hexsha": "ca2dfd1ff1b609dbd76ef966bbe53b7854f6f78f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "custom/protein/preprocess.py", "max_issues_repo_name": "sailfish009/mydl", "max_issues_repo_head_hexsha": "ca2dfd1ff1b609dbd76ef966bbe53b7854f6f78f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "custom/protein/preprocess.py", "max_forks_repo_name": "sailfish009/mydl", "max_forks_repo_head_hexsha": "ca2dfd1ff1b609dbd76ef966bbe53b7854f6f78f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.476744186, "max_line_length": 108, "alphanum_fraction": 0.6507588533, "include": true, "reason": "import numpy", "num_tokens": 1529}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
from os.path import abspath, exists
import numpy as np
import mdtraj as md
import matplotlib.pyplot as plt
import utils
import colvars
logger = logging.getLogger("SingleIterationPostProcessor")
def load_swarm(runner, point_idx, swarm_batch_idx, swarm_idx, ignore_missing_files=False,
traj_filetypes=["xtc", "trr"], fallback_on_restrained_output=True):
"""
Load a single swarm trajectory
:param runner:
:param point_idx:
:param swarm_batch_idx:
:param swarm_idx:
:param ignore_missing_files: return None instead of throwing exception when files are not found
:param traj_filetypes: types of trajectory file types to try and load
:param fallback_on_restrained_output: Try to load the restrained last frame and the .gro output for this swarm and create a 2-frame trajectory from this
:return: trajectory or None
"""
# Load swarm trajectory
found_traj = False
for ftype in traj_filetypes:
trajpath = runner.point_path(point_idx) + runner.swarm_name(point_idx, swarm_batch_idx, swarm_idx) + "." + ftype
if exists(trajpath):
found_traj = True
break
if not found_traj:
# if fallback_on_restrained_output:
# return load_restrained_out_swarm(runner, point_idx, swarm_batch_idx, swarm_idx,
# ignore_missing_files=ignore_missing_files)
if ignore_missing_files:
logger.warn("File %s not found. Skipping this swarm", trajpath)
return None
else:
raise IOError("Swarm %s-%s not found for point %s at iteration %s" % (
swarm_batch_idx, swarm_idx, point_idx, runner.iteration))
trajpath = abspath(trajpath)
try:
swarmtraj = md.load(trajpath, top=runner.topology)
except Exception as ex:
logger.exception(ex)
logger.error("Could not load file %s.", trajpath)
if fallback_on_restrained_output:
            # Quite often the trajectory file is unreadable (e.g. truncated),
            # so fall back to the restrained output when possible.
return load_restrained_out_swarm(runner, point_idx, swarm_batch_idx, swarm_idx,
ignore_missing_files=ignore_missing_files)
raise ex
return swarmtraj
def load_restrained_out_swarm(runner, point_idx, swarm_batch_idx, swarm_idx, ignore_missing_files=False):
"""
Try to load the restrained last frame and the .gro output for this swarm and create a 2-frame trajectory from this
:param runner:
:param point_idx:
:param swarm_batch_idx:
:param swarm_idx:
:param ignore_missing_files: return None if trajectory is not found instead of throwing exception
:return: a 2-frame trajectory
"""
restrained_out = load_restrained(runner, point_idx, only_last_frame=True, ignore_missing_files=ignore_missing_files)
swarm_out = load_swarm(runner, point_idx, swarm_batch_idx, swarm_idx, traj_filetypes=["gro"],
ignore_missing_files=ignore_missing_files, fallback_on_restrained_output=False)
if restrained_out is None or swarm_out is None:
msg = "%s not found from restrained_out. Skipping this swarm" % runner.swarm_name(point_idx, swarm_batch_idx,
swarm_idx)
if ignore_missing_files:
logger.warn(msg)
return None
else:
raise IOError(msg)
return restrained_out + swarm_out
def load_restrained(runner, point_idx, traj_filetypes=["trr", "xtc", "gro"], ignore_missing_files=False,
only_last_frame=True):
"""
:param runner:
:param point_idx:
:param traj_filetypes: types of trajectory file types to try and load
:param ignore_missing_files: return None if trajectory is not found instead of throwing exception
:param only_last_frame: only return the last frame
:return:
"""
found_traj = False
for ftype in traj_filetypes:
trajpath = runner.point_path(point_idx) + runner.point_name(point_idx) + "-restrained.%s" % ftype
if exists(trajpath):
found_traj = True
break
if not found_traj:
msg = "File %s not found. Skipping this swarm" % trajpath
if ignore_missing_files:
logger.warn(msg)
return None
else:
raise IOError(msg)
restrained = md.load(trajpath, top=runner.topology)
return restrained[-1] if only_last_frame else restrained
def merge_restrained(runner, traj_filetypes=["trr", "xtc", "gro"]):
"""Merge all restrained simulation endpoints for this iteration"""
traj = None
for idx in range(len(runner.stringpath)):
if runner.fixed_endpoints and (idx == 0 or idx == len(runner.stringpath) - 1):
continue
t = load_restrained(runner, idx, traj_filetypes=traj_filetypes)
if traj is None:
traj = t
else:
traj += t
return traj
def save_string(string_filepath, iteration, stringpath, append_length=False):
if append_length:
suffix = "{}_len{}".format(iteration, len(stringpath))
else:
suffix = str(iteration)
name = string_filepath % suffix
np.savetxt(name, stringpath)
def create_stringpath_files_of_different_lengths(runner, short_stringpath, number_of_points_to_add=1):
"""Modifies the current iteration's output string and adds a point to it. The original string is saved with a suffix"""
long_stringpath = utils.change_string_length(short_stringpath, len(short_stringpath) + number_of_points_to_add)
save_string(runner.string_filepath, runner.iteration, short_stringpath, append_length=True)
save_string(runner.string_filepath, runner.iteration, long_stringpath, append_length=True)
logger.info("Added %s points to string for iteration %s. New string length=%s", number_of_points_to_add,
runner.iteration, len(long_stringpath))
return long_stringpath
def save_input_coordinate_mapping(string_filepath, iteration, input_coordinate_mapping):
filepath = string_filepath % (str(iteration) + "-mapping")
np.savetxt(filepath, input_coordinate_mapping)
|
{"hexsha": "df41555b18281e7b1b5ce196861c57f7bd6e3924", "size": 6471, "ext": "py", "lang": "Python", "max_stars_repo_path": "string-method/src/stringprocessor/processing_utils.py", "max_stars_repo_name": "delemottelab/gpcr-string-method-2019", "max_stars_repo_head_hexsha": "b50786a4a8747d56ad04ede525592eb31f1890fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "string-method/src/stringprocessor/processing_utils.py", "max_issues_repo_name": "delemottelab/gpcr-string-method-2019", "max_issues_repo_head_hexsha": "b50786a4a8747d56ad04ede525592eb31f1890fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "string-method/src/stringprocessor/processing_utils.py", "max_forks_repo_name": "delemottelab/gpcr-string-method-2019", "max_forks_repo_head_hexsha": "b50786a4a8747d56ad04ede525592eb31f1890fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-03-16T04:33:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-19T17:25:59.000Z", "avg_line_length": 41.7483870968, "max_line_length": 156, "alphanum_fraction": 0.6845927986, "include": true, "reason": "import numpy", "num_tokens": 1433}
|
/-
Copyright (c) 2020 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau
-/
import ring_theory.integrally_closed
import ring_theory.valuation.integers
/-!
# Integral elements over the ring of integers of a valuation
The ring of integers is integrally closed inside the original ring.
-/
universes u v w
open_locale big_operators
namespace valuation
namespace integers
section comm_ring
variables {R : Type u} {Γ₀ : Type v} [comm_ring R] [linear_ordered_comm_group_with_zero Γ₀]
variables {v : valuation R Γ₀} {O : Type w} [comm_ring O] [algebra O R] (hv : integers v O)
include hv
open polynomial
lemma mem_of_integral {x : R} (hx : is_integral O x) : x ∈ v.integer :=
let ⟨p, hpm, hpx⟩ := hx in le_of_not_lt $ λ hvx, begin
rw [hpm.as_sum, eval₂_add, eval₂_pow, eval₂_X, eval₂_finset_sum, add_eq_zero_iff_eq_neg] at hpx,
replace hpx := congr_arg v hpx, refine ne_of_gt _ hpx,
rw [v.map_neg, v.map_pow],
refine v.map_sum_lt' (zero_lt_one₀.trans_le (one_le_pow_of_one_le' hvx.le _)) (λ i hi, _),
rw [eval₂_mul, eval₂_pow, eval₂_C, eval₂_X, v.map_mul, v.map_pow, ← one_mul (v x ^ p.nat_degree)],
cases (hv.2 $ p.coeff i).lt_or_eq with hvpi hvpi,
{ exact mul_lt_mul₀ hvpi (pow_lt_pow₀ hvx $ finset.mem_range.1 hi) },
{ erw hvpi, rw [one_mul, one_mul], exact pow_lt_pow₀ hvx (finset.mem_range.1 hi) }
end
protected lemma integral_closure : integral_closure O R = ⊥ :=
bot_unique $ λ r hr, let ⟨x, hx⟩ := hv.3 (hv.mem_of_integral hr) in algebra.mem_bot.2 ⟨x, hx⟩
end comm_ring
section fraction_field
variables {K : Type u} {Γ₀ : Type v} [field K] [linear_ordered_comm_group_with_zero Γ₀]
variables {v : valuation K Γ₀} {O : Type w} [comm_ring O] [is_domain O]
variables [algebra O K] [is_fraction_ring O K]
variables (hv : integers v O)
lemma integrally_closed : is_integrally_closed O :=
(is_integrally_closed.integral_closure_eq_bot_iff K).mp (valuation.integers.integral_closure hv)
end fraction_field
end integers
end valuation
|
{"author": "jjaassoonn", "repo": "projective_space", "sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce", "save_path": "github-repos/lean/jjaassoonn-projective_space", "path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/ring_theory/valuation/integral.lean"}
|
# Copyright 2017 Softplan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from classification_algorithm import ClassificationAlgorithm
from sklearn.model_selection import train_test_split
import numpy as np
import psycopg2
from psycopg2.extensions import AsIs
import os
from boto.s3.connection import S3Connection
s3 = S3Connection(os.getenv('DATABASE_NAME', 'postgres'), os.getenv('DATABASE_USER', 'postgres'), os.getenv('DATABASE_PASSWORD', 'postgres'), os.getenv('DATABASE_HOST', 'intellead-classification-postgresql'), os.getenv('DATABASE_PORT', 5432))
def classification(customer, lead):
try:
inputs = get_dataset_input_from_database(customer)
outputs = get_dataset_output_from_database(customer)
algorithm = get_algorithm(customer)
        print('Number of examples in dataset: %d' % (len(inputs)))
        inputs_training, inputs_test, outputs_training, outputs_test = train_test_split(inputs, outputs, test_size=0.2, random_state=42)
        print('Examples used for training: %d' % (len(inputs_training)))
        print('Examples used for testing: %d' % (len(inputs_test)))
clf = algorithm
clf.fit(inputs_training, np.ravel(outputs_training))
print('Score Trainning: %f%%' % (clf.score(inputs_training, outputs_training) * 100))
print('Score Test: %f%%' % (clf.score(inputs_test, outputs_test) * 100))
print('Lead data:')
print(lead)
data_to_predict = convert_dict_to_tuple(lead, customer)
print('Lead data to predict:')
print(data_to_predict)
        # scikit-learn expects a 2D array of samples, so wrap the single lead in a list
        lead_status = clf.predict([data_to_predict])
        lead_status_value = lead_status[0]
        proba = clf.predict_proba([data_to_predict])
        max_proba = max(proba[0])
print('According to lead data, his status is: %s' % ("QUALIFICADO" if lead_status_value == '1' else "NÃO QUALIFICADO"))
print('Proba is: %d%%' % (max_proba*100))
        lead_status_dict = {'value': str(lead_status_value), 'proba': str(max_proba)}
        return lead_status_dict
except Exception as ex:
print(ex)
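# Hypothetical usage sketch (input field names depend on the customer's dataset):
#   lead = {'email': 'a@b.com', 'some_input_field': 1, ...}
#   result = classification('some_customer', lead)
# On success, result is a dict such as {'value': '1', 'proba': '0.8'},
# i.e. the predicted lead status and the classifier's confidence.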
def get_dataset_input_from_database(customer):
    rows = []
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' example_value.value '
' FROM '
' example_values example_value '
' INNER JOIN examples example ON example.id = example_value.example_id '
' INNER JOIN fields field ON example_value.field_id = field.id '
' WHERE '
' example.customer = %s '
' AND field.type = \'input\' '
' AND field.customer = %s '
' ORDER BY '
' example_value.example_id , '
' field.name ', [customer, customer])
rows = cur.fetchall()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return join_rows(customer, rows)
def join_rows(customer, rows):
fields = count_fields(customer)
examples = count_examples(customer)
rows_array = np.array(rows)
joined = [[0 for x in range(fields)] for y in range(examples)]
for index, row in enumerate(rows_array):
field = index % fields
example = index // fields
joined[example][field] = row[0]
return joined
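# Worked example (assuming count_fields(customer) == 3 and count_examples(customer) == 2):
# rows = [(a1,), (a2,), (a3,), (b1,), (b2,), (b3,)], ordered by example then field,
# is reshaped into [[a1, a2, a3], [b1, b2, b3]].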
def count_fields(customer):
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' COUNT(*) '
' FROM '
' fields field '
' WHERE '
' field.type = \'input\' '
' AND field.customer = %s ', [customer])
count = cur.fetchone()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return count[0]
def count_examples(customer):
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' COUNT(*) '
' FROM '
' examples example '
' WHERE '
' example.customer = %s ', [customer])
count = cur.fetchone()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return count[0]
def get_dataset_output_from_database(customer):
    rows = []
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' example_value.value '
' FROM '
' example_values example_value '
' INNER JOIN examples example ON example.id = example_value.example_id '
' INNER JOIN fields field ON example_value.field_id = field.id '
' WHERE '
' example.customer = %s '
' AND field.type = \'output\' '
' AND field.customer = %s ', [customer, customer])
rows = cur.fetchall()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return np.array(rows)
def save_lead_in_dataset(data, customer):
example_id = max_example_id() + 1
example_value_id = max_example_value_id() + 1
save_example_in_dataset(example_id, customer)
try:
fields = get_customer_fields(customer)
inserts = []
for column in fields:
id = example_value_id
example_value_id += 1
field_id = int(column[1])
value = str(data[column[0]])
insert = (id, example_id, field_id, value)
inserts.append(insert)
inserts.append((example_value_id, example_id, get_customer_output_field(customer), '1'))
inserts.append((example_value_id, example_id, get_customer_email_field(customer), data['email']))
conn = get_connection()
cur = conn.cursor()
for insert in inserts:
cur.execute('insert into example_values (id, example_id, field_id, value) values %s', (insert,))
conn.commit()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def save_example_in_dataset(example_id, customer):
try:
columns = ['id', 'customer']
values = (example_id, customer)
conn = get_connection()
cur = conn.cursor()
cur.execute('insert into examples (%s) values %s', (AsIs(','.join(columns)), tuple(values)))
conn.commit()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def max_example_id():
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' MAX(example.id) '
' FROM '
' examples example ')
max = cur.fetchone()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return max[0]
def max_example_value_id():
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' MAX(example_value.id) '
' FROM '
' example_values example_value ')
max = cur.fetchone()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return max[0]
def convert_dict_to_tuple(data, customer):
tup = ()
for index, row in enumerate(get_customer_fields(customer)):
tup += (data[row[0]],)
return tup
def get_customer_fields(customer):
rows = [];
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' field.name, '
' field.id '
' FROM '
' fields field '
' WHERE '
' field.type = \'input\' '
' AND field.customer = %s '
' ORDER BY '
' field.name ', [customer])
rows = cur.fetchall()
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return np.array(rows)
def get_customer_output_field(customer):
id = None
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' field.id '
' FROM '
' fields field '
' WHERE '
' field.type = \'output\' '
' AND field.customer = %s '
' ORDER BY '
' field.name ', [customer])
id = cur.fetchone()[0]
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return id
def get_customer_email_field(customer):
id = None
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' field.id '
' FROM '
' fields field '
' WHERE '
' field.type = \'id\' '
' AND field.customer = %s '
' AND field.name = \'email\' '
' ORDER BY '
' field.name ', [customer])
id = cur.fetchone()[0]
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return id
def get_algorithm(customer):
algorithm = None
try:
conn = get_connection()
cur = conn.cursor()
cur.execute(' SELECT '
' algorithm '
' FROM '
' customer_config '
' WHERE '
' customer = %s ', [customer])
algorithm = cur.fetchone()[0]
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
if algorithm is not None:
return ClassificationAlgorithm[algorithm].value
return ClassificationAlgorithm.KNN.value
def get_connection():
    # Initialize conn so the finally block cannot hit a NameError if connect fails
    conn = None
    try:
        conn = psycopg2.connect(host=os.getenv('DATABASE_HOST', 'intellead-classification-postgresql'), database=os.getenv('DATABASE_NAME', 'postgres'),
                                user=os.getenv('DATABASE_USER', 'postgres'), password=os.getenv('DATABASE_PASSWORD', 'postgres'))
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            return conn
|
{"hexsha": "3b6e1c7678354b11c31463fa6993eb5863c91a8e", "size": 12266, "ext": "py", "lang": "Python", "max_stars_repo_path": "service.py", "max_stars_repo_name": "vinigomes/intellead-classification", "max_stars_repo_head_hexsha": "8a5ed549606288f906f07dd30096a50675c1c71c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-10-01T02:30:08.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-29T20:27:03.000Z", "max_issues_repo_path": "service.py", "max_issues_repo_name": "vinigomes/intellead-classification", "max_issues_repo_head_hexsha": "8a5ed549606288f906f07dd30096a50675c1c71c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2017-12-08T01:06:46.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-01T15:37:11.000Z", "max_forks_repo_path": "service.py", "max_forks_repo_name": "vinigomes/intellead-classification", "max_forks_repo_head_hexsha": "8a5ed549606288f906f07dd30096a50675c1c71c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-03-01T16:02:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-26T00:46:00.000Z", "avg_line_length": 33.9778393352, "max_line_length": 242, "alphanum_fraction": 0.5428827654, "include": true, "reason": "import numpy", "num_tokens": 2485}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 6 20:16:27 2018
@author: Isaac
"""
import numpy
def sort_minimum(numbers):
""" This is the description of the function ~ Loves it1
Parameters
----------
numbers : array
array to sort
Returns
-------
array
printed array
"""
n = []
for x in numbers:
n.append(x)
n = [float(x) for x in n]
n.sort()
return n
def bubble_sort(numbers):
""" This is the description of the function ~ Loves it2
Parameters
----------
numbers : array
array to sort
Returns
-------
array
printed array
"""
n = []
for x in numbers:
n.append(x)
n = [float(x) for x in n]
n.sort()
return n
def complex_sort(numbers):
""" This is the description of the function ~ Loves it3
Parameters
----------
numbers : array
array to sort
Returns
-------
array
printed array
"""
return sorted(numbers, key=abs)
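# Minimal usage sketch (hypothetical inputs):
if __name__ == "__main__":
    print(sort_minimum([3, 1, 2]))        # [1.0, 2.0, 3.0]
    print(bubble_sort(["2", "10", "1"]))  # [1.0, 2.0, 10.0]
    print(complex_sort([-3, 1, -2]))      # [1, -2, -3]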
|
{"hexsha": "fed9ef8f001856e2169688de9d6bfcf2d9e1d59e", "size": 1247, "ext": "py", "lang": "Python", "max_stars_repo_path": "week6.py", "max_stars_repo_name": "IsaacW4/Operational-Research", "max_stars_repo_head_hexsha": "2f172a14e9302ea56a4beb8b0e334b84df7b406b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "week6.py", "max_issues_repo_name": "IsaacW4/Operational-Research", "max_issues_repo_head_hexsha": "2f172a14e9302ea56a4beb8b0e334b84df7b406b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "week6.py", "max_forks_repo_name": "IsaacW4/Operational-Research", "max_forks_repo_head_hexsha": "2f172a14e9302ea56a4beb8b0e334b84df7b406b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.0821917808, "max_line_length": 64, "alphanum_fraction": 0.4314354451, "include": true, "reason": "import numpy", "num_tokens": 267}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include <boost/container/flat_map.hpp>
#include <boost/optional/optional.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "kudu/common/common.pb.h"
#include "kudu/common/iterator.h"
#include "kudu/common/partition.h"
#include "kudu/common/rowblock.h"
#include "kudu/common/schema.h"
#include "kudu/common/wire_protocol.h"
#include "kudu/consensus/consensus.pb.h"
#include "kudu/consensus/consensus_meta.h"
#include "kudu/consensus/consensus_meta_manager.h"
#include "kudu/consensus/log.pb.h"
#include "kudu/consensus/log_anchor_registry.h"
#include "kudu/consensus/log_index.h"
#include "kudu/consensus/log_reader.h"
#include "kudu/consensus/log_util.h"
#include "kudu/consensus/metadata.pb.h"
#include "kudu/consensus/opid.pb.h"
#include "kudu/fs/block_id.h"
#include "kudu/fs/block_manager.h"
#include "kudu/fs/data_dirs.h"
#include "kudu/fs/dir_manager.h"
#include "kudu/fs/fs_manager.h"
#include "kudu/fs/io_context.h"
#include "kudu/gutil/map-util.h"
#include "kudu/gutil/ref_counted.h"
#include "kudu/gutil/strings/escaping.h"
#include "kudu/gutil/strings/human_readable.h"
#include "kudu/gutil/strings/join.h"
#include "kudu/gutil/strings/numbers.h"
#include "kudu/gutil/strings/stringpiece.h"
#include "kudu/gutil/strings/substitute.h"
#include "kudu/gutil/strings/util.h"
#include "kudu/master/sys_catalog.h"
#include "kudu/rpc/messenger.h"
#include "kudu/tablet/diskrowset.h"
#include "kudu/tablet/metadata.pb.h"
#include "kudu/tablet/rowset.h"
#include "kudu/tablet/rowset_metadata.h"
#include "kudu/tablet/tablet_mem_trackers.h"
#include "kudu/tablet/tablet_metadata.h"
#include "kudu/tablet/tablet_replica.h"
#include "kudu/tools/tool_action.h"
#include "kudu/tools/tool_action_common.h"
#include "kudu/tserver/tablet_copy_client.h"
#include "kudu/tserver/ts_tablet_manager.h"
#include "kudu/util/env.h"
#include "kudu/util/env_util.h"
#include "kudu/util/faststring.h"
#include "kudu/util/memory/arena.h"
#include "kudu/util/metrics.h"
#include "kudu/util/net/net_util.h"
#include "kudu/util/pb_util.h"
#include "kudu/util/status.h"
DEFINE_bool(dump_all_columns, true,
"If true, dumped rows include all of the columns in the rowset. If "
"false, dumped rows include just the key columns (in a comparable format).");
DEFINE_bool(dump_metadata, true,
"If true, dumps rowset metadata before dumping data. If false, "
"only dumps the data.");
DEFINE_int64(nrows, -1, "Number of rows to dump. If negative, dumps all rows.");
DEFINE_bool(list_detail, false,
"Print partition info for the local replicas");
DEFINE_int64(rowset_index, -1,
"Index of the rowset in local replica, default value(-1) "
"will dump all the rowsets of the local replica");
DEFINE_bool(clean_unsafe, false,
"Delete the local replica completely, not leaving a tombstone. "
"This is not guaranteed to be safe because it also removes the "
"consensus metadata (including Raft voting record) for the "
"specified tablet, which violates the Raft vote durability requirements.");
namespace kudu {
namespace tools {
using consensus::ConsensusMetadata;
using consensus::ConsensusMetadataManager;
using consensus::OpId;
using consensus::RaftConfigPB;
using consensus::RaftPeerPB;
using fs::IOContext;
using fs::ReadableBlock;
using log::LogEntryPB;
using log::LogEntryReader;
using log::LogIndex;
using log::LogReader;
using log::ReadableLogSegment;
using log::SegmentSequence;
using rpc::Messenger;
using rpc::MessengerBuilder;
using std::cout;
using std::endl;
using std::map;
using std::pair;
using std::shared_ptr;
using std::string;
using std::unique_ptr;
using std::vector;
using strings::Substitute;
using tablet::DiskRowSet;
using tablet::RowIteratorOptions;
using tablet::RowSetMetadata;
using tablet::TabletMetadata;
using tablet::TabletDataState;
using tserver::TabletCopyClient;
using tserver::TSTabletManager;
namespace {
const char* const kSeparatorLine =
"----------------------------------------------------------------------\n";
const char* const kTermArg = "term";
const char* const kTabletIdGlobArg = "tablet_id_pattern";
const char* const kTabletIdGlobArgDesc = "Tablet identifier pattern. "
"This argument supports basic glob syntax: '*' matches 0 or more wildcard "
"characters.";
string Indent(int indent) {
return string(indent, ' ');
}
Status FsInit(bool skip_block_manager, unique_ptr<FsManager>* fs_manager) {
FsManagerOpts fs_opts;
fs_opts.read_only = true;
fs_opts.skip_block_manager = skip_block_manager;
fs_opts.update_instances = fs::UpdateInstanceBehavior::DONT_UPDATE;
unique_ptr<FsManager> fs_ptr(new FsManager(Env::Default(), fs_opts));
RETURN_NOT_OK(fs_ptr->Open());
fs_manager->swap(fs_ptr);
return Status::OK();
}
// Parses a colon-delimited string containing a hostname or IP address and port
// into its respective parts. For example, "localhost:12345" parses into
// hostname=localhost, and port=12345.
//
// Does not allow a port with value 0.
Status ParseHostPortString(const string& hostport_str, HostPort* hostport) {
HostPort hp;
Status s = hp.ParseString(hostport_str, 0);
if (!s.ok()) {
return s.CloneAndPrepend(Substitute(
"error while parsing peer '$0'", hostport_str));
}
if (hp.port() == 0) {
return Status::InvalidArgument(
Substitute("peer '$0' has port of 0", hostport_str));
}
*hostport = hp;
return Status::OK();
}
// Find the last replicated OpId for the tablet_id from the WAL.
Status FindLastLoggedOpId(FsManager* fs, const string& tablet_id,
OpId* last_logged_opid) {
shared_ptr<LogReader> reader;
RETURN_NOT_OK(LogReader::Open(fs,
/*index*/nullptr,
tablet_id,
/*metric_entity*/nullptr,
/*file_cache*/nullptr,
&reader));
SegmentSequence segs;
reader->GetSegmentsSnapshot(&segs);
// Reverse iterate the segments to find the 'last replicated' entry quickly.
// Note that we still read the entries within a segment in sequential
// fashion, so the last entry within the first 'found' segment will
// give us the last_logged_opid.
vector<scoped_refptr<ReadableLogSegment>>::reverse_iterator seg;
bool found = false;
for (seg = segs.rbegin(); seg != segs.rend(); ++seg) {
    // Use a distinct name to avoid shadowing the outer LogReader 'reader'.
    LogEntryReader entry_reader(seg->get());
    while (true) {
      unique_ptr<LogEntryPB> entry;
      Status s = entry_reader.ReadNextEntry(&entry);
if (s.IsEndOfFile()) break;
RETURN_NOT_OK_PREPEND(s, "Error in log segment");
if (entry->type() != log::REPLICATE) continue;
*last_logged_opid = entry->replicate().id();
found = true;
}
if (found) return Status::OK();
}
return Status::NotFound("No entries found in the write-ahead log");
}
// Parses a colon-delimited string containing a uuid, hostname or IP address,
// and port into its respective parts. For example,
// "1c7f19e7ecad4f918c0d3d23180fdb18:localhost:12345" parses into
// uuid=1c7f19e7ecad4f918c0d3d23180fdb18, hostname=localhost, and port=12345.
Status ParsePeerString(const string& peer_str,
string* uuid,
HostPort* hostport) {
string::size_type first_colon_idx = peer_str.find(':');
if (first_colon_idx == string::npos) {
return Status::InvalidArgument(Substitute("bad peer '$0'", peer_str));
}
string hostport_str = peer_str.substr(first_colon_idx + 1);
RETURN_NOT_OK(ParseHostPortString(hostport_str, hostport));
*uuid = peer_str.substr(0, first_colon_idx);
return Status::OK();
}
Status PrintReplicaUuids(const RunnerContext& context) {
unique_ptr<FsManager> fs_manager;
RETURN_NOT_OK(FsInit(/*skip_block_manager*/true, &fs_manager));
scoped_refptr<ConsensusMetadataManager> cmeta_manager(
new ConsensusMetadataManager(fs_manager.get()));
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
// Load the cmeta file and print all peer uuids.
scoped_refptr<ConsensusMetadata> cmeta;
RETURN_NOT_OK(cmeta_manager->Load(tablet_id, &cmeta));
cout << JoinMapped(cmeta->CommittedConfig().peers(),
[](const RaftPeerPB& p){ return p.permanent_uuid(); },
" ") << endl;
return Status::OK();
}
Status BackupConsensusMetadata(FsManager* fs_manager,
const string& tablet_id) {
Env* env = fs_manager->env();
string cmeta_filename = fs_manager->GetConsensusMetadataPath(tablet_id);
string backup_filename = Substitute("$0.pre_rewrite.$1", cmeta_filename, env->NowMicros());
WritableFileOptions opts;
opts.mode = Env::MUST_CREATE;
opts.sync_on_close = true;
RETURN_NOT_OK(env_util::CopyFile(env, cmeta_filename, backup_filename, opts));
LOG(INFO) << "Backed up old consensus metadata to " << backup_filename;
return Status::OK();
}
Status RewriteRaftConfig(const RunnerContext& context) {
// Parse tablet ID argument.
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
if (tablet_id != master::SysCatalogTable::kSysCatalogTabletId) {
LOG(WARNING) << "Master will not notice rewritten Raft config of regular "
<< "tablets. A regular Raft config change must occur.";
}
// Parse peer arguments.
vector<pair<string, HostPort>> peers;
for (const auto& arg : context.variadic_args) {
pair<string, HostPort> parsed_peer;
RETURN_NOT_OK(ParsePeerString(arg,
&parsed_peer.first, &parsed_peer.second));
peers.push_back(parsed_peer);
}
DCHECK(!peers.empty());
// Make a copy of the old file before rewriting it.
Env* env = Env::Default();
FsManagerOpts fs_opts = FsManagerOpts();
fs_opts.skip_block_manager = true;
FsManager fs_manager(env, std::move(fs_opts));
RETURN_NOT_OK(fs_manager.Open());
RETURN_NOT_OK(BackupConsensusMetadata(&fs_manager, tablet_id));
// Load the cmeta file and rewrite the raft config.
scoped_refptr<ConsensusMetadataManager> cmeta_manager(new ConsensusMetadataManager(&fs_manager));
scoped_refptr<ConsensusMetadata> cmeta;
RETURN_NOT_OK(cmeta_manager->Load(tablet_id, &cmeta));
RaftConfigPB current_config = cmeta->CommittedConfig();
RaftConfigPB new_config = current_config;
new_config.clear_peers();
for (const auto& p : peers) {
RaftPeerPB new_peer;
new_peer.set_member_type(RaftPeerPB::VOTER);
new_peer.set_permanent_uuid(p.first);
HostPortPB new_peer_host_port_pb;
RETURN_NOT_OK(HostPortToPB(p.second, &new_peer_host_port_pb));
new_peer.mutable_last_known_addr()->CopyFrom(new_peer_host_port_pb);
new_config.add_peers()->CopyFrom(new_peer);
}
cmeta->set_committed_config(new_config);
return cmeta->Flush();
}
Status SetRaftTerm(const RunnerContext& context) {
// Parse tablet ID argument.
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
const string& new_term_str = FindOrDie(context.required_args, kTermArg);
int64_t new_term;
if (!safe_strto64(new_term_str, &new_term) || new_term <= 0) {
return Status::InvalidArgument("invalid term");
}
// Load the current metadata from disk and verify that the intended operation is safe.
Env* env = Env::Default();
FsManagerOpts fs_opts = FsManagerOpts();
fs_opts.skip_block_manager = true;
FsManager fs_manager(env, fs_opts);
RETURN_NOT_OK(fs_manager.Open());
// Load the cmeta file and rewrite the raft config.
scoped_refptr<ConsensusMetadataManager> cmeta_manager(new ConsensusMetadataManager(&fs_manager));
scoped_refptr<ConsensusMetadata> cmeta;
RETURN_NOT_OK(cmeta_manager->Load(tablet_id, &cmeta));
if (new_term <= cmeta->current_term()) {
return Status::InvalidArgument(Substitute(
"specified term $0 must be higher than current term $1",
new_term, cmeta->current_term()));
}
// Make a copy of the old file before rewriting it.
RETURN_NOT_OK(BackupConsensusMetadata(&fs_manager, tablet_id));
// Update and flush.
cmeta->set_current_term(new_term);
// The 'voted_for' field is relative to the term stored in 'current_term'. So, if we
// have changed to a new term, we need to also clear any previous vote record that was
// associated with the old term.
cmeta->clear_voted_for();
return cmeta->Flush();
}
Status CopyFromRemote(const RunnerContext& context) {
// Parse the tablet ID and source arguments.
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
const string& rpc_address = FindOrDie(context.required_args, "source");
HostPort hp;
RETURN_NOT_OK(ParseHostPortString(rpc_address, &hp));
// Copy the tablet over.
FsManager fs_manager(Env::Default(), FsManagerOpts());
RETURN_NOT_OK(fs_manager.Open());
scoped_refptr<ConsensusMetadataManager> cmeta_manager(new ConsensusMetadataManager(&fs_manager));
MessengerBuilder builder("tablet_copy_client");
shared_ptr<Messenger> messenger;
  RETURN_NOT_OK(builder.Build(&messenger));
TabletCopyClient client(tablet_id, &fs_manager, cmeta_manager,
messenger, nullptr /* no metrics */);
RETURN_NOT_OK(client.Start(hp, nullptr));
RETURN_NOT_OK(client.FetchAll(nullptr));
return client.Finish();
}
Status DeleteLocalReplica(const RunnerContext& context) {
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
FsManager fs_manager(Env::Default(), FsManagerOpts());
RETURN_NOT_OK(fs_manager.Open());
scoped_refptr<ConsensusMetadataManager> cmeta_manager(new ConsensusMetadataManager(&fs_manager));
boost::optional<OpId> last_logged_opid = boost::none;
TabletDataState state = TabletDataState::TABLET_DATA_DELETED;
if (!FLAGS_clean_unsafe) {
state = TabletDataState::TABLET_DATA_TOMBSTONED;
// Tombstone the tablet. If we couldn't find the last committed OpId from
// the log, it's not an error. But if we receive any other error,
// indicate the user to delete with --clean_unsafe flag.
OpId opid;
Status s = FindLastLoggedOpId(&fs_manager, tablet_id, &opid);
if (s.ok()) {
last_logged_opid = opid;
} else if (s.IsNotFound()) {
LOG(INFO) << "Could not find any replicated OpId from WAL, "
<< "but proceeding with tablet tombstone: " << s.ToString();
} else {
LOG(ERROR) << "Error attempting to find last replicated OpId from WAL: " << s.ToString();
LOG(ERROR) << "Cannot delete (tombstone) the tablet, use --clean_unsafe to delete"
<< " the tablet permanently from this server.";
return s;
}
}
// Force the specified tablet on this node to be in 'state'.
scoped_refptr<TabletMetadata> meta;
RETURN_NOT_OK(TabletMetadata::Load(&fs_manager, tablet_id, &meta));
RETURN_NOT_OK(TSTabletManager::DeleteTabletData(meta, cmeta_manager, state, last_logged_opid));
return Status::OK();
}
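// Opens each block in 'blocks', adds its size to 'running_sum', and prints
// per-block sizes when verbose logging (level 1) is enabled.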
Status SummarizeSize(FsManager* fs,
const vector<BlockId>& blocks,
StringPiece block_type,
int64_t* running_sum) {
int64_t local_sum = 0;
for (const auto& b : blocks) {
unique_ptr<fs::ReadableBlock> rb;
RETURN_NOT_OK_PREPEND(fs->OpenBlock(b, &rb),
Substitute("could not open block $0", b.ToString()));
uint64_t size = 0;
RETURN_NOT_OK_PREPEND(rb->Size(&size),
Substitute("could not get size for block $0", b.ToString()));
local_sum += size;
if (VLOG_IS_ON(1)) {
cout << Substitute("$0 block $1: $2 bytes $3",
block_type, b.ToString(),
size, HumanReadableNumBytes::ToString(size)) << endl;
}
}
*running_sum += local_sum;
return Status::OK();
}
namespace {
struct TabletSizeStats {
int64_t redo_bytes = 0;
int64_t undo_bytes = 0;
int64_t bloom_bytes = 0;
int64_t pk_index_bytes = 0;
map<string, int64_t, autodigit_less> column_bytes;
void Add(const TabletSizeStats& other) {
redo_bytes += other.redo_bytes;
undo_bytes += other.undo_bytes;
bloom_bytes += other.bloom_bytes;
pk_index_bytes += other.pk_index_bytes;
for (const auto& p : other.column_bytes) {
column_bytes[p.first] += p.second;
}
}
void AddToTable(const string& table_id,
const string& tablet_id,
const string& rowset_id,
DataTable* table) const {
vector<pair<string, int64_t>> to_print(column_bytes.begin(), column_bytes.end());
to_print.emplace_back("REDO", redo_bytes);
to_print.emplace_back("UNDO", undo_bytes);
to_print.emplace_back("BLOOM", bloom_bytes);
to_print.emplace_back("PK", pk_index_bytes);
int64_t total = 0;
for (const auto& e : to_print) {
table->AddRow({table_id, tablet_id, rowset_id, e.first,
HumanReadableNumBytes::ToString(e.second)});
total += e.second;
}
table->AddRow({table_id, tablet_id, rowset_id, "*", HumanReadableNumBytes::ToString(total)});
}
};
} // anonymous namespace
Status SummarizeDataSize(const RunnerContext& context) {
const string& tablet_id_pattern = FindOrDie(context.required_args, kTabletIdGlobArg);
unique_ptr<FsManager> fs;
RETURN_NOT_OK(FsInit(/*skip_block_manager*/false, &fs));
vector<string> tablets;
RETURN_NOT_OK(fs->ListTabletIds(&tablets));
std::unordered_map<string, TabletSizeStats> size_stats_by_table_id;
DataTable output_table({ "table id", "tablet id", "rowset id", "block type", "size" });
for (const string& tablet_id : tablets) {
TabletSizeStats tablet_stats;
if (!MatchPattern(tablet_id, tablet_id_pattern)) continue;
scoped_refptr<TabletMetadata> meta;
RETURN_NOT_OK_PREPEND(TabletMetadata::Load(fs.get(), tablet_id, &meta),
Substitute("could not load tablet metadata for $0", tablet_id));
const string& table_id = meta->table_id();
for (const shared_ptr<RowSetMetadata>& rs_meta : meta->rowsets()) {
TabletSizeStats rowset_stats;
RETURN_NOT_OK(SummarizeSize(fs.get(), rs_meta->redo_delta_blocks(),
"REDO", &rowset_stats.redo_bytes));
RETURN_NOT_OK(SummarizeSize(fs.get(), rs_meta->undo_delta_blocks(),
"UNDO", &rowset_stats.undo_bytes));
RETURN_NOT_OK(SummarizeSize(fs.get(), { rs_meta->bloom_block() },
"Bloom", &rowset_stats.bloom_bytes));
if (rs_meta->has_adhoc_index_block()) {
RETURN_NOT_OK(SummarizeSize(fs.get(), { rs_meta->adhoc_index_block() },
"PK index", &rowset_stats.pk_index_bytes));
}
const auto& column_blocks_by_id = rs_meta->GetColumnBlocksById();
for (const auto& e : column_blocks_by_id) {
const auto& col_id = e.first;
const auto& block = e.second;
const auto& col_idx = meta->schema().find_column_by_id(col_id);
string col_key = Substitute(
"c$0 ($1)", col_id,
(col_idx != Schema::kColumnNotFound) ?
meta->schema().column(col_idx).name() : "?");
RETURN_NOT_OK(SummarizeSize(
fs.get(), { block }, col_key, &rowset_stats.column_bytes[col_key]));
}
rowset_stats.AddToTable(table_id, tablet_id, std::to_string(rs_meta->id()), &output_table);
tablet_stats.Add(rowset_stats);
}
tablet_stats.AddToTable(table_id, tablet_id, "*", &output_table);
size_stats_by_table_id[table_id].Add(tablet_stats);
}
for (const auto& e : size_stats_by_table_id) {
const auto& table_id = e.first;
const auto& stats = e.second;
stats.AddToTable(table_id, "*", "*", &output_table);
}
RETURN_NOT_OK(output_table.PrintTo(cout));
return Status::OK();
}
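// Opens the WAL of the given tablet and prints every segment to stdout.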
Status DumpWals(const RunnerContext& context) {
unique_ptr<FsManager> fs_manager;
RETURN_NOT_OK(FsInit(/*skip_block_manager*/true, &fs_manager));
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
shared_ptr<LogReader> reader;
RETURN_NOT_OK(LogReader::Open(fs_manager.get(),
/*index*/nullptr,
tablet_id,
/*metric_entity*/nullptr,
/*file_cache*/nullptr,
&reader));
SegmentSequence segments;
  RETURN_NOT_OK(reader->GetSegmentsSnapshot(&segments));
for (const scoped_refptr<ReadableLogSegment>& segment : segments) {
RETURN_NOT_OK(PrintSegment(segment));
}
return Status::OK();
}
Status ListBlocksInRowSet(const Schema& schema,
const RowSetMetadata& rs_meta) {
RowSetMetadata::ColumnIdToBlockIdMap col_blocks =
rs_meta.GetColumnBlocksById();
for (const RowSetMetadata::ColumnIdToBlockIdMap::value_type& e :
col_blocks) {
ColumnId col_id = e.first;
const BlockId& block_id = e.second;
cout << "Column block for column ID " << col_id;
int col_idx = schema.find_column_by_id(col_id);
if (col_idx != -1) {
cout << " (" << schema.column(col_idx).ToString() << ")";
}
cout << ": ";
cout << block_id.ToString() << endl;
}
for (const BlockId& block : rs_meta.undo_delta_blocks()) {
cout << "UNDO: " << block.ToString() << endl;
}
for (const BlockId& block : rs_meta.redo_delta_blocks()) {
cout << "REDO: " << block.ToString() << endl;
}
return Status::OK();
}
Status DumpBlockIdsForLocalReplica(const RunnerContext& context) {
unique_ptr<FsManager> fs_manager;
RETURN_NOT_OK(FsInit(/*skip_block_manager*/false, &fs_manager));
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
scoped_refptr<TabletMetadata> meta;
RETURN_NOT_OK(TabletMetadata::Load(fs_manager.get(), tablet_id, &meta));
if (meta->rowsets().empty()) {
cout << "No rowsets found on disk for tablet "
<< tablet_id << endl;
return Status::OK();
}
cout << "Listing all data blocks in tablet "
<< tablet_id << ":" << endl;
Schema schema = meta->schema();
size_t idx = 0;
for (const shared_ptr<RowSetMetadata>& rs_meta : meta->rowsets()) {
cout << "Rowset " << idx++ << endl;
RETURN_NOT_OK(ListBlocksInRowSet(schema, *rs_meta));
}
return Status::OK();
}
Status DumpTabletMeta(FsManager* fs_manager,
const string& tablet_id, int indent) {
scoped_refptr<TabletMetadata> meta;
RETURN_NOT_OK(TabletMetadata::Load(fs_manager, tablet_id, &meta));
const Schema& schema = meta->schema();
cout << Indent(indent) << "Partition: "
<< meta->partition_schema().PartitionDebugString(meta->partition(),
meta->schema())
<< endl;
cout << Indent(indent) << "Table name: " << meta->table_name()
<< " Table id: " << meta->table_id() << endl;
cout << Indent(indent) << "Schema (version="
<< meta->schema_version() << "): "
<< schema.ToString() << endl;
tablet::TabletSuperBlockPB pb;
RETURN_NOT_OK_PREPEND(meta->ToSuperBlock(&pb), "Could not get superblock");
cout << "Superblock:\n" << pb_util::SecureDebugString(pb) << endl;
return Status::OK();
}
Status ListLocalReplicas(const RunnerContext& context) {
unique_ptr<FsManager> fs_manager;
RETURN_NOT_OK(FsInit(/*skip_block_manager*/true, &fs_manager));
vector<string> tablets;
RETURN_NOT_OK(fs_manager->ListTabletIds(&tablets));
for (const string& tablet : tablets) {
if (FLAGS_list_detail) {
cout << "Tablet: " << tablet << endl;
RETURN_NOT_OK(DumpTabletMeta(fs_manager.get(), tablet, 2));
} else {
cout << tablet << endl;
}
}
return Status::OK();
}
Status DumpRowSetInternal(const IOContext& ctx,
const shared_ptr<RowSetMetadata>& rs_meta,
int indent,
int64_t* rows_left) {
tablet::RowSetDataPB pb;
rs_meta->ToProtobuf(&pb);
if (FLAGS_dump_metadata) {
cout << Indent(indent) << "RowSet metadata: " << pb_util::SecureDebugString(pb)
<< endl << endl;
}
scoped_refptr<log::LogAnchorRegistry> log_reg(new log::LogAnchorRegistry());
shared_ptr<DiskRowSet> rs;
RETURN_NOT_OK(DiskRowSet::Open(rs_meta,
log_reg.get(),
tablet::TabletMemTrackers(),
&ctx,
&rs));
vector<string> lines;
if (FLAGS_dump_all_columns) {
RETURN_NOT_OK(rs->DebugDump(&lines));
} else {
Schema key_proj = rs_meta->tablet_schema().CreateKeyProjection();
RowIteratorOptions opts;
opts.projection = &key_proj;
opts.io_context = &ctx;
unique_ptr<RowwiseIterator> it;
RETURN_NOT_OK(rs->NewRowIterator(opts, &it));
RETURN_NOT_OK(it->Init(nullptr));
Arena arena(1024);
RowBlock block(&key_proj, 100, &arena);
faststring key;
while (it->HasNext()) {
RETURN_NOT_OK(it->NextBlock(&block));
for (int i = 0; i < block.nrows(); i++) {
key_proj.EncodeComparableKey(block.row(i), &key);
lines.emplace_back(strings::b2a_hex(key.ToString()));
}
}
}
// Respect 'rows_left' when dumping the output.
int64_t limit = *rows_left >= 0 ?
std::min<int64_t>(*rows_left, lines.size()) : lines.size();
for (int i = 0; i < limit; i++) {
cout << lines[i] << endl;
}
if (*rows_left >= 0) {
*rows_left -= limit;
}
return Status::OK();
}
Status DumpRowSet(const RunnerContext& context) {
const int kIndent = 2;
unique_ptr<FsManager> fs_manager;
RETURN_NOT_OK(FsInit(/*skip_block_manager*/false, &fs_manager));
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
scoped_refptr<TabletMetadata> meta;
RETURN_NOT_OK(TabletMetadata::Load(fs_manager.get(), tablet_id, &meta));
if (meta->rowsets().empty()) {
cout << Indent(0) << "No rowsets found on disk for tablet "
<< tablet_id << endl;
return Status::OK();
}
IOContext ctx;
ctx.tablet_id = meta->tablet_id();
int64_t rows_left = FLAGS_nrows;
// If rowset index is provided, only dump that rowset.
if (FLAGS_rowset_index != -1) {
for (const auto& rs_meta : meta->rowsets()) {
if (rs_meta->id() == FLAGS_rowset_index) {
return DumpRowSetInternal(ctx, rs_meta, kIndent, &rows_left);
}
}
return Status::InvalidArgument(
Substitute("Could not find rowset $0 in tablet id $1",
FLAGS_rowset_index, tablet_id));
}
// Rowset index not provided, dump all rowsets
size_t idx = 0;
for (const auto& rs_meta : meta->rowsets()) {
cout << endl << "Dumping rowset " << idx++ << endl << kSeparatorLine;
RETURN_NOT_OK(DumpRowSetInternal(ctx, rs_meta, kIndent, &rows_left));
}
return Status::OK();
}
Status DumpMeta(const RunnerContext& context) {
unique_ptr<FsManager> fs_manager;
RETURN_NOT_OK(FsInit(/*skip_block_manager*/false, &fs_manager));
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
return DumpTabletMeta(fs_manager.get(), tablet_id, 0);
}
Status DumpDataDirs(const RunnerContext& context) {
unique_ptr<FsManager> fs_manager;
RETURN_NOT_OK(FsInit(/*skip_block_manager*/false, &fs_manager));
const string& tablet_id = FindOrDie(context.required_args, kTabletIdArg);
// Load the tablet meta to make sure the tablet's data directories are loaded
// into the manager.
scoped_refptr<TabletMetadata> unused_meta;
RETURN_NOT_OK(TabletMetadata::Load(fs_manager.get(), tablet_id, &unused_meta));
vector<string> data_dirs;
RETURN_NOT_OK(fs_manager->dd_manager()->FindDataDirsByTabletId(tablet_id,
&data_dirs));
for (const auto& dir : data_dirs) {
cout << dir << endl;
}
return Status::OK();
}
unique_ptr<Mode> BuildDumpMode() {
unique_ptr<Action> dump_block_ids =
ActionBuilder("block_ids", &DumpBlockIdsForLocalReplica)
.Description("Dump the IDs of all blocks belonging to a local replica")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.Build();
unique_ptr<Action> dump_data_dirs =
ActionBuilder("data_dirs", &DumpDataDirs)
.Description("Dump the data directories where the replica's data is stored")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.Build();
unique_ptr<Action> dump_meta =
ActionBuilder("meta", &DumpMeta)
.Description("Dump the metadata of a local replica")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.Build();
unique_ptr<Action> dump_rowset =
ActionBuilder("rowset", &DumpRowSet)
.Description("Dump the rowset contents of a local replica")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddOptionalParameter("dump_all_columns")
.AddOptionalParameter("dump_metadata")
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.AddOptionalParameter("nrows")
.AddOptionalParameter("rowset_index")
.Build();
unique_ptr<Action> dump_wals =
ActionBuilder("wals", &DumpWals)
.Description("Dump all WAL (write-ahead log) segments of "
"a local replica")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.AddOptionalParameter("print_entries")
.AddOptionalParameter("print_meta")
.AddOptionalParameter("truncate_data")
.Build();
return ModeBuilder("dump")
.Description("Dump a Kudu filesystem")
.AddAction(std::move(dump_block_ids))
.AddAction(std::move(dump_data_dirs))
.AddAction(std::move(dump_meta))
.AddAction(std::move(dump_rowset))
.AddAction(std::move(dump_wals))
.Build();
}
} // anonymous namespace
unique_ptr<Mode> BuildLocalReplicaMode() {
unique_ptr<Action> print_replica_uuids =
ActionBuilder("print_replica_uuids", &PrintReplicaUuids)
.Description("Print all tablet replica peer UUIDs found in a "
"tablet's Raft configuration")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.Build();
unique_ptr<Action> rewrite_raft_config =
ActionBuilder("rewrite_raft_config", &RewriteRaftConfig)
.Description("Rewrite a tablet replica's Raft configuration")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddRequiredVariadicParameter({
"peers", "List of peers where each peer is of "
"form 'uuid:hostname:port'" })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.Build();
unique_ptr<Action> set_term =
ActionBuilder("set_term", &SetRaftTerm)
.Description("Bump the current term stored in consensus metadata")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddRequiredParameter({ kTermArg, "the new raft term (must be greater "
"than the current term)" })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.Build();
unique_ptr<Mode> cmeta =
ModeBuilder("cmeta")
.Description("Operate on a local tablet replica's consensus "
"metadata file")
.AddAction(std::move(print_replica_uuids))
.AddAction(std::move(rewrite_raft_config))
.AddAction(std::move(set_term))
.Build();
unique_ptr<Action> copy_from_remote =
ActionBuilder("copy_from_remote", &CopyFromRemote)
.Description("Copy a tablet replica from a remote server")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddRequiredParameter({ "source", "Source RPC address of "
"form hostname:port" })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.Build();
unique_ptr<Action> list =
ActionBuilder("list", &ListLocalReplicas)
.Description("Show list of tablet replicas in the local filesystem")
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.AddOptionalParameter("list_detail")
.Build();
unique_ptr<Action> delete_local_replica =
ActionBuilder("delete", &DeleteLocalReplica)
.Description("Delete a tablet replica from the local filesystem. "
"By default, leaves a tombstone record.")
.AddRequiredParameter({ kTabletIdArg, kTabletIdArgDesc })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.AddOptionalParameter("clean_unsafe")
.Build();
unique_ptr<Action> data_size =
ActionBuilder("data_size", &SummarizeDataSize)
.Description("Summarize the data size/space usage of the given local replica(s).")
.AddRequiredParameter({ kTabletIdGlobArg, kTabletIdGlobArgDesc })
.AddOptionalParameter("fs_data_dirs")
.AddOptionalParameter("fs_metadata_dir")
.AddOptionalParameter("fs_wal_dir")
.AddOptionalParameter("format")
.Build();
return ModeBuilder("local_replica")
.Description("Operate on local tablet replicas via the local filesystem")
.AddMode(std::move(cmeta))
.AddAction(std::move(copy_from_remote))
.AddAction(std::move(data_size))
.AddAction(std::move(delete_local_replica))
.AddAction(std::move(list))
.AddMode(BuildDumpMode())
.Build();
}
} // namespace tools
} // namespace kudu
|
{"hexsha": "67a2d1a7f824b63f7807dc7114448ed8d99819fe", "size": 35331, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/kudu/tools/tool_action_local_replica.cc", "max_stars_repo_name": "toddlipcon/kudu", "max_stars_repo_head_hexsha": "e5ee5e08c68c9c661ce676ad629b4ad3abf57def", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2020-05-12T02:18:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-15T20:39:21.000Z", "max_issues_repo_path": "src/kudu/tools/tool_action_local_replica.cc", "max_issues_repo_name": "toddlipcon/kudu", "max_issues_repo_head_hexsha": "e5ee5e08c68c9c661ce676ad629b4ad3abf57def", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kudu/tools/tool_action_local_replica.cc", "max_forks_repo_name": "toddlipcon/kudu", "max_forks_repo_head_hexsha": "e5ee5e08c68c9c661ce676ad629b4ad3abf57def", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9495166488, "max_line_length": 99, "alphanum_fraction": 0.6837621352, "num_tokens": 8767}
|
from collections import namedtuple
import os
import re
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM
import h5py
import pandas as pd
import numpy as np
import numpy.ma as ma
from numpy.random import default_rng
from desc.skycatalogs.utils.common_utils import print_dated_msg
__all__ = ['LookupInfo', 'Cmp', 'MagNorm', 'convert_tophat_sed',
'write_sed_file', 'NORMWV_IX', 'get_star_sed_path',
'create_cosmology']
# Index for tophat bin containing 500 nm
NORMWV_IX = 13
def convert_tophat_sed(a_bins, f_nu_input, mag_norm_f, redshift=0,
wavelen_step=0.1):
'''
Given a tophat SED and redshift, produce an equivalent SED as lists of
wavelength and f_lambda. Also compute magnorm
Parameters
----------
a_bins: list of Tophat [tuples (start, width)] in Angstroms
    f_nu_input: list of f_nu values, one per tophat bin
mag_norm_f: an instance of MagNorm
redshift: needed for computing distance modulus. Should be
cosmoDC2 redshiftHubble, aka redshift_hubble in sky catalogs
wavelen_step: Re-cast tophat seds to use this bin width in nm (keeping
same step function in f_nu space).
    Returns
    -------
arrays lambda, f_lambda where lambda is in nm and f_lambda is in
erg / (cm**2 * s * nm)
Also return final magnorm (including redshift adjustment) and f_nu value
at 500 nm
'''
lam_nm = 0.1 * np.array([b.start + 0.5 * b.width for b in a_bins])
lam_width_nm = 0.1 * np.array([b.width for b in a_bins])
f_nu = 1.0 * np.array(f_nu_input)
val_500nm = f_nu[NORMWV_IX]
# Convert from f_nu to f_lambda:
# In earlier versions tophats were in decreasing lambda order
if (lam_nm[0] > lam_nm[1]): # reverse
lam_nm[:] = lam_nm[::-1]
lam_width_nm[:] = lam_width_nm[::-1]
f_nu[:] = f_nu[::-1]
lam_min = lam_nm[0]
lam_max = lam_nm[-1] + lam_width_nm[-1]
# Keep the same step function but use fine bins instead of the
# original tophat widths.
n_bins = int((lam_max - lam_min) / wavelen_step)
lam_fine = np.empty(n_bins)
f_nu_fine = np.empty(n_bins)
boundaries = list(lam_nm)
boundaries.append(lam_max)
b_ix = 0
for i in range(n_bins):
lam_fine[i] = lam_min + wavelen_step * i
        if lam_fine[i] > boundaries[b_ix + 1]:
b_ix = b_ix + 1
f_nu_fine[i] = f_nu[b_ix]
# Convert fnu to flambda, ignoring constant factors.
flambda = f_nu_fine/lam_fine**2
# Normalize so flambda value at 500 nm is 1.0
nm500_ix = int((500 - lam_min) / wavelen_step) + 1
flambda_norm = flambda / flambda[nm500_ix]
return lam_fine, flambda_norm, mag_norm_f(f_nu[NORMWV_IX],
redshift), val_500nm
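# Illustrative usage (names are hypothetical, not part of this module): given
# tophats as objects with 'start'/'width' attributes in Angstroms, ordered so
# that the bin at index NORMWV_IX contains 500 nm, and f_nu values of the same
# length,
#     lam, flam, magnorm, v500 = convert_tophat_sed(bins, f_nu, mag_norm_f,
#                                                   redshift=0.5)
# returns wavelengths in nm, f_lambda normalized to 1.0 near 500 nm, the
# redshift-adjusted magnorm and the tophat f_nu value at 500 nm.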
def write_sed_file(path, wv, f_lambda, wv_unit=None, f_lambda_unit=None):
'''
Write a two-column text file. First column is wavelength,
second is luminosity value
If units are supplied, write a comment line at the top
Parameters
----------
path Where to write the file and what to call it
wv List or array of wavelength values
f_lambda List or array of luminosities. Must be the same length as wv
wv_unit String describing units for first column
f_lambda_unit String describing units for second column
'''
header = '# '
if wv_unit:
header += wv_unit + ' '
else:
header += ' lambda unit unknown '
if f_lambda_unit:
header += f_lambda_unit
else:
header += ' f_lambda unit unknown'
header += '\n'
with open(path, mode="w") as f:
f.write(header)
for i in range(len(wv)):
line = '{:8.2f} {:g}\n'.format(wv[i], f_lambda[i])
f.write(line)
_standard_dict = {'lte' : 'starSED/phoSimMLT',
'bergeron' : 'starSED/wDs',
'km|kp' : 'starSED/kurucz'}
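# For example, a filename beginning with 'km' or 'kp' maps to starSED/kurucz,
# per the regular-expression keys above.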
def get_star_sed_path(filename, name_to_folder=_standard_dict):
'''
Return numpy array of full paths relative to SIMS_SED_LIBRARY_DIR,
given filenames
Parameters
----------
filename list of strings. Usually full filename but may be missing final ".gz"
name_to_folder dict mapping regular expression (to be matched with
filename) to relative path for containing directory
Returns
-------
    numpy array of full paths, relative to SIMS_SED_LIBRARY_DIR, one per input filename
'''
compiled = { re.compile(k) : v for (k, v) in name_to_folder.items()}
path_list = []
for f in filename:
m = None
matched = False
for k,v in compiled.items():
f = f.strip()
m = k.match(f)
if m:
p = os.path.join(v, f)
if not p.endswith('.gz'):
p = p + '.gz'
path_list.append(p)
matched = True
break
if not matched:
raise ValueError(f'get_star_sed_path: Filename {f} does not match any known patterns')
return np.array(path_list)
def create_cosmology(config):
"""
Create a FlatLambdaCDM cosmology from a dictionary of input parameters.
This code is based on/borrowed from
https://github.com/LSSTDESC/gcr-catalogs/blob/master/GCRCatalogs/cosmodc2.py#L128
"""
cosmo_astropy_allowed = FlatLambdaCDM.__init__.__code__.co_varnames[1:]
cosmo_astropy = {k: v for k, v in config.items()
if k in cosmo_astropy_allowed}
cosmology = FlatLambdaCDM(**cosmo_astropy)
return cosmology
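# Example (illustrative): create_cosmology({'H0': 71.0, 'Om0': 0.265,
# 'Ob0': 0.045}) returns a FlatLambdaCDM; keys not accepted by
# FlatLambdaCDM.__init__ are silently dropped.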
class MagNorm:
def __init__(self, cosmology):
"""
Parameters
----------
cosmology : astropy.cosmology
Cosmology object created from the gcr-catalogs galaxy catalog
cosmology specification.
"""
self.cosmology = cosmology
def dl(self, z):
"""
Return the luminosity distance in units of meters.
"""
# Conversion factor from Mpc to meters (obtained from pyccl).
MPC_TO_METER = 3.085677581491367e+22
return self.cosmology.luminosity_distance(z).value*MPC_TO_METER
def __call__(self, tophat_sed_value, redshift_hubble, one_maggy=4.3442e13):
one_Jy = 1e-26 # W/Hz/m**2
Lnu = tophat_sed_value*one_maggy # convert from maggies to W/Hz
Fnu = Lnu/4/np.pi/self.dl(redshift_hubble)**2
return -2.5*np.log10(Fnu/one_Jy) + 8.90
class LookupInfo(object):
'''
Stash information from the lookup file for a particular hp which
will be useful for Cmp class
Also save tophat scale
'''
def __init__(self, sed_library_dir, hp):
self.sed_lookup_file = os.path.join(sed_library_dir,
f'sed_fit_{hp}.h5')
self.cached = False
def cache_info(self):
if self.cached: return
        with h5py.File(self.sed_lookup_file, 'r') as f:
# Make a copy which will exist after file is closed
self.sed_names = np.array(f['sed_names'])
self.disk_sed = np.array(f['disk_sed'])
self.bulge_sed = np.array(f['bulge_sed'])
self.galaxy_id = np.array(f['galaxy_id'])
self.cached = True
def get_orig_sed_file(self, cmp, galaxy_id, min_ix=0):
# Start searching for galaxy_id starting with min_ix
the_ix = -1
if cmp not in ['bulge', 'disk']:
raise ValueError(f'Unknown component type "{cmp}" ')
for i in range(min_ix, len(self.galaxy_id)):
if self.galaxy_id[i] == galaxy_id:
the_ix = i
break
if the_ix == -1:
raise ValueError(f'Galaxy {galaxy_id} not found')
if cmp == 'bulge':
return (self.sed_names[self.bulge_sed[the_ix]]).decode("utf-8")
else:
return (self.sed_names[self.disk_sed[the_ix]]).decode("utf-8")
# This class is no longer used. Consider deleting
class Cmp(object):
'''
Handle writing of SED files and booking for either disk or bulge
'''
def __init__(self, cmp_name, obj_coll, output_dir, hp, n_seds, bins,
lookup_info, mag_norm_f):
'''
Parameters
----------
cmp_name string one of 'bulge', 'disk'
obj_coll object collection coming from sky catalog, typically all
galaxies belonging to a particular pixel
output_dir string where to write output SED files
hp int in case we decide to embed in output filename
n_seds int how many SED files to write
bins list list of (start, width) tuples describing bins.
lookup_info LookupInfo information pertaining to a particular hp
mag_norm_f MagNorm Used for computing mag norm
'''
self.cmp_name = cmp_name
self.output_dir = output_dir
self.hp = hp
self.coll = obj_coll
self.n_seds = n_seds
self.n_seds_done = 0
self.bins = bins
lookup_info.cache_info()
self.lookup_info = lookup_info
self.mag_norm_f = mag_norm_f
def _write_sed(self, outpath, sed_list, bins, redshift,
wavelen_step=5.0, summary_only=False):
'''
Convert cosmoDC2-style tophat SEDs to a file of the form expected by
ImSim.
Parameters
----------
outpath string full path of output file
sed_list list of floats list of values as they appear in
cosmoDC2 catalog
bins list((start,width)) bin definitions
redshift -- for the object the sed file is associated with
Return
------
(magnorm, val_500nm) magnorm is our computed magnorm value,
including adjustment for redshift.
val_500nm is the sed value at or near 500 nm
'''
(lmbda, f_lambda,
magnorm, val_500nm) = convert_tophat_sed(bins, sed_list,
self.mag_norm_f,
redshift=redshift,
wavelen_step=wavelen_step)
if not summary_only:
write_sed_file(outpath, lmbda, f_lambda, wv_unit='nm')
start = (min([b.start for b in bins]))/10.0 # A to nm
return (magnorm, val_500nm) # for now
def _write_summary(self, ix, gal, sed, redshift, orig_magnorm, our_magnorm,
val_500nm, orig_sed_file, tp_sed_file):
# Filepath. Use same output dir.
print_dated_msg(f'Entered _write_summary for component {self.cmp_name}')
basename_csv = f'{self.cmp_name}_sed_hp{self.hp}_summary.csv'
outpath_csv = os.path.join(self.output_dir, basename_csv)
basename_csv_brief = f'{self.cmp_name}_sed_hp{self.hp}_brief.csv'
outpath_csv_brief = os.path.join(self.output_dir, basename_csv_brief)
basename_pq = f'{self.cmp_name}_sed_hp{self.hp}_summary.parquet'
outpath_pq = os.path.join(self.output_dir, basename_pq)
out_dict = {}
out_dict['chosen_ix'] = ix
out_dict['gal_id'] = gal
out_dict['redshift'] = redshift
out_dict['orig_magnorm'] = orig_magnorm
out_dict['our_magnorm'] = our_magnorm
out_dict['val_500nm'] = val_500nm
df = pd.DataFrame(data=out_dict)
# For convenience, output text file leaving off paths
df.to_csv(path_or_buf=outpath_csv_brief)
out_dict['orig_sed_file'] = orig_sed_file
out_dict['tp_sed_file'] = tp_sed_file
out_dict['tp_vals'] = sed
df = pd.DataFrame(data=out_dict)
df.to_csv(path_or_buf=outpath_csv)
df.to_parquet(outpath_pq)
def create(self, count_start=0, summary_only=False):
'''
Create SED files as specified at init time and also table describing
which tophat SEDs were used.
count_start may be > 0 in case some of the required files have already
been created and we just want to pick up where we left off.
[But initial draft won't support this since there are complications]
'''
# For debugging predictability
seed_dict = {}
seed_dict['bulge'] = 271423 + 2 * self.hp
seed_dict['disk'] = 1780247 + 2 * self.hp
print_dated_msg(f'Cmp.create called for component {self.cmp_name}')
### Really it should have _no_host_extinction suffix
### REALLY??
### but for now schema is not using it
### sed_col = 'sed_val_' + self.cmp_name + '_no_host_extinction'
sed_col = 'sed_val_' + self.cmp_name
sed = np.array(self.coll.get_native_attribute(sed_col))
magnorm_col = self.cmp_name + '_magnorm'
magnorm = np.array(self.coll.get_native_attribute(magnorm_col))
gal_id = np.array(self.coll.get_native_attribute('galaxy_id'))
redshift = np.array(self.coll.get_native_attribute('redshift_hubble'))
mask_inf = np.isinf(magnorm)
good_sed = ma.array(sed, mask=mask_inf).compressed()
good_gal_id = ma.array(gal_id, mask=mask_inf).compressed()
good_magnorm = ma.array(magnorm, mask=mask_inf).compressed()
good_redshift = ma.array(redshift, mask=mask_inf).compressed()
# Choose entries at random
rng = default_rng(seed_dict[self.cmp_name])
ix_list = rng.integers(low=0, high=len(good_magnorm), size=self.n_seds)
gal_chosen = [good_gal_id[i] for i in ix_list]
sed_chosen = [good_sed[i] for i in ix_list]
orig_magnorm_chosen = [good_magnorm[i] for i in ix_list]
redshift_chosen = [good_redshift[i] for i in ix_list]
our_magnorm = []
val_500nm = []
orig_sed_file = []
tp_sed_file = []
sed_rootdir = os.getenv('SIMS_SED_LIBRARY_DIR')
for i in range(len(sed_chosen)):
# Form output path
filename = f'{self.cmp_name}_random_sed_{self.hp}_{i}.txt'
outpath = os.path.join(self.output_dir, filename)
(our_mag, nm500) = self._write_sed(outpath, sed_chosen[i],
self.bins, redshift_chosen[i],
summary_only=summary_only)
our_magnorm.append(our_mag)
val_500nm.append(nm500)
tp_sed_file.append(outpath)
orig_sed = self.lookup_info.get_orig_sed_file(self.cmp_name,
gal_chosen[i],
min_ix=ix_list[i])
orig_sed_file.append(os.path.join(sed_rootdir, orig_sed))
if not summary_only:
print_dated_msg(f'Wrote file {i}')
# Make summary table and write to a file
self._write_summary(ix_list, gal_chosen, sed_chosen, redshift_chosen,
orig_magnorm_chosen, our_magnorm,
val_500nm, orig_sed_file, tp_sed_file)
|
{"hexsha": "dd5a0fe247d15b10c8c84d49eea5f17d5ed99650", "size": 15243, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/desc/skycatalogs/utils/sed_utils.py", "max_stars_repo_name": "LSSTDESC/skyCatalogs", "max_stars_repo_head_hexsha": "39807b6fb510e45d7db79cf903e2eaa59befa81b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-20T01:51:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T01:51:00.000Z", "max_issues_repo_path": "python/desc/skycatalogs/utils/sed_utils.py", "max_issues_repo_name": "LSSTDESC/skyCatalogs", "max_issues_repo_head_hexsha": "39807b6fb510e45d7db79cf903e2eaa59befa81b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-11-09T20:20:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-20T20:23:21.000Z", "max_forks_repo_path": "python/desc/skycatalogs/utils/sed_utils.py", "max_forks_repo_name": "LSSTDESC/skyCatalogs", "max_forks_repo_head_hexsha": "39807b6fb510e45d7db79cf903e2eaa59befa81b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.395465995, "max_line_length": 98, "alphanum_fraction": 0.6030965033, "include": true, "reason": "import numpy,from numpy,from astropy", "num_tokens": 3920}
|
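# NOTE: the functions below appear to rely on the following packages being
# attached (inferred from the calls they make; verify against your setup):
# TDA (ripsDiag, landscape), quantmod (Delt), xts/zoo (xts, index, write.zoo)
# and PKNCA (pk.calc.auc.all).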
# Takes one dimensional time series, performs delay embedding
one_d <- function(data, embed_dim = 4)
{
edata <- embed(data, embed_dim)
return(edata)
}
#Takes time series and scales, either using max values or taking log
pre_process <- function(data, scaling_method)
{
tmp <- data
if (scaling_method == "max") {
    for (i in 1:ncol(tmp)) {
tmp[,i] <- tmp[,i]/max(abs(tmp[,i]))
}
}
return(tmp)
}
log_r <- function(tmp)
{
for(i in 1:ncol(tmp)) {
#replace each column in the data set with the log return
#replace corresponding column in dat with new log return value
tmp[,i] <- Delt(tmp[,i])
}
  #Since the first value will always be NA, remove it
tmp <- tmp[-1,]
return(tmp)
}
#Finds upper bound of diameter of the data
#to use for max_scale
find_diam <- function(time)
{
dim <- ncol(time)
diam_list = vector(length = dim)
for (i in 1:dim) {
high <- max(time[,i])
low <- min(time[,i])
diam_list[i] <- (high-low)
}
  diam <- sqrt(sum(diam_list^2))  # Euclidean norm; base::norm() requires a matrix input
return(diam)
}
#Takes time series and finds persistence diagrams
nd_diag <- function(time, max_scale = 0.1, K_max = 10, window = 100)
{
tmp <- time
#TDA
  maxdimension = 1
Diags1.rips.ls = list()
total <- dim(tmp)[1]
step <- 1
spot <- seq(from=1, to=(total-window+1), by=step)
#Persistence diagrams
print("Computing persistence diagrams...")
pb <- txtProgressBar(min = 0, max = length(spot), style = 3)
for(i in 1:length(spot)){
Diags1.rips.ls[[i]] =
      ripsDiag(tmp[spot[i]:(spot[i]+window-1), ], maxdimension = maxdimension, maxscale = max_scale, library = "Dionysus")
# update progress bar
setTxtProgressBar(pb,i)
}
plot(Diags1.rips.ls[[2]]$diagram)
return(Diags1.rips.ls)
}
#Takes the diagrams from nd_diag and calculates the norm
nd_norm <- function(diag, max_scale, K_max = 10, scaling_method = "log"){
step <- 1
spot <- seq(from =1, to = length(diag), by = step)
Lands1.ls = list() # collects landscapes (in 2D) at particular date i per k
AUC_Land.m = matrix(0,length(spot),K_max) #collects values of lambda_k at particular date i per k
AUC_Land.v = vector(length=length(spot)) #collects values of L^1 at particular date
#Compute L^1
print("Computing the L1 norm of persistence landscapes...")
pb <- txtProgressBar(min = 0, max = length(spot), style = 3)
for (i in 1:length(spot)){
diagram <- diag[[i]]$diagram
# print(diagram)
# Look at persistence diagrams on a logarithmic scale
if(scaling_method=="log") {
# Only take logarithm of nonzero entries
for(j in 1:nrow(diagram)) {
if(diagram[j,1]==1) {
diagram[j,2] = log(diagram[j,2])
diagram[j,3] = log(diagram[j,3])
}
}
}
minim=Inf
maxim=-Inf
for(j in 1:nrow(diagram)) {
# print(diagram[j,1])
if(diagram[j,1]==1) {
minim = min(minim,diagram[j,2])
maxim = max(maxim,diagram[j,3])
}
}
#print(minim)
#print(maxim)
if(minim==Inf) # There is no 1-dimensional homology feature in this window
{
AUC_Land.v[i]=0
}
else
{
tseq <- seq(minim, maxim, length=500)
for (KK in 1:K_max){
Lands1.ls[[i]]=landscape(diagram, dimension=1, KK, tseq)
AUC_Land.m[i,KK]= pk.calc.auc.all(Lands1.ls[[i]],tseq, interval=c(minim,maxim))
AUC_Land.v[i]= AUC_Land.v[i]+AUC_Land.m[i,KK]
}
      plot(Lands1.ls[[i]])  # was [[2]], which is out of bounds on the first window
}
# update progress bar
setTxtProgressBar(pb,i)
}
return(AUC_Land.v)
}
#End line method - takes multi-D time series, processes,
#and finds the norm
#Default max_scale of 0 leads to find_diam being called
analyze_nd <- function(time, scaling_method = "log", returns = FALSE,
max_scale = 0, K_max = 10, window = 50) {
dates <- index(time)
if (returns) {
#Taking log-returns of the log of the data can lead to errors,
#so if returns are desired, we deactivate the log scaling
if (scaling_method =="log") {
scaling_method <- "none"
}
time <- log_r(time)
}
data <- pre_process(time, scaling_method)
if(max_scale == 0) {
max_scale <- find_diam(data)
}
diag <- nd_diag(data, max_scale, K_max, window)
norm <- nd_norm(diag, max_scale, K_max, scaling_method)
new_dates <- tail(dates, length(norm))
norm.xts <- xts(norm, order.by = new_dates)
return(norm.xts)
}
#End line method - takes one-D time series
#finds delay embedding, calls analyze_nd
analyze_1d <- function(time, dim = 4, scaling_method = "log", max_scale = 0, K_max = 10, window = 50, returns = FALSE) {
if (returns) {
#Taking log-returns of the log of the data can lead to errors,
#so if returns are desired, we deactivate the log scaling
if (scaling_method == "log") {
scaling_method <- "none"
}
time <- log_r(time)
}
dates <- index(time)
delay_data <- one_d(time, dim)
delay_data.xts <- xts(delay_data, order.by = tail(dates, length(delay_data[,1])))
analyze_nd(delay_data.xts, scaling_method, returns = FALSE, max_scale, K_max, window)
}
output <- function(tmp, filename, plot = FALSE)
{
  write.zoo(tmp, file = filename)
  if (plot) {
    plot(tmp, type = 'l')
  }
}
get_impact_window <- function(data, index, window_length)
{
data[index:(index+window_length-1)]
}
|
{"hexsha": "57ecd679c85292cef58a7d54ceae8d8321665b6b", "size": 5523, "ext": "r", "lang": "R", "max_stars_repo_path": "Minor Project/TDA_Finance.r", "max_stars_repo_name": "sakshi-vats/tda-for-crypto", "max_stars_repo_head_hexsha": "8ffb690587f985b829ed507d0251715cd86bd265", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Minor Project/TDA_Finance.r", "max_issues_repo_name": "sakshi-vats/tda-for-crypto", "max_issues_repo_head_hexsha": "8ffb690587f985b829ed507d0251715cd86bd265", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Minor Project/TDA_Finance.r", "max_forks_repo_name": "sakshi-vats/tda-for-crypto", "max_forks_repo_head_hexsha": "8ffb690587f985b829ed507d0251715cd86bd265", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.765625, "max_line_length": 121, "alphanum_fraction": 0.6051059207, "num_tokens": 1610}
|
#define BOOST_TEST_DYN_LINK
#include <canard/net/ofp/v13/common/oxm_match_field.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/data/test_case.hpp>
#include <boost/test/data/monomorphic.hpp>
#include <stdexcept>
#include <boost/optional/optional.hpp>
#include <boost/optional/optional_io.hpp>
namespace of = canard::net::ofp;
namespace v13 = of::v13;
namespace match = v13::oxm_match_fields;
namespace proto = v13::protocol;
namespace bdata = boost::unit_test::data;
BOOST_AUTO_TEST_SUITE(oxm_match_test)
BOOST_AUTO_TEST_SUITE(ipv6_flabel_test)
BOOST_DATA_TEST_CASE(
no_mask_create_success_test
, bdata::make(std::vector<std::uint32_t>{0x00000000, 0x000fffff})
, value)
{
auto const sut = match::ipv6_flabel::create(value);
BOOST_TEST(sut.oxm_length() == sizeof(std::uint32_t));
BOOST_TEST(sut.oxm_value() == value);
BOOST_TEST(!sut.oxm_hasmask());
BOOST_TEST(sut.oxm_mask() == boost::none);
BOOST_TEST(sut.length()
== sizeof(std::uint32_t) + sizeof(std::uint32_t));
BOOST_TEST(!sut.is_wildcard());
BOOST_TEST(sut.is_exact());
}
BOOST_DATA_TEST_CASE(
has_mask_create_success_test
, bdata::make(std::vector<std::uint32_t>{0x00000000, 0x000fffff})
^ bdata::make(std::vector<std::uint32_t>{0x00000001, 0x000fffff})
, value, mask)
{
auto const sut = match::ipv6_flabel::create(value, mask);
BOOST_TEST(sut.oxm_length() == 2 * sizeof(std::uint32_t));
BOOST_TEST(sut.oxm_value() == value);
BOOST_TEST_REQUIRE(sut.oxm_hasmask());
BOOST_TEST(*sut.oxm_mask() == mask);
BOOST_TEST(sut.length()
== sizeof(std::uint32_t) + 2 * sizeof(std::uint32_t));
BOOST_TEST(!sut.is_wildcard());
BOOST_TEST(!sut.is_exact());
}
BOOST_DATA_TEST_CASE(
no_mask_create_failure_test
, bdata::make(std::vector<std::uint32_t>{0x00100000, 0xffffffff})
, value)
{
BOOST_CHECK_THROW(
match::ipv6_flabel::create(value), std::runtime_error);
}
BOOST_AUTO_TEST_CASE(has_mask_create_failure_test)
{
BOOST_CHECK_THROW(
match::ipv6_flabel::create(0x01, 0x00), std::runtime_error);
}
BOOST_AUTO_TEST_SUITE_END() // ipv6_flabel_test
BOOST_AUTO_TEST_SUITE_END() // oxm_match_test
|
{"hexsha": "3388f17e6aa719dc1bcad044ae9fa7ebe5152911", "size": 2443, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/v13/oxm_match/ipv6_flabel_test.cpp", "max_stars_repo_name": "amedama41/bulb", "max_stars_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/v13/oxm_match/ipv6_flabel_test.cpp", "max_issues_repo_name": "amedama41/bulb", "max_issues_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 8.0, "max_issues_repo_issues_event_min_datetime": "2016-07-21T11:29:13.000Z", "max_issues_repo_issues_event_max_datetime": "2016-12-03T05:16:42.000Z", "max_forks_repo_path": "test/v13/oxm_match/ipv6_flabel_test.cpp", "max_forks_repo_name": "amedama41/bulb", "max_forks_repo_head_hexsha": "2e9fd8a8c35cfc2be2ecf5f747f83cf36ffbbdbb", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0135135135, "max_line_length": 77, "alphanum_fraction": 0.6471551371, "num_tokens": 646}
|
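# NOTE: this file uses pre-1.0 Julia syntax: `parse` on a string and `unshift!`
# were renamed `Meta.parse` and `pushfirst!` in Julia 1.0.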
macro symbol_func(cur_expr::Expr)
@assert cur_expr.head == :function
cur_call = cur_expr.args[1]
cur_func_name = cur_call.args[1]
cur_main_var = cur_call.args[2].args[1]
cur_param = Expr(
:kw,
Expr(
:(::),
:is_direct_call,
:Bool
),
false
)
push!(cur_call.args, cur_param)
cur_sub_expr = parse("""
$(cur_main_var).is_symbolic && !is_direct_call &&
return symbols("$(cur_func_name)")
""")
unshift!(cur_expr.args[2].args, cur_sub_expr)
cur_expr
end
|
{"hexsha": "78f26342f45b09dfb6f9e84335392001c6527f4b", "size": 520, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils/symbol_func.jl", "max_stars_repo_name": "djsegal/Fusion.jl", "max_stars_repo_head_hexsha": "a0540fbf3345a778965fa092e9e56907a44c6521", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-12-31T10:16:41.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-13T22:41:17.000Z", "max_issues_repo_path": "src/utils/symbol_func.jl", "max_issues_repo_name": "djsegal/Fusion.jl", "max_issues_repo_head_hexsha": "a0540fbf3345a778965fa092e9e56907a44c6521", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2017-04-11T05:06:15.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-23T05:06:33.000Z", "max_forks_repo_path": "src/utils/symbol_func.jl", "max_forks_repo_name": "djsegal/Fussy.jl", "max_forks_repo_head_hexsha": "a0540fbf3345a778965fa092e9e56907a44c6521", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-04-26T16:58:21.000Z", "max_forks_repo_forks_event_max_datetime": "2017-04-27T15:25:51.000Z", "avg_line_length": 16.7741935484, "max_line_length": 53, "alphanum_fraction": 0.625, "num_tokens": 154}
|
"""
Global Land Cover Facility GLCF MCD12Q1
http://glcf.umd.edu/data/lc/
"""
import numpy as np
import numpy.ma as ma
import matplotlib.patches as mpatches
CLASSES_NAMES = {
0: 'Water',
1: 'Evergreen needleleaf forest',
2: 'Evergreen broadleaf forest',
3: 'Deciduous needleleaf forest',
4: 'Deciduous broadleaf forest',
5: 'Mixed forest',
6: 'Closed shrublands',
7: 'Open shrublands',
8: 'Woody savannas',
9: 'Savannas',
10: 'Grasslands',
11: 'Permanent wetlands',
12: 'Croplands',
13: 'Urban and built-up',
14: 'Cropland/Natural vegetation mosaic',
15: 'Snow and ice',
16: 'Barren or sparsely vegetated',
254: 'Unclassified',
255: 'Fill value',
}
CMAP = {
0: (31, 120, 180),
1: (51, 160, 44),
2: (51, 121, 44),
3: (178, 223, 138),
4: (178, 188, 138),
5: (90, 160, 44),
6: (119, 160, 44),
7: (104, 160, 44),
8: (205, 191, 111),
9: (202, 160, 44),
10: (51, 219, 44),
11: (166, 206, 227),
12: (255, 127, 0),
13: (106, 106, 106),
14: (255, 77, 0),
15: (36, 243, 253),
16: (220, 240, 0),
254: (255, 0, 255),
255: (255, 0, 255),
}
def glcf_to_rgb(arr):
arr_rgb = np.zeros((arr.shape[0], arr.shape[1], 3), dtype=np.uint8)
for glcf_type, color in CMAP.items():
arr_rgb[arr == glcf_type] = color
return arr_rgb
def plot_glcf_labelmap(labels, ax=None):
import pylab as pl
if ax is None:
ax = pl.subplot(111)
vimg = glcf_to_rgb(labels)
vimg[labels.mask] = (0, 0, 0)
ax.imshow(vimg, interpolation='nearest')
lgd_patches = []
for glcf_type in sorted(np.unique(labels)):
if glcf_type is ma.masked:
continue
lgd_patches.append(
mpatches.Patch(
color=np.array(CMAP[glcf_type]) / 255.,
label=CLASSES_NAMES[glcf_type]
)
)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
handles=lgd_patches)
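# Example usage (illustrative; 'class_codes' is a hypothetical uint8 array of
# MCD12Q1 class values in which 255 marks fill pixels):
#     labels = ma.masked_equal(class_codes, 255)
#     plot_glcf_labelmap(labels)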
|
{"hexsha": "aadde694a60dc5a2db07731f445d41be3b046a43", "size": 2011, "ext": "py", "lang": "Python", "max_stars_repo_path": "rastercube/datasources/glcf.py", "max_stars_repo_name": "terrai/rastercube", "max_stars_repo_head_hexsha": "c8c6214fd682f72e94df4979f5d737cea4778617", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-06-23T15:11:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-02T19:32:11.000Z", "max_issues_repo_path": "rastercube/datasources/glcf.py", "max_issues_repo_name": "terrai/rastercube", "max_issues_repo_head_hexsha": "c8c6214fd682f72e94df4979f5d737cea4778617", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rastercube/datasources/glcf.py", "max_forks_repo_name": "terrai/rastercube", "max_forks_repo_head_hexsha": "c8c6214fd682f72e94df4979f5d737cea4778617", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-07-28T08:45:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-28T03:19:17.000Z", "avg_line_length": 24.5243902439, "max_line_length": 71, "alphanum_fraction": 0.5634012929, "include": true, "reason": "import numpy", "num_tokens": 723}
|
\graphicspath{ {img/BR/} }
\chapter[Bandwidth Reservation as a Coexistence Strategy in Opportunistic Spectrum Access Environments][Bandwidth Reservation in OSA]{Bandwidth Reservation as a Coexistence Strategy in Opportunistic Spectrum Access Environments}\label{BR_chap}
\section{Introduction}\label{sec:Introduction}
Because of the sensing and transmission problem described in the previous chapter, a consequence of the hardware limitations of cognitive devices, there is always some probability of PU and SU transmission overlap and therefore some level of interference at the PUs.
In fact, OSA relies on the idea that it makes sense to minimally reduce the QoS of the primary network (PN) if it would result in a much higher spectrum efficiency \cite{ref:Pawelczak2009}.
Previous research efforts have mainly focused on designing OSA for the best SU performance at a desired level of PU QoS protection \cite{ref:Domenico2012}. In contrast, the PN point of view has not received enough attention, let alone the consideration of the PN as an active agent collaborating in OSA. Moreover, as we explained in the introduction, the consent of the spectrum owners is vital to the implementation of DSA mechanisms.
\subsection{Motivation}
The question addressed in this chapter is: is it worthwhile for the PN to reserve part of its spectrum to ease OSA for a secondary network (SN)? The reserved channels will only be used by the PUs if the non-reserved ones are occupied by other PU transmissions.
Intuitively, it seems to be beneficial for the SUs, since it eases finding spectrum opportunities and assures that the free channels are adjacent, simplifying the SU's transmission hardware.
The incentives for the PN are not so clear. On the one hand it might avoid collisions with SU transmissions, resulting in less interference at PU receivers, but on the other hand it would reduce the amount of channels available to the PN resource allocation algorithm. In a PN exploiting the channels with best instantaneous propagation conditions, the reservation scheme would reduce the achievable rate.
Determining when the positive aspects compensate the negative ones is not a trivial question and depends on several characteristics of the system: the PU traffic, the SU traffic, the configuration of OSA parameters, the transmission parameters, etc.
The situation considered is not only theoretical: there are scenarios where it would be a feasible coexistence framework. For example, in areas where the regulators force OSA in a licensed spectrum band, it would be an affordable way for the licensed operator to minimize the harmful effects of SU activity. Another example is a spectrum owner that decides to grant access to certain spectrum bands in exchange for a pre-established flat-rate fee paid by the SUs. This solution could be an alternative to the more complex automatic spectrum trading mechanisms in the literature \cite{ref:Gao2011}, \cite{ref:Yu2010}, and more feasible in the early stages of cognitive radio deployment, since it does not require the introduction of new protocols, interfaces, etc., as spectrum trading does \cite{ref:Yang2011}. With the scheme described in this chapter, the licensed operator keeps the reserved band free of PU activity with high probability, and the cognitive users assure, by means of their sensing capabilities and short transmission periods, a small collision probability and a low interference level with PU communications.
\subsection{Related Work}\label{sec:RelatedWork}
In many networking technologies we can find examples of mechanisms for exploiting spare network resources. For example, in circuit switched networks, it is usual to use protection provisioned capacity to accommodate low-priority traffic in failure-free conditions \cite{ref:Vasseur2004}.
In wireless networks, channel reservation schemes were applied to reduce the dropping probability of handover calls \cite{ref:Ramjee1996}, \cite{ref:Vazquez2006}.
Although the bandwidth reservation idea studied in this chapter is conceptually similar to those classic schemes, its application in opportunistic spectrum access environments is completely different in many technical aspects: interference and propagation issues, the sensing of PU activity, the variability of the channel gain, and the effect of BR on PU Shannon capacity, among others. Therefore, new research challenges have to be addressed.
Despite the intense research effort conducted on cognitive radio so far, the issue of bandwidth reservation for OSA and its impact on SU and PN performance has not been yet addressed in detail.
The term channel reservation in cognitive radio environments is used in \cite{ref:Zhu2007} and later works like \cite{ref:Martinez2009}, \cite{ref:Ahmed2009} and \cite{ref:Lai2011} to refer to a mechanism by which some of the channels not used by the PN are kept free of SU traffic by the secondary network (SN) itself. These ``reserved channels'' are used by the SN to accommodate ongoing SU transmissions that, forced by PUs, have to leave their initially occupied channels. In that case, the reservation is performed by the SN at the cost of an increased SU blocking probability. That idea is indeed an adaptation of the classic handoff channel reservation scheme to the cognitive environment.
To the best of our knowledge, only one work \cite{ref:Tang2006} has considered reserving part of the spectrum so that PUs occupy the reserved part only when the non-reserved part is congested.
While \cite{ref:Tang2006} assumed that the PN was completely unaffected by the introduction of a SN and by the reservation scheme, we base our study on the fact that, in real systems, both SU activity and bandwidth reservation do actually have an impact on PU performance. Therefore, we present the idea not as a way to minimize the SN blocking probability, but as a strategy to simultaneously improve PU and SU capacities in a coexistence scenario. Moreover we develop a detailed, realistic model comprising hardware constraints, multichannel OSA, propagation effects and interference.
In the framework of spectrum trading, a recent work \cite{ref:Wu2012} proposed a system in which the PN reserves a set of channels for dynamic spectrum auction. This work follows the concept of spectrum pooling, where secondary users temporarily rent from a pool of spectrum resources from primary users \cite{ref:Biglieri2012}. This approach requires the implementation of new protocols to support the required PU-SU signaling and makes the spectrum sensing no longer needed. It is therefore essentially different to the opportunistic access framework of our proposal.
There have been extensive research efforts involving OSA for low-complexity, hardware constrained devices \cite{ref:Park2011}, \cite{ref:Li2012} as well as multichannel access \cite{ref:Jeon2012}, \cite{ref:Xu2012_eff}.
Several works like \cite{ref:Jia2008_HC}, \cite{ref:Kim2008}, \cite{ref:Gabran2011} and \cite{ref:Cheng2011} consider both characteristics simultaneously. As in \cite{ref:Jia2008_HC} we consider that, because of the SU's hardware limitations, sensing can only be conducted within a small portion of the spectrum (channels) and requires a non-negligible time (scanning delay). Our model also includes imperfect sensing, Bayesian estimation and the effects of PU interference on the SU achievable bit rate.
Because we are interested in evaluating the capacity of the licensed network, the resource allocation scheme of the PN has to be capable of exploiting all the available bandwidth, implying full channel awareness. This is the goal of OFDM scheduling mechanisms \cite{ref:Sadr2009}. Our model considers one simple but effective mechanism described in \cite{ref:Sadr2009} and \cite{ref:Rhee2000}.
In works centered on cognitive MAC, the PN is usually characterized by collision probability \cite{ref:Gabran2011}, \cite{ref:Jung2012}, \cite{ref:Zhao2007_dec} and \cite{ref:Huang2009}, and less frequently by overlapping time \cite{ref:Huang2008_opp}.
However, for more general evaluations of PN and SN coexistence, interference level at PU receivers or PN Shannon capacity is preferred \cite{ref:Sun2012}. In OFDM related works, performance is usually evaluated in terms of capacity or achievable rate \cite{ref:Sadr2009}, \cite{ref:Rhee2000}, as we do in this work. The novelty in our analysis is obtaining closed-form analytical expressions for the capacity under OSA interference, with and without BR.
\subsection{Our Contribution}
The main contribution of this chapter is to develop a detailed framework for evaluating the impact of the bandwidth reservation scheme on PU and SU performance.
Based on the numerical results obtained, we discuss under which conditions it is advisable (or not) to implement a PN channel reservation scheme for OSA, why, and how to configure it.
As we explain in Section \ref{sec:System}, the SUs are characterized by hardware-limited radio, imperfect spectrum sensing, multichannel access, and a Maximum A Posteriori (MAP) estimator of the PU activity in the scanned channels. Section \ref{sec:System} also describes the PN resource allocation model.
In Section \ref{sec:Model} we derive the transition probabilities of the Markov model for the PU-SU channel occupation process.
By means of the Markov-reward model detailed in Section \ref{sec:Reward}, we evaluate the joint PU and SU performance in terms of expected total capacity, interference power and collision probability.
\section{System Description}\label{sec:System}
\subsection{Licensed Network}
The system under study consists of two overlay wireless networks. The licensed one operates with centralized access coordinated by an access node (AN).
As in previous works (e.g. \cite{ref:Zhao2007_dec}, \cite{ref:Kim2008},\cite{ref:Gelabert2011}, \cite{ref:Jiao2012}, \cite{ref:Tang2009}), PU traffic is assumed to follow a Poisson model: PU inter-arrival time and channel holding time are exponential random variables with rates $\lambda_{p}$ and $\mu_{p}$ respectively.
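As an illustrative consequence of this traffic model (assuming, for this sketch only, that PU arrivals finding all $N$ channels busy are blocked and cleared), the stationary distribution of the number of active PUs $n_{p}$ would be the truncated Poisson (Erlang) distribution
\begin{equation*}
\Pr\left\{n_{p}=k\right\}=\frac{\rho^{k}/k!}{\sum_{j=0}^{N}\rho^{j}/j!},\qquad \rho=\frac{\lambda_{p}}{\mu_{p}},\quad k=0,\ldots,N.
\end{equation*}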
We are evaluating the impact of the spectrum reservation scheme on the capacity of the licensed network. Therefore we consider that the AN's resource allocation mechanism exploits all the available bandwidth independently of the number of PUs connected.
We only consider resource allocation in the downlink bandwidth, where the OSA operates.
\subsubsection{Radio resource allocation of the licensed network}
The downlink spectrum consists of $N$ channels or subcarriers, each one with a bandwidth of $W$ Hz. At each time-slot, each one of the $n_{p}$ PUs in the system is assigned one of the $N$ channels and part of the total transmission power $p_{PU}$ of the AN.
At time-slot $t$, the wireless link between the $j$-th PU and the AN is associated with $N$ channel gains, one for each channel. The channels are assumed to present a constant gain over the bandwidth $W$ and over the duration $\tau$ of the time-slot, and to change over consecutive time-slots due to fading effects (block fading model, \cite{ref:Goldsmith2005}).
Perfect channel state information is assumed, so that the AN knows all the channel gains at each time-slot.
The objective of the resource management algorithm is to assign one channel to each PU and to allocate the power $p_{PU}$ among the $n_{p}$ assigned channels, at each time-slot, such that the total PU capacity is maximized.
A well-known strategy to attain this objective is to use a flat transmit power spectral density (PSD) \cite{ref:Rhee2000}, i.e. all the assigned subcarriers are allocated the same power ($p_{PU}/n_{p}$). This simplifies the problem to a subcarrier allocation problem, which is solved by allocating the best available subcarrier to the PU with the least achieved rate, then allocating the best subcarrier among the $N-1$ remaining ones to the PU with the second least rate, and so on. This strategy is shown to assure fairness among users and to achieve 96\% of the optimum capacity (\cite{ref:Rhee2000}, \cite{ref:Sadr2009}).
\subsubsection{Bandwidth reservation for OSA}
The bandwidth reservation (BR) mechanism considered implies that the AN reserves $m$ adjacent downlink channels for OSA, and the remaining $N-m$ channels are assigned to the $n_{p}$ PUs following the subcarrier allocation strategy described, whenever $n_{p}\leq N-m$. In case $n_{p}>N-m$, part of the $m$ reserved channels (concretely $n_{p}+m-N$) are assigned to PU transmissions, such that the $n_{p}$ channels used for PUs remain adjacent among them. Summarizing, the spectrum available for PU downlink transmission contains $\text{max}\left(n_{p},N-m\right)$ adjacent channels. Figure \ref{BR_fig_allocation} shows an example of the BR mechanism studied.
\begin{figure}[ht]
\centering
\includegraphics[scale=0.9]{channelReservation.eps}
\caption[]{Channel reservation scheme in the licensed spectrum. Greyed-out channels are occupied by PUs.}\label{BR_fig_allocation}
\end{figure}
\subsection{Secondary Network}\label{secondarynetwork}
The unlicensed or secondary network operates in a decentralized, ad-hoc fashion.
Communication is always performed between pairs of SUs (\textit{cognitive pairs}) consisting of one sender and one receiver.
Every SU is assumed to be under the coverage area of the same licensed access node; the objective is thus to transmit data using one or several of the $N$ licensed channels while causing the least possible degradation to PU communications. The SU access follows the interweave paradigm \cite{ref:Biglieri2012}, avoiding simultaneous transmissions with PUs.
In line with the low-complexity, hardware-limited approach of previous works (e.g. \cite{ref:Pawelczak2009}, \cite{ref:Jia2008_HC}, \cite{ref:Li2012}), the MAC protocol of the SUs is similar to the hardware-constrained MAC described in \cite{ref:Jia2008_HC}.
Summarizing, HC-MAC comprises three consecutive phases: \textit{contention}, \textit{sensing} and \textit{transmission}.
The contention procedure allows a pair of SUs (sender and receiver) to reserve the use of the spectrum in a certain area avoiding collisions with other SU transmissions.
Then, the cognitive pair starts to sense the spectrum in fixed-duration sensing slots to detect PU activity on each channel.
As in \cite{ref:Jia2008_HC}, the hardware limitations of the SUs are:
(1) Each SU is equipped with a single radio that can either transmit or receive, but not at the same time.
(2) When scanning the spectrum to detect PU activity, an SU can only sense one narrowband channel during each sensing slot.
(3) Once a cognitive pair decides to start a transmission, it can use up to $n_{s,\text{max}}$ noncontiguous channels simultaneously.
For the sake of completeness, we consider imperfect sensing, which can result in false positive or false negative PU activity detection. The false positive and false negative probabilities, $p_{f}$ and $p_{n}$, are computed for an energy level detector, using the formulas presented in \cite{ref:Pawelczak2009} and the references therein. The parameters involved are the sensing slot duration $s$, the channel bandwidth $W$, the observed signal to noise ratio, and the detection threshold $\theta$.
\begin{figure}[ht]
\centering
\includegraphics[scale=1]{slots.eps}
\caption[]{Sensing and access phases in OSA. The SU scans $\Delta=4$ channels, and detects $a=3$ available channels. In this case, the SU is leaving 1 free channel. This happens when the safety margin is $k=1$ or when the SU channel occupation limit is $n_{s,\text{max}}=2$. During the SU transmission, two PUs arrive, causing transmission overlap in channel 2 during time-slots 5 and 6. The numbers $\#n$ identify the PU to which the channel is assigned at each time-slot.}\label{BR_fig_slots}
\end{figure}
During the sensing phase, the SU scans $\Delta$ channels.
Because this phase lasts one time-slot, the sensing slot duration is $s=\tau/\Delta$.
If BR is used, the SU only transmits in the available channels found in the $m$ reserved channels. If not, the SU can transmit in the free channels found in $\Delta$.
In both cases the SU can use up to $n_{s,\text{max}}$ channels and, depending on the configuration, it may have to leave $k$ free channels as a safety margin.
Figure \ref{BR_fig_slots} shows an example of the sensing and access phases.
Table \ref{BR_table_parameters} summarizes the most relevant parameters of the model. Some of them have been already presented in this section and others are explained in Sections \ref{sec:Model} and \ref{sec:Reward}. We also include a brief list of the abbreviations used.
\begin{table}
\begin{tabular}{ll} \hline
\textbf{Abbreviation} & \textbf{Definition}\\\hline
$PU$ & Primary User\\
$PN$ & Primary Network\\
$AN$ & Access Node\\
$SU$ & Secondary User\\
$OSA$ & Opportunistic Spectrum Access\\
$BR$ & Bandwidth Reservation\\
$MAC$ & Medium Access Control\\
$MAP$ & Maximum A Posteriori\\
$PSD$ & Power Spectral Density\\\hline
\textbf{Notation} & \textbf{Definition}\\\hline
$W$ & channel bandwidth\\
$\tau$ & duration of the time-slots\\
$\Delta$ & number of scanned channels\\
$k$ & safety margin\\
$n_{s,\text{max}}$ & maximum channels for SU Tx\\
$m$ & reserved channels in BR\\
$\lambda_{p}$ & PU's arrival rate\\
$\mu_{p}$ & PU's service rate\\
$\pi_{i}$ & stationary probability of $i$ PUs in the system\\
$p_{s}$ & probability of SU arrival\\
$q_{s}$ & probability of SU departure\\
$\rho_{s}$ & SU traffic intensity\\
$T_{s}$ & average duration of an SU transmission\\
$n_{s}$ & number of SUs in the system\\
$n_{p}$ & number of PUs in the system\\
$C_{j,N}\left(n_{p}\right)$ & $j$-th PU capacity without interference\\
${C}_{N}\left(n_{p}\right)$ & PU capacity without interference\\
$C^{I}_{j,N}\left(n_{p},n_{s}\right)$ & $j$-th PU capacity with interference\\
$C^{\text{nbr}}_{N}\left(n_{p},n_{s}\right)$ & per state PN capacity without BR\\
$C^{\text{br}}_{N}\left(n_{p},n_{s}\right)$ & per state PN capacity with BR\\
$\bar{R}^{\text{nbr}}_{PU}$ & normalized PN capacity without BR\\
$\bar{R}^{\text{br}}_{PU}$ & normalized PN capacity with BR\\
$C'^{\text{nbr}}_{N}\left(n_{p},n_{s}\right)$ & per state SU capacity without BR\\
$C'^{\text{br}}_{N}\left(n_{p},n_{s}\right)$ & per state SU capacity with BR\\
$\bar{R}^{\text{nbr}}_{SU}$ & normalized SU capacity without BR\\
$\bar{R}^{\text{br}}_{SU}$ & normalized SU capacity with BR\\\hline
\end{tabular}
\centering
\caption{Summary of the most relevant parameters of the model}
\label{BR_table_parameters}
\end{table}
\section{Spectrum Occupation Model}\label{sec:Model}
\subsection{Spectrum Occupation Process}
Upon a PU arrival, the incoming PU is admitted if there are available channels, and remains in the system for a random time, exponentially distributed with rate $\mu_{p}$. If $n_{p}=N$, incoming PUs are rejected (blocking event).
Because the system operates on a time-slot basis, we translate the PU channel occupation process into a set of probabilities $P_{p}(j|i)$, for $i,j \in \left\{0,1,\ldots,N\right\}$, defined as the probability that $n_{p}=j$ in time-slot $t+1$, given that $n_{p}=i$ in time-slot $t$.
These $P_{p}(j|i)$ are the elements of the $\left(N+1\right)\times\left(N+1\right)$ transition probability matrix $\mathbf{T}$. The stationary distribution $\bar{\pi} = \left(\pi_{0},\pi_{1},\dots,\pi_{N}\right)$ is obtained from the equilibrium equations $\bar{\pi} = \bar{\pi}\mathbf{T}$, $\left\|\bar{\pi}\right\|_{1} = \sum_{i=0}^{N}\pi_{i}=1$.
This process is essentially a discrete-time $M/M/N/N$ queue (in Kendall's notation), with blocking probability equal to $\pi_{N}$.
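For reference, the stationary distribution can be computed numerically from any transition matrix. The following Python sketch (an illustration of ours, not part of the model; it assumes \texttt{numpy} is available) obtains $\bar{\pi}$ as the normalized left eigenvector of $\mathbf{T}$ associated with the unit eigenvalue:
\begin{verbatim}
import numpy as np

def stationary_distribution(T):
    # Left eigenvector of the row-stochastic matrix T for eigenvalue 1,
    # normalized so that the probabilities sum to 1.
    w, v = np.linalg.eig(T.T)
    pi = np.real(v[:, np.argmin(np.abs(w - 1.0))])
    return pi / pi.sum()
\end{verbatim}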
Let us now consider the PU process in conjunction with OSA. At each time slot, the probability that an SU attempts to access the spectrum is $p_{s}$.
An incoming SU scans the spectrum and then starts an SU transmission period using the detected free channels, up to a maximum of $n_{s,\text{max}}$. As a safety limit, the OSA MAC ensures that no other SU attempts to access the spectrum while another SU is using it. The model could be generalized to accommodate several concurrent SU transmissions, although, for evaluation purposes, this would be equivalent to increasing $n_{s,\text{max}}$ in the presented model.
For an incoming SU, we define the opportunistic access probability, $O\left(n'_{s},n_{p}\right)$, as the probability that the SU occupies $n'_{s}$ channels in time-slot $t+1$ given that there are $n_{p}$ PUs using the spectrum in time-slot $t$.
The duration of an SU transmission period is, on average, $T_{s}$, after which the SU leaves the spectrum. The termination probability of an SU transmission is $q_{s}=1/T_{s}$.
As a result, considering the SU access process in the absence of PU activity, the probability that the spectrum contains an SU transmission is given by $\rho_{s}=p_{s}/(p_{s}+q_{s})$, which we will refer to as the SU traffic intensity. Note that the actual probability of SU activity in the system will in general be lower than $\rho_{s}$, because the spectrum sensing may not always find spectrum opportunities.
This model describes the spectrum occupation process as a Markov chain $Z_{t}$, with a state space consisting of pairs $\left(n_{s},n_{p}\right)$, such that $n_{s} \in \left\{0,1,\dots,n_{s,\text{max}}\right\}$ and $n_{p} \in \left\{0,1,\dots,N\right\}$.
The transition probabilities from state $Z_{t}=\left(n_{s},n_{p}\right)$ to $Z_{t+1}=\left(n'_{s},n'_{p}\right)$ are given by
\begin{equation}\label{TransitionProbabilities}
P\left(n'_{s},n'_{p}|n_{s},n_{p}\right) =
\begin{cases}
p_{s}O_{s}\left(n'_{s},n_{p}\right)P_{p}\left(n'_{p}|n_{p}\right)&\mbox{if } n_{s}=0,\:n'_{s}\neq0\\
q_{s}\left(n_{s}\right)P_{p}\left(n'_{p}|n_{p}\right)&\mbox{if } n_{s}\neq0,\:n'_{s} = 0\\
\left(\left(1-p_{s}\right) + p_{s}O_{s}\left(0,n_{p}\right)\right)P_{p}\left(n'_{p}|n_{p}\right)&\mbox{if } n_{s}=n'_{s}=0\\
\left(1-q_{s}\right)P_{p}\left(n'_{p}|n_{p}\right)&\mbox{if } n_{s}=n'_{s}\neq0\\
0&\mbox{otherwise}\\
\end{cases}
\end{equation}
The following subsections develop the derivation of $O_{s}\left(n'_{s},n_{p}\right)$ for each spectrum reservation policy.
\subsection{SU Spectrum Sensing}
After the sensing phase, the SU generates an estimate $\hat{h}$ of the number $h$ of occupied channels in the scanned spectrum.
The estimation is based upon the sensing outcome, $\mathbf{x} = \left(x_{1},\dots,x_{\Delta}\right)$, where $x_{i} = 1$ if the SU detected PU activity in channel $i$ and $x_{i} = 0$ otherwise. We consider that the SU obtains $\hat{h}$ by a Maximum A Posteriori (MAP) estimation, $\hat{h}~=~\text{argmax }_{h} P\left(h|\mathbf{x}\right)$, which, applying Bayes' rule is equivalent to $\hat{h}~=~\text{argmax }_{h} P\left(h\right)P\left(\mathbf{x}|h\right)$ (see \cite{ref:Bertsekas2008}),
where $P\left(h\right)$ and $P\left(\mathbf{x}|h\right)$ can be obtained from the PU traffic and sensing error models, and depend on the spectrum reservation policy.
\subsubsection{Without spectrum reservation}
The conditional distribution $P\left(\mathbf{x}|h\right)$ of each outcome can be obtained as follows.
For each $\mathbf{x}$ the number of positive detections is $\left\|\mathbf{x}\right\|_{1}=\sum_{i=1}^{\Delta}x_{i}$.
For a given $h$, the number of false positives $f_{\text{p}}$ in $\mathbf{x}$ ranges from $f_{\text{p,min}}=\left(\left\|\mathbf{x}\right\|_{1}-h\right)^{+}$ to $f_{\text{p,max}}=\text{min}\left(\left\|\mathbf{x}\right\|_{1},\Delta-h\right)$.
For each $f_{\text{p}}$, the following equations provide the number of correct positives $c_{\text{p}}$, false negatives $f_{\text{n}}$ and correct negatives $c_{\text{n}}$ in $\mathbf{x}$:
\begin{equation}
\begin{array}{lcl}
c_{\text{p}} & = & \left\|\mathbf{x}\right\|_{1} - f_{\text{p}}\\
f_{\text{n}} & = & h-\left(\left\|\mathbf{x}\right\|_{1} - f_{\text{p}}\right)\\
c_{\text{n}} & = & \Delta - \left\|\mathbf{x}\right\|_{1} - f_{\text{n}}
\end{array}
\end{equation}
Then, adding the probabilities of all the possible $f_{\text{p}}$ values yields
\begin{equation}\label{Pxh}
P\left(\mathbf{x}|h\right) = \displaystyle\sum^{f_{\text{p,max}}}_{f_{\text{p}}=f_{\text{p,min}}}
{\Delta-h \choose f_{\text{p}}}{h \choose c_{\text{p}}} p_{n}^{f_{\text{n}}}\left(1-p_{n}\right)^{c_{\text{p}}}p_{f}^{f_{\text{p}}}\left(1-p_{f}\right)^{c_{\text{n}}}
\end{equation}
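Equation (\ref{Pxh}) translates directly into code. The sketch below (our own, hypothetical helper, not part of the model; it assumes \texttt{scipy} is available) evaluates $P\left(\mathbf{x}|h\right)$ for a given sensing outcome:
\begin{verbatim}
import numpy as np
from scipy.special import comb

def p_x_given_h(x, h, p_f, p_n):
    # x is the 0/1 sensing outcome over the Delta scanned channels.
    Delta, ones = len(x), int(np.sum(x))
    total = 0.0
    for fp in range(max(ones - h, 0), min(ones, Delta - h) + 1):
        cp = ones - fp            # correct positives
        fn = h - cp               # false negatives
        cn = Delta - ones - fn    # correct negatives
        total += (comb(Delta - h, fp) * comb(h, cp)
                  * p_n**fn * (1 - p_n)**cp
                  * p_f**fp * (1 - p_f)**cn)
    return total
\end{verbatim}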
When $j$ PUs are randomly allocated over $N$ channels, the number $h$ of PUs in the $\Delta$ scanned channels is a hypergeometric random variable, whose pmf we denote by $P_{\text{H}}\left(h;N,j,\Delta\right)$.
To obtain the distribution $P(h)$ we apply the law of total probability over every possible number $j$ of PUs in the system:
\begin{equation}\label{Ph}
P\left(h\right) =
\begin{cases}
\sum_{j=h}^{N-\Delta+h}\pi_{j}P_{\text{H}}\left(h;N,j,\Delta\right)&,\mbox{if }h>0\\
\sum_{j=0}^{N-\Delta}\pi_{j}P_{\text{H}}\left(h;N,j,\Delta\right)&,\mbox{if }h=0
\end{cases}
\end{equation}
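Similarly, $P(h)$ in (\ref{Ph}) is a mixture of hypergeometric terms weighted by the stationary distribution; a minimal sketch (again ours, assuming \texttt{scipy.stats}) is:
\begin{verbatim}
from scipy.stats import hypergeom

def p_h(h, N, Delta, pi):
    # pi[j] = P(n_p = j); hypergeom.pmf(h, N, j, Delta) = P_H(h; N, j, Delta).
    return sum(pi[j] * hypergeom.pmf(h, N, j, Delta)
               for j in range(h, N - Delta + h + 1))
\end{verbatim}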
\subsubsection{With spectrum reservation}
The SU is assumed to scan, at least, the $m$ reserved channels. Therefore, $\mathbf{x}=\left(\mathbf{x}_{m}, \mathbf{x}_{\Delta-m}\right)$, where $\mathbf{x}_{m}$ is the observation obtained in the first $m$ scanned channels and $\mathbf{x}_{\Delta-m}$ is the observation in the $\Delta-m$ channels scanned in the non-reserved spectrum. Similarly, $h=h_{m}+h_{\Delta-m}$, where $h_{m}$ and $h_{\Delta-m}$ are the PUs in the reserved and non-reserved channels respectively. The probabilities $P\left(\mathbf{x}_{\Delta-m}|h_{\Delta-m}\right)$ and $P\left(h_{\Delta-m}\right)$ are computed using (\ref{Pxh}) and (\ref{Ph}) respectively, for $\Delta-m$ scanned channels. In the reserved spectrum, the PUs occupy contiguous channels, therefore, for a given $h_{m}$, we can obtain $f_{\text{p}}$, $c_{\text{n}}$, $f_{\text{n}}$, and $c_{\text{p}}$ in $\mathbf{x}_{m}$ as follows
\begin{equation}
\begin{array}{lcl}
f_{\text{p}} & = & \sum_{j=h_{m}+1}^{m}x_{j}\\
c_{\text{n}} & = & m-h_{m}-\sum_{j=h_{m}+1}^{m}x_{j}\\
f_{\text{n}} & = & h_{m}-\sum_{j=1}^{h_{m}}x_{j}\\
c_{\text{p}} & = & \sum_{j=1}^{h_{m}}x_{j}
\end{array}
\end{equation}
and the conditional probability $P\left(\mathbf{x}_{m}|h_{m}\right)$ is given by
\begin{equation}
P\left(\mathbf{x}_{m}|h_{m}\right) = p_{n}^{f_{\text{n}}}\left(1-p_{n}\right)^{c_{\text{p}}}p_{f}^{f_{\text{p}}}\left(1-p_{f}\right)^{c_{\text{n}}}
\end{equation}
The distribution $P(h_{m})$ is directly obtained from $\bar{\pi}$:
\begin{equation}
P\left(h_{m}\right) =
\begin{cases}
\pi_{N-m+h_{m}}&,\mbox{if }h_{m}>0\\
\sum_{j=0}^{N-m}\pi_{j}&,\mbox{if }h_{m}=0
\end{cases}
\end{equation}
\subsection{SU Spectrum Access}
Let $D_{\Delta}\left(a,h\right)$ denote the probability of finding a number $a$ of available channels given that, during the sensing phase, $h$ out of the $\Delta$ scanned channels are occupied by PU transmissions.
In order to compute $D_{\Delta}\left(a,h\right)$ let us define $X_{\hat{h}}$ as the set of possible values of $\mathbf{x}$ for which the outcome of the MAP estimator equals $\hat{h}$: $X_{\hat{h}} = \left\{\mathbf{x}|\hat{h} = \underset{h}{\text{argmax }} P\left(h\right)P\left(\mathbf{x}|h\right)\right\}$.
Finding $a$ free channels implies detecting PU activity in $\hat{h}=\Delta-a$ channels; therefore,
$D_{\Delta}\left(a,h\right)$ is given by
\begin{equation}
\begin{array}{lcl}
D_{\Delta}\left(a,h\right) & = & P(\hat{h}|h)\\
& = & \sum_{\mathbf{x}\in X_{\hat{h}=\Delta-a}}P\left(\mathbf{x}|h\right)
\end{array}
\end{equation}
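For small $\Delta$, $D_{\Delta}\left(a,h\right)$ can be evaluated by brute force over the $2^{\Delta}$ possible sensing outcomes. The sketch below (illustrative only; it reuses the hypothetical helpers \texttt{p\_h} and \texttt{p\_x\_given\_h} from the previous sketches) classifies each outcome with the MAP rule and accumulates the probabilities:
\begin{verbatim}
from itertools import product

def D(Delta, a, h, p_f, p_n, N, pi):
    # Probability of estimating a available channels (h_hat = Delta - a)
    # when h of the Delta scanned channels are actually occupied.
    total = 0.0
    for x in product([0, 1], repeat=Delta):
        h_hat = max(range(Delta + 1),
                    key=lambda hh: p_h(hh, N, Delta, pi)
                                   * p_x_given_h(x, hh, p_f, p_n))
        if h_hat == Delta - a:
            total += p_x_given_h(x, h, p_f, p_n)
    return total
\end{verbatim}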
Let us consider the no BR and the BR cases separately for the computation of $O_{s}\left(n'_{s},n_{p}\right)$.
\subsubsection{Without spectrum reservation}
The probability that, in any time-slot, $h$ channels out of the $\Delta$ scanned channels are used by PUs, when there are $n_{p}$ PUs in the spectrum, is hypergeometric. Therefore
\begin{equation}\label{Os1}
O_{s}\left(n'_{s},n_{p}\right) =
\begin{cases} \displaystyle\sum_{h=h_{\text{min}}}^{h_{\text{max}}}D_{\Delta}\left(n'_{s}+k,h\right)P_{\text{H}}\left(h;N,n_{p},\Delta\right)&\mbox{if } n'_{s}<n_{s,\text{max}}\\
\displaystyle\sum_{a=n'_{s}+k}^{\Delta}\displaystyle\sum_{h=h_{\text{min}}}^{h_{\text{max}}}D_{\Delta}\left(a,h\right)P_{\text{H}}\left(h;N,n_{p},\Delta\right)&\mbox{if } n'_{s}=n_{s,\text{max}}
\end{cases}
\end{equation}
where $h_{\text{min}} = \left(\Delta+n_{p}-N\right)^{+}$ and $h_{\text{max}} = \mbox{min}\left(\Delta,n_{p}\right)$ are the minimum and maximum number of PUs within $\Delta$.
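A direct implementation of (\ref{Os1}), again reusing the hypothetical helper \texttt{D} sketched above, could look as follows (a sketch, not an optimized routine):
\begin{verbatim}
from scipy.stats import hypergeom

def O_s(ns_new, n_p, N, Delta, k, ns_max, p_f, p_n, pi):
    h_min, h_max = max(Delta + n_p - N, 0), min(Delta, n_p)
    if ns_new < ns_max:
        a_values = [ns_new + k]
    else:
        a_values = range(ns_new + k, Delta + 1)
    return sum(D(Delta, a, h, p_f, p_n, N, pi)
               * hypergeom.pmf(h, N, n_p, Delta)
               for a in a_values
               for h in range(h_min, h_max + 1))
\end{verbatim}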
\subsubsection{With spectrum reservation}
In this case, an incoming SU occupies $n'_{s}$ channels, always within the $m$ reserved ones. Therefore, the SU detects $a_{m}=\text{min}\left(m,n'_{s}+k\right)$ available channels in the $m$ reserved channels and $a_{\Delta-m}=\left(n'_{s}+k-m\right)^{+}$ in the non-reserved ones. For $n_{p}>N-m$, $h_{m}=n_{p}+m-N$ and $h_{\Delta-m} = \Delta-m$, therefore
\begin{equation}\label{Os2}
O_{s}\left(n'_{s},n_{p}\right) = D_{m}\left(a_{m},h_{m}\right)D_{\Delta-m}\left(a_{\Delta-m},h_{\Delta-m}\right)
\end{equation}
For $n_{p}\leq N-m$, $h_{m}=0$ and each value of $h_{\Delta-m}$ has a hypergeometric probability $P_{\text{H}}\left(h_{\Delta-m};N,n_{p},\Delta-m\right)$. Multiplying (\ref{Os1}) by the $D_{m}\left(a_{m},h_{m}\right)$ term, we can obtain $O_{s}\left(n'_{s},n_{p}\right)$. In case $n'_{s}=n_{s,\text{max}}$, the outer summation is done over $a_{\Delta-m}=n'_{s}+k-m,\ldots,\Delta-m$.
\section{Markov Reward Model for PU and SU Capacities}\label{sec:Reward}
In this section we construct a Markov reward model to estimate the achievable transmission rate for PUs and SUs. Such a model requires the definition of a reward function $R\left(n_{s},n_{p}\right)$ providing the Shannon capacity at each state. The objective is to obtain the expected average reward defined as
\begin{equation}\label{MarkovReward1}
\bar{R} = \underset{T\rightarrow\infty}{\text{lim }}\frac{1}{T}\displaystyle\sum_{t = 0}^{T}R\left(Z_{t}\right)
\end{equation}
For an ergodic, finite-state Markov chain, and provided that $\left|R\left(n_{s},n_{p}\right)\right|$ is bounded for every $\left(n_{s},n_{p}\right)$, the total expected average reward $\bar{R}$ is
\begin{equation}\label{MarkovReward2}
\bar{R} = \displaystyle\sum_{n_{p} = 0}^{N}\sum_{n_{s} = 0}^{m-k}R\left(n_{s},n_{p}\right)P(n_{s},n_{p})
\end{equation}
where $P(n_{s},n_{p})$ are the steady state probabilities of the Markov process defined by (\ref{TransitionProbabilities}).
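Once the steady state probabilities are available (e.g. via the eigenvector computation sketched in Section \ref{sec:Model}), evaluating (\ref{MarkovReward2}) reduces to a weighted sum; a minimal sketch (names are ours):
\begin{verbatim}
import numpy as np

def average_reward(R, P):
    # R and P are arrays indexed by (n_s, n_p); equation (MarkovReward2).
    return float((np.asarray(R) * np.asarray(P)).sum())
\end{verbatim}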
\subsection{Propagation Model}
Before deriving the expressions for PU and SU capacities with and without BR, we have to establish the models for the path loss and fading effects on the transmitted signals. Let $p_{PU}$ denote the total available transmission power at the PU Tx (AN) for the whole spectrum. The power transmitted in each channel, when there are $n_{p}$ PUs, is $p_{p}=p_{PU}/n_{p}$ (flat transmit PSD). The average received power at a PU terminal, after considering the path loss, is referred to as $p_{pp}$. Similarly, $p_{SU}$, $p_{s}$, and $p_{ss}$, are the total power at the SU Tx, the SU power in one channel, and the average SU received power in one channel, respectively. For interference computations, we use $p_{sp}$ to refer to the average SU power received at a PU Rx location and $p_{ps}$ for the PU power at an SU Rx location. Figure \ref{BR_fig_interference} shows a diagram with the transmission and interference powers between PUs and SUs.
\begin{figure}[ht]
\centering
\includegraphics[scale=1]{interference.eps}
\caption[]{Diagram of the interference between PUs and SUs.}\label{BR_fig_interference}
\end{figure}
For path loss estimation, we consider a common empirical method known as the piecewise linear model (see \cite{ref:Goldsmith2005}). In this model, for a given distance $r$ between the Tx and the Rx, the average received power is approximately equal to $p_{pp}=p_{p}Kg(r)$, where $K$ is a constant depending on the antenna gains, and $g(r)$ is given by
\begin{equation}
g(r) =
\begin{cases}
1 &\mbox{if }r\leq 1\\
r^{-n_{1}} &\mbox{if }1<r\leq r_{c}\\
r_{c}^{-n_{1}}\left(\frac{r}{r_{c}}\right)^{-n_{2}} &\mbox{if }r> r_{c}\\
\end{cases}
\end{equation}
The parameters $n_{1}$, $n_{2}$ and $r_{c}$ are empirically determined depending on the propagation environment. Typical values, which we use in our model, are around $n_{1}=2$, $n_{2}=4$, and $r_{c}=100$ m.
The same $g(r)$ is also used for SU signals.
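As an illustration, the piecewise path-loss gain can be coded directly (a sketch of ours; the default parameter values are those quoted above):
\begin{verbatim}
def g(r, n1=2.0, n2=4.0, r_c=100.0):
    # Piecewise linear (in dB) path-loss model.
    if r <= 1.0:
        return 1.0
    if r <= r_c:
        return r ** (-n1)
    return r_c ** (-n1) * (r / r_c) ** (-n2)
\end{verbatim}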
The instantaneous received power changes in consecutive time-slots due to multipath fading effects, and is characterized by a probability density function (\textit{pdf}).
In particular, assuming that the fading amplitude follows a Rayleigh distribution, the received power has the following \textit{pdf}, $f(p)=e^{-p/\bar{p}}/\bar{p}$, where $\bar{p}$ is the average power at the Rx location.
The capacity expressions of the following subsections are obtained for the $g(r)$ and $f(p)$ functions described. Note, however, that for different fading models (e.g. Nakagami or Rice) or different path loss models (two-ray, Okumura-Hata, etc.) the procedure is similar, although the equations may be more mathematically involved or require numerical evaluation.
\subsection{PU Downlink Capacity without Interference}
Let us consider a PU that, in a given time-slot, is the $j$-th one to be assigned a channel by the AN.
Because the Tx power on each channel remains constant for a fixed $n_{p}$, the maximum achievable rate per channel is determined by the additive white Gaussian noise (AWGN) capacity model (see \cite{ref:Goldsmith2005}).
The capacity for the $j$-th PU is given by $C_{j,N}~=~W\text{log}_{2}\left(1+\text{\ttfamily{SNR}}_{p}\right)$, where $\text{\ttfamily{SNR}}_{p}$ is the average signal to noise ratio at the $j$-th PU Rx.
The subscript $N$ indicates that there are $N$ channels available to allocate PU transmissions.
Each one of the $N$ channels has a different gain for the $j$-th PU Rx, but, because $j-1$ channels are already assigned, the AN selects, for the $j$-th PU, the channel with the highest gain among the $N-j+1$ remaining ones.
Consequently, the received power will be the highest among $N-j+1$ possible values.
The cumulative distribution function (CDF) for Rayleigh fading is $F(p)=(1-e^{-p/p_{pp}})$.
Therefore, the CDF of the highest value among $N-j+1$, is given by $\mathbf{P}(P\leq p) = (1-e^{-p/p_{pp}})^{N-j+1}$, resulting in the following \textit{pdf}
\begin{equation}
f_{j}(p) = \displaystyle\frac{d\mathbf{P}(P\leq p)}{dp} = \frac{N-j+1}{p_{pp}}e^{-\frac{p}{p_{pp}}}\left(1-e^{-\frac{p}{p_{pp}}}\right)^{N-j}
\end{equation}
We can now obtain $\text{\ttfamily{SNR}}_{p}$
\begin{equation}
\text{\ttfamily{SNR}}_{p} = \displaystyle\int_{p=0}^{\infty}\displaystyle\frac{p}{N_{0}W}f_{j}(p)dp = \displaystyle\frac{p_{pp}H_{N-j+1}}{N_{0}W}
\end{equation}
where $H_{n}~=~\sum_{i=1}^{n}i^{-1}$ is the $n$-th harmonic number. In the rest of this section, we will refer to $H_{N-j+1}$ as $H^{(j)}$. Applying $p_{pp}=p_{PU}Kg(r)/n_{p}$, we define
\begin{equation}
\text{\ttfamily{SNR}}_{p}(r) = \displaystyle\frac{p_{PU}H^{(j)}Kg(r)}{n_{p}N_{0}W}
\end{equation}
where $r$ denotes the distance between the PU Tx and Rx.
We could obtain the expectation of $C_{j,N}$ over all possible $r$ values in the cell but, for mathematical tractability, we will consider upper bounds for the average capacity on the coverage area. Because of the concavity of the $\log(1+x)$ function, we know, by Jensen's inequality, that $\mathbf{E}\left[\text{log}_{2}\left(1+\text{\ttfamily{SNR}}_{p}(r)\right)\right]$ $\leq$ $\text{log}_{2}\left(1+\mathbf{E}\left[\text{\ttfamily{SNR}}_{p}(r)\right]\right)$.
Considering that all PU Rx locations are equally probable and the PU Tx is at the center of the cell, the \textit{pdf} of the distance $r$ over a cell of radius $R$ is given by $f_{r}(r)=2r/R^{2}$. Therefore
\begin{equation}\label{SNRp}
\underset{r}{\mathbf{E}}\left[\text{\ttfamily{SNR}}_{p}(r)\right] = \displaystyle\frac{p_{PU} H^{(j)}K}{n_{p}N_{0}W}\underset{r}{\mathbf{E}}\left[g(r)\right]
\end{equation}
where
\begin{equation}\label{Egr}
\underset{r}{\mathbf{E}}\left[g(r)\right] = \int_{r=0}^{R}g(r)f_{r}(r)dr = \frac{\left(2R^{2}\left(1+\log(r_{c})\right)-r_{c}^{2}\right)}{R^{4}}
\end{equation}
The resulting $C_{j,N}(n_{p})$ expression is
\begin{equation}\label{CjN}
C_{j,N}(n_{p}) =
W\text{log}_{2}\left(1+\frac{p_{PU}H^{(j)}K}{n_{p}N_{0}W}\underset{r}{\mathbf{E}}\left[g(r)\right]\right)
\end{equation}
Finally, in the absence of OSA interference, the total expected downlink capacity for an $N$-channel system containing $n_{p}$ PU transmissions is obtained by summing the average capacities $C_{j,N}\left(n_{p}\right)$ over the $n_{p}$ PUs
\begin{equation}\label{totalCapacity}
C_{N}\left(n_{p}\right) = \displaystyle\sum_{j=1}^{n_{p}}C_{j,N}\left(n_{p}\right)
\end{equation}
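Equations (\ref{CjN}) and (\ref{totalCapacity}) can be evaluated with a few lines of code (an illustrative sketch of ours; the parameter \texttt{Eg} stands for $\mathbf{E}_{r}\left[g(r)\right]$):
\begin{verbatim}
import numpy as np

def C_N(n_p, N, W, p_PU, K, N0, Eg):
    H = lambda n: sum(1.0 / i for i in range(1, n + 1))  # harmonic number
    return sum(W * np.log2(1.0 + p_PU * H(N - j + 1) * K * Eg
                           / (n_p * N0 * W))
               for j in range(1, n_p + 1))
\end{verbatim}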
\subsection{PU Downlink Capacity with Interference}
Let us now consider a time-slot where the channel assigned to the $j$-th PU is occupied by an SU transmission, causing interference at the PU Rx.
By the AWGN model, the capacity for the $j$-th PU in this time-slot is now given by $C^{I}_{j,N}~=~W\text{log}_{2}\left(1+\text{\ttfamily{SINR}}_{p}\right)$, where $\text{\ttfamily{SINR}}_{p}$ is the average signal to interference and noise ratio at the $j$-th PU Rx.
For the general case of the PU Tx and SU Tx located at different places, the PU signal and the interference signal follow independent fading processes.
With the interference power \textit{pdf} given by $f^{I}(p)=e^{-p/p_{sp}}/p_{sp}$, the $\text{\ttfamily{SINR}}_{p}$ is obtained as follows
\begin{equation}\label{SINRp}
\begin{array}{lcl}
\text{\ttfamily{SINR}}_{p} & = & \displaystyle\int_{y=0}^{\infty}\displaystyle\int_{x=0}^{\infty}
\displaystyle\frac{x}{y+N_{0}W}f_{j}(x)f^{I}(y)dxdy \\
& = & \displaystyle\int_{y=0}^{\infty}\displaystyle\frac{p_{pp}H^{(j)}}{y + N_{0}W}f^{I}(y)dy \\
& = & \frac{p_{pp}H^{(j)}}{p_{sp}}\text{exp}\left(\frac{N_{0}W}{p_{sp}}\right)E_{1}\left(\frac{N_{0}W}{p_{sp}}\right)
\end{array}
\end{equation}
where $E_{1}(a)=\int_{a}^{\infty}t^{-1}e^{-t} dt$, is the exponential integral function (equivalent to the upper incomplete gamma function $\Gamma(0,a)$).
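The factor $e^{a}E_{1}(a)$ is readily available in numerical libraries; for instance, a sketch of (\ref{SINRp}) using SciPy's \texttt{exp1} (function and argument names are ours):
\begin{verbatim}
import numpy as np
from scipy.special import exp1

def SINR_p_avg(p_pp, p_sp, H_j, N0W):
    # Equation (SINRp); exp1 is SciPy's exponential integral E_1.
    a = N0W / p_sp
    return (p_pp * H_j / p_sp) * np.exp(a) * exp1(a)
\end{verbatim}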
Considering that $p_{sp}=p_{SU}K'g(r')/n_{s}$, where $r'$ is the distance between the interfering SU Tx and the PU Rx, the $\text{\ttfamily{SINR}}_{p}$ (\ref{SINRp}) depends now on $r$ and $r'$. To obtain an upper bound of $C^{I}_{j,N}$ over the possible $r$, $r'$ values in the cell, we make the following considerations
\begin{equation}
\underset{r,r'}{\mathbf{E}}\left[\text{log}_{2}\left(1+\text{\ttfamily{SINR}}_{p}(r,r')\right)\right] \leq \\
\text{log}_{2}\left(1+\underset{r,r'}{\mathbf{E}}\left[\text{\ttfamily{SINR}}_{p}(r,r')\right]\right)
\end{equation}
which follows from Jensen's inequality. Assuming independence between $r$ and $r'$, we have that
\begin{equation}\label{E_SINRp}
\begin{array}{ll}
\underset{r,r'}{\mathbf{E}}\left[\text{\ttfamily{SINR}}_{p}(r,r')\right] & =
\underset{r'}{\mathbf{E}}\left[\underset{r}{\mathbf{E}}\left[\text{\ttfamily{SINR}}_{p}(r,r')\right]\right] \\
& = \underset{r'}{\mathbf{E}}\left[n_{s}H^{(j)}p_{PU}K\underset{r}{\mathbf{E}}\left[g(r)\right]
\frac{\text{exp}\left(\frac{n_{s}N_{0}W}{p_{SU}K'g(r')}\right) E_{1}\left(\frac{n_{s}N_{0}W}{p_{SU}K'g(r')}\right)}{n_{p}p_{SU}K'g(r')}\right]
\end{array}
\end{equation}
The function involving the expectation over $r'$ in the last term, that is, $\phi_{\text{\ttfamily{SINR}}_{p}}(r') = \underset{r}{\mathbf{E}}\left[\text{\ttfamily{SINR}}_{p}(r,r')\right]$, is strictly increasing in $r'$ ($\text{\ttfamily{SINR}}_{p}$ increases as the interfering Tx moves away) and concave for $r'>\delta$.
In particular, $\delta$ is the smallest $r'$ such that $r' \geq r_{c}$ and $\frac{\partial^{2}\phi_{\text{\ttfamily{SINR}}_{p}}(r')}{\partial r'^{2}} \leq 0$.
Let $f_{r'}(r')$ and $\bar{r}'$ denote the \textit{pdf} and the average value of $r'$ respectively.
If $f_{r'}(r')>0$ for every $r'$, and $\delta<\bar{r}'$ then
\begin{equation}
\underset{r'}{\mathbf{E}}\left[\phi_{\text{\ttfamily{SINR}}_{p}}(r')\right] \leq
\phi_{\text{\ttfamily{SINR}}_{p}}(\bar{r}')
\end{equation}
which can be easily checked by considering an alternative distribution $f_{r''}(r'')$, such that $f_{r''}(r'')=0$ for $r''<\delta$, $f_{r''}(r)\geq f_{r'}(r)$ for $r\geq\delta$, and having an expected value $\bar{r}''=\bar{r}'$. We have that
\begin{equation}
\underset{r'}{\mathbf{E}}\left[\phi_{\text{\ttfamily{SINR}}_{p}}(r')\right] < \underset{r''}{\mathbf{E}}\left[\phi_{\text{\ttfamily{SINR}}_{p}}(r'')\right]
\leq \phi_{\text{\ttfamily{SINR}}_{p}}(\bar{r}')
\end{equation}
where the first inequality follows from the strictly increasing property of $\phi_{\text{\ttfamily{SINR}}_{p}}(r')$ in $r'$, and the second one from the concavity of $\phi_{\text{\ttfamily{SINR}}_{p}}(r'')$ in the $f_{r''}(r'')$ domain ($r''\geq\delta$), which allows us to apply Jensen's inequality in this domain. Numerical evaluations showed that, for typical configurations, $\delta=r_{c}\ll\bar{r}'$, so the previous expression can be used to obtain an upper bound for the average PU capacity over the cell. Note that this approach is equivalent to considering that the interfering SU is located at a distance $\bar{r}'$ from the PU Rx. When all SU and PU locations within the cell are equally probable, the average distance is $\bar{r}' = \frac{128R}{45\pi}$ (see \cite{ref:Solomon1978}).
For a pessimistic estimation of the capacity under interference, using more straightforward reasoning, we can obtain a lower bound for $\underset{r'}{\mathbf{E}}\left[\phi_{\text{\ttfamily{SINR}}_{p}}(r')\right]$ by simply setting $g(r')=\underset{r'}{\mathbf{E}}\left[g(r')\right]$ in (\ref{E_SINRp}).
Finally, the capacity for the $j$-th PU under interference, $C^{I}_{j,N}(n_{s},n_{p})$, is given by
\begin{equation}\label{CIjN}
C^{I}_{j,N}(n_{s},n_{p}) = \\
W\text{log}_{2}\left(1+
\frac{n_{s}H^{(j)}p_{PU}K\underset{r}{\mathbf{E}}\left[g(r)\right]\text{exp}\left(\text{\ttfamily{SNR}}_{s}(\bar{r}')^{-1}\right)E_{1}\left(\text{\ttfamily{SNR}}_{s}(\bar{r}')^{-1}\right)}
{n_{p}p_{SU}K'g(\bar{r}')}\right)
\end{equation}
where $\text{\ttfamily{SNR}}_{s}(r') = p_{SU}K'g(r')/n_{s}N_{0}W$.
The total capacity if all the $n_{p}$ channels experience interference is $C^{I}_{N}(n_{s},n_{p})~=~\sum_{j=1}^{n_{p}}C^{I}_{j,N}(n_{s},n_{p})$.
Next we derive the expressions for the total downlink capacity with and without BR.
\subsubsection{No bandwidth reservation for OSA}
Given $n_{p}$ and $n_{s}$, the number $i$ of collisions may take a value ranging from $c_{\text{min}} = \left(n_{p}+n_{s}-N\right)^{+}$ to $c_{\text{max}} = \text{min}\left(n_{p},n_{s}\right)$.
If there are $i$ collisions, each one of the $n_{p}$ PUs may be colliding with probability $p_{c} = \frac{i}{n_{p}}$, or not, with probability $1-p_{c}$. The expected capacity for the $j$-th PU is given by $C^{(i)}_{j,N}\left(n_{p},n_{s}\right) = C^{I}_{j,N}(n_{s},n_{p})p_{c} + C_{j,N}(n_{p})\left(1-p_{c}\right)$.
Summing the $n_{p}$ average capacities, we obtain the total capacity for $i$ collisions
\begin{equation}\label{CiN}
\begin{array}{lcl}
C^{(i)}_{N}\left(n_{p},n_{s}\right) & = & \sum_{j=1}^{n_{p}}C^{(i)}_{j,N}\left(n_{p},n_{s}\right)\\
& = & \sum_{j=1}^{n_{p}}C^{I}_{j,N}(n_{s},n_{p})\frac{i}{n_{p}} \\
& & + \sum_{j=1}^{n_{p}}C_{j,N}\left(n_{p}\right)\left(1-\frac{i}{n_{p}}\right)\\
& = & C^{I}_{N}(n_{s},n_{p})\frac{i}{n_{p}} + C_{N}\left(n_{p}\right)\left(1-\frac{i}{n_{p}}\right)
\end{array}
\end{equation}
The probability of $i$ collisions equals $P_{\text{H}}\left(i;N,n_{s},n_{p}\right)$, therefore, the total capacity without BR, $C^{\text{nbr}}_{N}\left(n_{p},n_{s}\right)$, is given by
\begin{equation}\label{CNOBR}
C^{\text{nbr}}_{N}\left(n_{p},n_{s}\right) = \displaystyle\sum_{i=c_{\text{min}}}^{c_{\text{max}}}C^{(i)}_{N}\left(n_{p},n_{s}\right)P_{\text{H}}\left(i;N,n_{s},n_{p}\right)
\end{equation}
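A sketch of (\ref{CiN})--(\ref{CNOBR}), with the no-interference and full-interference capacities passed in as precomputed values (names and interfaces are ours, for illustration only):
\begin{verbatim}
from scipy.stats import hypergeom

def C_nbr(n_p, n_s, N, C_I, C_noI):
    # C_I = C^I_N(n_s, n_p); C_noI = C_N(n_p).
    c_min, c_max = max(n_p + n_s - N, 0), min(n_p, n_s)
    return sum((C_I * i / n_p + C_noI * (1.0 - i / n_p))
               * hypergeom.pmf(i, N, n_s, n_p)
               for i in range(c_min, c_max + 1))
\end{verbatim}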
The associated reward function $R^{\text{nbr}}_{PU}\left(n_{p},n_{s}\right)$ accounts for the PU capacity only when there is PU activity, normalizing with respect to $C_{N}\left(n_{p}\right)$, as follows
\begin{equation}\label{Roff}
R^{\text{nbr}}_{PU}\left(n_{p},n_{s}\right) = \displaystyle\frac{C^{\text{nbr}}_{N}\left(n_{p},n_{s}\right)}
{\left(1-\pi_{0}\right)C_{N}\left(n_{p}\right)}
\end{equation}
where $(1-\pi_{0})$ is the probability that there is at least one $PU$ transmission. It can be checked that, in expression (\ref{MarkovReward2}), the factors $P\left(n_{s},n_{p}\right)/\left(1-\pi_{0}\right)$ correspond to the probability distribution over the states where $n_{p}>0$.
\subsubsection{With bandwidth reservation for OSA}
According to the definition of the BR mechanism, when $n_{p}\leq N-m$ the system is collision free and the AN has $N-m$ channels to dynamically allocate PU transmissions; the resulting downlink capacity with BR, $C_{N}^{\text{br}}\left(n_{p},n_{s}\right)$, equals the no-interference capacity for a spectrum with $N-m$ channels, $C_{N-m}\left(n_{p}\right)$ in (\ref{totalCapacity}).
When $n_{p}>N-m$, the PUs occupy part of the reserved spectrum, and therefore collisions are possible. Given $n_{s}$, the number of channels with collisions is $c = \left(n_{s}-N+n_{p}\right)^{+}$,
and $C_{N}^{\text{br}}\left(n_{p},n_{s}\right) = C^{(c)}_{n_{p}}\left(n_{p},n_{s}\right)$, (\ref{CiN}). Summarizing, we have
\begin{equation}\label{expectedCapacity2}
C_{N}^{\text{br}}\left(n_{p},n_{s}\right) =
\begin{cases}
C_{N-m}\left(n_{p}\right) &,\mbox{if }n_{p}\leq N-m\\
C^{(c)}_{n_{p}}\left(n_{p},n_{s}\right) &,\mbox{if }n_{p} > N-m
\end{cases}
\end{equation}
The reward function $R^{\text{br}}_{PU}(n_{p},n_{s})$ is obtained as in (\ref{Roff}).
\subsection{SU Capacity without Interference}
During an SU transmission period, the SU uses the same power $p_{s}=p_{SU}/n_{s}$ in each of the $n_{s}$ occupied channels. Therefore, the SU AWGN capacity per channel is given by $C'~=~W\text{log}_{2}\left(1+\text{\ttfamily{SNR}}_{s}\right)$, where $\text{\ttfamily{SNR}}_{s}$ is the average SU signal to noise ratio in one channel. With Rayleigh fading and an average received power $p_{ss}=p_{SU}K'g(r')/n_{s}$, we have $\text{\ttfamily{SNR}}_{s}(r')~=~p_{SU}K'g(r')/n_{s}N_{0}W$.
Taking the expectation over $r'$ we have that $\underset{r'}{\mathbf{E}}\left[\text{log}_{2}\left(1+\text{\ttfamily{SNR}}_{s}(r')\right)\right]$ $\leq$ $\text{log}_{2}\left(1+\underset{r'}{\mathbf{E}}\left[\text{\ttfamily{SNR}}_{s}(r')\right]\right)$. This upper bound defines the SU capacity for $n_{s}$ channels as
\begin{equation}
\begin{array}{lcl}
C'(n_{s}) & = & n_{s}W\text{log}_{2}\left(1+\underset{r'}{\mathbf{E}}\left[\text{\ttfamily{SNR}}_{s}(r')\right]\right)\\
&=& n_{s}W\text{log}_{2}\left(1+\frac{p_{SU}K'\underset{r'}{\mathbf{E}}\left[g(r')\right]}{n_{s}N_{0}W}\right)
\end{array}
\end{equation}
The term $\underset{r'}{\mathbf{E}}\left[g(r')\right]$ can be computed exactly using the \textit{pdf} of the distance between two points chosen at random within a disk of radius $R$
\begin{equation}\label{diskLinePicking}
f_{r'}\left(r'\right) = \displaystyle\frac{4r'}{\pi R^{2}}\text{cos}^{-1}\left(\displaystyle\frac{r'}{2R}\right) - \displaystyle\frac{2r'^{2}}{\pi R^{3}}\sqrt{1-\displaystyle\frac{r'^{2}}{4R^{2}}}
\end{equation}
However, for the sake of simplicity, we present an expression for $\underset{r'}{\mathbf{E}}\left[g(r')\right]$ obtained with a close approximation of (\ref{diskLinePicking}): $\tilde{f}_{r'}\left(r'\right) = c_{1}r'+c_{2}r'^{2}+c_{3}r'^{3}$. The coefficients $c_{1}=-(64-27\pi)/3\pi R^{2}$, $c_{2}=-4(3\pi-8)/\pi R^{3}$ and $c_{3}=-(128-45\pi)/12\pi R^{4}$ are obtained from the equations $\tilde{f}_{r'}(0)=0$, $\tilde{f}_{r'}(2R)=0$, $\int_{0}^{2R}\tilde{f}_{r'}(r')dr'=1$, and $\int_{0}^{2R}r'\tilde{f}_{r'}(r')dr'=128R/45\pi$. The resulting expression is
\begin{equation}\label{Egrprime}
\begin{array}{ll}
\underset{r'}{\mathbf{E}}\left[g(r')\right] & \approx \displaystyle\int_{r'=0}^{2R}g(r')\tilde{f}_{r'}(r')dr'\\
& = c_{1}+\frac{-c_{3}+2c_{3}r_{c}^{2}}{4} + \frac{c_{2}}{12}\left(-8+24r_{c}-\frac{6r_{c}^{2}}{R}\right) -\frac{c_{1} c_{3}^{2}}{8 R^{2}}+c_{1}\ln(r_{c})+c_{3} r_{c}^{2} \ln\left(\frac{2 R}{r_{c}}\right)
\end{array}
\end{equation}
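The quality of the cubic approximation can be checked numerically against the exact \textit{pdf} (\ref{diskLinePicking}); a sketch using SciPy quadrature and the \texttt{g} function sketched earlier (again, an illustration of ours):
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def E_g_exact(R, r_c=100.0, n1=2.0, n2=4.0):
    # Exact disk line-picking pdf, equation (diskLinePicking).
    f = lambda r: ((4 * r / (np.pi * R**2)) * np.arccos(r / (2 * R))
                   - (2 * r**2 / (np.pi * R**3))
                   * np.sqrt(1.0 - r**2 / (4 * R**2)))
    return quad(lambda r: g(r, n1, n2, r_c) * f(r), 0.0, 2.0 * R)[0]
\end{verbatim}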
\subsection{SU Capacity with Interference}
During a time-slot, the SU per-channel capacity under PU interference is $C'^{I} = W\text{log}_{2}\left(1+\text{\ttfamily{SINR}}_{s}\right)$, where $\text{\ttfamily{SINR}}_{s}$ denotes the SU signal to interference and noise ratio.
When both signals experience Rayleigh fading, the $\text{\ttfamily{SINR}}_{s}$ is given by
\begin{equation}\label{SINRs}
\begin{array}{lcl}
\text{\ttfamily{SINR}}_{s} & = & \displaystyle\int_{y=0}^{\infty}\displaystyle\int_{x=0}^{\infty}
\displaystyle\frac{x}{y+N_{0}W}f'(x)f^{I}(y)dxdy \\
& = & \displaystyle\int_{y=0}^{\infty}\displaystyle\frac{p_{ss}}{y + N_{0}W}f^{I}(y)dy \\
& = & \frac{p_{ss}}{p_{ps}}e^{\frac{N_{0}W}{p_{ps}}}E_{1}\left(\frac{N_{0}W}{p_{ps}}\right)
\end{array}
\end{equation}
where $p_{ss}= p_{SU}K'g(r')/n_{s}$ and $p_{ps}=p_{PU}Kg(r)/n_{p}$. For the average capacity over all $r$ and $r'$ values, we can apply the same reasoning used for $C^{I}_{j,N}(n_{p},n_{s})$, to conclude that
\begin{equation}
\underset{r',r}{\mathbf{E}}\left[\text{log}_{2}\left(1+\text{\ttfamily{SINR}}_{s}(r',r)\right)\right] \leq \\
\text{log}_{2}\left(1+\underset{r'}{\mathbf{E}}\left[\text{\ttfamily{SINR}}_{s}(r',\bar{r})\right]\right)
\end{equation}
The corresponding SU per-channel capacity with interference, for a given pair $\left(n_{p},n_{s}\right)$, is
\begin{equation}\label{CISU}
C'^{I}(n_{s},n_{p}) = \\
W\text{log}_{2}\left(1+
\frac{n_{p}p_{SU}K'\underset{r'}{\mathbf{E}}\left[g(r')\right]\text{exp}\left(\text{\ttfamily{SNR}}'_{p}(\bar{r})^{-1}\right)E_{1}\left(\text{\ttfamily{SNR}}'_{p}(\bar{r})^{-1}\right)}
{n_{s}p_{PU}Kg(\bar{r})}\right)
\end{equation}
where $\text{\ttfamily{SNR}}'_{p}(r)=p_{PU}Kg(r)/n_{p}N_{0}W$.
\subsubsection{Without bandwidth reservation}
Let $z$ denote the number of channels with overlapping transmissions. Given $n_{s}$ and $n_{p}$, $z$ takes random values ranging from $z_{\text{min}}=\left(n_{s}+n_{p}-N\right)^{+}$ to $z_{\text{max}}=\text{min}\left(n_{s},n_{p}\right)$, with hypergeometric distribution. The SU capacity, without BR, in the $n_{s}$ used channels, $C'^{\text{nbr}}(n_{s},n_{p})$ is obtained by taking expectation over the $z$ values:
\begin{equation}
C'^{\text{nbr}}(n_{s},n_{p}) = \\ \displaystyle\sum_{z=z_{\text{min}}}^{z_{\text{max}}}\left(\left(n_{s}-z\right)C'(n_{s})+zC'^{I}(n_{s},n_{p})\right)P_{H}\left(z;N,n_{p},n_{s}\right)
\end{equation}
To define the reward function $R^{\text{nbr}}_{SU}\left(n_{s},n_{p}\right)$, we consider the maximum SU capacity, $\rho_{s}C'(n_{s,\text{max}})$, corresponding to an ideal case where the SUs access the spectrum on every attempt, and transmit in $n_{s,\text{max}}$ channels without interference. Therefore
\begin{equation}\label{RoffSU}
R^{\text{nbr}}_{SU}\left(n_{s},n_{p}\right) = \frac{C'^{\text{nbr}}(n_{s},n_{p})}{\rho_{s}C'(n_{s,\text{max}})}
\end{equation}
\subsubsection{With bandwidth reservation}
In this case, given $n_{s}$ and $n_{p}$, the number of channels with overlapping transmissions is deterministic and equal to $z_{\text{min}}=\left(n_{s}+n_{p}-N\right)^{+}$, and therefore
\begin{equation}
C'^{\text{br}}(n_{s},n_{p}) =\left(n_{s}-z_{\text{min}}\right)C'(n_{s})+z_{\text{min}}C'^{I}(n_{s},n_{p})
\end{equation}
The reward $R^{\text{br}}_{SU}\left(n_{s},n_{p}\right)$, is obtained as in (\ref{RoffSU}).
\section{Numerical Results}\label{sec:Results}
\subsection{System Configuration}
The model developed in the previous sections allows us to obtain numerical results with which we address the main issues raised in the introduction of the chapter: Is the use of spectrum reservation for OSA justified? Under which conditions? Can we jointly improve the performance of both PUs and SUs?
To jointly evaluate PU-SU configurations, we have considered several combinations of PU and SU traffic intensities. To illustrate this, we show the results for $\rho_{s}=0.5$ and two PU traffic intensities: $\lambda_{p}=0.1$ (low PU traffic) and $\lambda_{p}=0.5$ (high PU traffic). Considering that each PU remains in the spectrum for $1/\mu_{p}=60$ s, the PU blocking probabilities are 0.2\% for $\lambda_{p}=0.1$, and 50\% for $\lambda_{p}=0.5$.
In all cases, the licensed spectrum band is divided into $N=15$ narrowband channels of $W=200$ kHz, the time-slot duration is $\tau=200$ $\mu$s, and the average duration of an SU transmission is $T_{s}=10$ time-slots. The remaining parameters of the model, such as transmission powers and cell radius, are provided in Table \ref{BR_table_sim_param}.
Regarding the sensing error probabilities, the detection threshold is $\theta=17.8$ dB as in \cite{ref:Pawelczak2009}, resulting in $p_{f}=8.3\times10^{-3}$ and $p_{n}=1.4\times10^{-16}$ for $\Delta=6$.
\begin{table}
\begin{tabular}{lr} \hline
\textbf{Parameter} & \textbf{Assigned value}\\\hline
number of channels, $N$&15\\
channel bandwidth, $W$&200 kHz\\
time slot duration, $\tau$&200 $\mu$s\\
PU average Tx time, $1/\mu_{p}$&60 s\\
SU average Tx time, $T_{s}$&$10\times\tau$\\
sensing detection threshold, $\theta$&17.8 dB\\
PU Tx power, $p_{PU}$&10 W\\
SU Tx power, $p_{SU}$&4 W\\
cell radius, $R$ &1000 m\\
$g(r)$ threshold distance, $r_{c}$&100 m\\
$g(r)$ propagation factor, $K=K'$&4 dB\\
receivers' noise figure, $F$&7 dB\\\hline
\end{tabular}
\centering
\caption{Parameter setting of the reference scenario used in numerical evaluations}
\label{BR_table_sim_param}
\end{table}
For each traffic scenario, we show the configurations of the $\Delta$ and $k$ parameters for two $n_{s,\text{max}}$ values: 3 and 6. With BR, the number of reserved channels is $m=n_{s,\text{max}}$. The values of the safety parameter $k$ range from 0 to 3 and the number of scanned channels $\Delta$ ranges from $n_{s,\text{max}}+k$ to 10.
We obtain, for each ($n_{s,\text{max}}$, $k$, $\Delta$) tuple, the expected PU and SU normalized capacities with and without BR ($\overline{R}^{\text{nbr}}_{PU}$, $\overline{R}^{\text{nbr}}_{SU}$, $\overline{R}^{\text{br}}_{PU}$, $\overline{R}^{\text{br}}_{SU}$) defined in the previous section.
We are also interested in evaluating PU performance in terms of collision probability and average interference power at PU receivers, two usual metrics in previous works.
In particular, the average interference, $\bar{R}^{\text{nbr}}_{I}$ and $\bar{R}^{\text{br}}_{I}$, can be computed using the following per-state rewards in (\ref{MarkovReward2}):
\begin{equation}\label{InterferenceRewards}
\begin{array}{lcl}
R^{\text{nbr}}_{I}\left(n_{p},n_{s}\right)& = &\frac{1}{n_{p}}\displaystyle\sum_{i=c_{\text{min}}}^{c_{\text{max}}}i\textstyle\frac{p_{SU}K'\underset{r'}{\mathbf{E}}\left[g(r')\right]}{n_{s}}P_{\text{H}}\left(i;N,n_{s},n_{p}\right)\\
R^{\text{br}}_{I}\left(n_{p},n_{s}\right)& = &\frac{1}{n_{p}}\frac{p_{SU}K'\underset{r'}{\mathbf{E}}\left[g(r')\right]}{n_{s}}\left(n_{s}+n_{p}-N\right)^{+}\\
\end{array}
\end{equation}
The average collision probability ($\bar{R}^{\text{nbr}}_{c}$, $\bar{R}^{\text{br}}_{c}$) can be obtained from (\ref{InterferenceRewards}) by removing the signal power factor.
\begin{figure}
\begin{center}
\resizebox{9cm}{!}{\input{./img/BR/ConfigurationLowHighRev}}
\end{center}
\caption{Normalized PU and SU capacities for a low PU traffic scenario ($\lambda_{p}=0.1$). Larger points denote higher $k$ values.}\label{BR_fig_LowTraffic}
\end{figure}
Figure \ref{BR_fig_LowTraffic} shows the joint SU-PU performance for the low PU traffic scenario.
Squares correspond to no BR (NBR), and circles to BR performance values. Red and green points correspond to $n_{s,\text{max}}=$ 3 and 6, respectively.
Larger point sizes indicate larger $k$.
Increasing $\Delta$ allows the discovery of more spectrum opportunities, implying more SU capacity, but less PU capacity (especially without BR) due to a higher collision probability.
The protection parameter $k$ causes the opposite effect: higher $k$ increases the protection level to PUs (higher $\overline{R}^{\text{nbr}}_{PU}$) and reduces $\overline{R}^{\text{nbr}}_{SU}$. In consequence, the best PU performance, when no BR is used, is attained when $\Delta=n_{s,\text{max}}+k$.
When BR is used, it can be seen that, for each $n_{s,\text{max}}$, all the $\overline{R}^{\text{br}}_{PU}$ values are higher than $\overline{R}^{\text{nbr}}_{PU}$.
\begin{figure}
\begin{center}
\resizebox{9cm}{!}{\input{./img/BR/InterferenceLowHighRev}}
\end{center}
\caption{SU capacity versus interference caused to the PUs for low PU traffic. Larger points denote higher $k$ values. Collision probabilities at the extreme points are indicated.}\label{BR_fig_Interference}
\end{figure}
Figure \ref{BR_fig_Interference}, showing the average interference at the PUs vs. the SU capacity, illustrates the reason for the differences between using BR or not: the superior performance of BR in terms of collisions/interference, which compensates for the negative effect of having fewer channels to allocate at the AN.
However, although BR always manages to reduce collisions/interference, this is not always enough to compensate for the drawbacks of BR in terms of PU achievable rate. In particular, when the PU traffic is high, there are values of $\overline{R}^{\text{nbr}}_{PU}$ that cannot be achieved with BR, as can be seen in Figure \ref{BR_fig_HighTraffic}. Note, however, that the PU capacity varies in a very short range. Nevertheless, Figure \ref{BR_fig_InterferenceHigh} shows that BR also reduces the interference in the high traffic regime.
\begin{figure}
\begin{center}
\resizebox{9cm}{!}{\input{./img/BR/ConfigurationHighHighRev}}
\end{center}
\caption{SU and PU capacities for a high PU traffic scenario ($\lambda_{p}=0.5$). Larger points denote higher $k$ values.}\label{BR_fig_HighTraffic}
\end{figure}
\begin{figure}
\begin{center}
\resizebox{9cm}{!}{\input{./img/BR/InterferenceHighHighRev}}
\end{center}
\caption{SU capacity versus interference caused to the PUs for high PU traffic.}\label{BR_fig_InterferenceHigh}
\end{figure}
\subsection{Studying PU Motivation to Use BR}
We have seen that the PU traffic intensity is closely related to the performance limits on $\overline{R}^{\text{br}}_{PU}$. This suggests that an AN implementing BR could decide to switch this feature on or off, depending on its own traffic load. To see where the traffic threshold could be located, we represent, in Figure \ref{BR_fig_Thresholds}, the highest values of $\overline{R}^{\text{br}}_{PU}$ and $\overline{R}^{\text{nbr}}_{PU}$ for $n_{s,\text{max}}=4$. To assess the effect of SU traffic as well, several $\rho_{s}$ values are used (0.1, 0.5, 0.8). It can be seen that, without BR, the PU capacity is higher for larger $\lambda_{p}$, because the spectrum sensing detects more PU activity and incoming SUs either do not transmit or transmit using very few channels, reducing the effects of interference/collisions.
\begin{figure}[ht]
\begin{center}
\resizebox{9cm}{!}{\input{./img/BR/PUcapacityRev}}
\end{center}
\caption{Effect of PU and SU traffic intensities on PU capacity. Approximate values $\tilde{R}^{\text{nbr}}_{PU}$, $\tilde{R}^{\text{br}}_{PU}$ are represented with points and dotted lines. For each SU traffic intensity there is a PU traffic threshold value below which the PU would prefer to activate BR.}\label{BR_fig_Thresholds}
\end{figure}
We can obtain an estimate of the traffic thresholds without resorting to the Markov model by means of approximate expressions for $\overline{R}^{\text{nbr}}_{PU}$ and $\overline{R}^{\text{br}}_{PU}$. Assuming that the spectrum contains an SU transmission with probability $\rho_{s}$, we can compute the following average:
\begin{equation}
\tilde{R}^{\ast}_{PU} = C^{\ast}_{N}\left(\left\lfloor \tilde{n}_{p}\right\rceil,\left\lfloor \tilde{n}_{s}\right\rceil\right)\rho_{s} + C^{\ast}_{N}\left(\left\lfloor \tilde{n}_{p}\right\rceil\right)\left(1-\rho_{s}\right)
\end{equation}
where $\ast = \text{nbr}$ or $\text{br}$, $\left\lfloor x\right\rceil$ denotes the closest integer to $x$, and $\tilde{n}_{p}$, $\tilde{n}_{s}$ are estimates of the average values of $n_{p}$ and $n_{s}$. We have that $\tilde{n}_{p} = \sum_{i=0}^{N}i\pi_{i}$, and $\tilde{n}_{s} = \sum_{i=0}^{N}\theta_{\ast}(i)\pi_{i}$, where
\begin{equation}
\begin{array}{lcl}
\theta_{\text{nbr}}(n_{p}) & = & \text{min}\left(\frac{N-n_{p}}{N}\Delta-k,n_{s,\text{max}}\right)\\
\theta_{\text{br}}(n_{p}) & = & \text{min}\left(N-n_{p}-k,n_{s,\text{max}}\right)
\end{array}
\end{equation}
and $\frac{N-n_{p}}{N}\Delta$ is the expected number of available channels in $\Delta$.
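A sketch of this approximation (the function interfaces and names are ours; the capacity functions of the chosen policy are passed in as callables):
\begin{verbatim}
def R_tilde(theta, rho_s, pi, N, C_joint, C_alone):
    # C_joint(n_p, n_s) and C_alone(n_p) are the per-state capacities
    # of the chosen policy (br or nbr); theta is theta_br or theta_nbr.
    n_p = round(sum(i * pi[i] for i in range(N + 1)))
    n_s = round(sum(theta(i) * pi[i] for i in range(N + 1)))
    return rho_s * C_joint(n_p, n_s) + (1.0 - rho_s) * C_alone(n_p)
\end{verbatim}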
The capacity values obtained with this approximation are also shown in Figure \ref{BR_fig_Thresholds}.
Finally, Figure \ref{BR_fig_sim_interference} depicts the interference power at the PU receivers for different PU and SU traffic regimes. When the traffic increases, the BR average interference power converges to the no-BR case, because the reserved channels are more frequently used by PU transmissions, resulting in a higher collision probability.
\begin{figure}[ht]
\begin{center}
\resizebox{9cm}{!}{\input{./img/BR/PUinterferenceRev}}
\end{center}
\caption{Effect of PU and SU traffic intensities on the interference at PUs.}\label{BR_fig_sim_interference}
\end{figure}
\section{Conclusions}\label{sec:Conclusion}
Even with the sensing capabilities of cognitive radios, OSA is always associated with some degree of transmission overlap with PU transmissions, and therefore with harmful interference at PU receivers.
This chapter studies the idea of a primary network reserving some part of its spectrum for OSA to reduce harmful interference from SUs.
The reserved channels are only used for PU transmissions when the rest of the channels overflow.
We have developed a Markov-reward model for evaluating the PU and SU capacities in coexistence scenarios with and without bandwidth reservation.
The numerical results showed that using BR implies a moderate PU capacity reduction; e.g., reserving 4 out of 15 channels resulted in a PU capacity reduction of 1\% in the worst case. If the PU traffic intensity is below a certain value, the PU capacity obtained with BR is higher than the capacity achieved without BR, because of the better interference (and collision probability) performance of BR.
The AN is therefore motivated to activate BR under low traffic regimes. For the SUs, there is a remarkable performance improvement when using the reserved spectrum, resulting in higher spectrum efficiency.
Our model is applicable to the design of BR activation policies, using either the Markov framework or a simpler approximate formulation.
!!### MODULE: PRINTING subroutine Grid2
MODULE PRN_Grid2
!!#### PURPOSE
!! This subroutine prints a block of discrete function values
!! F(1:Nx,1:Ny) provided at points on a regular grid, or just with indices
!! if the values of <x(1:Nx)> and <y(1:Ny)> are not provided.
!!#### FORTRAN STANDARDS
USE ISO_varying_string !!((03-A-ISO_varying_string.f90))
!!#### EXTERNAL KINDS
USE KND_IntrinsicTypes,ONLY: KIND_Rsp,KIND_Rdp,KIND_S !!((01-A-KND_IntrinsicTypes.f90))
!!#### GLOBAL UNITS
USE PAR_IntrinsicLengths ,ONLY: LEN_Rsp,LEN_Rdp !!((02-A-PAR_IntrinsicLengths.f90))
USE PAR_Units,ONLY: window_unit !!((02-A-PAR_Units.f90))
!!#### GLOBAL USER MODULES
USE USR_fdbk !!((08-C-USR_fdbk.f90))
!!#### BINARY OPERATORS
USE BOP_Concatenate !!((03-A-BOP_Concatenate.f90))
!!#### GLOBAL FUNCTIONS
USE FUN_STR !!((05-B-FUN_STR.f90))
USE FUN_Sequence !!((03-A-FUN_Sequence.f90))
USE FUN_Default !!((04-A-FUN_Default.f90))
USE FUN_FMT !!((06-B-FUN_FMT.f90))
USE FUN_VSTR !!((05-B-FUN_VSTR.f90))
USE SUB_CLEAR !!((04-A-SUB_CLEAR.f90))
!!#### GLOBAL PRINTING
USE PRN_Array2 !!((08-B-PRN_Array2.f90))
!!#### DEFAULT IMPLICIT
IMPLICIT NONE
!!#### DEFAULT ACCESS
PRIVATE
!!#### LOCAL PARAMETERS
INTEGER,PARAMETER :: DEFAULT_unit = window_unit
!!#### PROCEDURE OVERLOADING
INTERFACE PRINT_Grid2
MODULE PROCEDURE PRINT_Grid2_Rsp
MODULE PROCEDURE PRINT_Grid2_Rdp
END INTERFACE
!!#### PUBLIC ACCESS LIST
PUBLIC :: PRINT_Grid2
CONTAINS
SUBROUTINE PRINT_Grid2_Rsp( Z , Unit , x , y )
!!#### PARAMETERS
USE KND_IntrinsicTypes,ONLY: KIND_R=>KIND_Rsp !!((01-A-KND_IntrinsicTypes.f90))
INTEGER,PARAMETER :: LEN_R = LEN_Rsp
INCLUDE "09-B-PRN_Grid2_R.f90.hdr"
!!--begin--
INCLUDE "09-B-PRN_Grid2_R.f90.bdy"
!!--end--
END SUBROUTINE
SUBROUTINE PRINT_Grid2_Rdp( Z , Unit , x , y )
!!#### PARAMETERS
USE KND_IntrinsicTypes,ONLY: KIND_R=>KIND_Rdp !!((01-A-KND_IntrinsicTypes.f90))
INTEGER,PARAMETER :: LEN_R = LEN_Rdp
INCLUDE "09-B-PRN_Grid2_R.f90.hdr"
!!--begin--
INCLUDE "09-B-PRN_Grid2_R.f90.bdy"
!!--end--
END SUBROUTINE
END MODULE
import pandas as pd
import numpy as np
from tracking_grants import (
articles_f,
references_f,
wos_f,
altmetric_f,
trials_f,
awards_f,
)
def load_references():
return pd.read_csv(references_f)
def load_awards():
def research_topic(s):
if pd.notna(s):
primary_topic = None
secondary_topic = None
if "Secondary: " in s:
splits = s.split("Secondary: ")
secondary_topic = splits[-1]
s = splits[0:-1][0]
if "Primary: " in s:
splits = s.split("Primary: ")
primary_topic = splits[-1]
return primary_topic, secondary_topic
else:
return None, None
def currency_to_int(s):
s = s.replace("$", "").replace(",", "")
return int(float(s))
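    # Illustrative examples for the helpers above (hypothetical inputs,
    # not taken from the awards file):
    #   research_topic("Primary: Oncology Secondary: Imaging")
    #       -> ("Oncology ", "Imaging")  # the split keeps trailing spaces
    #   currency_to_int("$1,250,000.00") -> 1250000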
awards = pd.read_csv(awards_f)
# Process research topics
awards[["primary_topic", "secondary_topic"]] = pd.DataFrame(
awards["Research Topic"].map(research_topic).tolist()
)
awards = awards.drop(columns=["Research Topic"])
# Rename columns
awards = awards.rename(
columns={
"Award Amount": "award_amount",
"Award Number": "award_id",
"Fiscal Year": "award_year",
"Mechanism": "type",
"Proposal Number": "proposal_id",
"Proposal Title": "proposal_title",
"Program": "program",
}
)
# Process award amount
awards["award_amount"] = awards["award_amount"].map(currency_to_int)
export_cols = [
"award_id",
"award_amount",
"award_year",
"proposal_id",
"proposal_title",
"type",
"program",
"primary_topic",
"secondary_topic",
]
return awards[export_cols]
# Loading external files
def load_articles():
return pd.read_csv(articles_f)
def load_wos():
# Load metrics from WoS
wos = pd.read_csv(wos_f, low_memory=False, index_col="DOI")
wos.columns = [x.lower() for x in wos.columns.tolist()]
wos.index = wos.index.str.lower()
wos = wos.rename(
columns={
"citations": "wos_citations",
"relative citation score": "citation_score",
}
)
return wos
def load_altmetrics(keep_metrics=True, keep_dates=False, keep_ids=False):
altmetrics = pd.read_json(altmetric_f).T
    # Filter out all articles that have no altmetric data
altmetrics = altmetrics[altmetrics.altmetric_id.notna()]
# Transform all DOIs to lowercase
altmetrics.index = altmetrics.index.str.lower()
cols_to_keep = []
if keep_metrics:
metric_cols = {
"cited_by_posts_count": "posts_count",
"cited_by_rh_count": "research_highlight",
"cited_by_tweeters_count": "twitter_accounts",
"cited_by_patents_count": "patents",
"cited_by_msm_count": "news_outlets",
"cited_by_feeds_count": "blogs",
"cited_by_fbwalls_count": "fb_pages",
"cited_by_qna_count": "stackoverflow",
"cited_by_videos_count": "videos",
"cited_by_peer_review_sites_count": "peer_reviews",
"cited_by_weibo_count": "weibo",
"cited_by_gplus_count": "gplus",
"cited_by_rdts_count": "reddit_threads",
"cited_by_policies_count": "policies",
"cited_by_syllabi_count": "syllabi",
"cited_by_linkedin_count": "linkedin",
"cited_by_wikipedia_count": "wikipedia",
}
altmetrics = altmetrics.rename(columns=metric_cols)
metric_cols = list(metric_cols.values())
altmetrics[metric_cols] = altmetrics[metric_cols].astype(float)
cols_to_keep = cols_to_keep + metric_cols
if keep_dates:
dates = ["last_updated", "published_on", "added_on"]
for d in dates:
altmetrics[d] = pd.to_datetime(altmetrics[d], unit="s")
cols_to_keep = cols_to_keep + dates
if keep_ids:
id_cols = ["pmid", "pmc", "altmetric_id", "doi", "hollis_id", "arxiv_id"]
for _ in id_cols:
altmetrics[_] = altmetrics[_].astype(str)
cols_to_keep = cols_to_keep + id_cols
return altmetrics[cols_to_keep]
def load_metrics():
articles = load_articles()
altmetrics = load_altmetrics()
wos = load_wos()
trials = load_trials()
articles = articles.merge(wos, left_on="DOI", right_index=True, how="left",)
articles = articles.merge(altmetrics, left_on="DOI", right_index=True, how="left",)
# Add n_trials
articles = articles.merge(
trials.doi.value_counts().to_frame("n_trials"),
left_on="DOI",
right_index=True,
how="left",
)
return articles
def load_grants():
    # Remove ref_id, grant_id, and score (!) as the individual references
    # deposited might apply to various grants and even contain slightly
    # different formats for the publications
metrics = load_metrics()
grants = metrics.groupby(["program", "grant_id"]).mean().reset_index()
# Award metadata
awards = load_awards()
award_cols = ["grant_id", "award_amount", "type", "award_year"]
grants = grants.merge(
awards[award_cols], left_on="grant_id", right_on="grant_id", how="left"
)
grants.award_amount = grants.award_amount / 1000000
# Number of articles
grants = grants.merge(
metrics.groupby("grant_id")["DOI"].nunique().to_frame("n_dois"),
left_on="grant_id",
right_index=True,
)
# Sum of COCI citations
grants = grants.merge(
metrics.groupby("grant_id")["coci_citations"].sum().to_frame("total_coci"),
left_on="grant_id",
right_index=True,
)
grants.coci_citations = grants.coci_citations.replace(0, np.nan)
return grants
def load_trials():
trials = pd.read_csv(trials_f)
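    # These columns are stored as stringified lists (e.g. "['NCT0000001']",
    # an illustrative id); eval() turns them back into lists and we keep
    # the first element, or None when the list is empty.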
for c in ["BriefTitle", "NCTId", "OverallStatus", "Phase"]:
trials[c] = (
trials[c].map(lambda x: eval(x)).map(lambda x: x[0] if len(x) > 0 else None)
)
return trials.drop(columns="Condition")
|
{"hexsha": "95faefc61a99f004eb28e86bc62191bc4f454600", "size": 6157, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tracking_grants/utils/helpers.py", "max_stars_repo_name": "ScholCommLab/tracking-grants", "max_stars_repo_head_hexsha": "6590b877c3be8c057412c3fe6f3d0a1ea1b4119e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tracking_grants/utils/helpers.py", "max_issues_repo_name": "ScholCommLab/tracking-grants", "max_issues_repo_head_hexsha": "6590b877c3be8c057412c3fe6f3d0a1ea1b4119e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-06-22T01:29:12.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-22T06:07:00.000Z", "max_forks_repo_path": "src/tracking_grants/utils/helpers.py", "max_forks_repo_name": "ScholCommLab/military-grants", "max_forks_repo_head_hexsha": "6590b877c3be8c057412c3fe6f3d0a1ea1b4119e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1800947867, "max_line_length": 88, "alphanum_fraction": 0.6077635212, "include": true, "reason": "import numpy", "num_tokens": 1492}
|
using StructIO
using Test
# First, exercise the `@io` macro a bit, to ensure it can handle different
# kinds of type declarations
@io struct TwoUInts
x::UInt
y::UInt
end
abstract type AbstractType end
@io struct ConcreteType <: AbstractType
A::UInt32
B::UInt16
C::UInt128
D::UInt8
end align_packed
@io struct PackedNestedType
A::ConcreteType
B::ConcreteType
end align_packed
@io struct DefaultNestedType
A::ConcreteType
B::ConcreteType
end
@io struct PackedParametricType{T}
x::T
y::T
end align_packed
# Also test documenting a type
"""
This is a docstring
"""
@io struct ParametricType{S,T}
A::S
B::T
C::T
end
@testset "unpack()" begin
# Test native unpacking
buf = IOBuffer()
write(buf, UInt(1))
write(buf, UInt(2))
seekstart(buf)
@test unpack(buf, TwoUInts) == TwoUInts(1,2)
# Now, test explicitly setting the endianness
for endian in [:LittleEndian, :BigEndian]
buf = IOBuffer()
write(buf, fix_endian(UInt32(1), endian))
write(buf, fix_endian(UInt16(2), endian))
write(buf, fix_endian(UInt128(3), endian))
write(buf, fix_endian(UInt8(4), endian))
seekstart(buf)
@test unpack(buf, ConcreteType, endian) == ConcreteType(1, 2, 3, 4)
end
# Test packed nested types across endianness
for endian in [:LittleEndian, :BigEndian]
buf = IOBuffer()
write(buf, fix_endian(UInt32(1), endian))
write(buf, fix_endian(UInt16(2), endian))
write(buf, fix_endian(UInt128(3), endian))
write(buf, fix_endian(UInt8(4), endian))
write(buf, fix_endian(UInt32(5), endian))
write(buf, fix_endian(UInt16(6), endian))
write(buf, fix_endian(UInt128(7), endian))
write(buf, fix_endian(UInt8(8), endian))
seekstart(buf)
x = PackedNestedType(ConcreteType(1,2,3,4), ConcreteType(5,6,7,8))
@test unpack(buf, PackedNestedType, endian) == x
end
# Test mixed Packed/Default nested types across endianness
for endian in [:BigEndian, :LittleEndian]
# Helper function to write a value, then write zeros afterward to build
# a stream that mocks up a `Default` packing strategy memory layout
function write_skip(buf, x, field_idx)
n_written = write(buf, fix_endian(x, endian))
n_size = Int32(StructIO.fieldsize(ConcreteType, field_idx))
write(buf, zeros(UInt8, n_size - n_written))
end
buf = IOBuffer()
write_skip(buf, UInt32(1), 1)
write_skip(buf, UInt16(2), 2)
write_skip(buf, UInt128(3), 3)
write_skip(buf, UInt8(4), 4)
write_skip(buf, UInt32(5), 1)
write_skip(buf, UInt16(6), 2)
write_skip(buf, UInt128(7), 3)
write_skip(buf, UInt8(8), 4)
seekstart(buf)
x = DefaultNestedType(ConcreteType(1,2,3,4), ConcreteType(5,6,7,8))
@test unpack(buf, DefaultNestedType, endian) == x
end
end
@testset "pack()" begin
# Pack simple types
for endian in [:BigEndian, :LittleEndian]
buf = IOBuffer()
pack(buf, UInt8(1), endian)
pack(buf, Int16(2), endian)
pack(buf, UInt32(4), endian)
pack(buf, Int64(8), endian)
pack(buf, UInt128(16), endian)
@test position(buf) == 1 + 2 + 4 + 8 + 16
seekstart(buf)
@test unpack(buf, UInt8, endian) === UInt8(1)
@test unpack(buf, Int16, endian) === Int16(2)
@test unpack(buf, UInt32, endian) === UInt32(4)
@test unpack(buf, Int64, endian) === Int64(8)
@test unpack(buf, UInt128, endian) === UInt128(16)
end
# Pack a simple object
buf = IOBuffer()
tu = TwoUInts(2, 3)
pack(buf, tu)
# Test that the stream looks reasonable
@test position(buf) == sizeof(TwoUInts)
seekstart(buf)
@test read(buf, UInt) == 2
@test read(buf, UInt) == 3
# Test that we can unpack a packed stream
buf = IOBuffer()
pack(buf, tu)
seekstart(buf)
@test unpack(buf, TwoUInts) == TwoUInts(2, 3)
# Test packed/default nested types across endianness
for NT in [PackedNestedType, DefaultNestedType]
for endian in [:LittleEndian, :BigEndian]
buf = IOBuffer()
nt = NT(ConcreteType(1,2,3,4), ConcreteType(5,6,7,8))
pack(buf, nt)
seekstart(buf)
@test unpack(buf, NT) == nt
end
end
end
@testset "packed_sizeof()" begin
@test packed_sizeof(TwoUInts) == 2*sizeof(UInt)
@test packed_sizeof(ConcreteType) == 1 + 2 + 4 + 16
@test packed_sizeof(PackedNestedType) == 2*packed_sizeof(ConcreteType)
@test packed_sizeof(PackedParametricType{UInt8}) == 2
@test packed_sizeof(PackedParametricType{UInt32}) == 8
psCT = packed_sizeof(ConcreteType)
@test packed_sizeof(PackedParametricType{ConcreteType}) == 2*psCT
end
@testset "Documentation" begin
@test string(@doc ParametricType) == "This is a docstring\n"
end
|
{"hexsha": "29b61a3de27dc753417b11f0e1453708c0ae22be", "size": 4997, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirror/StructIO.jl-53d494c1-5632-5724-8f4c-31dff12d585f", "max_stars_repo_head_hexsha": "455543ce2f240402fca7fe65622734ff8a2eda8f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-04-02T20:37:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T01:29:00.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirror/StructIO.jl-53d494c1-5632-5724-8f4c-31dff12d585f", "max_issues_repo_head_hexsha": "455543ce2f240402fca7fe65622734ff8a2eda8f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2016-06-25T23:12:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T09:08:52.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirror/StructIO.jl-53d494c1-5632-5724-8f4c-31dff12d585f", "max_forks_repo_head_hexsha": "455543ce2f240402fca7fe65622734ff8a2eda8f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2016-04-02T20:22:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:31:29.000Z", "avg_line_length": 29.5680473373, "max_line_length": 79, "alphanum_fraction": 0.6265759456, "num_tokens": 1459}
|
import unittest
class NumpyTest(unittest.TestCase):
def test_numpy_is_importable(self):
import numpy
self.assertIsNotNone(numpy.nan)
|
{"hexsha": "5c77c1b01006b584008cc98099b9cae968591f89", "size": 156, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/python_rules/numpy_test.py", "max_stars_repo_name": "samwestmoreland/please", "max_stars_repo_head_hexsha": "1616742eeefca3dd0b3194e4c1ec9a8542ec13c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1992, "max_stars_repo_stars_event_min_datetime": "2016-08-08T11:14:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:29:57.000Z", "max_issues_repo_path": "test/python_rules/numpy_test.py", "max_issues_repo_name": "samwestmoreland/please", "max_issues_repo_head_hexsha": "1616742eeefca3dd0b3194e4c1ec9a8542ec13c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1059, "max_issues_repo_issues_event_min_datetime": "2016-08-03T17:11:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T16:27:30.000Z", "max_forks_repo_path": "test/python_rules/numpy_test.py", "max_forks_repo_name": "samwestmoreland/please", "max_forks_repo_head_hexsha": "1616742eeefca3dd0b3194e4c1ec9a8542ec13c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 213, "max_forks_repo_forks_event_min_datetime": "2016-12-09T15:37:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T23:08:26.000Z", "avg_line_length": 17.3333333333, "max_line_length": 39, "alphanum_fraction": 0.7243589744, "include": true, "reason": "import numpy", "num_tokens": 30}
|
SUBROUTINE RCOVSL (NAME,ITEM,IN,AMAT,SCR2,SCR3,OUT,Z,IZ,LCORE,
1 FIRST,RFNO)
C
C RCOVSL CALCULATES THE STATIC LOAD VECTORS FOR THE SUBSTRUCTURING
C PHASE 2 AND PHASE 3 OPERATIONS FROM THE SUBSTRUCTURE SOLN ITEM
C
LOGICAL FIRST
INTEGER NAME(2),AMAT,SCR2,SCR3,OUT,PMX,FMX,CMX,SLMX,T,
1 SIGNPF,SIGNC,PREC,SCR,RD,RDREW,WRT,WRTREW,REW,
2 OTYPP,SYSBUF,SOLN,SRD,SUBR(2),BUF1,FSS(2),IBUF(3),
3 IZ(1),RC,RFNO,TYPE
REAL Z(1)
COMMON /MPYADX/ PMX(7),FMX(7),CMX(7),SLMX(7),MCORE,T,SIGNPF,SIGNC,
1 PREC,SCR
COMMON /NAMES / RD,RDREW,WRT,WRTREW,REW,NOREW
COMMON /PACKX / ITYPP,OTYPP,IROWP,NROWP,INCP
COMMON /SYSTEM/ SYSBUF,NOUT
DATA SOLN / 4HSOLN /, SRD / 1 /
DATA SUBR / 4HRCOV , 4HSL /
C
C INITIALIZE
C
BUF1 = LCORE - SYSBUF + 1
ITYPP = 1
IROWP = 1
INCP = 1
MCORE = LCORE
T = 0
SIGNPF= 1
PREC = 0
C
C READ LOAD MATRIX FROM SOF ONTO GINO FILE
C
PMX(1) = IN
CALL RDTRL (PMX)
IF (PMX(1) .GT. 0) GO TO 5
ITM = ITEM
CALL MTRXI (SCR2,NAME,ITEM,Z(BUF1),RC)
IF (RC .EQ. 3) GO TO 600
IF (RC .NE. 1) GO TO 1000
PMX(1) = SCR2
CALL RDTRL (PMX)
5 NROWP = PMX(2)
TYPE = PMX(5)
IF (RFNO .EQ. 8 .AND. TYPE .LE. 2) TYPE = TYPE + 2
OTYPP = TYPE
IF (FIRST) GO TO 500
C
C PROCESS INITIAL SOLN DATA
C
ITM = SOLN
CALL SFETCH (NAME,SOLN,SRD,RC)
IF (RC .NE. 1) GO TO 1000
CALL SUREAD (FSS,2,N,RC)
IF (RC .NE. 1) GO TO 1100
CALL SUREAD (IBUF,3,N,RC)
IF (RC .NE. 1) GO TO 1100
IF (RFNO .EQ. 3) GO TO 600
NB = IBUF(2)
NST = IBUF(3)
C
C     INITIALIZE SCR1 FILE
C
CALL MAKMCB (FMX,AMAT,NROWP,2,TYPE)
CALL GOPEN (AMAT,Z(BUF1),WRTREW)
C
C PACK FACTOR MATRIX FOR R. F. 1,2
C
IF (RFNO.EQ.8 .OR. RFNO.EQ.9) GO TO 100
DO 40 I = 1,NST
DO 10 J = 1,NROWP
Z(J) = 0.0
10 CONTINUE
N = 1
CALL SJUMP (N)
IF (N .LT. 0) GO TO 1200
CALL SUREAD (NL,1,N,RC)
IF (RC .NE. 1) GO TO 1100
IF (NL .LT. 0) GO TO 40
IF (NL .EQ. 0) GO TO 30
IF (NROWP+2*NL .GE. BUF1) CALL MESAGE (-8,0,SUBR)
CALL SUREAD (Z(NROWP+1),2*NL,N,RC)
IF (RC .NE. 1) GO TO 1100
NROW = NROWP - 1
DO 20 J = 1,NL
NROW = NROW + 2
NR = IZ(NROW)
Z(NR)= Z(NROW+1)
20 CONTINUE
30 CALL PACK (Z(1),AMAT,FMX)
40 CONTINUE
CALL CLOSE (AMAT,REW)
CALL WRTTRL(FMX)
GO TO 500
C
C PACK FACTOR MATRIX FOR R. F. 8,9
C
100 CALL SUREAD (IZ(1),3*NB,N,RC)
IF (RC .NE. 1) GO TO 1100
CALL SUREAD (NL,1,N,RC)
IF (RC .NE. 1) GO TO 1100
IF (NL .LE. 0) GO TO 600
IF (NL .GE. BUF1) CALL MESAGE (-8,0,SUBR)
CALL SUREAD (IZ(1),NL,N,RC)
IF (RC .NE. 1) GO TO 1100
N = 1
CALL SJUMP (N)
IF (N .LT. 0) GO TO 1200
IP = 1
IF (RFNO .EQ. 8) IP = 2
IF (RFNO .EQ. 8) ITYPP = 3
IFACT = NL + 1
NFACT = NL + NL*IP
ICOL = NFACT + 1
NCOL = NFACT + IP*NROWP
IF (NCOL .GE. BUF1) CALL MESAGE (-8,0,SUBR)
C
DO 230 I = 1,NST
DO 210 J = ICOL,NCOL
Z(J) = 0.0
210 CONTINUE
N = 1
CALL SJUMP (N)
IF (N .LT. 0) GO TO 1200
CALL SUREAD (Z(IFACT),NL*IP,N,RC)
IF (RC .NE. 1) GO TO 1100
NROW = IFACT - IP
NRS = ICOL - IP
DO 220 J = 1,NL
NROW = NROW + IP
NR = NRS + IZ(J)*IP
Z(NR)= Z(NROW)
IF (IP .EQ. 2) Z(NR+1) = Z(NROW+1)
220 CONTINUE
CALL PACK (Z(ICOL),AMAT,FMX)
230 CONTINUE
CALL CLOSE (AMAT,REW)
CALL WRTTRL (FMX)
C
C OUT = LOADS*FACTORS
C
500 FMX(1) = AMAT
CALL RDTRL (FMX)
CMX(1) = 0
CALL MAKMCB (SLMX,OUT,PMX(3),2,TYPE)
SCR = SCR3
CALL MPYAD (Z,Z,Z)
CALL WRTTRL (SLMX)
GO TO 700
C
C NO SCALAR LOADS
C
600 OUT = 0
CALL CLOSE (AMAT,REW)
700 RETURN
C
C ERRORS
C
1000 CALL SMSG (RC-2,ITM,NAME)
GO TO 600
1100 CALL SMSG (RC+4,ITM,NAME)
GO TO 600
1200 CALL SMSG (7,ITM,NAME)
GO TO 600
END
|
{"hexsha": "587f6983097030cbc6cd2c1caba00662b47ba4ad", "size": 4405, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "mis/rcovsl.f", "max_stars_repo_name": "ldallolio/NASTRAN-95", "max_stars_repo_head_hexsha": "6d2c175f5b53ebaec4ba2b5186f7926ef9d0ed47", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-01-09T14:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T11:51:42.000Z", "max_issues_repo_path": "mis/rcovsl.f", "max_issues_repo_name": "gassive/NASTRAN95", "max_issues_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-01-17T07:30:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T19:37:44.000Z", "max_forks_repo_path": "mis/rcovsl.f", "max_forks_repo_name": "gassive/NASTRAN95", "max_forks_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-04-07T20:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T14:16:01.000Z", "avg_line_length": 26.5361445783, "max_line_length": 73, "alphanum_fraction": 0.4964812713, "num_tokens": 1872}
|
import pandas as pd
import torch
import torch.optim as optim
from torch.utils.data import Subset, DataLoader, TensorDataset
from torchvision.transforms import ToTensor
from typing import Union
import numpy as np
from torchvision.datasets.mnist import MNIST
from src.AutoMLpy.optimizers.optimizer import HpOptimizer
from tests.pytorch_items.pytorch_models import CifarNet, MnistNet, CifarNetBatchNorm
from tests.pytorch_items.pytorch_training import train_pytorch_network
def get_MNIST_dataloaders(seed: int = 42):
train_split_ratio = 0.8
batch_size = 64
np.random.seed(seed)
full_train_dataset = MNIST('./datasets/', train=True, download=True, transform=ToTensor())
test_dataset = MNIST('./datasets/', train=False, download=True, transform=ToTensor())
indices = list(range(len(full_train_dataset)))
np.random.shuffle(indices)
split_index = np.floor(train_split_ratio * len(full_train_dataset)).astype(int)
train_indices = indices[:split_index]
train_dataset = Subset(full_train_dataset, train_indices)
valid_indices = indices[split_index:]
valid_dataset = Subset(full_train_dataset, valid_indices)
train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=2, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=2)
return dict(train=train_loader, valid=valid_loader, test=test_loader)
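# A quick sanity check (a minimal sketch; shapes assume the batch size of 64
# set above and MNIST's 1x28x28 images):
#
#   loaders = get_MNIST_dataloaders(seed=0)
#   x, y = next(iter(loaders["train"]))  # x: (64, 1, 28, 28), y: (64,)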
class TorchMNISTHpOptimizer(HpOptimizer):
def build_model(self, **hp) -> torch.nn.Module:
model = MnistNet()
if torch.cuda.is_available():
model.cuda()
return model
def fit_model_(
self,
model: torch.nn.Module,
X: Union[np.ndarray, pd.DataFrame, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
verbose=False,
**hp
) -> object:
if hp.get("pre_normalized", True):
X = X/torch.max(X)
optimizer = optim.SGD(model.parameters(),
lr=hp.get("learning_rate", 1e-3),
momentum=hp.get("momentum", 0.9),
nesterov=hp.get("nesterov", True))
train_pytorch_network(
model,
loaders=dict(
train=DataLoader(
TensorDataset(torch.FloatTensor(X), torch.LongTensor(y)),
batch_size=hp.get("batch_size", 64),
num_workers=2,
shuffle=True
)
),
verbose=verbose,
optimizer=optimizer,
**hp
)
return model
def score(
self,
model: torch.nn.Module,
X: Union[np.ndarray, pd.DataFrame, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
**hp
) -> float:
if hp.get("pre_normalized", True):
X = X/torch.max(X)
model_device = next(model.parameters()).device
if isinstance(X, torch.Tensor):
X = X.float().to(model_device)
y = y.to(model_device)
test_acc = np.mean((torch.argmax(model(X), dim=-1) == y).cpu().detach().numpy())
return test_acc
class TorchCifar10HpOptimizer(HpOptimizer):
def build_model(self, **hp) -> torch.nn.Module:
if hp.get("use_batchnorm", True):
model = CifarNetBatchNorm()
else:
model = CifarNet()
if torch.cuda.is_available():
model.cuda()
return model
def fit_model_(
self,
model: torch.nn.Module,
X: Union[np.ndarray, pd.DataFrame, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
verbose=False,
**hp
) -> torch.nn.Module:
if hp.get("pre_normalized", True):
X = X/torch.max(X)
optimizer = optim.SGD(model.parameters(),
lr=hp.get("learning_rate", 1e-3),
momentum=hp.get("momentum", 0.9),
nesterov=hp.get("nesterov", True))
train_pytorch_network(
model,
loaders=dict(
train=DataLoader(
TensorDataset(torch.FloatTensor(X), torch.LongTensor(y)),
batch_size=hp.get("batch_size", 64),
num_workers=2,
shuffle=True
)
),
verbose=verbose,
optimizer=optimizer,
**hp
)
return model
def score(
self,
model: torch.nn.Module,
X: Union[np.ndarray, pd.DataFrame, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
**hp
) -> float:
if hp.get("pre_normalized", True):
X = X/torch.max(X)
model_device = next(model.parameters()).device
if isinstance(X, torch.Tensor):
X = X.float().to(model_device)
y = y.to(model_device)
test_acc = np.mean((torch.argmax(model(X), dim=-1) == y).cpu().detach().numpy())
return test_acc
if __name__ == '__main__':
from tests.pytorch_items.pytorch_datasets import get_torch_Cifar10_X_y
hp = dict(
epochs=15,
batch_size=64,
learning_rate=1e-3,
nesterov=True,
momentum=0.9,
use_batchnorm=True,
pre_normalized=False,
)
X_y_dict = get_torch_Cifar10_X_y()
opt = TorchCifar10HpOptimizer()
model_ = opt.build_model(**hp)
opt.fit_model_(
model_,
X_y_dict["train"]["x"],
X_y_dict["train"]["y"],
verbose=True,
**hp
)
test_acc = opt.score(
model_.cpu(),
X_y_dict["test"]["x"],
X_y_dict["test"]["y"],
**hp
)
print(test_acc)
|
{"hexsha": "a79d46700fb890f7f6eb23f737f73fec11374039", "size": 5910, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/pytorch_items/pytorch_hp_optimizers.py", "max_stars_repo_name": "JeremieGince/AutoMLpy", "max_stars_repo_head_hexsha": "59c2214da0eb6e767446cc2157395c348fddff4e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-19T22:48:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-20T01:08:42.000Z", "max_issues_repo_path": "tests/pytorch_items/pytorch_hp_optimizers.py", "max_issues_repo_name": "JeremieGince/AutoMLpy", "max_issues_repo_head_hexsha": "59c2214da0eb6e767446cc2157395c348fddff4e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/pytorch_items/pytorch_hp_optimizers.py", "max_forks_repo_name": "JeremieGince/AutoMLpy", "max_forks_repo_head_hexsha": "59c2214da0eb6e767446cc2157395c348fddff4e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.942408377, "max_line_length": 96, "alphanum_fraction": 0.5700507614, "include": true, "reason": "import numpy", "num_tokens": 1303}
|
[STATEMENT]
lemma ct_prefixE [elim?]:
assumes "ct_prefix xs ys"
obtains as zs where "ys = as @ zs" "ct_list_eq as xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>as zs. \<lbrakk>ys = as @ zs; ct_list_eq as xs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
ct_prefix xs ys
goal (1 subgoal):
1. (\<And>as zs. \<lbrakk>ys = as @ zs; ct_list_eq as xs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding ct_prefix_def
[PROOF STATE]
proof (prove)
using this:
\<exists>as bs. ys = as @ bs \<and> ct_list_eq as xs
goal (1 subgoal):
1. (\<And>as zs. \<lbrakk>ys = as @ zs; ct_list_eq as xs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
|
{"llama_tokens": 336, "file": "WebAssembly_Wasm_Checker_Types", "length": 3}
|
"""
# F-1 Score for Multi-Class Classification
## Exercise problems
Exercise 1. Robots' prediction performance comparison (Minsuk Heo 허민석, 2017)
robot1 = [[100, 80, 10, 10],
[0, 9, 0, 1],
[0, 1, 8, 1],
[0, 1, 0, 9]]
robot2 = [[198, 2, 0, 0],
[7, 1, 0, 2],
[0, 8, 1, 1],
[2, 3, 4, 1]]
-----------
Exercise 2. A sample (Baeldung, 2020)
samples = [[50, 3, 0, 0],
[26, 8, 0, 1],
[20, 2, 4, 0],
[12, 0, 0, 1]]
References
- Minsuk Heo 허민석 (2017) [Machine Learning] Measuring multi-class classification model performance (accuracy, f1 score, precision, recall on multiclass classification) https://www.youtube.com/watch?v=8DbC39cvvis&t=576s
- Baeldung (2020) F-1 Score for Multi-Class Classification https://www.baeldung.com/cs/multi-class-f1-score
-------------------------------------------------------------------------------------------------------------------"""
import numpy as np
def micro_precisions(model):
    # Per-class precision: the diagonal element divided by its row sum
    precisions = []  # renamed so the local no longer shadows the function
    length = len(model)
    for i in range(0, length):
        precision = model[i][i] / sum(model[i])
        precisions.append(precision)
    return precisions
def micro_recalls(model):
    # Per-class recall: the diagonal element divided by its column sum
    length = len(model)
    arr = np.array(model)  # convert the list into an array; transpose works on arrays only
    transposed_matrix = arr.transpose()
    recalls = []  # renamed so the local no longer shadows the function
    for i in range(0, length):
        recall = transposed_matrix[i][i] / sum(transposed_matrix[i])
        recalls.append(recall)
    return recalls
def macro_precision(model):
mac_precision = sum(micro_precisions(model)) / len(model)
return mac_precision
def macro_recall(model):
    mac_recall = sum(micro_recalls(model)) / len(model)
return mac_recall
def f1_score(model):
mac_prec = macro_precision(model)
mac_rec = macro_recall(model)
f1 = 2 * (mac_prec * mac_rec) / (mac_prec + mac_rec)
return f1
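# A worked check using robot1 from the module docstring (a minimal sketch;
# the expected values below were computed by hand and rounded):
if __name__ == "__main__":
    robot1 = [[100, 80, 10, 10],
              [0, 9, 0, 1],
              [0, 1, 8, 1],
              [0, 1, 0, 9]]
    print(macro_precision(robot1))  # ~0.775
    print(macro_recall(robot1))     # ~0.493
    print(f1_score(robot1))         # ~0.603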
|
{"hexsha": "e35822edb2320ec2581aabde31949bcc7e9134ed", "size": 1869, "ext": "py", "lang": "Python", "max_stars_repo_path": "Accuracy, precision, recall & f1/f1_score_exercise.py", "max_stars_repo_name": "CodingWillow/MachineLearning", "max_stars_repo_head_hexsha": "340c9d91d4178a2ab56921502bdcee73864a1a59", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Accuracy, precision, recall & f1/f1_score_exercise.py", "max_issues_repo_name": "CodingWillow/MachineLearning", "max_issues_repo_head_hexsha": "340c9d91d4178a2ab56921502bdcee73864a1a59", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Accuracy, precision, recall & f1/f1_score_exercise.py", "max_forks_repo_name": "CodingWillow/MachineLearning", "max_forks_repo_head_hexsha": "340c9d91d4178a2ab56921502bdcee73864a1a59", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4852941176, "max_line_length": 165, "alphanum_fraction": 0.6035313002, "include": true, "reason": "import numpy", "num_tokens": 580}
|
[STATEMENT]
lemma nsqn_quality_increases_dhops [elim]:
assumes "i\<in>kD(rt \<xi>)"
and "quality_increases \<xi> \<xi>'"
and "nsqn (rt \<xi>) i = nsqn (rt \<xi>') i"
shows "the (dhops (rt \<xi>) i) \<ge> the (dhops (rt \<xi>') i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. the (dhops (rt \<xi>') i) \<le> the (dhops (rt \<xi>) i)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
i \<in> kD (rt \<xi>)
quality_increases \<xi> \<xi>'
nsqn (rt \<xi>) i = nsqn (rt \<xi>') i
goal (1 subgoal):
1. the (dhops (rt \<xi>') i) \<le> the (dhops (rt \<xi>) i)
[PROOF STEP]
unfolding quality_increases_def
[PROOF STATE]
proof (prove)
using this:
i \<in> kD (rt \<xi>)
(\<forall>dip\<in>kD (rt \<xi>). dip \<in> kD (rt \<xi>') \<and> rt \<xi> \<sqsubseteq>\<^bsub>dip\<^esub> rt \<xi>') \<and> (\<forall>dip. sqn (rt \<xi>) dip \<le> sqn (rt \<xi>') dip)
nsqn (rt \<xi>) i = nsqn (rt \<xi>') i
goal (1 subgoal):
1. the (dhops (rt \<xi>') i) \<le> the (dhops (rt \<xi>) i)
[PROOF STEP]
by (clarsimp) (drule(1) bspec, clarsimp simp: rt_fresher_def2)
|
{"llama_tokens": 504, "file": "AODV_variants_c_gtobcast_C_Quality_Increases", "length": 3}
|
\section{Introduction}
\subsection{System Purpose}
RAVEN is a flexible and multi-purpose software for uncertainty quantification (UQ), regression analysis, probabilistic risk
assessment (PRA), data analysis and model optimization. Depending on the tasks to be accomplished and on the probabilistic
characterization of the problem, RAVEN perturbs the parameters of the system under consideration (via Monte Carlo, Latin
hypercube, reliability surface search, etc.) and records the corresponding responses. The system is modeled by third-party
software (RELAP5-3D, MAAP5, BISON, etc.) and is accessible to RAVEN either directly (software coupling) or
indirectly (via input/output files).
and more advanced data mining approaches. RAVEN also manages the parallel dispatching (i.e. both on
desktop/workstation and large High-Performance Computing machines) of the software representing the physical
model. RAVEN heavily relies on artificial intelligence algorithms to construct surrogate models of complex physical
systems in order to perform uncertainty quantification, reliability analysis (limit state surface) and parametric studies.
\subsection{System Scope}
RAVEN’s scope is to provide a set of capabilities for building analysis flows based on UQ, PRA, optimization and data analysis techniques, applicable to any physical model(s). The main objective of the software is to assist the engineer/user to:
\begin{itemize}
\item identify the best design (on any physics/model), its safety and confidence;
\item estimate the likelihood of undesired outcomes (risk analysis);
\item identify main drivers/events to act on for reducing impact/consequences of anomalous dynamic behaviors of the
system under analysis;
\item construct analysis flows combining multiple physical models and analysis procedures.
\end{itemize}
In other words, the RAVEN software is aimed to be employed for:
\begin{itemize}
\item Uncertainty Quantification;
\item Sensitivity Analysis / Regression Analysis;
\item Probabilistic Risk and Reliability Analysis (PRA);
\item Data Mining Analysis;
\item Model Optimization.
\end{itemize}
The combination of all the analysis capabilities mentioned above is a key component for
defining safety margins in engineering design that are more representative of actual prediction deficiencies.
This can reduce
cost while maintaining a more consistent safety level of the system (neither an excess nor a lack of safety margins in any
operational condition).
The risk analysis, assisted by the data mining algorithms, is used to find engineering solutions to reduce costs, while
preserving safety margins, or to increase safety at the minimum cost. These tasks can be automatically achieved by using
optimization algorithms available in the RAVEN software.
Moreover, the knowledge of the relationship between input and system response uncertainties allows identifying effective
experiments, which are the most suitable for increasing the accuracy of the model. This approach reduces time and cost
of the deployment of complex engineering systems and new technologies.
The RAVEN software employs several novel and unique techniques, based on extensive usage of artificial intelligence
algorithms, such as adaptive (smart) sampling, adaptive branching algorithms (Dynamic Event Tree), time-dependent
statistical analysis and data mining.
The overall set of algorithms implemented in the RAVEN software are designed to handle highly non-linear systems,
characterized by system response discontinuities and discrete variables. These capabilities are crucial for handling
complex system models, such as nuclear power plants.
For example, reliability surface analysis, as implemented in RAVEN, is unique and capable of handling non-linear,
discontinuous systems, allowing faster and more accurate assessment of failure risk for complex systems.
In addition, the RAVEN software provides the unique capability to combine any models (e.g. physical models, surrogate
models, data analysis models, etc.) into a single entity (named Ensemble Model) where each model can feed back into the others. This capability allows the user to analyze systems that can be simulated only by using complex computational workflows.
\subsection{User Characteristics}
The users of the RAVEN software are expected to fall into one of the
following categories:
\begin{itemize}
\item \textbf{Core developers (RAVEN core team)}: These are the developers of the RAVEN software. They are responsible for following
and enforcing the appropriate software development standards, and for designing, implementing and
maintaining the software.
\item \textbf{External developers}: A scientist or engineer who uses the RAVEN framework and wants to extend its capabilities (new interfaces to external
applications, new data analysis techniques, new sampling strategies, etc.). This user will typically have a background in modeling and
simulation techniques and/or numerical analysis, but may have only a limited skill set when it comes to object-oriented coding and the C++/Python languages.
\item \textbf{Analysts}: These are users who run the code and perform various analyses on their simulations. They may interact with the developers of the system, requesting new features and reporting bugs, and will typically make heavy use of the input file format.
\end{itemize}
|
{"hexsha": "0ede679642ba6c8de8841a283b6d4f376858e5c9", "size": 5504, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/sqa/sdd/ravenIntro.tex", "max_stars_repo_name": "rinelson456/raven", "max_stars_repo_head_hexsha": "1114246136a2f72969e75b5e99a11b35500d4eef", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 159, "max_stars_repo_stars_event_min_datetime": "2017-03-24T21:07:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T13:44:40.000Z", "max_issues_repo_path": "doc/sqa/sdd/ravenIntro.tex", "max_issues_repo_name": "rinelson456/raven", "max_issues_repo_head_hexsha": "1114246136a2f72969e75b5e99a11b35500d4eef", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1667, "max_issues_repo_issues_event_min_datetime": "2017-03-27T14:41:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:50:06.000Z", "max_forks_repo_path": "doc/sqa/sdd/ravenIntro.tex", "max_forks_repo_name": "rinelson456/raven", "max_forks_repo_head_hexsha": "1114246136a2f72969e75b5e99a11b35500d4eef", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 95, "max_forks_repo_forks_event_min_datetime": "2017-03-24T21:05:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T17:30:22.000Z", "avg_line_length": 75.397260274, "max_line_length": 292, "alphanum_fraction": 0.8117732558, "num_tokens": 1083}
|
import os
import keras
import numpy as np
import tensorflow as tf
from keras.models import load_model
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from .constants import model_dir
from .constants import default_test_folder_path
from .constants import default_train_folder_path
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True
)
test_datagen = ImageDataGenerator(rescale=1./255)
def train_model(new_model, train_folder_path, test_folder_path):
    # Check that both the train and the test folders are directories
    # before starting a training run
    if not os.path.isdir(train_folder_path):
        print('\n The provided train folder is not a directory')
        return
    if not os.path.isdir(test_folder_path):
        print('\n The provided test folder is not a directory')
        return
    train(new_model, train_folder=train_folder_path, test_folder=test_folder_path)
def _generator(folder_path=None, is_train_set=True):
    """
    Accept a folder path and build a data generator from it.
    If no folder is supplied, fall back to the default train/test folder
    paths imported from the constants module.
    """
if is_train_set:
if folder_path is None:
folder_path = default_train_folder_path
return train_datagen.flow_from_directory(folder_path,target_size=(64, 64),
batch_size=32,
class_mode='binary')
if folder_path is None:
folder_path = default_test_folder_path
return test_datagen.flow_from_directory(folder_path,target_size=(64, 64),
batch_size=32,
class_mode='binary')
def train(model_name, epochs=100, all_count=10000, train_folder=None, test_folder=None):
#Generate training data set
training_set = _generator(train_folder, is_train_set=True)
#Generate test data set
test_set = _generator(test_folder, is_train_set=False)
    epoch_steps = all_count / 32
model_path = os.path.join(model_dir, model_name)
print("Training")
classifier = Sequential()
# Step 1 - Convolution
classifier.add(
Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
# checkpoint
checkpoint = ModelCheckpoint(model_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
if os.path.isfile(model_path):
print ("Resumed model's weights from {}".format(model_path))
# load weights
classifier.load_weights(model_path)
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
classifier.fit_generator(
training_set,
steps_per_epoch=epoch_steps,
epochs=epochs,
verbose=1,
validation_data=test_set,
validation_steps=2000,
callbacks=callbacks_list)
#Model confidence
x, y = zip(*(test_set[i] for i in range(len(test_set))))
x_test, y_test = np.vstack(x), np.vstack(y)
loss, acc = classifier.evaluate(x_test, y_test.ravel(), batch_size=64)
print("Confidence: " ,round(acc*100),'%')
#print("Loss: ", loss)
# training_set.class_indices
train.label = training_set.class_indices
train.model = classifier
#save Model with Unique ID
def saveModel():
labels = train.label.keys()
labels = str(list(labels))+"_model.h5"
save = train.model.save(labels)
return save
def prepImage(testImage):
test_image = image.load_img(testImage, target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
return test_image
def setupTF():
config = tf.ConfigProto(device_count={'GPU': 1})
sess = tf.Session(config=config)
keras.backend.set_session(sess)
return
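# Minimal usage sketch (the model file name and dataset folders below are
# illustrative, not shipped with this module):
#
#   setupTF()
#   train_model('classifier.h5',
#               './datasets/training_set', './datasets/test_set')
#   saveModel()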
|
{"hexsha": "3d9372ff9785c26fc6184ce34d971ac573b90444", "size": 4971, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/train_model.py", "max_stars_repo_name": "Parakconcepts/mlclassification", "max_stars_repo_head_hexsha": "e364f3ce50d4e1199a5b6233c0b44b3d674b25d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/train_model.py", "max_issues_repo_name": "Parakconcepts/mlclassification", "max_issues_repo_head_hexsha": "e364f3ce50d4e1199a5b6233c0b44b3d674b25d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/train_model.py", "max_forks_repo_name": "Parakconcepts/mlclassification", "max_forks_repo_head_hexsha": "e364f3ce50d4e1199a5b6233c0b44b3d674b25d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0709677419, "max_line_length": 107, "alphanum_fraction": 0.662039831, "include": true, "reason": "import numpy", "num_tokens": 1078}
|
[STATEMENT]
lemma ord_option_Some1_iff: "ord_option R (Some a) y \<longleftrightarrow> (\<exists>b. y = Some b \<and> R a b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ord_option R (Some a) y = (\<exists>b. y = Some b \<and> R a b)
[PROOF STEP]
by (cases y; auto)
|
{"llama_tokens": 112, "file": "Markov_Models_ex_MDP_RP", "length": 1}
|
import itertools as it
import os
import tempfile
import xml.etree.ElementTree as ET
from typing import Any, List, Optional, Tuple, Type
from collections import OrderedDict
import gym
from gym.spaces import Dict, Box
import numpy as np
from mujoco_maze import maze_env_utils, maze_task
from mujoco_maze.agent_model import AgentModel
from mujoco_maze.maze_env import MazeEnv
class GoalMazeEnv(MazeEnv):
"""Make MazeEnv like GoalEnv, but with only positions for goals"""
def _get_obs_space(self) -> gym.spaces.Dict:
shape = self._get_obs()["observation"].shape
high = np.inf * np.ones(shape, dtype=np.float32)
low = -high
# Set velocity limits
wrapped_obs_space = self.wrapped_env.observation_space
high[: wrapped_obs_space.shape[0]] = wrapped_obs_space.high
low[: wrapped_obs_space.shape[0]] = wrapped_obs_space.low
# Set coordinate limits
low[0], high[0], low[1], high[1] = self._xy_limits()
# Set orientation limits
observation_space = Box(low, high, shape=shape, dtype="float32")
goal_space = Box(low[:2], high[:2], shape=(2,), dtype="float32")
return Dict(
OrderedDict(
[
("observation", observation_space),
("desired_goal", goal_space),
("achieved_goal", goal_space),
]
)
)
    def _get_obs(self) -> "OrderedDict[str, np.ndarray]":
observation = super()._get_obs()
achieved_goal = self.wrapped_env.get_xy()
if hasattr(self._task, "current_goal"):
desired_goal = self._task.current_goal.pos
else:
# Just use first goal in self._task.goals
desired_goal = self._task.goals[0].pos
return OrderedDict(
[
("observation", observation),
("desired_goal", desired_goal),
("achieved_goal", achieved_goal),
]
)
def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, dict]:
self.t += 1
if self.wrapped_env.MANUAL_COLLISION:
old_pos = self.wrapped_env.get_xy()
old_objballs = self._objball_positions()
inner_next_obs, inner_reward, _, info = self.wrapped_env.step(action)
new_pos = self.wrapped_env.get_xy()
new_objballs = self._objball_positions()
            # Check whether moving from old_pos to new_pos crosses a wall
collision = self._collision.detect(old_pos, new_pos)
if collision is not None:
pos = collision.point + self._restitution_coef * collision.rest()
if self._collision.detect(old_pos, pos) is not None:
                    # If the corrected pos still collides, give up and restore the old position
self.wrapped_env.set_xy(old_pos)
else:
self.wrapped_env.set_xy(pos)
# Do the same check for object balls
for name, old, new in zip(self.object_balls, old_objballs, new_objballs):
collision = self._objball_collision.detect(old, new)
if collision is not None:
pos = collision.point + self._restitution_coef * collision.rest()
if self._objball_collision.detect(old, pos) is not None:
pos = old
idx = self.wrapped_env.model.body_name2id(name)
self.wrapped_env.data.xipos[idx][:2] = pos
else:
inner_next_obs, inner_reward, _, info = self.wrapped_env.step(action)
next_obs = self._get_obs()
inner_reward = self._inner_reward_scaling * inner_reward
outer_reward = self._task.reward(next_obs["observation"])
done = self._task.termination(next_obs["observation"])
info["position"] = self.wrapped_env.get_xy()
return next_obs, inner_reward + outer_reward, done, info
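# Observation layout produced by _get_obs (and hence by step); construction
# arguments are inherited unchanged from MazeEnv:
#
#   obs["observation"]   # the flat MazeEnv observation vector
#   obs["desired_goal"]  # (2,) xy position of the active task goal
#   obs["achieved_goal"] # (2,) current xy position of the agent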
|
{"hexsha": "dff853883d82dd33b74a60da2f0b0190a7f3825b", "size": 3956, "ext": "py", "lang": "Python", "max_stars_repo_path": "mujoco_maze/goal_maze_env.py", "max_stars_repo_name": "jypark0/mujoco-maze", "max_stars_repo_head_hexsha": "da477ecbf3451fd7cf907b83f03664a6e5358bcc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mujoco_maze/goal_maze_env.py", "max_issues_repo_name": "jypark0/mujoco-maze", "max_issues_repo_head_hexsha": "da477ecbf3451fd7cf907b83f03664a6e5358bcc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mujoco_maze/goal_maze_env.py", "max_forks_repo_name": "jypark0/mujoco-maze", "max_forks_repo_head_hexsha": "da477ecbf3451fd7cf907b83f03664a6e5358bcc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.7835051546, "max_line_length": 87, "alphanum_fraction": 0.6066734075, "include": true, "reason": "import numpy", "num_tokens": 862}
|
# This README was generated directly from
# [this source file](https://github.com/fredrikekre/Literate.jl/blob/master/examples/README.jl)
# running these commands from the package root of Literate.jl:
|
{"hexsha": "376faa4235df87347d9dd8a78067db9172667115", "size": 202, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/readme.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/FocusedBlindDecon.jl-3b47aaff-9524-5d9d-a292-eeb6a187c032", "max_stars_repo_head_hexsha": "071e0f915cb96768f34f359d448804faaef555cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/readme.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/FocusedBlindDecon.jl-3b47aaff-9524-5d9d-a292-eeb6a187c032", "max_issues_repo_head_hexsha": "071e0f915cb96768f34f359d448804faaef555cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/readme.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/FocusedBlindDecon.jl-3b47aaff-9524-5d9d-a292-eeb6a187c032", "max_forks_repo_head_hexsha": "071e0f915cb96768f34f359d448804faaef555cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4, "max_line_length": 95, "alphanum_fraction": 0.7871287129, "num_tokens": 49}
|
"""Mean covariance estimation."""
from copy import deepcopy
import numpy as np
from .base import sqrtm, invsqrtm, logm, expm
from .ajd import ajd_pham
from .distance import distance_riemann
from .geodesic import geodesic_riemann
def _get_sample_weight(sample_weight, data):
"""Get the sample weights.
    If None is provided, weights are initialized to ones; in all cases they are normalized to sum to one.
"""
if sample_weight is None:
sample_weight = np.ones(data.shape[0])
if len(sample_weight) != data.shape[0]:
raise ValueError("len of sample_weight must be equal to len of data.")
sample_weight /= np.sum(sample_weight)
return sample_weight
def mean_riemann(covmats, tol=10e-9, maxiter=50, init=None,
sample_weight=None):
r"""Return the mean covariance matrix according to the Riemannian metric.
    The procedure is a gradient descent minimizing the sum of squared
    Riemannian distances to the mean.
.. math::
\mathbf{C} = \arg\min{(\sum_i \delta_R ( \mathbf{C} , \mathbf{C}_i)^2)}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the gradient descent. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
""" # noqa
# init
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
nu = 1.0
tau = np.finfo(np.float64).max
crit = np.finfo(np.float64).max
# stop when J<10^-9 or max iteration = 50
while (crit > tol) and (k < maxiter) and (nu > tol):
k = k + 1
C12 = sqrtm(C)
Cm12 = invsqrtm(C)
J = np.zeros((n_channels, n_channels))
for index in range(n_trials):
tmp = np.dot(np.dot(Cm12, covmats[index, :, :]), Cm12)
J += sample_weight[index] * logm(tmp)
crit = np.linalg.norm(J, ord='fro')
h = nu * crit
C = np.dot(np.dot(C12, expm(nu * J)), C12)
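        # Step-size control: shrink the step gently while the scaled
        # gradient norm h = nu * crit keeps improving (h < tau),
        # otherwise halve it.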
if h < tau:
nu = 0.95 * nu
tau = h
else:
nu = 0.5 * nu
return C
def mean_logeuclid(covmats, sample_weight=None):
r"""Return the mean covariance matrix according to the log-Euclidean
metric.
.. math::
\mathbf{C} = \exp{(\frac{1}{N} \sum_i \log{\mathbf{C}_i})}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
T = np.zeros((n_channels, n_channels))
for index in range(n_trials):
T += sample_weight[index] * logm(covmats[index, :, :])
C = expm(T)
return C
def mean_kullback_sym(covmats, sample_weight=None):
"""Return the mean covariance matrix according to KL divergence.
This mean is the geometric mean between the Arithmetic and the Harmonic
mean, as shown in [1]_.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
References
----------
.. [1] Moakher, Maher, and Philipp G. Batchelor. "Symmetric
positive-definite matrices: From geometry to applications and
visualization." In Visualization and Processing of Tensor Fields, pp.
285-298. Springer Berlin Heidelberg, 2006.
"""
C_Arithmetic = mean_euclid(covmats, sample_weight)
C_Harmonic = mean_harmonic(covmats, sample_weight)
C = geodesic_riemann(C_Arithmetic, C_Harmonic, 0.5)
return C
def mean_harmonic(covmats, sample_weight=None):
r"""Return the harmonic mean of a set of covariance matrices.
.. math::
\mathbf{C} = \left(\frac{1}{N} \sum_i {\mathbf{C}_i}^{-1}\right)^{-1}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
T = np.zeros((n_channels, n_channels))
for index in range(n_trials):
T += sample_weight[index] * np.linalg.inv(covmats[index, :, :])
C = np.linalg.inv(T)
return C
def mean_logdet(covmats, tol=10e-5, maxiter=50, init=None, sample_weight=None):
r"""Return the mean covariance matrix according to the logdet metric.
This is an iterative procedure where the update is:
.. math::
\mathbf{C} = \left(\sum_i \left( 0.5 \mathbf{C} + 0.5 \mathbf{C}_i \right)^{-1} \right)^{-1}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
crit = np.finfo(np.float64).max
# stop when J<10^-9 or max iteration = 50
while (crit > tol) and (k < maxiter):
k = k + 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
J += sample_weight[index] * np.linalg.inv(0.5 * Ci + 0.5 * C)
Cnew = np.linalg.inv(J)
crit = np.linalg.norm(Cnew - C, ord='fro')
C = Cnew
return C
def mean_wasserstein(covmats, tol=10e-4, maxiter=50, init=None,
sample_weight=None):
r"""Return the mean covariance matrix according to the Wasserstein metric.
This is an iterative procedure where the update is [1]_:
.. math::
\mathbf{K} = \left(\sum_i \left( \mathbf{K} \mathbf{C}_i \mathbf{K} \right)^{1/2} \right)^{1/2}
with :math:`\mathbf{K} = \mathbf{C}^{1/2}`.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
References
----------
.. [1] Barbaresco, F. "Geometric Radar Processing based on Frechet distance:
Information geometry versus Optimal Transport Theory", Radar Symposium
(IRS), 2011 Proceedings International.
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
K = sqrtm(C)
crit = np.finfo(np.float64).max
# stop when J<10^-9 or max iteration = 50
while (crit > tol) and (k < maxiter):
k = k + 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = np.dot(np.dot(K, Ci), K)
J += sample_weight[index] * sqrtm(tmp)
Knew = sqrtm(J)
crit = np.linalg.norm(Knew - K, ord='fro')
K = Knew
if k == maxiter:
        print('Max number of iterations reached')
C = np.dot(K, K)
return C
def mean_euclid(covmats, sample_weight=None):
r"""Return the mean covariance matrix according to the Euclidean metric :
.. math::
\mathbf{C} = \frac{1}{N} \sum_i \mathbf{C}_i
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
return np.average(covmats, axis=0, weights=sample_weight)
def mean_ale(covmats, tol=10e-7, maxiter=50, sample_weight=None):
"""Return the mean covariance matrix according using the AJD-based
log-Euclidean Mean (ALE). See [1].
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
Notes
-----
.. versionadded:: 0.2.4
References
----------
[1] M. Congedo, B. Afsari, A. Barachant, M. Moakher, 'Approximate Joint
Diagonalization and Geometric Mean of Symmetric Positive Definite
Matrices', PLoS ONE, 2015
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
crit = np.inf
k = 0
# init with AJD
B, _ = ajd_pham(covmats)
while (crit > tol) and (k < maxiter):
k += 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
update = np.diag(np.diag(expm(J)))
B = np.dot(B, invsqrtm(update))
crit = distance_riemann(np.eye(n_channels), update)
A = np.linalg.inv(B)
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
C = np.dot(np.dot(A.T, expm(J)), A)
return C
def mean_alm(covmats, tol=1e-14, maxiter=100,
verbose=False, sample_weight=None):
r"""Return Ando-Li-Mathias (ALM) mean
Find the geometric mean recursively [1]_, generalizing from:
.. math::
\mathbf{C} = A^{\frac{1}{2}}(A^{-\frac{1}{2}}B^{\frac{1}{2}}A^{-\frac{1}{2}})^{\frac{1}{2}}A^{\frac{1}{2}}
    This recursive generalization may require a high number of iterations.
This is the adaptation of the Matlab code proposed by Dario Bini and
Bruno Iannazzo, http://bezout.dm.unipi.it/software/mmtoolbox/
Extremely slow, due to the recursive formulation.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop iterations
:param maxiter: maximum number of iteration, default 100
:param verbose: indicate when reaching maxiter
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
Notes
-----
.. versionadded:: 0.2.8.dev
References
----------
.. [1] T. Ando, C.-K. Li and R. Mathias, "Geometric Means", Linear Algebra
Appl. 385 (2004), 305-334.
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
C = covmats
C_iter = np.zeros_like(C)
n_trials = covmats.shape[0]
if n_trials == 2:
alpha = sample_weight[1] / sample_weight[0] / 2
X = geodesic_riemann(covmats[0], covmats[1], alpha=alpha)
return X
else:
for k in range(maxiter):
for h in range(n_trials):
s = np.mod(np.arange(h, h + n_trials - 1) + 1, n_trials)
C_iter[h] = mean_alm(C[s], sample_weight=sample_weight[s])
norm_iter = np.linalg.norm(C_iter[0] - C[0], 2)
norm_c = np.linalg.norm(C[0], 2)
if (norm_iter / norm_c) < tol:
break
C = deepcopy(C_iter)
else:
if verbose:
print('Max number of iterations reached')
return C_iter.mean(axis=0)
def mean_identity(covmats, sample_weight=None):
    r"""Return the identity matrix corresponding to the covmats set size
.. math::
\mathbf{C} = \mathbf{I}_d
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:returns: the identity matrix of size n_channels
"""
C = np.eye(covmats.shape[1])
return C
def mean_covariance(covmats, metric='riemann', sample_weight=None, *args):
"""Return the mean covariance matrix according to the metric
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
    :param metric: the metric (default 'riemann'), can be: 'riemann',
        'logeuclid', 'euclid', 'logdet', 'identity', 'wasserstein', 'ale',
        'alm', 'harmonic', 'kullback_sym' or a callable function
:param sample_weight: the weight of each sample
    :param args: the arguments passed to the sub-function
:returns: the mean covariance matrix
"""
if callable(metric):
C = metric(covmats, sample_weight=sample_weight, *args)
else:
C = mean_methods[metric](covmats, sample_weight=sample_weight, *args)
return C
mean_methods = {'riemann': mean_riemann,
'logeuclid': mean_logeuclid,
'euclid': mean_euclid,
'identity': mean_identity,
'logdet': mean_logdet,
'wasserstein': mean_wasserstein,
'ale': mean_ale,
'harmonic': mean_harmonic,
'kullback_sym': mean_kullback_sym,
'alm': mean_alm}
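# Hedged usage sketch: `mean_covariance` dispatches on the metric name through
# the `mean_methods` table above, or accepts a callable directly.
#
#   C_r = mean_covariance(covmats, metric='riemann')
#   C_e = mean_covariance(covmats, metric=mean_euclid)  # equivalent to 'euclid'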
def _check_mean_method(method):
    """Check that the mean method is a known string or a callable."""
if isinstance(method, str):
if method not in mean_methods.keys():
raise ValueError('Unknown mean method')
else:
method = mean_methods[method]
elif not hasattr(method, '__call__'):
raise ValueError('mean method must be a function or a string.')
return method
|
{"hexsha": "bbfba00ada95ca4b323dab1489addc7b7c3e9bf4", "size": 13774, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyriemann/utils/mean.py", "max_stars_repo_name": "qbarthelemy/pyRiemann", "max_stars_repo_head_hexsha": "b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-30T01:18:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-30T01:18:51.000Z", "max_issues_repo_path": "pyriemann/utils/mean.py", "max_issues_repo_name": "qbarthelemy/pyRiemann", "max_issues_repo_head_hexsha": "b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyriemann/utils/mean.py", "max_forks_repo_name": "qbarthelemy/pyRiemann", "max_forks_repo_head_hexsha": "b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7952380952, "max_line_length": 116, "alphanum_fraction": 0.6359082329, "include": true, "reason": "import numpy", "num_tokens": 3838}
|
[STATEMENT]
lemma finfun_snd_comp_conv: "finfun_snd (f \<circ>$ g) = (snd \<circ> f) \<circ>$ g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finfun_snd (f \<circ>$ g) = (snd \<circ> f) \<circ>$ g
[PROOF STEP]
by(simp add: finfun_snd_def)
|
{"llama_tokens": 118, "file": "FinFun_FinFun", "length": 1}
|
/-
Copyright (c) 2021 Eric Rodriguez. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Rodriguez
-/
import ring_theory.polynomial.cyclotomic.basic
import tactic.by_contra
import topology.algebra.polynomial
import number_theory.padics.padic_norm
/-!
# Evaluating cyclotomic polynomials
This file states some results about evaluating cyclotomic polynomials in various different ways.
## Main definitions
* `polynomial.eval(₂)_one_cyclotomic_prime(_pow)`: `eval 1 (cyclotomic p^k R) = p`.
* `polynomial.eval_one_cyclotomic_not_prime_pow`: Otherwise, `eval 1 (cyclotomic n R) = 1`.
* `polynomial.cyclotomic_pos` : `∀ x, 0 < eval x (cyclotomic n R)` if `2 < n`.
-/
namespace polynomial
open finset nat
open_locale big_operators
@[simp] lemma eval_one_cyclotomic_prime {R : Type*} [comm_ring R] {p : ℕ} [hn : fact p.prime] :
eval 1 (cyclotomic p R) = p :=
by simp only [cyclotomic_eq_geom_sum hn.out, geom_sum_def, eval_X, one_pow, sum_const, eval_pow,
eval_finset_sum, card_range, smul_one_eq_coe]
@[simp] lemma eval₂_one_cyclotomic_prime {R S : Type*} [comm_ring R] [semiring S] (f : R →+* S)
{p : ℕ} [fact p.prime] : eval₂ f 1 (cyclotomic p R) = p :=
by simp
@[simp] lemma eval_one_cyclotomic_prime_pow {R : Type*} [comm_ring R] {p : ℕ} (k : ℕ)
[hn : fact p.prime] : eval 1 (cyclotomic (p ^ (k + 1)) R) = p :=
by simp only [cyclotomic_prime_pow_eq_geom_sum hn.out, geom_sum_def, eval_X, one_pow, sum_const,
eval_pow, eval_finset_sum, card_range, smul_one_eq_coe]
@[simp] lemma eval₂_one_cyclotomic_prime_pow {R S : Type*} [comm_ring R] [semiring S] (f : R →+* S)
{p : ℕ} (k : ℕ) [fact p.prime] : eval₂ f 1 (cyclotomic (p ^ (k + 1)) R) = p :=
by simp
private lemma cyclotomic_neg_one_pos {n : ℕ} (hn : 2 < n) {R} [linear_ordered_comm_ring R] :
0 < eval (-1 : R) (cyclotomic n R) :=
begin
haveI := ne_zero.of_gt hn,
rw [←map_cyclotomic_int, ←int.cast_one, ←int.cast_neg, eval_int_cast_map,
int.coe_cast_ring_hom, int.cast_pos],
suffices : 0 < eval ↑(-1 : ℤ) (cyclotomic n ℝ),
{ rw [←map_cyclotomic_int n ℝ, eval_int_cast_map, int.coe_cast_ring_hom] at this,
exact_mod_cast this },
simp only [int.cast_one, int.cast_neg],
have h0 := cyclotomic_coeff_zero ℝ hn.le,
rw coeff_zero_eq_eval_zero at h0,
by_contra' hx,
have := intermediate_value_univ (-1) 0 (cyclotomic n ℝ).continuous,
obtain ⟨y, hy : is_root _ y⟩ := this (show (0 : ℝ) ∈ set.Icc _ _, by simpa [h0] using hx),
rw is_root_cyclotomic_iff at hy,
rw hy.eq_order_of at hn,
exact hn.not_le linear_ordered_ring.order_of_le_two,
end
lemma cyclotomic_pos {n : ℕ} (hn : 2 < n) {R} [linear_ordered_comm_ring R] (x : R) :
0 < eval x (cyclotomic n R) :=
begin
induction n using nat.strong_induction_on with n ih,
have hn' : 0 < n := pos_of_gt hn,
have hn'' : 1 < n := one_lt_two.trans hn,
dsimp at ih,
have := prod_cyclotomic_eq_geom_sum hn' R,
apply_fun eval x at this,
rw [divisors_eq_proper_divisors_insert_self_of_pos hn', insert_sdiff_of_not_mem,
prod_insert, eval_mul, eval_geom_sum] at this,
rotate,
{ simp only [lt_self_iff_false, mem_sdiff, not_false_iff, mem_proper_divisors, and_false,
false_and]},
{ simpa only [mem_singleton] using hn''.ne' },
rcases lt_trichotomy 0 (geom_sum x n) with h | h | h,
{ apply pos_of_mul_pos_right,
{ rwa this },
rw eval_prod,
refine prod_nonneg (λ i hi, _),
simp only [mem_sdiff, mem_proper_divisors, mem_singleton] at hi,
rw geom_sum_pos_iff hn'' at h,
cases h with hk hx,
{ refine (ih _ hi.1.2 (nat.two_lt_of_ne _ hi.2 _)).le; rintro rfl,
{ exact hn'.ne' (zero_dvd_iff.mp hi.1.1) },
{ exact even_iff_not_odd.mp (even_iff_two_dvd.mpr hi.1.1) hk } },
{ rcases eq_or_ne i 2 with rfl | hk,
{ simpa only [eval_X, eval_one, cyclotomic_two, eval_add] using hx.le },
refine (ih _ hi.1.2 (nat.two_lt_of_ne _ hi.2 hk)).le,
rintro rfl,
exact (hn'.ne' $ zero_dvd_iff.mp hi.1.1) } },
{ rw [eq_comm, geom_sum_eq_zero_iff_neg_one hn''] at h,
exact h.1.symm ▸ cyclotomic_neg_one_pos hn },
{ apply pos_of_mul_neg_left,
{ rwa this },
rw [geom_sum_neg_iff hn''] at h,
have h2 : {2} ⊆ n.proper_divisors \ {1},
{ rw [singleton_subset_iff, mem_sdiff, mem_proper_divisors, not_mem_singleton],
exact ⟨⟨h.1, hn⟩, (nat.one_lt_bit0 one_ne_zero).ne'⟩ },
rw [eval_prod, ←prod_sdiff h2, prod_singleton]; try { apply_instance },
apply mul_nonpos_of_nonneg_of_nonpos,
{ refine prod_nonneg (λ i hi, le_of_lt _),
simp only [mem_sdiff, mem_proper_divisors, mem_singleton] at hi,
refine ih _ hi.1.1.2 (nat.two_lt_of_ne _ hi.1.2 hi.2),
rintro rfl,
rw zero_dvd_iff at hi,
exact hn'.ne' hi.1.1.1 },
{ simpa only [eval_X, eval_one, cyclotomic_two, eval_add] using h.right.le } }
end
lemma eval_one_cyclotomic_not_prime_pow {R : Type*} [comm_ring R] {n : ℕ}
(h : ∀ {p : ℕ}, p.prime → ∀ k : ℕ, p ^ k ≠ n) : eval 1 (cyclotomic n R) = 1 :=
begin
rcases n.eq_zero_or_pos with rfl | hn',
{ simp },
have hn : 2 < n := two_lt_of_ne hn'.ne' (h nat.prime_two 0).symm (h nat.prime_two 1).symm,
have hn'' : 1 < n := by linarith,
suffices : eval 1 (cyclotomic n ℤ) = 1 ∨ eval 1 (cyclotomic n ℤ) = -1,
{ cases this with h h,
{ have := eval_int_cast_map (int.cast_ring_hom R) (cyclotomic n ℤ) 1,
simpa only [map_cyclotomic, int.cast_one, h, ring_hom.eq_int_cast] using this },
{ exfalso,
linarith [cyclotomic_pos hn (1 : ℤ)] }, },
rw [←int.nat_abs_eq_nat_abs_iff, int.nat_abs_one, nat.eq_one_iff_not_exists_prime_dvd],
intros p hp hpe,
haveI := fact.mk hp,
have hpn : p ∣ n,
{ apply hpe.trans,
nth_rewrite 1 ←int.nat_abs_of_nat n,
rw [int.nat_abs_dvd_iff_dvd, ←int.nat_cast_eq_coe_nat,
←one_geom_sum, ←eval_geom_sum, ←prod_cyclotomic_eq_geom_sum hn'],
apply eval_dvd,
apply finset.dvd_prod_of_mem,
simpa using and.intro hn'.ne' hn''.ne' },
have := prod_cyclotomic_eq_geom_sum hn' ℤ,
apply_fun eval 1 at this,
rw [eval_geom_sum, one_geom_sum, eval_prod, eq_comm,
←finset.prod_sdiff $ range_pow_padic_val_nat_subset_divisors' p, finset.prod_image] at this,
simp_rw [eval_one_cyclotomic_prime_pow, finset.prod_const, finset.card_range, mul_comm] at this,
rw [←finset.prod_sdiff $ show {n} ⊆ _, from _] at this,
any_goals {apply_instance},
swap,
{ simp only [not_exists, true_and, exists_prop, dvd_rfl, finset.mem_image, finset.mem_range,
finset.mem_singleton, finset.singleton_subset_iff, finset.mem_sdiff, nat.mem_divisors, not_and],
exact ⟨⟨hn'.ne', hn''.ne'⟩, λ t _, h hp _⟩ },
rw [←int.nat_abs_of_nat p, int.nat_abs_dvd_iff_dvd] at hpe,
obtain ⟨t, ht⟩ := hpe,
rw [finset.prod_singleton, ht, mul_left_comm, mul_comm, ←mul_assoc, mul_assoc] at this,
simp only [int.nat_cast_eq_coe_nat] at *,
have : (p ^ (padic_val_nat p n) * p : ℤ) ∣ n := ⟨_, this⟩,
simp only [←pow_succ', ←int.nat_abs_dvd_iff_dvd, int.nat_abs_of_nat, int.nat_abs_pow] at this,
exact pow_succ_padic_val_nat_not_dvd hn' this,
{ rintro x - y - hxy,
apply nat.succ_injective,
exact nat.pow_right_injective hp.two_le hxy }
end
end polynomial
|
{"author": "Mel-TunaRoll", "repo": "Lean-Mordell-Weil-Mel-Branch", "sha": "4db36f86423976aacd2c2968c4e45787fcd86b97", "save_path": "github-repos/lean/Mel-TunaRoll-Lean-Mordell-Weil-Mel-Branch", "path": "github-repos/lean/Mel-TunaRoll-Lean-Mordell-Weil-Mel-Branch/Lean-Mordell-Weil-Mel-Branch-4db36f86423976aacd2c2968c4e45787fcd86b97/src/ring_theory/polynomial/cyclotomic/eval.lean"}
|
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as jsp
import jaxtorch
import math
def alpha_sigma_to_t(alpha, sigma):
return jnp.arctan2(sigma, alpha) * 2 / math.pi
def get_cosine_alphas_sigmas(t):
return jnp.cos(t * math.pi/2), jnp.sin(t * math.pi/2)
def get_ddpm_alphas_sigmas(t, initial_snr=10.0):
    # use explicit jnp ops rather than torch-style array methods
    log_snrs = -jnp.log(jnp.expm1(1e-4 + initial_snr * t**2))
alphas_squared = jax.nn.sigmoid(log_snrs)
sigmas_squared = jax.nn.sigmoid(-log_snrs)
    return jnp.sqrt(alphas_squared), jnp.sqrt(sigmas_squared)
def ddpm_t_to_cosine(t, initial_snr=10.0):
alpha, sigma = get_ddpm_alphas_sigmas(t, initial_snr)
return alpha_sigma_to_t(alpha, sigma)
def cosine_t_to_ddpm(t, initial_snr=10.0):
alpha, sigma = get_cosine_alphas_sigmas(t)
log_snr = jnp.log(alpha**2 / sigma**2)
    return jnp.sqrt(jnp.clip((jnp.log1p(jnp.exp(-log_snr)) - 1e-4) / initial_snr, 0.0, 1.0))
class NoiseSchedule(object):
def to_cosine(self, t):
raise NotImplementedError
def from_cosine(self, t):
raise NotImplementedError
def to_alpha_sigma(self, t):
return get_cosine_alphas_sigmas(self.to_cosine(t))
def from_alpha_sigma(self, alpha, sigma):
return self.from_cosine(alpha_sigma_to_t(alpha, sigma))
class Cosine(NoiseSchedule):
def to_cosine(self, t):
return t
def from_cosine(self, t):
return t
class DDPM(NoiseSchedule):
def __init__(self, initial_snr=10.0):
self.initial_snr = initial_snr
def to_cosine(self, t):
return ddpm_t_to_cosine(t, self.initial_snr)
def to_alpha_sigma(self, t):
return get_ddpm_alphas_sigmas(t, self.initial_snr)
def from_cosine(self, t):
return cosine_t_to_ddpm(t, self.initial_snr)
class LinearLogSnr(NoiseSchedule):
def __init__(self, initial_snr=10.0, final_snr=-10):
self.initial_snr = initial_snr
self.final_snr = final_snr
def to_cosine(self, t):
alpha, sigma = self.to_alpha_sigma(t)
return jnp.arctan2(sigma, alpha) * 2 / math.pi
def to_alpha_sigma(self, t):
log_snrs = self.initial_snr * (1-t) + self.final_snr * t
alphas_squared = jax.nn.sigmoid(log_snrs)
sigmas_squared = jax.nn.sigmoid(-log_snrs)
        return jnp.sqrt(alphas_squared), jnp.sqrt(sigmas_squared)
    def from_cosine(self, t):
        alpha, sigma = get_cosine_alphas_sigmas(t)
        log_snr = jnp.log(alpha**2 / sigma**2)
        ct = (log_snr - self.initial_snr) / (self.final_snr - self.initial_snr)
        return jnp.clip(ct, 0.0, 1.0)
class Spliced(NoiseSchedule):
# Fixed to initial_snr=10 for now.
def to_cosine(self, t):
crossover_ddpm = 0.48536712
crossover_cosine = 0.80074257
big_t = t * (crossover_cosine + 1 - crossover_ddpm)
return jnp.where(big_t < crossover_cosine,
big_t,
ddpm_t_to_cosine(big_t - crossover_cosine + crossover_ddpm)
)
cosine = Cosine()
ddpm = DDPM()
ddpm2 = DDPM(14.0)
spliced = Spliced()
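# Hedged usage sketch (values illustrative): the schedules interconvert through
# the shared cosine parameterization, so a timestep maps between them.
#
#   t = jnp.linspace(0.01, 0.99, 5)
#   alpha, sigma = ddpm.to_alpha_sigma(t)  # noise level under the DDPM schedule
#   t_cos = ddpm.to_cosine(t)              # equivalent cosine-schedule timestep
#   t_back = ddpm.from_cosine(t_cos)       # approximately recovers t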
|
{"hexsha": "5b704328fc842b579ff0350df5841f51cd3ef233", "size": 3040, "ext": "py", "lang": "Python", "max_stars_repo_path": "jax-diffusion/jax-guided-diffusion/diffusion_models/schedules.py", "max_stars_repo_name": "Baughn/nixgan", "max_stars_repo_head_hexsha": "20639e37f8263187ef3928fa91974e9d9d0848d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jax-diffusion/jax-guided-diffusion/diffusion_models/schedules.py", "max_issues_repo_name": "Baughn/nixgan", "max_issues_repo_head_hexsha": "20639e37f8263187ef3928fa91974e9d9d0848d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jax-diffusion/jax-guided-diffusion/diffusion_models/schedules.py", "max_forks_repo_name": "Baughn/nixgan", "max_forks_repo_head_hexsha": "20639e37f8263187ef3928fa91974e9d9d0848d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7777777778, "max_line_length": 84, "alphanum_fraction": 0.6690789474, "include": true, "reason": "import numpy,import jax", "num_tokens": 888}
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Created By : Francisco Miras García <francisco.mirasg@gmail.com>
# version ='1.0'
# ---------------------------------------------------------------------------
"""
# Code for the ACTIVITY exercise
1.- Build a motion detector on a manually selected region of interest of the image.
2.- Save 2 or 3 seconds of the detected sequence to a video file.
Optional: show the selected object with the background suppressed.
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import enum
import cv2 as cv
import numpy as np
from umucv.stream import autoStream, putText
from umucv.util import ROI, Video
from collections import deque
# ---------------------------------------------------------------------------
# DATA
# ---------------------------------------------------------------------------
TIME_OF_RECORDING = 2  # Recording time in seconds
FPS = 30  # Camera FPS
TOTAL_FRAMES_SAVE = TIME_OF_RECORDING * FPS  # Number of frames needed to cover the required time
SUAVIZAR_MEDIA = 10  # Number of past means kept as reference
'''
The average of the last few means is used to smooth out frame differences under
variable lighting, natural or from a camera with flickering or autofocus.
'''
UMBRAL_INICIAL_DETEC = 10  # Initial value of the detection threshold
UMBRAL_INICIAL_RECORTE = 100  # Initial value of the crop threshold
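# Sketch of the smoothing idea used in the main loop below (illustrative, not
# part of the original code): the detector compares the current frame-difference
# mean against the average of the last SUAVIZAR_MEDIA means, damping flicker
# and slow exposure drift.
#
#   last_means = deque(maxlen=SUAVIZAR_MEDIA)
#   reference = np.mean(last_means) if last_means else current_mean
#   motion = abs(reference - current_mean) > threshold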
cv.namedWindow('input')
# ---------------------------------------------------------------------------
# Class
# ---------------------------------------------------------------------------
class Estado(enum.Enum):  # Enumeration of the detector states
ACTIVITY = 1
END = 2
class ControlAct:  # Container class for the variables used during execution
region: ROI
saved_trozo: np.ndarray
last_frames: deque
last_mean: deque
umbral_deteccion: float
umbral_recorte: float
video = None
estado: Estado
    def __init__(self, roi=None):
        if roi is None:
            raise Exception('Invalid ROI')
        self.region = roi
        self.saved_trozo = None
        self.last_frames = deque(maxlen=TOTAL_FRAMES_SAVE)
        self.last_mean = deque(maxlen=SUAVIZAR_MEDIA)
        self.umbral_deteccion = UMBRAL_INICIAL_DETEC / 1000
        # fix: initialize the crop threshold too (it was left undefined until
        # the trackbar callback first fired)
        self.umbral_recorte = UMBRAL_INICIAL_RECORTE / 1000
        self.estado = Estado.END
def reset_trozo(self):
self.saved_trozo = None
self.last_frames.clear()
self.last_mean.clear()
    def start_video(self):  # Start the video and immediately save the last buffered seconds
if self.video or len(self.last_frames) < 1:
return
self.video = Video(fps=FPS, codec="MJPG", ext="avi")
self.video.ON = True
for f in self.last_frames:
self.video.write(f)
self.last_frames.clear()
    def continue_video(self, f):  # Append the current frame
if self.video:
self.video.write(f)
    def stop_video(self):  # Stop the video
self.video.ON = False
self.video.release()
self.video = None
data = ControlAct(ROI('input'))
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def update_umbral(v):
v = max(1, v)
data.umbral_deteccion = v / 1000
    print(f'Detection threshold set to {data.umbral_deteccion}')
def update_umbral_recorte(v):
v = max(1, v)
data.umbral_recorte = v / 1000
    print(f'Crop threshold set to {data.umbral_recorte}')
def bgr2gray(x):
return cv.cvtColor(x, cv.COLOR_BGR2GRAY).astype(float) / 255
def gray2bgr(x):
r = cv.cvtColor(np.float32(x), cv.COLOR_GRAY2BGR).astype(float)
return r
# ---------------------------------------------------------------------------
# INIT
# ---------------------------------------------------------------------------
cv.createTrackbar('Umbral Deteccion', 'input', UMBRAL_INICIAL_DETEC, 1000, update_umbral)
cv.createTrackbar('Umbral Recorte', 'input', UMBRAL_INICIAL_RECORTE, 1000, update_umbral_recorte)
# ---------------------------------------------------------------------------
# CODE
# ---------------------------------------------------------------------------
for key, frame in autoStream():
    if data.region.roi:  # If a region has been selected, crop it
        [x1, y1, x2, y2] = data.region.roi
        recorte = frame[y1:y2, x1:x2]
        if key == ord('x'):  # Press x to clear the region
            data.region.roi = None
            data.reset_trozo()
        if key == ord('c'):  # Press c to capture the region
            if data.saved_trozo is not None:
                data.reset_trozo()
            data.saved_trozo = recorte
            cv.imshow('Trozo Seleccionado', data.saved_trozo)
        if data.saved_trozo is not None:  # If a snapshot is stored, start surveillance
            diff = cv.absdiff(data.saved_trozo, recorte)  # Difference between the current crop and the stored one
            diff = bgr2gray(diff)
            mean = np.mean(diff)
            means = mean  # The mean approximates how much the frame changed w.r.t. the reference
            if len(data.last_mean) >= SUAVIZAR_MEDIA:  # With enough history, smooth the means over time
                means = np.mean(data.last_mean)
            if np.abs(means - mean) <= data.umbral_deteccion:  # If the variation is not large enough
                data.last_mean.append(mean)
                data.last_frames.append(recorte)  # Update the means and store the frame
                if data.estado is not Estado.END:  # If we come from an activity state
                    data.stop_video()  # Stop the recording and destroy leftover windows
                    print('Activity ended')
                    data.estado = Estado.END
                    try:
                        cv.destroyWindow('mascara')
                        cv.destroyWindow('objeto')
                    except Exception:
                        pass
            else:  # There is a significant difference
                mask = diff > data.umbral_recorte  # Build the mask
                cv.imshow('mascara', mask.astype(float))
                mask = gray2bgr(mask)
                objeto = np.zeros_like(recorte)  # Cut the object out with the mask
                np.copyto(objeto, recorte, where=mask == 1)
                cv.imshow('objeto', objeto)
                putText(diff, 'ALERT', orig=(5, diff.shape[0] - 5))  # Signal the alert
                if data.estado is Estado.END:  # If we are not recording yet, start
                    print('Activity detected')
                    data.start_video()
                    data.continue_video(recorte)
                    data.estado = Estado.ACTIVITY
                elif data.estado is Estado.ACTIVITY:  # Keep recording
                    data.continue_video(recorte)
putText(diff, f'Mean(t) = {np.round(means, 4)}', orig=(5, 16 * 2))
putText(diff, f'Mean = {np.round(mean, 4)}', orig=(5, 16))
cv.imshow('Diferencia', diff)
data.last_frames.append(recorte)
cv.rectangle(frame, (x1, y1), (x2, y2), color=(0, 255, 255), thickness=2)
putText(frame, f'{x2 - x1 + 1}x{y2 - y1 + 1}', orig=(x1, y1 - 8))
    # Show the main window
cv.imshow('input', frame)
cv.destroyAllWindows()
|
{"hexsha": "3c7a8a22a65bafe8f876f646fe66ee15cc2de4fc", "size": 7593, "ext": "py", "lang": "Python", "max_stars_repo_path": "2021-2022/Entregas/ACTIVIDAD.py", "max_stars_repo_name": "franciscomirasg/umucv", "max_stars_repo_head_hexsha": "703629d5152d55d00821aee02d30fbb3cca1b73e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2021-2022/Entregas/ACTIVIDAD.py", "max_issues_repo_name": "franciscomirasg/umucv", "max_issues_repo_head_hexsha": "703629d5152d55d00821aee02d30fbb3cca1b73e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2021-2022/Entregas/ACTIVIDAD.py", "max_forks_repo_name": "franciscomirasg/umucv", "max_forks_repo_head_hexsha": "703629d5152d55d00821aee02d30fbb3cca1b73e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.776119403, "max_line_length": 116, "alphanum_fraction": 0.5386540234, "include": true, "reason": "import numpy", "num_tokens": 1765}
|
#include <iostream>
#include <string>
#include <vector>
#include <boost/lockfree/queue.hpp>
#include "test/timed-throughput-fixture.h"
#include "test/timed-throughput.h"
#include "util/parse-cmd-line.h"
#include "util/util.h"
using boost::lockfree::queue;
using std::string;
using std::vector;
using test::timed_throughput;
using test::timed_throughput_fixture;
using util::CmdLineOpts;
#define MAX(a, b) ((a) > (b) ? (a) : (b))
typedef timed_throughput_fixture<queue<int>, int,
queue<int>, int> Fixture;
int main(int argc, char **argv) {
CmdLineOpts opts;
if (parseCmdLineOpts(argc, argv, opts) != 0)
return 1;
if (opts.use_large_test) {
std::cerr << "Large tests not supported" << std::endl;
return 1;
}
int max_num_threads = MAX(opts.num_producers, opts.num_consumers);
boost::lockfree::queue<int> small_queue(max_num_threads);
int small_item = 0;
Fixture fixture(&small_queue, &small_item, &small_queue, &small_item);
if (fixture.run(argc, argv))
return 0;
else
return 1;
}
|
{"hexsha": "d0beac22f1299a3fdf9188207a8178dd399c61c1", "size": 1060, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/boost-lockfree-queue-timed-test.cc", "max_stars_repo_name": "cookyt/parallel-multi-queue", "max_stars_repo_head_hexsha": "1543cc66815c7fbb4cd8e896ce2a9ce56e6213db", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/boost-lockfree-queue-timed-test.cc", "max_issues_repo_name": "cookyt/parallel-multi-queue", "max_issues_repo_head_hexsha": "1543cc66815c7fbb4cd8e896ce2a9ce56e6213db", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2015-02-01T21:27:03.000Z", "max_issues_repo_issues_event_max_datetime": "2015-02-01T21:27:03.000Z", "max_forks_repo_path": "src/boost-lockfree-queue-timed-test.cc", "max_forks_repo_name": "cookyt/parallel-multi-queue", "max_forks_repo_head_hexsha": "1543cc66815c7fbb4cd8e896ce2a9ce56e6213db", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0909090909, "max_line_length": 72, "alphanum_fraction": 0.6811320755, "num_tokens": 292}
|
# coding: utf-8
# # Convolutional neural network example with per-layer visualization
# In[1]:
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
get_ipython().magic(u'matplotlib inline')
print ("Current TensorFlow version: [%s]" % (tf.__version__))
print ("All packages loaded")
# ## Load MNIST
# In[2]:
mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
print ("MNIST ready")
# ## Define the model
# In[3]:
# NETWORK TOPOLOGIES
n_input = 784
n_channel = 64
n_classes = 10
# INPUTS AND OUTPUTS
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# NETWORK PARAMETERS
stddev = 0.1
weights = {
'c1': tf.Variable(tf.random_normal([7, 7, 1, n_channel], stddev=stddev)),
    'd1': tf.Variable(tf.random_normal([14*14*n_channel, n_classes], stddev=stddev))  # 14*14 after one 2x2 max-pool on 28x28
}
biases = {
'c1': tf.Variable(tf.random_normal([n_channel], stddev=stddev)),
'd1': tf.Variable(tf.random_normal([n_classes], stddev=stddev))
}
print ("NETWORK READY")
# ## Define the graph
# In[4]:
# MODEL
def CNN(_x, _w, _b):
# RESHAPE
_x_r = tf.reshape(_x, shape=[-1, 28, 28, 1])
# CONVOLUTION
_conv1 = tf.nn.conv2d(_x_r, _w['c1'], strides=[1, 1, 1, 1], padding='SAME')
# ADD BIAS
_conv2 = tf.nn.bias_add(_conv1, _b['c1'])
# RELU
_conv3 = tf.nn.relu(_conv2)
# MAX-POOL
_pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# VECTORIZE
_dense = tf.reshape(_pool, [-1, _w['d1'].get_shape().as_list()[0]])
# DENSE
_logit = tf.add(tf.matmul(_dense, _w['d1']), _b['d1'])
_out = {
'x_r': _x_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3
, 'pool': _pool, 'dense': _dense, 'logit': _logit
}
return _out
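# Shape walk-through for a batch of size B with the hyperparameters above
# (illustrative): x (B, 784) -> x_r (B, 28, 28, 1) -> conv1/2/3 (B, 28, 28, 64)
# -> pool (B, 14, 14, 64) -> dense (B, 12544) -> logit (B, 10).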
# PREDICTION
cnnout = CNN(x, weights, biases)
# LOSS AND OPTIMIZER
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=y, logits=cnnout['logit']))
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
corr = tf.equal(tf.argmax(cnnout['logit'], 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(corr, "float"))
# INITIALIZER
init = tf.global_variables_initializer()
print ("FUNCTIONS READY")
# ## Saving
# In[5]:
savedir = "nets/cnn_mnist_simple/"
saver = tf.train.Saver(max_to_keep=3)
save_step = 4
if not os.path.exists(savedir):
os.makedirs(savedir)
print ("SAVER READY")
# ## Run
# In[6]:
# PARAMETERS
training_epochs = 20
batch_size = 100
display_step = 4
# LAUNCH THE GRAPH
sess = tf.Session()
sess.run(init)
# OPTIMIZE
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# ITERATION
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feeds = {x: batch_xs, y: batch_ys}
sess.run(optm, feed_dict=feeds)
avg_cost += sess.run(cost, feed_dict=feeds)
avg_cost = avg_cost / total_batch
# DISPLAY
if (epoch+1) % display_step == 0:
print ("Epoch: %03d/%03d cost: %.9f" % (epoch+1, training_epochs, avg_cost))
feeds = {x: batch_xs, y: batch_ys}
train_acc = sess.run(accr, feed_dict=feeds)
print ("TRAIN ACCURACY: %.3f" % (train_acc))
feeds = {x: mnist.test.images, y: mnist.test.labels}
test_acc = sess.run(accr, feed_dict=feeds)
print ("TEST ACCURACY: %.3f" % (test_acc))
# SAVE
if (epoch+1) % save_step == 0:
savename = savedir+"net-"+str(epoch+1)+".ckpt"
saver.save(sess, savename)
print ("[%s] SAVED." % (savename))
print ("OPTIMIZATION FINISHED")
# ## Restore
# In[7]:
do_restore = 0
if do_restore == 1:
sess = tf.Session()
epoch = 20
savename = savedir+"net-"+str(epoch)+".ckpt"
saver.restore(sess, savename)
print ("NETWORK RESTORED")
else:
print ("DO NOTHING")
# ## How the CNN works
# In[8]:
input_r = sess.run(cnnout['x_r'], feed_dict={x: trainimg[0:1, :]})
conv1 = sess.run(cnnout['conv1'], feed_dict={x: trainimg[0:1, :]})
conv2 = sess.run(cnnout['conv2'], feed_dict={x: trainimg[0:1, :]})
conv3 = sess.run(cnnout['conv3'], feed_dict={x: trainimg[0:1, :]})
pool = sess.run(cnnout['pool'], feed_dict={x: trainimg[0:1, :]})
dense = sess.run(cnnout['dense'], feed_dict={x: trainimg[0:1, :]})
out = sess.run(cnnout['logit'], feed_dict={x: trainimg[0:1, :]})
# ## Input
# In[9]:
print ("Size of 'input_r' is %s" % (input_r.shape,))
label = np.argmax(trainlabel[0, :])
print ("Label is %d" % (label))
# PLOT
plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray'))
plt.title("Label of this image is " + str(label) + "")
plt.colorbar()
plt.show()
# # CONV: the convolution layer
# In[10]:
print ("SIZE OF 'CONV1' IS %s" % (conv1.shape,))
for i in range(3):
plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv1")
plt.colorbar()
plt.show()
# ## CONV + BIAS
# In[11]:
print ("SIZE OF 'CONV2' IS %s" % (conv2.shape,))
for i in range(3):
plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv2")
plt.colorbar()
plt.show()
# ## CONV + BIAS + RELU
# In[12]:
print ("SIZE OF 'CONV3' IS %s" % (conv3.shape,))
for i in range(3):
plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv3")
plt.colorbar()
plt.show()
# ## POOL
# In[13]:
print ("SIZE OF 'POOL' IS %s" % (pool.shape,))
for i in range(3):
plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th pool")
plt.colorbar()
plt.show()
# ## DENSE
# In[14]:
print ("SIZE OF 'DENSE' IS %s" % (dense.shape,))
print ("SIZE OF 'OUT' IS %s" % (out.shape,))
plt.matshow(out, cmap=plt.get_cmap('gray'))
plt.title("OUT")
plt.colorbar()
plt.show()
# ## CONVOLUTION FILTER (kernels)
# In[15]:
wc1 = sess.run(weights['c1'])
print ("SIZE OF 'WC1' IS %s" % (wc1.shape,))
for i in range(3):
plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray'))
plt.title(str(i) + "th conv filter")
plt.colorbar()
plt.show()
|
{"hexsha": "49a609df234971809a85e6b79debab66bf53284e", "size": 6151, "ext": "py", "lang": "Python", "max_stars_repo_path": "04_CNN_advances/cnn_mnist_simple.py", "max_stars_repo_name": "jastarex/DL_Notes", "max_stars_repo_head_hexsha": "4da8c5c90283d25655abde95263e44432aad343a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 203, "max_stars_repo_stars_event_min_datetime": "2017-11-19T08:45:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T08:39:02.000Z", "max_issues_repo_path": "04_CNN_advances/cnn_mnist_simple.py", "max_issues_repo_name": "datianshi21/DeepLearningCourseCodes", "max_issues_repo_head_hexsha": "4da8c5c90283d25655abde95263e44432aad343a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-09-19T17:18:46.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-23T02:30:05.000Z", "max_forks_repo_path": "04_CNN_advances/cnn_mnist_simple.py", "max_forks_repo_name": "datianshi21/DeepLearningCourseCodes", "max_forks_repo_head_hexsha": "4da8c5c90283d25655abde95263e44432aad343a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 145, "max_forks_repo_forks_event_min_datetime": "2017-11-19T17:21:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T08:39:01.000Z", "avg_line_length": 23.3878326996, "max_line_length": 93, "alphanum_fraction": 0.6163225492, "include": true, "reason": "import numpy", "num_tokens": 2074}
|
// Copyright (c) 2001-2011 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if !defined(BOOST_SPIRIT_KARMA_REAL_POLICIES_MAR_02_2007_0936AM)
#define BOOST_SPIRIT_KARMA_REAL_POLICIES_MAR_02_2007_0936AM
#if defined(_MSC_VER)
#pragma once
#endif
#include <boost/config/no_tr1/cmath.hpp>
#include <boost/math/special_functions/fpclassify.hpp>
#include <boost/type_traits/remove_const.hpp>
#include <boost/spirit/home/support/char_class.hpp>
#include <boost/spirit/home/karma/generator.hpp>
#include <boost/spirit/home/karma/char.hpp>
#include <boost/spirit/home/karma/numeric/int.hpp>
#include <boost/spirit/home/karma/numeric/detail/real_utils.hpp>
#include <boost/mpl/bool.hpp>
namespace boost { namespace spirit { namespace karma
{
///////////////////////////////////////////////////////////////////////////
//
// real_policies, if you need special handling of your floating
// point numbers, just overload this policy class and use it as a template
// parameter to the karma::real_generator floating point specifier:
//
// template <typename T>
// struct scientific_policy : karma::real_policies<T>
// {
// // we want the numbers always to be in scientific format
// static int floatfield(T n) { return fmtflags::scientific; }
// };
//
// typedef
// karma::real_generator<double, scientific_policy<double> >
// science_type;
//
// karma::generate(sink, science_type(), 1.0); // will output: 1.0e00
//
///////////////////////////////////////////////////////////////////////////
template <typename T>
struct real_policies
{
///////////////////////////////////////////////////////////////////////
// Expose the data type the generator is targeted at
///////////////////////////////////////////////////////////////////////
typedef T value_type;
///////////////////////////////////////////////////////////////////////
// By default the policy doesn't require any special iterator
// functionality. The floating point generator exposes its properties
// from here, so this needs to be updated in case other properties
// need to be implemented.
///////////////////////////////////////////////////////////////////////
typedef mpl::int_<generator_properties::no_properties> properties;
///////////////////////////////////////////////////////////////////////
// Specifies, which representation type to use during output
// generation.
///////////////////////////////////////////////////////////////////////
struct fmtflags
{
enum {
scientific = 0, // Generate floating-point values in scientific
// format (with an exponent field).
fixed = 1 // Generate floating-point values in fixed-point
// format (with no exponent field).
};
};
///////////////////////////////////////////////////////////////////////
// This is the main function used to generate the output for a
// floating point number. It is called by the real generator in order
// to perform the conversion. In theory all of the work can be
        // implemented here, but it is easiest to use the existing
// functionality provided by the type specified by the template
// parameter `Inserter`.
//
// sink: the output iterator to use for generation
// n: the floating point number to convert
// p: the instance of the policy type used to instantiate this
// floating point generator.
///////////////////////////////////////////////////////////////////////
template <typename Inserter, typename OutputIterator, typename Policies>
static bool
call (OutputIterator& sink, T n, Policies const& p)
{
return Inserter::call_n(sink, n, p);
}
///////////////////////////////////////////////////////////////////////
        // The default behavior is not to require generating a sign. If
// 'force_sign()' returns true, then all generated numbers will
// have a sign ('+' or '-', zeros will have a space instead of a sign)
//
// n The floating point number to output. This can be used to
// adjust the required behavior depending on the value of
// this number.
///////////////////////////////////////////////////////////////////////
static bool force_sign(T)
{
return false;
}
///////////////////////////////////////////////////////////////////////
// Return whether trailing zero digits have to be emitted in the
// fractional part of the output. If set, this flag instructs the
// floating point generator to emit trailing zeros up to the required
// precision digits (as returned by the precision() function).
//
// n The floating point number to output. This can be used to
// adjust the required behavior depending on the value of
// this number.
///////////////////////////////////////////////////////////////////////
static bool trailing_zeros(T)
{
// the default behavior is not to generate trailing zeros
return false;
}
///////////////////////////////////////////////////////////////////////
// Decide, which representation type to use in the generated output.
//
// By default all numbers having an absolute value of zero or in
// between 0.001 and 100000 will be generated using the fixed format,
// all others will be generated using the scientific representation.
//
// The function trailing_zeros() can be used to force the output of
// trailing zeros in the fractional part up to the number of digits
// returned by the precision() member function. The default is not to
// generate the trailing zeros.
//
// n The floating point number to output. This can be used to
// adjust the formatting flags depending on the value of
// this number.
///////////////////////////////////////////////////////////////////////
static int floatfield(T n)
{
if (traits::test_zero(n))
return fmtflags::fixed;
T abs_n = traits::get_absolute_value(n);
return (abs_n >= 1e5 || abs_n < 1e-3)
? fmtflags::scientific : fmtflags::fixed;
}
///////////////////////////////////////////////////////////////////////
// Return the maximum number of decimal digits to generate in the
// fractional part of the output.
//
// n The floating point number to output. This can be used to
// adjust the required precision depending on the value of
// this number. If the trailing zeros flag is specified the
// fractional part of the output will be 'filled' with
// zeros, if appropriate
//
// Note: If the trailing_zeros flag is not in effect additional
// comments apply. See the comment for the fraction_part()
// function below. Moreover, this precision will be limited
// to the value of std::numeric_limits<T>::digits10 + 1
///////////////////////////////////////////////////////////////////////
static unsigned precision(T)
{
// by default, generate max. 3 fractional digits
return 3;
}
///////////////////////////////////////////////////////////////////////
// Generate the integer part of the number.
//
// sink The output iterator to use for generation
// n The absolute value of the integer part of the floating
// point number to convert (always non-negative).
// sign The sign of the overall floating point number to
// convert.
// force_sign Whether a sign has to be generated even for
// non-negative numbers
///////////////////////////////////////////////////////////////////////
template <typename OutputIterator>
static bool integer_part (OutputIterator& sink, T n, bool sign
, bool force_sign)
{
return sign_inserter::call(
sink, traits::test_zero(n), sign, force_sign) &&
int_inserter<10>::call(sink, n);
}
///////////////////////////////////////////////////////////////////////
// Generate the decimal point.
//
// sink The output iterator to use for generation
// n The fractional part of the floating point number to
// convert. Note that this number is scaled such, that
// it represents the number of units which correspond
// to the value returned from the precision() function
// earlier. I.e. a fractional part of 0.01234 is
// represented as 1234 when the 'Precision' is 5.
// precision The number of digits to emit as returned by the
// function 'precision()' above
//
        //          This is given to allow deciding whether a decimal
        //          point has to be generated at all.
//
// Note: If the trailing_zeros flag is not in effect additional
// comments apply. See the comment for the fraction_part()
// function below.
///////////////////////////////////////////////////////////////////////
template <typename OutputIterator>
static bool dot (OutputIterator& sink, T /*n*/, unsigned /*precision*/)
{
return char_inserter<>::call(sink, '.'); // generate the dot by default
}
///////////////////////////////////////////////////////////////////////
// Generate the fractional part of the number.
//
// sink The output iterator to use for generation
// n The fractional part of the floating point number to
// convert. This number is scaled such, that it represents
// the number of units which correspond to the 'Precision'.
// I.e. a fractional part of 0.01234 is represented as 1234
// when the 'precision_' parameter is 5.
// precision_ The corrected number of digits to emit (see note
// below)
// precision The number of digits to emit as returned by the
// function 'precision()' above
//
// Note: If trailing_zeros() does not return true the 'precision_'
// parameter will have been corrected from the value the
// precision() function returned earlier (defining the maximal
// number of fractional digits) in the sense, that it takes into
// account trailing zeros. I.e. a floating point number 0.0123
// and a value of 5 returned from precision() will result in:
//
// trailing_zeros is not specified:
// n 123
// precision_ 4
//
// trailing_zeros is specified:
// n 1230
// precision_ 5
//
///////////////////////////////////////////////////////////////////////
template <typename OutputIterator>
static bool fraction_part (OutputIterator& sink, T n
, unsigned precision_, unsigned precision)
{
// allow for ADL to find the correct overload for floor and log10
using namespace std;
// The following is equivalent to:
// generate(sink, right_align(precision, '0')[ulong], n);
// but it's spelled out to avoid inter-modular dependencies.
typename remove_const<T>::type digits =
(traits::test_zero(n) ? 0 : floor(log10(n))) + 1;
bool r = true;
for (/**/; r && digits < precision_; digits = digits + 1)
r = char_inserter<>::call(sink, '0');
if (precision && r)
r = int_inserter<10>::call(sink, n);
return r;
}
///////////////////////////////////////////////////////////////////////
// Generate the exponential part of the number (this is called only
// if the floatfield() function returned the 'scientific' flag).
//
// sink The output iterator to use for generation
// n The (signed) exponential part of the floating point
// number to convert.
//
// The Tag template parameter is either of the type unused_type or
// describes the character class and conversion to be applied to any
// output possibly influenced by either the lower[...] or upper[...]
// directives.
///////////////////////////////////////////////////////////////////////
template <typename CharEncoding, typename Tag, typename OutputIterator>
static bool exponent (OutputIterator& sink, long n)
{
long abs_n = traits::get_absolute_value(n);
bool r = char_inserter<CharEncoding, Tag>::call(sink, 'e') &&
sign_inserter::call(sink, traits::test_zero(n)
, traits::test_negative(n), false);
// the C99 Standard requires at least two digits in the exponent
if (r && abs_n < 10)
r = char_inserter<CharEncoding, Tag>::call(sink, '0');
return r && int_inserter<10>::call(sink, abs_n);
}
///////////////////////////////////////////////////////////////////////
// Print the textual representations for non-normal floats (NaN and
// Inf)
//
// sink The output iterator to use for generation
// n The (signed) floating point number to convert.
// force_sign Whether a sign has to be generated even for
// non-negative numbers
//
// The Tag template parameter is either of the type unused_type or
// describes the character class and conversion to be applied to any
// output possibly influenced by either the lower[...] or upper[...]
// directives.
//
// Note: These functions get called only if fpclassify() returned
// FP_INFINITY or FP_NAN.
///////////////////////////////////////////////////////////////////////
template <typename CharEncoding, typename Tag, typename OutputIterator>
static bool nan (OutputIterator& sink, T n, bool force_sign)
{
return sign_inserter::call(
sink, false, traits::test_negative(n), force_sign) &&
string_inserter<CharEncoding, Tag>::call(sink, "nan");
}
template <typename CharEncoding, typename Tag, typename OutputIterator>
static bool inf (OutputIterator& sink, T n, bool force_sign)
{
return sign_inserter::call(
sink, false, traits::test_negative(n), force_sign) &&
string_inserter<CharEncoding, Tag>::call(sink, "inf");
}
};
}}}
#endif // defined(BOOST_SPIRIT_KARMA_REAL_POLICIES_MAR_02_2007_0936AM)
|
{"hexsha": "139856dbdbad2412a729355a61fd3bcbd67c8a97", "size": 16610, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "master/core/third/boost/spirit/home/karma/numeric/real_policies.hpp", "max_stars_repo_name": "importlib/klib", "max_stars_repo_head_hexsha": "a59837857689d0e60d3df6d2ebd12c3160efa794", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 460.0, "max_stars_repo_stars_event_min_datetime": "2016-01-13T12:49:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T04:10:40.000Z", "max_issues_repo_path": "master/core/third/boost/spirit/home/karma/numeric/real_policies.hpp", "max_issues_repo_name": "isuhao/klib", "max_issues_repo_head_hexsha": "a59837857689d0e60d3df6d2ebd12c3160efa794", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 197.0, "max_issues_repo_issues_event_min_datetime": "2017-07-06T16:53:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-31T17:57:51.000Z", "max_forks_repo_path": "master/core/third/boost/spirit/home/karma/numeric/real_policies.hpp", "max_forks_repo_name": "isuhao/klib", "max_forks_repo_head_hexsha": "a59837857689d0e60d3df6d2ebd12c3160efa794", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 148.0, "max_forks_repo_forks_event_min_datetime": "2016-01-17T03:16:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T12:20:36.000Z", "avg_line_length": 49.7305389222, "max_line_length": 86, "alphanum_fraction": 0.4836845274, "num_tokens": 3080}
|
function [newnode,newelem,newelem0]=surfboolean(node,elem,varargin)
%
% [newnode,newelem,newelem0]=surfboolean(node1,elem1,op2,node2,elem2,op3,node3,elem3,...)
%
% merge two or more triangular meshes and resolve intersecting elements
%
% author: Qianqian Fang <fangq at nmr.mgh.harvard.edu>
%
% input:
% node: node coordinates, dimension (nn,3)
% elem: tetrahedral element or triangle surface (ne,3)
% op: a string of a boolean operator, possible op values include
%               'union' or 'or': the outer surface of the union of the enclosed space
% 'inter' or 'and': the surface of the domain contained by both meshes
% 'diff' or '-': the surface of the domain in mesh 1 excluding that of
% mesh 2
% 'all' or 'xor' or '+': the output contains 4 subsurfaces, identified by the 4th
% column of newelem:
% 1: mesh 1 outside of mesh 2
% 2: mesh 2 outside of mesh 1
% 3: mesh 1 inside of mesh 2
% 4: mesh 2 inside of mesh 1
% you can use newelem(find(mod(newelem(:,4),2)==1),:) to
% get mesh 1 cut by mesh 2, or newelem(find(mod(newelem(:,4),2)==0),:)
% to get mesh 2 cut by mesh 1;
% 'first': combine 1 and 3 from the output of 'all'
% 'second': combine 2 and 4 from the output of 'all'
% 'self': test for self-intersections; only the first mesh is
% tested; other inputs are ignored.
% 'decouple': separate two shells and make sure there is no intersection;
% the input surfaces must be closed and ordered from outer to inner
%
% output:
% newnode: the node coordinates after boolean operations, dimension (nn,3)
% newelem: tetrahedral element or surfaces after boolean operations (nn,4) or (nhn,5)
% newelem0: when the operator is 'self', return the intersecting
% element list in terms of the input node list (experimental)
%
% example:
%
% [node1,face1,elem1]=meshabox([0 0 0],[10 10 10],1,1);
% [node2,face2,elem2]=meshabox([0 0 0]+5,[10 10 10]+5,1,1);
% [newnode,newface]=surfboolean(node1,face1,'union',node2,face2);
% plotmesh(newnode,newface);
% figure;
% [newnode,newface]=surfboolean(node1,face1,'diff',node2,face2);
% plotmesh(newnode,newface,'x>5');
%
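%   a hedged sketch of the 'decouple' op (the shells are hypothetical closed
%   surfaces ordered from outer to inner; this call is not in the original help):
%   [newnode,newface]=surfboolean(node_out,face_out,'decouple',node_in,face_in);
%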
% -- this function is part of iso2mesh toolbox (http://iso2mesh.sf.net)
%
allinputs=varargin;
opt=struct;
if(length(allinputs)>0 && isstruct(allinputs{end}))
opt=allinputs{end};
allinputs{end}=[];
end
len=length(varargin);
newnode=node;
newelem=elem;
if(len>0 && mod(len,3)~=0)
    error('you must give operator, node and element in triplet form');
end
exesuff=fallbackexeext(getexeext,'gtsset');
for i=1:3:len
op=varargin{i};
no=varargin{i+1};
el=varargin{i+2};
opstr=op;
if(strcmp(op,'or')) opstr='union'; end
if(strcmp(op,'xor')) opstr='all'; end
if(strcmp(op,'and')) opstr='inter'; end
if(strcmp(op,'-')) opstr='diff'; end
if(strcmp(op,'self')) opstr='inter -s'; end
if(strcmp(op,'first') || strcmp(op,'second') || strcmp(op,'+'))
opstr='all';
end
deletemeshfile(mwpath('pre_surfbool*.gts'));
deletemeshfile(mwpath('post_surfbool.off'));
if(strcmp(opstr,'all'))
deletemeshfile(mwpath('s1out2.off'));
deletemeshfile(mwpath('s1in2.off'));
deletemeshfile(mwpath('s2out1.off'));
deletemeshfile(mwpath('s2in1.off'));
end
if(strcmp(op,'decouple'))
if(exist('node1','var')==0)
node1=node;
elem1=elem;
newnode(:,4)=1;
newelem(:,4)=1;
end
opstr=['-q --shells 2'];
saveoff(node1(:,1:3),elem1(:,1:3),mwpath('pre_decouple1.off'));
if(isstruct(el))
if(isfield(el,'MoreOptions'))
opstr=[opstr el.MoreOptions];
end
else
opstr=[opstr ' --decouple-inin 1'];
end
if(size(no,2)~=3)
opstr=['-q --shells ' num2str(no)];
cmd=sprintf('cd "%s" && "%s%s" "%s" %s',mwpath,mcpath('meshfix'),exesuff,...
mwpath('pre_decouple1.off'),opstr);
else
saveoff(no(:,1:3),el(:,1:3),mwpath('pre_decouple2.off'));
cmd=sprintf('cd "%s" && "%s%s" "%s" "%s" %s',mwpath,mcpath('meshfix'),exesuff,...
mwpath('pre_decouple1.off'),mwpath('pre_decouple2.off'),opstr);
end
else
savegts(newnode(:,1:3),newelem(:,1:3),mwpath('pre_surfbool1.gts'));
savegts(no(:,1:3),el(:,1:3),mwpath('pre_surfbool2.gts'));
cmd=sprintf('cd "%s" && "%s%s" %s "%s" "%s" -v > "%s"',mwpath,mcpath('gtsset'),exesuff,...
opstr,mwpath('pre_surfbool1.gts'),mwpath('pre_surfbool2.gts'),mwpath('post_surfbool.off'));
end
[status outstr]=system(cmd);
if(status~=0 && strcmp(op,'self')==0)
error(sprintf('surface boolean command failed:\n%s\nERROR: %s\n',cmd,outstr));
end
if(status~=0 && strcmp(op,'self') && ~isempty(strfind(outstr,'(new_ear): assertion failed')))
fprintf(1,'no self-intersection was found! (ignore the above error)\n');
newnode=[];
newelem=[];
newelem0=[];
return;
end
if(strcmp(opstr,'all'))
      % tag the 4 pieces of mesh; these tags do not propagate to the next boolean operation
[nnode nelem]=readoff(mwpath('s1out2.off'));
newelem=[nelem ones(size(nelem,1),1)];
newnode=[nnode ones(size(nnode,1),1)];
[nnode nelem]=readoff(mwpath('s1in2.off'));
newelem=[newelem; nelem+size(newnode,1) 3*ones(size(nelem,1),1)];
newnode=[newnode; nnode 3*ones(size(nnode,1),1)];
[nnode nelem]=readoff(mwpath('s2out1.off'));
newelem=[newelem; nelem+size(newnode,1) 2*ones(size(nelem,1),1)];
newnode=[newnode; nnode 2*ones(size(nnode,1),1)];
[nnode nelem]=readoff(mwpath('s2in1.off'));
newelem=[newelem; nelem+size(newnode,1) 4*ones(size(nelem,1),1)];
newnode=[newnode; nnode 4*ones(size(nnode,1),1)];
if(strcmp(op,'first'))
newelem=newelem(find(mod(newelem(:,4),2)==1),:);
[newnode,nelem]=removeisolatednode(newnode,newelem(:,1:3));
newelem=[nelem newelem(:,4)];
elseif(strcmp(op,'second'))
newelem=newelem(find(mod(newelem(:,4),2)==0),:);
[newnode,nelem]=removeisolatednode(newnode,newelem(:,1:3));
newelem=[nelem,newelem(:,4)];
end
elseif(strcmp(op,'decouple'))
[node1,elem1]=readoff(mwpath('pre_decouple1_fixed.off'));
newelem=[newelem;elem1+size(newnode,1) (i+1)*ones(size(elem1,1),1)];
newnode=[newnode;node1 (i+1)*ones(size(node1,1),1)];
else
[newnode,newelem]=readoff(mwpath('post_surfbool.off'));
if(strcmp(op,'self'))
fprintf(1,'a total of %d self-intersecting elements were found\n',size(newelem,1));
if(nargout>=3)
[found,newelem0]=ismember(newnode,node,'rows');
if(~all(found))
error('self intersecting elements contain new nodes');
end
newelem0=newelem0(newelem);
end
return;
end
end
end
|
{"author": "vigente", "repo": "gerardus", "sha": "4d7c5195b826967781f1bb967872410e66b7cd3d", "save_path": "github-repos/MATLAB/vigente-gerardus", "path": "github-repos/MATLAB/vigente-gerardus/gerardus-4d7c5195b826967781f1bb967872410e66b7cd3d/matlab/ThirdPartyToolbox/Iso2meshToolbox/surfboolean.m"}
|
[STATEMENT]
lemma conjugate_char_1:
"conjugate f g \<longleftrightarrow> (\<forall>x y . f(x \<sqinter> -(g y)) \<le> f x \<sqinter> -y \<and> g(y \<sqinter> -(f x)) \<le> g y \<sqinter> -x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. conjugate f g = (\<forall>x y. f (x \<sqinter> - g y) \<le> f x \<sqinter> - y \<and> g (y \<sqinter> - f x) \<le> g y \<sqinter> - x)
[PROOF STEP]
by (simp add: conjugate_char_1_pp)
|
{"llama_tokens": 187, "file": "Stone_Algebras_P_Algebras", "length": 1}
|
import numpy as np
import pandas as pd
import torch
import torchvision
from am_utils.utils import walk_dir
from torch.utils.data import DataLoader
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from tqdm import tqdm
from ..dataset.dataset_object_inference import DatasetObjectInference, DatasetObjectInferenceMosaic
from ..transforms.bbox import get_test_transform
from ..utils.utils import collate_fn
from ..utils.utils import remove_overlapping_boxes, get_boxes_above_threshold
def get_df_of_file_list(input_dir, id_name='image_id'):
"""
    List files in the given folder and generate a dataframe for the data loader.
Parameters
----------
input_dir : str
Input directory
id_name : str, optional
Column name to specify image ID.
Default is 'image_id'
Returns
-------
pd.DataFrame
Dataframe with a list of input files.
"""
files = walk_dir(input_dir)
files = [fn[len(input_dir) + 1:] for fn in files]
df = pd.DataFrame({id_name: files})
return df
def load_detection_model(model_fn, num_classes=2, device=None):
"""
Load the object detection model from a given file.
Parameters
----------
model_fn : str
Model filename with the full path.
num_classes : int, optional
Number of classes in the object detection model.
Default is 2 (one class + background).
device : torch.device
Device to send the model to ('cpu' or 'cuda').
If None, the device will be detected automatically.
Default is None.
Returns
-------
model:
Torch model with loaded weights.
"""
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# Load the trained weights
model.load_state_dict(torch.load(model_fn))
model.eval()
if device is None:
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)
return model
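# Hedged usage sketch (the weights path is hypothetical):
#
#   model = load_detection_model('weights/fasterrcnn.pth', num_classes=2)
#   # the model is returned in eval mode on the detected device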
def detect_bboxes(input_dir, model_fn, batch_size=2, maxsize=None,
detection_threshold=0.5, overlap_threshold=0.1, id_name='image_id'):
"""
    Detect object bounding boxes in all images in a given directory and return a dataframe with the results.
Parameters
----------
input_dir : str
Input directory.
model_fn : str
Model filename with the full path.
batch_size : int, optional
Batch size for predictions.
Default is 2.
maxsize : int, optional
Pad the input image to a square with this size.
Default is None.
detection_threshold : float, optional
Threshold (between 0 and 1) for the confidence of the bounding boxes.
Bounding boxes with a confidence score lower than `detection_threshold` will not be included.
Default is 0.5.
overlap_threshold : float, optional
Maximum allowed intersection-over-union (IOU) score for two bounding boxes.
        If two boxes overlap with a higher score, the box with the lower confidence score will be removed. Default is 0.1.
id_name : str, optional
Column name to specify image ID.
Default is 'image_id'
Returns
-------
pd.DataFrame
Dataframe with detected bounding box coordinates.
"""
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = load_detection_model(model_fn, device=device)
loader_kwargs = dict(batch_size=batch_size,
shuffle=False,
num_workers=batch_size,
drop_last=False)
df = get_df_of_file_list(input_dir)
ds = DatasetObjectInference(df, input_dir,
get_test_transform(),
maxsize=maxsize)
dl = DataLoader(ds, collate_fn=collate_fn, **loader_kwargs)
results = pd.DataFrame()
for images, image_ids in tqdm(dl):
images = list(image.to(device) for image in images)
outputs = model(images)
for i in range(len(outputs)):
bboxes, scores = get_boxes_above_threshold(outputs[i], detection_threshold)
bboxes, scores = remove_overlapping_boxes(bboxes, scores,
overlap_threshold, return_full=True)
bboxes = bboxes[scores > 0].data.cpu().numpy()
scores = scores[scores > 0].data.cpu().numpy()
results = __append_detections(bboxes, scores, results, image_ids[i], id_name)
return results
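# A minimal usage sketch (paths are hypothetical); the returned dataframe
# has columns x1, y1, x2, y2, scores and the image ID column:
#
#   df_boxes = detect_bboxes('/path/to/images', '/path/to/weights.pth',
#                            batch_size=2, detection_threshold=0.5)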
def __append_detections(bboxes, scores, results, image_id, id_name):
cur_results = pd.DataFrame(np.int_(np.round_(bboxes)), columns=['x1', 'y1', 'x2', 'y2'])
cur_results['scores'] = scores
cur_results[id_name] = image_id
results = pd.concat([results, cur_results], ignore_index=True)
return results
def __get_mosaic_df(df, imgshape, maxsize):
step = int(maxsize / 2)
ind_i = np.arange(int(imgshape[0] / step + 1)) * step if imgshape[0] > maxsize else [0]
ind_j = np.arange(int(imgshape[1] / step + 1)) * step if imgshape[1] > maxsize else [0]
boxes = []
for i in ind_i:
for j in ind_j:
boxes.append([j, i, j + maxsize, i + maxsize])
df_new = pd.DataFrame()
for i in range(len(df)):
cur_df = pd.DataFrame(boxes, columns=['x1', 'y1', 'x2', 'y2'])
cur_df['image_id'] = df.iloc[i]['image_id']
df_new = pd.concat([df_new, cur_df], ignore_index=True)
return df_new
def __add_shift(boxes, shift):
boxes[:, 0] += shift[0]
boxes[:, 2] += shift[0]
boxes[:, 1] += shift[1]
boxes[:, 3] += shift[1]
return boxes
def detect_bboxes_mosaic(input_dir, model_fn, maxsize, imgshape, batch_size=2,
detection_threshold=0.5, overlap_threshold=0.1, id_name='image_id'):
"""
    Detect object bounding boxes in all images in a given directory and return a dataframe with the results.
Parameters
----------
input_dir : str
Input directory.
model_fn : str
Model filename with the full path.
maxsize : int
Pad the input image to a square with this size.
imgshape : tuple
Shape of the input image.
        If larger than `maxsize`, the mosaic option will be used to crop ROIs of size `maxsize`.
batch_size : int, optional
Batch size for predictions.
Default is 2.
detection_threshold : float, optional
Threshold (between 0 and 1) for the confidence of the bounding boxes.
Bounding boxes with a confidence score lower than `detection_threshold` will not be included.
Default is 0.5.
overlap_threshold : float, optional
Maximum allowed intersection-over-union (IOU) score for two bounding boxes.
        If two boxes overlap with a higher score, the box with the lower confidence score will be removed.
        Default is 0.1.
id_name : str, optional
Column name to specify image ID.
Default is 'image_id'
Returns
-------
pd.DataFrame
Dataframe with detected bounding box coordinates.
"""
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = load_detection_model(model_fn, device=device)
loader_kwargs = dict(batch_size=batch_size,
shuffle=False,
num_workers=batch_size,
drop_last=False)
df = __get_mosaic_df(get_df_of_file_list(input_dir), imgshape, maxsize)
ds = DatasetObjectInferenceMosaic(df, input_dir,
get_test_transform(),
maxsize=maxsize)
dl = DataLoader(ds, collate_fn=collate_fn, **loader_kwargs)
results = pd.DataFrame()
for images, image_ids, start_coord in tqdm(dl):
images = list(image.to(device) for image in images)
outputs = model(images)
for i in range(len(outputs)):
bboxes, scores = get_boxes_above_threshold(outputs[i], detection_threshold)
bboxes = bboxes.data.cpu().numpy()
scores = scores.data.cpu().numpy()
bboxes = __add_shift(bboxes, start_coord[i])
results = __append_detections(bboxes, scores, results, image_ids[i], id_name)
results2 = pd.DataFrame()
for image_id in results['image_id'].unique():
cur_df = results[results['image_id'] == image_id]
bboxes = torch.tensor(cur_df[['x1', 'y1', 'x2', 'y2']].values).to(device)
scores = torch.tensor(cur_df['scores'].values).to(device)
bboxes, scores = remove_overlapping_boxes(bboxes, scores,
overlap_threshold, return_full=True)
bboxes = bboxes[scores > 0].data.cpu().numpy()
scores = scores[scores > 0].data.cpu().numpy()
results2 = __append_detections(bboxes, scores, results2, image_id, id_name)
return results2
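# A minimal usage sketch for images larger than the network input (values are
# hypothetical): the image is tiled into overlapping crops of size `maxsize`,
# detections are shifted back to global coordinates and merged:
#
#   df_boxes = detect_bboxes_mosaic('/path/to/images', '/path/to/weights.pth',
#                                   maxsize=512, imgshape=(2048, 2048))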
|
{"hexsha": "29fee7debf540ea734dd7914a8a88c2d382b7fc6", "size": 9089, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml_utils/predict/predict_bbox.py", "max_stars_repo_name": "amedyukhina/ml_utils", "max_stars_repo_head_hexsha": "00176a015ff3b38f28637e66d4c89ec111247806", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ml_utils/predict/predict_bbox.py", "max_issues_repo_name": "amedyukhina/ml_utils", "max_issues_repo_head_hexsha": "00176a015ff3b38f28637e66d4c89ec111247806", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ml_utils/predict/predict_bbox.py", "max_forks_repo_name": "amedyukhina/ml_utils", "max_forks_repo_head_hexsha": "00176a015ff3b38f28637e66d4c89ec111247806", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7136929461, "max_line_length": 109, "alphanum_fraction": 0.6445153482, "include": true, "reason": "import numpy", "num_tokens": 2068}
|
function [normalized_speed, actual_speed] = normalize_speed(speed, failures, skipping, tracker, sequence)
% normalize_speed Normalizes tracker speed estimate
%
% This function normalizes speed estimates based on performance profile and some information about
% the way the measurement was obtained (sequence, number of failures, frame skipping).
%
% Input:
% - speed (double): The initial speed estimate.
% - failures (double): Number of failures of the tracker.
% - skipping (integer): Number of skipped frames after each failure.
% - tracker (structure): A valid tracker descriptor.
% - sequence (structure): A valid sequence descriptor.
%
% Output:
% - normalized_speed (double): Normalized speed estimate.
% - actual_speed (double): Corrected raw speed based on the supplied information.
%
if ~isfield(tracker, 'performance')
error('Tracker %s has no performance profile, unable to normalize speed.', tracker.identifier);
end;
performance = tracker.performance;
factor = performance.nonlinear_native;
startup = 0;
if strcmpi(tracker.interpreter, 'matlab')
if isfield(performance, 'matlab_startup')
startup = performance.matlab_startup;
else
model = get_global_variable('matlab_startup_model', []);
if ~isempty(model)
startup = model(1) * performance.reading + model(2);
end;
end;
end
failure_count = cellfun(@(x) numel(x), failures, 'UniformOutput', true);
if tracker.trax
actual_length = sequence.length - (skipping - 1) * failure_count;
full_length = sequence.length;
startup_time = startup * (1 + failure_count);
else
full_length = cellfun(@(x) sum(sequence.length - x - (skipping - 1)), failures, 'UniformOutput', true) + sequence.length;
actual_length = full_length;
startup_time = startup * (1 + failure_count);
end;
actual_speed = (((speed .* full_length) - startup_time) ./ actual_length);
normalized_speed = actual_speed / factor;
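% A minimal usage sketch (tracker and sequence are assumed to be valid
% descriptors, failures a cell array of failure frames; the raw speed and
% skipping values are hypothetical):
%
%   [norm_fps, raw_fps] = normalize_speed(25.0, failures, 5, tracker, sequence);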
|
{"author": "votchallenge", "repo": "toolkit-legacy", "sha": "2fb78d5301dadc102fb329b3a3f1bb02c670e8ee", "save_path": "github-repos/MATLAB/votchallenge-toolkit-legacy", "path": "github-repos/MATLAB/votchallenge-toolkit-legacy/toolkit-legacy-2fb78d5301dadc102fb329b3a3f1bb02c670e8ee/analysis/normalize_speed.m"}
|
"""Errand OpenAcc backend module
"""
import os
import numpy
from errand.backend import CppBackendBase, cpp_varclass_template
from errand.compiler import Compilers
from errand.system import select_system
from errand.util import which
struct_template = """
typedef struct arguments {{
{args}
}} ARGSTYPE;
typedef struct wrap_args {{
ARGSTYPE * data;
int tid;
int state;
}} WRAPARGSTYPE;
"""
host_vardef_template = """
{vartype} {varname} = {vartype}();
"""
varglobal_template = """
ARGSTYPE struct_args = {{
{varassign}
}};
"""
h2dcopy_template = """
extern "C" int {name}(void * data, void * _attrs, int attrsize) {{
{hvar}.data = ({dtype} *) data;
{hvar}._attrs = (int *) malloc(attrsize * sizeof(int));
memcpy({hvar}._attrs, _attrs, attrsize * sizeof(int));
return 0;
}}
"""
h2dmalloc_template = """
extern "C" int {name}(void * data, void * _attrs, int attrsize) {{
{hvar}.data = ({dtype} *) data;
{hvar}._attrs = (int *) malloc(attrsize * sizeof(int));
memcpy({hvar}._attrs, _attrs, attrsize * sizeof(int));
return 0;
}}
"""
d2hcopy_template = """
extern "C" int {name}(void * data) {{
return 0;
}}
"""
devfunc_template = """
void * _kernel(void * ptr){{
{argdef}
WRAPARGSTYPE * args = (WRAPARGSTYPE *)ptr;
args->state = 1;
{argassign}
#pragma acc enter data create({creates})
#pragma acc update device({dev_updates})
#pragma acc parallel num_gangs({ngangs}) num_workers({nworkers}) \
vector_length({veclen})
{{
{body}
}}
#pragma acc update self ({host_updates})
#pragma acc exit data delete({deletes})
args->state = 2;
isfinished = 1;
return NULL;
}}
"""
calldevmain_template = """
pthread_t thread;
WRAPARGSTYPE args;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
args.tid = 0;
args.state = 0;
args.data = &struct_args;
pthread_create(&thread, &attr, _kernel, &args);
while (args.state == 0) {{
do {{ }} while(0);
}}
"""
class OpenAccCppBackend(CppBackendBase):
name = "openacc-c++"
codeext = "cpp"
libext = "so"
def __init__(self, workdir, compile):
compilers = Compilers(self.name, compile)
targetsystem = select_system("cpu")
super(OpenAccCppBackend, self).__init__(workdir, compilers,
targetsystem)
#def compiler_option(self):
# return self.option + "--compiler-options '-fPIC' --shared"
def code_header(self):
return """
#include <pthread.h>
#include <errno.h>
#include <unistd.h>
#include "string.h"
#include "stdlib.h"
#include "stdio.h"
"""
def getname_h2dcopy(self, arg):
return "h2dcopy_%s" % arg["curname"]
def getname_h2dmalloc(self, arg):
return "h2dmalloc_%s" % arg["curname"]
def getname_d2hcopy(self, arg):
return "d2hcopy_%s" % arg["curname"]
def getname_vartype(self, arg, devhost):
ndim, dname = self.getname_argpair(arg)
return "%s_%s_dim%s" % (devhost, dname, ndim)
def getname_var(self, arg, devhost):
return devhost + "_" + arg["curname"]
def len_numpyattrs(self, arg):
return 3 + len(arg["data"].shape)*2
def get_numpyattrs(self, arg):
data = arg["data"]
return ((data.ndim, data.itemsize, data.size) + data.shape +
tuple([int(s//data.itemsize) for s in data.strides]))
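    # Illustration (not part of the original code): for a hypothetical
    # float64 array of shape (2, 3), get_numpyattrs returns
    # (2, 8, 6, 2, 3, 3, 1) -- ndim, itemsize, size, the shape (2, 3),
    # and the strides (3, 1) expressed in elements rather than bytes.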
def code_varclass(self):
dvs = {}
for arg in self.inargs+self.outargs:
ndim, dname = self.getname_argpair(arg)
if dname in dvs:
dvsd = dvs[dname]
else:
dvsd = {}
dvs[dname] = dvsd
if ndim not in dvsd:
oparg = ", ".join(["int dim%d"%d for d in
range(arg["data"].ndim)])
offset = "+".join(["s[%d]*dim%d"%(d,d) for d in
range(arg["data"].ndim)])
attrsize = self.len_numpyattrs(arg)
hvartype = self.getname_vartype(arg, "host")
dvsd[ndim] = cpp_varclass_template.format(vartype=hvartype, oparg=oparg,
offset=offset, funcprefix="", dtype=dname,
attrsize=attrsize)
return "\n".join([y for x in dvs.values() for y in x.values()])
def code_struct(self):
out = []
for arg in self.inargs+self.outargs:
ndim, dname = self.getname_argpair(arg)
out.append("%s * %s;" % (self.getname_vartype(arg, "host"),
self.getname_var(arg, "host")))
#out.append("int tid;")
return struct_template.format(args="\n".join(out))
def code_varglobal(self):
out = []
for arg in self.inargs+self.outargs:
ndim, dname = self.getname_argpair(arg)
varname = self.getname_var(arg, "host")
out.append(".{name} = &{name}".format(name=varname))
return varglobal_template.format(varassign=",\n".join(out))
def code_vardef(self):
out = ""
for arg in self.inargs+self.outargs:
ndim, dname = self.getname_argpair(arg)
out += host_vardef_template.format(vartype=self.getname_vartype(arg,
"host"), varname=self.getname_var(arg, "host"))
return out
def code_devfunc(self):
argdef = []
argassign = []
creates = []
deletes = []
host_updates = []
dev_updates = []
body = str(self.order.get_section(self.name))
for arg in self.inargs+self.outargs:
ndim, dname = self.getname_argpair(arg)
#argdef.append("host_%s_dim%s %s = host_%s_dim%s();" %
# (dname, ndim, arg["curname"], dname, ndim))
argdef.append("host_%s_dim%s %s;" %
(dname, ndim, arg["curname"]))
argassign.append("%s = *(args->data->host_%s);" %
(arg["curname"], arg["curname"]))
accstr = ("{name}.data[0:{name}._attrs[2]], "
"{name}._attrs[0:{name}._attrs[2]]").format(name=arg["curname"])
creates.append(accstr)
deletes.append("{name}.data, {name}._attrs".format(name=arg["curname"]))
for arg in self.inargs:
ndim, dname = self.getname_argpair(arg)
accstr = ("{name}.data[0:{name}._attrs[2]], "
"{name}._attrs[0:{name}._attrs[2]]").format(name=arg["curname"])
dev_updates.append(accstr)
for arg in self.outargs:
ndim, dname = self.getname_argpair(arg)
host_updates.append("{name}.data[0:{name}._attrs[2]]".
format(name=arg["curname"]))
gangs = numpy.prod(self.nteams)
workers = numpy.prod(self.nmembers)
veclen = numpy.prod(self.nassigns)
return devfunc_template.format(argdef="\n".join(argdef), body=body,
argassign="\n".join(argassign),
creates=", \\\n".join(creates),
dev_updates=", \\\n".join(dev_updates),
host_updates=", \\\n".join(host_updates),
deletes=", \\\n".join(deletes),
ngangs=str(gangs), nworkers=str(workers),
veclen=str(veclen))
def code_h2dcopyfunc(self):
out = ""
for arg in self.inargs:
ndim, dname = self.getname_argpair(arg)
fname = self.getname_h2dcopy(arg)
template = self.get_template("h2dcopy")
hvar = self.getname_var(arg, "host")
out += template.format(hvar=hvar, name=fname, dtype=dname)
for arg in self.outargs:
ndim, dname = self.getname_argpair(arg)
fname = self.getname_h2dmalloc(arg)
template = self.get_template("h2dmalloc")
hvar = self.getname_var(arg, "host")
out += template.format(hvar=hvar, name=fname, dtype=dname)
return out
def code_d2hcopyfunc(self):
out = ""
for arg in self.outargs:
ndim, dname = self.getname_argpair(arg)
fname = self.getname_d2hcopy(arg)
template = self.get_template("d2hcopy")
hvar = self.getname_var(arg, "host")
out += template.format(hvar=hvar, name=fname, dtype=dname)
return out
def code_calldevmain(self):
#
# argassign = []
#
# for arg in self.inargs+self.outargs:
#
# args.append(self.getname_var(arg, "host"))
#
# testing
#args.append("1")
return calldevmain_template.format()
    def get_template(self, name):
        if name == "h2dcopy":
            return h2dcopy_template
        elif name == "h2dmalloc":
            return h2dmalloc_template
        elif name == "d2hcopy":
            return d2hcopy_template
        # fail loudly instead of silently returning None for unknown names
        raise ValueError("Unknown template name: %s" % name)
|
{"hexsha": "5fe6dec109aa859da5f214f20499ae74367bcfd4", "size": 8986, "ext": "py", "lang": "Python", "max_stars_repo_path": "errand/openacc_cpp.py", "max_stars_repo_name": "grnydawn/errand", "max_stars_repo_head_hexsha": "19c4fa4bb8c6698d56f2d671c1cba3ee070529ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "errand/openacc_cpp.py", "max_issues_repo_name": "grnydawn/errand", "max_issues_repo_head_hexsha": "19c4fa4bb8c6698d56f2d671c1cba3ee070529ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "errand/openacc_cpp.py", "max_forks_repo_name": "grnydawn/errand", "max_forks_repo_head_hexsha": "19c4fa4bb8c6698d56f2d671c1cba3ee070529ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6191780822, "max_line_length": 88, "alphanum_fraction": 0.5624304474, "include": true, "reason": "import numpy", "num_tokens": 2326}
|
"""
A basic cost function, where the computed cost is the size
(number of children) of the current expression.
"""
function astsize(n::ENode, g::EGraph, an::Type{<:AbstractAnalysis})
cost = 1 + arity(n)
for id ∈ n.args
eclass = geteclass(g, id)
!hasdata(eclass, an) && (cost += Inf; break)
cost += last(getdata(eclass, an))
end
return cost
end
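# A worked sketch of the cost convention (hypothetical node): an e-node
# `f(a, b)` contributes 1 + arity(n) = 3 on its own, plus the best known
# cost of each child e-class; a child with no analysis data yet makes the
# cost infinite, so such nodes are never chosen during extraction.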
"""
A basic cost function, where the computed cost is the size
(number of children) of the current expression, times -1.
Strives to get the largest expression.
"""
astsize_inv(n::ENode, g::EGraph, an::Type{<:AbstractAnalysis}) = -1 * astsize(n, g, an)
"""
An [`AbstractAnalysis`](@ref) that computes the cost of expression nodes
and chooses the node with the smallest cost for each E-Class.
This abstract type is parametrised by a function F.
This is useful for the analysis storage in [`EClass`](@ref)
"""
abstract type ExtractionAnalysis{F} <: AbstractAnalysis end
make(a::Type{ExtractionAnalysis{F}}, g::EGraph, n::ENode) where F = (n, F(n, g, a))
join(a::Type{<:ExtractionAnalysis}, from, to) = last(from) <= last(to) ? from : to
islazy(a::Type{<:ExtractionAnalysis}) = true
function rec_extract(g::EGraph, an::Type{<:ExtractionAnalysis}, id::EClassId)
eclass = geteclass(g, id)
anval = getdata(eclass, an, missing)
if anval === missing
analyze!(g, an, id)
anval = getdata(eclass, an)
end
(cn, ck) = anval
(!isterm(termtype(cn)) || ck == Inf) && return cn.head
extractnode(g, cn, an; eclass=eclass)
end
function extractnode(g::EGraph, n::ENode, an::Type{<:ExtractionAnalysis}; eclass=nothing)
children = map(n.args) do a
rec_extract(g, an, a)
end
meta = nothing
if !isnothing(eclass)
meta = getdata(eclass, MetadataAnalysis, nothing)
end
T = termtype(n)
similarterm(T, n.head, children; metadata = meta)
end
# TODO CUSTOMTYPES document how to for custom types
# TODO maybe extractor can just be the array of extracted children?
function extractnode(g::EGraph, n::ENode{Expr}, extractor::Function)::Expr
return Expr(n.head, map(extractor, n.args)...)
end
function extractnode(g::EGraph, n::ENode{T}, extractor::Function) where T
if arity(n) > 0
error("ENode extraction is not defined for non-literal type $T")
end
return n.head
end
"""
Given an [`ExtractionAnalysis`](@ref), extract the expression
with the smallest computed cost from an [`EGraph`](@ref)
"""
function extract!(g::EGraph, a::Type{ExtractionAnalysis{F}} where F; root=-1)
# @show root g.root
if root == -1
root = g.root
end
# @show root g.root
analyze!(g, a, root)
!(a ∈ g.analyses) && error("Extraction analysis is not associated to EGraph")
rec_extract(g, a, root)
end
"""
Given a cost function, extract the expression
with the smallest computed cost from an [`EGraph`](@ref)
"""
function extract!(g::EGraph, costfun::Function; root=-1)
extran = ExtractionAnalysis{costfun}
extract!(g, extran; root=root)
end
macro extract(expr, theory, costfun)
quote
let g = EGraph($expr)
saturate!(g, $theory)
ex = extract!(g, $costfun)
(g, ex)
end
end |> esc
end
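# A minimal usage sketch (`t` stands for a hypothetical theory of rewrite rules):
#
#   g = EGraph(:(a * 1))
#   saturate!(g, t)
#   ex = extract!(g, astsize)   # smallest equivalent expression, e.g. :a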
|
{"hexsha": "ef189e8d9b2fa96203b41f64285a3f639661adb2", "size": 3239, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/EGraphs/extraction.jl", "max_stars_repo_name": "gpeairs/Metatheory.jl", "max_stars_repo_head_hexsha": "782469676fb01db5eb3dc5f385539830b9116bea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/EGraphs/extraction.jl", "max_issues_repo_name": "gpeairs/Metatheory.jl", "max_issues_repo_head_hexsha": "782469676fb01db5eb3dc5f385539830b9116bea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/EGraphs/extraction.jl", "max_forks_repo_name": "gpeairs/Metatheory.jl", "max_forks_repo_head_hexsha": "782469676fb01db5eb3dc5f385539830b9116bea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7155963303, "max_line_length": 89, "alphanum_fraction": 0.6573016363, "num_tokens": 921}
|
/*
Copyright (c) 2014-15 Ableton AG, Berlin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include "Convert.hpp"
#include "Warnings.hpp"
SUPPRESS_WARNINGS
#include <QtGui/QColor>
#include <QtGui/QFont>
#include <boost/algorithm/clamp.hpp>
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/optional.hpp>
#include <boost/variant/apply_visitor.hpp>
#include <boost/variant/get.hpp>
#include <boost/variant/static_visitor.hpp>
RESTORE_WARNINGS
#include <algorithm>
#include <cmath>
#include <iostream>
#include <string>
#include <unordered_map>
namespace aqt
{
namespace stylesheets
{
namespace
{
SUPPRESS_WARNINGS
const std::string kRgbaColorExpr = "rgba";
const std::string kRgbColorExpr = "rgb";
const std::string kHslaColorExpr = "hsla";
const std::string kHslColorExpr = "hsl";
const std::string kHsbaColorExpr = "hsba";
const std::string kHsbColorExpr = "hsb";
const std::string kUrlExpr = "url";
const std::string kTrue = "true";
const std::string kYes = "yes";
const std::string kFalse = "false";
const std::string kNo = "no";
RESTORE_WARNINGS
/**
If the first token in the list is a font style token,
convert it to a font style and remove it from the list.
*/
QFont::Style takeFontStyleFromTokenList(QStringList& tokens)
{
static const std::map<QString, QFont::Style> dictionary = {
{"italic", QFont::StyleItalic},
{"upright", QFont::StyleNormal},
{"oblique", QFont::StyleOblique}};
return (!tokens.isEmpty() && dictionary.count(tokens.at(0)))
? dictionary.at(tokens.takeFirst())
: QFont::StyleNormal;
}
/**
If the first token in the list is a capitalization style token,
convert it to a capitalization style and remove it from the list.
*/
QFont::Capitalization takeCapitalizationStyleFromTokenList(QStringList& tokens)
{
static const std::map<QString, QFont::Capitalization> dictionary = {
{"mixedcase", QFont::MixedCase},
{"alluppercase", QFont::AllUppercase},
{"alllowercase", QFont::AllLowercase},
{"smallcaps", QFont::SmallCaps},
{"capitalize", QFont::Capitalize}};
return (!tokens.isEmpty() && dictionary.count(tokens.at(0)))
? dictionary.at(tokens.takeFirst())
: QFont::MixedCase;
}
/**
If the first token in the list is a font weight token,
convert it to a font weight and remove it from the list.
*/
QFont::Weight takeFontWeightFromTokenList(QStringList& tokens)
{
static const std::map<QString, QFont::Weight> dictionary = {
{"light", QFont::Light},
{"bold", QFont::Bold},
{"demibold", QFont::DemiBold},
{"black", QFont::Black},
{"regular", QFont::Normal}};
return (!tokens.isEmpty() && dictionary.count(tokens.at(0)))
? dictionary.at(tokens.takeFirst())
: QFont::Normal;
}
/**
If the first token in the list is a font hinting token,
convert it to a font hinting and remove it from the list.
*/
QFont::HintingPreference takeFontHintingFromTokenList(QStringList& tokens)
{
static const std::map<QString, QFont::HintingPreference> dictionary = {
{"defaulthinting", QFont::PreferDefaultHinting},
{"nohinting", QFont::PreferNoHinting},
{"verticalhinting", QFont::PreferVerticalHinting},
{"fullhinting", QFont::PreferFullHinting}};
return (!tokens.isEmpty() && dictionary.count(tokens.at(0)))
? dictionary.at(tokens.takeFirst())
: QFont::PreferDefaultHinting;
}
struct FontSize {
int pixelSize = 0;
qreal pointSize = 0.0;
};
/**
If the first token in the list is a font size token,
convert it to a font size and remove it from the list.
*/
FontSize takeFontSizeFromTokenList(QStringList& tokens)
{
FontSize fontSize;
if (!tokens.isEmpty()) {
const QString sizeStr = tokens.takeFirst();
if (sizeStr.contains(QRegExp("^\\d+px$"))) {
fontSize.pixelSize = sizeStr.split(QRegExp("px")).at(0).toInt();
} else if (sizeStr.contains(QRegExp("^\\d+(\\.\\d+)?pt$"))) {
fontSize.pointSize = sizeStr.split(QRegExp("pt")).at(0).toDouble();
} else {
tokens.prepend(sizeStr);
}
}
return fontSize;
}
/**
Extract the font style from the string.
Font declarations must conform to a limited subset of the W3 font spec
(http://www.w3.org/TR/css3-fonts/#font-prop), see the following:
@code
// <style> <variant> <weight> <size> <family>
// e.g.:
font: "italic smallcaps bold 16px Times New Roman"
@endcode
*/
QFont fontDeclarationToFont(const QString& fontDecl)
{
QStringList tokens = fontDecl.split(QRegExp("\\s* \\s*"), QString::SkipEmptyParts);
const QFont::Style fontStyle = takeFontStyleFromTokenList(tokens);
const QFont::Capitalization capMode = takeCapitalizationStyleFromTokenList(tokens);
const QFont::Weight weight = takeFontWeightFromTokenList(tokens);
const QFont::HintingPreference hinting = takeFontHintingFromTokenList(tokens);
const FontSize size = takeFontSizeFromTokenList(tokens);
const QString familyName = tokens.join(' ');
QFont font(familyName, 0, weight);
if (size.pointSize > 0) {
font.setPointSizeF(size.pointSize);
}
if (size.pixelSize > 0) {
font.setPixelSize(size.pixelSize);
}
font.setCapitalization(capMode);
font.setStyle(fontStyle);
font.setHintingPreference(hinting);
return font;
}
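// A minimal usage sketch (not part of the original file; the declaration
// string is hypothetical):
//
//   QFont f = fontDeclarationToFont("italic smallcaps bold 16px Times New Roman");
//   // -> StyleItalic, SmallCaps, Bold, pixel size 16, family "Times New Roman"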
//----------------------------------------------------------------------------------------
struct Undefined {
};
using ExprValue = boost::variant<Undefined, QColor, QUrl>;
int rgbColorOrPercentage(const std::string& arg)
{
if (!arg.empty() && arg.back() == '%') {
auto factor = boost::lexical_cast<float>(arg.substr(0, arg.size() - 1));
return boost::algorithm::clamp(int(std::round(255 * factor / 100.0f)), 0, 255);
}
return boost::algorithm::clamp(boost::lexical_cast<int>(arg), 0, 255);
}
int transformAlphaFromFloatRatio(const std::string& arg)
{
auto factor = boost::lexical_cast<float>(arg);
return boost::algorithm::clamp(int(std::round(256 * factor)), 0, 255);
}
double hslHue(const std::string& arg)
{
return boost::algorithm::clamp(boost::lexical_cast<int>(arg) / 360.0, 0.0, 1.0);
}
double percentageToFactor(const std::string& arg)
{
if (!arg.empty() && arg.back() == '%') {
return boost::algorithm::clamp(
boost::lexical_cast<int>(arg.substr(0, arg.size() - 1)) / 100.0, 0.0, 1.0);
}
throw boost::bad_lexical_cast();
}
double factorFromFloat(const std::string& arg)
{
return boost::algorithm::clamp(boost::lexical_cast<double>(arg), 0.0, 1.0);
}
ExprValue makeRgbaColor(const std::vector<std::string>& args)
{
if (args.size() == 4u) {
try {
return QColor(rgbColorOrPercentage(args[0]), rgbColorOrPercentage(args[1]),
rgbColorOrPercentage(args[2]), transformAlphaFromFloatRatio(args[3]));
} catch (const boost::bad_lexical_cast&) {
throw ConvertException(
std::string().append(kRgbaColorExpr).append("() expression with bad value"));
}
}
throw ConvertException(
std::string().append(kRgbaColorExpr).append("() expression expects 4 arguments"));
}
ExprValue makeRgbColor(const std::vector<std::string>& args)
{
if (args.size() == 3u) {
try {
return QColor(rgbColorOrPercentage(args[0]), rgbColorOrPercentage(args[1]),
rgbColorOrPercentage(args[2]), 0xff);
} catch (const boost::bad_lexical_cast&) {
throw ConvertException(
std::string().append(kRgbColorExpr).append("() expression with bad value"));
}
}
throw ConvertException(std::string().append(kRgbColorExpr).append("() expression expects 3 arguments"));
}
ExprValue makeHslaColor(const std::vector<std::string>& args)
{
if (args.size() == 4u) {
try {
QColor color;
color.setHslF(hslHue(args[0]), percentageToFactor(args[1]),
percentageToFactor(args[2]), factorFromFloat(args[3]));
return color;
} catch (const boost::bad_lexical_cast&) {
throw ConvertException(
std::string().append(kHslaColorExpr).append("() expression with bad values"));
}
}
throw ConvertException(
std::string().append(kHslaColorExpr).append("() expression expects 4 arguments"));
}
ExprValue makeHslColor(const std::vector<std::string>& args)
{
if (args.size() == 3u) {
try {
QColor color;
color.setHslF(
hslHue(args[0]), percentageToFactor(args[1]), percentageToFactor(args[2]), 1.0);
return color;
} catch (const boost::bad_lexical_cast&) {
throw ConvertException(
std::string().append(kHslColorExpr).append("() expression with bad values"));
}
}
throw ConvertException(
std::string().append(kHslColorExpr).append("() expression expects 3 arguments"));
}
ExprValue makeHsbaColor(const std::vector<std::string>& args)
{
if (args.size() == 4u) {
try {
QColor color;
color.setHsvF(hslHue(args[0]), percentageToFactor(args[1]),
percentageToFactor(args[2]), factorFromFloat(args[3]));
return color;
} catch (const boost::bad_lexical_cast&) {
      throw ConvertException(
        std::string().append(kHsbaColorExpr).append("() expression with bad values"));
    }
  }
  throw ConvertException(
    std::string().append(kHsbaColorExpr).append("() expression expects 4 arguments"));
}
ExprValue makeHsbColor(const std::vector<std::string>& args)
{
if (args.size() == 3u) {
try {
QColor color;
color.setHsvF(
hslHue(args[0]), percentageToFactor(args[1]), percentageToFactor(args[2]), 1.0);
return color;
} catch (const boost::bad_lexical_cast&) {
      throw ConvertException(
        std::string().append(kHsbColorExpr).append("() expression with bad values"));
    }
  }
  throw ConvertException(
    std::string().append(kHsbColorExpr).append("() expression expects 3 arguments"));
}
//------------------------------------------------------------------------------
ExprValue makeUrl(const std::vector<std::string>& args)
{
if (args.size() == 1u) {
return QUrl(QString::fromStdString(args.front()));
}
throw ConvertException(
std::string().append(kUrlExpr).append("() expression expects 1 argument"));
}
//------------------------------------------------------------------------------
ExprValue evaluateExpression(const Expression& expr)
{
using ExprEvaluator = std::function<ExprValue(const std::vector<std::string>&)>;
using FuncMap = std::unordered_map<std::string, ExprEvaluator>;
static FuncMap funcMap = {
{kRgbaColorExpr, &makeRgbaColor},
{kRgbColorExpr, &makeRgbColor},
{kHslaColorExpr, &makeHslaColor},
{kHslColorExpr, &makeHslColor},
{kHsbaColorExpr, &makeHsbaColor},
{kHsbColorExpr, &makeHsbColor},
{kUrlExpr, &makeUrl},
};
auto iFind = funcMap.find(expr.name);
if (iFind != funcMap.end()) {
return iFind->second(expr.args);
}
throw ConvertException(
std::string("Unsupported expression '").append(expr.name).append("'"));
}
struct PropValueVisitor : public boost::static_visitor<boost::optional<QColor>> {
boost::optional<QColor> operator()(const std::string& value)
{
auto qvalue = QVariant(QString::fromStdString(value));
if (qvalue.canConvert(QMetaType::QColor)) {
return qvalue.value<QColor>();
}
return boost::none;
}
boost::optional<QColor> operator()(const Expression& expr)
{
auto value = evaluateExpression(expr);
if (const QColor* color = boost::get<QColor>(&value)) {
return *color;
}
throw ConvertException(
std::string("Not a color expression '").append(expr.name).append("'"));
}
};
} // anon namespace
//------------------------------------------------------------------------------
boost::optional<QFont> PropertyValueConvertTraits<QFont>::convert(
const PropertyValue& value) const
{
if (const std::string* str = boost::get<std::string>(&value)) {
QVariant qvalue = QVariant::fromValue(QString::fromStdString(*str));
if (qvalue.canConvert(QMetaType::QString)) {
return fontDeclarationToFont(qvalue.toString());
}
}
return boost::none;
}
boost::optional<QColor> PropertyValueConvertTraits<QColor>::convert(
const PropertyValue& value) const
{
PropValueVisitor visitor;
return boost::apply_visitor(visitor, value);
}
boost::optional<QString> PropertyValueConvertTraits<QString>::convert(
const PropertyValue& value) const
{
if (const std::string* str = boost::get<std::string>(&value)) {
return QString::fromStdString(*str);
}
return boost::none;
}
boost::optional<double> PropertyValueConvertTraits<double>::convert(
const PropertyValue& value) const
{
if (const std::string* str = boost::get<std::string>(&value)) {
bool ok;
const auto doubleValue = QString::fromStdString(*str).toDouble(&ok);
if (ok) {
return boost::make_optional(doubleValue);
}
}
return boost::none;
}
boost::optional<bool> PropertyValueConvertTraits<bool>::convert(
const PropertyValue& value) const
{
if (const std::string* str = boost::get<std::string>(&value)) {
auto lstr = boost::algorithm::to_lower_copy(*str);
if (lstr == kTrue || lstr == kYes) {
return boost::make_optional(true);
} else if (lstr == kFalse || lstr == kNo) {
return boost::make_optional(false);
}
}
return boost::none;
}
boost::optional<QUrl> PropertyValueConvertTraits<QUrl>::convert(
const PropertyValue& value) const
{
struct PropValueToUrlVisitor : public boost::static_visitor<boost::optional<QUrl>> {
boost::optional<QUrl> operator()(const std::string& str)
{
return QUrl(QString::fromStdString(str));
}
boost::optional<QUrl> operator()(const Expression& expr)
{
auto exprValue = evaluateExpression(expr);
if (const QUrl* url = boost::get<QUrl>(&exprValue)) {
return *url;
}
throw ConvertException(
std::string("Not an url expression '").append(expr.name).append("'"));
}
};
PropValueToUrlVisitor visitor;
return boost::apply_visitor(visitor, value);
}
//----------------------------------------------------------------------------------------
namespace
{
struct PropValueToVariantVisitor : public boost::static_visitor<QVariant> {
QVariant operator()(const std::string& value)
{
return QVariant(QString::fromStdString(value));
}
QVariant operator()(const Expression& expr)
{
struct ExprValueToVariantVisitor : public boost::static_visitor<QVariant> {
QVariant operator()(const Undefined&)
{
return QVariant();
}
QVariant operator()(const QColor& color)
{
return QVariant(color);
}
QVariant operator()(const QUrl& url)
{
return QVariant(url);
}
};
auto exprValue = evaluateExpression(expr);
ExprValueToVariantVisitor visitor;
return boost::apply_visitor(visitor, exprValue);
}
};
} // anon namespace
QVariant convertValueToVariant(const PropertyValue& value)
{
PropValueToVariantVisitor visitor;
return boost::apply_visitor(visitor, value);
}
QVariantList convertValueToVariantList(const PropertyValues& values)
{
QVariantList result;
for (const auto& propValue : values) {
result.push_back(convertValueToVariant(propValue));
}
return result;
}
} // namespace stylesheets
} // namespace aqt
|
{"hexsha": "7f83bb443cdbb05f69696e9902a8dc02f98ea7cb", "size": 16211, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Convert.cpp", "max_stars_repo_name": "FMeinicke/aqt-stylesheets", "max_stars_repo_head_hexsha": "83a26ea9acfef80b98f126f2b706ee7f6175b42a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 141.0, "max_stars_repo_stars_event_min_datetime": "2015-10-07T11:18:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T07:54:57.000Z", "max_issues_repo_path": "src/Convert.cpp", "max_issues_repo_name": "sirvelo/aqt-stylesheets", "max_issues_repo_head_hexsha": "4975488fcc34c52833d73e956eef90cc76ffc0f1", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 33.0, "max_issues_repo_issues_event_min_datetime": "2015-11-18T16:35:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T17:01:34.000Z", "max_forks_repo_path": "src/Convert.cpp", "max_forks_repo_name": "sirvelo/aqt-stylesheets", "max_forks_repo_head_hexsha": "4975488fcc34c52833d73e956eef90cc76ffc0f1", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 38.0, "max_forks_repo_forks_event_min_datetime": "2015-10-07T11:18:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T03:31:55.000Z", "avg_line_length": 29.6904761905, "max_line_length": 106, "alphanum_fraction": 0.6709641601, "num_tokens": 3964}
|
/*
* Copyright (C) 2014-2016 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <boost/filesystem.hpp>
#include "gazebo/gui/Actions.hh"
#include "gazebo/gui/GuiIface.hh"
#include "gazebo/gui/MainWindow.hh"
#include "gazebo/gui/building/BuildingEditorEvents.hh"
#include "gazebo/gui/BuildingEditor_TEST.hh"
#include "test_config.h"
/////////////////////////////////////////////////
void BuildingEditor_TEST::ExitBuildingEditor()
{
this->resMaxPercentChange = 5.0;
this->shareMaxPercentChange = 2.0;
this->Load("worlds/empty.world", false, false, true);
gazebo::gui::MainWindow *mainWindow = new gazebo::gui::MainWindow();
QVERIFY(mainWindow != NULL);
// Create the main window.
mainWindow->Load();
mainWindow->Init();
mainWindow->show();
this->ProcessEventsAndDraw(mainWindow);
// Get the user camera and scene
gazebo::rendering::UserCameraPtr cam = gazebo::gui::get_active_camera();
QVERIFY(cam != NULL);
gazebo::rendering::ScenePtr scene = cam->GetScene();
QVERIFY(scene != NULL);
gazebo::gui::g_editBuildingAct->trigger();
this->ProcessEventsAndDraw(mainWindow);
// simulate events to discard model and exit the building editor
gazebo::gui::editor::Events::newBuildingModel();
gazebo::gui::editor::Events::finishBuildingModel();
this->ProcessEventsAndDraw(mainWindow);
cam->Fini();
mainWindow->close();
delete mainWindow;
}
// Generate a main function for the test
QTEST_MAIN(BuildingEditor_TEST)
|
{"hexsha": "a3fd8a7b5e8a649f11007f3301e45705644dcb98", "size": 2016, "ext": "cc", "lang": "C++", "max_stars_repo_path": "gazebo/gui/BuildingEditor_TEST.cc", "max_stars_repo_name": "otamachan/ros-indigo-gazebo7-deb", "max_stars_repo_head_hexsha": "abc6b40247cdce14d9912096a0ad5135d420ce04", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2017-07-14T19:36:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-01T06:47:59.000Z", "max_issues_repo_path": "gazebo/gui/BuildingEditor_TEST.cc", "max_issues_repo_name": "otamachan/ros-indigo-gazebo7-deb", "max_issues_repo_head_hexsha": "abc6b40247cdce14d9912096a0ad5135d420ce04", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 20.0, "max_issues_repo_issues_event_min_datetime": "2017-07-20T21:04:49.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-19T19:32:38.000Z", "max_forks_repo_path": "gazebo/gui/BuildingEditor_TEST.cc", "max_forks_repo_name": "otamachan/ros-indigo-gazebo7-deb", "max_forks_repo_head_hexsha": "abc6b40247cdce14d9912096a0ad5135d420ce04", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6470588235, "max_line_length": 75, "alphanum_fraction": 0.7162698413, "num_tokens": 488}
|
# TODO
struct GEFile <: MRIFile
filename::String
end
function MRIBase.RawAcquisitionData(f::GEFile)
error("Not yet implemented!")
end
|
{"hexsha": "45eb5b312e1bc8556ac4478b2fec96c653dad72c", "size": 139, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "MRIFiles/src/GE/GE.jl", "max_stars_repo_name": "aTrotier/MRIReco.jl", "max_stars_repo_head_hexsha": "7437e5b41a5fdd0f4dff73a7d1913b78c0285493", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MRIFiles/src/GE/GE.jl", "max_issues_repo_name": "aTrotier/MRIReco.jl", "max_issues_repo_head_hexsha": "7437e5b41a5fdd0f4dff73a7d1913b78c0285493", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MRIFiles/src/GE/GE.jl", "max_forks_repo_name": "aTrotier/MRIReco.jl", "max_forks_repo_head_hexsha": "7437e5b41a5fdd0f4dff73a7d1913b78c0285493", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.4444444444, "max_line_length": 46, "alphanum_fraction": 0.7553956835, "num_tokens": 38}
|
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
import boto3
import time
import re
import warnings
import avro
import socket
import pyarrow as pa
import json
import random
import uuid
import great_expectations as ge
from io import BytesIO
from pyhive import hive
from urllib.parse import urlparse
from typing import TypeVar, Optional, Dict, Any
from confluent_kafka import Producer
from hsfs import client, feature, util
from hsfs.core import (
feature_group_api,
dataset_api,
job_api,
ingestion_job_conf,
kafka_api,
statistics_api,
training_dataset_api,
training_dataset_job_conf,
feature_view_api,
transformation_function_engine,
)
from hsfs.constructor import query
from hsfs.client import exceptions, external, hopsworks
from hsfs.feature_group import FeatureGroup
HAS_FAST = False
try:
from fastavro import schemaless_writer
from fastavro.schema import parse_schema
HAS_FAST = True
except ImportError:
pass
class Engine:
APP_OP_INSERT_FG = "insert_fg"
def __init__(self):
self._dataset_api = dataset_api.DatasetApi()
self._job_api = job_api.JobApi()
self._kafka_api = kafka_api.KafkaApi()
# cache the sql engine which contains the connection pool
self._mysql_online_fs_engine = None
def sql(self, sql_query, feature_store, online_conn, dataframe_type, read_options):
if not online_conn:
return self._sql_offline(sql_query, feature_store, dataframe_type)
else:
return self._jdbc(sql_query, online_conn, dataframe_type, read_options)
def _sql_offline(self, sql_query, feature_store, dataframe_type):
with self._create_hive_connection(feature_store) as hive_conn:
result_df = pd.read_sql(sql_query, hive_conn)
return self._return_dataframe_type(result_df, dataframe_type)
def _jdbc(self, sql_query, connector, dataframe_type, read_options):
if self._mysql_online_fs_engine is None:
self._mysql_online_fs_engine = util.create_mysql_engine(
connector, "external" in read_options and read_options["external"]
)
with self._mysql_online_fs_engine.connect() as mysql_conn:
result_df = pd.read_sql(sql_query, mysql_conn)
return self._return_dataframe_type(result_df, dataframe_type)
def read(self, storage_connector, data_format, read_options, location):
if storage_connector.type == storage_connector.HOPSFS:
df_list = self._read_hopsfs(location, data_format)
elif storage_connector.type == storage_connector.S3:
df_list = self._read_s3(storage_connector, location, data_format)
else:
raise NotImplementedError(
"{} Storage Connectors for training datasets are not supported yet for external environments.".format(
storage_connector.type
)
)
return pd.concat(df_list, ignore_index=True)
def _read_pandas(self, data_format, obj):
if data_format.lower() == "csv":
return pd.read_csv(obj)
elif data_format.lower() == "tsv":
return pd.read_csv(obj, sep="\t")
elif data_format.lower() == "parquet":
return pd.read_parquet(BytesIO(obj.read()))
else:
raise TypeError(
"{} training dataset format is not supported to read as pandas dataframe.".format(
data_format
)
)
def _read_hopsfs(self, location, data_format):
# providing more informative error
try:
from pydoop import hdfs
except ImportError as err:
raise ModuleNotFoundError(
"Reading training dataset from HopsFS requires `pydoop`"
) from err
util.setup_pydoop()
path_list = hdfs.ls(location, recursive=True)
df_list = []
for path in path_list:
if (
hdfs.path.isfile(path)
and not path.endswith("_SUCCESS")
and hdfs.path.getsize(path) > 0
):
df_list.append(self._read_pandas(data_format, path))
return df_list
def _read_s3(self, storage_connector, location, data_format):
# get key prefix
path_parts = location.replace("s3://", "").split("/")
_ = path_parts.pop(0) # pop first element -> bucket
prefix = "/".join(path_parts)
if storage_connector.session_token is not None:
s3 = boto3.client(
"s3",
aws_access_key_id=storage_connector.access_key,
aws_secret_access_key=storage_connector.secret_key,
aws_session_token=storage_connector.session_token,
)
else:
s3 = boto3.client(
"s3",
aws_access_key_id=storage_connector.access_key,
aws_secret_access_key=storage_connector.secret_key,
)
df_list = []
        # boto3's list_objects_v2 response uses the key "IsTruncated"; using it
        # here ensures pagination continues past the first 1000 keys
        object_list = {"IsTruncated": True}
        while object_list.get("IsTruncated", False):
if "NextContinuationToken" in object_list:
object_list = s3.list_objects_v2(
Bucket=storage_connector.bucket,
Prefix=prefix,
MaxKeys=1000,
ContinuationToken=object_list["NextContinuationToken"],
)
else:
object_list = s3.list_objects_v2(
Bucket=storage_connector.bucket,
Prefix=prefix,
MaxKeys=1000,
)
for obj in object_list["Contents"]:
if not obj["Key"].endswith("_SUCCESS") and obj["Size"] > 0:
obj = s3.get_object(
Bucket=storage_connector.bucket,
Key=obj["Key"],
)
df_list.append(self._read_pandas(data_format, obj["Body"]))
return df_list
def read_options(self, data_format, provided_options):
return {}
def read_stream(
self,
storage_connector,
message_format,
schema,
options,
include_metadata,
):
raise NotImplementedError(
"Streaming Sources are not supported for pure Python Environments."
)
def show(self, sql_query, feature_store, n, online_conn):
return self.sql(sql_query, feature_store, online_conn, "default", {}).head(n)
def register_on_demand_temporary_table(self, on_demand_fg, alias):
# No op to avoid query failure
pass
def register_hudi_temporary_table(
self, hudi_fg_alias, feature_store_id, feature_store_name, read_options
):
# No op to avoid query failure
pass
def profile_by_spark(self, metadata_instance):
stat_api = statistics_api.StatisticsApi(
metadata_instance.feature_store_id, metadata_instance.ENTITY_TYPE
)
job = stat_api.compute(metadata_instance)
print(
"Statistics Job started successfully, you can follow the progress at {}".format(
self._get_job_url(job.href)
)
)
self._wait_for_job(job)
def profile(
self,
df,
relevant_columns,
correlations,
histograms,
exact_uniqueness=True,
):
# TODO: add statistics for correlations, histograms and exact_uniqueness
if not relevant_columns:
stats = df.describe()
else:
target_cols = [col for col in df.columns if col in relevant_columns]
stats = df[target_cols].describe()
final_stats = []
for col in stats.columns:
stat = self._convert_pandas_statistics(stats[col].to_dict())
stat["dataType"] = (
"Fractional"
if isinstance(stats[col].dtype, type(np.dtype(np.float64)))
else "Integral"
)
stat["isDataTypeInferred"] = "false"
stat["column"] = col.split(".")[-1]
stat["completeness"] = 1
final_stats.append(stat)
return json.dumps({"columns": final_stats})
def _convert_pandas_statistics(self, stat):
        # For now transformation functions only need the 25th, 50th and 75th percentiles
        # TODO: calculate all percentiles properly
percentiles = [0] * 100
percentiles[24] = stat["25%"]
percentiles[49] = stat["50%"]
percentiles[74] = stat["75%"]
return {
"mean": stat["mean"],
"sum": stat["mean"] * stat["count"],
"maximum": stat["max"],
"stdDev": stat["std"],
"minimum": stat["min"],
"approxPercentiles": percentiles,
}
def validate(self, dataframe: pd.DataFrame, expectations, log_activity=True):
raise NotImplementedError(
"Deequ data validation is only available with Spark Engine. Use validate_with_great_expectations"
)
def validate_with_great_expectations(
self,
dataframe: pd.DataFrame,
expectation_suite: TypeVar("ge.core.ExpectationSuite"),
ge_validate_kwargs: Optional[Dict[Any, Any]] = {},
):
report = ge.from_pandas(
dataframe, expectation_suite=expectation_suite
).validate(**ge_validate_kwargs)
return report
def set_job_group(self, group_id, description):
pass
def convert_to_default_dataframe(self, dataframe):
if isinstance(dataframe, pd.DataFrame):
upper_case_features = [
col for col in dataframe.columns if any(re.finditer("[A-Z]", col))
]
if len(upper_case_features) > 0:
warnings.warn(
"The ingested dataframe contains upper case letters in feature names: `{}`. Feature names are sanitized to lower case in the feature store.".format(
upper_case_features
),
util.FeatureGroupWarning,
)
# making a shallow copy of the dataframe so that column names are unchanged
dataframe_copy = dataframe.copy(deep=False)
dataframe_copy.columns = [x.lower() for x in dataframe_copy.columns]
return dataframe_copy
raise TypeError(
"The provided dataframe type is not recognized. Supported types are: pandas dataframe. "
+ "The provided dataframe has type: {}".format(type(dataframe))
)
def parse_schema_feature_group(self, dataframe):
arrow_schema = pa.Schema.from_pandas(dataframe)
return [
feature.Feature(
feat_name.lower(),
self._convert_pandas_type(feat_name, feat_type, arrow_schema),
)
for feat_name, feat_type in dataframe.dtypes.items()
]
def parse_schema_training_dataset(self, dataframe):
raise NotImplementedError(
"Training dataset creation from Dataframes is not "
+ "supported in Python environment. Use HSFS Query object instead."
)
def _convert_pandas_type(self, feat_name, dtype, arrow_schema):
if dtype == np.dtype("O"):
return self._infer_type_pyarrow(feat_name, arrow_schema)
return self._convert_simple_pandas_type(dtype)
def _convert_simple_pandas_type(self, dtype):
# This is a simple type conversion between pandas type and pyspark types.
# In PySpark they use PyArrow to do the schema conversion, but this python layer
# should be as thin as possible. Adding PyArrow will make the library less flexible.
# If the conversion fails, users can always fall back and provide their own types
if dtype == np.dtype("O"):
return "string"
elif dtype == np.dtype("int32"):
return "int"
elif dtype == np.dtype("int64"):
return "bigint"
elif dtype == np.dtype("float32"):
return "float"
elif dtype == np.dtype("float64"):
return "double"
elif dtype == np.dtype("datetime64[ns]"):
return "timestamp"
elif dtype == np.dtype("bool"):
return "bool"
return "string"
def _infer_type_pyarrow(self, field, schema):
arrow_type = schema.field(field).type
if pa.types.is_list(arrow_type):
# figure out sub type
subtype = self._convert_simple_pandas_type(
arrow_type.value_type.to_pandas_dtype()
)
return "array<{}>".format(subtype)
return "string"
def save_dataframe(
self,
feature_group: FeatureGroup,
dataframe: pd.DataFrame,
operation: str,
online_enabled: bool,
storage: bool,
offline_write_options: dict,
online_write_options: dict,
validation_id: int = None,
):
if feature_group.stream:
return self._write_dataframe_kafka(
feature_group, dataframe, offline_write_options
)
else:
# for backwards compatibility
return self.legacy_save_dataframe(
feature_group,
dataframe,
operation,
online_enabled,
storage,
offline_write_options,
online_write_options,
validation_id,
)
def legacy_save_dataframe(
self,
feature_group,
dataframe,
operation,
online_enabled,
storage,
offline_write_options,
online_write_options,
validation_id=None,
):
# App configuration
app_options = self._get_app_options(offline_write_options)
# Setup job for ingestion
# Configure Hopsworks ingestion job
print("Configuring ingestion job...")
fg_api = feature_group_api.FeatureGroupApi(feature_group.feature_store_id)
ingestion_job = fg_api.ingestion(feature_group, app_options)
# Upload dataframe into Hopsworks
print("Uploading Pandas dataframe...")
self._dataset_api.upload(feature_group, ingestion_job.data_path, dataframe)
# Launch job
print("Launching ingestion job...")
self._job_api.launch(ingestion_job.job.name)
print(
"Ingestion Job started successfully, you can follow the progress at {}".format(
self._get_job_url(ingestion_job.job.href)
)
)
self._wait_for_job(ingestion_job.job, offline_write_options)
return ingestion_job.job
def get_training_data(
self, training_dataset_obj, feature_view_obj, query_obj, read_options
):
df = query_obj.read(read_options=read_options)
if training_dataset_obj.splits:
split_df = self._split_df(df, training_dataset_obj.splits)
transformation_function_engine.TransformationFunctionEngine.populate_builtin_transformation_functions(
training_dataset_obj, feature_view_obj, split_df
)
else:
split_df = df
transformation_function_engine.TransformationFunctionEngine.populate_builtin_transformation_functions(
training_dataset_obj, feature_view_obj, split_df
)
# TODO: apply transformation
return split_df
def _split_df(self, df, splits):
"""
        Split a df into slices defined by `splits`. `splits` is a `dict(str, float)`
        whose keys are split names and whose values are split ratios.
"""
split_column = f"_SPLIT_INDEX_{uuid.uuid1()}"
result_dfs = {}
items = splits.items()
        if sum(splits.values()) != 1 or any(
            v > 1 or v < 0 for v in splits.values()
        ):
            raise ValueError(
                "Sum of split ratios should be 1 and each value should be in the range [0, 1]"
            )
df_size = len(df)
groups = []
for i, item in enumerate(items):
groups += [i] * int(df_size * item[1])
groups += [len(items) - 1] * (df_size - len(groups))
random.shuffle(groups)
df[split_column] = groups
for i, item in enumerate(items):
result_dfs[item[0]] = df[df[split_column] == i].drop(split_column, axis=1)
return result_dfs
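    # A minimal sketch of _split_df's behaviour (hypothetical ratios): with
    # splits = {"train": 0.8, "test": 0.2} and a 100-row dataframe, roughly
    # 80 shuffled rows land in result_dfs["train"] and the remaining 20 in
    # result_dfs["test"].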
def write_training_dataset(
self,
training_dataset,
dataset,
user_write_options,
save_mode,
feature_view_obj=None,
to_df=False,
):
if not feature_view_obj and not isinstance(dataset, query.Query):
raise Exception(
"Currently only query based training datasets are supported by the Python engine"
)
# As for creating a feature group, users have the possibility of passing
# a spark_job_configuration object as part of the user_write_options with the key "spark"
spark_job_configuration = user_write_options.pop("spark", None)
td_app_conf = training_dataset_job_conf.TrainingDatsetJobConf(
query=dataset,
overwrite=(save_mode == "overwrite"),
write_options=user_write_options,
spark_job_configuration=spark_job_configuration,
)
if feature_view_obj:
fv_api = feature_view_api.FeatureViewApi(feature_view_obj.featurestore_id)
td_job = fv_api.compute_training_dataset(
feature_view_obj.name,
feature_view_obj.version,
training_dataset.version,
td_app_conf,
)
else:
td_api = training_dataset_api.TrainingDatasetApi(
training_dataset.feature_store_id
)
td_job = td_api.compute(training_dataset, td_app_conf)
print(
"Training dataset job started successfully, you can follow the progress at {}".format(
self._get_job_url(td_job.href)
)
)
# If the user passed the wait_for_job option consider it,
# otherwise use the default True
self._wait_for_job(td_job, user_write_options)
return td_job
def _create_hive_connection(self, feature_store):
return hive.Connection(
host=client.get_instance()._host,
port=9085,
# database needs to be set every time, 'default' doesn't work in pyhive
database=feature_store,
auth="CERTIFICATES",
truststore=client.get_instance()._get_jks_trust_store_path(),
keystore=client.get_instance()._get_jks_key_store_path(),
keystore_password=client.get_instance()._cert_key,
)
def _return_dataframe_type(self, dataframe, dataframe_type):
if dataframe_type.lower() in ["default", "pandas"]:
return dataframe
if dataframe_type.lower() == "numpy":
return dataframe.values
if dataframe_type == "python":
return dataframe.values.tolist()
raise TypeError(
"Dataframe type `{}` not supported on this platform.".format(dataframe_type)
)
def is_spark_dataframe(self, dataframe):
return False
def save_stream_dataframe(
self,
feature_group,
dataframe,
query_name,
output_mode,
await_termination,
timeout,
write_options,
):
raise NotImplementedError(
"Stream ingestion is not available on Python environments, because it requires Spark as engine."
)
def get_empty_appended_dataframe(self, dataframe, new_features):
"""No-op in python engine, user has to write to feature group manually for schema
change to take effect."""
return None
def save_empty_dataframe(self, feature_group, dataframe):
"""Wrapper around save_dataframe in order to provide no-op."""
pass
def _get_job_url(self, href: str):
"""Use the endpoint returned by the API to construct the UI url for jobs
Args:
href (str): the endpoint returned by the API
"""
url = urlparse(href)
url_splits = url.path.split("/")
project_id = url_splits[4]
job_name = url_splits[6]
ui_url = url._replace(
path="p/{}/jobs/named/{}/executions".format(project_id, job_name)
)
ui_url = client.get_instance().replace_public_host(ui_url)
return ui_url.geturl()
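    # Hedged example of the path rewrite above (the host, project id 119 and
    # job name "demo_job" are made up):
    #   href = "https://host/hopsworks-api/api/project/119/jobs/demo_job"
    #   -> path "/p/119/jobs/named/demo_job/executions" on the public host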
def _get_app_options(self, user_write_options={}):
"""
Generate the options that should be passed to the application doing the ingestion.
Options should be data format, data options to read the input dataframe and
insert options to be passed to the insert method
Users can pass Spark configurations to the save/insert method
Property name should match the value in the JobConfiguration.__init__
"""
spark_job_configuration = user_write_options.pop("spark", None)
return ingestion_job_conf.IngestionJobConf(
data_format="PARQUET",
data_options=[],
write_options=user_write_options,
spark_job_configuration=spark_job_configuration,
)
def _wait_for_job(self, job, user_write_options=None):
# If the user passed the wait_for_job option consider it,
# otherwise use the default True
while user_write_options is None or user_write_options.get(
"wait_for_job", True
):
executions = self._job_api.last_execution(job)
if len(executions) > 0:
execution = executions[0]
else:
return
if execution.final_status.lower() == "succeeded":
return
elif execution.final_status.lower() == "failed":
raise exceptions.FeatureStoreException(
"The Hopsworks Job failed, use the Hopsworks UI to access the job logs"
)
elif execution.final_status.lower() == "killed":
raise exceptions.FeatureStoreException("The Hopsworks Job was stopped")
time.sleep(3)
def add_file(self, file):
# if streaming connectors are implemented in the future, this method
# can be used to materialize certificates locally
return file
def _write_dataframe_kafka(
self,
feature_group: FeatureGroup,
dataframe: pd.DataFrame,
offline_write_options: dict,
):
# setup kafka producer
producer = Producer(self._get_kafka_config(offline_write_options))
# setup complex feature writers
feature_writers = {
feature: self._get_encoder_func(
feature_group._get_feature_avro_schema(feature)
)
for feature in feature_group.get_complex_features()
}
# setup row writer function
writer = self._get_encoder_func(feature_group._get_encoded_avro_schema())
# loop over rows
for r in dataframe.itertuples(index=False):
            # itertuples returns a Python NamedTuple; to serialize it with
            # avro, create a copy of the row by converting it to a dict,
            # which preserves the datatypes
            row = r._asdict()
            # transform special data types
            # here we might need to handle also timestamps and other complex types
            # possible optimization: make it based on type so we don't need to
            # loop over all keys in the row
for k in row.keys():
# for avro to be able to serialize them, they need to be python data types
if isinstance(row[k], np.ndarray):
row[k] = row[k].tolist()
if isinstance(row[k], pd.Timestamp):
row[k] = row[k].to_pydatetime()
# encode complex features
row = self._encode_complex_features(feature_writers, row)
# encode feature row
with BytesIO() as outf:
writer(row, outf)
encoded_row = outf.getvalue()
# assemble key
key = "".join([str(row[pk]) for pk in sorted(feature_group.primary_key)])
# produce
producer.produce(
topic=feature_group._online_topic_name, key=key, value=encoded_row
)
# Trigger internal callbacks to empty op queue
producer.poll(0)
# make sure producer blocks and everything is delivered
producer.flush()
# start backfilling job
job_name = "{fg_name}_{version}_offline_fg_backfill".format(
fg_name=feature_group.name, version=feature_group.version
)
job = self._job_api.get(job_name)
if offline_write_options is not None and offline_write_options.get(
"start_offline_backfill", True
):
print("Launching offline feature group backfill job...")
self._job_api.launch(job_name)
print(
"Backfill Job started successfully, you can follow the progress at {}".format(
self._get_job_url(job.href)
)
)
self._wait_for_job(job, offline_write_options)
return job
def _encode_complex_features(
self, feature_writers: Dict[str, callable], row: dict
) -> dict:
for feature_name, writer in feature_writers.items():
with BytesIO() as outf:
writer(row[feature_name], outf)
row[feature_name] = outf.getvalue()
return row
def _get_encoder_func(self, writer_schema: str) -> callable:
if HAS_FAST:
schema = json.loads(writer_schema)
parsed_schema = parse_schema(schema)
return lambda record, outf: schemaless_writer(outf, parsed_schema, record)
parsed_schema = avro.schema.parse(writer_schema)
writer = avro.io.DatumWriter(parsed_schema)
return lambda record, outf: writer.write(record, avro.io.BinaryEncoder(outf))
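    # A minimal sketch of the fastavro fast path above, assuming fastavro is
    # installed (the schema and record are illustrative):
    #   from io import BytesIO
    #   from fastavro import parse_schema, schemaless_writer
    #   parsed = parse_schema({"type": "record", "name": "r",
    #                          "fields": [{"name": "a", "type": "long"}]})
    #   with BytesIO() as buf:
    #       schemaless_writer(buf, parsed, {"a": 1})
    #       payload = buf.getvalue()  # bytes used as the Kafka message value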
def _get_kafka_config(self, write_options: dict = {}) -> dict:
config = {
"security.protocol": "SSL",
"ssl.ca.location": client.get_instance()._get_ca_chain_path(),
"ssl.certificate.location": client.get_instance()._get_client_cert_path(),
"ssl.key.location": client.get_instance()._get_client_key_path(),
"client.id": socket.gethostname(),
}
if isinstance(client.get_instance(), hopsworks.Client) or write_options.get(
"internal_kafka", False
):
config["bootstrap.servers"] = ",".join(
[
endpoint.replace("INTERNAL://", "")
for endpoint in self._kafka_api.get_broker_endpoints(
externalListeners=False
)
]
)
elif isinstance(client.get_instance(), external.Client):
config["bootstrap.servers"] = ",".join(
[
endpoint.replace("EXTERNAL://", "")
for endpoint in self._kafka_api.get_broker_endpoints(
externalListeners=True
)
]
)
return config
|
{"hexsha": "30f7fcc58cc1f7dcb3e216ef8069bcfed7a01029", "size": 28209, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/hsfs/engine/python.py", "max_stars_repo_name": "gibchikafa/feature-store-api", "max_stars_repo_head_hexsha": "314a4d9a390bc371f4495f58c317797302f828ba", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/hsfs/engine/python.py", "max_issues_repo_name": "gibchikafa/feature-store-api", "max_issues_repo_head_hexsha": "314a4d9a390bc371f4495f58c317797302f828ba", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/hsfs/engine/python.py", "max_forks_repo_name": "gibchikafa/feature-store-api", "max_forks_repo_head_hexsha": "314a4d9a390bc371f4495f58c317797302f828ba", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3987096774, "max_line_length": 168, "alphanum_fraction": 0.6092736361, "include": true, "reason": "import numpy", "num_tokens": 5660}
|
"""
This file defines a class MicrogridEnv that wraps the Simulator in this package, so that it follows the
OpenAI gym (https://github.com/openai/gym) format.
"""
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
from microgridRLsimulator.simulate.simulator import Simulator
from microgridRLsimulator.simulate.gridaction import GridAction
import copy
class MicrogridEnv(gym.Env):
def __init__(self, start_date, end_date, case, purpose="Train", params=None):
"""
:param start_date: datetime for the start of the simulation
:param end_date: datetime for the end of the simulation
:param case: case name (string)
"""
self.simulator = Simulator(start_date, end_date, case, params=params)
self.action_space = make_action_space(self.simulator)
self.observation_space = make_observation_space(self.simulator)
self.state = None
self.np_random = None
self.purpose = purpose
self.seed()
def sample(self):
return self.simulator.sample()
def set_state(self, state):
self.simulator.set_state(state)
def get_state(self):
return copy.deepcopy(self.simulator.grid_state)
def seed(self, seed=None):
np.random.seed(seed)
self.np_random, seed = seeding.np_random(seed)
self.action_space.seed(seed)
self.observation_space.seed(seed)
return [seed]
def reset(self):
self.state = self.simulator.reset()
return self._observation(self.state)
def render(self, path):
print(f"Plots store in {path}")
self.simulator.plot(path)
def step(self, action):
"""
Step function, as in gym.
May also accept a state as input (useful for MCTS, for instance).
"""
assert self.action_space.contains(action) or isinstance(action, GridAction)
self.state, reward, done, info = self.simulator.step(action)
return self._observation(self.state), reward, done, info
@staticmethod
def _observation(state):
return np.array(state, np.float32)
def make_action_space(simulator):
if simulator.env_config['action_space'].lower() == "discrete":
return spaces.Discrete(simulator.grid.gather_action_space())
lower, upper = simulator.grid.gather_action_space()
action_space = spaces.Box(lower, upper, dtype=np.float32)
return action_space
def make_observation_space(simulator):
lower, upper = simulator.grid.gather_observation_space()
observation_space = spaces.Box(lower, upper, dtype=np.float32)
return observation_space
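# A hedged usage sketch of the wrapper above (the dates and case name are
# hypothetical; a matching case definition must exist for the Simulator):
#   env = MicrogridEnv("2016-01-01", "2016-01-02", "elespino")
#   obs = env.reset()
#   done = False
#   while not done:
#       obs, reward, done, info = env.step(env.action_space.sample())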
|
{"hexsha": "df38c6599db4a7c8ae41d56f62db2d6a9dcd0e04", "size": 2651, "ext": "py", "lang": "Python", "max_stars_repo_path": "microgridRLsimulator/gym_wrapper/microgrid_env.py", "max_stars_repo_name": "d3sm0/microgridRLsimulator", "max_stars_repo_head_hexsha": "2721dd56430ff81a5ebd86fef6a94ed4acd1f26d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "microgridRLsimulator/gym_wrapper/microgrid_env.py", "max_issues_repo_name": "d3sm0/microgridRLsimulator", "max_issues_repo_head_hexsha": "2721dd56430ff81a5ebd86fef6a94ed4acd1f26d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "microgridRLsimulator/gym_wrapper/microgrid_env.py", "max_forks_repo_name": "d3sm0/microgridRLsimulator", "max_forks_repo_head_hexsha": "2721dd56430ff81a5ebd86fef6a94ed4acd1f26d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.125, "max_line_length": 103, "alphanum_fraction": 0.6872878159, "include": true, "reason": "import numpy", "num_tokens": 578}
|
from models.spacy_based_ir import SpacyIR
from models.bert_sts import BertSTSIR
from models.bert_nli import BertNLIIR
from models.bert_cnn import BertCNNIR
from tqdm import tqdm
import argparse
import pickle
import numpy as np
import os
def choose_model(topk=50):
    irmodel = SpacyIR(topk=topk)
    return irmodel
def read_data_to_score(factfile,is_fact_fact=False,datasets=None):
data = {}
base_dir = os.environ['PREPARED_DATA'] + "/hypothesis/"
if not is_fact_fact:
fnames = datasets
else:
base_dir = os.environ['PREPARED_DATA'] + "/knowledge/"
fnames = ["openbook.txt"]
facts = []
factlines = open(os.environ['PREPARED_DATA'] + "/knowledge/"+factfile,"r").readlines()
for fact in tqdm(factlines,desc="Processing Facts:"):
fact=fact.strip().replace('"',"")
facts.append(fact)
for fname in fnames:
lines = open(base_dir+fname,"r").readlines()
for index,line in tqdm(enumerate(lines),desc="Reading From "+fname+" :"):
if not is_fact_fact:
line = line.strip().split("\t")
idx = line[0]
choices = line[2:6]
assert len(choices) == 4
                for choice_index, choice in enumerate(choices):
                    nidx = idx + "__ch_" + str(choice_index)
                    data[nidx] = choice
else:
line = line.strip().replace('"',"")
data[str(index)]=line
return {"data":data,"facts":facts}
# irmodel.predict(data,outfile,tokenfile)
# return
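# Hedged sketch of read_data_to_score's return value (the ids and strings
# below are made up):
#   {"data": {"8-343__ch_0": "first answer choice", ...},
#    "facts": ["an openbook fact", ...]}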
datasets = ["hyp-ques-test.tsv","hyp-ques-train.tsv","hyp-ques-val.tsv"]
datasets = ["hyp-ques-test.tsv","hyp-ques-val.tsv"]
# irmodel = choose_model(topk=50)
# data = read_data_to_score("openbook.txt")
# pred_data(data,irmodel,os.environ['PREPARED_DATA'] + "/ranked/scapy-openbook.json")
# irmodel = BertSTSIR(topk=50,output_dir="/scratch/pbanerj6/stsb_output",model="pytorch_model.bin.3",eval_batch_size=256)
# data = read_data_to_score("openbook.txt",is_fact_fact=True)
# irmodel.predict(data,os.environ['PREPARED_DATA'] + "/ranked/sts-factfact-orig.json","/scratch/pbanerj6/factfact.tokens")
# irmodel = BertSTSIR(topk=50,output_dir="/scratch/pbanerj6/stsb_output",model="pytorch_model.bin.3",eval_batch_size=1024)
# data = read_data_to_score("openbook.txt")
# irmodel.predict(data,os.environ['PREPARED_DATA'] + "/ranked/sts-openbook.json","/scratch/pbanerj6/hypfacttokens/nli.tokens")
# irmodel = choose_model(topk=100)
# data = read_data_to_score("omcs.txt",datasets=datasets)
# irmodel.predict(data,os.environ['PREPARED_DATA'] + "/ranked/scapy-omcs.json")
# irmodel = BertSTSIR(topk=50,output_dir="/scratch/pbanerj6/stsb_output",model="pytorch_model.bin.4",eval_batch_size=1024)
# data = read_data_to_score("openbook.txt",datasets=datasets)
# irmodel.predict(data,os.environ['PREPARED_DATA'] + "/ranked/sts-factfact-orig.json","/scratch/pbanerj6/hyptestvaltokens/sts.tokens")
# irmodel = BertCNNIR(topk=50,output_dir="/scratch/pbanerj6/sml-class-bert-large-cnn-full",model="best_model.bin",eval_batch_size=1024)
# data = read_data_to_score("openbook.txt",datasets=datasets)
# irmodel.predict(data,os.environ['PREPARED_DATA'] + "/ranked/cnn-openbook.json","/scratch/pbanerj6/cnntokens/nli.tokens")
irmodel = BertNLIIR(topk=50,output_dir="/scratch/pbanerj6/sml-class-bert-large-v2-5e6-full",model="best_model.bin",eval_batch_size=1024)
data = read_data_to_score("openbook.txt",datasets=datasets)
irmodel.predict(data,os.environ['PREPARED_DATA'] + "/ranked/simplebert-openbook.json","/scratch/pbanerj6/cnntokens/nli.tokens")
# model_path = "/scratch/pbanerj6/qnli_orig_output/"
# model = "pytorch_model.bin.4"
# outfile = "../data/ranked/qnli-openbook.json"
# irmodel = BertNLIIR(topk=50,output_dir=model_path,model=model,eval_batch_size=2048)
# data = read_data_to_score("openbook.txt",datasets=datasets)
# irmodel.predict(data,outfile,"/scratch/pbanerj6/hyptestvaltokens/sts.tokens")
|
{"hexsha": "b0f8357f2dc6568d23f6223fd33eb1ab5b227d19", "size": 3979, "ext": "py", "lang": "Python", "max_stars_repo_path": "ir/run_ir.py", "max_stars_repo_name": "tbmihailov/OBQA", "max_stars_repo_head_hexsha": "653c5c64ae7eb164bde0b381813afe5f664dcf67", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ir/run_ir.py", "max_issues_repo_name": "tbmihailov/OBQA", "max_issues_repo_head_hexsha": "653c5c64ae7eb164bde0b381813afe5f664dcf67", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ir/run_ir.py", "max_forks_repo_name": "tbmihailov/OBQA", "max_forks_repo_head_hexsha": "653c5c64ae7eb164bde0b381813afe5f664dcf67", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.7252747253, "max_line_length": 136, "alphanum_fraction": 0.7014325207, "include": true, "reason": "import numpy", "num_tokens": 1121}
|
# referring to https://zhuanlan.zhihu.com/p/387853124
import os.path
import tensorrt as trt
import pycuda.driver as cuda
from util import GiB, HostDeviceMem
class TensorrtBase:
"""
Parent Class
"""
trt_logger = trt.Logger(trt.Logger.ERROR)
    # make the binding order consistent with the ONNX export.
input_names = []
output_names = []
def __init__(self, engine_file_path, *, gpu_id=0):
cuda.init()
# Create CUDA context
self.cuda_ctx = cuda.Device(gpu_id).make_context()
        # Prepare the runtime engine
self.engine = self._load_engine(engine_file_path)
self.binding_names = self.input_names + self.output_names
        # a context is created per inference call in do_inference(), but the
        # shared I/O buffers must exist before the first call
        self.buffers = self._allocate_buffer()
@classmethod
def build_engine(cls,
onnx_file_path,
engine_file_path,
*,
use_fp16=True,
dynamic_shapes={},
max_batch_size=1):
"""Build TensorRT Engine when engine file doesn't exist
:use_fp16: set mixed flop computation if the platform has fp16.
:dynamic_shapes: {binding_name: (min, opt, max)}, default {} represents not using dynamic.
:dynamic_batch_size: set it to 1 if use fixed batch size, else using max batch size
"""
builder = trt.Builder(cls.trt_logger)
network = builder.create_network(
1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
# config.set_tactic_sources(trt.TacticSource.CUBLAS_LT)
# Default workspace is 2G
config.max_workspace_size = GiB(2)
if builder.platform_has_fast_fp16 and use_fp16:
config.set_flag(trt.BuilderFlag.FP16)
# parse ONNX
parser = trt.OnnxParser(network, cls.trt_logger)
with open(onnx_file_path, 'rb') as model:
if not parser.parse(model.read()):
print('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print(parser.get_error(error))
return None
print("===> Completed parsing ONNX file")
# default = 1 for fixed batch size
builder.max_batch_size = 1
if len(dynamic_shapes) > 0:
print(f"===> using dynamic shapes: {str(dynamic_shapes)}")
builder.max_batch_size = max_batch_size
profile = builder.create_optimization_profile()
# set profile shape for input binding
for binding_name, dynamic_shape in dynamic_shapes.items():
min_shape, opt_shape, max_shape = dynamic_shape
profile.set_shape(
binding_name, min_shape, opt_shape, max_shape)
config.add_optimization_profile(profile)
# Remove existing engine file
if os.path.isfile(engine_file_path):
try:
os.remove(engine_file_path)
except Exception:
print(f"Cannot remove existing file: {engine_file_path}")
print("===> Creating Tensorrt Engine...")
engine = builder.build_engine(network, config)
if engine:
with open(engine_file_path, "wb") as f:
f.write(engine.serialize())
print("===> Serialized Engine Saved at: ", engine_file_path)
else:
print("===> build engine error")
# return engine
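    # Hedged usage sketch for build_engine (the paths and the binding name
    # "input" are illustrative; shapes depend on the exported ONNX model):
    #   TensorrtBase.build_engine(
    #       "model.onnx", "model.engine",
    #       dynamic_shapes={"input": ((1, 3, 224, 224),
    #                                 (8, 3, 224, 224),
    #                                 (16, 3, 224, 224))},
    #       max_batch_size=16)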
def _load_engine(self, engine_file_path):
"""Load engine when engine file exists"""
# Force init TensorRT plugins
trt.init_libnvinfer_plugins(None, '')
with open(engine_file_path, "rb") as f, \
trt.Runtime(self.trt_logger) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
return engine
def _allocate_buffer(self, dynamic_factor=1):
"""Allocate buffer when output shape is fixed. Normally when only batch is dynamic.
:dynamic_factor: normally expand the buffer size for dynamic shape.It equal to h * w
"""
inputs = []
outputs = []
bindings = [None] * len(self.binding_names)
stream = cuda.Stream()
for binding in self.binding_names:
binding_idx = self.engine[binding]
if binding_idx == -1:
print("Error Binding Names!")
continue
dims = self.engine.get_binding_shape(binding)
            # trt.volume() returns a negative volume if -1 is in the shape
size = abs(trt.volume(dims)) * self.engine.max_batch_size * dynamic_factor
dtype = trt.nptype(self.engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings[binding_idx] = int(device_mem)
# Append to the appropriate list.
if self.engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def do_inference(self, inf_in_list, *, binding_shape_map=None, batch_size=1):
"""Main function for inference
:inf_in_list: input list.
:binding_shape_map: {<binding_name>: <shape>}, leave it to None for fixed shape
"""
inputs, outputs, bindings, stream = self.buffers
with self.engine.create_execution_context() as context:
if binding_shape_map:
context.active_optimization_profile = 0
for binding_name, shape in binding_shape_map.items():
binding_idx = self.engine[binding_name]
context.set_binding_shape(binding_idx, shape)
# transfer input data to device
for i in range(len(inputs)):
inputs[i].host = inf_in_list[i]
cuda.memcpy_htod_async(inputs[i].device, inputs[i].host, stream)
# do inference
context.execute_async(batch_size=batch_size, bindings=bindings,
stream_handle=stream.handle)
# copy data from device to host
for i in range(len(outputs)):
cuda.memcpy_dtoh_async(outputs[i].host, outputs[i].device, stream)
stream.synchronize()
trt_outputs = [out.host.copy() for out in outputs]
return trt_outputs
def __del__(self):
self.cuda_ctx.pop()
del self.cuda_ctx
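# A minimal subclass sketch (class name, engine path and binding names are
# hypothetical; binding order must match the exported ONNX model):
#   class MyNet(TensorrtBase):
#       input_names = ["input"]
#       output_names = ["output"]
#   net = MyNet("model.engine", gpu_id=0)
#   outputs = net.do_inference([input_array])  # input_array: a numpy array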
|
{"hexsha": "377f34aa89f64a52d1aeccfab6757e9414eb00d0", "size": 6918, "ext": "py", "lang": "Python", "max_stars_repo_path": "libs/dynamic_base.py", "max_stars_repo_name": "MichaelWU0726/x2trt", "max_stars_repo_head_hexsha": "75f34a8574315178589502ab14f64289e5c49061", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libs/dynamic_base.py", "max_issues_repo_name": "MichaelWU0726/x2trt", "max_issues_repo_head_hexsha": "75f34a8574315178589502ab14f64289e5c49061", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/dynamic_base.py", "max_forks_repo_name": "MichaelWU0726/x2trt", "max_forks_repo_head_hexsha": "75f34a8574315178589502ab14f64289e5c49061", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9272727273, "max_line_length": 99, "alphanum_fraction": 0.5938132408, "include": true, "reason": "import pycuda", "num_tokens": 1402}
|
import numpy as np
from natasy.neural_network import Initialization, initializations
def test_initialization():
assert Initialization()
def test__zeros_initialization():
W, b = initializations._zeros_initialization(4, 6)
assert W.shape == (4, 6)
assert b.shape == (4, 1)
assert W.all() == 0 and b.all() == 0
def test__he_initialization():
W, b = initializations._He_initialization(4, 6)
assert W.shape == (4, 6)
assert b.shape == (4, 1)
|
{"hexsha": "9ec55123e6b5631bdeeea1af50318a6aa99c16d3", "size": 477, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_initializations.py", "max_stars_repo_name": "disooqi/DNN", "max_stars_repo_head_hexsha": "f87a10afba0810778ab3669f30e20128779f9da0", "max_stars_repo_licenses": ["AFL-3.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-03-03T11:01:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T15:53:47.000Z", "max_issues_repo_path": "tests/test_initializations.py", "max_issues_repo_name": "disooqi/DNN", "max_issues_repo_head_hexsha": "f87a10afba0810778ab3669f30e20128779f9da0", "max_issues_repo_licenses": ["AFL-3.0"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2018-10-31T16:54:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-28T06:05:56.000Z", "max_forks_repo_path": "tests/test_initializations.py", "max_forks_repo_name": "disooqi/Natasy", "max_forks_repo_head_hexsha": "f87a10afba0810778ab3669f30e20128779f9da0", "max_forks_repo_licenses": ["AFL-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6818181818, "max_line_length": 65, "alphanum_fraction": 0.677148847, "include": true, "reason": "import numpy", "num_tokens": 134}
|
#!/usr/bin/env python3
##@package openzgy.impl.histogram
import numpy as np
class HistogramData:
def __init__(self, range_hint=None, dtype=np.float32):
self._hmin, self._hmax = self._suggestHistogramRange(range_hint, dtype)
self._dtype = dtype
self._size = 256
# Uncomment the next lines to enable the temporary large histogram.
# One problem is that the histogram may lose the zero centric
# property that the user specified range might have, making the
# result look bad.
#if np.issubdtype(dtype, np.integer):
# self._size = max(256, min(65536, self._hmax - self._hmin + 1))
self._bins = np.zeros(self._size, dtype=np.int64)
if False:
print("@ Histogram ", self._hmin, self._hmax, "for data", range_hint)
@staticmethod
def _suggestHistogramRange(range_hint, dtype):
"""
Choose the histogram range to use.
The histogram range is normally set to the coding range for
integral data and the actual data range for float data.
This method takes care of corner cases and (future) subtle
        tweaks such as making the range zero centric.
"""
# This logic in C++ is found in GenLodImpl::suggestHistogramRange()
# and has a much more detailed explanation of what is going on.
bogus = (-128, +127)
if np.issubdtype(dtype, np.integer):
# The histogram is built from storage values so its maximum
# possible range is already known. For int8 there will be one
# bin for every possible value. For int16 we might do the same,
# temporarily producing a histogram with 65,536 values and then
# whittling it down to 256 bins before storing it. But for now
# just map to the user provided coding range and hope that the
# user didn't decide to use just a small part of the available
# integer storage values.
return (np.iinfo(dtype).min, np.iinfo(dtype).max)
else:
# Choose histogram range based on the min/max value collected
# while writing lod 0. Always end up with a sane interval with
# min < max to avoid problems generating the histogram and also
# for applications reading the file later. For completely empty
# files just use (-1,+1) which is as good a default as any.
if (not range_hint or
not np.isfinite(range_hint[0]) or
not np.isfinite(range_hint[1]) or
range_hint[0] > range_hint[1]):
return bogus # nothing written or error.
elif range_hint[0] < range_hint[1]:
# This is the normal case for floating point data.
# Don't return numpy types. They have weird rules.
return (float(range_hint[0]), float(range_hint[1]))
elif range_hint[0] > 0: # At this point, hint[0] == hint[1]
return (0, range_hint[0]) # single positive value
elif range_hint[0] < 0:
return (range_hint[0], 0) # single negative value
else:
return bogus # all zero
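    # Worked examples of the rules above (values are illustrative):
    #   np.int8 data              -> (-128, 127), one bin per storage value
    #   float hint (0.5, 2.5)     -> (0.5, 2.5)
    #   float hint (3.0, 3.0)     -> (0, 3.0), single positive value
    #   empty file or NaN in hint -> (-128, 127), the bogus default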
def _histogram_data(self, data):
if not np.issubdtype(self._dtype, np.integer):
# numpy.histogram is documented to ignore values outside range.
# Handling of NaN is undocumented and currently reports errors
# from low level code. So, map NaN to +Inf just in case.
# TODO-Performance: This is not really a good idea.
data = np.copy(data)
data[np.isnan(data)] = np.inf
return np.histogram(data, bins=self._size, range=self.np_range)[0]
def add(self, data, factor = 1):
tmp = self._histogram_data(data)
if factor != 1:
tmp *= factor
self._bins += tmp
def scale(self, a, b):
self._hmin = a * self._hmin + b
self._hmax = a * self._hmax + b
def resize(self, newsize):
binwidth = (self._hmax - self._hmin) / (self._size - 1)
oldbins = self._bins
oldsize = self._size
self._size = newsize
self._bins = np.zeros(self._size, dtype=np.int64)
if np.count_nonzero(oldbins) == 0:
return
if newsize >= oldsize:
self._bins[:oldsize] = oldbins
self._hmax = self._hmin + binwidth * (self._size - 1)
return
skiplo = np.argmax(oldbins[::1] != 0)
skiphi = np.argmax(oldbins[::-1] != 0)
factor = max(1, (oldsize-skiplo-skiphi + (newsize-1)) // (newsize-2))
factor = ((factor // 2) * 2) + 1 # Round up to make it odd.
# Very minor issue: I reserve the first and last bin to hold
        # data from the misaligned part. If everything ends up aligned
# those two end up unused. I am absolutely sure no one will
# notice. *except possibly* when running unit tests.
# Adjust skiplo and skiphi upwards so that (a) neither moves
# more than "factor", (b) neither becomes negative, (c) the
# remaining size - skiphi - skiplo is a multiple of "factor",
# and (d) any zero-centric property is preserved by making
# sure the "zero" bin in the input ends up in the middle of
# one of the output bins. The last one is where it gets really
# tricky and TODO-High must be implemented. Or YAGNI, remove
# the capability to resize.
# Combine "factor" input bins into each output bin
center_count = ((oldsize-skiphi-skiplo)//factor)*factor
skiphi = oldsize - skiplo - center_count
partial = np.sum(oldbins[skiplo:oldsize-skiphi].reshape(-1,factor),axis=1)
# Mop up the ends that might have fewer than "factor" entries.
head = np.sum(oldbins[:skiplo])
tail = np.sum(oldbins[oldsize-skiphi:])
self._bins[1:(center_count//factor)+1] = partial
self._bins[0] = head
self._bins[(center_count//factor)+1] = tail
# The new binwidth must be binwidth*factor.
# The new bin[1] corresponds to old bin[skiplo], so new bin[0]
# must be new binwidth less than that.
self._hmin = (self._hmin + binwidth * skiplo) - (binwidth*factor)
self._hmax = self._hmin + (binwidth*factor) * (self._size-1)
@property
def bins(self):
return self._bins
@property
def vv_range(self):
"""
Histogram range, voxelvision and zgy style, with numbers
representing the center value of the first and last bin.
"""
return (self._hmin, self._hmax)
@property
def np_range(self):
"""
Histogram range, numpy and salmon style, with numbers
representing the edges of the first and last bin.
"""
binwidth = (self._hmax - self._hmin) / (self._size - 1)
return (self._hmin - binwidth/2, self._hmax + binwidth/2)
def binvalue(self, bin_number):
"""
Convert a single bin number to the center value of this bin.
Note that in ZGY this will refer to storage values, so you
may need to explicitly convert the result.
"""
binwidth = (self._hmax - self._hmin) / (self._size - 1)
return self._hmin + bin_number * binwidth
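    # Worked example with the default 256 bins over vv_range (-128, 127):
    #   binwidth = (127 - (-128)) / 255 = 1.0
    #   binvalue(0) == -128.0 and binvalue(255) == 127.0
    #   np_range == (-128.5, 127.5), i.e. the outer edges of those bins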
# Copyright 2017-2020, Schlumberger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
{"hexsha": "a4bf305269c174e2956fd9e48ae8a98d37e61c99", "size": 7926, "ext": "py", "lang": "Python", "max_stars_repo_path": "openzgy/impl/histogram.py", "max_stars_repo_name": "equinor/pyzgy", "max_stars_repo_head_hexsha": "94cd3d9050c3027d042a83b98779da9182041137", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openzgy/impl/histogram.py", "max_issues_repo_name": "equinor/pyzgy", "max_issues_repo_head_hexsha": "94cd3d9050c3027d042a83b98779da9182041137", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openzgy/impl/histogram.py", "max_forks_repo_name": "equinor/pyzgy", "max_forks_repo_head_hexsha": "94cd3d9050c3027d042a83b98779da9182041137", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7796610169, "max_line_length": 82, "alphanum_fraction": 0.61998486, "include": true, "reason": "import numpy", "num_tokens": 1969}
|
"""
:Author(s) Adam Camer-Pesci, Ryan Forster:
This file contains the methods used to perform calculations on scuba diving profiles.
"""
from rpy2.robjects.vectors import IntVector
import rpy2.robjects as robjects
import numpy as np
import math
import DiveConstants as dc
class Calculations:
def initialise_dive(self, data, gas_combinations):
"""
Creates and initialises the dive_profile object.
:param data: dataframe:
a dataframe containing columns: time and depth
:param gas_combinations: list:
A list of gases in the format [[fO2,fHe,fN2,time]]
:returns tuple:
:dive_profile: a scuba dive object
:gas_list: a list of scuba gas objects
"""
dive = robjects.r['dive']
gas_list, tank_times = self.trimix_list(gas_combinations)
size = len(gas_list)
d = dive(data, tanklist=gas_list)
        # Use an embedded R script to name and swap gas tanks for the dive
custom_gas = robjects.r('''
customGas <- function(dive_profile, numgas, list_of_times)
{
#Applies names to the tanklist in the format c("1":"n") - necessary to select which gas to use at a specific time.
names(tanklist(dive_profile)) <- c(1:numgas)
#Cuts the dive_profile and switches to the specific gas at the time listed
whichtank(dive_profile) <- cut(times.dive(dive_profile), breaks = c(do.call(c, list_of_times), Inf), include.lowest = TRUE, labels = names(tanklist(dive_profile)))
return(dive_profile)
}
''')
dive_profile = custom_gas(d, size, tank_times)
return dive_profile, gas_list
def trimix_list(self, gas_combinations):
"""
converts gas_combination string into trimix gas objects
        :param gas_combinations: list:
            a list of gases in the format [[fO2, fHe, fN2, time], ...]
:returns tuple:
:gas_list: a list of trimix gas objects
:time_list: a list of times to pair with gas_list
"""
trimix = robjects.r['trimix']
gas_list = []
time_list = []
try:
# set default gas to air if no gas specified at time 0
if((len(gas_combinations) < 1 or gas_combinations[0][3] > 0)):
gas_list.append(trimix(0.21, 0, 0.79))
time_list.append(-1)
for gas in gas_combinations:
gas_list.append(trimix(gas[0], gas[1]))
time_list.append(gas[3])
except IndexError:
pass
return gas_list, time_list
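    # Hedged example of the conversion (the fractions are illustrative):
    #   gas_combinations = [[0.21, 0.35, 0.44, 0]]  # fO2, fHe, fN2, time
    #   -> gas_list  == [trimix(0.21, 0.35)]  # scuba's trimix infers fN2
    #      time_list == [0]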
def o2_tox(self, dive_profile):
"""
        calculates oxygen toxicity exposure
:param dive_profile: dive:
a dive profile to test OTU level
        :returns float:
            value representing the pulmonary oxygen toxicity dose for a given
            dive profile and breathing gas; '.' is returned if scuba is unable
            to calculate it (NaN)
"""
oxtox = robjects.r['oxtox']
otu = oxtox(dive_profile, progressive=False)
ret = float(np.asarray(otu))
if(math.isnan(ret)):
ret = '.'
return ret
def max_ascent(self, dive_csv):
"""
finds the maximum ascent rate
:param dive_csv: dataframe:
a dataframe containing columns: time and depth
:returns:
the maximum ascent rate
"""
data = np.array(dive_csv)
max = 0
ascent_rate = 0
time_interval = data[0][3] - data[0][2]
for idx, depth in np.ndenumerate(data[1, :]):
try:
temp = data[1][idx[0]+1]
if ((depth - temp) > max):
max = depth - temp
except IndexError:
pass
# calculates the max ascent rate per min
div = 60.0 / time_interval
ascent_rate = round(max * div, 3)
return ascent_rate
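    # Worked example (illustrative): with samples every 10 s, an ascent from
    # 18 m to 15 m between consecutive points gives
    #   max = 3, div = 60 / 10 = 6, ascent_rate = 18 m/min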
def compartment_pressures(self, dive_profile, halftime_set):
"""
Gets compartment pressures from dive profile based on given half time set.
:param data: dataframe:
a dataframe containing columns: time and depth
:param halftime_set: str:
the name of the halftime set to be used
:returns:
:cp: a dataframe containing compartment pressures from 1,1b - 16
"""
# setup R functions
haldane = robjects.r['haldane']
pickmodel = robjects.r['pickmodel']
data_frame = robjects.r['data.frame']
if(not(halftime_set == 'ZH-L16A' or
halftime_set == 'ZH-L16B' or
halftime_set == 'ZH-L16C' or
halftime_set == 'Haldane' or
halftime_set == 'DSAT' or
halftime_set == 'Workman65' or
halftime_set == 'Buzzacott')):
raise ValueError('Invalid halftime-set')
else:
            # if the halftime set is Buzzacott, set up the Buzzacott model.
if(halftime_set == 'Buzzacott'):
hm = robjects.r['hm']
buzzacott_model = hm(HalfT=IntVector((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)), M0=IntVector((
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)), dM=IntVector((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)))
cp = haldane(dive_profile, model=buzzacott_model,
progressive=True)
            # if the halftime set is ZH-L16B or ZH-L16C, use ZH-L16A; the same model is reused for the different gradient factor calculations
elif(halftime_set == 'ZH-L16B' or halftime_set == 'ZH-L16C'):
cp = haldane(dive_profile, model=pickmodel(
'ZH-L16A'), progressive=True)
# for all other models, set up normally
else:
cp = haldane(dive_profile, model=pickmodel(
halftime_set), progressive=True)
# return the compartment pressures as dataframe
return data_frame(cp)
def max_values(self, ambient_pressures, compartment_pressures, totalIPP, nIPP, heIPP):
"""
merges max_bubble, max_inspired into a single function
Calculates max inspired, bubble and surf values as well as recording the compartment pressure values at the time of max bubble
:param ambient_pressures: float[]:
a list of ambient pressures at each time point (depth/10)
:param compartment_pressures: float[]:
a list of compartment pressure values (cp_value)
:param totalIPP: float[]:
the total inert gas partial pressure at given time points
:returns float[]:
max_values : array containing the collumns:
:max_ins: cp_value - totalIPP
:max_bub: cp_value - ambient_pressure
:n2_cp: cp_value where maxbub occured
:he_cp: helium cp_value where maxbub occured
:surf: the cp when the diver surfaces.
:he_surf: the helium cp when the diver surfaces
"""
# get compartment pressures and ambient pressure data
# cp = [row][col]
cp = np.array(compartment_pressures, dtype=np.float64).transpose()
ap = ambient_pressures
rows = cp.shape[0]
cols = cp.shape[1]
if cols > 17:
cols = 17
        # initialize output array; it has the same length as the compartment pressures
max_values = np.zeros((cols, 8))
# for each column
for i in range(cols):
max_bub = 0
max_ins = -9999
max_he_ins = -9999
total_ins = -9999
n2_cp = 0
he_cp = 0
# for each row
for j, cp_val in np.ndenumerate(cp[:, i]):
try:
# for buhlmann models
if(cols == 17):
temp_ins = cp_val - nIPP[j]
he_ins = cp[j][i+17] - heIPP[j]
temp_total = temp_ins + he_ins
temp_bub = (cp_val + cp[j][i+17]) - ap[j]
if(he_ins > max_he_ins):
max_he_ins = he_ins
# for air dives
else:
temp_bub = cp_val - ap[j]
temp_ins = cp_val - totalIPP[j]
temp_total = temp_ins
if(temp_total > total_ins):
total_ins = temp_total
if(temp_ins > max_ins):
max_ins = temp_ins
if(temp_bub > max_bub):
max_bub = temp_bub
n2_cp = cp_val
# get he_cp value iff buhlmann model
if(cols == 17):
he_cp = cp[j][i+17]
except IndexError:
pass
max_values[i][0] = max_ins
max_values[i][1] = max_bub
max_values[i][2] = n2_cp
max_values[i][3] = he_cp
max_values[i][4] = cp[rows-1][i] # N2Surf
if(cols == 17):
max_values[i][5] = cp[rows-1][i+17] # heSurf
max_values[i][6] = max_he_ins # helium maxins values
max_values[i][7] = total_ins
return max_values
def ambient_pressures(self, dive):
"""
calculates ambient pressures
:param dive: dataframe:
a dataframe containing columns: time and depth
:returns float[]:
a list of ambient pressures at each time point
"""
# get dive data (times/depths) and convert to np array
df = np.array(dive, dtype=np.float64)
# initialize output array
ap = np.zeros(df.shape[1])
# enumerate 2nd column of array (depths) and calculate ambient pressure
for idx, depth in np.ndenumerate(df[1, :]):
ap[idx] = depth/10 + 1
return ap
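    # Worked example: a depth of 20 m gives 20/10 + 1 = 3.0 ata of ambient
    # pressure (the +1 accounts for the atmosphere at the surface).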
def gradient_factors(self, data, compartment_pressures, halftime_set, surf_vals):
"""
        Calculates the maximum percentage of its respective M-value that any compartment reaches, known as the GFHigh and GFLow, specific to the user-selected halftime set.
        Also finds the depth at which any compartment reaches 100% of its M-value, and the first miss according to that depth.
:param data: dataframe:
a dataframe containing columns: time and depth
:param compartment_pressures: dataframe:
a dataframe containing compartment pressures specific to each halftime set
:param halftime_set: str:
the name of the halftime set to be used
:param surf_vals: array:
a 2D array containing the surface values for nitrogen and helium needed to calculate the GFHigh values
:returns list:
:GF_Lows_final: all final values GFLowNMax
:GF_Lows_max_max: the maximum value of all GFLowNMax values,
:gf_100D: an integer representing the depth at which a compartment hits 100% of its m value
:first_miss: the closest multiple of 3 to gf_100D
:GF_Highs: all GFHigh values
:GF_Highs_max: the maximum value of all GFHigh values
"""
# convert compartment pressures to numpy array, transpose so we have [rows][cols] rather than [cols][rows]
cp = np.array(compartment_pressures, dtype=np.float64).transpose()
if halftime_set == 'ZH-L16A' or halftime_set == 'ZH-L16B' or halftime_set == 'ZH-L16C':
num_compartments = 17
elif halftime_set == 'DSAT':
num_compartments = 8
elif halftime_set == 'Workman65':
num_compartments = 9
elif halftime_set == 'Haldane':
num_compartments = 5
gaugeP = np.zeros(cp.shape[0])
            # nitrogen and helium XDM values: the respective gas delta multiplied by the gauge pressure at each time point
nXDM = np.zeros((gaugeP.shape[0], num_compartments))
heXDM = np.zeros((gaugeP.shape[0], num_compartments))
# nitrogen and helium respective m values
n_mvalues = np.zeros((nXDM.shape[0], num_compartments))
he_mvalues = np.zeros((heXDM.shape[0], num_compartments))
# if a dive has both nitrogen and helium then we need to combine the m values using a weighting
total_mvalues = np.zeros((nXDM.shape[0], num_compartments))
GF_Lows = np.zeros((n_mvalues.shape[0], num_compartments))
GF_Highs = np.zeros(num_compartments)
GF_Lows_final = np.zeros(num_compartments)
# if compartment never hits 100% of its m value then leave value as N/A
gf_100D = '.'
first_miss = '.'
try:
for i in range(gaugeP.shape[0]):
gaugeP[i] = data[1][i]/10
for j in range(num_compartments):
if(halftime_set == 'ZH-L16B'):
nXDM[i][j] = gaugeP[i] * dc.ZHL16B_N_DELTA[j]
heXDM[i][j] = gaugeP[i] * dc.ZHL16B_HE_DELTA[j]
n_mvalues[i][j] = (
dc.ZHL16B_N_M_NAUGHT[j]/10) + nXDM[i][j]
he_mvalues[i][j] = (
dc.ZHL16B_HE_M_NAUGHT[j]/10) + heXDM[i][j]
GF_Highs[j] = round((surf_vals[j][4] + surf_vals[j]
[5]) * (100 / (dc.ZHL16B_N_M_NAUGHT[j]/10)))
h_val = cp[i][j+17]
n_val = cp[i][j]
total_mvalues[i][j] = (
(n_mvalues[i][j] * n_val) + (he_mvalues[i][j] * h_val)) / (h_val + n_val)
elif(halftime_set == 'ZH-L16A'):
nXDM[i][j] = gaugeP[i] * dc.ZHL16A_N_DELTA[j]
n_mvalues[i][j] = (
dc.ZHL16A_N_M_NAUGHT[j]/10) + nXDM[i][j]
GF_Highs[j] = round(surf_vals[j][4] *
(100 / (dc.ZHL16A_N_M_NAUGHT[j]/10)))
elif(halftime_set == 'ZH-L16C'):
nXDM[i][j] = gaugeP[i] * dc.ZHL16C_N_DELTA[j]
n_mvalues[i][j] = (
dc.ZHL16C_N_M_NAUGHT[j]/10) + nXDM[i][j]
GF_Highs[j] = round(surf_vals[j][4] *
(100 / (dc.ZHL16C_N_M_NAUGHT[j]/10)))
elif(halftime_set == 'DSAT'):
GF_Highs[j] = round(surf_vals[j][4] *
(100 / (dc.DSAT_N_M_NAUGHT[j]/10)))
elif(halftime_set == 'Workman65'):
nXDM[i][j] = gaugeP[i] * dc.WORKMAN_N_DELTA[j]
n_mvalues[i][j] = (
dc.WORKMAN_N_M_NAUGHT[j]/10) + nXDM[i][j]
GF_Highs[j] = round(surf_vals[j][4] *
(100 / (dc.WORKMAN_N_M_NAUGHT[j]/10)))
elif(halftime_set == 'Haldane'):
GF_Highs[j] = round(surf_vals[j][4] *
(100 / dc.HALDANE_M_NAUGHT))
# if buhlman must combine else just use nitrogen m values
if (halftime_set == 'ZH-L16B'):
# using total_mvalues for buhlman B as need to combine both N and He m-values
h_value = cp[i][j+17]
n_value = cp[i][j]
GF_Lows[i][j] = (h_value + n_value) * \
(100/total_mvalues[i][j])
elif (halftime_set == 'DSAT' or halftime_set == 'Haldane'):
pass
else:
GF_Lows[i][j] = (cp[i][j] / n_mvalues[i][j]) * 100
# compartment has hit 100% of its m value, only want the first occurence of this
if ((GF_Lows[i][j] >= 100) and (gf_100D == '.')):
gf_100D = data[1][i]
first_miss = 3 * round(gf_100D / 3)
# finds the GFLowMax for each value - must exclude all times when depth = 0
if(data[1][i] != 0):
if(GF_Lows[i][j] > GF_Lows_final[j]):
# the final GFLowNMax values
GF_Lows_final[j] = round(GF_Lows[i][j])
# GFLowMaxMax
GF_Lows_max_max = np.max(GF_Lows_final)
# GFHighMax
GF_High_max = np.max(GF_Highs)
except IndexError:
pass
        # store all necessary data in a single list for return, in the order:
        # GFLowMaxN, GFLowMaxMax, GF100D, FirstMiss, GFHighN, GFHighMax
gf_values = [GF_Lows_final, GF_Lows_max_max,
gf_100D, first_miss, GF_Highs, GF_High_max]
return gf_values
def helium_inert_pressure(self, ambient_pressures, gasses, dive):
"""
calculate inert gas partial pressure of helium at each time point
:param ambient_pressures: float[]:
a list of ambient pressures at each time point
:param gasses: array:
an array containing the gas mix in order [helium, nitrogen, oxygen]
:returns float[]:
the inert gas partial pressure of helium at each time point
"""
whichtank = robjects.r['whichtank']
heIPP = np.zeros(ambient_pressures.shape[0])
for idx, ap in np.ndenumerate(ambient_pressures):
if(len(gasses) == 0):
helium = 0
else:
i = idx[0]
tank_num = whichtank(dive)[i]
h_val = gasses[tank_num - 1][2]
helium = h_val[0]
heIPP[idx] = ap * helium
return heIPP
def nitrogen_inert_pressure(self, ambient_pressures, gasses, dive):
"""
calculate inert gas partial pressure of nitrogen at each time point
:param ambient_pressures: float[]:
a list of ambient pressures at each time point
:param gasses: array:
an array containing the gas mix in order [helium, nitrogen, oxygen]
:returns float[]:
the inert gas partial pressure of nitrogen at each time point
"""
whichtank = robjects.r['whichtank']
nIPP = np.zeros(ambient_pressures.shape[0])
for idx, ap in np.ndenumerate(ambient_pressures):
if(len(gasses) == 0):
nitrogen = 0.79
else:
i = idx[0]
tank_num = whichtank(dive)[i]
n_val = gasses[tank_num - 1][1]
nitrogen = n_val[0]
nIPP[idx] = ap * nitrogen
return nIPP
def totalIPP(self, nIPP, heIPP):
"""
calculate the total inert gas partial pressure of nitrogen and helium at each time point
:param niPP: float[]:
the inert gas partial pressure of nitrogen at a given time points
:param heIPP: float[]:
the inert gas partial pressure of helium at a given time points
:returns float[]:
the total inert gas partial pressure at given time points
"""
total_IPP = np.zeros(nIPP.shape[0])
for idx, ni in np.ndenumerate(nIPP):
total_IPP[idx] = ni + heIPP[idx]
return total_IPP
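    # Worked example combining the helpers above for air with no helium:
    #   at 20 m, ap = 3.0; nIPP = 3.0 * 0.79 = 2.37; heIPP = 0.0
    #   totalIPP = 2.37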
|
{"hexsha": "8115a0309904eb3cc9f6f540095a7eb7c05409c0", "size": 19780, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/py/main/Calculations.py", "max_stars_repo_name": "AdamPesci/diveR", "max_stars_repo_head_hexsha": "ae7fe415bbdbb008fadd2b96a0a3a5092b04fdc7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/py/main/Calculations.py", "max_issues_repo_name": "AdamPesci/diveR", "max_issues_repo_head_hexsha": "ae7fe415bbdbb008fadd2b96a0a3a5092b04fdc7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/py/main/Calculations.py", "max_forks_repo_name": "AdamPesci/diveR", "max_forks_repo_head_hexsha": "ae7fe415bbdbb008fadd2b96a0a3a5092b04fdc7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0138067061, "max_line_length": 175, "alphanum_fraction": 0.5368048534, "include": true, "reason": "import numpy", "num_tokens": 4837}
|
"""
This file tests the ability for serde.py to convert complex types into
simple python types which are serializable by standard serialization tools.
For more on how/why this works, see serde.py directly.
"""
from syft.serde import native_serde
from syft.serde import serde
from syft.serde import torch_serde
import syft
from syft.exceptions import CompressionNotFoundException
from syft.frameworks.torch import pointers
import msgpack
import numpy
import pytest
import torch
from torch import Tensor
def test_tuple_simplify():
"""This tests our ability to simplify tuple types.
This test is pretty simple since tuples just serialize to
    themselves, with a tuple wrapper with the correct ID
for tuples so that the detailer knows how to interpret it."""
input = ("hello", "world")
tuple_detail_index = serde.detailers.index(native_serde._detail_collection_tuple)
str_detail_index = serde.detailers.index(native_serde._detail_str)
target = (
tuple_detail_index,
((str_detail_index, (b"hello",)), (str_detail_index, (b"world",))),
)
assert serde._simplify(input) == target
def test_list_simplify():
"""This tests our ability to simplify list types.
This test is pretty simple since lists just serialize to
    themselves, with a tuple wrapper with the correct ID
for lists so that the detailer knows how to interpret it."""
input = ["hello", "world"]
list_detail_index = serde.detailers.index(native_serde._detail_collection_list)
str_detail_index = serde.detailers.index(native_serde._detail_str)
target = (list_detail_index, [(str_detail_index, (b"hello",)), (str_detail_index, (b"world",))])
assert serde._simplify(input) == target
def test_set_simplify():
"""This tests our ability to simplify set objects.
This test is pretty simple since sets just serialize to
    lists, with a tuple wrapper with the correct ID
for sets so that the detailer knows how to interpret it."""
input = set(["hello", "world"])
set_detail_index = serde.detailers.index(native_serde._detail_collection_set)
str_detail_index = serde.detailers.index(native_serde._detail_str)
target = (set_detail_index, [(str_detail_index, (b"hello",)), (str_detail_index, (b"world",))])
assert serde._simplify(input)[0] == target[0]
assert set(serde._simplify(input)[1]) == set(target[1])
def test_float_simplify():
"""This tests our ability to simplify float objects.
This test is pretty simple since floats just serialize to
themselves, with no tuple/id necessary."""
input = 5.6
target = 5.6
assert serde._simplify(input) == target
def test_int_simplify():
"""This tests our ability to simplify int objects.
This test is pretty simple since ints just serialize to
themselves, with no tuple/id necessary."""
input = 5
target = 5
assert serde._simplify(input) == target
def test_string_simplify():
"""This tests our ability to simplify string objects.
This test is pretty simple since strings just serialize to
themselves, with no tuple/id necessary."""
input = "hello"
target = (serde.detailers.index(native_serde._detail_str), (b"hello",))
assert serde._simplify(input) == target
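# Hedged sketch of the inverse operation exercised implicitly by these tests
# (serde._detail and syft.local_worker are assumed to exist in this version
# of the library):
#   simplified = serde._simplify("hello")
#   assert serde._detail(syft.local_worker, simplified) == "hello"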
def test_dict_simplify():
"""This tests our ability to simplify dict objects.
This test is pretty simple since dicts just serialize to
themselves, with a tuple wrapper with the correct ID
for dicts so that the detailer knows how to interpret it."""
input = {"hello": "world"}
detail_dict_index = serde.detailers.index(native_serde._detail_dictionary)
detail_str_index = serde.detailers.index(native_serde._detail_str)
target = (
detail_dict_index,
[((detail_str_index, (b"hello",)), (detail_str_index, (b"world",)))],
)
assert serde._simplify(input) == target
def test_range_simplify():
"""This tests our ability to simplify range objects.
This test is pretty simple since range objs just serialize to
    themselves, with a tuple wrapper with the correct ID
    for ranges so that the detailer knows how to interpret it."""
input = range(1, 3, 4)
target = (serde.detailers.index(native_serde._detail_range), (1, 3, 4))
assert serde._simplify(input) == target
def test_torch_tensor_simplify():
"""This tests our ability to simplify torch.Tensor objects
At the time of writing, tensors simplify to a tuple where the
first value in the tuple is the tensor's ID and the second
value is a serialized version of the Tensor (serialized
by PyTorch's torch.save method)
"""
# create a tensor
input = Tensor(numpy.random.random((100, 100)))
    # simplify the tensor
output = serde._simplify(input)
# make sure outer type is correct
assert type(output) == tuple
    # make sure the object type ID maps to the torch.Tensor detailer
assert serde.detailers[output[0]] == torch_serde._detail_torch_tensor
# make sure inner type is correct
assert type(output[1]) == tuple
# make sure ID is correctly encoded
assert output[1][0] == input.id
# make sure tensor data type is correct
assert type(output[1][1]) == bytes
def test_ndarray_simplify():
"""This tests our ability to simplify numpy.array objects
At the time of writing, arrays simplify to an object inside
    of a tuple which specifies the ID for the np.array type so
    that the detailer knows to turn the simplified form back into an np.array
"""
input = numpy.random.random((100, 100))
output = serde._simplify(input)
# make sure simplified type ID is correct
assert serde.detailers[output[0]] == torch_serde._detail_ndarray
# make sure serialized form is correct
assert type(output[1][0]) == bytes
assert output[1][1] == input.shape
assert output[1][2] == input.dtype.name
def test_ellipsis_simplify():
"""Make sure ellipsis simplifies correctly."""
assert serde.detailers[serde._simplify(Ellipsis)[0]] == native_serde._detail_ellipsis
# the simplified ellipsis (empty object)
assert serde._simplify(Ellipsis)[1] == b""
def test_torch_device_simplify():
"""Test the simplification of torch.device"""
device = torch.device("cpu")
assert serde.detailers[serde._simplify(device)[0]] == torch_serde._detail_torch_device
# the simplified torch.device
assert serde._simplify(device)[1] == "cpu"
def test_pointer_tensor_simplify():
"""Test the simplification of PointerTensor"""
alice = syft.VirtualWorker(syft.torch.hook, id="alice")
input_tensor = pointers.PointerTensor(id=1000, location=alice, owner=alice)
output = serde._simplify(input_tensor)
assert output[1][0] == input_tensor.id
assert output[1][1] == input_tensor.id_at_location
assert output[1][2] == input_tensor.owner.id
@pytest.mark.parametrize("compress", [True, False])
def test_torch_Tensor(compress):
if compress:
syft.serde._apply_compress_scheme = serde.apply_lz4_compression
else:
syft.serde._apply_compress_scheme = serde.apply_no_compression
t = Tensor(numpy.random.random((100, 100)))
t_serialized = serde.serialize(t)
t_serialized_deserialized = serde.deserialize(t_serialized)
assert (t == t_serialized_deserialized).all()
@pytest.mark.parametrize("compress", [True, False])
def test_torch_Tensor_convenience(compress):
"""This test evaluates torch.Tensor.serialize()
As opposed to using syft.serde.serialize(), torch objects
have a convenience function which lets you call .serialize()
directly on the tensor itself. This tests to makes sure it
works correctly."""
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
t = Tensor(numpy.random.random((100, 100)))
t_serialized = t.serialize()
t_serialized_deserialized = serde.deserialize(t_serialized)
assert (t == t_serialized_deserialized).all()
@pytest.mark.parametrize("compress", [True, False])
def test_tuple(compress):
# Test with a simple datatype
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
tuple = (1, 2)
tuple_serialized = serde.serialize(tuple)
tuple_serialized_deserialized = serde.deserialize(tuple_serialized)
assert tuple == tuple_serialized_deserialized
# Test with a complex data structure
tensor_one = Tensor(numpy.random.random((100, 100)))
tensor_two = Tensor(numpy.random.random((100, 100)))
tuple = (tensor_one, tensor_two)
tuple_serialized = serde.serialize(tuple)
tuple_serialized_deserialized = serde.deserialize(tuple_serialized)
# `assert tuple_serialized_deserialized == tuple` does not work, therefore it's split
# into 3 assertions
assert type(tuple_serialized_deserialized) == type(tuple)
assert (tuple_serialized_deserialized[0] == tensor_one).all()
assert (tuple_serialized_deserialized[1] == tensor_two).all()
@pytest.mark.parametrize("compress", [True, False])
def test_bytearray(compress):
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
bytearr = bytearray("This is a teststring", "utf-8")
    bytearr_serialized = serde.serialize(bytearr)
    bytearr_serialized_deserialized = serde.deserialize(bytearr_serialized)
    assert bytearr == bytearr_serialized_deserialized
    bytearr = bytearray(numpy.random.random((100, 100)))
    bytearr_serialized = serde.serialize(bytearr)
    bytearr_serialized_deserialized = serde.deserialize(bytearr_serialized)
    assert bytearr == bytearr_serialized_deserialized
@pytest.mark.parametrize("compress", [True, False])
def test_ndarray_serde(compress):
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
arr = numpy.random.random((100, 100))
arr_serialized = serde.serialize(arr)
arr_serialized_deserialized = serde.deserialize(arr_serialized)
assert numpy.array_equal(arr, arr_serialized_deserialized)
@pytest.mark.parametrize("compress_scheme", [serde.LZ4, serde.ZSTD, serde.NO_COMPRESSION])
def test_compress_decompress(compress_scheme):
if compress_scheme == serde.LZ4:
serde._apply_compress_scheme = serde.apply_lz4_compression
elif compress_scheme == serde.ZSTD:
serde._apply_compress_scheme = serde.apply_zstd_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
original = msgpack.dumps([1, 2, 3])
compressed = serde._compress(original)
decompressed = serde._decompress(compressed)
assert type(compressed) == bytes
assert original == decompressed
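# Editorial sketch (assumption: the `lz4` package that backs serde's LZ4
# scheme is installed): the scheme exercised above wraps a standard codec,
# so a bare round trip can be demonstrated independently of serde.
def test_lz4_roundtrip_sketch():
    import lz4.frame

    payload = msgpack.dumps([1, 2, 3])
    compressed = lz4.frame.compress(payload)
    assert lz4.frame.decompress(compressed) == payload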
@pytest.mark.parametrize("compress_scheme", [serde.LZ4, serde.ZSTD, serde.NO_COMPRESSION])
def test_compressed_serde(compress_scheme):
if compress_scheme == serde.LZ4:
serde._apply_compress_scheme = serde.apply_lz4_compression
elif compress_scheme == serde.ZSTD:
serde._apply_compress_scheme = serde.apply_zstd_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
    # using numpy.ones because the output of numpy.random.random is effectively incompressible.
arr = numpy.ones((100, 100))
arr_serialized = serde.serialize(arr)
arr_serialized_deserialized = serde.deserialize(arr_serialized)
assert numpy.array_equal(arr, arr_serialized_deserialized)
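# Editorial sketch (assumes the `lz4` package is installed): why the test
# above prefers numpy.ones -- a constant buffer shrinks dramatically under
# LZ4, while random doubles barely compress at all.
def test_compressibility_sketch():
    import lz4.frame

    constant = numpy.ones((100, 100)).tobytes()
    noise = numpy.random.random((100, 100)).tobytes()
    # repeating 8-byte pattern: compresses to a small fraction of the input
    assert len(lz4.frame.compress(constant)) < len(constant) // 10
    # random bytes: the "compressed" form stays close to the input size
    assert len(lz4.frame.compress(noise)) > len(noise) // 2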
@pytest.mark.parametrize("compress", [True, False])
def test_dict(compress):
# Test with integers
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
_dict = {1: 1, 2: 2, 3: 3}
dict_serialized = serde.serialize(_dict)
dict_serialized_deserialized = serde.deserialize(dict_serialized)
assert _dict == dict_serialized_deserialized
# Test with strings
_dict = {"one": 1, "two": 2, "three": 3}
dict_serialized = serde.serialize(_dict)
dict_serialized_deserialized = serde.deserialize(dict_serialized)
assert _dict == dict_serialized_deserialized
# Test with a complex data structure
tensor_one = Tensor(numpy.random.random((100, 100)))
tensor_two = Tensor(numpy.random.random((100, 100)))
_dict = {0: tensor_one, 1: tensor_two}
dict_serialized = serde.serialize(_dict)
dict_serialized_deserialized = serde.deserialize(dict_serialized)
# `assert dict_serialized_deserialized == _dict` does not work, therefore it's split
# into 3 assertions
assert type(dict_serialized_deserialized) == type(_dict)
assert (dict_serialized_deserialized[0] == tensor_one).all()
assert (dict_serialized_deserialized[1] == tensor_two).all()
@pytest.mark.parametrize("compress", [True, False])
def test_range_serde(compress):
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
_range = range(1, 2, 3)
range_serialized = serde.serialize(_range)
range_serialized_deserialized = serde.deserialize(range_serialized)
assert _range == range_serialized_deserialized
@pytest.mark.parametrize("compress", [True, False])
def test_list(compress):
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
# Test with integers
_list = [1, 2]
list_serialized = serde.serialize(_list)
list_serialized_deserialized = serde.deserialize(list_serialized)
assert _list == list_serialized_deserialized
# Test with strings
_list = ["hello", "world"]
list_serialized = serde.serialize(_list)
list_serialized_deserialized = serde.deserialize(list_serialized)
assert _list == list_serialized_deserialized
# Test with a complex data structure
tensor_one = Tensor(numpy.ones((100, 100)))
tensor_two = Tensor(numpy.ones((100, 100)) * 2)
    _list = [tensor_one, tensor_two]
list_serialized = serde.serialize(_list)
if compress:
assert list_serialized[0] == serde.LZ4
else:
assert list_serialized[0] == serde.NO_COMPRESSION
list_serialized_deserialized = serde.deserialize(list_serialized)
# `assert list_serialized_deserialized == _list` does not work, therefore it's split
# into 3 assertions
assert type(list_serialized_deserialized) == type(_list)
assert (list_serialized_deserialized[0] == tensor_one).all()
assert (list_serialized_deserialized[1] == tensor_two).all()
@pytest.mark.parametrize("compress", [True, False])
def test_set(compress):
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
# Test with integers
_set = set([1, 2])
set_serialized = serde.serialize(_set)
set_serialized_deserialized = serde.deserialize(set_serialized)
assert _set == set_serialized_deserialized
# Test with strings
_set = set(["hello", "world"])
set_serialized = serde.serialize(_set)
set_serialized_deserialized = serde.deserialize(set_serialized)
assert _set == set_serialized_deserialized
    # Test with a complex data structure (tensors are not hashable, so a
    # tuple of tensors stands in for the set here)
tensor_one = Tensor(numpy.ones((100, 100)))
tensor_two = Tensor(numpy.ones((100, 100)) * 2)
_set = (tensor_one, tensor_two)
set_serialized = serde.serialize(_set)
if compress:
assert set_serialized[0] == serde.LZ4
else:
assert set_serialized[0] == serde.NO_COMPRESSION
set_serialized_deserialized = serde.deserialize(set_serialized)
# `assert set_serialized_deserialized == _set` does not work, therefore it's split
# into 3 assertions
assert type(set_serialized_deserialized) == type(_set)
assert (set_serialized_deserialized[0] == tensor_one).all()
assert (set_serialized_deserialized[1] == tensor_two).all()
@pytest.mark.parametrize("compress", [True, False])
def test_slice(compress):
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
s = slice(0, 100, 2)
x = numpy.random.rand(100)
s_serialized = serde.serialize(s)
s_serialized_deserialized = serde.deserialize(s_serialized)
assert type(s) == type(s_serialized_deserialized)
assert (x[s] == x[s_serialized_deserialized]).all()
s = slice(40, 50)
x = numpy.random.rand(100)
s_serialized = serde.serialize(s)
s_serialized_deserialized = serde.deserialize(s_serialized)
assert type(s) == type(s_serialized_deserialized)
assert (x[s] == x[s_serialized_deserialized]).all()
@pytest.mark.parametrize("compress", [True, False])
def test_float(compress):
if compress:
serde._apply_compress_scheme = serde.apply_lz4_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
x = 0.5
y = 1.5
x_serialized = serde.serialize(x)
x_serialized_deserialized = serde.deserialize(x_serialized)
y_serialized = serde.serialize(y)
y_serialized_deserialized = serde.deserialize(y_serialized)
assert x_serialized_deserialized == x
assert y_serialized_deserialized == y
@pytest.mark.parametrize(
"compress, compress_scheme",
[
(True, serde.LZ4),
(False, serde.LZ4),
(True, serde.ZSTD),
(False, serde.ZSTD),
(True, serde.NO_COMPRESSION),
(False, serde.NO_COMPRESSION),
],
)
def test_hooked_tensor(compress, compress_scheme):
if compress:
if compress_scheme == serde.LZ4:
serde._apply_compress_scheme = serde.apply_lz4_compression
elif compress_scheme == serde.ZSTD:
serde._apply_compress_scheme = serde.apply_zstd_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
else:
serde._apply_compress_scheme = serde.apply_no_compression
t = Tensor(numpy.ones((100, 100)))
t_serialized = serde.serialize(t)
    expected_scheme = compress_scheme if compress else serde.NO_COMPRESSION
    assert t_serialized[0] == expected_scheme
t_serialized_deserialized = serde.deserialize(t_serialized)
assert (t == t_serialized_deserialized).all()
def test_pointer_tensor(hook, workers):
serde._apply_compress_scheme = serde.apply_no_compression
t = pointers.PointerTensor(
id=1000, location=workers["alice"], owner=workers["alice"], id_at_location=12345
)
t_serialized = serde.serialize(t)
t_serialized_deserialized = serde.deserialize(t_serialized)
assert t.id == t_serialized_deserialized.id
assert t.location.id == t_serialized_deserialized.location.id
assert t.id_at_location == t_serialized_deserialized.id_at_location
@pytest.mark.parametrize("id", [1000, "1000"])
def test_pointer_tensor_detail(id):
alice = syft.VirtualWorker(syft.torch.hook, id=id)
x = torch.tensor([1, -1, 3, 4])
x_ptr = x.send(alice)
x_ptr = 2 * x_ptr
x_back = x_ptr.get()
assert (x_back == 2 * x).all()
def test_numpy_tensor_serde():
serde._apply_compress_scheme = serde.apply_lz4_compression
serde._serialize_tensor = syft.serde.numpy_tensor_serializer
serde._deserialize_tensor = syft.serde.numpy_tensor_deserializer
tensor = torch.tensor(numpy.ones((10, 10)), requires_grad=False)
tensor_serialized = serde.serialize(tensor)
assert tensor_serialized[0] != serde.NO_COMPRESSION
tensor_deserialized = serde.deserialize(tensor_serialized)
    # Restore the default PyTorch serializer
serde._serialize_tensor = syft.serde.torch_tensor_serializer
serde._deserialize_tensor = syft.serde.torch_tensor_deserializer
assert torch.eq(tensor_deserialized, tensor).all()
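# Editorial sketch: swapping the module-level (de)serializers, as done above,
# leaks global state if an assertion fails before the restore. A context
# manager (hypothetical helper, not part of syft) keeps the suite isolated.
import contextlib

@contextlib.contextmanager
def _swap_tensor_serializer_sketch(serializer, deserializer):
    serde._serialize_tensor = serializer
    serde._deserialize_tensor = deserializer
    try:
        yield
    finally:
        # always restore the PyTorch defaults, even if the body raises
        serde._serialize_tensor = syft.serde.torch_tensor_serializer
        serde._deserialize_tensor = syft.serde.torch_tensor_deserializer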
@pytest.mark.parametrize("compress", [True, False])
def test_additive_sharing_tensor_serde(compress, workers):
alice, bob, james = workers["alice"], workers["bob"], workers["james"]
x = torch.tensor([[3.1, 4.3]]).fix_prec().share(alice, bob, crypto_provider=james)
additive_sharing_tensor = x.child.child
data = syft.AdditiveSharingTensor.simplify(additive_sharing_tensor)
additive_sharing_tensor_reconstructed = syft.AdditiveSharingTensor.detail(
syft.hook.local_worker, data
)
assert additive_sharing_tensor_reconstructed.field == additive_sharing_tensor.field
assert (
additive_sharing_tensor_reconstructed.child.keys() == additive_sharing_tensor.child.keys()
)
@pytest.mark.parametrize("compress", [True, False])
def test_fixed_precision_tensor_serde(compress, workers):
alice, bob, james = workers["alice"], workers["bob"], workers["james"]
x = (
torch.tensor([[3.1, 4.3]])
.fix_prec(base=12, precision_fractional=5)
.share(alice, bob, crypto_provider=james)
)
serialized_x = serde.serialize(x)
    deserialized_x = serde.deserialize(serialized_x)
    assert x.id == deserialized_x.child.id
    assert x.child.field == deserialized_x.child.field
    assert x.child.kappa == deserialized_x.child.kappa
    assert x.child.precision_fractional == deserialized_x.child.precision_fractional
    assert x.child.base == deserialized_x.child.base
def test_serde_object_wrapper_int():
obj = 4
obj_wrapper = pointers.ObjectWrapper(obj, id=100)
msg = serde.serialize(obj_wrapper)
obj_wrapper_received = serde.deserialize(msg)
assert obj_wrapper.obj == obj_wrapper_received.obj
assert obj_wrapper.id == obj_wrapper_received.id
@pytest.mark.skipif(
torch.__version__ >= "1.1",
reason="bug in pytorch version 1.1.0, jit.trace returns raw C function",
)
def test_serialize_and_deserialize_torch_scriptmodule(): # pragma: no cover
@torch.jit.script
def foo(x):
return x + 2
bin_message = torch_serde._simplify_script_module(foo)
foo_loaded = torch_serde._detail_script_module(None, bin_message)
assert foo.code == foo_loaded.code
@pytest.mark.skipif(
torch.__version__ >= "1.1",
reason="bug in pytorch version 1.1.0, jit.trace returns raw C function",
)
def test_torch_jit_script_module_serde(): # pragma: no cover
@torch.jit.script
def foo(x):
return x + 2
msg = serde.serialize(foo)
foo_received = serde.deserialize(msg)
assert foo.code == foo_received.code
def test_serde_virtual_worker(hook):
virtual_worker = syft.VirtualWorker(hook=hook, id="deserialized_worker1")
# Populate worker
tensor1, tensor2 = torch.tensor([1.0, 2.0]), torch.tensor([0.0])
ptr1, ptr2 = tensor1.send(virtual_worker), tensor2.send(virtual_worker)
serialized_worker = serde.serialize(virtual_worker, force_full_simplification=False)
deserialized_worker = serde.deserialize(serialized_worker)
assert virtual_worker.id == deserialized_worker.id
def test_full_serde_virtual_worker(hook):
virtual_worker = syft.VirtualWorker(hook=hook, id="deserialized_worker2")
# Populate worker
tensor1, tensor2 = torch.tensor([1.0, 2.0]), torch.tensor([0.0])
ptr1, ptr2 = tensor1.send(virtual_worker), tensor2.send(virtual_worker)
serialized_worker = serde.serialize(virtual_worker, force_full_simplification=True)
deserialized_worker = serde.deserialize(serialized_worker)
assert virtual_worker.id == deserialized_worker.id
assert virtual_worker.auto_add == deserialized_worker.auto_add
assert len(deserialized_worker._objects) == 2
assert tensor1.id in deserialized_worker._objects
assert tensor2.id in deserialized_worker._objects
def test_serde_object_wrapper_traced_module():
data = torch.tensor([[-1, 2.0], [0, 1.1], [-1, 2.1], [0, 1.2]])
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(2, 3)
def forward(self, x):
x = torch.nn.functional.relu(self.fc1(x))
return x
obj = torch.jit.trace(Net(), data)
obj_wrapper = pointers.ObjectWrapper(obj, id=200)
msg = serde.serialize(obj_wrapper)
obj_wrapper_received = serde.deserialize(msg)
pred_before = obj(data)
pred_after = obj_wrapper_received.obj(data)
assert (pred_before == pred_after).all()
assert obj_wrapper.id == obj_wrapper_received.id
|
{"hexsha": "e8362f2c34f04f495f7df297b8f3c8d8f84bf5e5", "size": 24589, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_serde.py", "max_stars_repo_name": "1000ping/PySyft", "max_stars_repo_head_hexsha": "4d8cb0de436d7335bba6eb0a4a18402698ad3964", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_serde.py", "max_issues_repo_name": "1000ping/PySyft", "max_issues_repo_head_hexsha": "4d8cb0de436d7335bba6eb0a4a18402698ad3964", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_serde.py", "max_forks_repo_name": "1000ping/PySyft", "max_forks_repo_head_hexsha": "4d8cb0de436d7335bba6eb0a4a18402698ad3964", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6323943662, "max_line_length": 100, "alphanum_fraction": 0.7236569198, "include": true, "reason": "import numpy", "num_tokens": 5891}
|
import numpy as np
import statistics as stats
#use the numpy library to import the csv file as an ndarray and assign a variable to each column vector
data = np.genfromtxt('data/iris.csv', delimiter=',')
sepl = data[:,0]
sepw = data[:,1]
petl = data[:,2]
petw = data[:,3]
#using the numpy and statistics libraries, print and format the results
print('\n\nSepal Length')
print('Min: {0:.1f}'.format(np.min(sepl)))
print('Max: {0:.1f}'.format(np.max(sepl)))
print('Mean: {0:.2f}'.format(np.mean(sepl)))
print('Mode: {0:.2f}'.format(stats.mode(sepl)))
print('Median: {0:.2f}'.format(stats.median(sepl)))
print('Std Dev: {0:.2f}'.format(stats.pstdev(sepl)))
print('Variance: {0:.2f}\n'.format(stats.pvariance(sepl)))
print('\nSepal Width')
print('Min: {0:.1f}'.format(np.min(sepw)))
print('Max: {0:.1f}'.format(np.max(sepw)))
print('Mean: {0:.2f}'.format(np.mean(sepw)))
print('Mode: {0:.2f}'.format(stats.mode(sepw)))
print('Median: {0:.2f}'.format(stats.median(sepw)))
print('Std Dev: {0:.2f}'.format(stats.pstdev(sepw)))
print('Variance: {0:.2f}\n'.format(stats.pvariance(sepw)))
print('\nPetal Length')
print('Min: {0:.1f}'.format(np.min(petl)))
print('Max: {0:.1f}'.format(np.max(petl)))
print('Mean: {0:.2f}'.format(np.mean(petl)))
print('Mode: {0:.2f}'.format(stats.mode(petl)))
print('Median: {0:.2f}'.format(stats.median(petl)))
print('Std Dev: {0:.2f}'.format(stats.pstdev(petl)))
print('Variance: {0:.2f}\n'.format(stats.pvariance(petl)))
print('\nPetal Width')
print('Min: {0:.1f}'.format(np.min(petw)))
print('Max: {0:.1f}'.format(np.max(petw)))
print('Mean: {0:.2f}'.format(np.mean(petw)))
print('Mode: {0:.2f}'.format(stats.mode(petw)))
print('Median: {0:.2f}'.format(stats.median(petw)))
print('Std Dev: {0:.2f}'.format(stats.pstdev(petw)))
print('Variance: {0:.2f}\n\n'.format(stats.pvariance(petw)))
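#editorial sketch: the four blocks above differ only in the column they
#summarise; an equivalent loop-based formulation (same statistics, same
#formatting) would be:
def print_summaries_sketch():
    for name, col in [('Sepal Length', sepl), ('Sepal Width', sepw),
                      ('Petal Length', petl), ('Petal Width', petw)]:
        print('\n\n' + name)
        print('Min: {0:.1f}'.format(np.min(col)))
        print('Max: {0:.1f}'.format(np.max(col)))
        print('Mean: {0:.2f}'.format(np.mean(col)))
        print('Mode: {0:.2f}'.format(stats.mode(col)))
        print('Median: {0:.2f}'.format(stats.median(col)))
        print('Std Dev: {0:.2f}'.format(stats.pstdev(col)))
        print('Variance: {0:.2f}'.format(stats.pvariance(col)))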
#References
#https://docs.scipy.org/doc/numpy-1.12.0/reference/generated/numpy.genfromtxt.html#numpy.genfromtxt
#https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.math.html
#https://docs.python.org/3/library/statistics.html#module-statistics
|
{"hexsha": "c56c6ece91365331cd15a632e505506efe8426a0", "size": 2241, "ext": "py", "lang": "Python", "max_stars_repo_path": "06_iris_numpy_stats.py", "max_stars_repo_name": "tommirrington/52167-Programming-and-Scripting-Final-Project", "max_stars_repo_head_hexsha": "c78fa312eb9f8db4d43fa472b8ab536934a5d55c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "06_iris_numpy_stats.py", "max_issues_repo_name": "tommirrington/52167-Programming-and-Scripting-Final-Project", "max_issues_repo_head_hexsha": "c78fa312eb9f8db4d43fa472b8ab536934a5d55c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "06_iris_numpy_stats.py", "max_forks_repo_name": "tommirrington/52167-Programming-and-Scripting-Final-Project", "max_forks_repo_head_hexsha": "c78fa312eb9f8db4d43fa472b8ab536934a5d55c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0178571429, "max_line_length": 99, "alphanum_fraction": 0.6189201249, "include": true, "reason": "import numpy", "num_tokens": 707}
|
import numpy as np
import networkx as nx
import math
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
# load the GraphRicciCurvature package
from GraphRicciCurvature.OllivierRicci import OllivierRicci
from GraphRicciCurvature.FormanRicci import FormanRicci
from collections import defaultdict, Counter
vals = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
def convertpdb(filename):
    """Parse the ATOM records of a PDB file and save the atom names, element
    symbols and xyz coordinates to an .npz archive with the same base name."""
    f = open(filename, "r")
    if f.mode == 'r':
        contents = f.readlines()
#recordname = []
#atomNum = []
atomName = []
#altLoc = []
#resName = []
#chainID = []
#resNum = []
X = []
Y = []
Z = []
#occupancy = []
#betaFactor = []
element = []
#charge = []
for i in range(len(contents)):
thisLine = contents[i]
if thisLine[0:4]=='ATOM':
#recordname = np.append(recordname,thisLine[:6].strip())
#atomNum = np.append(atomNum, float(thisLine[6:11]))
atomName = np.append(atomName, thisLine[12:16])
#altLoc = np.append(altLoc,thisLine[16])
#resName = np.append(resName, thisLine[17:20].strip())
#chainID = np.append(chainID, thisLine[21])
#resNum = np.append(resNum, float(thisLine[23:26]))
X = np.append(X, float(thisLine[30:38]))
Y = np.append(Y, float(thisLine[38:46]))
Z = np.append(Z, float(thisLine[46:54]))
#occupancy = np.append(occupancy, float(thisLine[55:60]))
#betaFactor = np.append(betaFactor, float(thisLine[61:66]))
element = np.append(element,thisLine[12:14])
#print(atomName)
a = {'PRO': [{'atom': atomName, 'typ': element, 'pos': np.transpose([X,Y,Z])}]}
np.savez(filename[:-4]+".npz", **a)
def gen_graph(filename, cutoff):
    """Build an undirected graph whose nodes are atoms and whose edges connect
    atom pairs whose (rounded) distance is at most `cutoff`."""
    data = np.load(filename, allow_pickle = True)
for item in data['PRO']:
coords = item['pos']
atoms = item['atom']
G = nx.Graph()
for i in range(len(atoms)):
G.add_node(i, atom = atoms[i], coords = coords[i])
        # visit each unordered pair once; the graph is undirected
        for i in range(len(coords)):
            for j in range(i + 1, len(coords)):
                dist = np.linalg.norm(coords[i] - coords[j])
                if round(dist, 2) <= cutoff:
                    G.add_edge(i, j)  # , weight = dist)
return G
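# Editorial sketch (assumes scipy is available): the all-pairs loop above is
# O(n^2); the same cutoff graph can be built with a KD-tree neighbour query.
# Small caveat: query_pairs compares the exact distance to the cutoff,
# whereas the loop above rounds the distance to 2 decimals first.
def gen_graph_kdtree_sketch(filename, cutoff):
    from scipy.spatial import cKDTree
    data = np.load(filename, allow_pickle=True)
    item = data['PRO'][0]
    coords, atoms = item['pos'], item['atom']
    G = nx.Graph()
    for i in range(len(atoms)):
        G.add_node(i, atom=atoms[i], coords=coords[i])
    # query_pairs yields every unordered index pair within radius `cutoff`
    G.add_edges_from(cKDTree(coords).query_pairs(r=cutoff))
    return G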
x = np.round(np.arange(-1, 1.01, 0.01),2)
edge_mat = []
vertex_mat = []
for j in vals:
raw_output = []
for id in range(101):
s = str(id)
while len(s) < 4:
s = '0'+ s
print("Processing Frame ", id, ": ", j)
convertpdb("tmao/md_"+j+"_tmao_4p_tf_"+s+"_OW.pdb")
data = np.load("tmao/md_"+j+"_tmao_4p_tf_"+s+"_OW.npz", allow_pickle=True)
G = gen_graph("tmao/md_"+j+"_tmao_4p_tf_"+s+"_OW.npz", 4)
y = np.zeros(len(x))
vertices = np.zeros(G.number_of_nodes())
cnts = defaultdict(int)
if G.number_of_edges() > 0:
orc = OllivierRicci(G, alpha=0.5)
orc.compute_ricci_curvature()
e_temp, v_temp = nx.get_edge_attributes(orc.G, "ricciCurvature"), nx.get_node_attributes(orc.G, "ricciCurvature")
raw_output.append(list(v_temp.values()))
raw_output.append(list(e_temp.values()))
np.savez("tmao_raw_op_"+j+".npz", raw_output)
x = np.round(np.arange(-1, 1.01, 0.01),2)
edge_mat = []
vertex_mat = []
for j in vals:
raw_output = []
for id in range(101):
s = str(id)
while len(s) < 4:
s = '0'+ s
print("Processing Frame ", id, ": ", j)
convertpdb("urea/md_"+j+"_urea_4p_tf_"+s+"_OW.pdb")
data = np.load("urea/md_"+j+"_urea_4p_tf_"+s+"_OW.npz", allow_pickle=True)
G = gen_graph("urea/md_"+j+"_urea_4p_tf_"+s+"_OW.npz", 4)
y = np.zeros(len(x))
vertices = np.zeros(G.number_of_nodes())
cnts = defaultdict(int)
if G.number_of_edges() > 0:
orc = OllivierRicci(G, alpha=0.5)
orc.compute_ricci_curvature()
e_temp, v_temp = nx.get_edge_attributes(orc.G, "ricciCurvature"), nx.get_node_attributes(orc.G, "ricciCurvature")
raw_output.append(list(v_temp.values()))
raw_output.append(list(e_temp.values()))
np.savez("urea_raw_op_"+j+".npz", raw_output)
def plot_tmao_avg_vertex():
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure(figsize=(5,5), dpi=150)
for i in range(len(conc)):
data = np.load("tmao_raw_op_"+conc[i]+".npz", allow_pickle=True)
edge_data = data["arr_0"][1::2]
vertex_data = data["arr_0"][::2]
e_ = []
for j in range(len(edge_data)):
for k in range(len(edge_data[j])):
e_.append(edge_data[j][k])
v_ = []
for j in range(len(vertex_data)):
for k in range(len(vertex_data[j])):
v_.append(vertex_data[j][k])
color = plt.cm.jet(i/8)
sns.kdeplot(v_, color=color, label = conc[i], gridsize=200)
plt.xticks(fontsize=11)
plt.yticks( fontsize=11)
plt.legend()
plt.axis([-1, 1, 0, 6])
plt.xlabel("Vertex Curvature", fontsize=12)
plt.ylabel("Average Density", fontsize=12)
plt.title("$H_{2}O$ (Tmao) - TIP4P")
#plt.savefig("tmao_avg_vertex.pdf", dpi=200)
plt.show()
def plot_tmao_avg_edge():
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure(figsize=(5,5), dpi=150)
for i in range(len(conc)):
data = np.load("tmao_raw_op_"+conc[i]+".npz", allow_pickle=True)
edge_data = data["arr_0"][1::2]
vertex_data = data["arr_0"][::2]
e_ = []
for j in range(len(edge_data)):
for k in range(len(edge_data[j])):
e_.append(edge_data[j][k])
v_ = []
for j in range(len(vertex_data)):
for k in range(len(vertex_data[j])):
v_.append(vertex_data[j][k])
color = plt.cm.jet(i/8)
sns.kdeplot(e_, color=color, label = conc[i])
plt.xticks(fontsize=11)
plt.yticks( fontsize=11)
plt.legend()
plt.axis([-1, 1, 0, 4.5])
plt.xlabel("Edge Curvature", fontsize=12)
plt.ylabel("Average Density", fontsize=12)
plt.title("$H_{2}O$ (Tmao) - TIP4P")
plt.savefig("tmao_avg_edge.pdf", dpi=200)
plt.show()
def plot_urea_avg_vertex():
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure(figsize=(5,5), dpi=150)
for i in range(len(conc)):
data = np.load("urea_raw_op_"+conc[i]+".npz", allow_pickle=True)
edge_data = data["arr_0"][1::2]
vertex_data = data["arr_0"][::2]
e_ = []
for j in range(len(edge_data)):
for k in range(len(edge_data[j])):
e_.append(edge_data[j][k])
v_ = []
for j in range(len(vertex_data)):
for k in range(len(vertex_data[j])):
v_.append(vertex_data[j][k])
color = plt.cm.jet(i/8)
sns.kdeplot(v_, color=color, label = conc[i])
plt.xticks(fontsize=11)
plt.yticks( fontsize=11)
plt.legend()
plt.axis([-1, 1, 0, 6])
plt.xlabel("Vertex Curvature", fontsize=12)
plt.ylabel("Average Density", fontsize=12)
plt.title("$H_{2}O$ (Urea) - TIP4P")
plt.savefig("urea_avg_vertex.pdf", dpi=200)
plt.show()
def plot_urea_avg_edge():
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure(figsize=(5,5), dpi=150)
for i in range(len(conc)):
data = np.load("urea_raw_op_"+conc[i]+".npz", allow_pickle=True)
edge_data = data["arr_0"][1::2]
vertex_data = data["arr_0"][::2]
e_ = []
for j in range(len(edge_data)):
for k in range(len(edge_data[j])):
e_.append(edge_data[j][k])
v_ = []
for j in range(len(vertex_data)):
for k in range(len(vertex_data[j])):
v_.append(vertex_data[j][k])
color = plt.cm.jet(i/8)
sns.kdeplot(e_, color=color, label = conc[i])
plt.xticks(fontsize=11)
plt.yticks( fontsize=11)
plt.legend()
plt.axis([-1, 1, 0, 4.5])
plt.xlabel("Edge Curvature", fontsize=12)
plt.ylabel("Average Density", fontsize=12)
plt.title("$H_{2}O$ (Urea) - TIP4P")
plt.savefig("urea_avg_edge.pdf", dpi=200)
plt.show()
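# Editorial sketch: the four averaging plots above share one body; a single
# parametrised helper (same styling, axis limits taken from the originals)
# would read:
def plot_avg_sketch(solute, kind):
    """`solute` is 'tmao' or 'urea'; `kind` is 'vertex' or 'edge'."""
    conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
    plt.figure(figsize=(5, 5), dpi=150)
    for i, c in enumerate(conc):
        data = np.load(solute + "_raw_op_" + c + ".npz", allow_pickle=True)
        # even entries hold vertex curvatures, odd entries edge curvatures
        stream = data["arr_0"][1::2] if kind == "edge" else data["arr_0"][::2]
        values = [v for frame in stream for v in frame]
        sns.kdeplot(values, color=plt.cm.jet(i / 8), label=c)
    plt.legend()
    plt.axis([-1, 1, 0, 4.5 if kind == "edge" else 6])
    plt.xlabel(kind.capitalize() + " Curvature", fontsize=12)
    plt.ylabel("Average Density", fontsize=12)
    plt.title("$H_{2}O$ (" + solute.capitalize() + ") - TIP4P")
    plt.show()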
def plot_tmao_3d():
vals = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
for i in range(len(vals)):
edge_data, vertex_data = [], []
x, y = [], []
data = np.load("tmao_raw_op_"+vals[i]+".npz", allow_pickle=True)
edge_data = data["arr_0"][1::2]
vertex_data = data["arr_0"][::2]
x = np.outer(range(0, 101), np.ones(128))
y, z = [], []
for j in range(101):
ax = sns.kdeplot(vertex_data[j])
y.append(list(ax.lines[j].get_data()[0]))
z.append(list(ax.lines[j].get_data()[1]))
fig = plt.figure(figsize=(10,5), dpi=200)
ax= fig.add_subplot(111, projection= '3d')
ax.plot_trisurf(np.reshape(y, (128*101)), np.reshape(x, (128*101)), np.reshape(z, (128*101)), cmap='jet', vmin = 0., vmax = 6.)
#fig = go.Figure(data=[go.Surface(z=z, x=x, y=y)])
#fig.update_traces(contours_z=dict(show=True, usecolormap=True,
#highlightcolor="limegreen", project_z=True))
#surf = ax.plot_surface(np.array(y), np.array(x), np.array(z),cmap='jet',linewidth=0,antialiased='True',rstride=3,cstride=3)
ax.contourf(np.array(y), np.array(x), np.array(z), 100, zdir='z', offset=-2,cmap='jet', vmin = 0., vmax = 6.)
#ax.set_title('$H_{2}O$ (Tmao) - TIP4P - '+vals[i])
ax.view_init(elev=10., azim=-120)
ax.set_xlabel('Vertex Curvature', rotation=0, labelpad=15)
ax.set_ylabel('Frame Index', rotation=0, labelpad=15)
ax.set_zlabel('Density', rotation=90,labelpad=15)
#ax.tick_params(axis='z', pad=10)
ax.zaxis.set_rotate_label(False)
ax.grid(False)
#ax.set_xlim([-0.6, 1])
ax.set_ylim([0, 100])
ax.set_zlim([-2, 6.1])
m = plt.cm.ScalarMappable(cmap=plt.cm.jet)
m.set_clim(0, 6.)
plt.colorbar(m, extend="both", shrink=0.75, pad=-0.05)
plt.tight_layout()
plt.savefig("tmao_3d_vertex_"+vals[i]+".pdf")
#fig.colorbar(surf)
plt.show()
def plot_urea_3d():
vals = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
for i in range(len(vals)):
edge_data, vertex_data = [], []
x, y = [], []
data = np.load("urea_raw_op_"+vals[i]+".npz", allow_pickle=True)
edge_data = data["arr_0"][1::2]
vertex_data = data["arr_0"][::2]
x = np.outer(range(0, 101), np.ones(128))
y, z = [], []
for j in range(101):
ax = sns.kdeplot(vertex_data[j])
y.append(list(ax.lines[j].get_data()[0]))
z.append(list(ax.lines[j].get_data()[1]))
fig = plt.figure(figsize=(10,5), dpi=200)
ax= fig.add_subplot(111, projection= '3d')
ax.plot_trisurf(np.reshape(y, (128*101)), np.reshape(x, (128*101)), np.reshape(z, (128*101)), cmap='jet', vmin = 4., vmax = 6.)
#fig = go.Figure(data=[go.Surface(z=z, x=x, y=y)])
#fig.update_traces(contours_z=dict(show=True, usecolormap=True,
#highlightcolor="limegreen", project_z=True))
#surf = ax.plot_surface(np.array(y), np.array(x), np.array(z),cmap='jet',linewidth=0,antialiased='True',rstride=3,cstride=3)
ax.contourf(np.array(y), np.array(x), np.array(z), 100, zdir='z', offset=-2,cmap='jet', vmin = 4., vmax = 6.)
#ax.set_title('$H_{2}O$ (Urea) - TIP4P - '+vals[i])
ax.view_init(elev=10., azim=-120)
ax.set_xlabel('Vertex Curvature', rotation=0, labelpad=15, fontsize=20)
ax.set_ylabel('Frame Index', rotation=0, labelpad=15, fontsize=20)
ax.set_zlabel('Density', rotation=90,labelpad=15, fontsize=20)
ax.tick_params(axis='z', pad=10)
ax.zaxis.set_rotate_label(False)
ax.grid(False)
#ax.set_xlim([-0.6, 1])
ax.set_ylim([0, 100])
ax.set_zlim([-2, 6.1])
m = plt.cm.ScalarMappable(cmap=plt.cm.jet)
m.set_clim(4., 6.)
plt.colorbar(m, extend="both", shrink=0.75, pad=-0.05)
plt.tight_layout()
plt.savefig("Urea_3d_vertex_"+vals[i]+".pdf")
#fig.colorbar(surf)
plt.show()
def plot_urea_sample():
data = np.load("urea_raw_op.npz", allow_pickle=True)
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure(figsize=(5,5), dpi=150)
for i in range(len(data["arr_0"])//2):
color = plt.cm.jet(i/(len(data["arr_0"])//2))
sns.distplot(data["arr_0"][2*i], color=color, label = conc[i], kde_kws={'linewidth': 1} , norm_hist=False, kde=True, hist=False)
plt.xticks(fontsize=11)
plt.yticks( fontsize=11)
plt.legend()
plt.axis([-1, 1, 0, 6])
plt.title("$H_{2}O$ (Urea) - TIP4P")
plt.xlabel("Vertex Curvature", fontsize=12)
plt.ylabel("Density", fontsize=12)
plt.savefig("urea_vertex.eps", dpi=200)
plt.show()
plt.figure(figsize=(5,5), dpi=150)
for i in range(len(vals)):
color = plt.cm.jet(i/len(vals))
sns.kdeplot(data["arr_0"][2*i+1], color=color, label = conc[i], linewidth = 1)
plt.xticks(fontsize=11)
plt.yticks( fontsize=11)
plt.legend()
plt.axis([-1, 1, 0, 3.1])
plt.title("$H_{2}O$ (Urea) - TIP4P")
plt.xlabel("Edge Curvature", fontsize=12)
plt.ylabel("Density", fontsize=12)
plt.savefig("urea_edge.eps", dpi=200)
plt.show()
def plot_tmao_sample():
data = np.load("tmao_raw_op.npz", allow_pickle=True)
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure(figsize=(5,5), dpi=150)
for i in range(len(data["arr_0"])//2):
color = plt.cm.jet(i/(len(data["arr_0"])//2))
sns.distplot(data["arr_0"][2*i], color=color, label = conc[i], kde_kws={'linewidth': 1} , kde=True, hist=False, norm_hist=True)
plt.xticks(fontsize=11)
plt.yticks( fontsize=11)
plt.legend()
plt.axis([-1, 1, 0, 6])
plt.xlabel("Vertex Curvature", fontsize=12)
plt.ylabel("Density", fontsize=12)
plt.title("$H_{2}O$ (Tmao) - TIP4P")
plt.savefig("tmao_vertex.eps", dpi=200)
plt.show()
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure(figsize=(5,5), dpi=150)
for i in range(len(data["arr_0"])//2):
color = plt.cm.jet(i/(len(data["arr_0"])//2))
sns.kdeplot(data["arr_0"][2*i+1], color=color, label = conc[i], linewidth = 1)
plt.xticks(fontsize=11)
plt.yticks( fontsize=11)
plt.legend()
plt.axis([-1, 1, 0, 3.1])
plt.xlabel("Edge Curvature", fontsize=12)
plt.ylabel("Density", fontsize=12)
plt.title("$H_{2}O$ (Tmao) - TIP4P")
plt.savefig("tmao_edge.eps", dpi=200)
plt.show()
x = np.round(np.arange(-1, 1.01, 0.01),2)
f, axes = plt.subplots(2, 4, figsize=(15, 5), sharex=True, sharey=True, dpi=200)
plt.xlim(-1,1)
f.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("Vertex Curvature", fontsize=14)
plt.ylabel("Density", fontsize=14)
for j in range(len(data["arr_0"])//2):
if j >=4:
sns.distplot(data["arr_0"][2*j], bins=50, hist_kws=dict(edgecolor="k", lw=.5, alpha=0.7), color='tab:blue', kde_kws={"color":"tab:red", "lw": 2}, ax=axes[1][j-4], kde=False, norm_hist=True)
else:
sns.distplot(data["arr_0"][2*j], bins=50, hist_kws=dict(edgecolor="k", lw=.5, alpha=.7), color='tab:blue', kde_kws={"color":"tab:red", "lw": 2}, ax=axes[0][j], kde=False, norm_hist=True)
#plt.plot(x, y_, color='red')
#plt.xticks()
#plt.yticks()
for j in range(4):
axes[0][j].xaxis.set_tick_params(which='both', labelbottom=True)
axes[0][j].yaxis.set_tick_params(which='both', labelbottom=True)
axes[1][j].yaxis.set_tick_params(which='both', labelbottom=True)
#plt.ylim(0, 4)
plt.savefig("tmao_vertex_subplots.pdf", dpi=200)
#plt.show()
x = np.round(np.arange(-1, 1.01, 0.01),2)
f, axes = plt.subplots(2, 4, figsize=(15, 5), sharex=True, sharey=True, dpi=200)
plt.xlim(-1,1)
f.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("Edge Curvature", fontsize=14)
plt.ylabel("Density", fontsize=14)
for j in range(len(data["arr_0"])//2):
if j >=4:
sns.distplot(data["arr_0"][2*j+1], bins=50, hist_kws=dict(edgecolor="k", lw=.5, alpha=0.7), color='tab:blue', kde_kws={"color":"tab:red", "lw": 2}, ax=axes[1][j-4], kde=False, norm_hist=True)
else:
sns.distplot(data["arr_0"][2*j+1], bins=50, hist_kws=dict(edgecolor="k", lw=.5, alpha=.7), color='tab:blue', kde_kws={"color":"tab:red", "lw": 2}, ax=axes[0][j], kde=False, norm_hist=True)
#plt.plot(x, y_, color='red')
#plt.xticks()
#plt.yticks()
for j in range(4):
axes[0][j].xaxis.set_tick_params(which='both', labelbottom=True)
axes[0][j].yaxis.set_tick_params(which='both', labelbottom=True)
axes[1][j].yaxis.set_tick_params(which='both', labelbottom=True)
#plt.ylim(0, 4)
plt.savefig("tmao_edge_subplots.pdf", dpi=200)
#plt.show()
def plot_tmao_des():
data = np.load("tmao_raw_op.npz", allow_pickle=True)
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure()
f, axes = plt.subplots(2,1, figsize=(5, 5), dpi=200, sharex=True)
plt.xlim(-1,1)
plt.subplots_adjust(hspace=.3)
sns.distplot(data["arr_0"][14], bins=50, hist_kws=dict(edgecolor="k", lw=.5, alpha=0.7), color='tab:blue', kde_kws={"color":"tab:red", "lw": 2}, ax=axes[1], kde=False)
axes[1].set(xlabel="Vertex Curvature", ylabel="No. of Vertices")
sns.distplot(data["arr_0"][15], bins=50, hist_kws=dict(edgecolor="k", lw=.5, alpha=0.7), color='tab:blue', kde_kws={"color":"tab:red", "lw": 2}, ax=axes[0], kde=False)
axes[0].xaxis.set_tick_params(which='both', labelbottom=True)
axes[0].set(xlabel="Edge Curvature", ylabel="No. of Edges")
#plt.xticks(fontsize=11)
#plt.yticks( fontsize=11)
#plt.legend()
#plt.axis([-1, 1, 0, 6])
#plt.xlabel("Vertex Curvature", fontsize=12)
#plt.ylabel("Density", fontsize=12)
#plt.title("$H_{2}O$ (Tmao) - TIP4P")
plt.tight_layout()
plt.savefig("tmao_8M.pdf", dpi=200)
conc = ["1M", "2M", "3M", "4M", "5M", "6M", "7M", "8M"]
plt.figure()
f, axes = plt.subplots(2,1, figsize=(5, 5), dpi=200, sharex=True)
plt.xlim(-1,1)
plt.subplots_adjust(hspace=.3)
sns.distplot(data["arr_0"][14], bins=50, hist_kws=dict(edgecolor="k", lw=.5, alpha=0.7), color='tab:blue', kde_kws={"color":"tab:blue", "lw": 2}, ax=axes[1], hist=False)
axes[1].set(xlabel="Vertex Curvature", ylabel="Density")
sns.distplot(data["arr_0"][15], bins=50, hist_kws=dict(edgecolor="k", lw=.5, alpha=0.7), color='tab:blue', kde_kws={"color":"tab:blue", "lw": 2}, ax=axes[0], hist=False)
axes[0].xaxis.set_tick_params(which='both', labelbottom=True)
axes[0].set(xlabel="Edge Curvature", ylabel="Density")
#plt.xticks(fontsize=11)
#plt.yticks( fontsize=11)
#plt.legend()
#plt.axis([-1, 1, 0, 6])
#plt.xlabel("Vertex Curvature", fontsize=12)
#plt.ylabel("Density", fontsize=12)
#plt.title("$H_{2}O$ (Tmao) - TIP4P")
plt.tight_layout()
plt.savefig("tmao_8M_density.pdf", dpi=200)
|
{"hexsha": "307fddc765b304f7557ca704a4adc91080600f73", "size": 20160, "ext": "py", "lang": "Python", "max_stars_repo_path": "Ion Aggregation/ollivier_HBN.py", "max_stars_repo_name": "ExpectozJJ/Persistent-Ollivier-Ricci-Curvature", "max_stars_repo_head_hexsha": "508f49e9aaa9f88552b49d01b6d585df9e75d220", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Ion Aggregation/ollivier_HBN.py", "max_issues_repo_name": "ExpectozJJ/Persistent-Ollivier-Ricci-Curvature", "max_issues_repo_head_hexsha": "508f49e9aaa9f88552b49d01b6d585df9e75d220", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Ion Aggregation/ollivier_HBN.py", "max_forks_repo_name": "ExpectozJJ/Persistent-Ollivier-Ricci-Curvature", "max_forks_repo_head_hexsha": "508f49e9aaa9f88552b49d01b6d585df9e75d220", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0590631365, "max_line_length": 204, "alphanum_fraction": 0.5577876984, "include": true, "reason": "import numpy,import networkx", "num_tokens": 6263}
|
import enum
import logging
import numpy as np
import pytest
import arim.helpers
from arim.exceptions import InvalidShape, InvalidDimension, NotAnArray
def test_get_name():
metadata = dict(long_name="Nicolas", short_name="Nic")
assert arim.helpers.get_name(metadata) == "Nicolas"
del metadata["long_name"]
assert arim.helpers.get_name(metadata) == "Nic"
del metadata["short_name"]
assert isinstance(arim.helpers.get_name(metadata), str)
def test_parse_enum_constant():
Foo = enum.Enum("Foo", "foo bar")
assert arim.helpers.parse_enum_constant("foo", Foo) is Foo.foo
assert arim.helpers.parse_enum_constant(Foo.foo, Foo) is Foo.foo
assert arim.helpers.parse_enum_constant("bar", Foo) is Foo.bar
assert arim.helpers.parse_enum_constant(Foo.bar, Foo) is Foo.bar
with pytest.raises(ValueError):
arim.helpers.parse_enum_constant("baz", Foo)
with pytest.raises(ValueError):
arim.helpers.parse_enum_constant(Foo, Foo)
def test_timeit(capsys):
logger = logging.getLogger(__name__)
with arim.helpers.timeit(logger=logger):
1 + 1
out, err = capsys.readouterr()
assert out == ""
assert err == ""
with arim.helpers.timeit("Foobar"):
1 + 1
out, err = capsys.readouterr()
assert out.startswith("Foobar")
assert err == ""
def test_cache():
cache = arim.helpers.Cache()
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
cache["toto"] = "titi"
assert len(cache) == 1
assert cache.hits == 0
assert cache.misses == 0
a = cache["toto"]
assert a == "titi"
assert len(cache) == 1
assert cache.hits == 1
assert cache.misses == 0
a = cache.get("toto")
assert a == "titi"
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 0
b = cache.get("foo", None)
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 1
with pytest.raises(KeyError):
b = cache["another_miss"]
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 2
    # the 'in' operator does not change the hits/misses count:
"toto" in cache
"tata" in cache
assert len(cache) == 1
assert cache.hits == 2
assert cache.misses == 2
str(cache)
cache.stat()
cache.clear()
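# Editorial sketch (not arim's implementation): the hit/miss behaviour
# exercised above can be reproduced with a small dict subclass.
def test_counting_cache_sketch():
    class CountingCache(dict):
        hits = 0
        misses = 0

        def __getitem__(self, key):
            try:
                value = super().__getitem__(key)
            except KeyError:
                self.misses += 1
                raise
            self.hits += 1
            return value

    cache = CountingCache()
    cache["toto"] = "titi"
    assert cache["toto"] == "titi" and cache.hits == 1
    with pytest.raises(KeyError):
        cache["miss"]
    assert cache.misses == 1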
def test_nocache():
cache = arim.helpers.NoCache()
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
cache["toto"] = "titi" # this should do nothing
assert "toto" not in cache
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 0
with pytest.raises(KeyError):
a = cache["toto"]
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 1
a = cache.get("toto")
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 2
    # the 'in' operator does not change the hits/misses count:
"toto" in cache
"tata" in cache
assert len(cache) == 0
assert cache.hits == 0
assert cache.misses == 2
str(cache)
cache.stat()
cache.clear()
def test_git_version():
v = arim.helpers.get_git_version()
assert isinstance(v, str)
assert v != ""
v_short = arim.helpers.get_git_version(short=True)
assert v_short == v
v_long = arim.helpers.get_git_version(short=False)
assert isinstance(v_long, str)
assert v_long != ""
assert len(v_long) >= len(v_short)
def test_get_shape_safely():
shape = (3, 4, 5)
x = np.arange(3 * 4 * 5).reshape(shape)
assert arim.helpers.get_shape_safely(x, "x", shape) == shape
assert arim.helpers.get_shape_safely(x, "x", (3, None, 5)) == shape
assert arim.helpers.get_shape_safely(x, "x") == shape
assert arim.helpers.get_shape_safely(x, "x", (None, None, None)) == shape
with pytest.raises(InvalidShape):
arim.helpers.get_shape_safely(x, "x", (3, 4, 666))
with pytest.raises(InvalidDimension):
arim.helpers.get_shape_safely(x, "x", (3, 4, 5, 6))
with pytest.raises(NotAnArray):
arim.helpers.get_shape_safely(x.tolist(), "x", (3, 4, 5))
def test_chunk_array():
# 1D:
x = np.arange(10)
size = 3
res = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 1D:
x = np.arange(9)
size = 3
res = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 2D dim 0:
x = np.arange(20).reshape((10, 2))
size = 3
res = [x[0:3, :], x[3:6, :], x[6:9, :], x[9:, :]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 2D dim 1:
x = np.arange(20).reshape((2, 10))
size = 3
res = [x[:, 0:3], x[:, 3:6], x[:, 6:9], x[:, 9:]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size, axis=1), res):
w1 = x[sel]
assert np.all(w1 == w2)
# 3D dim 1:
x = np.arange(5 * 10 * 3).reshape((5, 10, 3))
size = 3
res = [x[:, 0:3, :], x[:, 3:6, :], x[:, 6:9, :], x[:, 9:, :]]
for (sel, w2) in zip(arim.helpers.chunk_array(x.shape, size, axis=1), res):
w1 = x[sel]
assert np.all(w1 == w2)
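# Editorial sketch (not arim's implementation): a chunker with the behaviour
# exercised above is a short slice generator.
def test_chunk_indices_sketch():
    def chunk_indices(length, size):
        for start in range(0, length, size):
            yield slice(start, min(start + size, length))

    x = np.arange(10)
    expected = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    for sel, ref in zip(chunk_indices(len(x), 3), expected):
        assert np.all(x[sel] == ref)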
def test_smallest_uint_that_fits():
assert arim.helpers.smallest_uint_that_fits(2 ** 8 - 1) is np.uint8
assert arim.helpers.smallest_uint_that_fits(2 ** 8) is np.uint16
assert arim.helpers.smallest_uint_that_fits(2 ** 64 - 1) is np.uint64
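# Editorial sketch (not arim's implementation): picking the narrowest
# unsigned dtype is a scan over the candidate widths.
def test_smallest_uint_sketch():
    def smallest_uint(n):
        for dtype in (np.uint8, np.uint16, np.uint32, np.uint64):
            if n <= np.iinfo(dtype).max:
                return dtype
        raise ValueError("value does not fit in an unsigned 64-bit integer")

    assert smallest_uint(2 ** 8 - 1) is np.uint8
    assert smallest_uint(2 ** 8) is np.uint16
    assert smallest_uint(2 ** 64 - 1) is np.uint64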
def test_sizeof_fmt():
assert arim.helpers.sizeof_fmt(1) == "1.0 B"
assert arim.helpers.sizeof_fmt(1024) == "1.0 KiB"
assert arim.helpers.sizeof_fmt(2 * 1024) == "2.0 KiB"
assert arim.helpers.sizeof_fmt(5 * 1024 ** 2) == "5.0 MiB"
|
{"hexsha": "dbcf55198c58c82af1da0a2a7eab2ae54e7b70fe", "size": 5906, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_helpers.py", "max_stars_repo_name": "will-jj/arim", "max_stars_repo_head_hexsha": "fc15efe171a41355090123fcea10406ee75efe31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-04-05T13:43:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T21:38:19.000Z", "max_issues_repo_path": "tests/test_helpers.py", "max_issues_repo_name": "will-jj/arim", "max_issues_repo_head_hexsha": "fc15efe171a41355090123fcea10406ee75efe31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-04-09T10:38:26.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-17T16:23:16.000Z", "max_forks_repo_path": "tests/test_helpers.py", "max_forks_repo_name": "will-jj/arim", "max_forks_repo_head_hexsha": "fc15efe171a41355090123fcea10406ee75efe31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-04-04T17:02:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-30T15:36:03.000Z", "avg_line_length": 26.9680365297, "max_line_length": 79, "alphanum_fraction": 0.6054859465, "include": true, "reason": "import numpy", "num_tokens": 1871}
|
[STATEMENT]
lemma ifex_ite_opt_eq: "
ro_ifex i \<Longrightarrow> ro_ifex t \<Longrightarrow> ro_ifex e \<Longrightarrow> ifex_ite_opt i t e = ifex_ite i t e"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>ro_ifex i; ro_ifex t; ro_ifex e\<rbrakk> \<Longrightarrow> ifex_ite_opt i t e = ifex_ite i t e
[PROOF STEP]
apply(induction i t e rule: ifex_ite_opt.induct)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i t e. \<lbrakk>\<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); ro_ifex i; ro_ifex t; ro_ifex e\<rbrakk> \<Longrightarrow> ifex_ite_opt i t e = ifex_ite i t e
[PROOF STEP]
apply(subst ifex_ite_opt.simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i t e. \<lbrakk>\<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); ro_ifex i; ro_ifex t; ro_ifex e\<rbrakk> \<Longrightarrow> (case param_opt i t e of None \<Rightarrow> case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)) | Some b \<Rightarrow> b) = ifex_ite i t e
[PROOF STEP]
apply(rename_tac i t e)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i t e. \<lbrakk>\<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); ro_ifex i; ro_ifex t; ro_ifex e\<rbrakk> \<Longrightarrow> (case param_opt i t e of None \<Rightarrow> case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)) | Some b \<Rightarrow> b) = ifex_ite i t e
[PROOF STEP]
apply(case_tac "\<exists>r. param_opt i t e = Some r")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>i t e. \<lbrakk>\<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); ro_ifex i; ro_ifex t; ro_ifex e; \<exists>r. param_opt i t e = Some r\<rbrakk> \<Longrightarrow> (case param_opt i t e of None \<Rightarrow> case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)) | Some b \<Rightarrow> b) = ifex_ite i t e
2. \<And>i t e. \<lbrakk>\<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); ro_ifex i; ro_ifex t; ro_ifex e; \<nexists>r. param_opt i t e = Some r\<rbrakk> \<Longrightarrow> (case param_opt i t e of None \<Rightarrow> case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)) | Some b \<Rightarrow> b) = ifex_ite i t e
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 True); ro_ifex (restrict_top t_ x2 True); ro_ifex (restrict_top e_ x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True) = ifex_ite (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True); \<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 False); ro_ifex (restrict_top t_ x2 False); ro_ifex (restrict_top e_ x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False) = ifex_ite (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False); ro_ifex i_; ro_ifex t_; ro_ifex e_; \<exists>r. param_opt i_ t_ e_ = Some r\<rbrakk> \<Longrightarrow> (case param_opt i_ t_ e_ of None \<Rightarrow> case lowest_tops [i_, t_, e_] of None \<Rightarrow> case i_ of Trueif \<Rightarrow> t_ | Falseif \<Rightarrow> e_ | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i_ x True) (restrict_top t_ x True) (restrict_top e_ x True)) (ifex_ite_opt (restrict_top i_ x False) (restrict_top t_ x False) (restrict_top e_ x False)) | Some b \<Rightarrow> b) = ifex_ite i_ t_ e_
[PROOF STEP]
apply(simp del: ifex_ite.simps restrict_top.simps lowest_tops.simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 True); ro_ifex (restrict_top t_ x2 True); ro_ifex (restrict_top e_ x2 True)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 True, restrict_top t_ x2 True, restrict_top e_ x2 True] of None \<Rightarrow> case restrict_top i_ x2 True of Trueif \<Rightarrow> restrict_top t_ x2 True | Falseif \<Rightarrow> restrict_top e_ x2 True | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x True) (restrict_top (restrict_top t_ x2 True) x True) (restrict_top (restrict_top e_ x2 True) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x False) (restrict_top (restrict_top t_ x2 True) x False) (restrict_top (restrict_top e_ x2 True) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True); \<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 False); ro_ifex (restrict_top t_ x2 False); ro_ifex (restrict_top e_ x2 False)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 False, restrict_top t_ x2 False, restrict_top e_ x2 False] of None \<Rightarrow> case restrict_top i_ x2 False of Trueif \<Rightarrow> restrict_top t_ x2 False | Falseif \<Rightarrow> restrict_top e_ x2 False | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x True) (restrict_top (restrict_top t_ x2 False) x True) (restrict_top (restrict_top e_ x2 False) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x False) (restrict_top (restrict_top t_ x2 False) x False) (restrict_top (restrict_top e_ x2 False) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False); ro_ifex i_; ro_ifex t_; ro_ifex e_; \<exists>r. param_opt i_ t_ e_ = Some r\<rbrakk> \<Longrightarrow> (case param_opt i_ t_ e_ of None \<Rightarrow> case lowest_tops [i_, t_, e_] of None \<Rightarrow> case i_ of Trueif \<Rightarrow> t_ | Falseif \<Rightarrow> e_ | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i_ x True) (restrict_top t_ x True) (restrict_top e_ x True)) (ifex_ite_opt (restrict_top i_ x False) (restrict_top t_ x False) (restrict_top e_ x False)) | Some b \<Rightarrow> b) = ifex_ite i_ t_ e_
[PROOF STEP]
apply(rule param_opt_ifex_ite_eq)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>\<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 True); ro_ifex (restrict_top t_ x2 True); ro_ifex (restrict_top e_ x2 True)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 True, restrict_top t_ x2 True, restrict_top e_ x2 True] of None \<Rightarrow> case restrict_top i_ x2 True of Trueif \<Rightarrow> restrict_top t_ x2 True | Falseif \<Rightarrow> restrict_top e_ x2 True | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x True) (restrict_top (restrict_top t_ x2 True) x True) (restrict_top (restrict_top e_ x2 True) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x False) (restrict_top (restrict_top t_ x2 True) x False) (restrict_top (restrict_top e_ x2 True) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True); \<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 False); ro_ifex (restrict_top t_ x2 False); ro_ifex (restrict_top e_ x2 False)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 False, restrict_top t_ x2 False, restrict_top e_ x2 False] of None \<Rightarrow> case restrict_top i_ x2 False of Trueif \<Rightarrow> restrict_top t_ x2 False | Falseif \<Rightarrow> restrict_top e_ x2 False | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x True) (restrict_top (restrict_top t_ x2 False) x True) (restrict_top (restrict_top e_ x2 False) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x False) (restrict_top (restrict_top t_ x2 False) x False) (restrict_top (restrict_top e_ x2 False) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False); ro_ifex i_; ro_ifex t_; ro_ifex e_; \<exists>r. param_opt i_ t_ e_ = Some r\<rbrakk> \<Longrightarrow> ro_ifex i_
2. \<lbrakk>\<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 True); ro_ifex (restrict_top t_ x2 True); ro_ifex (restrict_top e_ x2 True)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 True, restrict_top t_ x2 True, restrict_top e_ x2 True] of None \<Rightarrow> case restrict_top i_ x2 True of Trueif \<Rightarrow> restrict_top t_ x2 True | Falseif \<Rightarrow> restrict_top e_ x2 True | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x True) (restrict_top (restrict_top t_ x2 True) x True) (restrict_top (restrict_top e_ x2 True) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x False) (restrict_top (restrict_top t_ x2 True) x False) (restrict_top (restrict_top e_ x2 True) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True); \<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 False); ro_ifex (restrict_top t_ x2 False); ro_ifex (restrict_top e_ x2 False)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 False, restrict_top t_ x2 False, restrict_top e_ x2 False] of None \<Rightarrow> case restrict_top i_ x2 False of Trueif \<Rightarrow> restrict_top t_ x2 False | Falseif \<Rightarrow> restrict_top e_ x2 False | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x True) (restrict_top (restrict_top t_ x2 False) x True) (restrict_top (restrict_top e_ x2 False) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x False) (restrict_top (restrict_top t_ x2 False) x False) (restrict_top (restrict_top e_ x2 False) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False); ro_ifex i_; ro_ifex t_; ro_ifex e_; \<exists>r. param_opt i_ t_ e_ = Some r\<rbrakk> \<Longrightarrow> ro_ifex t_
3. \<lbrakk>\<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 True); ro_ifex (restrict_top t_ x2 True); ro_ifex (restrict_top e_ x2 True)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 True, restrict_top t_ x2 True, restrict_top e_ x2 True] of None \<Rightarrow> case restrict_top i_ x2 True of Trueif \<Rightarrow> restrict_top t_ x2 True | Falseif \<Rightarrow> restrict_top e_ x2 True | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x True) (restrict_top (restrict_top t_ x2 True) x True) (restrict_top (restrict_top e_ x2 True) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x False) (restrict_top (restrict_top t_ x2 True) x False) (restrict_top (restrict_top e_ x2 True) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True); \<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 False); ro_ifex (restrict_top t_ x2 False); ro_ifex (restrict_top e_ x2 False)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 False, restrict_top t_ x2 False, restrict_top e_ x2 False] of None \<Rightarrow> case restrict_top i_ x2 False of Trueif \<Rightarrow> restrict_top t_ x2 False | Falseif \<Rightarrow> restrict_top e_ x2 False | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x True) (restrict_top (restrict_top t_ x2 False) x True) (restrict_top (restrict_top e_ x2 False) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x False) (restrict_top (restrict_top t_ x2 False) x False) (restrict_top (restrict_top e_ x2 False) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False); ro_ifex i_; ro_ifex t_; ro_ifex e_; \<exists>r. param_opt i_ t_ e_ = Some r\<rbrakk> \<Longrightarrow> ro_ifex e_
4. \<lbrakk>\<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 True); ro_ifex (restrict_top t_ x2 True); ro_ifex (restrict_top e_ x2 True)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 True, restrict_top t_ x2 True, restrict_top e_ x2 True] of None \<Rightarrow> case restrict_top i_ x2 True of Trueif \<Rightarrow> restrict_top t_ x2 True | Falseif \<Rightarrow> restrict_top e_ x2 True | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x True) (restrict_top (restrict_top t_ x2 True) x True) (restrict_top (restrict_top e_ x2 True) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 True) x False) (restrict_top (restrict_top t_ x2 True) x False) (restrict_top (restrict_top e_ x2 True) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 True) (restrict_top t_ x2 True) (restrict_top e_ x2 True); \<And>x2. \<lbrakk>param_opt i_ t_ e_ = None; lowest_tops [i_, t_, e_] = Some x2; ro_ifex (restrict_top i_ x2 False); ro_ifex (restrict_top t_ x2 False); ro_ifex (restrict_top e_ x2 False)\<rbrakk> \<Longrightarrow> (case param_opt (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False) of None \<Rightarrow> case lowest_tops [restrict_top i_ x2 False, restrict_top t_ x2 False, restrict_top e_ x2 False] of None \<Rightarrow> case restrict_top i_ x2 False of Trueif \<Rightarrow> restrict_top t_ x2 False | Falseif \<Rightarrow> restrict_top e_ x2 False | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x True) (restrict_top (restrict_top t_ x2 False) x True) (restrict_top (restrict_top e_ x2 False) x True)) (ifex_ite_opt (restrict_top (restrict_top i_ x2 False) x False) (restrict_top (restrict_top t_ x2 False) x False) (restrict_top (restrict_top e_ x2 False) x False)) | Some b \<Rightarrow> b) = ifex_ite (restrict_top i_ x2 False) (restrict_top t_ x2 False) (restrict_top e_ x2 False); ro_ifex i_; ro_ifex t_; ro_ifex e_; \<exists>r. param_opt i_ t_ e_ = Some r\<rbrakk> \<Longrightarrow> param_opt i_ t_ e_ = Some (case param_opt i_ t_ e_ of None \<Rightarrow> case lowest_tops [i_, t_, e_] of None \<Rightarrow> case i_ of Trueif \<Rightarrow> t_ | Falseif \<Rightarrow> e_ | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i_ x True) (restrict_top t_ x True) (restrict_top e_ x True)) (ifex_ite_opt (restrict_top i_ x False) (restrict_top t_ x False) (restrict_top e_ x False)) | Some b \<Rightarrow> b)
[PROOF STEP]
by (auto simp add: bf_ifex_rel_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>i t e. \<lbrakk>\<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); ro_ifex i; ro_ifex t; ro_ifex e; \<nexists>r. param_opt i t e = Some r\<rbrakk> \<Longrightarrow> (case param_opt i t e of None \<Rightarrow> case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)) | Some b \<Rightarrow> b) = ifex_ite i t e
[PROOF STEP]
subgoal for i t e
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>param_opt i t e = None; lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); ro_ifex i; ro_ifex t; ro_ifex e; \<nexists>r. param_opt i t e = Some r\<rbrakk> \<Longrightarrow> (case param_opt i t e of None \<Rightarrow> case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)) | Some b \<Rightarrow> b) = ifex_ite i t e
[PROOF STEP]
apply(clarsimp simp del: restrict_top.simps ifex_ite.simps ifex_ite_opt.simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e\<rbrakk> \<Longrightarrow> (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False))) = ifex_ite i t e
[PROOF STEP]
apply(cases "lowest_tops [i,t,e] = None")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>\<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = None\<rbrakk> \<Longrightarrow> (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False))) = ifex_ite i t e
2. \<lbrakk>\<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] \<noteq> None\<rbrakk> \<Longrightarrow> (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False))) = ifex_ite i t e
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = None\<rbrakk> \<Longrightarrow> (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False))) = ifex_ite i t e
[PROOF STEP]
by clarsimp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] \<noteq> None\<rbrakk> \<Longrightarrow> (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False))) = ifex_ite i t e
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 True); ro_ifex (restrict_top t x2 True); ro_ifex (restrict_top e x2 True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True) = ifex_ite (restrict_top i x2 True) (restrict_top t x2 True) (restrict_top e x2 True); \<And>x2. \<lbrakk>lowest_tops [i, t, e] = Some x2; ro_ifex (restrict_top i x2 False); ro_ifex (restrict_top t x2 False); ro_ifex (restrict_top e x2 False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False) = ifex_ite (restrict_top i x2 False) (restrict_top t x2 False) (restrict_top e x2 False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] \<noteq> None\<rbrakk> \<Longrightarrow> (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite_opt (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite_opt (restrict_top i x False) (restrict_top t x False) (restrict_top e x False))) = ifex_ite i t e
[PROOF STEP]
apply(clarsimp simp del: restrict_top.simps ifex_ite.simps ifex_ite_opt.simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y\<rbrakk> \<Longrightarrow> IFC y (ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)) (ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)) = ifex_ite i t e
[PROOF STEP]
apply(subst ifex_ite.simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y\<rbrakk> \<Longrightarrow> IFC y (ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)) (ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)) = (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)))
[PROOF STEP]
apply(rename_tac y)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y\<rbrakk> \<Longrightarrow> IFC y (ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)) (ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)) = (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)))
[PROOF STEP]
apply(subgoal_tac "(ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)) =
(ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True))")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y; ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)\<rbrakk> \<Longrightarrow> IFC y (ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)) (ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)) = (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)))
2. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)
[PROOF STEP]
apply(subgoal_tac "(ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)) =
(ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False))")
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y; ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)\<rbrakk> \<Longrightarrow> IFC y (ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)) (ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)) = (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)))
2. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y; ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)
3. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y_ True); ro_ifex (restrict_top t y_ True); ro_ifex (restrict_top e y_ True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True); \<lbrakk>ro_ifex (restrict_top i y_ False); ro_ifex (restrict_top t y_ False); ro_ifex (restrict_top e y_ False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False) = ifex_ite (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y_; ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True); ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False) = ifex_ite (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False)\<rbrakk> \<Longrightarrow> IFC y_ (ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True)) (ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False)) = (case lowest_tops [i, t, e] of None \<Rightarrow> case i of Trueif \<Rightarrow> t | Falseif \<Rightarrow> e | Some x \<Rightarrow> IFC x (ifex_ite (restrict_top i x True) (restrict_top t x True) (restrict_top e x True)) (ifex_ite (restrict_top i x False) (restrict_top t x False) (restrict_top e x False)))
[PROOF STEP]
by force
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y; ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False)
2. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y_ True); ro_ifex (restrict_top t y_ True); ro_ifex (restrict_top e y_ True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True); \<lbrakk>ro_ifex (restrict_top i y_ False); ro_ifex (restrict_top t y_ False); ro_ifex (restrict_top e y_ False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False) = ifex_ite (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y_; ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False) = ifex_ite (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False)
[PROOF STEP]
using restrict_top_ifex_minimal_invar restrict_top_ifex_ordered_invar
[PROOF STATE]
proof (prove)
using this:
ifex_minimal ?i \<Longrightarrow> ifex_minimal (restrict_top ?i ?a ?val)
ifex_ordered ?b \<Longrightarrow> ifex_ordered (restrict_top ?b ?var ?val)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y_ True); ro_ifex (restrict_top t y_ True); ro_ifex (restrict_top e y_ True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True); \<lbrakk>ro_ifex (restrict_top i y_ False); ro_ifex (restrict_top t y_ False); ro_ifex (restrict_top e y_ False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False) = ifex_ite (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y_; ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False) = ifex_ite (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False)
[PROOF STEP]
by metis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y True); ro_ifex (restrict_top t y True); ro_ifex (restrict_top e y True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True); \<lbrakk>ro_ifex (restrict_top i y False); ro_ifex (restrict_top t y False); ro_ifex (restrict_top e y False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y False) (restrict_top t y False) (restrict_top e y False) = ifex_ite (restrict_top i y False) (restrict_top t y False) (restrict_top e y False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y True) (restrict_top t y True) (restrict_top e y True) = ifex_ite (restrict_top i y True) (restrict_top t y True) (restrict_top e y True)
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y_ True); ro_ifex (restrict_top t y_ True); ro_ifex (restrict_top e y_ True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True); \<lbrakk>ro_ifex (restrict_top i y_ False); ro_ifex (restrict_top t y_ False); ro_ifex (restrict_top e y_ False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False) = ifex_ite (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y_\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True)
[PROOF STEP]
using restrict_top_ifex_minimal_invar restrict_top_ifex_ordered_invar
[PROOF STATE]
proof (prove)
using this:
ifex_minimal ?i \<Longrightarrow> ifex_minimal (restrict_top ?i ?a ?val)
ifex_ordered ?b \<Longrightarrow> ifex_ordered (restrict_top ?b ?var ?val)
goal (1 subgoal):
1. \<lbrakk>\<lbrakk>ro_ifex (restrict_top i y_ True); ro_ifex (restrict_top t y_ True); ro_ifex (restrict_top e y_ True)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True); \<lbrakk>ro_ifex (restrict_top i y_ False); ro_ifex (restrict_top t y_ False); ro_ifex (restrict_top e y_ False)\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False) = ifex_ite (restrict_top i y_ False) (restrict_top t y_ False) (restrict_top e y_ False); param_opt i t e = None; ifex_ordered i; ifex_minimal i; ifex_ordered t; ifex_minimal t; ifex_ordered e; ifex_minimal e; lowest_tops [i, t, e] = Some y_\<rbrakk> \<Longrightarrow> ifex_ite_opt (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True) = ifex_ite (restrict_top i y_ True) (restrict_top t y_ True) (restrict_top e y_ True)
[PROOF STEP]
by metis
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 20397, "file": "ROBDD_BDT", "length": 30}
|
import os
import pickle
import datetime  # needed below for the main_beam_efficiencies date ranges
import numpy as np
import pandas as pd
import yaml
import pyclass
#from online_reduction.pipeline import PandasClass
from sicparse import OptionParser
import logging
from jinja2 import Environment, PackageLoader
import subprocess
def jinja_raise(msg):
raise Exception(msg)
def debug(text):
return ''
ENV = Environment(
loader=PackageLoader('kosma_py_lib', 'templates'),
trim_blocks=True,
lstrip_blocks=True)
ENV.globals['jinja_raise'] = jinja_raise
ENV.filters['debug'] = debug
class captureTTY:
    '''
    Capture the terminal content. This is needed to grab the output of the
    Fortran code run with pyclass, because modules created using f2py write
    directly to the terminal.
    Taken from the StackOverflow question:
    http://stackoverflow.com/questions/10803579/copy-fortran-called-via-f2py-output-in-python
    '''
def __init__(self, tmpFile = '/tmp/out.tmp.dat'):
'''
Set everything up
'''
self.tmpFile = tmpFile
self.ttyData = []
self.outfile = False
self.save = False
def start(self):
'''
Start grabbing TTY data.
'''
# open outputfile
self.outfile = os.open(self.tmpFile, os.O_RDWR|os.O_CREAT)
# save the current file descriptor
self.save = os.dup(1)
# put outfile on 1
os.dup2(self.outfile, 1)
return
def stop(self):
'''
Stop recording TTY data
'''
if not self.save:
# Probably not started
return
# restore the standard output file descriptor
os.dup2(self.save, 1)
# parse temporary file
        self.ttyData = open(self.tmpFile).readlines()
# close the output file
os.close(self.outfile)
# delete temporary file
os.remove(self.tmpFile)
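# A minimal usage sketch for captureTTY (illustrative only; it assumes an
# active pyclass session whose Fortran output would otherwise only appear on
# the terminal):
def _example_capture_tty():
    grabber = captureTTY()
    grabber.start()
    pyclass.comm("show file")   # output is redirected into the temp file
    grabber.stop()
    return grabber.ttyData      # list of captured terminal lines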
def get_class_windows():
#
lower_range = pyclass.gdict.set.las.wind1.__sicdata__
upper_range = pyclass.gdict.set.las.wind2.__sicdata__
#
windows = [window for window in zip(lower_range, upper_range) if window != (0.0,0.0)]
return windows
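# Hedged example of the return value: with two CLASS windows set, a call like
#     windows = get_class_windows()   ->   [(-10.0, 10.0), (50.0, 70.0)]
# would be expected, since unset (0.0, 0.0) pairs are filtered out.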
def get_index(observatory="", additional_meta_parameters = [], force_reload=False, get_rms=False, get_stats=False, get_fft_power = []):
array = []
folder_context = {}
folder_context["script_dir"] = "/tmp/"
folder_context["memory_dir"] = "./"
config = {}
config["general"] = {}
config["general"]["observatory"] = observatory
config["pipeline"]= {}
config["pipeline"]["global"] = {}
config["pipeline"]["global"]["exclude"] = {}
config["pipeline"]["global"]["include"] = {}
config["pipeline"]["class_processing"] = {}
config["pipeline"]["class_processing"] = {}
config["pipeline"]["class_processing"]["batch"] = True
config["pipeline"]["class_processing"]["silent"] = True
    # Now read the file name, which is not stored in an easily accessible
    # variable, from the output of the "show file" command
grabber = captureTTY()
grabber.start()
pyclass.comm("show file")
grabber.stop()
input_file = grabber.ttyData[0].split(":")[1].split("[")[0].strip()
input_file = os.path.abspath(input_file)
pyclass.message(pyclass.seve.i, "PD_INDEX", "input file {0}".format(input_file))
#print "########### ",input_file," #### ",folder_context["memory_dir"]
#if input_file[0] not in ["/","."] :
# input_file = "./"+input_file
#
if os.path.dirname(input_file)=="":
output_path="./"
else:
output_path=os.path.dirname(input_file)
pyclass.message(pyclass.seve.i, "PD_INDEX", "output path {0}".format(output_path))
folder_context["memory_dir"] = output_path
#
modification_times_yaml_file = "{}/.modification_times.yml".format(
folder_context["memory_dir"]
)
#
if os.path.exists(modification_times_yaml_file):
previous_modification_times = yaml.safe_load(open(modification_times_yaml_file))
else:
previous_modification_times = {}
#
if input_file.strip()=="none":
pyclass.message(pyclass.seve.e, "PD_INDEX", "No file loaded".format(input_file))
return
elif os.path.exists(input_file):
modification_time = os.path.getmtime(input_file)
else:
pyclass.message(pyclass.seve.e, "PD_INDEX", "{0} file not found".format(input_file))
return
#
pyclass.message(pyclass.seve.i, "PD_INDEX", "Loading index from {} into pandas".format(input_file))
new = True
    print(previous_modification_times)
    if os.path.basename(input_file) not in previous_modification_times.keys():
        previous_modification_times[os.path.basename(input_file)] = 0.0
    # placeholder for a disabled try/except around the index construction
    if True:
        if (not force_reload and
                (round(previous_modification_times[os.path.basename(input_file)], 0) >= round(modification_time, 0))):
            pyclass.message(pyclass.seve.i, "PD_INDEX", "Loading from already created local table")
            # pickle files must be opened in binary mode
            df = pickle.load(open("{1}/.pd_index_{0}.pkl".format(os.path.basename(input_file),
                                                                 output_path), "rb"))
            new = False
else:
pyclass.message(pyclass.seve.i, "PD_INDEX", "Generating table from class")
            # use a name that does not shadow the pandas module alias "pd"
            pandas_class = PandasClass(input_file=input_file, folder_context=folder_context,
                                       config=config, reduction_parameters={},
                                       additional_meta_parameters=additional_meta_parameters,
                                       get_rms=get_rms,
                                       get_stats=get_stats,
                                       get_fft_power=get_fft_power
                                       )
            df = pandas_class.df
#
pyclass.message(pyclass.seve.i, "PD_INDEX", "Pandas index loaded successfully")
#
#
    if new:
        pickle.dump(df, open("{1}/.pd_index_{0}.pkl".format(os.path.basename(input_file),
                                                            output_path), "wb"))
previous_modification_times[os.path.basename(input_file)] = modification_time
with open(modification_times_yaml_file, 'w') as outfile:
yaml.dump(previous_modification_times, outfile)
#
return df
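# Illustrative sketch of calling get_index (assumes a CLASS file was already
# opened in the running pyclass session with "file in ..."; the extra meta
# parameter shown here is a made-up example, not a default):
def _example_get_index():
    return get_index(observatory="SOFIA_GREAT",
                     additional_meta_parameters=["R%HEAD%POS%LAM"],
                     get_rms=True)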
class DataFrameEmpty(Exception):
    ''' Raised when no scans are found. '''
def __init__(self):
self.message = "The filtered DataFrame is empty"
class ClassExecution(object):
def run_class_script(self, script):
# self.log.debug("In class execution")
if hasattr(self,"config"):
try:
batch_processing = self.config["pipeline"]["class_processing"]["batch"]
except KeyError:
batch_processing = False
else:
batch_processing = True
try:
if batch_processing:
cmd = "class -nw @{} > /dev/null 2>&1".format(script)
return_code = subprocess.check_call(cmd, shell=True)
else:
return_code = subprocess.check_call(
["class", "-nw", "@{}".format(script)])
#self.log.debug(return_code)
except KeyboardInterrupt:
raise SystemExit("Killed class process")
class PandasClass(ClassExecution):
def __init__(self,
input_file=None,
folder_context=None,
config=None,
read_user_section=True,
reduction_parameters=None,
extend_dataframe=None,
additional_meta_parameters=None,
get_rms=False,
get_stats=False,
get_fft_power=[]
):
""" Represent the index of a class file in pandas
TODO: Includes from differnt categories should be filtered with and
"""
self.input_file = input_file
self.folder_context = folder_context
self.log = logging.getLogger("debug")
self.log.debug("Entering PandasClass")
self.get_stats = get_stats
self.get_rms = get_rms
self.get_fft_power = get_fft_power
if config is None:
raise SystemExit(
"Aborting. PandasClass needs a "
"loaded config file to work from."
)
self.config = config
if not input_file:
raise SystemExit(
"Aborting. PandasClass needs an input file to work with.")
self.input_file = input_file
if not os.path.exists(self.input_file):
raise SystemExit(
(
"{0} doesn't exist check filename"
).format(self.input_file))
last_number = None
if extend_dataframe is not None and not extend_dataframe.empty:
extend_dataframe_this_file = extend_dataframe[
extend_dataframe.input_file == input_file]
last_number = extend_dataframe_this_file.number.max()
if np.isnan(last_number):
last_number = None
template = ENV.get_template("pandas.class")
context = {
"input_file": input_file,
"memory_dir": folder_context["memory_dir"]
}
if last_number:
context["start_from_number"] = last_number + 1
script = ''.join(template.generate(context))
script_name = "{}/pandas.class".format(folder_context["script_dir"])
with open(script_name, "w") as script_file:
script_file.write(script)
self.run_class_script(script_name)
index_file = "{}/idx.csv".format(folder_context["memory_dir"])
standard_index = [
"index","number","version","telescope", "scan", "subscan", "boff", "loff", "line", "source"
]
# first column number is taken as index
self.df = pd.read_csv(index_file, names=standard_index)
#
if self.df.empty:
raise DataFrameEmpty
# For SOFIA/GREAT observations we can expand the data frame to also
# include items from the SOFIA user sections
try:
observatory = config["general"]["observatory"]
except KeyError:
observatory = "SOFIA_GREAT"
if observatory == "SOFIA_GREAT" and read_user_section:
scans = self.df["scan"].unique()
template = ENV.get_template("pandas_aot_aor.class")
context = {
"input_file": input_file,
"memory_dir": folder_context["memory_dir"],
"scans": scans
}
script = ''.join(template.generate(context))
script_name = "{}/get_aot_aor.class".format(
folder_context["script_dir"])
with open(script_name, "w") as script_file:
script_file.write(script)
self.run_class_script(script_name)
index_file = "{}/scan_aot_aor.csv".format(
folder_context["memory_dir"])
self.aot_df = pd.read_csv(
index_file,
names=[
"scan", "mission_id", "aot_id", "aor_id", "posangle",
"utobs", "cdobs"
])
if len(self.aot_df)==0:
self.log.error("no user section data found")
else:
self.df = pd.merge(self.df, self.aot_df, on="scan")
# Create a new column that contains the tile number (if any)
            dummy_df = self.df.loc[:, "utobs":"cdobs"]
dummy_df["ut_time"] = dummy_df["utobs"] / (2. * np.pi) * 24
dummy_df["ut_time"] = dummy_df["ut_time"].map(float)
dummy_df["residual_hour"] = dummy_df["ut_time"].mod(1)
dummy_df["ut_hour"] = (
dummy_df["ut_time"] - dummy_df["residual_hour"]).map(int)
dummy_df["ut_minute"] = dummy_df["residual_hour"] * 60
dummy_df["residual_minute"] = dummy_df["residual_hour"].mod(60)
dummy_df["ut_minute"] = (
dummy_df["ut_minute"] - dummy_df["residual_minute"]).map(int)
dummy_df["ut_second"] = dummy_df["residual_minute"] * 60
dummy_df["ut_year"] = dummy_df["cdobs"].apply(
lambda x: x.split("-")[2].strip())
dummy_df["ut_month"] = dummy_df["cdobs"].apply(
lambda x: x.split("-")[1].upper().strip())
dummy_df["ut_day"] = dummy_df["cdobs"].apply(
lambda x: x.split("-")[0].strip())
self.df["timestamp_str"] = dummy_df[
["ut_year", "ut_month", "ut_day",
"ut_hour", "ut_minute", "ut_second"]
].apply(lambda x: "{}-{}-{}T{}:{}:{:8.6f}".format(
x[0], x[1], x[2], x[3], x[4], x[5]), axis=1
)
self.df['timestamp'] = pd.to_datetime(
self.df["timestamp_str"], format='%Y-%b-%dT%H:%M:%S.%f')
# append additional parameters
if additional_meta_parameters is not None:
self.log.debug("Collecting additional parameters PandasClass")
parameters_to_collect = ['number','R%HEAD%GEN%VER']
#
drop_columns = [column for column in ["cdobs", "utobs"] if column in self.df.columns]
if len(drop_columns)>0:
                self.df = self.df.drop(columns=drop_columns)
# check parameters are not already in dataframe
for additional_meta_parameter in additional_meta_parameters:
if additional_meta_parameter is None:
continue
if additional_meta_parameter.split("%")[-1] not in self.df.columns:
parameters_to_collect.append(additional_meta_parameter)
#parameters_to_collect.extend(["R%HEAD%GEN%CDOBS", "utobs"])
# generate class script from template
template = ENV.get_template("pandas_additional_meta.class")
context = {
"input_file": self.input_file,
"memory_dir": self.folder_context["memory_dir"],
"meta_params":
["\'{}\'".format(param) for param in parameters_to_collect],
"print_info_on_number": 1000,
}
#
script = ''.join(template.generate(context))
script_name = "{}/pandas_additional_meta.class".format(
self.folder_context["script_dir"])
with open(script_name, "w") as script_file:
script_file.write(script)
self.run_class_script(script_name)
index_file = "{}/pandas_additional_meta.csv".format(
folder_context["memory_dir"])
#
parameters_to_collect[parameters_to_collect.index('R%HEAD%GEN%VER')] = "version"
self.additional_df = pd.read_csv(
index_file,
names=[
param.split("%")[-1].lower()
for param in parameters_to_collect
], delimiter="|")
self.df = pd.merge(self.df, self.additional_df, on = ["number","version"])
# convert utobs column to timestamp
if all([column in self.df.columns for column in ["utobs", "cdobs"]]):
dummy_df = self.df.loc[:, ["utobs", "cdobs"]]
dummy_df["ut_time"] = dummy_df["utobs"] / (2. * np.pi) * 24
dummy_df["ut_time"] = dummy_df["ut_time"].map(float)
dummy_df["residual_hour"] = dummy_df["ut_time"].mod(1)
dummy_df["ut_hour"] = (
dummy_df["ut_time"] - dummy_df["residual_hour"]).map(int)
dummy_df["ut_minute"] = dummy_df["residual_hour"] * 60
dummy_df["residual_minute"] = dummy_df["residual_hour"].mod(60)
dummy_df["ut_minute"] = (
dummy_df["ut_minute"] - dummy_df["residual_minute"]).map(int)
dummy_df["ut_second"] = dummy_df["residual_minute"] * 60
dummy_df["ut_year"] = dummy_df["cdobs"].apply(
lambda x: x.split("-")[2].strip())
dummy_df["ut_month"] = dummy_df["cdobs"].apply(
lambda x: x.split("-")[1].upper().strip())
dummy_df["ut_day"] = dummy_df["cdobs"].apply(
lambda x: x.split("-")[0].strip())
self.df["timestamp_str"] = dummy_df[
["ut_year", "ut_month", "ut_day",
"ut_hour", "ut_minute", "ut_second"]
].apply(lambda x: "{}-{}-{}T{}:{}:{:8.6f}".format(
x[0], x[1], x[2], x[3], x[4], x[5]), axis=1
)
self.df['timestamp'] = pd.to_datetime(
self.df["timestamp_str"], format='%Y-%b-%dT%H:%M:%S.%f')
#
if get_rms:
self._get_rms()
if get_stats:
self._get_stats()
#
self.df.set_index('number',inplace=True)
        # For now only one forward efficiency is implemented. The reason is that
        # for all flight series on SOFIA we had a forward efficiency of 0.97.
# TODO: Make this part of the pipeline generic
forward_efficiency = reduction_parameters.get("forward_efficiency",
0.97)
self.df["feff"] = forward_efficiency
# Placeholders for the forward and beam efficiencies
self.df["beff"] = 0
main_beam_efficiencies = reduction_parameters.get(
"main_beam_efficiencies", None)
# if main_beam_efficiencies are given in the reduction_parameters add
# them to the pandas dataframe
if main_beam_efficiencies:
# For every date range check the individual defined pixels and
# update the
for date_range in main_beam_efficiencies.keys():
start_date, end_date = date_range.split("-")
start_date = [int(i) for i in start_date.split(".")]
start_date = datetime.date(start_date[2], start_date[1],
start_date[0])
start_date = start_date.strftime('%Y-%m-%d')
end_date = [int(i) for i in end_date.split(".")]
end_date = datetime.date(end_date[2], end_date[1], end_date[0])
end_date = end_date.strftime('%Y-%m-%d')
if self.df.loc[(self.df["timestamp"] >= start_date)
& (self.df["timestamp"] <= end_date)].empty:
continue
for pixel in main_beam_efficiencies[date_range].keys():
if not self.df[self.df["telescope"] == pixel].empty:
self.df.loc[(self.df["timestamp"] >= start_date) &
(self.df["timestamp"] <= end_date) &
(self.df["telescope"] == pixel),
"beff"] = main_beam_efficiencies[
date_range][pixel]
# Now the dataframe has to be filtered based on the exclude statements
for key, value in config["pipeline"]["global"]["exclude"].items():
try:
if type(value) == str:
self.df = self.df[~self.df[key].str.contains(
value, regex=False)]
if type(value) == list:
for item in value:
self.df = self.df[~self.df[key].str.contains(
item, regex=False)]
except KeyError as e:
# Here we check if extra keywords are to be read. If we do not
# want to read the user section we silently ignore all keywords
# that are not in the standard_index of class.
if read_user_section:
raise e
else:
if key not in standard_index:
continue
else:
raise e
try:
includes = config["pipeline"]["global"]["include"] or {}
except KeyError:
includes = {}
new_df = None
for key, value in includes.items():
value = [str(val) for val in value]
try:
if new_df is None:
new_df = self.df[self.df[key].map(str).isin(value)]
else:
new_df = pd.concat(
[new_df, self.df[self.df[key].map(str).isin(value)]])
except KeyError as e:
if read_user_section:
raise e
else:
if key not in standard_index:
continue
else:
raise e
if new_df is not None:
self.df = new_df
if self.df.empty:
raise DataFrameEmpty
def group(self, groups=["telescope", "source", "line"]):
self.grouped = self.df.groupby(groups, as_index=False)
def _get_stats(self):
self.windows = get_class_windows()
self.window_string = " ".join(["{0[0]} {0[1]}".format(window) for window in self.windows])
if self.window_string=="":
msg = "no windows found, skipping stats collection"
self.log.debug(msg)
pyclass.message(pyclass.seve.e, "PD_INDEX", msg)
return
template = ENV.get_template("pandas_write_stats.class")
context = {
"input_file": self.input_file,
"memory_dir": self.folder_context["memory_dir"],
"windows" : self.window_string
}
script = ''.join(template.generate(context))
script_name = "{}/get_stats.class".format(
self.folder_context["script_dir"])
with open(script_name, "w") as script_file:
script_file.write(script)
self.run_class_script(script_name)
#
index_file = "{}/pandas_stats.csv".format(
self.folder_context["memory_dir"])
#
#stats_df = pd.read_csv(
# index_file,
# names=["index","number","version",'stats_mean','stats_median','stats_max','stats_min','stats_rms','stats_sum'])
stats_df = pd.read_csv(
index_file)
stats_df.columns = stats_df.columns.str.strip()
#
        if len(stats_df) > 0:
self.df = pd.merge(self.df, stats_df, on=["index","number","version"])
def _get_rms(self):
#
self.windows = get_class_windows()
self.window_string = " ".join(["{0[0]} {0[1]}".format(window) for window in self.windows])
if self.window_string=="":
msg = "no windows found, skipping rms collection"
self.log.debug(msg)
pyclass.message(pyclass.seve.e, "PD_INDEX", msg)
return
if self.get_fft_power:
# for python fft version (slow)
#self.fft_command = "kosma\\fft /ignore_x_limits /no_plot "
#for fft_freq in self.get_fft_power:
# self.fft_command += " /fft_frequency_focus {0}".format(fft_freq)
self.fft_definitions = "def real fft_freqs[{0}] /global\nlet fft_freqs {1}\n".format(len(self.get_fft_power)," ".join(self.get_fft_power))
template = ENV.get_template("pandas_write_rms.class")
context = {
"input_file": self.input_file,
"memory_dir": self.folder_context["memory_dir"],
"windows" : self.window_string,
"fft_definitions" : self.fft_definitions
}
script = ''.join(template.generate(context))
script_name = "{}/get_rms.class".format(
self.folder_context["script_dir"])
with open(script_name, "w") as script_file:
script_file.write(script)
self.run_class_script(script_name)
#
index_file = "{}/pandas_rms.csv".format(
self.folder_context["memory_dir"])
#
self.rms_df = pd.read_csv(
index_file)
        self.log.debug(self.rms_df.columns)
#
        if len(self.rms_df) > 0:
self.df = pd.merge(self.df, self.rms_df, on=["index","number","version"])
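# Illustrative sketch (not a definitive recipe) of building an index with
# PandasClass; the input file name is hypothetical and the config skeleton
# mirrors the one built in get_index above:
def _example_pandas_class():
    folder_context = {"script_dir": "/tmp/", "memory_dir": "./"}
    config = {
        "general": {"observatory": "SOFIA_GREAT"},
        "pipeline": {
            "global": {"exclude": {}, "include": {}},
            "class_processing": {"batch": True, "silent": True},
        },
    }
    index = PandasClass(input_file="observations.great",  # hypothetical file
                        folder_context=folder_context,
                        config=config,
                        reduction_parameters={})
    return index.df.groupby(["telescope", "source", "line"]).size()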
|
{"hexsha": "fa5d01d1b1a9f03971510c018815e9b7fb56ae88", "size": 24919, "ext": "py", "lang": "Python", "max_stars_repo_path": "kosma_py_lib/build/lib/kosma_py_lib/pandas_index.py", "max_stars_repo_name": "KOSMAsubmm/kosma_gildas_dlc", "max_stars_repo_head_hexsha": "cfa61dff10713717858a90eea52af76ca95e9fb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kosma_py_lib/build/lib/kosma_py_lib/pandas_index.py", "max_issues_repo_name": "KOSMAsubmm/kosma_gildas_dlc", "max_issues_repo_head_hexsha": "cfa61dff10713717858a90eea52af76ca95e9fb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kosma_py_lib/build/lib/kosma_py_lib/pandas_index.py", "max_forks_repo_name": "KOSMAsubmm/kosma_gildas_dlc", "max_forks_repo_head_hexsha": "cfa61dff10713717858a90eea52af76ca95e9fb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0929054054, "max_line_length": 150, "alphanum_fraction": 0.5568040451, "include": true, "reason": "import numpy", "num_tokens": 5390}
|
import json
import argparse
import random
import ast
import itertools
import csv
from Levenshtein import ratio
def convert_l(l):
if type(l) == list:
return l
else:
return ast.literal_eval(l)
def check_dist(a,set_):
ratios = [ratio(a,b) for b in set_]
return max(ratios)
def intersection(lst1, lst2):
return [t for t in lst1 if check_dist(t,lst2) >= .95]
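# Near-duplicate matching: a generated relation counts as "seen" if its
# Levenshtein ratio to any training relation is at least 0.95; the novelty
# score below is the fraction of unique generations outside this set.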
def prepare_results(metric, p, r, f):
    # 'metric' names the score being reported (e.g. ROUGE or BLEU)
    return '\t{}:\t{}: {:5.2f}\t{}: {:5.2f}\t{}: {:5.2f}'.format(metric, 'P', 100.0 * p, 'R', 100.0 * r, 'F1', 100.0 * f)
def add_template(rel, dim, kg_type='atomic'):
if len(rel) == 0:
rel = 'none.'
if rel[-1] != '.':
rel += '.'
if 'xEffect' in dim:
return 'PersonX is likely: ' + rel #return 'PersonX ' + rel
if 'oEffect' in dim:
return 'PersonY is likely: ' + rel #return 'PersonY ' + rel
if 'xWant' in dim:
return 'PersonX wants: ' + rel #'PersonX will want to ' + rel
if 'oWant' in dim:
return 'PersonY wants: ' + rel
if 'xIntent' in dim:
return 'PersonX wanted: ' + rel #'The intent was ' + rel
if 'oIntent' in dim:
return 'PersonY wanted: ' + rel
if 'xAttr' in dim:
return 'PersonX is seen as: ' + rel
if 'xNeed' in dim:
return 'PersonX needed: ' + rel #'PersonX needs to ' + rel
if 'xReact' in dim:
return 'PersonX then feels: ' + rel #'PersonX feels ' + rel
if 'oReact' in dim:
return 'Others then feel: ' + rel #'PersonY feels ' + rel
return rel
def reverse_template(rel):
prefix = rel.split(':')[0]
if 'PersonY/Others want' in prefix:
return 'oWant'
if 'PersonX wants' in prefix:
return 'xWant'
if 'PersonY/Others are likely' in prefix:
return 'oEffect'
if 'PersonY/Others then feel' in prefix:
return 'oReact'
if 'PersonX then feels' in prefix:
return 'xReact'
if 'PersonX is likely' in prefix:
return 'xEffect'
if 'PersonX is seen as' in prefix:
return 'xAttr'
if 'PersonX needed' in prefix:
return 'xNeed'
if 'PersonX wanted' in prefix:
return 'xIntent'
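# Worked example (hypothetical relation text) showing that the two helpers
# above round-trip through the template prefixes:
#   add_template('feel happy', 'xReact')                 # -> 'PersonX then feels: feel happy.'
#   reverse_template('PersonX then feels: feel happy.')  # -> 'xReact'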
random.seed(0)
parser = argparse.ArgumentParser(description='Evaluate novelty')
parser.add_argument('--decoded_file',type=str,default='../../data/gen_data/beam_outputs.jsonl')
parser.add_argument('--gold_file',type=str,default='../../data/gold_set.jsonl')
args = parser.parse_args()
original_data = open(args.gold_file)
original_data = [json.loads(l) for l in original_data.readlines()]
data = [json.loads(l) for l in open(args.decoded_file).readlines()]
dims = ["xNeed","xIntent","xWant","oEffect","xReact","oWant","oReact","xEffect","xAttr"]
training_rels_all = [l for l in csv.reader(open('../../data/v4_atomic_all.csv'))][1:]
training_rels_all = [list(itertools.chain.from_iterable([convert_l(o) for o in l[1:-2]])) for l in training_rels_all]
training_rels_all = set(list(itertools.chain.from_iterable(training_rels_all)))
hyps = []
refs = []
hyps_tokenized = []
refs_tokenized = []
stories = []
dim_rels = []
nodim_rels = []
for l in original_data:
stories.append(l['story'])
d_ = [entry for entry in data if entry['story'] == l['story']]
if len(d_) == 0:
continue
d_ = d_[0]
dim = reverse_template(l['prefix'])
dim_rels.append(dim)
gold_rel = l['prefix'] + ' ' + l['rel']
gen_rel = d_['<|sent' + str(l['sentID']) + '|>_generated_relations'][dims.index(dim)]
gen_rel = [g for g in gen_rel if g.lower() != 'none' and g.lower() != 'none.']
nodim_rels.extend(gen_rel)
print('num unique stories: ' + str(len(set(stories))))
nodim_rels = [l.lower() for l in nodim_rels]
training_rels_all = [l.lower() for l in training_rels_all]
training_rels_all = [l for l in training_rels_all if l != 'none' and l != 'none.']
novelty1 = intersection(nodim_rels,training_rels_all)
novelty = float(len(set(nodim_rels))-len(set(novelty1)))/len(set(nodim_rels))
print('novelty w/ all: ' + str(novelty))
|
{"hexsha": "949f4d1b390862a42bec87246aca9f8a319a2106", "size": 4086, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/eval/eval_novelty.py", "max_stars_repo_name": "skgabriel/paracomet", "max_stars_repo_head_hexsha": "58dcc88c48f14103c3b890bbda5bb1346a0cdc26", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-12-30T15:27:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T01:58:22.000Z", "max_issues_repo_path": "src/eval/eval_novelty.py", "max_issues_repo_name": "skgabriel/paracomet", "max_issues_repo_head_hexsha": "58dcc88c48f14103c3b890bbda5bb1346a0cdc26", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-03-25T02:56:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T04:36:42.000Z", "max_forks_repo_path": "src/eval/eval_novelty.py", "max_forks_repo_name": "skgabriel/paracomet", "max_forks_repo_head_hexsha": "58dcc88c48f14103c3b890bbda5bb1346a0cdc26", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-03T16:17:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-16T08:05:08.000Z", "avg_line_length": 30.7218045113, "max_line_length": 121, "alphanum_fraction": 0.6348507097, "include": true, "reason": "import numpy", "num_tokens": 1222}
|
# -*- coding: utf-8 -*-
import numpy as np
from .Qt import QtGui, QtCore
from .functions import mkColor, eq, colorDistance, clip_scalar, clip_array
from os import path, listdir
from collections.abc import Callable, Sequence
import warnings
__all__ = ['ColorMap']
_mapCache = {}
def listMaps(source=None):
"""
.. warning:: Experimental, subject to change.
List available color maps.
Parameters
----------
source: str, optional
Color map source. If omitted, locally stored maps are listed. Otherwise:
- 'matplotlib' lists maps that can be imported from Matplotlib
- 'colorcet' lists maps that can be imported from ColorCET
Returns
-------
list of str
Known color map names.
"""
if source is None:
pathname = path.join(path.dirname(__file__), 'colors','maps')
files = listdir( pathname )
list_of_maps = []
for filename in files:
if filename[-4:] == '.csv' or filename[-4:] == '.hex':
list_of_maps.append(filename[:-4])
return list_of_maps
elif source.lower() == 'matplotlib':
try:
import matplotlib.pyplot as mpl_plt
list_of_maps = mpl_plt.colormaps()
return list_of_maps
except ModuleNotFoundError:
return []
elif source.lower() == 'colorcet':
try:
import colorcet
list_of_maps = list( colorcet.palette.keys() )
list_of_maps.sort()
return list_of_maps
except ModuleNotFoundError:
return []
return []
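# Usage sketch (the exact names depend on the files shipped in colors/maps/
# and on which optional libraries are installed):
#   listMaps()                        # e.g. ['CET-C1', 'CET-L17', ...]
#   listMaps(source='matplotlib')     # matplotlib names, or [] if unavailable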
def get(name, source=None, skipCache=False):
"""
.. warning:: Experimental, subject to change.
Returns a ColorMap object from a local definition or imported from another library.
The generated ColorMap objects are cached for fast repeated access.
Parameters
----------
name: str
Name of color map. In addition to the included maps, this can also
be a path to a file in the local folder. See the files in the
``pyqtgraph/colors/maps/`` folder for examples of the format.
source: str, optional
If omitted, a locally stored map is returned. Otherwise:
- 'matplotlib' imports a map defined by Matplotlib.
- 'colorcet' imports a map defined by ColorCET.
skipCache: bool, optional
If `skipCache=True`, the internal cache is skipped and a new
ColorMap object is generated. This can load an unaltered copy
when the previous ColorMap object has been modified.
"""
if not skipCache and name in _mapCache:
return _mapCache[name]
if source is None:
return _getFromFile(name)
elif source == 'matplotlib':
return getFromMatplotlib(name)
elif source == 'colorcet':
return getFromColorcet(name)
return None
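# Usage sketch (assumes 'CET-L17' is among the locally shipped maps):
#   cm = get('CET-L17')                   # parsed from file on first access
#   cm is get('CET-L17')                  # True: repeat calls come from _mapCache
#   get('viridis', source='matplotlib')   # imported map, or None without matplotlib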
def _getFromFile(name):
filename = name
if filename[0] !='.': # load from built-in directory
dirname = path.dirname(__file__)
filename = path.join(dirname, 'colors/maps/'+filename)
if not path.isfile( filename ): # try suffixes if file is not found:
if path.isfile( filename+'.csv' ): filename += '.csv'
elif path.isfile( filename+'.hex' ): filename += '.hex'
with open(filename,'r') as fh:
idx = 0
color_list = []
if filename[-4:].lower() != '.hex':
csv_mode = True
else:
csv_mode = False
for line in fh:
line = line.strip()
if len(line) == 0: continue # empty line
if line[0] == ';': continue # comment
parts = line.split(sep=';', maxsplit=1) # split into color and names/comments
if csv_mode:
comp = parts[0].split(',')
if len( comp ) < 3: continue # not enough components given
color_tuple = tuple( [ int(255*float(c)+0.5) for c in comp ] )
else:
hex_str = parts[0]
if hex_str[0] == '#':
hex_str = hex_str[1:] # strip leading #
if len(hex_str) < 3: continue # not enough information
if len(hex_str) == 3: # parse as abbreviated RGB
hex_str = 2*hex_str[0] + 2*hex_str[1] + 2*hex_str[2]
elif len(hex_str) == 4: # parse as abbreviated RGBA
hex_str = 2*hex_str[0] + 2*hex_str[1] + 2*hex_str[2] + 2*hex_str[3]
if len(hex_str) < 6: continue # not enough information
try:
color_tuple = tuple( bytes.fromhex( hex_str ) )
except ValueError as e:
raise ValueError(f"failed to convert hexadecimal value '{hex_str}'.") from e
color_list.append( color_tuple )
idx += 1
# end of line reading loop
# end of open
cmap = ColorMap( name=name,
pos=np.linspace(0.0, 1.0, len(color_list)),
color=color_list) #, names=color_names)
if cmap is not None:
cmap.name = name
_mapCache[name] = cmap
return cmap
def getFromMatplotlib(name):
"""
Generates a ColorMap object from a Matplotlib definition.
Same as ``colormap.get(name, source='matplotlib')``.
"""
# inspired and informed by "mpl_cmaps_in_ImageItem.py", published by Sebastian Hoefer at
# https://github.com/honkomonk/pyqtgraph_sandbox/blob/master/mpl_cmaps_in_ImageItem.py
try:
import matplotlib.pyplot as mpl_plt
except ModuleNotFoundError:
return None
cmap = None
col_map = mpl_plt.get_cmap(name)
if hasattr(col_map, '_segmentdata'): # handle LinearSegmentedColormap
data = col_map._segmentdata
if ('red' in data) and isinstance(data['red'], (Sequence, np.ndarray)):
positions = set() # super-set of handle positions in individual channels
for key in ['red','green','blue']:
for tup in data[key]:
positions.add(tup[0])
col_data = np.zeros((len(positions),4 ))
col_data[:,-1] = sorted(positions)
for idx, key in enumerate(['red','green','blue']):
positions = np.zeros( len(data[key] ) )
comp_vals = np.zeros( len(data[key] ) )
for idx2, tup in enumerate( data[key] ):
positions[idx2] = tup[0]
comp_vals[idx2] = tup[1] # these are sorted in the raw data
col_data[:,idx] = np.interp(col_data[:,3], positions, comp_vals)
cmap = ColorMap(pos=col_data[:,-1], color=255*col_data[:,:3]+0.5)
# some color maps (gnuplot in particular) are defined by RGB component functions:
elif ('red' in data) and isinstance(data['red'], Callable):
col_data = np.zeros((64, 4))
col_data[:,-1] = np.linspace(0., 1., 64)
for idx, key in enumerate(['red','green','blue']):
col_data[:,idx] = np.clip( data[key](col_data[:,-1]), 0, 1)
cmap = ColorMap(pos=col_data[:,-1], color=255*col_data[:,:3]+0.5)
elif hasattr(col_map, 'colors'): # handle ListedColormap
col_data = np.array(col_map.colors)
cmap = ColorMap( name=name,
pos = np.linspace(0.0, 1.0, col_data.shape[0]), color=255*col_data[:,:3]+0.5 )
if cmap is not None:
cmap.name = name
_mapCache[name] = cmap
return cmap
def getFromColorcet(name):
""" Generates a ColorMap object from a colorcet definition. Same as ``colormap.get(name, source='colorcet')``. """
try:
import colorcet
except ModuleNotFoundError:
return None
color_strings = colorcet.palette[name]
color_list = []
for hex_str in color_strings:
if hex_str[0] != '#': continue
if len(hex_str) != 7:
raise ValueError(f"Invalid color string '{hex_str}' in colorcet import.")
color_tuple = tuple( bytes.fromhex( hex_str[1:] ) )
color_list.append( color_tuple )
if len(color_list) == 0:
return None
cmap = ColorMap( name=name,
pos=np.linspace(0.0, 1.0, len(color_list)),
color=color_list) #, names=color_names)
if cmap is not None:
cmap.name = name
_mapCache[name] = cmap
return cmap
def makeHslCycle( hue=0.0, saturation=1.0, lightness=0.5, steps=36 ):
"""
Returns a ColorMap object that traces a circular or spiraling path around the HSL color space.
Parameters
----------
hue : float or tuple of floats
Starting point or (start, end) for hue. Values can lie outside the [0 to 1] range
to realize multiple cycles. For a single value, one full hue cycle is generated.
The default starting hue is 0.0 (red).
saturation : float or tuple of floats, optional
Saturation value for the colors in the cycle, in the range of [0 to 1].
If a (start, end) tuple is given, saturation gradually changes between these values.
The default saturation is 1.0.
lightness : float or tuple of floats, optional
Lightness value for the colors in the cycle, in the range of [0 to 1].
If a (start, end) tuple is given, lightness gradually changes between these values.
        The default lightness is 0.5.
steps: int, optional
Number of steps in the cycle. Between these steps, the color map will interpolate in RGB space.
The default number of steps is 36, generating a color map with 37 stops.
"""
if isinstance( hue, (tuple, list) ):
hueA, hueB = hue
else:
hueA = hue
hueB = hueA + 1.0
if isinstance( saturation, (tuple, list) ):
satA, satB = saturation
else:
satA = satB = saturation
if isinstance( lightness, (tuple, list) ):
lgtA, lgtB = lightness
else:
lgtA = lgtB = lightness
hue_vals = np.linspace(hueA, hueB, num=steps+1)
sat_vals = np.linspace(satA, satB, num=steps+1)
lgt_vals = np.linspace(lgtA, lgtB, num=steps+1)
color_list = []
for hue, sat, lgt in zip( hue_vals, sat_vals, lgt_vals):
qcol = QtGui.QColor.fromHslF( hue%1.0, sat, lgt )
color_list.append( qcol )
name = f'Hue {hueA:0.2f}-{hueB:0.2f}'
return ColorMap( None, color_list, name=name )
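# Usage sketch: a full hue circle starting at red, and a two-turn spiral that
# simultaneously dims from lightness 0.6 to 0.2:
#   wheel  = makeHslCycle()                                  # 36 steps, 37 stops
#   spiral = makeHslCycle(hue=(0.0, 2.0), lightness=(0.6, 0.2))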
def makeMonochrome(color='neutral'):
"""
Returns a ColorMap object with a dark to bright ramp and adjustable tint.
In addition to neutral, warm or cold grays, imitations of monochrome computer monitors are also
available. The following predefined color ramps are available:
`neutral`, `warm`, `cool`, `green`, `amber`, `blue`, `red`, `pink`, `lavender`.
The ramp can also be specified by a tuple of float values in the range of 0 to 1.
In this case `(h, s, l0, l1)` describe hue, saturation, minimum lightness and maximum lightness
within the HSL color space. The values `l0` and `l1` can be omitted. They default to
`l0=0.0` and `l1=1.0` in this case.
Parameters
----------
color: str or tuple of floats
Color description. Can be one of the predefined identifiers, or a tuple
        `(h, s, l0, l1)`, `(h, s)`, or a single float `h`, with all values in
        the range 0.0 to 1.0.
"""
name=f'Monochrome {color}'
defaults = {
'neutral': (0.00, 0.00, 0.00, 1.00),
'warm' : (0.10, 0.08, 0.00, 0.95),
'cool' : (0.60, 0.08, 0.00, 0.95),
'green' : (0.35, 0.55, 0.02, 0.90),
'amber' : (0.09, 0.80, 0.02, 0.80),
'blue' : (0.58, 0.85, 0.02, 0.95),
'red' : (0.01, 0.60, 0.02, 0.90),
'pink' : (0.93, 0.65, 0.02, 0.95),
'lavender': (0.75, 0.50, 0.02, 0.90)
}
if isinstance(color, str):
if color in defaults:
h_val, s_val, l_min, l_max = defaults[color]
else:
valid = ','.join(defaults.keys())
raise ValueError(f"Undefined color descriptor '{color}', known values are:\n{valid}")
else:
s_val = 0.70 # set up default values
l_min = 0.00
l_max = 1.00
if not hasattr(color,'__len__'):
h_val = float(color)
elif len(color) == 1:
h_val = color[0]
elif len(color) == 2:
h_val, s_val = color
elif len(color) == 4:
h_val, s_val, l_min, l_max = color
else:
raise ValueError(f"Invalid color descriptor '{color}'")
l_vals = np.linspace(l_min, l_max, num=16)
color_list = []
for l_val in l_vals:
qcol = QtGui.QColor.fromHslF( h_val, s_val, l_val )
color_list.append( qcol )
return ColorMap( None, color_list, name=name, linearize=True )
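# Usage sketch:
#   makeMonochrome('amber')        # imitates an amber monochrome monitor
#   makeMonochrome((0.09, 0.8))    # custom hue/saturation, lightness 0.0 to 1.0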
def modulatedBarData(length=768, width=32):
"""
Returns an NumPy array that represents a modulated color bar ranging from 0 to 1.
This is used to judge the perceived variation of the color gradient.
Parameters
----------
length: int
Length of the data set. Values will vary from 0 to 1 over this axis.
width: int
Width of the data set. The modulation will vary from 0% to 4% over this axis.
"""
gradient = np.linspace(0.00, 1.00, length)
modulation = -0.04 * np.sin( (np.pi/4) * np.arange(length) )
data = np.zeros( (length, width) )
for idx in range(width):
data[:,idx] = gradient + (idx/(width-1)) * modulation
clip_array(data, 0.0, 1.0, out=data)
return data
class ColorMap(object):
"""
ColorMap(pos, color, mapping=ColorMap.CLIP)
ColorMap stores a mapping of specific data values to colors, for example:
| 0.0 → black
| 0.2 → red
| 0.6 → yellow
| 1.0 → white
The colors for intermediate values are determined by interpolating between
the two nearest colors in RGB color space.
A ColorMap object provides access to the interpolated colors by indexing with a float value:
``cm[0.5]`` returns a QColor corresponding to the center of ColorMap `cm`.
"""
## mapping modes
CLIP = 1
REPEAT = 2
MIRROR = 3
DIVERGING = 4
## return types
BYTE = 1
FLOAT = 2
QCOLOR = 3
enumMap = {
'clip': CLIP,
'repeat': REPEAT,
'mirror': MIRROR,
'diverging': DIVERGING,
'byte': BYTE,
'float': FLOAT,
'qcolor': QCOLOR,
}
def __init__(self, pos, color, mapping=CLIP, mode=None, linearize=False, name=''):
"""
__init__(pos, color, mapping=ColorMap.CLIP)
Parameters
----------
pos: array_like of float in range 0 to 1, or None
Assigned positions of specified colors. `None` sets equal spacing.
color: array_like of colors
List of colors, interpreted via :func:`mkColor() <pyqtgraph.mkColor>`.
mapping: str or int, optional
Controls how values outside the 0 to 1 range are mapped to colors.
See :func:`setMappingMode() <ColorMap.setMappingMode>` for details.
The default of `ColorMap.CLIP` continues to show
the colors assigned to 0 and 1 for all values below or above this range, respectively.
"""
self.name = name # storing a name helps identify ColorMaps sampled by Palette
if mode is not None:
warnings.warn(
"'mode' argument is deprecated and does nothing.",
DeprecationWarning, stacklevel=2
)
if pos is None:
order = range(len(color))
self.pos = np.linspace(0.0, 1.0, num=len(color))
else:
self.pos = np.array(pos)
order = np.argsort(self.pos)
self.pos = self.pos[order]
self.color = np.zeros( (len(color), 4) ) # stores float rgba values
for cnt, idx in enumerate(order):
self.color[cnt] = mkColor(color[idx]).getRgbF()
# alternative code may be more efficient, but fails to handle lists of QColor.
# self.color = np.apply_along_axis(
# func1d = lambda x: np.uint8( mkColor(x).getRgb() ), # cast RGB integer values to uint8
# axis = -1,
# arr = color,
# )[order]
self.mapping_mode = self.CLIP # default to CLIP mode
if mapping is not None:
self.setMappingMode( mapping )
self.stopsCache = {}
if linearize: self.linearize()
def setMappingMode(self, mapping):
"""
Sets the way that values outside of the range 0 to 1 are mapped to colors.
Parameters
----------
mapping: int or str
Sets mapping mode to
- `ColorMap.CLIP` or 'clip': Values are clipped to the range 0 to 1. ColorMap defaults to this.
- `ColorMap.REPEAT` or 'repeat': Colors repeat cyclically, i.e. range 1 to 2 repeats the colors for 0 to 1.
- `ColorMap.MIRROR` or 'mirror': The range 0 to -1 uses same colors (in reverse order) as 0 to 1.
- `ColorMap.DIVERGING` or 'diverging': Colors are mapped to -1 to 1 such that the central value appears at 0.
"""
if isinstance(mapping, str):
mapping = self.enumMap[mapping.lower()]
if mapping in [self.CLIP, self.REPEAT, self.DIVERGING, self.MIRROR]:
self.mapping_mode = mapping # only allow defined values
else:
raise ValueError(f"Undefined mapping type '{mapping}'")
self.stopsCache = {}
def __str__(self):
""" provide human-readable identifier """
if self.name is None:
return 'unnamed ColorMap({:d})'.format(len(self.pos))
return "ColorMap({:d}):'{:s}'".format(len(self.pos),self.name)
def __getitem__(self, key):
""" Convenient shorthand access to palette colors """
if isinstance(key, int): # access by color index
return self.getByIndex(key)
# otherwise access by map
try: # accept any numerical format that converts to float
float_idx = float(key)
return self.mapToQColor(float_idx)
except ValueError: pass
return None
def linearize(self):
"""
Adjusts the positions assigned to color stops to approximately equalize the perceived color difference
for a fixed step.
"""
colors = self.getColors(mode=self.QCOLOR)
distances = colorDistance(colors)
positions = np.insert( np.cumsum(distances), 0, 0.0 )
self.pos = positions / positions[-1] # normalize last value to 1.0
self.stopsCache = {}
def reverse(self):
"""
Reverses the color map, so that the color assigned to a value of 1 now appears at 0 and vice versa.
This is convenient to adjust imported color maps.
"""
self.pos = 1.0 - np.flip( self.pos )
self.color = np.flip( self.color, axis=0 )
self.stopsCache = {}
def getSubset(self, start, span):
"""
        Returns a new ColorMap object that maps the subset specified by 'start' and 'span'
        onto the full 0.0 to 1.0 range. A negative span results in a color map that is reversed
relative to the original.
Parameters
----------
start : float (0.0 to 1.0)
Starting value that defines the 0.0 value of the new color map.
span : float (-1.0 to 1.0)
            Span of the extracted region. The original color map will be treated as cyclical
if the extracted interval exceeds the 0.0 to 1.0 range.
"""
pos, col = self.getStops( mode=ColorMap.FLOAT )
start = clip_scalar(start, 0.0, 1.0)
span = clip_scalar(span, -1.0, 1.0)
if span == 0.0:
raise ValueError("'length' needs to be non-zero")
stop = (start + span)
if stop > 1.0 or stop < 0.0: stop = stop % 1.0
# find indices *inside* range, start and end will be added by sampling later
if span > 0:
ref_pos = start # lowest position value at start
idxA = np.searchsorted( pos, start, side='right' )
idxB = np.searchsorted( pos, stop , side='left' ) # + 1 # right-side element of interval
wraps = bool( stop < start ) # wraps around?
else:
ref_pos = stop # lowest position value at stop
idxA = np.searchsorted( pos, stop , side='right')
idxB = np.searchsorted( pos, start, side='left' ) # + 1 # right-side element of interval
wraps = bool( stop > start ) # wraps around?
if wraps: # wraps around:
length1 = (len(pos)-idxA) # before wrap
length2 = idxB # after wrap
new_length = length1 + length2 + 2 # combined; plus edge elements
new_pos = np.zeros( new_length )
new_col = np.zeros( (new_length, 4) )
new_pos[ 1:length1+1] = (0 + pos[idxA:] - ref_pos) / span # starting point lie in 0 to 1 range
new_pos[length1+1:-1] = (1 + pos[:idxB] - ref_pos) / span # end point wrapped to -1 to 0 range
new_pos[length1] -= np.copysign(1e-6, span) # breaks degeneracy of shifted 0.0 and 1.0 values
new_col[ 1:length1+1] = col[idxA:]
new_col[length1+1:-1] = col[:idxB]
else: # does not wrap around:
new_length = (idxB - idxA) + 2 # two additional edge values will be added
new_pos = np.zeros( new_length )
new_col = np.zeros( (new_length, 4) )
new_pos[1:-1] = (pos[idxA:idxB] - ref_pos) / span
new_col[1:-1] = col[idxA:idxB]
if span < 0: # for reversed subsets, positions now progress 0 to -1 and need to be flipped
new_pos += 1.0
new_pos = np.flip( new_pos)
new_col = np.flip( new_col, axis=0 )
new_pos[ 0] = 0.0
new_col[ 0] = self.mapToFloat(start)
new_pos[-1] = 1.0
new_col[-1] = self.mapToFloat(stop)
cmap = ColorMap( pos=new_pos, color=255.*new_col )
cmap.name = f"{self.name}[{start:.2f}({span:+.2f})]"
return cmap
def map(self, data, mode=BYTE):
"""
map(data, mode=ColorMap.BYTE)
Returns an array of colors corresponding to a single value or an array of values.
Data must be either a scalar position or an array (any shape) of positions.
Parameters
----------
data: float or array_like of float
Scalar value(s) to be mapped to colors
mode: str or int, optional
Determines return format:
- `ColorMap.BYTE` or 'byte': Colors are returned as 0-255 unsigned bytes. (default)
- `ColorMap.FLOAT` or 'float': Colors are returned as 0.0-1.0 floats.
- `ColorMap.QCOLOR` or 'qcolor': Colors are returned as QColor objects.
Returns
-------
array of color.dtype
for `ColorMap.BYTE` or `ColorMap.FLOAT`:
RGB values for each `data` value, arranged in the same shape as `data`.
list of QColor objects
for `ColorMap.QCOLOR`:
Colors for each `data` value as Qcolor objects.
"""
if isinstance(mode, str):
mode = self.enumMap[mode.lower()]
if mode == self.QCOLOR:
pos, color = self.getStops(self.FLOAT)
else:
pos, color = self.getStops(mode)
if np.isscalar(data):
interp = np.empty((color.shape[1],), dtype=color.dtype)
else:
if not isinstance(data, np.ndarray):
data = np.array(data)
interp = np.empty(data.shape + (color.shape[1],), dtype=color.dtype)
if self.mapping_mode != self.CLIP:
if self.mapping_mode == self.REPEAT:
data = data % 1.0
elif self.mapping_mode == self.DIVERGING:
data = (data/2)+0.5
elif self.mapping_mode == self.MIRROR:
data = abs(data)
for i in range(color.shape[1]):
interp[...,i] = np.interp(data, pos, color[:,i])
# Convert to QColor if requested
if mode == self.QCOLOR:
if np.isscalar(data):
return QtGui.QColor.fromRgbF(*interp)
else:
return [QtGui.QColor.fromRgbF(*x.tolist()) for x in interp]
else:
return interp
def mapToQColor(self, data):
"""Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
return self.map(data, mode=self.QCOLOR)
def mapToByte(self, data):
"""Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
return self.map(data, mode=self.BYTE)
def mapToFloat(self, data):
"""Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
return self.map(data, mode=self.FLOAT)
def getByIndex(self, idx):
"""Retrieve a QColor by the index of the stop it is assigned to."""
return QtGui.QColor( *self.color[idx] )
def getGradient(self, p1=None, p2=None):
"""
Returns a QtGui.QLinearGradient corresponding to this ColorMap.
        The span and orientation are given by two points in plot coordinates.
When no parameters are given for `p1` and `p2`, the gradient is mapped to the
`y` coordinates 0 to 1, unless the color map is defined for a more limited range.
This is a somewhat expensive operation, and it is recommended to store and reuse the returned
gradient instead of repeatedly regenerating it.
Parameters
----------
p1: QtCore.QPointF, default (0,0)
Starting point (value 0) of the gradient.
p2: QtCore.QPointF, default (dy,0)
End point (value 1) of the gradient. Default parameter `dy` is the span of ``max(pos) - min(pos)``
over which the color map is defined, typically `dy=1`.
"""
if p1 is None:
p1 = QtCore.QPointF(0,0)
if p2 is None:
p2 = QtCore.QPointF(self.pos.max()-self.pos.min(),0)
grad = QtGui.QLinearGradient(p1, p2)
pos, color = self.getStops(mode=self.QCOLOR)
if self.mapping_mode == self.MIRROR:
pos_n = (1. - np.flip(pos)) / 2
col_n = np.flip( color, axis=0 )
pos_p = (1. + pos) / 2
col_p = color
pos = np.concatenate( (pos_n, pos_p) )
color = np.concatenate( (col_n, col_p) )
grad.setStops(list(zip(pos, color)))
if self.mapping_mode == self.REPEAT:
grad.setSpread( QtGui.QGradient.Spread.RepeatSpread )
return grad
def getBrush(self, span=(0.,1.), orientation='vertical'):
"""
Returns a QBrush painting with the color map applied over the selected span of plot values.
When the mapping mode is set to `ColorMap.MIRROR`, the selected span includes the color map twice,
first in reversed order and then normal.
It is recommended to store and reuse this gradient brush instead of regenerating it repeatedly.
Parameters
----------
span : tuple (min, max), default (0.0, 1.0)
Span of data values covered by the gradient:
- Color map value 0.0 will appear at `min`,
- Color map value 1.0 will appear at `max`.
orientation : str, default 'vertical'
            Orientation of the gradient:
- 'vertical': `span` corresponds to the `y` coordinate.
- 'horizontal': `span` corresponds to the `x` coordinate.
"""
if orientation == 'vertical':
grad = self.getGradient( p1=QtCore.QPointF(0.,span[0]), p2=QtCore.QPointF(0.,span[1]) )
elif orientation == 'horizontal':
grad = self.getGradient( p1=QtCore.QPointF(span[0],0.), p2=QtCore.QPointF(span[1],0.) )
else:
raise ValueError("Orientation must be 'vertical' or 'horizontal'")
return QtGui.QBrush(grad)
def getPen(self, span=(0.,1.), orientation='vertical', width=1.0):
"""
Returns a QPen that draws according to the color map based on vertical or horizontal position.
It is recommended to store and reuse this gradient pen instead of regenerating it repeatedly.
Parameters
----------
span : tuple (min, max), default (0.0, 1.0)
Span of the data values covered by the gradient:
- Color map value 0.0 will appear at `min`.
- Color map value 1.0 will appear at `max`.
orientation : str, default 'vertical'
            Orientation of the gradient:
            - 'vertical' creates a vertical gradient, where `span` corresponds to the `y` coordinate.
            - 'horizontal' creates a horizontal gradient, where `span` corresponds to the `x` coordinate.
width : int or float
Width of the pen in pixels on screen.
"""
brush = self.getBrush( span=span, orientation=orientation )
pen = QtGui.QPen(brush, width)
pen.setCosmetic(True)
return pen
def getColors(self, mode=BYTE):
"""
Returns a list of the colors associated with the stops of the color map.
The parameter `mode` can be one of
- `ColorMap.BYTE` or 'byte' to return colors as RGBA tuples in byte format (0 to 255)
- `ColorMap.FLOAT` or 'float' to return colors as RGBA tuples in float format (0.0 to 1.0)
- `ColorMap.QCOLOR` or 'qcolor' to return a list of QColors
The default is byte format.
"""
stops, color = self.getStops(mode=mode)
return color
def getStops(self, mode=BYTE):
"""
Returns a tuple (stops, colors) containing a list of all stops (ranging 0.0 to 1.0)
and a list of the associated colors.
The parameter `mode` can be one of
- `ColorMap.BYTE` or 'byte' to return colors as RGBA tuples in byte format (0 to 255)
- `ColorMap.FLOAT` or 'float' to return colors as RGBA tuples in float format (0.0 to 1.0)
- `ColorMap.QCOLOR` or 'qcolor' to return a list of QColors
The default is byte format.
"""
if isinstance(mode, str):
mode = self.enumMap[mode.lower()]
if mode not in self.stopsCache:
color = self.color
if mode == self.BYTE and color.dtype.kind == 'f':
color = (color*255).astype(np.ubyte)
elif mode == self.FLOAT and color.dtype.kind != 'f':
color = color.astype(float) / 255.
elif mode == self.QCOLOR:
if color.dtype.kind == 'f':
factory = QtGui.QColor.fromRgbF
else:
factory = QtGui.QColor.fromRgb
color = [factory(*x.tolist()) for x in color]
self.stopsCache[mode] = (self.pos, color)
return self.stopsCache[mode]
def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode=BYTE):
"""
getLookupTable(start=0.0, stop=1.0, nPts=512, alpha=None, mode=ColorMap.BYTE)
Returns an equally-spaced lookup table of RGB(A) values created
by interpolating the specified color stops.
Parameters
----------
start: float, default=0.0
The starting value in the lookup table
stop: float, default=1.0
The final value in the lookup table
nPts: int, default is 512
The number of points in the returned lookup table.
alpha: True, False, or None
Specifies whether or not alpha values are included in the table.
If alpha is None, it will be automatically determined.
mode: int or str, default is `ColorMap.BYTE`
Determines return type as described in :func:`map() <pyqtgraph.ColorMap.map>`, can be
            either `ColorMap.BYTE` (0 to 255), `ColorMap.FLOAT` (0.0 to 1.0) or `ColorMap.QCOLOR`.
Returns
-------
array of color.dtype
for `ColorMap.BYTE` or `ColorMap.FLOAT`:
RGB values for each `data` value, arranged in the same shape as `data`.
If alpha values are included the array has shape (`nPts`, 4), otherwise (`nPts`, 3).
list of QColor objects
for `ColorMap.QCOLOR`:
Colors for each `data` value as QColor objects.
"""
if isinstance(mode, str):
mode = self.enumMap[mode.lower()]
if alpha is None:
alpha = self.usesAlpha()
x = np.linspace(start, stop, nPts)
table = self.map(x, mode)
if not alpha and mode != self.QCOLOR:
return table[:,:3]
else:
return table
def usesAlpha(self):
"""Returns `True` if any stops have assigned colors with alpha < 255."""
        max_val = 1.0 if self.color.dtype.kind == 'f' else 255
        return np.any(self.color[:,3] != max_val)
def isMapTrivial(self):
"""
Returns `True` if the gradient has exactly two stops in it: Black at 0.0 and white at 1.0.
"""
if len(self.pos) != 2:
return False
if self.pos[0] != 0.0 or self.pos[1] != 1.0:
return False
if self.color.dtype.kind == 'f':
return np.all(self.color == np.array([[0.,0.,0.,1.], [1.,1.,1.,1.]]))
else:
return np.all(self.color == np.array([[0,0,0,255], [255,255,255,255]]))
def __repr__(self):
pos = repr(self.pos).replace('\n', '')
color = repr(self.color).replace('\n', '')
return "ColorMap(%s, %s)" % (pos, color)
def __eq__(self, other):
if other is None:
return False
return eq(self.pos, other.pos) and eq(self.color, other.color)
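# Usage sketch (hypothetical stop values) for the ColorMap API defined above:
#   cm = ColorMap(pos=[0.0, 1.0], color=[(0, 0, 0), (255, 255, 255)])
#   cm.map(0.5)                        # -> array([127, 127, 127, 255], dtype=uint8)
#   cm[0.5]                            # the same lookup, returned as a QColor
#   lut = cm.getLookupTable(nPts=256)  # (256, 3) byte lookup table, alpha dropped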
|
{"hexsha": "5d985ad42f0f166797c16c014a1d9f044806641d", "size": 34031, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyqtgraph/colormap.py", "max_stars_repo_name": "leo603222/fix-displace-between-selection-area-and-mouse-pos", "max_stars_repo_head_hexsha": "1f9031884a980432795b69487bd659f5e4ef91aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2762, "max_stars_repo_stars_event_min_datetime": "2015-01-02T14:34:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:06:07.000Z", "max_issues_repo_path": "pyqtgraph/colormap.py", "max_issues_repo_name": "leo603222/fix-displace-between-selection-area-and-mouse-pos", "max_issues_repo_head_hexsha": "1f9031884a980432795b69487bd659f5e4ef91aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1901, "max_issues_repo_issues_event_min_datetime": "2015-01-12T03:20:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:33:36.000Z", "max_forks_repo_path": "pyqtgraph/colormap.py", "max_forks_repo_name": "leo603222/fix-displace-between-selection-area-and-mouse-pos", "max_forks_repo_head_hexsha": "1f9031884a980432795b69487bd659f5e4ef91aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1038, "max_forks_repo_forks_event_min_datetime": "2015-01-01T04:05:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T11:57:51.000Z", "avg_line_length": 40.2733727811, "max_line_length": 121, "alphanum_fraction": 0.5809409068, "include": true, "reason": "import numpy", "num_tokens": 8648}
|
[STATEMENT]
lemma less_eq_multiset_total:
fixes M N :: "'a :: linorder multiset"
shows "\<not> M \<le> N \<Longrightarrow> N \<le> M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> M \<le> N \<Longrightarrow> N \<le> M
[PROOF STEP]
by simp
|
{"llama_tokens": 102, "file": null, "length": 1}
|
#!/usr/bin/env python
import numpy as np
from scipy import stats
from icecube.phys_services import I3MT19937
N=1000
def kstest(rs,i3name,i3var,spname,spvar):
sample = [getattr(rs,i3name)(*i3var) for x in range(N)]
return stats.kstest(sample, spname, args=spvar)[1]
def chisqtest(rs,i3name,i3var,spname,spvar,nbins):
sample = [getattr(rs,i3name)(*i3var) for x in range(N)]
hist = np.bincount(sample,minlength=nbins)
expected = getattr(stats,spname).pmf(range(nbins),*spvar)*N
return stats.chisquare(hist,expected)[1]
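# Under the null hypothesis the p-values are uniform on [0, 1], so roughly 5%
# of them should fall below 0.05; test_p() fails only when more than 15% do,
# keeping this smoke test robust against ordinary statistical fluctuation.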
def test_p(pvals):
pvals=np.array(pvals)
assert np.all(pvals>0)
frac = float((pvals < .05).sum())/pvals.size
assert(frac<.15)
for rs in [I3MT19937(), I3MT19937(0),I3MT19937([]),
I3MT19937([0]),I3MT19937([0,0]),I3MT19937([0,0,0])
]:
test_p([kstest(rs,'exp',(x,),'expon',(0,1./x)) for x in np.arange(.1,10,.1)])
test_p([kstest(rs,'uniform',(x,),'uniform',(0,x)) for x in np.arange(.1,10,.1)])
test_p([kstest(rs,'uniform',(x,x+y),'uniform',(x,y)) for x in range(1,10) for y in range(1,10)])
test_p([kstest(rs,'gaus',(x,y),'norm',(x,y)) for x in range(1,10) for y in range(1,10)])
test_p([chisqtest(rs,'integer',( x,),'randint',(0,x), x) for x in range(2,100)])
test_p([chisqtest(rs,'binomial',(x,y),'binom',(x,y),x+1) for x in range(1,10) for y in np.arange(.1,1,.1)])
test_p([chisqtest(rs,'poisson',(x,),'poisson',(x,),30) for x in np.arange(.1,10,.1)])
|
{"hexsha": "a3f01ca498875719cac83bc26ae5e08417fc469f", "size": 1489, "ext": "py", "lang": "Python", "max_stars_repo_path": "phys-services/resources/test/test_MT19937_stats.py", "max_stars_repo_name": "hschwane/offline_production", "max_stars_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-24T22:00:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-24T22:00:01.000Z", "max_issues_repo_path": "phys-services/resources/test/test_MT19937_stats.py", "max_issues_repo_name": "hschwane/offline_production", "max_issues_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "phys-services/resources/test/test_MT19937_stats.py", "max_forks_repo_name": "hschwane/offline_production", "max_forks_repo_head_hexsha": "e14a6493782f613b8bbe64217559765d5213dc1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-17T09:20:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T16:44:18.000Z", "avg_line_length": 41.3611111111, "max_line_length": 111, "alphanum_fraction": 0.6239086635, "include": true, "reason": "import numpy,from scipy", "num_tokens": 543}
|
import logging
import json
from onnxruntime import InferenceSession
import numpy as np
from pathlib import Path
from transformers import AutoTokenizer
import azure.functions as func
cwd = Path.cwd()  # avoid shadowing the built-in dir()
model_path_list = [str(x) for x in cwd.glob("*") if str(x).endswith("model")]
print(model_path_list)
if len(model_path_list) != 1:
raise RuntimeError("Could not find model")
model_path = model_path_list[0]
fast_tokenizer = AutoTokenizer.from_pretrained(model_path)
session = InferenceSession(f"{model_path}/german-distiled-optimized-quantized.onnx")
def create_error(error_given: str):
return func.HttpResponse(
json.dumps({"error": error_given}),
mimetype="application/json",
status_code=400,
)
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info("Python HTTP trigger function processed a request.")
req_query = req.params
setning = req_query.get("setning")
if setning is None:
return create_error("setning missing")
if setning.count("<mask>") != 1:
return create_error("either <mask> is missing or more than one <mask>")
if len(setning) > 512:
return create_error("Sentence too long")
result = fill_mask_onnx(setning.replace("<mask>", "[MASK]"))
return func.HttpResponse(json.dumps(result), mimetype="application/json")
def fill_mask_onnx(setning: str):
tokens = fast_tokenizer(setning, return_tensors="np")
if "token_type_ids" in tokens:
tokens.pop("token_type_ids")
output = session.run(None, tokens.__dict__["data"])
token_logits = output[0]
mask_token_index = np.where(tokens["input_ids"] == fast_tokenizer.mask_token_id)[1]
mask_token_logits_onnx1 = token_logits[0, mask_token_index, :]
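    # softmax over the vocabulary axis: convert mask-token logits to probabilities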
score = np.exp(mask_token_logits_onnx1) / np.exp(mask_token_logits_onnx1).sum(-1, keepdims=True)
top_5_idx = (-score[0]).argsort()[:5]
top_5_values = score[0][top_5_idx]
result = []
for token, s in zip(top_5_idx.tolist(), top_5_values.tolist()):
result.append(f"{setning.replace(fast_tokenizer.mask_token, fast_tokenizer.decode([token]))} (score: {s})")
return {"result": result}
|
{"hexsha": "2fb6daf511e332da2cbf36af7c13248a893f8425", "size": 2242, "ext": "py", "lang": "Python", "max_stars_repo_path": "GermanBert/predict/__init__.py", "max_stars_repo_name": "NeuroCode-io/model-deployments", "max_stars_repo_head_hexsha": "1641c766fbcc6c03647b31eb996fe57c09c173c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GermanBert/predict/__init__.py", "max_issues_repo_name": "NeuroCode-io/model-deployments", "max_issues_repo_head_hexsha": "1641c766fbcc6c03647b31eb996fe57c09c173c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GermanBert/predict/__init__.py", "max_forks_repo_name": "NeuroCode-io/model-deployments", "max_forks_repo_head_hexsha": "1641c766fbcc6c03647b31eb996fe57c09c173c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2972972973, "max_line_length": 116, "alphanum_fraction": 0.6842105263, "include": true, "reason": "import numpy", "num_tokens": 543}
|
from time import time
import numpy as np
from math import pi
from .txtmark import lib
def count_ns(vts, fs):
dv1 = vts[fs[:,1]] - vts[fs[:,2]]
dv2 = vts[fs[:,1]] - vts[fs[:,0]]
ns = np.cross(dv1, dv2)
ass = np.linalg.norm(ns, axis=1)
ns /= np.linalg.norm(ns, axis=1).reshape((-1,1))
buf = np.zeros_like(vts)
for i in (0,1,2): np.add.at(buf, fs[:,i], ns)
buf /= np.linalg.norm(buf, axis=1).reshape((-1,1))
return buf
def build_grididx(r, c):
idx = np.arange(r*c, dtype=np.uint32)
rs, cs = idx//c, idx%c
idx1 = idx[(rs<r-1)*(cs<c-1)].reshape((-1,1))
did = np.array([[0, 1, 1+c, 0, 1+c, c]], dtype=np.uint32)
return rs, cs, (idx1 + did).reshape((-1,3))
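# Worked example: a 2x2 grid has a single quad, split into two triangles:
#   build_grididx(2, 2)[2]   # -> array([[0, 1, 3], [0, 3, 2]], dtype=uint32)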
def build_surf2d(img, ds=1, sigma=0, k=0.2):
from skimage.filters import sobel_h, sobel_v
from scipy.ndimage import gaussian_filter
start = time()
img = img[::-ds, ::ds]
img = gaussian_filter(img, sigma)
r, c = img.shape
rs, cs, fs = build_grididx(r, c)
vs = img[rs, cs]
vts = np.array([cs*ds, rs*ds, vs*k], dtype=np.float32).T
cs = (np.ones((3, r*c))*(vs/255)).astype(np.float32).T
dx, dy = sobel_h(img), sobel_v(img)
cx, cy = np.zeros((r*c, 3)), np.zeros((r*c, 3))
cx[:,0], cx[:,2] = 1, dx.ravel()
cy[:,1], cy[:,2] = 1, dy.ravel()
ns = np.cross(cx, cy)
ns = (ns.T/np.linalg.norm(ns, axis=1)).astype(np.float32).T
#ns = count_ns(vts, fs)
print(time()-start)
return vts, fs, ns, cs
def build_surf3d(imgs, ds, level, step=1, c=(1,0,0)):
from skimage.measure import marching_cubes_lewiner
vts, fs, ns, cs = marching_cubes_lewiner(imgs[::ds,::ds,::ds], level, step_size=step)
vts *= ds
cs = (np.ones((len(vts), 3))*c).astype(np.float32)
return vts, fs, ns, cs
def build_ball(o, r, c=(1,0,0)):
ay, ax = np.mgrid[-pi/2:pi/2:9j, 0:pi*2:17j]
zs = np.sin(ay.ravel())
xs = np.cos(ax.ravel()) * np.cos(ay.ravel())
ys = np.sin(ax.ravel()) * np.cos(ay.ravel())
ns = np.vstack((xs, ys, zs)).astype(np.float32).T
vts = (ns * r + o).astype(np.float32)
fs = build_grididx(9, 17)[2]
cs = (np.ones((len(vts), 3))*c).astype(np.float32)
#print(ns2)
return vts, fs, ns, cs
def build_mesh(xs, ys, zs, c=(1,0,0)):
rs, cs, fs = build_grididx(xs.shape[0], xs.shape[1])
vts = np.array([xs[rs, cs], ys[rs, cs], zs[rs,cs]]).astype(np.float32).T
ns = count_ns(vts, fs)
cs = (np.ones((len(vts), 3))*c).astype(np.float32)
return vts, fs, ns, cs
def build_balls(os, rs, cs=(1,0,0)):
if isinstance(cs, tuple):
cs = [cs] * len(os)
vtss, fss, nss, css = [], [], [], []
for o,r,c in zip(os, rs, cs):
vv, ff, nn, cc = build_ball(o, r, c)
fss.append(ff+len(vtss)*len(vv))
vtss.append(vv)
nss.append(nn)
css.append(cc)
return np.vstack(vtss), np.vstack(fss), np.vstack(nss), np.vstack(css)
# index pattern: 0 1 1 2 2 3 3 4 4 5 5 6 ... each segment's endpoints are
# emitted twice so the polyline packs into (degenerate) triangles; the final
# index is repeated to pad the list to a multiple of 3
def build_line(xs, ys, zs, c):
vts = np.array([xs, ys, zs], dtype=np.float32).T
n = (len(xs)-1)*2
rem = (6 - n % 6)%6
fs = np.arange(0.1,(n+rem)//2,0.5).round().astype(np.uint32)
if rem>0: fs[-rem:] = len(xs)-1
ns = np.ones((len(vts), 3), dtype=np.float32)
cs = (np.ones((len(vts), 3))*c).astype(np.float32)
return vts, fs.reshape((-1,3)), ns, cs
def build_lines(xs, ys, zs, cs):
if not isinstance(cs, list):
cs = [cs] * len(xs)
vtss, fss, nss, css = [], [], [], []
s = 0
for x, y, z, c in zip(xs, ys, zs, cs):
vv, ff, nn, cc = build_line(x, y, z, c)
fss.append(ff+s)
s += len(vv)
vtss.append(vv)
nss.append(nn)
css.append(cc)
return np.vstack(vtss), np.vstack(fss), np.vstack(nss), np.vstack(css)
def build_mark(cont, pos, dz, h, color):
vts, fss = [], []
s, sw = 0, 0
for i in cont:
xs, ys, w = lib[i]
vv, ff, nn, cc = build_lines(xs, ys, ys, (0,0,0))
fss.append(ff+s)
vts.append(vv+[sw,0,0])
vts[-1][:,2] = dz
s += len(vv)
sw += w+0.3
sw -= 0.3
vts = (np.vstack(vts)-[sw/2.0, 0.5, 0])
return vts, np.vstack(fss), pos, h, color
def build_marks(conts, poss, dz, h, color):
if not hasattr(dz, '__len__'):
dz = [dz] * len(conts)
vtss, fss, pps = [], [], []
s = 0
for cont, pos, z in zip(conts, poss, dz):
vv, ff, pp, hh, cc = build_mark(cont, pos, z, h, color)
fss.append(ff+s)
s += len(vv)
vtss.append(vv)
pps.append((np.ones((len(vv),3))*pp).astype(np.float32))
return np.vstack(vtss), np.vstack(fss), np.vstack(pps), h, color
cmp = {'rainbow':[(127, 0, 255), (43, 126, 246), (42, 220, 220), (128, 254, 179), (212, 220, 127), (255, 126, 65), (255, 0, 0)],
'jet':[(0, 0, 127), (0, 40, 255), (0, 212, 255), (124, 255, 121), (255, 229, 0), (255, 70, 0), (127, 0, 0)],
'ocean':[(0, 127, 0), (0, 64, 42), (0, 0, 85), (0, 64, 128), (0, 127, 170), (129, 192, 213), (255, 255, 255)],
'earth':[(0, 0, 0), (27, 77, 122), (54, 135, 111), (93, 160, 75), (169, 179, 91), (206, 171, 132), (253, 250, 250)]}
def linear_color(cs):
if isinstance(cs, str): cs=cmp[cs]
cmap = np.zeros((256, 3), dtype=np.uint8)
idx = np.linspace(0, 256, len(cs)).astype(np.uint16)
for i in range(1, len(cs)):
c1, c2 = cs[i-1], cs[i]
rs, gs, bs = [np.linspace(c1[j], c2[j], idx[i]-idx[i-1]) for j in (0,1,2)]
cmap[idx[i-1]:idx[i]] = np.array((rs, gs, bs)).T
return cmap
def auto_lookup(vs, cmap):
vs = vs - vs.min()
vs = vs/vs.max()
vs = (vs*255).astype(np.uint8)
return cmap[vs]
if __name__ == '__main__':
from matplotlib import cm
cmap = linear_color('earth')
import matplotlib.pyplot as plt
img = np.zeros((30,256), dtype=np.uint8)
img[:] = np.arange(256)
img = cmap[img]
plt.imshow(img)
plt.show()
|
{"hexsha": "0c6409b8faccf8cde37b0c53f685c0529fd9de05", "size": 5377, "ext": "py", "lang": "Python", "max_stars_repo_path": "imagepy/core/myvi/util.py", "max_stars_repo_name": "siyemuxu888/imagepy", "max_stars_repo_head_hexsha": "a933526483a15da282bacac54608d44d2173beb4", "max_stars_repo_licenses": ["BSD-4-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "imagepy/core/myvi/util.py", "max_issues_repo_name": "siyemuxu888/imagepy", "max_issues_repo_head_hexsha": "a933526483a15da282bacac54608d44d2173beb4", "max_issues_repo_licenses": ["BSD-4-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "imagepy/core/myvi/util.py", "max_forks_repo_name": "siyemuxu888/imagepy", "max_forks_repo_head_hexsha": "a933526483a15da282bacac54608d44d2173beb4", "max_forks_repo_licenses": ["BSD-4-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8165680473, "max_line_length": 128, "alphanum_fraction": 0.5867584155, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2179}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This implements a simple routine for writing/reading tables of arrays/simarrays
in a human readable ascii form in a way that preserves dtypes and units.
See write_table() and read_table()
Created on Thu Sep 14 11:45:35 2017
@author: ibackus
"""
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
_COLPAD = 2
def get_units(x):
"""
Gets the units of x. Returns None if x has no units
"""
try:
return x.units
except AttributeError:
return None
def _line_slicer(colwidth):
"""
Makes a function to slice a line string at the column points defined
by colwidth
colwidth is list-like, defining width of columns. The standard column
padding is assumed
"""
i0 = 0
iStart = []
iEnd = []
for width in colwidth:
i1 = i0 + width
iStart.append(i0)
iEnd.append(i1)
i0 = i1
    slice_ind = list(zip(iStart, iEnd))  # materialize so the closure can reuse it
def slice_line(line, strip=False):
"""
split line into different columns
if strip == True, all the columns will have .strip() applied
"""
line = line.strip('\n')
out = [line[i0:i1] for i0, i1 in slice_ind]
if strip:
out = [x.strip() for x in out]
return out
return slice_line
def _line_length(colwidth):
length = np.sum(colwidth)
return length
def _read_header(f):
"""
Reads a sim array table header from an open file buffer.
NOTE: this reads starting at the current file pointer location.
"""
import json
header = [f.readline() for _ in range(6)]
# First character in the header should be a comment, replace it with a
# space
header = [' ' + line[1:] for line in header]
# Parse header
colwidth, names, dtypes, units, ind, hline = header
colwidth = json.loads(colwidth)['colwidth']
slice_line = _line_slicer(colwidth)
names = slice_line(names, strip=True)
# Names are formated as: ' "array name" '
names = [name.strip()[1:-1] for name in names]
dtypes = slice_line(dtypes, strip=True)
units = slice_line(units, strip=True)
for i, unit in enumerate(units):
if unit in ('None', 'none', 'NoUnit()', 'NoUnit'):
units[i] = None
ind = slice_line(ind, strip=True)
ind = np.array([json.loads(i) for i in ind], dtype=int)
# Read the last line, should just be a bunch of --- lines
hline = hline.strip()
if hline != '-'*_line_length(colwidth):
        raise RuntimeError('header format is not what was expected')
return colwidth, names, dtypes, units, ind
def _read_file(f):
"""
Loads the data from a table file
"""
close_file = False
if isinstance(f, str):
close_file = True
# Try loading the file
f = open(f, 'r')
# Read the data
try:
colwidth, names, dtypes, units, ind = _read_header(f)
data_str = f.readlines()
nrow = len(data_str)
n_input_col = len(colwidth)
data_array = np.zeros([nrow, n_input_col], dtype='|S{0}'.format(max(colwidth)))
slice_line = _line_slicer(colwidth)
for i, line in enumerate(data_str):
data_array[i] = slice_line(line, strip=True)
finally:
if close_file:
f.close()
return colwidth, names, dtypes, units, ind, data_array
def read_table(f):
"""
Read a SimArray table stored by write_table()
f should be a filename or a buffer
returns a dict
"""
colwidth, names, dtypes, units, ind, data_array = _read_file(f)
nrow, n_input_col = data_array.shape
# Infer array shapes
num_cols = np.bincount(ind[:,0])
num_arrays = len(num_cols)
# Initialize arrays
array_dtype = num_arrays * [None]
array_units = num_arrays * [None]
array_names = num_arrays * [None]
for i, dtype, unit, name in zip(ind[:,0], dtypes, units, names):
array_dtype[i] = dtype
array_units[i] = unit
array_names[i] = name
arrays = []
for ncol, dtype, unit in zip(num_cols, array_dtype, array_units):
array = SimArray(np.zeros([nrow, ncol], dtype=dtype), unit)
arrays.append(array)
# Map data to arrays
for input_col in range(n_input_col):
dtype = dtypes[input_col]
iArray, iCol = ind[input_col]
column = data_array[:, input_col]
if dtype.startswith('|S'):
# handle strings
column = [val.strip()[1:-1] for val in column]
arrays[iArray][:, iCol] = column
# Default to 1D array for single-column arrays
for i, array in enumerate(arrays):
if array.shape[1] == 1:
arrays[i] = array.reshape(len(array))
return dict(zip(array_names, arrays))
def write_table(f, x, *args, **kwargs):
"""
write_table(f, x, (x1, x2, ...), **kwargs) saves a human-readable ASCII table
from arrays/SimArrays.
Parameters
----------
f : str or buffer
Filename or open buffer to save to
x, (x1, x2, ...) : array-like or SimArray or dict
Data to save. Must be 1-D or 2-D. The length of all arrays must
be the same. Must be shape (nrow,) or (nrow, ncol).
x can optionally be a dict of arrays. The optional args will be
ignored.
kwargs
------
f : str or filepointer
File or filename to save the table to.
col_labels : list-like, optional
Names of the columns. If x is a dict, then x.keys() is used.
Defaults to 0, 1, 2...
Notes
-----
This is NOT optimized for large arrays
"""
import json
# Parse kwargs
col_labels = kwargs.get('col_labels', None)
# Initial data formatting
if isinstance(x, dict):
col_labels = x.keys()
arrays = x.values()
else:
arrays = [x] + list(args)
if col_labels is None:
col_labels = np.arange(len(arrays))
# Deal with multiple-column data
for i, array in enumerate(arrays):
array = np.asanyarray(array)
if np.ndim(array) == 1:
arrays[i] = array[:,None]
elif np.ndim(array) != 2:
            raise ValueError('Can only write tables for 1-D or 2-D arrays')
save_arrays = []
save_labels = []
array_ind = []
for i, array in enumerate(arrays):
# Concatenate along columns
col_label = col_labels[i]
for j in range(array.shape[1]):
save_labels.append('"{0}"'.format(col_label))
save_arrays.append(array[:, j])
array_ind.append(json.dumps([i, j]))
dtypes = [array.dtype for array in save_arrays]
dtypes = [dtype.str for dtype in dtypes]
# Get units
units = [get_units(array) for array in save_arrays]
units = [str(unit) for unit in units]
# Convert arrays to strings
header = zip(save_labels, dtypes, units, array_ind)
header = [[str(a) for a in header_i] for header_i in header]
array_str = []
for array, dtype in zip(save_arrays, dtypes):
if '|S' in dtype:
# we have a string
string = ['"{0}"'.format(a) for a in array]
else:
string = [str(a) for a in array]
array_str.append(string)
colwidth = [max([len(a) for a in array_str_i]) for array_str_i in array_str]
headcolwidth = [max([len(a) for a in array_str_i]) for array_str_i in header]
colwidth = [max(c, h) for c, h in zip(colwidth, headcolwidth)]
colwidth = [width + _COLPAD for width in colwidth]
colwidth[0] += 1
print_str = ''
ncols = len(save_arrays)
# Format header
for i in xrange(len(header[0])):
line = ''
for j in xrange(ncols):
width = colwidth[j]
line += header[j][i].center(width)
line = '#' + line[1:]
print_str += line + '\n'
hline = '#' + '-' * np.sum(colwidth) + '\n'
print_str += hline
print_str = '# ' + json.dumps({'colwidth': colwidth}) + '\n' + print_str
# Format arrays
for i in xrange(len(array_str[0])):
for j in xrange(ncols):
width = colwidth[j]
print_str += array_str[j][i].center(width)
print_str += '\n'
if isinstance(f, str):
with open(f, 'w') as fp:
fp.write(print_str)
else:
        f.write(print_str)
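# Round-trip sketch (hypothetical file name): dtypes and units survive the
# ASCII round trip:
#   write_table('masses.txt', SimArray(np.arange(3.), 'Msol'), col_labels=['mass'])
#   read_table('masses.txt')['mass'].units   # -> Unit("Msol")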
|
{"hexsha": "4ea80558f1036d2083cc686dad65bba4c4e3ca57", "size": 8440, "ext": "py", "lang": "Python", "max_stars_repo_path": "diskpy/utils/_simarraywriter.py", "max_stars_repo_name": "langfzac/diskpy", "max_stars_repo_head_hexsha": "3b0f4fdc7f1fea21efdd3ab55bbf362181c7a3c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2016-03-25T18:09:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-10T09:27:41.000Z", "max_issues_repo_path": "diskpy/utils/_simarraywriter.py", "max_issues_repo_name": "langfzac/diskpy", "max_issues_repo_head_hexsha": "3b0f4fdc7f1fea21efdd3ab55bbf362181c7a3c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2015-07-20T21:56:45.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-16T23:01:15.000Z", "max_forks_repo_path": "diskpy/utils/_simarraywriter.py", "max_forks_repo_name": "langfzac/diskpy", "max_forks_repo_head_hexsha": "3b0f4fdc7f1fea21efdd3ab55bbf362181c7a3c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-08-07T22:03:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-19T16:30:17.000Z", "avg_line_length": 31.4925373134, "max_line_length": 91, "alphanum_fraction": 0.5892180095, "include": true, "reason": "import numpy", "num_tokens": 2208}
|
from time import perf_counter
import sys
import numpy as np
import pandas as pd
from hurry.filesize import size
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier as RFC
import cudf
from cuml.ensemble import RandomForestClassifier as cuRFC
ndpu_list = [1, 4, 16, 64]
train_size_per_dpu = int(6e5)
test_size_per_dpu = int(1e5)
npoints_per_dpu = train_size_per_dpu + test_size_per_dpu
nfeatures = 16
random_state = 42
accuracies_gpu = []
total_times_gpu = []
init_times_gpu = []
transfer_times_gpu = []
def np2cudf(df):
# convert numpy array to cuDF dataframe
df = pd.DataFrame({"fea%d" % i: df[:, i] for i in range(df.shape[1])})
pdf = cudf.DataFrame()
for c, column in enumerate(df):
pdf[str(c)] = df[column]
return pdf
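# Example: np2cudf(np.random.rand(4, 2)) yields a 4-row cuDF frame whose
# columns are renamed to the strings '0' and '1' before being handed to cuml.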
# load empty data to the GPU to pay the initialization cost
tic = perf_counter()
gpu_data = np2cudf(np.zeros((2, 2)))
toc = perf_counter()
init_time_gpu = toc - tic
del gpu_data
for i_ndpu, ndpu in enumerate(ndpu_list):
print(f"number of points per dpu : {train_size_per_dpu}")
data_size = train_size_per_dpu * (nfeatures + 1) * 4
print(f"data size per dpu= {size(data_size)}")
##################################################
# DATA GEN #
##################################################
X, y = make_classification(n_samples=npoints_per_dpu * ndpu, n_features=nfeatures, n_informative=4, n_redundant=4,
random_state=random_state)
X = X.astype(np.float32)
y = y.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_size_per_dpu * ndpu, shuffle=False)
print(f"data size = {size(sys.getsizeof(X_train) + sys.getsizeof(y_train))}")
##################################################
# GPU PERF #
##################################################
# load the data to the GPU
tic = perf_counter()
X_train_gpu = np2cudf(X_train)
y_train_gpu = cudf.DataFrame(y_train)
toc = perf_counter()
transfer_time_gpu = toc - tic
clf = cuRFC(n_estimators=1, random_state=random_state, split_criterion='gini', max_depth=10, bootstrap=False, max_features=1.0, n_streams=1)
tic = perf_counter()
clf.fit(X_train_gpu, y_train_gpu)
toc = perf_counter()
# export_graphviz(clf, out_file="tree_dpu.dot")
y_pred = clf.predict(X_test)
gpu_accuracy = accuracy_score(y_test, y_pred)
# read GPU times
accuracies_gpu.append(gpu_accuracy)
total_times_gpu.append(toc - tic)
init_times_gpu.append(init_time_gpu)
transfer_times_gpu.append(transfer_time_gpu)
print(f"Accuracy for GPUs: {gpu_accuracy}")
print(f"total time for GPUs: {toc - tic} s")
df = pd.DataFrame(
{
"GPU_times": total_times_gpu,
"GPU_init_time": init_times_gpu,
"GPU_transfer_times": transfer_times_gpu,
"GPU_scores": accuracies_gpu,
},
index=ndpu_list[:i_ndpu + 1])
df.to_csv("weak_scaling_gpu.csv")
del X_train_gpu, y_train_gpu
|
{"hexsha": "fa4cd0db4ba884f238f9438ac705163d01aad996", "size": 3248, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/scripts/weak_scaling/weak_scaling_gpu.py", "max_stars_repo_name": "upmem/scikit-dpu", "max_stars_repo_head_hexsha": "1ddeb5d195b9b119e379eb473b28c82a12a2b5fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "benchmarks/scripts/weak_scaling/weak_scaling_gpu.py", "max_issues_repo_name": "upmem/scikit-dpu", "max_issues_repo_head_hexsha": "1ddeb5d195b9b119e379eb473b28c82a12a2b5fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-22T14:52:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T14:52:01.000Z", "max_forks_repo_path": "benchmarks/scripts/weak_scaling/weak_scaling_gpu.py", "max_forks_repo_name": "upmem/scikit-dpu", "max_forks_repo_head_hexsha": "1ddeb5d195b9b119e379eb473b28c82a12a2b5fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1584158416, "max_line_length": 144, "alphanum_fraction": 0.6373152709, "include": true, "reason": "import numpy", "num_tokens": 810}
|
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""implementation of different Matrix Product Operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
import tensorflow as tf
def kron(a, b):
a1, a2 = a.shape
b1, b2 = b.shape
return tf.reshape(
tf.transpose(tf.tensordot(a, b, axes=0), (0, 2, 1, 3)),
(a1 * b1, a2 * b2))
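# Hedged sanity check (added for illustration, not part of the original module):
# kron() above should agree with numpy's dense Kronecker product on small inputs.
def _kron_sanity_check():
  a = tf.constant([[0., 1.], [1., 0.]])
  b = tf.constant([[1., 0.], [0., -1.]])
  np.testing.assert_allclose(kron(a, b).numpy(), np.kron(a.numpy(), b.numpy()))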
class MPOBase:
"""
Base class for MPOs
"""
def __init__(self, tensors, name=None):
    self.name = name
self._tensors = tensors
assert (np.all([self[0].dtype == m.dtype for m in self]))
def __getitem__(self, n):
return self._tensors[n]
  def __setitem__(self, n, value):
raise NotImplementedError()
def __iter__(self):
return iter(self._tensors)
def __len__(self):
return len(self._tensors)
@property
def dtype(self):
assert (np.all([self[0].dtype == m.dtype for m in self]))
return self._tensors[0].dtype
@property
def D(self):
"""Returns a vector of all bond dimensions.
The vector will have length `N+1`, where `N == num_sites`."""
return ([self.get_tensor(0).shape[0]] +
[self.get_tensor(n).shape[1] for n in range(len(self))])
def get_tensor(self, site):
return self._tensors[site]
def set_tensor(self, site, tensors):
raise NotImplementedError()
def get_2site_mpo(self, *args, **kwargs):
raise NotImplementedError()
def get_2site_hamiltonian(self, *args, **kwargs):
raise NotImplementedError()
def get_2site_gate(self, site1, site2, tau):
"""
calculate the unitary two-site gate exp(tau*H(m,n))
routine takes MPO tensors at `site1` and `site2` and constructs local
two-site operators from them
Args
site1,site2 (int): lattice sites for which to calculate the gate
tau (float or complex): time-increment
Returns:
tf.Tensor: A two-site gate "Gate" between sites m and n by summing up (morally, for m<n)
h=\sum_s kron(mpo[m][-1,s,:,:],mpo[n][s,0,:,:]) and exponentiating the result:
Gate=scipy.linalg..expm(tau*h);
Gate is a rank-4 tensor with shape (dm,dn,dm,dn), with
dm, dn the local hilbert space dimension at site m and n, respectively
"""
tau = tf.convert_to_tensor(tau)
if site2 < site1:
d1 = self[site2].shape[2]
d2 = self[site1].shape[2]
elif site2 > site1:
d1 = self[site1].shape[2]
d2 = self[site2].shape[2]
else:
      raise ValueError(
          'MPOBase.get_2site_gate: site1 has to be different from site2!')
h = tf.reshape(self.get_2site_hamiltonian(site1, site2), (d1 * d2, d1 * d2))
if not h.dtype == tau.dtype:
raise TypeError(
'MPOBase.get_2site_gate: expected tau of dtype {0}, got dtype {1}'.
format(self.dtype, tau.dtype))
return tf.reshape(tf.linalg.expm(tau * h), (d1, d2, d1, d2))
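# Hedged usage sketch (added for illustration): build a small finite TFI MPO
# (FiniteTFI is defined further below in this module) and compute a two-site
# imaginary-time gate. Assumes TensorFlow eager execution.
def _two_site_gate_example():
  mpo = FiniteTFI(Jx=np.ones(4), Bz=np.zeros(4), dtype=tf.float64)
  gate = mpo.get_2site_gate(1, 2, tau=tf.constant(0.1, dtype=tf.float64))
  assert gate.shape == (2, 2, 2, 2)  # (d1, d2, d1, d2) with local dimension 2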
class InfiniteMPO(MPOBase):
"""
Base class for implementation of infinite MPOs; the user should implement
specific infinite MPOs by deriving from InfiniteMPO
"""
def __init__(self, tensors, name=None):
super().__init__(tensors=tensors, name=name)
    if not (self.D[0] == self.D[-1]):
      raise ValueError(
          'InfiniteMPO: left and right MPO ancillary dimensions do not match')
def get_boundary_vector(self, side):
"""
return a boundary vector that can be contracted with the left-most or right-most mpo tensor
Args:
side (str): the side for which to return the boundary vector
Returns:
if side == 'l' or 'left':
tf.Tensor of shape (self.D[0],) with entries [0, 0, ..., 1]
if side == 'r' or 'right':
tf.Tensor of shape (self.D[-1],) with entries [1, 0, ..., 0]
"""
    if side.lower() in ('l', 'left'):
      # build the vector in numpy first: tf tensors are immutable, so writing
      # into tf.zeros(...).numpy() would only modify a copy
      v = np.zeros(self.D[0], dtype=self.dtype.as_numpy_dtype)
      v[-1] = 1.0
      return tf.convert_to_tensor(v)
    if side.lower() in ('r', 'right'):
      v = np.zeros(self.D[-1], dtype=self.dtype.as_numpy_dtype)
      v[0] = 1.0
      return tf.convert_to_tensor(v)
def get_boundary_mpo(self, side):
"""
return left or right boundary mpo
Args:
side (str): the side for which to return the boundary vector
Returns:
if side == 'l' or 'left':
tf.Tensor of shape (self.D[1], self.d[0], self.d[0])
if side == 'r' or 'right':
tf.Tensor of shape (self.D[-2], self.d[-1], self.d[-1])
"""
    if side.lower() in ('l', 'left'):
      # operate on a numpy copy: tf tensors do not support in-place assignment
      out = self._tensors[-1][-1, :, :, :].numpy().copy()
      out[0, :, :] *= 0.0
    elif side.lower() in ('r', 'right'):
      out = self._tensors[0][:, 0, :, :].numpy().copy()
      out[-1, :, :] *= 0.0
    else:
      raise ValueError("side must be one of 'l', 'left', 'r' or 'right'")
    return tf.squeeze(tf.convert_to_tensor(out))
def get_2site_mpo(self, site1, site2):
    # operate on numpy copies: tf tensors do not support in-place item assignment
    if site2 < site1:
      mpo1 = self[site2][-1, :, :, :].numpy().copy()
      mpo2 = self[site1][:, 0, :, :].numpy().copy()
      if site2 == 0:
        mpo1[0, :, :] /= 2.0
      if site1 == (len(self) - 1):
        mpo2[-1, :, :] /= 2.0
    elif site2 > site1:
      mpo1 = self[site1][-1, :, :, :].numpy().copy()
      mpo2 = self[site2][:, 0, :, :].numpy().copy()
      if site1 == 0:
        mpo1[0, :, :] /= 2.0
      if site2 == (len(self) - 1):
        mpo2[-1, :, :] /= 2.0
    else:
      raise ValueError('get_2site_mpo: site1 has to be different from site2!')
    return [tf.expand_dims(tf.convert_to_tensor(mpo1), 0),
            tf.expand_dims(tf.convert_to_tensor(mpo2), 1)]
def get_2site_hamiltonian(self, site1, site2):
"""
obtain a two-site Hamiltonian H_{mn} from MPO
Args:
site1,site2 (int): lattice sites for which to calculate the Hamiltonian
Returns:
tf.Tensor of shape (d1,d2,d3,d4)
A two-site Hamiltonian between sites `site1` and `site2` by summing up
(for site1<site2, and site1!=0, site2!=0)
\sum_s={0}^{M-1} kron(mpo[m][-1,s,:,:],mpo[n][s,0,:,:])
the returned tf.Tensor is a rank-4 tensor with shape (dsite1,dsite2,dsite1,dsite2), with
dsite1, dsite2 the local hilbert space dimension at sites `site1` and `site2`, respectively,
"""
mpo1, mpo2 = self.get_2site_mpo(site1, site2)
    if site2 < site1:
      nl = site2
      nr = site1
    elif site2 > site1:
      nl = site1
      nr = site2
mpo1 = mpo1[0, :, :, :]
mpo2 = mpo2[:, 0, :, :]
d1 = mpo1.shape[1]
d2 = mpo2.shape[1]
h = kron(mpo1[0, :, :], mpo2[0, :, :])
for s in range(1, mpo1.shape[0]):
h += kron(mpo1[s, :, :], mpo2[s, :, :])
return tf.reshape(h, (d1, d2, d1, d2))
def roll(self, num_sites):
tensors=[self._tensors[n] for n in range(num_sites,len(self._tensors))]\
+ [self._tensors[n] for n in range(num_sites)]
self._tensors = tensors
class FiniteMPO(MPOBase):
"""
Base class for implementation of finite MPOs; the user should implement
specific finite MPOs by deriving from FiniteMPO
"""
def __init__(self, tensors, name=None):
super().__init__(tensors=tensors, name=name)
    if not (self.D[0] == 1 and self.D[-1] == 1):
      raise ValueError(
          'FiniteMPO: left and right MPO ancillary dimensions have to be 1')
def get_2site_mpo(self, site1, site2):
if site2 < site1:
mpo1 = self[site2][-1, :, :, :]
mpo2 = self[site1][:, 0, :, :]
    elif site2 > site1:
      mpo1 = self[site1][-1, :, :, :]
      mpo2 = self[site2][:, 0, :, :]
    else:
      raise ValueError('get_2site_mpo: site1 has to be different from site2!')
    return [tf.expand_dims(mpo1, 0), tf.expand_dims(mpo2, 1)]
def get_2site_hamiltonian(self, site1, site2):
"""
obtain a two-site Hamiltonian H_{mn} from MPO
Args:
site1,site2 (int): lattice sites for which to calculate the Hamiltonian
Returns:
tf.Tensor of shape (d1,d2,d3,d4)
A two-site Hamiltonian between sites `site1` and `site2` by summing up
(for site1<site2, and site1!=0, site2!=0)
\sum_s={0}^{M-1} kron(mpo[m][-1,s,:,:],mpo[n][s,0,:,:])
the returned tf.Tensor is a rank-4 tensor with shape (dsite1,dsite2,dsite1,dsite2), with
dsite1, dsite2 the local hilbert space dimension at sites `site1` and `site2`, respectively,
"""
mpo1, mpo2 = self.get_2site_mpo(site1, site2)
    if site2 < site1:
      nl = site2
      nr = site1
elif site2 > site1:
nl = site1
nr = site2
mpo1 = mpo1[0, :, :, :]
mpo2 = mpo2[:, 0, :, :]
d1 = mpo1.shape[1]
d2 = mpo2.shape[1]
if nl != 0 and nr != (len(self) - 1):
h = kron(mpo1[0, :, :] / 2.0, mpo2[0, :, :])
for s in range(1, mpo1.shape[0] - 1):
h += kron(mpo1[s, :, :], mpo2[s, :, :])
h += kron(mpo1[-1, :, :], mpo2[-1, :, :] / 2.0)
elif nl != 0 and nr == (len(self) - 1):
h = kron(mpo1[0, :, :] / 2.0, mpo2[0, :, :])
for s in range(1, mpo1.shape[0]):
h += kron(mpo1[s, :, :], mpo2[s, :, :])
elif nl == 0 and nr != (len(self) - 1):
h = kron(mpo1[0, :, :], mpo2[0, :, :])
for s in range(1, mpo1.shape[0] - 1):
h += kron(mpo1[s, :, :], mpo2[s, :, :])
h += kron(mpo1[-1, :, :], mpo2[-1, :, :] / 2.0)
elif nl == 0 and nr == (len(self) - 1):
h = kron(mpo1[0, :, :], mpo2[0, :, :])
for s in range(1, mpo1.shape[0]):
h += kron(mpo1[s, :, :], mpo2[s, :, :])
return tf.reshape(h, (d1, d2, d1, d2))
class FiniteXXZ(FiniteMPO):
"""
the famous Heisenberg Hamiltonian, the one we all know and love (almost as much as the TFI)!
"""
def __init__(self, Jz, Jxy, Bz, dtype):
"""
returns the MPO of the XXZ model
Args:
Jz (np.ndarray or tf.Tensor): the Sz*Sz coupling strength between nearest neighbor lattice sites
Jxy (np.ndarray or tf.Tensor): the (Sx*Sx + Sy*Sy) coupling strength between nearest neighbor lattice sites
Bz (np.ndarray or tf.Tensor): magnetic field on each lattice site
dtype (tf-dtype or numpy dtype): the dtype of the MPO
Returns:
FiniteXXZ: the mpo of the finite XXZ model
"""
if hasattr(dtype, 'as_numpy_dtype'):
dtype = dtype.as_numpy_dtype
self.Jz = Jz
self.Jxy = Jxy
self.Bz = Bz
N = len(Bz)
mpo = []
temp = np.zeros((1, 5, 2, 2), dtype=dtype)
#BSz
temp[0, 0, 0, 0] = -0.5 * Bz[0]
temp[0, 0, 1, 1] = 0.5 * Bz[0]
#Sm
temp[0, 1, 0, 1] = Jxy[0] / 2.0 * 1.0
#Sp
temp[0, 2, 1, 0] = Jxy[0] / 2.0 * 1.0
#Sz
temp[0, 3, 0, 0] = Jz[0] * (-0.5)
temp[0, 3, 1, 1] = Jz[0] * 0.5
#11
temp[0, 4, 0, 0] = 1.0
temp[0, 4, 1, 1] = 1.0
mpo.append(tf.convert_to_tensor(temp))
for n in range(1, N - 1):
temp = np.zeros((5, 5, 2, 2), dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#Sp
temp[1, 0, 1, 0] = 1.0
#Sm
temp[2, 0, 0, 1] = 1.0
#Sz
temp[3, 0, 0, 0] = -0.5
temp[3, 0, 1, 1] = 0.5
#BSz
temp[4, 0, 0, 0] = -0.5 * Bz[n]
temp[4, 0, 1, 1] = 0.5 * Bz[n]
#Sm
temp[4, 1, 0, 1] = Jxy[n] / 2.0 * 1.0
#Sp
temp[4, 2, 1, 0] = Jxy[n] / 2.0 * 1.0
#Sz
temp[4, 3, 0, 0] = Jz[n] * (-0.5)
temp[4, 3, 1, 1] = Jz[n] * 0.5
#11
temp[4, 4, 0, 0] = 1.0
temp[4, 4, 1, 1] = 1.0
mpo.append(tf.convert_to_tensor(temp))
temp = np.zeros((5, 1, 2, 2), dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#Sp
temp[1, 0, 1, 0] = 1.0
#Sm
temp[2, 0, 0, 1] = 1.0
#Sz
temp[3, 0, 0, 0] = -0.5
temp[3, 0, 1, 1] = 0.5
#BSz
temp[4, 0, 0, 0] = -0.5 * Bz[-1]
temp[4, 0, 1, 1] = 0.5 * Bz[-1]
mpo.append(tf.convert_to_tensor(temp))
super().__init__(mpo, name='XXZ_MPO')
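# Hedged usage sketch (added for illustration): a 4-site Heisenberg chain with
# unit couplings and zero field; its bond dimensions should read [1, 5, 5, 5, 1].
def _finite_xxz_example():
  mpo = FiniteXXZ(Jz=np.ones(3), Jxy=np.ones(3), Bz=np.zeros(4), dtype=tf.float64)
  assert len(mpo) == 4
  assert list(mpo.D) == [1, 5, 5, 5, 1]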
class InfiniteXXZ(InfiniteMPO):
"""
the famous Heisenberg Hamiltonian, the one we all know and love (almost as much as the TFI)!
"""
def __init__(self, Jz, Jxy, Bz, dtype):
"""
returns the MPO of the infinite XXZ model
Args:
Jz (np.ndarray or tf.Tensor): the Sz*Sz coupling strength between nearest neighbor lattice sites
Jxy (np.ndarray or tf.Tensor): the (Sx*Sx + Sy*Sy) coupling strength between nearest neighbor lattice sites
Bz (np.ndarray or tf.Tensor): magnetic field on each lattice site
dtype (tf-dtype or numpy dtype): the dtype of the MPO
Returns:
InfiniteXXZ: the mpo of the infinite XXZ model
"""
if hasattr(dtype, 'as_numpy_dtype'):
dtype = dtype.as_numpy_dtype
self.Jz = Jz
self.Jxy = Jxy
self.Bz = Bz
N = len(Bz)
    if not len(Jz) == len(Jxy):
      raise TypeError('XXZ: Jz and Jxy have to be of the same length for pbc')
    if not len(Bz) == len(Jxy):
      raise TypeError('XXZ: Bz and Jxy have to be of the same length for pbc')
mpo = []
for n in range(0, N):
temp = np.zeros((5, 5, 2, 2), dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#Sp
temp[1, 0, 1, 0] = 1.0
#Sm
temp[2, 0, 0, 1] = 1.0
#Sz
temp[3, 0, 0, 0] = -0.5
temp[3, 0, 1, 1] = 0.5
#BSz
temp[4, 0, 0, 0] = -0.5 * Bz[n]
temp[4, 0, 1, 1] = 0.5 * Bz[n]
#Sm
temp[4, 1, 0, 1] = Jxy[n] / 2.0 * 1.0
#Sp
temp[4, 2, 1, 0] = Jxy[n] / 2.0 * 1.0
#Sz
temp[4, 3, 0, 0] = Jz[n] * (-0.5)
temp[4, 3, 1, 1] = Jz[n] * 0.5
#11
temp[4, 4, 0, 0] = 1.0
temp[4, 4, 1, 1] = 1.0
mpo.append(tf.convert_to_tensor(temp))
super().__init__(mpo, name='XXZ_MPO')
class FiniteTFI(FiniteMPO):
"""
the good old transverse field Ising model
convention: sigma_z=diag([-1,1])
"""
def __init__(self, Jx, Bz, dtype=tf.float64):
"""
returns the MPO of the finite TFI model
Args:
Jx (np.ndarray or tf.Tensor): the Sx*Sx coupling strength between nearest neighbor lattice sites
Bz (np.ndarray or tf.Tensor): magnetic field on each lattice site
dtype (tf-dtype or numpy dtype): the dtype of the MPO
Returns:
      FiniteTFI: the MPO of the finite TFI model
"""
if hasattr(dtype, 'as_numpy_dtype'):
dtype = dtype.as_numpy_dtype
self.Jx = Jx.astype(dtype)
self.Bz = Bz.astype(dtype)
N = len(Bz)
sigma_x = np.array([[0, 1], [1, 0]]).astype(dtype)
sigma_z = np.diag([-1, 1]).astype(dtype)
mpo = []
temp = np.zeros(shape=[1, 3, 2, 2], dtype=dtype)
#Bsigma_z
temp[0, 0, :, :] = self.Bz[0] * sigma_z
#sigma_x
temp[0, 1, :, :] = self.Jx[0] * sigma_x
#11
temp[0, 2, 0, 0] = 1.0
temp[0, 2, 1, 1] = 1.0
mpo.append(tf.convert_to_tensor(temp))
for n in range(1, N - 1):
temp = np.zeros(shape=[3, 3, 2, 2], dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#sigma_x
temp[1, 0, :, :] = sigma_x
#Bsigma_z
temp[2, 0, :, :] = self.Bz[n] * sigma_z
#sigma_x
temp[2, 1, :, :] = self.Jx[n] * sigma_x
#11
temp[2, 2, 0, 0] = 1.0
temp[2, 2, 1, 1] = 1.0
mpo.append(tf.convert_to_tensor(temp))
temp = np.zeros([3, 1, 2, 2], dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
#sigma_x
temp[1, 0, :, :] = sigma_x
#Bsigma_z
temp[2, 0, :, :] = self.Bz[-1] * sigma_z
mpo.append(tf.convert_to_tensor(temp))
super().__init__(tensors=mpo, name='TFI_MPO')
class InfiniteTFI(InfiniteMPO):
"""
the good old transverse field Ising model
convention: sigma_z=diag([-1,1])
"""
def __init__(self, Jx, Bz, dtype=tf.float64):
"""
returns the MPO of the infinite TFI model
Args:
Jx (np.ndarray or tf.Tensor): the Sx*Sx coupling strength between nearest neighbor lattice sites
Bz (np.ndarray or tf.Tensor): magnetic field on each lattice site
dtype (tf-dtype or numpy dtype): the dtype of the MPO
Returns:
      InfiniteTFI: the MPO of the infinite TFI model
"""
if hasattr(dtype, 'as_numpy_dtype'):
dtype = dtype.as_numpy_dtype
self.Jx = Jx.astype(dtype)
self.Bz = Bz.astype(dtype)
N = len(Bz)
sigma_x = np.array([[0, 1], [1, 0]]).astype(dtype)
sigma_z = np.diag([-1, 1]).astype(dtype)
mpo = []
for n in range(0, N):
temp = np.zeros(shape=[3, 3, 2, 2], dtype=dtype)
#11
temp[0, 0, 0, 0] = 1.0
temp[0, 0, 1, 1] = 1.0
      #sigma_x
      temp[1, 0, :, :] = sigma_x
#Bsigma_z
      temp[2, 0, :, :] = sigma_z * self.Bz[n]
#sigma_x
temp[2, 1, :, :] = sigma_x * self.Jx[n]
#11
temp[2, 2, 0, 0] = 1.0
temp[2, 2, 1, 1] = 1.0
mpo.append(tf.convert_to_tensor(temp))
super().__init__(tensors=mpo, name='TFI_MPO')
|
{"hexsha": "be7d59a106c561c623d0f7726db7d056a9bce162", "size": 17148, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/MPS/matrixproductoperators.py", "max_stars_repo_name": "priyansh19/TensorNetwork", "max_stars_repo_head_hexsha": "f83406c3749ed900573b9f80987738feea098df8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-07-25T12:53:06.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-18T16:26:18.000Z", "max_issues_repo_path": "experiments/MPS/matrixproductoperators.py", "max_issues_repo_name": "molodiuc/TensorNetwork", "max_issues_repo_head_hexsha": "b754d0a9c89f7d0e47a5989a1337270f753931ac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/MPS/matrixproductoperators.py", "max_forks_repo_name": "molodiuc/TensorNetwork", "max_forks_repo_head_hexsha": "b754d0a9c89f7d0e47a5989a1337270f753931ac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1370826011, "max_line_length": 115, "alphanum_fraction": 0.5593655237, "include": true, "reason": "import numpy", "num_tokens": 6190}
|
import os
import random
import numpy as np
from utils import read_data
import torch
from torch.utils.data import Dataset, DataLoader
import pdb
class Federated_Dataset(Dataset):
def __init__(self, X, Y, A):
self.X = X
self.Y = Y
self.A = A
def __getitem__(self, index):
X = self.X[index]
Y = self.Y[index]
A = self.A[index]
return X, Y, A
def __len__(self):
return self.X.shape[0]
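# Hedged usage sketch (added for illustration, with synthetic data): wrap random
# features, labels and sensitive attributes in a Federated_Dataset and fetch one
# full-size batch, mirroring how the loaders below are built.
def _federated_dataset_example():
    X = np.random.rand(8, 3).astype(np.float32)
    Y = np.random.randint(0, 2, size=8).astype(np.float32)
    A = np.random.randint(0, 2, size=8).astype(np.float32)
    loader = DataLoader(Federated_Dataset(X, Y, A), batch_size=8, shuffle=False)
    x, y, a = next(iter(loader))
    assert x.shape == (8, 3) and y.shape == (8,) and a.shape == (8,)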
#### adult dataset columns: 51 White, 52 Asian-Pac-Islander, 53 Amer-Indian-Eskimo, 54 Other, 55 Black, 56 Female, 57 Male
def LoadDataset(args):
clients_name, groups, train_data, test_data = read_data(args.train_dir, args.test_dir)
# client_name [phd, non-phd]
client_train_loads = []
client_test_loads = []
args.n_clients = len(clients_name)
# clients_name = clients_name[:1]
if args.dataset == "adult":
for client in clients_name:
X = np.array(train_data[client]["x"]).astype(np.float32)
Y = np.array(train_data[client]["y"]).astype(np.float32)
if args.sensitive_attr == "race":
A = X[:,51] # [1: white, 0: other]
X = np.delete(X, [51, 52, 53, 54, 55], axis = 1)
args.n_feats = X.shape[1]
elif args.sensitive_attr == "sex":
A = X[:, 56] # [1: female, 0: male]
X = np.delete(X, [56, 57], axis = 1)
args.n_feats = X.shape[1]
elif args.sensitive_attr == "none-race":
A = X[:, 51] # [1: white, 0: other]
args.n_feats = X.shape[1]
elif args.sensitive_attr == "none-sex":
A = X[:, 56]
args.n_feats = X.shape[1]
            else:
                raise ValueError("unknown sensitive attribute: %s" % args.sensitive_attr)
dataset = Federated_Dataset(X, Y, A)
client_train_loads.append(DataLoader(dataset, X.shape[0],
shuffle = args.shuffle,
num_workers = args.num_workers,
pin_memory = True,
drop_last = args.drop_last))
for client in clients_name:
X = np.array(test_data[client]["x"]).astype(np.float32)
Y = np.array(test_data[client]["y"]).astype(np.float32)
            if args.sensitive_attr == "race":
                A = X[:, 51]  # [1: white, 0: other]
                X = np.delete(X, [51, 52, 53, 54, 55], axis=1)
elif args.sensitive_attr == "sex":
A = X[:, 56] # [1: female, 0: male]
X = np.delete(X, [56, 57], axis = 1)
elif args.sensitive_attr == "none-race":
A = X[:, 51] # [1: white, 0: other]
args.n_feats = X.shape[1]
elif args.sensitive_attr == "none-sex":
A = X[:, 56]
args.n_feats = X.shape[1]
            else:
                raise ValueError("unknown sensitive attribute: %s" % args.sensitive_attr)
dataset = Federated_Dataset(X, Y, A)
client_test_loads.append(DataLoader(dataset, X.shape[0],
shuffle = args.shuffle,
num_workers = args.num_workers,
pin_memory = True,
drop_last = args.drop_last))
elif "eicu" in args.dataset:
# elif args.dataset == "eicu_d" or args.dataset == "eicu_los":
for client in clients_name:
X = np.array(train_data[client]["x"]).astype(np.float32)
Y = np.array(train_data[client]["y"]).astype(np.float32)
if args.sensitive_attr == "race":
A = train_data[client]["race"]
args.n_feats = X.shape[1]
elif args.sensitive_attr == "sex":
A = train_data[client]["gender"]
args.n_feats = X.shape[1]
else:
A = train_data[client]["race"]
args.n_feats = X.shape[1]
dataset = Federated_Dataset(X, Y, A)
client_train_loads.append(DataLoader(dataset, X.shape[0],
shuffle = args.shuffle,
num_workers = args.num_workers,
pin_memory = True,
drop_last = args.drop_last))
for client in clients_name:
X = np.array(test_data[client]["x"]).astype(np.float32)
Y = np.array(test_data[client]["y"]).astype(np.float32)
            if args.sensitive_attr == "race":
                A = test_data[client]["race"]
elif args.sensitive_attr == "sex":
A = test_data[client]["gender"]
else:
A = test_data[client]["race"]
dataset = Federated_Dataset(X, Y, A)
client_test_loads.append(DataLoader(dataset, X.shape[0],
shuffle = args.shuffle,
num_workers = args.num_workers,
pin_memory = True,
drop_last = args.drop_last))
elif args.dataset == "health":
for client in clients_name:
X = np.array(train_data[client]["x"]).astype(np.float32)
Y = np.array(train_data[client]["y"]).astype(np.float32)
if args.sensitive_attr == "race":
A = train_data[client]["race"]
args.n_feats = X.shape[1]
elif args.sensitive_attr == "sex":
A = train_data[client]["isfemale"]
args.n_feats = X.shape[1]
else:
A = train_data[client]["isfemale"]
args.n_feats = X.shape[1]
dataset = Federated_Dataset(X, Y, A)
client_train_loads.append(DataLoader(dataset, X.shape[0],
shuffle=args.shuffle,
num_workers=args.num_workers,
pin_memory=True,
drop_last=args.drop_last))
for client in clients_name:
X = np.array(test_data[client]["x"]).astype(np.float32)
Y = np.array(test_data[client]["y"]).astype(np.float32)
if args.sensitive_attr == "race":
A = test_data[client]["race"]
elif args.sensitive_attr == "sex":
A = test_data[client]["isfemale"]
else:
A = np.zeros(X.shape[0])
dataset = Federated_Dataset(X, Y, A)
client_test_loads.append(DataLoader(dataset, X.shape[0],
shuffle=args.shuffle,
num_workers=args.num_workers,
pin_memory=True,
drop_last=args.drop_last))
return client_train_loads, client_test_loads
|
{"hexsha": "b5ed61ff90fd136a8d0cda4734453d953fece182", "size": 6740, "ext": "py", "lang": "Python", "max_stars_repo_path": "FUEL/dataload.py", "max_stars_repo_name": "cuis15/FCFL", "max_stars_repo_head_hexsha": "59302004f9cfc20e305222ebb512235c6679cca8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-11-08T08:08:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T07:10:32.000Z", "max_issues_repo_path": "FUEL/dataload.py", "max_issues_repo_name": "cuis15/FCFL", "max_issues_repo_head_hexsha": "59302004f9cfc20e305222ebb512235c6679cca8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-15T07:13:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-15T07:17:00.000Z", "max_forks_repo_path": "FUEL/dataload.py", "max_forks_repo_name": "cuis15/FCFL", "max_forks_repo_head_hexsha": "59302004f9cfc20e305222ebb512235c6679cca8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-15T05:34:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-15T14:46:03.000Z", "avg_line_length": 38.5142857143, "max_line_length": 130, "alphanum_fraction": 0.5043026706, "include": true, "reason": "import numpy", "num_tokens": 1542}
|
import unittest
import ifm_contrib as ifm
from ifm import Enum
import numpy as np
class TestPlot(unittest.TestCase):
def test_fringes(self):
ifm.forceLicense("Viewer")
doc = ifm.loadDocument(r".\models\example_2D.dac")
doc.loadTimeStep(doc.getNumberOfTimeSteps() - 1)
gdf = doc.c.plot.gdf.fringes(par=Enum.P_HEAD, levels=range(11))
# check if levels are correct:
np.testing.assert_almost_equal(gdf.layer.values, [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5],
err_msg="layer mismatch in geodataframe")
np.testing.assert_almost_equal(gdf["400_min"].values,[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0],
err_msg="min mismatch in geodataframe")
np.testing.assert_almost_equal(gdf["400_max"].values,[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],
err_msg="max mismatch in geodataframe")
# check if areas are equal:
np.testing.assert_almost_equal(gdf.area.values,
[14302.13315203, 20114.86769273, 70707.4589673, 254411.87209667, 155115.96365282,
125447.20580869, 42811.62018617, 42276.8370475, 33731.82795621, 26471.632308],
err_msg="areas of polygon differs")
doc.closeDocument()
doc = ifm.loadDocument(r".\models\example_3D_mspecies.fem")
gdf = doc.c.plot.gdf.fringes(par=Enum.P_HEAD)
gdf = doc.c.plot.gdf.fringes(par=Enum.P_HEAD, levels=range(11))
doc.closeDocument()
def test_isolines(self):
ifm.forceLicense("Viewer")
doc = ifm.loadDocument(r".\models\example_2D.dac")
doc.loadTimeStep(doc.getNumberOfTimeSteps() - 1)
gdf = doc.c.plot.gdf.isolines(par=Enum.P_HEAD)
gdf = doc.c.plot.gdf.isolines(par=Enum.P_HEAD, levels=range(11))
#TODO: add tests for distributions and expressions
doc.closeDocument()
ifm.forceLicense("Viewer")
doc = ifm.loadDocument(r".\models\example_3D_mspecies.fem")
doc.loadTimeStep(doc.getNumberOfTimeSteps() - 1)
gdf = doc.c.plot.gdf.isolines(par=Enum.P_HEAD, slice=1)
gdf = doc.c.plot.gdf.isolines(par=Enum.P_HEAD, levels=range(11))
#TODO: add tests for distributions and expressions
doc.closeDocument()
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "4e30775fbf859dfa9545a95f9166981209e19610", "size": 2475, "ext": "py", "lang": "Python", "max_stars_repo_path": "unittests/test_plot_geopandas.py", "max_stars_repo_name": "DHI/ifm_contrib", "max_stars_repo_head_hexsha": "443c3a86960990115887855a2f4adac07797fc35", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-09-28T12:01:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-07T15:17:51.000Z", "max_issues_repo_path": "unittests/test_plot_geopandas.py", "max_issues_repo_name": "DHI/ifm_contrib", "max_issues_repo_head_hexsha": "443c3a86960990115887855a2f4adac07797fc35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-10-23T13:25:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T21:11:42.000Z", "max_forks_repo_path": "unittests/test_plot_geopandas.py", "max_forks_repo_name": "DHI/ifm_contrib", "max_forks_repo_head_hexsha": "443c3a86960990115887855a2f4adac07797fc35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-04-23T11:01:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T13:33:06.000Z", "avg_line_length": 45.0, "max_line_length": 120, "alphanum_fraction": 0.6036363636, "include": true, "reason": "import numpy", "num_tokens": 738}
|
import torch
from torch import nn
import numpy as np
import earthnet as en
def get_loss_from_name(loss_name):
if loss_name == "l2":
return Cube_loss(nn.MSELoss())
elif loss_name == "l1":
return Cube_loss(nn.L1Loss())
elif loss_name == "Huber":
return Cube_loss(nn.HuberLoss())
elif loss_name == "ENS":
return ENS_loss()
elif loss_name == "NDVI":
return NDVI_loss()
# simple L2 loss on the RGBI channels, mostly used for training
class Cube_loss(nn.Module):
def __init__(self, loss):
super().__init__()
self.l = loss
def forward(self, labels: torch.Tensor, prediction: torch.Tensor):
# only compute loss on non-cloudy pixels
mask = 1 - labels[:, 4:5] # [b, 1, h, w, t]
mask = mask.repeat(1, 4, 1, 1, 1)
masked_prediction = prediction * mask
masked_labels = labels[:, :4] * mask
return self.l(masked_prediction, masked_labels)
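# Hedged sanity check (added for illustration): with a fully clear cloud mask and
# identical label/prediction channels, the masked loss should be exactly zero.
# Shapes follow the (b, c, h, w, t) convention used above; channel 4 is the mask.
def _cube_loss_example():
    labels = torch.zeros(1, 5, 4, 4, 2)      # mask channel all zeros == no clouds
    prediction = torch.zeros(1, 4, 4, 4, 2)
    assert float(Cube_loss(nn.MSELoss())(labels, prediction)) == 0.0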
# loss using the EarthNet challenge ENS score
class ENS_loss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, labels: torch.Tensor, prediction: torch.Tensor):
        '''
        size of labels: (b, c, h, w, t)
        b = batch_size (>0)
        c = channels (=5, channel 4 is the cloud mask)
        h = height (=128)
        w = width (=128)
        t = time (20/40/140)
        size of prediction: (b, c, h, w, t)
        b = batch_size (>0)
        c = channels (=4) no mask
        h = height (=128)
        w = width (=128)
        t = time (20/40/140)
        Both tensors are transposed below to (b, h, w, c, t) before scoring.
        '''
# numpy conversion
labels = np.array(labels.cpu()).transpose(0, 2, 3, 1, 4)
prediction = np.array(prediction.cpu()).transpose(0, 2, 3, 1, 4)
# mask
mask = 1 - np.repeat(labels[:, :, :, 4:, :], 4, axis=3)
labels = labels[:, :, :, :4, :]
# NDVI
ndvi_labels = ((labels[:, :, :, 3, :] - labels[:, :, :, 2, :]) / (
labels[:, :, :, 3, :] + labels[:, :, :, 2, :] + 1e-6))[:, :, :, np.newaxis, :]
ndvi_prediction = ((prediction[:, :, :, 3, :] - prediction[:, :, :, 2, :]) / (
prediction[:, :, :, 3, :] + prediction[:, :, :, 2, :] + 1e-6))[:, :, :, np.newaxis, :]
ndvi_mask = mask[:, :, :, 0, :][:, :, :, np.newaxis, :]
# floor and ceiling
prediction[prediction < 0] = 0
prediction[prediction > 1] = 1
labels[np.isnan(labels)] = 0
labels[labels > 1] = 1
labels[labels < 0] = 0
partial_score = np.zeros((labels.shape[0], 5))
score = np.zeros(labels.shape[0])
# partial score computation
for i in range(labels.shape[0]):
partial_score[i, 1], _ = en.parallel_score.CubeCalculator.MAD(prediction[i], labels[i], mask[i])
partial_score[i, 2], _ = en.parallel_score.CubeCalculator.SSIM(prediction[i], labels[i], mask[i])
partial_score[i, 3], _ = en.parallel_score.CubeCalculator.OLS(ndvi_prediction[i], ndvi_labels[i], ndvi_mask[i])
partial_score[i, 4], _ = en.parallel_score.CubeCalculator.EMD(ndvi_prediction[i], ndvi_labels[i], ndvi_mask[i])
if np.min(partial_score[i, 1:]) == 0:
score[i] = partial_score[i, 0] = 0
else:
score[i] = partial_score[i, 0] = 4 / (
1 / partial_score[i, 1] + 1 / partial_score[i, 2] + 1 / partial_score[i, 3] + 1 / partial_score[i, 4])
return score, partial_score
        # score is an np array with all the ENS scores
        # partial_score is an np array with 5 columns: ENS, MAD, SSIM, OLS, EMD,
        # in this order (one row per element in the batch)
# NDVI l2 loss on non-cloudy pixels weighted by the proportion of valid pixels
class NDVI_loss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, labels: torch.Tensor, prediction: torch.Tensor):
# only compute loss on non-cloudy pixels
# numpy conversion
labels = labels.permute(0, 2, 3, 1, 4)
prediction = prediction.permute(0, 2, 3, 1, 4)
# mask
ndvi_mask = labels[:, :, :, 4:, :]
labels = labels[:, :, :, :4, :]
# NDVI
ndvi_labels = ((labels[:, :, :, 3, :] - labels[:, :, :, 2, :]) / (
labels[:, :, :, 3, :] + labels[:, :, :, 2, :] + 1e-6))[:, :, :, np.newaxis, :]
ndvi_prediction = ((prediction[:, :, :, 3, :] - prediction[:, :, :, 2, :]) / (
prediction[:, :, :, 3, :] + prediction[:, :, :, 2, :] + 1e-6))[:, :, :, np.newaxis, :]
# floor and ceiling
prediction[prediction < 0] = 0
prediction[prediction > 1] = 1
score = np.zeros((labels.shape[0], 6))
weight = np.zeros(ndvi_labels.shape[0])
l2_loss = nn.MSELoss()
for i in range(ndvi_labels.shape[0]):
# mask which data is cloudy and shouldn't be used for calculating the score
masked_ndvi_labels = torch.mul(ndvi_labels[i], ndvi_mask[i])
masked_ndvi_prediction = torch.mul(ndvi_prediction[i], ndvi_mask[i])
            score[i, 0] = score[i, 1] = score[i, 2] = score[i, 3] = score[i, 4] = \
                l2_loss(masked_ndvi_prediction, masked_ndvi_labels).item()
            # the loss should carry a weight corresponding to the number of valid pixels
            weight[i] = 1.0 - (torch.sum(ndvi_mask[i]) / torch.numel(ndvi_mask[i])).item()
            score[i, 5] = weight[i]
return weight, score
def cloud_mask_loss(y_preds, y_truth, cloud_mask):
l2_loss = nn.MSELoss()
    mask = torch.repeat_interleave(1 - cloud_mask, 4, dim=1)
# mask which data is cloudy and shouldn't be used for averaging
masked_y_pred = torch.mul(y_preds, mask)
masked_y_truth = torch.mul(y_truth, mask)
return l2_loss(masked_y_pred, masked_y_truth)
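# Hedged sanity check (added for illustration): when every pixel is marked cloudy,
# the mask zeroes out both tensors and the loss must be zero.
def _cloud_mask_loss_example():
    y_preds = torch.rand(1, 4, 4, 4, 2)
    y_truth = torch.rand(1, 4, 4, 4, 2)
    cloud_mask = torch.ones(1, 1, 4, 4, 2)   # 1 == cloudy
    assert float(cloud_mask_loss(y_preds, y_truth, cloud_mask)) == 0.0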
|
{"hexsha": "90ae4b910923d16af719c4c61167ab96e124ddbe", "size": 5866, "ext": "py", "lang": "Python", "max_stars_repo_path": "drought_impact_forecasting/losses.py", "max_stars_repo_name": "rudolfwilliam/satellite_image_forecasting", "max_stars_repo_head_hexsha": "164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-12-16T18:32:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T15:57:27.000Z", "max_issues_repo_path": "drought_impact_forecasting/losses.py", "max_issues_repo_name": "rudolfwilliam/satellite_image_forecasting", "max_issues_repo_head_hexsha": "164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "drought_impact_forecasting/losses.py", "max_forks_repo_name": "rudolfwilliam/satellite_image_forecasting", "max_forks_repo_head_hexsha": "164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-05T15:01:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-28T15:57:14.000Z", "avg_line_length": 41.6028368794, "max_line_length": 133, "alphanum_fraction": 0.5460279577, "include": true, "reason": "import numpy", "num_tokens": 1626}
|
"""
Copyright 2020 William Rochira at York Structural Biology Laboratory
"""
import os
import gzip
import pickle
import zipfile
import requests
import numpy as np
from common import setup
from _defs import ROTAMER_OUTPUT_DIR
REFERENCE_DATA_URL = 'https://github.com/rlabduke/reference_data/archive/master.zip'
REFERENCE_DATA_DIR = os.path.join(ROTAMER_OUTPUT_DIR, 'reference_data-master', 'Top8000', 'Top8000_rotamer_pct_contour_grids')
FILENAMES = { 'ARG' : 'rota8000-arg.data',
'ASN' : 'rota8000-asn.data',
'ASP' : 'rota8000-asp.data',
'CYS' : 'rota8000-cys.data',
'GLN' : 'rota8000-gln.data',
'GLU' : 'rota8000-glu.data',
'HIS' : 'rota8000-his.data',
'ILE' : 'rota8000-ile.data',
'LEU' : 'rota8000-leu.data',
'LYS' : 'rota8000-lys.data',
'MET' : 'rota8000-met.data',
'PHE' : 'rota8000-phetyr.data',
'PRO' : 'rota8000-pro.data',
'SER' : 'rota8000-ser.data',
'THR' : 'rota8000-thr.data',
'TRP' : 'rota8000-trp.data',
'TYR' : 'rota8000-phetyr.data',
'VAL' : 'rota8000-val.data' }
SERIALISED_LIB_PATH = os.path.join(ROTAMER_OUTPUT_DIR, 'library.pkl')
COMPRESSED_LIB_PATH = os.path.join(ROTAMER_OUTPUT_DIR, 'library.gz')
P_OUTLIER = 0.003
P_ALLOWED = 0.020
def download_reference_data():
if not os.path.isdir(os.path.join(ROTAMER_OUTPUT_DIR, 'reference_data-master')):
print('Downloading reference data...')
response = requests.get(REFERENCE_DATA_URL, stream=True)
with open(os.path.join(ROTAMER_OUTPUT_DIR, 'reference_data.zip'), 'wb') as outfile:
for chunk in response.iter_content(chunk_size=1024):
outfile.write(chunk)
print('Unzipping reference data...')
with zipfile.ZipFile(os.path.join(ROTAMER_OUTPUT_DIR, 'reference_data.zip'), 'r') as infile:
infile.extractall(os.path.join(ROTAMER_OUTPUT_DIR))
print('Deleting reference data ZIP...')
os.remove(os.path.join(ROTAMER_OUTPUT_DIR, 'reference_data.zip'))
print('Done.')
# These two functions are slow, which is why they were swapped out for NumPy
# bit-masking in the testing script and in the actual implementation. Don't use
# these for anything time-critical. I only left them in here in case I want them
# for something else.
# Compress a list of values into a list of integers of bit width n*l
def int_compress(xs, n, l):
ys = [ ]
xis = [ ]
ps = [ 2**(l*(n-i-1)) for i in range(n) ]
if len(xs)%n > 0:
xs = xs + [ 0 ] * (n-len(xs)%n)
for x in xs:
xis.append(x)
if len(xis) == n:
y = 0
for i, xi in enumerate(xis):
y += xi * ps[i]
ys.append(y)
xis = [ ]
return ys
# Decompress each value in a list into n integers of bit width l
def int_decompress(xs, n, l):
ys = [ ]
ps = [ 2**(l*(n-i-1)) for i in range(n) ]
for x in xs:
yis = [ ]
for i in range(n):
yi = x
for j in range(i):
yi -= yis[j] * ps[j]
yi //= ps[i]
yis.append(yi)
ys += yis
return ys
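# Hedged sanity check (added for illustration): packing then unpacking recovers the
# input, up to the zero padding that rounds its length up to a multiple of n.
def _int_compression_roundtrip_example():
    xs = [0, 1, 2, 3, 3, 2, 1]
    packed = int_compress(xs, 4, 2)        # four 2-bit values per output integer
    unpacked = int_decompress(packed, 4, 2)
    assert unpacked[:len(xs)] == xs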
def generate_library():
print('Generating rotamer library...')
dim_offsets = { }
dim_bin_ranges = { }
dim_bin_widths = { }
dim_num_options = { }
compressed_classification_bytes = { }
for code, filename in FILENAMES.items():
print('*** ' + code)
dim_offsets[code] = [ ]
dim_bin_ranges[code] = [ ]
dim_bin_widths[code] = [ ]
dim_num_options[code] = [ ]
# Load chi information and probabilities from file
given_classifications = { }
filepath = os.path.join(REFERENCE_DATA_DIR, filename)
with open(filepath, 'r') as infile:
for line in infile.readlines():
if line[0] == '#':
if line[:5] == '# x':
splitline = line.split(': ')[1].split(' ')
dim_min = float(splitline[0])
dim_max = float(splitline[1])
dim_size = dim_max - dim_min
dim_num_bins = int(splitline[2])
dim_bin_width = dim_size / float(dim_num_bins)
dim_bin_ranges[code].append((dim_min, dim_max))
dim_bin_widths[code].append(dim_bin_width)
continue
splitline = [ float(x) for x in line.split(' ') ]
chis = tuple(splitline[:-1])
probability = float(splitline[-1])
classification = 0
if probability < P_OUTLIER:
classification = 1
elif probability < P_ALLOWED:
classification = 2
else:
classification = 3
given_classifications[chis] = classification
        # Get first chi offsets (the grid origin) from an arbitrary grid point
        for dimension, offset_chi in enumerate(next(iter(given_classifications))):
dim_range = dim_bin_ranges[code][dimension]
dim_width = dim_bin_widths[code][dimension]
new_offset_chi = offset_chi - dim_width * (offset_chi // dim_width)
dim_offsets[code].append(new_offset_chi)
# Get number of combinations and initialise the array
dim_num_options[code] = [ int((r[1]-r[0])//w) for r, w in zip(dim_bin_ranges[code], dim_bin_widths[code]) ]
        total_num_combinations = np.prod(dim_num_options[code])
classifications = [ 0 ] * total_num_combinations
# Insert known values into the array
for chis, classification in given_classifications.items():
index = 0
for dimension, chi in enumerate(chis):
dim_offset = dim_offsets[code][dimension]
dim_width = dim_bin_widths[code][dimension]
                index += (chi - dim_offset) / dim_width * np.prod(dim_num_options[code][dimension+1:])
index = int(index)
classifications[index] = classification
# Generate compressed byte array
classifications_compressed = int_compress(classifications, 4, 2)
compressed_classification_bytes[code] = bytearray(classifications_compressed)
# Write serialised data
all_data = [ dim_offsets, dim_bin_ranges, dim_bin_widths, dim_num_options, compressed_classification_bytes ]
with open(SERIALISED_LIB_PATH, 'wb') as outfile:
pickle.dump(all_data, outfile)
with gzip.open(COMPRESSED_LIB_PATH, 'wb') as outfile:
pickle.dump(all_data, outfile)
print('Done.')
if __name__ == '__main__':
setup()
download_reference_data()
generate_library()
|
{"hexsha": "5a9a34fb553bed0c0f7b50cc8499ef8b44c74ee6", "size": 6824, "ext": "py", "lang": "Python", "max_stars_repo_path": "iris_tools/rotamer_generate_library.py", "max_stars_repo_name": "FilomenoSanchez/iris-validation", "max_stars_repo_head_hexsha": "a7bbb28dfe239527c32914229e69e007a519e0dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "iris_tools/rotamer_generate_library.py", "max_issues_repo_name": "FilomenoSanchez/iris-validation", "max_issues_repo_head_hexsha": "a7bbb28dfe239527c32914229e69e007a519e0dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-18T11:58:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-18T12:00:08.000Z", "max_forks_repo_path": "iris_tools/rotamer_generate_library.py", "max_forks_repo_name": "FilomenoSanchez/iris-validation", "max_forks_repo_head_hexsha": "a7bbb28dfe239527c32914229e69e007a519e0dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9064327485, "max_line_length": 126, "alphanum_fraction": 0.5861664713, "include": true, "reason": "import numpy", "num_tokens": 1658}
|
# Copyright (C) 2019 Harvard University. All Rights Reserved. Unauthorized
# copying of this file, via any medium is strictly prohibited Proprietary and
# confidential
# Developed by Mohammad Haft-Javaherian <mhaft-javaherian@mgh.harvard.edu>,
# <7javaherian@gmail.com>.
# ==============================================================================
"""plot the log file within the save model folder. 111
plots the train and validation loss values over last 100 recorded performance evaluations and update the
plot every 5 second. The figure has two subplots: top one has all the results and bottom one has last 100 log
records.
Notes:
Arguments are bash arguments.
Args:
exp_def: the experiment definition used for saving the model.
models_path: the path that model folder for `exp_def`
Returns:
PyPlot figure with two subplots.
See Also:
* :meth:`train`
"""
import argparse
import time
import os
import csv
import visdom
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def smooth(x):
"""Smoothing 1D using box filter with kernel size = 5
Args:
x: input 1D vector
Returns:
smoothed 1D vector
"""
s = np.concatenate((np.tile(x[0], 4), x))
s[4:] = (s[4:] + s[3:-1] + s[2:-2] + s[1:-3] + s[:-4]) / 5
return s[4:]
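# Hedged sanity check (added for illustration): a constant series is a fixed point
# of the box filter, and the output length matches the input length.
def _smooth_example():
    x = np.ones(10)
    np.testing.assert_allclose(smooth(x), x)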
def animate(i):
""" a handle function to update at each ste
Args:
i: animation frame. The argument will be used by :meth: animation.FuncAnimation
Returns:
updated axes within the figure, which all are defined in the outer scope.
See Also:
* :meth: animation.FuncAnimation
"""
global numRecord
try:
with open(log_file, 'r') as f:
reader = csv.reader(f, delimiter=',', skipinitialspace=True)
_ = next(reader) # header
data = []
for row in reader:
data.append([float(i) for i in row[:4]])
if len(data) != numRecord:
numRecord = len(data)
ax1.clear()
ax2.clear()
data = np.array(data)
iStart = 0
ax1.plot(data[iStart:, 0], smooth(data[iStart:, 2]))
ax1.plot(data[iStart:, 0], smooth(data[iStart:, 3]))
ax1.legend(['Training Loss', 'Validation Loss'])
ax1.set_title(args.exp_def)
ax1.set_xlabel('Epoch')
ax1.set_ylabel('Loss')
iStart = -50 if data.shape[0] > 50 else 0
ax2.plot(data[iStart:, 0], data[iStart:, 2] / data[iStart, 2])
ax2.plot(data[iStart:, 0], data[iStart:, 3] / data[iStart, 3])
ax2.set_xlabel('Epoch')
ax2.set_ylabel('Loss')
    except Exception:
        # the log file may be missing or mid-write; silently retry on the next update
        pass
def animate_vis():
""" a handle function to update at each ste
Args:
iEpoch: animation frame. The argument will be used by :meth: animation.FuncAnimation
Returns:
updated axes within the figure, which all are defined in the outer scope.
See Also:
* :meth: animation.FuncAnimation
"""
global numRecord
try:
with open(log_file, 'r') as f:
reader = csv.reader(f, delimiter=',', skipinitialspace=True)
_ = next(reader) # header
data = []
for row in reader:
data.append([float(i) for i in row[:4]])
if len(data) != numRecord:
numRecord = len(data)
data = np.array(data)
iStart = 0
vis.line(Y=np.concatenate((smooth(data[iStart:, 2])[..., np.newaxis],
smooth(data[iStart:, 3])[..., np.newaxis]), axis=-1), X=data[iStart:, 0],
env=vis_env, win=args.exp_def, opts=dict(title=args.exp_def, xlabel='Epoch', ylabel='Loss',
legend=['Training Loss', 'Validation Loss']))
iStart = -50 if data.shape[0] > 50 else 0
vis.line(Y=np.concatenate(((data[iStart:, 2] / data[iStart, 2])[..., np.newaxis],
(data[iStart:, 3] / data[iStart, 3])[..., np.newaxis]), axis=-1),
X=data[iStart:, 0],
env=vis_env, win=args.exp_def+'-zoom', opts=dict(title=args.exp_def+' zoom', xlabel='Epoch',
ylabel='Loss',
legend=['Training Loss', 'Validation Loss']))
    except Exception:
        # the log file may be missing or mid-write; silently retry on the next update
        pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-exp_def", type=str, default="test", help="experiment definition")
parser.add_argument("-models_path", type=str, default="../model/", help="path for saving models")
parser.add_argument("-render", type=str, default="visdom", help="plot environment: visdom (v) or pyplot (p)")
parser.add_argument("-nvidia_smi", type=int, default=0, help="print output of nvidia-smi")
args = parser.parse_args()
log_file = args.models_path + args.exp_def + '/log-' + args.exp_def + '.csv'
nGPU = len(os.popen('nvidia-smi').read().split('+\n')) - 5
numRecord = 0
if args.render[0].lower() == 'v':
vis_env = 'haft'
vis = visdom.Visdom(env=vis_env)
while True:
animate_vis()
if args.nvidia_smi:
smi = '<p style="color:blue;font-family:monospace;font-size:80%;">' + \
'<br>'.join(os.popen('nvidia-smi').read().split('\n')[3:(7 + 3 * nGPU)]) + '</p>'
vis.text(smi, env=vis_env, win='nvidia-smi')
time.sleep(5)
else:
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
animate(-1)
ani = animation.FuncAnimation(fig, animate, interval=5000)
plt.show()
|
{"hexsha": "423c52985996366f7dcf4e6fe24aca4ba2496bb3", "size": 5956, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/plot_log_file.py", "max_stars_repo_name": "mvWellman/OCTseg", "max_stars_repo_head_hexsha": "c3fe1098d031f74422956e2335dd4bae16dde7b6", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-30T00:50:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T10:16:34.000Z", "max_issues_repo_path": "util/plot_log_file.py", "max_issues_repo_name": "mvWellman/OCTseg", "max_issues_repo_head_hexsha": "c3fe1098d031f74422956e2335dd4bae16dde7b6", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/plot_log_file.py", "max_forks_repo_name": "mvWellman/OCTseg", "max_forks_repo_head_hexsha": "c3fe1098d031f74422956e2335dd4bae16dde7b6", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-30T00:30:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T13:34:51.000Z", "avg_line_length": 35.4523809524, "max_line_length": 114, "alphanum_fraction": 0.5485224983, "include": true, "reason": "import numpy", "num_tokens": 1449}
|
{-# LANGUAGE ForeignFunctionInterface #-}
module Haskstat where
import Data.List
import Data.Maybe
import Data.Complex
foreign import ccall "erf" c_erf :: Double -> Double
floatLength :: Fractional a => [b] -> a
floatLength xs = fromIntegral $ length xs
mean :: Fractional a => [a] -> a
mean xs = (foldl' (+) 0 xs) / floatLength xs
trimmedMean :: (Ord a, Fractional a) => [a] -> Int -> a
trimmedMean xs 0 = mean xs
trimmedMean xs m = mean $ take (n-2*m) $ drop m (sort xs)
where
n = length xs
var :: Fractional a => [a] -> a
var xs = (x2Sum/n - (xSum/n)^2)
where
(xSum, x2Sum) = foldl' (\(f,s) x -> (f+x,s+x^2)) (0,0) xs
n = floatLength xs
varSample :: Fractional a => [a] -> a
varSample xs = n/(n-1) * var xs
where
n = floatLength xs
std :: Floating a => [a] -> a
std x = sqrt $ var x
stdSample :: Floating a => [a] -> a
stdSample x = sqrt $ varSample x
skewness :: Floating a => [a] -> a
skewness xs = (mu3/n) / (mu2/n)**(1.5)
where
mu = mean xs
(mu2, mu3) = foldl' (\(s,t) x -> (s + (x-mu)^2, t + (x-mu)^3)) (0, 0) xs
n = floatLength xs
quartileSkewness :: (RealFrac a, Floating a) => [a] -> a
quartileSkewness xs = (q3 + q1 - 2.0*q2)/(q3 - q1)
where
q1 = percentile xs 25
q2 = median xs
q3 = percentile xs 75
octileSkewness :: (RealFrac a, Floating a) => [a] -> a
octileSkewness xs = (q875 + q125 - 2.0*q2)/(q875 - q125)
where
q125 = percentile xs 12.5
q2 = median xs
q875 = percentile xs 87.5
substractValue :: Floating a => [a] -> a -> [a]
substractValue (x:xs) v = abs(x-v):(substractValue xs v)
substractValue [] v = []
mad :: (RealFrac a, Floating a) => [a] -> a
mad xs = median dev
where
dev = substractValue xs $ median xs
--medcouple :: (Ord a, Floating a, RealFrac a) => [a] -> a
--medcouple xs = med
-- where
-- sorted_xs = reverse $ sort xs
-- med = median sorted_xs
-- zs = sorted_xs - med
-- h_zi_zj = [
-- x_plus = filter (med<) xs
-- x_minus = filter (med>) xs
rank :: (Enum a, Num a, Ord a, Fractional a) => [a] -> [a]
rank xs = fromJust <$> map (\k -> lookup k pairs) xs
where
g = groupBy (\a b -> fst a == fst b) $ zip (sort xs) [1..]
r = map (\k -> mean $ map snd k) g
pairs = zip (nub (sort xs)) r
kurtosis :: Floating a => [a] -> a
kurtosis xs = n * mu4 / mu2^2
where
mu = mean xs
(mu2, mu4) = foldl' (\(s, f) x -> (s + (x-mu)^2, f + (x-mu)^4)) (0, 0) xs
n = floatLength xs
robustKurtosis :: (RealFrac a, Floating a) => [a] -> a
robustKurtosis xs = ((q875 - q625) + (q375 - q125))/(q75 - q25)
where
q125 = percentile xs 12.5
q25 = percentile xs 25
q375 = percentile xs 37.5
q625 = percentile xs 62.5
q75 = percentile xs 75
q875 = percentile xs 87.5
percentile :: (Ord a, Fractional a, RealFrac a) => [a] -> a -> a
percentile xs 0 = minimum xs
percentile xs 100 = maximum xs
percentile xs perc = (xSorted!!idx) * (1-fraction) + (xSorted!!(idx+1)) * (fraction)
where
xSorted = sort xs
        perc' = perc / 100.0
n = floatLength xs
idx = floor (perc'*(n-1)) :: Int
fraction = perc'*(n-1) - fromIntegral idx
median :: (Ord a, Fractional a, RealFrac a) => [a] -> a
median xs = percentile xs 50
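-- Hedged usage sketch (doctest-style, added for illustration):
-- >>> percentile [1.0, 2.0, 3.0, 4.0] 50
-- 2.5
-- >>> median [1.0, 2.0, 3.0]
-- 2.0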
data Summary a = Summary {
min_ :: a,
p25 :: a,
med :: a,
p75 :: a,
max_ :: a
} deriving (Show)
summary :: (Ord a, Fractional a, RealFrac a) => [a] -> Summary a
summary xs = Summary {
min_ = minimum xs,
p25 = percentile xs 25,
med = median xs,
p75 = percentile xs 75,
max_ = maximum xs
}
count :: (Ord a, Num a) => [a] -> [(a,Int)]
count x = zip (nub x_sorted) (map length (group x_sorted))
where
x_sorted = sort x
binning :: (Ord a, Fractional a) => [a] -> Int -> [((a,a), Int)]
binning [] _ = []
binning x nbrBin = zip step $ map length filters
where
step = steps ((maxX - minX) / fromIntegral nbrBin) minX maxX
maxX = maximum x
minX = minimum x
bounds = map (\(a,b) -> (&&) <$> (>=a) <*> (<b)) (init step) ++
[(\(a,b) -> (&&) <$> (>=a) <*> (<=b)) (last step)]
filters = map (flip filter x) bounds
steps :: (Ord a, Num a) => a -> a -> a -> [(a,a)]
steps step start stop
| start < (stop-step) = (start, start+step) : steps step (start+step) stop
| otherwise = [(start,stop)]
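-- Hedged usage sketch (doctest-style, added for illustration): unit-width bins
-- covering [0, 3].
-- >>> steps 1.0 0.0 3.0
-- [(0.0,1.0),(1.0,2.0),(2.0,3.0)]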
jarqueBera :: [Double] -> (Double, Double)
jarqueBera xs = (value, exp (-0.5*value))
where
n = fromIntegral $ length xs
mu = mean xs
(mu2, mu3, mu4) = foldl' (\(s,t,f) x -> (s+(x-mu)^2, t+(x-mu)^3, f+(x-mu)^4)) (0,0,0) xs
s = (mu3/n)/(mu2/n)**1.5
k = n * mu4 /mu2^2
value = n / 6.0 * (s^2 + 0.25*(k - 3.0)^2)
fft :: (Enum a, Floating a, RealFloat a) => [Complex a] -> [Complex a]
fft xs@(y:ys)
| n > 1 = zipWith (+) even_part factor1 ++ zipWith (-) even_part factor1
| otherwise = [y]
where
n = floatLength xs
factor = expBasis n
factor1 = zipWith (*) factor odd_part
even_part = fft $ evens xs
odd_part = fft $ odds xs
ifftUnnormalised :: (Enum a, Floating a, RealFloat a) => [Complex a] -> [Complex a]
ifftUnnormalised xs@(y:ys)
| n > 1 = zipWith (+) even_part factor1 ++ zipWith (-) even_part factor1
| otherwise = [y]
where
n = floatLength xs
factor = conjugate <$> expBasis n
factor1 = zipWith (*) factor odd_part
even_part = ifftUnnormalised $ evens xs
odd_part = ifftUnnormalised $ odds xs
ifft :: (Enum a, Floating a, RealFloat a) => [Complex a] -> [Complex a]
ifft xs = (/n) <$> (ifftUnnormalised xs)
where
n = floatLength xs
autocorr :: (Enum a, Floating a, RealFloat a) => Int -> [Complex a] -> [a]
autocorr nlags xs = (/(head acovf)) <$> acovf
where
n = floatLength xs
fr = fft $ zeroPadding (nextRegular (2*n+1)) (normalise xs)
acovf = take nlags (realPart <$> (ifft $ zipWith (*) fr (map conjugate fr)))
zeroPadding :: (Num a) => Double -> [a] -> [a]
zeroPadding m xs
| n < m = zeroPadding m (xs ++ [0])
| otherwise = xs
where
n = floatLength xs
nextRegular :: (RealFrac a, Floating a) => a -> a
nextRegular m = 2 ^ exponent
where
exponent = ceiling $ logBase 2 m
-- zeroPadding :: [Double] -> [Double]
-- zeroPadding xs
-- | n < m = zeroPadding (xs ++ [0])
-- | otherwise = xs
-- where
-- n = fromIntegral $ length xs
-- exponent = ceiling $ logBase 2 n
-- m = 2.0 ^ exponent
evens :: [a] -> [a]
evens (x:z:xs) = x : evens xs
evens (x:xs) = [x]
evens _ = []
odds :: [a] -> [a]
odds (x:z:xs) = z : odds xs
odds _ = []
expBasis :: (Enum a, RealFloat a, Num a) => a -> [Complex a]
expBasis n = map g [0..(n-1)]
where
exponent = (0 :+ ) <$> ((-2.0)*pi / n *)
g = exp . exponent
normalise :: (Floating a) => [a] -> [a]
normalise xs = [(x - mu) / sigma | x <- xs]
where
mu = mean xs
sigma = std xs
|
{"hexsha": "a727ff20f91704df805f2a3edaf194c9a667e3cf", "size": 7187, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "Haskstat.hs", "max_stars_repo_name": "kevintyloo/haskstat", "max_stars_repo_head_hexsha": "1df809220ff904161719e5ec5272bda239c352c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-19T14:54:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-19T14:54:58.000Z", "max_issues_repo_path": "Haskstat.hs", "max_issues_repo_name": "vekrt/haskstat", "max_issues_repo_head_hexsha": "1df809220ff904161719e5ec5272bda239c352c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Haskstat.hs", "max_forks_repo_name": "vekrt/haskstat", "max_forks_repo_head_hexsha": "1df809220ff904161719e5ec5272bda239c352c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8215767635, "max_line_length": 96, "alphanum_fraction": 0.5347154585, "num_tokens": 2478}
|
# -*- coding: utf-8 -*-
import itertools
import numpy as np
import shapely.geometry
MESH_LEVEL_ALIAS = {
"80km": 1, "10km": 2, "1km": 3,
"500m": 4, "1/2": 4, "half": 4,
"250m": 5, "1/4": 5, "quarter": 5,
"125m": 6, "1/8": 6, "oneeighth": 6
}
# lat*120 = km, lon*80 = km
# i.e. km/120 = lat, km/80 = lon
MESH_SIZE_KM = (
None, # place holder for index match
80.0, 10.0, 1.0, 0.5, 0.25, 0.125
)
MESH_SIZE_LONLAT = tuple((None, None) if km is None else (km/80.0, km/120.0) for km in MESH_SIZE_KM)
def mesh_size_in_km(level):
level = _get_mesh_level(level)
assert level >= 1 and level < len(MESH_SIZE_KM)
return MESH_SIZE_KM[level]
def mesh_size_in_lonlat(level):
level = _get_mesh_level(level)
assert level >= 1 and level < len(MESH_SIZE_KM)
return MESH_SIZE_LONLAT[level]
def mesh_level_aliases():
"""
Mesh level aliases
Returns
-------
Dictionary of mesh level aliases
"""
return dict(v for v in MESH_LEVEL_ALIAS.items())
def _get_mesh_level(level):
if isinstance(level, int): return level
assert isinstance(level, str), "level must be str or int"
if level not in MESH_LEVEL_ALIAS:
raise ValueError("invalid value for level: %s" % level)
return MESH_LEVEL_ALIAS[level]
def containing_mesh(lon, lat, level):
"""
Mesh area containing the specified coordinates
Parameters
----------
lon: float/numpy.array/list
        Longitude
lat: float/numpy.array/list
Latitude
level: int/str
Mesh level. If int, 1-6.
Intuitive str expression is also allowed (e.g. "500m").
See mesh_level_aliases() for possible level expressions.
Returns
-------
meshcode as int if lon, lat are scalar.
meshcode as numpy.array of type int if lon, lat are sequences.
"""
level = _get_mesh_level(level)
assert level in [1, 2, 3, 4, 5, 6], "Only level 1-6 is supported"
lon = np.array(lon)
lat = np.array(lat)
# Output parser for convenience.
# Extract a single element array to a scalar.
_parse = lambda x: x.item() if len(x.shape) == 0 else x
# level 1
x = lon - 100; y = lat * 1.5
a = np.floor(x).astype(np.int64); b = np.floor(y).astype(np.int64)
mesh = 100*b + a
if level == 1: return _parse(mesh)
# level 2
    x = x - a; y = y - b  # set the south-west corner of the parent mesh as origin
x = x * 8; y = y * 8
a = np.floor(x).astype(np.int64); b = np.floor(y).astype(np.int64)
mesh = 100*mesh + 10*b + a
if level == 2: return _parse(mesh)
# level 3
x = x - a; y = y - b
x = x * 10; y = y * 10
a = np.floor(x).astype(np.int64); b = np.floor(y).astype(np.int64)
mesh = 100*mesh + 10*b + a
if level == 3: return _parse(mesh)
# level 4-6
for j in range(4, 7):
x = x - a; y = y - b
x = x * 2; y = y * 2
a = np.floor(x).astype(np.int64); b = np.floor(y).astype(np.int64)
mesh = 10*mesh + (1 + a + 2*b)
if j == level: return _parse(mesh)
# This part shouldn't be reached
raise ValueError("level must be 1-6")
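# Usage sketch for containing_mesh (values hand-computed from the
# formulas above for a point near Tokyo Station):
#   containing_mesh(139.7671, 35.6812, 1)       # -> 5339 (80 km mesh)
#   containing_mesh(139.7671, 35.6812, "500m")  # -> 533946113 (level 4)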
def mesh_center(mesh):
"""
Center coordinates of mesh areas
Parameters
----------
mesh: int/str/list/numpy.array
mesh code. If sequence, all codes must be of the same level.
Returns
-------
Tuple of lon, lat.
    Shape of each element matches the shape of mesh.
"""
x1, y1, x2, y2 = mesh_coord(mesh)
return (x1 + x2) / 2.0, (y1 + y2) / 2.0
def mesh_coord(mesh):
"""
Coordinates of mesh areas
Parameters
----------
mesh: int/str/list/numpy.array
mesh code. If sequence, all codes must be of the same level.
Returns
-------
Tuple of lon1, lat1, lon2, lat2
    Shape of each element matches the shape of mesh.
"""
x = np.array(mesh).astype(np.int64)
digits = np.floor(np.log10(x)).astype(int) + 1
if len(digits.shape) > 0:
digits = np.unique(digits)
        assert len(digits) == 1, "mesh code level must be unique (#digits = %s)" % digits
digits = digits.item()
level = 1 if digits == 4 else \
2 if digits == 6 else \
3 if digits == 8 else \
4 if digits == 9 else \
5 if digits == 10 else \
6 if digits == 11 else \
None
assert level is not None, "code length is %d, matching no mesh level" % digits
# get mesh size
width, height = MESH_SIZE_LONLAT[level]
# get south-west coordinates
# level 1
b, x = np.divmod(x, 10**(digits-2))
digits -= 2
a, x = np.divmod(x, 10**(digits-2))
digits -= 2
lon = a + 100.0 ; lat = b * 2.0 / 3
if level == 1: return lon, lat, lon + width, lat + height
# level 2
b, x = np.divmod(x, 10**(digits-1))
digits -= 1
a, x = np.divmod(x, 10**(digits-1))
digits -= 1
lon += a*MESH_SIZE_LONLAT[2][0]; lat += b*MESH_SIZE_LONLAT[2][1]
if level == 2: return lon, lat, lon + width, lat + height
# level 3
b, x = np.divmod(x, 10**(digits-1))
digits -= 1
a, x = np.divmod(x, 10**(digits-1))
digits -= 1
lon += a*MESH_SIZE_LONLAT[3][0]; lat += b*MESH_SIZE_LONLAT[3][1]
if level == 3: return lon, lat, lon + width, lat + height
# level 4
for j in range(4, 7):
k, x = np.divmod(x, 10**(digits-1))
digits -= 1
b, a = np.divmod(k - 1, 2)
lon += a*MESH_SIZE_LONLAT[j][0]; lat += b*MESH_SIZE_LONLAT[j][1]
if level == j: return lon, lat, lon + width, lat + height
# This part shouldn't be reached
raise ValueError("level must be 1-6")
def mesh_polygon(mesh):
"""
Mesh area(s) as polygon object
Parameters
----------
mesh: int/str/list/numpy.array
mesh code. If sequence, all codes must be of the same level.
Returns
-------
shapely.geometry.Polygon if mesh is scalar.
List of shapely.geometry.Polygon if mesh is a sequence.
"""
mesh = np.array(mesh).astype(np.int64)
coords = mesh_coord(mesh)
rects = [shapely.geometry.box(*c) for c in zip(*coords)]
if len(rects) == 1:
return rects[0]
else:
return rects
def mesh_cover(g, level, rectonly=False):
"""
Find a set of mesh areas that cover a geometry
Parameters
----------
g: geometry object
Geometry to be covered
level: int
Mesh level.
    rectonly: bool (default: False)
        If True, keep every mesh area in the bounding rectangle,
        even those that do not intersect the geometry
    Returns
    -------
    Tuple of (mesh codes covering the geometry, fraction of each
    mesh area that intersects the geometry).
    """
level = _get_mesh_level(level)
x1, y1, x2, y2 = g.bounds
sw, ne = containing_mesh([x1, x2], [y1, y2], level=level)
centx, centy = mesh_center([sw, ne])
w, h = MESH_SIZE_LONLAT[level]
lons, lats = zip(*itertools.product(
np.arange(centx[0], centx[1] + w, w),
np.arange(centy[0], centy[1] + h, h)
))
meshes = containing_mesh(lons, lats, level=level)
rects = mesh_polygon(meshes)
tmp = [(m, r.intersection(g).area / r.area) \
for m, r in zip(meshes, rects) if rectonly or r.intersects(g)]
meshes, fractions = zip(*tmp)
return meshes, fractions
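# Usage sketch for mesh_cover (illustrative coordinates):
#   g = shapely.geometry.box(139.70, 35.65, 139.75, 35.70)
#   meshes, fractions = mesh_cover(g, "1km")
#   # fractions[i] is the share of mesh i's area intersecting g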
def contained_mesh(g, level):
"""
Find mesh areas that sit inside a geometry
Parameters
----------
g: geometry object
Geometry to be covered
level: int
Mesh level.
Returns
-------
List of mesh codes inside the geometry
"""
    # mesh_cover with rectonly=True finds all mesh areas in the
    # bounding rectangle of g; the contained meshes are the subset
    # whose intersection fraction with g is (close to) 1
meshes, fracs = mesh_cover(g, level, rectonly=True)
contained = np.isclose(fracs, 1.0)
meshes = [m for m, c in zip(meshes, contained) if c]
return meshes
|
{"hexsha": "67c3b5061384ff40a420ee7e52e661a2e3d5c85b", "size": 7843, "ext": "py", "lang": "Python", "max_stars_repo_path": "meshjp/meshjp.py", "max_stars_repo_name": "kotamori4/meshjp", "max_stars_repo_head_hexsha": "598100298bd46d05ef6dd90a49c305db3118d721", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "meshjp/meshjp.py", "max_issues_repo_name": "kotamori4/meshjp", "max_issues_repo_head_hexsha": "598100298bd46d05ef6dd90a49c305db3118d721", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-01-23T13:46:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-23T13:47:22.000Z", "max_forks_repo_path": "meshjp/meshjp.py", "max_forks_repo_name": "kotamori4/meshjp", "max_forks_repo_head_hexsha": "598100298bd46d05ef6dd90a49c305db3118d721", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-05T14:41:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-05T14:41:17.000Z", "avg_line_length": 27.6161971831, "max_line_length": 100, "alphanum_fraction": 0.5773301033, "include": true, "reason": "import numpy", "num_tokens": 2406}
|
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
import numpy as np
BATCH_SIZE = 128
NUM_CLASSES = 10
EPOCHS = 20
def get_dataset():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype(np.float32)/255
x_test = x_test.astype(np.float32)/255
    # convert labels to one-hot vectors
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
return x_train, y_train, x_test, y_test
def define_model():
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(NUM_CLASSES, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
return model
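# Note: the summary above should report 669,706 trainable parameters:
# 784*512+512 = 401,920, 512*512+512 = 262,656 and 512*10+10 = 5,130
# for the three Dense layers respectively.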
def main():
x_train, y_train, x_test, y_test = get_dataset()
model = define_model()
history = model.fit(x_train, y_train,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test,y_test,verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
if __name__ == '__main__':
main()
|
{"hexsha": "865244eaeb21537c364a18e6e892b0dff7d307cd", "size": 1595, "ext": "py", "lang": "Python", "max_stars_repo_path": "deeplearning/keras/mnist/mnist_mlp.py", "max_stars_repo_name": "terasakisatoshi/pythonCodes", "max_stars_repo_head_hexsha": "baee095ecee96f6b5ec6431267cdc6c40512a542", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deeplearning/keras/mnist/mnist_mlp.py", "max_issues_repo_name": "terasakisatoshi/pythonCodes", "max_issues_repo_head_hexsha": "baee095ecee96f6b5ec6431267cdc6c40512a542", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deeplearning/keras/mnist/mnist_mlp.py", "max_forks_repo_name": "terasakisatoshi/pythonCodes", "max_forks_repo_head_hexsha": "baee095ecee96f6b5ec6431267cdc6c40512a542", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1475409836, "max_line_length": 64, "alphanum_fraction": 0.6482758621, "include": true, "reason": "import numpy", "num_tokens": 390}
|
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
from typing import List, Optional, Sequence
import numpy as np
class LaneSegment:
def __init__(
self,
id: int,
has_traffic_control: bool,
turn_direction: str,
is_intersection: bool,
l_neighbor_id: Optional[int],
r_neighbor_id: Optional[int],
predecessors: List[int],
successors: Optional[List[int]],
centerline: np.ndarray,
) -> None:
"""Initialize the lane segment.
Args:
id: Unique lane ID that serves as identifier for this "Way"
            has_traffic_control: Whether or not this lane segment has traffic control
turn_direction: 'RIGHT', 'LEFT', or 'NONE'
is_intersection: Whether or not this lane segment is an intersection
l_neighbor_id: Unique ID for left neighbor
r_neighbor_id: Unique ID for right neighbor
            predecessors: The IDs of the lane segments that come before this one
            successors: The IDs of the lane segments that come after this one.
centerline: The coordinates of the lane segment's center line.
"""
self.id = id
self.has_traffic_control = has_traffic_control
self.turn_direction = turn_direction
self.is_intersection = is_intersection
self.l_neighbor_id = l_neighbor_id
self.r_neighbor_id = r_neighbor_id
self.predecessors = predecessors
self.successors = successors
self.centerline = centerline
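# Construction sketch (all values below are illustrative, not real map data):
#   lane = LaneSegment(
#       id=9621385, has_traffic_control=False, turn_direction="NONE",
#       is_intersection=False, l_neighbor_id=None, r_neighbor_id=9621386,
#       predecessors=[9619724], successors=[9620926],
#       centerline=np.array([[0.0, 0.0], [5.0, 0.0], [10.0, 0.0]]),
#   )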
|
{"hexsha": "07616f99062f8374c8e18c81de70e96bdacb138a", "size": 1515, "ext": "py", "lang": "Python", "max_stars_repo_path": "argoverse/map_representation/lane_segment.py", "max_stars_repo_name": "ajinkyakhoche/argoverse-api", "max_stars_repo_head_hexsha": "b1730f9e4377325436f3364abb4c1fe54ec71b0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-29T00:43:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-29T00:43:28.000Z", "max_issues_repo_path": "argoverse/map_representation/lane_segment.py", "max_issues_repo_name": "ajinkyakhoche/argoverse-api", "max_issues_repo_head_hexsha": "b1730f9e4377325436f3364abb4c1fe54ec71b0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "argoverse/map_representation/lane_segment.py", "max_forks_repo_name": "ajinkyakhoche/argoverse-api", "max_forks_repo_head_hexsha": "b1730f9e4377325436f3364abb4c1fe54ec71b0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-15T17:31:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T17:31:42.000Z", "avg_line_length": 36.0714285714, "max_line_length": 80, "alphanum_fraction": 0.6429042904, "include": true, "reason": "import numpy", "num_tokens": 312}
|
import os
# import tensorflow as tf
import tensorrt as trt
from tensorrt.parsers import uffparser
import pycuda.driver as cuda
# import uff
import cv2
import numpy as np
from tqdm import tqdm
TEST_PATH = "/media/andy/Data/DevWorkSpace/Projects/imageClassifier/data/test/"
# TEST_PATH = "/home/andy/caffe/examples/mydata/slot_classifier/data/train_extend/all"
ENGINE_PATH = "/home/andy/caffe/examples/mydata/slot_classifier/engine/slot_6_model.engine"
# ENGINE_PATH = "/home/andy/caffe/examples/mydata/slot_classifier/engine/px2_classifier.engine"
OUTPUT_PATH = "./result/predict_out"
NET_INPUT_SHAPE = (256, 256)
NET_OUTPUT_SHAPE = 6
class_labels = ['error', 'half', 'invlb', 'invls', 'valid', 'corner']
# class_labels = ['valid_black', 'valid_leaf', 'valid_other', 'valid_shadow', 'valid_water', 'void_underground']
# Load Image
def load_image(img_path, net_input_shape):
imgBGR = cv2.imread(img_path)
img = cv2.resize(imgBGR, net_input_shape)
# BGR -> RGB
#img = img[:,:, (2, 1, 0)]
## Method 1
# imgT = np.transpose(img, (2, 0, 1)) # c,w,h
# imgF = np.asarray(imgT, dtype=np.float32)
# mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
# imgS = np.subtract(imgF,mean)
## Method 2
imgF = np.asarray(img, dtype=np.float32)
mean = [128.0, 128.0, 128.0] # Caffe image mean
# mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
imgSS = np.subtract(imgF, mean)/128.0
imgS = np.transpose(imgSS, (2, 0, 1)) # c,w,h
# RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)
return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous
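# Note on Method 2 above: (pixel - 128) / 128 maps the 0..255 input range
# to roughly [-1.0, 0.992]; e.g. a pixel value of 192 becomes
# (192 - 128) / 128 = 0.5.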
def test_Loader(TEST_PATH, net_input_shape):
label_list = []
img_list = []
imgBGR_list = []
img_path_list = []
pair = []
folders = os.listdir(TEST_PATH)
for folder in folders:
folder_path = os.path.join(TEST_PATH, folder)
imgs = os.listdir(folder_path)
for img in tqdm(imgs):
img_path = os.path.join(folder_path, img)
imgBGR, img = load_image(img_path, net_input_shape)
label = class_labels.index(folder)
img_list.append(img)
imgBGR_list.append(imgBGR)
img_path_list.append(img_path)
label_list.append(label)
pair.append((img, label))
return pair, img_list, label_list, imgBGR_list, img_path_list
imgTestData = test_Loader(TEST_PATH, NET_INPUT_SHAPE)
# Load Engine file
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.ERROR)
engine = trt.utils.load_engine(G_LOGGER, ENGINE_PATH)
context = engine.create_execution_context()
runtime = trt.infer.create_infer_runtime(G_LOGGER)
# output = np.empty(1, dtype = np.float32)
# # Allocate device memory
# d_input = cuda.mem_alloc(1 * imgTestData[0][0][0].nbytes)
# d_output = cuda.mem_alloc(NET_OUTPUT_SHAPE * output.nbytes)
# bindings = [int(d_input), int(d_output)]
# stream = cuda.Stream()
predicts = []
pair = imgTestData[0]
imgBGRList = imgTestData[3]
imgPathList = imgTestData[4]
p0 = 0
p1 = 0
p2 = 0
p3 = 0
p4 = 0
p5 = 0
p = 0
for (img, label), imgBGR, imgPath in zip(pair, imgBGRList, imgPathList):
output = np.empty(NET_OUTPUT_SHAPE, dtype = np.float32)
    # Allocate device memory
d_input = cuda.mem_alloc(1 * img.nbytes)
d_output = cuda.mem_alloc(1 * output.nbytes)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
# Transfer input data to device
cuda.memcpy_htod_async(d_input, img, stream)
# Execute model
context.enqueue(1, bindings, stream.handle, None)
# Transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
    # Synchronize threads
stream.synchronize()
softmax = np.exp(output) / np.sum(np.exp(output))
predict = np.argmax(softmax)
predicts.append(predict)
shape = np.shape(imgBGR)
cv2.putText(imgBGR, str(predict), (shape[1]//2,shape[0]//2), cv2.FONT_HERSHEY_SIMPLEX, 2 , (0, 0, 255), 2)
img_dst_folder = os.path.join(OUTPUT_PATH, os.path.dirname(imgPath).split("/")[-1])
if not os.path.exists(img_dst_folder):
os.makedirs(img_dst_folder)
        # os.makedirs(img_dst_folder)  # makedirs creates intermediate directories recursively
img_dst_path = os.path.join(img_dst_folder, os.path.basename(imgPath))
cv2.imwrite(img_dst_path, imgBGR)
    # Count correct predictions overall and per class
    if label == predict:
        p += 1
        if label == 0:
            p0 += 1
        elif label == 1:
            p1 += 1
        elif label == 2:
            p2 += 1
        elif label == 3:
            p3 += 1
        elif label == 4:
            p4 += 1
        elif label == 5:
            p5 += 1
    # Save images where an invalid slot was predicted as valid (class 4),
    # or a valid slot was predicted as anything else
    img_dst_folder_4 = img_dst_folder + "_"
    if not os.path.exists(img_dst_folder_4):
        os.makedirs(img_dst_folder_4)
    if (label != 4 and predict == 4) or (label == 4 and predict != 4):
        img_dst_path_4 = os.path.join(img_dst_folder_4, os.path.basename(imgPath))
        cv2.imwrite(img_dst_path_4, imgBGR)
        print("True = ", label, ", predict = ", predict, ", softmax = ", softmax)
groundTruth = np.array(imgTestData[2])
predicts = np.array(predicts)
error = predicts[predicts != groundTruth]
print(groundTruth)
print("-------")
print(predicts)
print("-------")
print(len(error))
print((len(imgTestData[0])-len(error))/len(imgTestData[0]))
print("p0=",p0)
print("p1=",p1)
print("p2=",p2)
print("p3=",p3)
print("p4=",p4)
print("p5=",p5)
|
{"hexsha": "86ebee0ee11fc6591c5267c871d8c85eb398d558", "size": 5685, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tensorrt/tools/caffe_engine/call_engine_to_infer_all_print_predict_on_image_6classes.py", "max_stars_repo_name": "aimuch/AIEnvConfig", "max_stars_repo_head_hexsha": "4ccd54e9c601e8c91efebcec1a50115d75d0cf96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 250, "max_stars_repo_stars_event_min_datetime": "2019-06-14T16:12:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T09:56:26.000Z", "max_issues_repo_path": "src/tensorrt/tools/caffe_engine/call_engine_to_infer_all_print_predict_on_image_6classes.py", "max_issues_repo_name": "aimuch/AIEnvConfig", "max_issues_repo_head_hexsha": "4ccd54e9c601e8c91efebcec1a50115d75d0cf96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-08-10T07:15:39.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-23T01:51:17.000Z", "max_forks_repo_path": "src/tensorrt/tools/caffe_engine/call_engine_to_infer_all_print_predict_on_image_6classes.py", "max_forks_repo_name": "aimuch/AIEnvConfig", "max_forks_repo_head_hexsha": "4ccd54e9c601e8c91efebcec1a50115d75d0cf96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2019-08-16T13:42:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:38:09.000Z", "avg_line_length": 31.7597765363, "max_line_length": 125, "alphanum_fraction": 0.6569920844, "include": true, "reason": "import numpy,import pycuda", "num_tokens": 1702}
|
import sys
sys.path.append('..')
from common.core import *
from common.gfxutil import *
from common.audio import *
from common.mixer import *
from common.note import *
from common.wavegen import *
from common.wavesrc import *
from common.writer import *
from Enemy import *
from Background import *
from Foreground import *
from Player import *
from MusicHelper import *
from TonalFlowChart import *
from AudioController import *
from leap.LeapHelper import *
from LeapHand import *
from Flame import Flame
from kivy.core.window import Window
from kivy.uix.image import Image
from kivy.clock import Clock as kivyClock
from kivy.uix.label import Label
from kivy.graphics.instructions import InstructionGroup
from kivy.graphics import Color, Ellipse, Rectangle, Line
from kivy.graphics import PushMatrix, PopMatrix, Translate, Scale, Rotate
from kivy.config import Config
from random import random, randint, choice
import numpy as np
#beat_per_sec
bps = 10.0/9.0
#data = [(2*bps,0),(4*bps,-250),(6*bps,250),(8*bps,-400),(12*bps,0)]
#data = [(2*bps,0),(2*bps,-500),(4*bps,500),(4*bps,-250),(6*bps,250),(8*bps,-400),(12*bps,0)]
#data = [(2*bps,0)]
data = []
for i in range(2, 60, 4):
time = i*bps
num_enemies = randint(1,1)
existing_x_positions = []
while num_enemies > 0:
x_pos = randint(-600,600)
cont = False
for x in existing_x_positions:
if abs(x-x_pos) <= 200:
cont = True
                break
if cont:
continue
else:
data.append((time,x_pos))
existing_x_positions.append(x_pos)
num_enemies -= 1
print(data)
class E_List(InstructionGroup):
def __init__(self):
super(E_List, self).__init__()
self.enemies = []
def add(self, obj):
super(E_List, self).insert(0,obj)
self.enemies.insert(0,obj)
def on_update(self, dt):
kill_list = []
for e in self.enemies:
if not e.on_update(dt):
kill_list.append(e)
for k in kill_list:
self.enemies.remove(k)
self.remove(k)
return True
class ProgressionManager(InstructionGroup):
def __init__(self):
super(ProgressionManager, self).__init__()
# list of tuples: (scale_degree, display_rect)
self.progression = []
super(ProgressionManager, self).add(Color(0.6,0.6,0.8))
self.prev_scale_degree = 1
def add(self, scale_degree):
x = 50 + len(self.progression)*50
y = Window.height - 200
txt = self.get_chord_texture(scale_degree)
display_rect = Rectangle( texture=txt, pos=(x,y) , size=(45,45) )
tup = (scale_degree, display_rect)
super(ProgressionManager, self).add(display_rect)
self.progression.append(tup)
self.prev_scale_degree = scale_degree
def clear(self):
for c in self.progression:
super(ProgressionManager, self).remove(c[1])
self.progression[:] = []
def get_chord_texture(self, scale_degree):
data_path = "../data/"
romanNumeral = Chord.majorKeyRomanNumerals[scale_degree]
#get string with name of chord
name = data_path + romanNumeral
if name.isupper():
return Image(source=name+'.png').texture
else:
return Image(source=name.upper()+'_.png').texture
def length(self):
return len(self.progression)
def on_update(self, dt):
return True
class Damage_Rect(InstructionGroup):
def __init__(self):
super(Damage_Rect, self).__init__()
self.was_hit = False
self.isfading = False
self.damage_rect = Rectangle(pos=(0,0), size=(Subwindow.width(),Window.height))
self.damage_color = Color(rgba=(1,0,0,0.0))
self.add(self.damage_color)
self.add(self.damage_rect)
def on_hit(self):
self.was_hit = True
def flash_rect(self, inc):
a = self.damage_color.rgba[3]
if a >= 0.55:
self.was_hit = False
self.isfading = True
elif a <= 0.0:
self.damage_color.rgba = (1,0,0,0.0)
self.isfading = False
if self.was_hit:
self.damage_color.rgba = (1, 0, 0, a+inc*2)
else:
self.damage_color.rgba = (1, 0, 0, a-inc)
def on_update(self, dt):
if self.was_hit or self.isfading:
self.flash_rect(0.07)
return True
class Sight_Line(InstructionGroup):
def __init__(self):
super(Sight_Line, self).__init__()
pts = [200 + random() * 200 for i in range(16)]
self.line = Line(points=pts, width=3)
self.add(Color(1,1,1))
self.add(self.line)
def on_update(self, dt):
return True
class Handler(InstructionGroup):
def __init__(self):
super(Handler, self).__init__()
self.key = Notes.C
self.audio_controller = None
self.time = 0.0
self.enemy_data = data
self.sightLine = Sight_Line()
# Handles and displays progressions near top of screen
self.tonalFlowChart = TonalFlowChart()
self.PM = ProgressionManager()
# Displays Damage rectangle when player is hit
self.dmg_rect = Damage_Rect()
# List of all objects in the game to be drawn
self.objects = []
# References to game elements interacted with
self.target = None
self.enemies = E_List()
self.background = Background()
self.foreground = Foreground(self.key)
self.player = Player()
# Add Instruction Groups to self
self.add(self.background)
self.add(self.enemies)
self.add(self.foreground)
self.add(self.player)
self.add(self.PM)
self.add(self.sightLine)
self.add(self.dmg_rect)
def include_audio(self, audio_controller):
self.audio_controller = audio_controller
def on_touch_down(self, touch):
if touch.pos[0] >= Subwindow.width():
self.player.rightHand.set_pos(touch.pos)
self.player.attacking = True
            self.try_fire()  # TODO: make separate touch_fire()
def on_touch_up(self, touch):
if touch.pos[0] >= Subwindow.width():
self.player.attacking = False
def on_touch_move(self, touch):
if touch.pos[0] < Subwindow.width():
self.player.leftHand.set_pos(touch.pos)
else:
self.player.rightHand.set_pos(touch.pos)
def on_update(self):
self.audio_controller.on_update()
dt = kivyClock.frametime
kill_list = []
for o in self.objects:
if o.on_update(dt) == False:
kill_list.append(o)
for o in kill_list:
self.remove(o)
# Reset any disabled buttons if valid to do so
for btn in self.foreground.buttons:
btn.on_update(dt)
if not self.player.is_attacking():
for btn in self.foreground.buttons:
pass
#btn.enable()
self.crosshair_on_enemy()
self.select_button()
self.try_fire()
self.add_enemies(self.time)
self.time += dt
def move_hand(self, hand, currentHand):
pos = LeapHelper.position_as_pixels(hand)
def checkBounds(x, y):
return 0 <= x and x <= Subwindow.width() and 0 <= y and y <= Window.height
if not checkBounds(*pos):
currentHand.release_flame()
currentHand.set_visible(False)
else:
currentHand.set_visible(True)
currentHand.set_pos(pos)
if not currentHand.isVisible:
            self.add(currentHand)  # Handler.add draws the object and tracks it in self.objects
currentHand.set_visible(True)
def crosshair_on_enemy(self):
# TODO: find points in some bounding box of the enemy
crosshair = self.player.leftHand.get_pos()
del_x = crosshair[0] - Subwindow.width()/2
del_y = crosshair[1] + 100000000
A = (Subwindow.width()/2, -100000000)
B = (Subwindow.width()/2 + 3*del_x, 3*del_y)
self.sightLine.line.points = ((50,50),(500,500)) #(A[0],A[1],B[0],B[1])
self.target = None
sorted_enemies = sorted(self.enemies.enemies, key = lambda x: x.cbrect.pos[1])
for e in sorted_enemies:
x = e.cbrect.pos[0]
y = e.cbrect.pos[1]
w = e.cbrect.size[0]
h = e.cbrect.size[1]
pts = [(x,y),(x+w,y),(x+w,y+h),(x,y+h),(x,y)]
cont = False
            for i in range(3):
C = pts[i]
D = pts[i+1]
if self.intersect(A,B,C,D):
e.lit()
e.on_target()
e.set_is_targeted(True)
self.target = e
cont = True
break
if cont:
break
for e in sorted_enemies:
if e != self.target:
e.un_lit()
e.set_is_targeted(False)
def select_button(self):
if self.player.is_attacking():
return
flame = self.get_flame()
        if flame is None:
return
flameX = flame.emitter_x
flameY = flame.emitter_y
for btn in self.foreground.buttons:
if not btn.is_enabled:
continue
x1, y1, x2, y2 = btn.get_boundaries()
if x1 <= flameX and flameX <= x2 and y1 <= flameY and flameY <= y2:
self.player.arm_weapon(btn)
active_button = btn
return
def try_fire(self):
if self.player.is_attacking() and self.target:
flame = self.get_flame()
active_button = self.player.get_button()
if not active_button:
return
note = active_button.get_note()
enemyKilled = self.target.on_hit(note.get_pitch())
active_button.disable()
self.player.unarm_weapon()
if enemyKilled:
chord = self.target.resolvedPitches
chordType, root = Chord.get_chord_type(self.key, chord)
scaleDeg = MusicHelper.get_scale_degree(self.key, root)
if not self.tonalFlowChart.is_valid_progression(scaleDeg, self.PM.prev_scale_degree):
self.PM.clear()
self.PM.add(scaleDeg)
# Increment player streak
self.player.set_score_mult(self.PM.length())
self.player.score_up()
def ccw(self, A,B,C):
return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])
# Return true if line segments AB and CD intersect
def intersect(self, A,B,C,D):
return self.ccw(A,C,D) != self.ccw(B,C,D) and self.ccw(A,B,C) != self.ccw(A,B,D)
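    # Worked example: AB from (0,0) to (2,2) crosses CD from (0,2) to
    # (2,0). A and B fall on opposite sides of CD, so
    # ccw(A,C,D) != ccw(B,C,D); likewise C and D straddle AB, so
    # intersect returns True.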
def add_enemies(self, time):
remove_list = []
for e in self.enemy_data:
if e[0] <= time:
E = Enemy(e[1], self.key, audio_callback = self.play_enemy_sound, clear_prog = self.PM.clear, hurt_player_callback=self.player.decrement_health, dmg_rect_on_hit_callback=self.dmg_rect.on_hit, add_sound=self.audio_controller.add_enemy_sound, remove_sound=self.audio_controller.remove_enemy_sound)
self.enemies.add(E)
remove_list.append(e)
for r in remove_list:
self.enemy_data.remove(r)
def play_enemy_sound(self, pitches):
for pitch in pitches:
self.audio_controller.play_sfx(pitch)
def get_flame(self):
return self.player.get_flame()
def add(self, obj):
super(Handler, self).add(obj)
self.objects.append(obj)
def remove(self, obj):
super(Handler, self).remove(obj)
self.objects.remove(obj)
|
{"hexsha": "51f3223b910e62715e229f32a2aea19878e09cfc", "size": 12024, "ext": "py", "lang": "Python", "max_stars_repo_path": "project/Handler.py", "max_stars_repo_name": "osmidy/Dischord", "max_stars_repo_head_hexsha": "3c3802eb4917adb9384256d8a0c7ba4f123fd166", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project/Handler.py", "max_issues_repo_name": "osmidy/Dischord", "max_issues_repo_head_hexsha": "3c3802eb4917adb9384256d8a0c7ba4f123fd166", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project/Handler.py", "max_forks_repo_name": "osmidy/Dischord", "max_forks_repo_head_hexsha": "3c3802eb4917adb9384256d8a0c7ba4f123fd166", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3985330073, "max_line_length": 311, "alphanum_fraction": 0.5763473054, "include": true, "reason": "import numpy", "num_tokens": 2848}
|
SUBROUTINE RU_PLVL ( field, above, level, pres, iret )
C************************************************************************
C* RU_PLVL *
C* *
C* This subroutine gets the level number and pressure from a group *
C* which is in the form LLPPP. LL must be the same integer, repeated; *
C* for example, 11 corresponds to level 1. *
C* *
C* RU_PLVL ( FIELD, ABOVE, LEVEL, PRES, IRET ) *
C* *
C* Input parameters: *
C* FIELD CHAR* Input field *
C* ABOVE LOGICAL Above 100 mb flag *
C* *
C* Output parameters: *
C* LEVEL INTEGER Level number *
C* -1 = level not found *
C* 0 = valid surface level *
C* 1 - 9 = valid levels *
C* PRES REAL Pressure *
C* IRET INTEGER Return code *
C* 0 = normal return *
C** *
C* Log: *
C* M. desJardins/GSFC 6/86 *
C************************************************************************
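C
C       For example, with ABOVE = .FALSE. the group '11925' gives
C       LEVEL = 1 and PRES = 925.0 mb; with ABOVE = .TRUE. the group
C       '22250' gives LEVEL = 2 and PRES = 25.0 mb (tenths of mb).
C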
INCLUDE 'GEMPRM.PRM'
C*
LOGICAL above
CHARACTER*(*) field
CHARACTER clev(10)*2
CHARACTER cc*2
DATA clev / '00','11','22','33','44','55','66',
+ '77','88','99' /
C------------------------------------------------------------------------
iret = 0
level = -1
pres = RMISSD
C
C*	Check first two characters for level number.
C
cc = field ( 1:2 )
DO i = 1, 10
IF ( cc .eq. clev (i) ) level = i - 1
END DO
C
C* If a level was found, decode the pressure.
C
IF ( level .ne. -1 ) THEN
CALL ST_INTG ( field (3:5), ipres, ier )
C
C* Save the pressure if it could be decoded.
C
IF ( ier .eq. 0 ) THEN
C
C* Pressures above 100 mb are in tenths; below 100 mb are
C* in units.
C
IF ( above ) THEN
pres = FLOAT ( ipres ) / 10.
ELSE
pres = FLOAT ( ipres )
IF ( pres .lt. 100. ) pres = pres + 1000.
END IF
C
C* If the pressure is missing, reset the level to -1.
C
ELSE
level = -1
END IF
END IF
C*
RETURN
END
|
{"hexsha": "4675a9b128e136c42783e6f39f4a1656605aef36", "size": 1936, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/bridge/ru/ruplvl.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/bridge/ru/ruplvl.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/bridge/ru/ruplvl.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 25.8133333333, "max_line_length": 73, "alphanum_fraction": 0.5020661157, "num_tokens": 663}
|
# David R Thompson
import argparse, sys, os
import numpy as np
import pylab as plt
from copy import deepcopy
from glob import glob
from spectral.io import envi
from scipy.stats import norm
from scipy.linalg import solve, inv
from astropy import modeling
from sklearn.linear_model import RANSACRegressor
from scipy.optimize import minimize
from scipy.interpolate import BSpline,interp1d
from skimage.filters import threshold_otsu
from scipy.ndimage import gaussian_filter
import json
from numba import jit
from fixghost import fix_ghost
from fixghostraster import build_ghost_matrix, build_ghost_blur
from fpa import FPA
import ray
rayargs={'num_cpus':40}
ray.init(**rayargs)
def find_header(infile):
if os.path.exists(infile+'.hdr'):
return infile+'.hdr'
elif os.path.exists('.'.join(infile.split('.')[:-1])+'.hdr'):
return '.'.join(infile.split('.')[:-1])+'.hdr'
else:
raise FileNotFoundError('Did not find header file')
def randomize_ghost_config(config, seed):
new_config = deepcopy(config)
if seed is None:
return config
rng = np.random.default_rng(int(seed))
slopes = np.array([f['intensity_slope'] for f in config['orders']])
offsets = np.array([f['intensity_offset'] for f in config['orders']])
scalings = np.array([f['scaling'] for f in config['orders']])
for i in range(len(config['orders'])):
new_config['orders'][i]['scaling'] = \
rng.random(None,float) * (scalings.max()-scalings.min()) + scalings.min()
new_config['orders'][i]['intensity_offset'] = \
rng.random(None,float) * (offsets.max()-offsets.min()) + offsets.min()
new_config['orders'][i]['intensity_slope'] = \
rng.random(None,float) * (slopes.max()-slopes.min()) + slopes.min()
return new_config
def serialize_ghost_config(config, coarse):
x, bounds = [],[]
if coarse==1:
for i in range(len(config['orders'])):
x.append(config['orders'][i]['scaling'])
bounds.append((0,9999))
elif coarse==2:
for i in range(len(config['orders'])):
x.append(config['orders'][i]['intensity_slope'])
bounds.append((-0.001,0.001))
x.append(config['orders'][i]['intensity_offset'])
bounds.append((-0.1,0.1))
else:
for zone in config['psf_zones']:
for psf in zone['psfs']:
x.append(psf['sigma'])
bounds.append((0,100))
x.append(psf['peak'])
bounds.append((0,5))
return x,bounds
def deserialize_ghost_config(x, config, coarse):
ghost_config = deepcopy(config)
if coarse==1:
for i in range(len(ghost_config['orders'])):
ghost_config['orders'][i]['scaling'] = x[i]
elif coarse==2:
for i in range(len(ghost_config['orders'])):
ghost_config['orders'][i]['intensity_slope'] = x[i*2]
ghost_config['orders'][i]['intensity_offset'] = x[i*2+1]
else:
ind = 0
for zone in range(len(ghost_config['psf_zones'])):
for psf in range(len(ghost_config['psf_zones'][zone]['psfs'])):
ghost_config['psf_zones'][zone]['psfs'][psf]['sigma'] = x[ind]
ghost_config['psf_zones'][zone]['psfs'][psf]['peak'] = x[ind+1]
ind = ind+2
return ghost_config
def frame_error(frame, fpa, ghostmap, blur, center):
try:
fixed = fix_ghost(frame, fpa, ghostmap, blur=blur, center=center, plot=False)
except IndexError:
# Something is out of bounds
return 9e99
half = int(round(fpa.native_columns/2))
max_left = np.percentile(frame[:,:half],99)
max_right = np.percentile(frame[:,half:],99)
if max_left>max_right:
return np.mean(pow(fixed[:,half:],2))# / np.mean(pow(frame[:,half:],2))
else:
return np.mean(pow(fixed[:,:half],2))# / np.mean(pow(frame[:,:half],2))
def err(x, fpa, frames, ghost_config, coarse):
new_config = deserialize_ghost_config(x, ghost_config, coarse)
ghostmap = build_ghost_matrix(new_config, fpa)
blur = build_ghost_blur(new_config, fpa)
center = new_config['center']
jobs = [frame_error(frame, fpa, ghostmap, blur,
center) for frame in frames]
errs = np.array(jobs)
print(sum(errs))
#for i,err in enumerate(jobs):
# print('frame %i error %10.2f'%(i,err))
return sum(errs)
@ray.remote
def partial(x, i, fpa, frames, ghost_config, base_cost, coarse):
x_perturb = x.copy()
if coarse:
eps = 1e-7
else:
eps = 0.001
x_perturb[i] = x[i] + eps
perturb_cost = err(x_perturb, fpa, frames, ghost_config, coarse)
return (perturb_cost - base_cost)/eps
def jac(x, fpa, frames, ghost_config, coarse):
base_cost = err(x,fpa, frames,ghost_config, coarse)
jobs = [partial.remote(x,i,fpa,frames,ghost_config,base_cost, coarse) for i in range(len(x))]
derivs = ray.get(jobs)
return np.array(derivs)
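# jac approximates the gradient with one-sided finite differences,
# d(err)/dx_i ~ (err(x + eps*e_i) - err(x)) / eps, dispatching each
# perturbed coordinate as a separate ray task so the partials are
# evaluated in parallel.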
def main():
description = "Optimize ghost model"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('ghost_config')
parser.add_argument('--config', default=None)
parser.add_argument('--seed', default=None)
parser.add_argument('--method', default='TNC')
parser.add_argument('input',nargs='+')
parser.add_argument('output')
args = parser.parse_args()
fpa = FPA(args.config)
frames = []
for infile in args.input:
I = envi.open(find_header(infile))
frame = np.squeeze(I.load())
if frame.shape[0] > frame.shape[1]:
frame = frame.T
frames.append(frame)
frames = np.array(frames)
with open(args.ghost_config,'r') as fin:
ghost_config = json.load(fin)
ghost_config = randomize_ghost_config(ghost_config, args.seed)
# We perform coordinate descent on different state vector subspaces
for coarse in [1,0,2,1,0,2,1,0,2]:
# nonlinear solution
x0, bounds = serialize_ghost_config(ghost_config, coarse=coarse)
best = minimize(err, x0, args=(fpa, frames, ghost_config, coarse), \
jac=jac, bounds=bounds, method=args.method)
best_config = deserialize_ghost_config(best.x, ghost_config, \
coarse=coarse)
# Print the result to screen
print(best.nit,'iterations')
print('final error:',err(best.x, fpa, frames, ghost_config, coarse=coarse))
print(best.message)
# Record final error
xbest, bounds = serialize_ghost_config(best_config, coarse=2)
best_config['final_error'] = err(xbest, fpa, frames, ghost_config, coarse=2)
# Write provisional configuration to the output file
with open(args.output,'w') as fout:
fout.write(json.dumps(best_config,indent=2))
# Initialize for the next round
ghost_config = best_config
if __name__ == '__main__':
main()
|
{"hexsha": "6b8a84177bc57b204d6cb3209ed04b287754ed46", "size": 6859, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/optimizeghost.py", "max_stars_repo_name": "emit-sds/emit-sds-l1b", "max_stars_repo_head_hexsha": "be5307fe6821a043971becdd33609b4cf89b1974", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/optimizeghost.py", "max_issues_repo_name": "emit-sds/emit-sds-l1b", "max_issues_repo_head_hexsha": "be5307fe6821a043971becdd33609b4cf89b1974", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/optimizeghost.py", "max_forks_repo_name": "emit-sds/emit-sds-l1b", "max_forks_repo_head_hexsha": "be5307fe6821a043971becdd33609b4cf89b1974", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1243781095, "max_line_length": 98, "alphanum_fraction": 0.6464499198, "include": true, "reason": "import numpy,from scipy,from numba,from astropy", "num_tokens": 1759}
|
c { dg-do run }
c { dg-options "-std=legacy" }
c
c Produced a link error through not eliminating the unused statement
c function after 1998-05-15 change to gcc/toplev.c. It's in
c `execute' since it needs to link.
c Fixed by 1998-05-23 change to f/com.c.
values(i,j) = val((i-1)*n+j)
end
|
{"hexsha": "855b9a442d70c1f826771565fffbcb9755063eb6", "size": 317, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/g77/980520-1.f", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_issues_repo_path": "tests/CompileTests/Fortran_tests/gfortranTestSuite/gfortran.dg/g77/980520-1.f", "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_forks_repo_path": "tests/CompileTests/Fortran_tests/gfortranTestSuite/gfortran.dg/g77/980520-1.f", "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 146, "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "avg_line_length": 31.7, "max_line_length": 72, "alphanum_fraction": 0.643533123, "num_tokens": 101}
|
# -*- coding: utf-8 -*-
"""
computeKey
computes the musical key of an input audio file
Args:
afAudioData: array with floating point audio data.
f_s: sample rate
afWindow: FFT window of length iBlockLength (default: hann)
iBlockLength: internal block length (default: 4096 samples)
iHopLength: internal hop length (default: 2048 samples)
Returns:
key string
"""
import numpy as np
from scipy.signal import spectrogram
from pyACA.ToolComputeHann import ToolComputeHann
from pyACA.FeatureSpectralPitchChroma import FeatureSpectralPitchChroma
from pyACA.ToolPreprocAudio import ToolPreprocAudio
from pyACA.ToolReadAudio import ToolReadAudio
def computeKey(afAudioData, f_s, afWindow=None, iBlockLength=4096, iHopLength=2048):
# compute window function for FFT
if afWindow is None:
afWindow = ToolComputeHann(iBlockLength)
assert(afWindow.shape[0] == iBlockLength), "parameter error: invalid window dimension"
# key names
cKeyNames = np.array(['C Maj', 'C# Maj', 'D Maj', 'D# Maj', 'E Maj', 'F Maj', 'F# Maj', 'G Maj', 'G# Maj', 'A Maj', 'A# Maj', 'B Maj',
'c min', 'c# min', 'd min', 'd# min', 'e min', 'f min', 'f# min', 'g min', 'g# min', 'a min', 'a# min', 'b min'])
# template pitch chroma (Krumhansl major/minor), normalized to a sum of 1
t_pc = np.array([[6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88],
[6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17]])
t_pc = t_pc / t_pc.sum(axis=1, keepdims=True)
# pre-processing
afAudioData = ToolPreprocAudio(afAudioData, iBlockLength)
# in the real world, we would do this block by block...
[f, t, X] = spectrogram(afAudioData,
f_s,
afWindow,
iBlockLength,
iBlockLength - iHopLength,
iBlockLength,
False,
True,
'spectrum')
# scale the same as for matlab
X = np.sqrt(X / 2)
# compute instantaneous pitch chroma
v_pc = FeatureSpectralPitchChroma(X, f_s)
# average pitch chroma
v_pc = v_pc.mean(axis=1)
# compute manhattan distances for modes (major and minor)
d = np.zeros(t_pc.shape)
v_pc = np.concatenate((v_pc, v_pc), axis=0).reshape(2, 12)
for i in range(0, 12):
d[:, i] = np.sum(np.abs(v_pc - np.roll(t_pc, i, axis=1)), axis=1)
# get unwrapped key index
iKeyIdx = d.argmin()
cKey = cKeyNames[iKeyIdx]
    return cKey
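# Illustrative sanity check: a pitch chroma concentrated on the pitch
# classes C, E and G should minimize the Manhattan distance against the
# un-rotated major template, so computeKey would report 'C Maj'.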
def computeKeyCl(cPath):
[f_s, afAudioData] = ToolReadAudio(cPath)
# afAudioData = np.sin(2*np.pi * np.arange(f_s*1)*440./f_s)
cKey = computeKey(afAudioData, f_s)
print("\ndetected key: ", cKey)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Compute key of wav file')
parser.add_argument('--infile', metavar='path', required=False,
help='path to input audio file')
args = parser.parse_args()
cPath = args.infile
# only for debugging
if not cPath:
cPath = "c:/temp/test.wav"
# call the function
computeKeyCl(cPath)
|
{"hexsha": "230e161d8822436d9c22ff4a9faef18a5b6893b8", "size": 3311, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyACA/computeKey.py", "max_stars_repo_name": "ruohoruotsi/pyACA", "max_stars_repo_head_hexsha": "339e9395b65a217aa5965638af941b32d5c95454", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 81, "max_stars_repo_stars_event_min_datetime": "2019-07-08T15:48:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T22:52:25.000Z", "max_issues_repo_path": "pyACA/computeKey.py", "max_issues_repo_name": "ruohoruotsi/pyACA", "max_issues_repo_head_hexsha": "339e9395b65a217aa5965638af941b32d5c95454", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2019-10-03T19:20:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T17:20:40.000Z", "max_forks_repo_path": "pyACA/computeKey.py", "max_forks_repo_name": "ruohoruotsi/pyACA", "max_forks_repo_head_hexsha": "339e9395b65a217aa5965638af941b32d5c95454", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2019-07-18T23:50:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T14:59:35.000Z", "avg_line_length": 32.145631068, "max_line_length": 138, "alphanum_fraction": 0.6019329508, "include": true, "reason": "import numpy,from scipy", "num_tokens": 979}
|
import cv2
import numpy as np
import sys
from libs.real.get_map import rotateImage
def write_files(lower, upper, color_name):
np.save('color_values/' + 'lower_' + color_name, lower)
np.save('color_values/' + 'upper_' + color_name, upper)
def load_map_setup():
map_img = cv2.imread('map_setup/map.png')
f = open('map_setup/rotate_angle', 'r')
map_angle = float(f.read())
f.close()
f = open('map_setup/br', 'r')
map_br = f.read()
map_br = tuple(map(int, map_br[1:-1].split(',')))
f.close()
f = open('map_setup/tl', 'r')
map_tl = f.read()
map_tl = tuple(map(int, map_tl[1:-1].split(',')))
f.close()
return map_img, map_angle, map_tl, map_br
def get_frame():
global camera, map_angle, map_tl, map_br
ret, frame = camera.read()
if ret:
frame = cv2.resize(frame,(640,480))
frame = rotateImage(frame,map_angle, map_tl)
frame = frame[map_tl[1]:map_br[1],map_tl[0]:map_br[0]]
kernel = np.ones((5,5),np.float32)/25
frame = cv2.filter2D(frame,-1,kernel)
return frame
else:
return 1
def new_info(current):
global lower, upper
for i in range(3):
lower[i] = int(current[i])-10 if current[i] < lower[i] else lower[i]
upper[i] = int(current[i])+10 if current[i] > upper[i] else upper[i]
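# Worked example for new_info: with lower = upper = [120, 120, 120] and a
# dragged-over pixel of [100, 130, 120], lower widens to [90, 120, 120]
# (100 - 10) and upper widens to [120, 140, 120] (130 + 10).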
def colorSetup(event,x,y,flags,param):
global lower,upper,on,control
B = frame[y,x,0]
G = frame[y,x,1]
R = frame[y,x,2]
if event == 1: # Mouse clicked
if control == 0:
lower = np.array([B,G,R])
upper = np.array([B,G,R])
on = 1
control = 1
elif event == 4: # mouse lifted
on = 0
elif event == 0 and on: # Dragging
current = [B,G,R]
new_info(current)
cv2.namedWindow('colorSetup')
cv2.setMouseCallback('colorSetup',colorSetup)
camera = cv2.VideoCapture(0)
color_name = sys.argv[1]
_, map_angle, map_tl, map_br = load_map_setup()
lower = np.array([120,120,120])
upper = np.array([120,120,120])
on = 0
control = 0
while(True):
frame = 1
while (isinstance(frame,int)):
frame = get_frame()
framecp = frame.copy()
c1 = cv2.inRange(frame, lower, upper)
framecp[c1 > 0] = [0,100,100]
cv2.imshow('colorSetup', frame)
cv2.imshow('masks', framecp)
if cv2.waitKey(1) == 27:
        write_files(lower, upper, color_name)
break
camera.release()
cv2.destroyAllWindows()
|
{"hexsha": "ecdf522f2c78f7b2afd8ae84e9afbdba396e45db", "size": 2454, "ext": "py", "lang": "Python", "max_stars_repo_path": "real_setup_colors.py", "max_stars_repo_name": "skkywalker/tcc", "max_stars_repo_head_hexsha": "5c0faf6dd6c4a66fb7774aae7caf33c5af8f7721", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "real_setup_colors.py", "max_issues_repo_name": "skkywalker/tcc", "max_issues_repo_head_hexsha": "5c0faf6dd6c4a66fb7774aae7caf33c5af8f7721", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "real_setup_colors.py", "max_forks_repo_name": "skkywalker/tcc", "max_forks_repo_head_hexsha": "5c0faf6dd6c4a66fb7774aae7caf33c5af8f7721", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.967032967, "max_line_length": 76, "alphanum_fraction": 0.6059494703, "include": true, "reason": "import numpy", "num_tokens": 730}
|